focal_method | test_case
---|---
@Override
public final List<? extends FileBasedSource<T>> split(
long desiredBundleSizeBytes, PipelineOptions options) throws Exception {
    // This implementation of method split is provided to simplify subclasses. Here we
    // split a FileBasedSource based on a file pattern into FileBasedSources based on full single
    // files. For files that can be seeked efficiently, we further split those single-file
    // FileBasedSources into FileBasedSources based on sub-ranges of single files.
String fileOrPattern = fileOrPatternSpec.get();
if (mode == Mode.FILEPATTERN) {
long startTime = System.currentTimeMillis();
List<Metadata> expandedFiles =
FileSystems.match(fileOrPattern, emptyMatchTreatment).metadata();
List<FileBasedSource<T>> splitResults = new ArrayList<>(expandedFiles.size());
for (Metadata metadata : expandedFiles) {
FileBasedSource<T> split = createForSubrangeOfFile(metadata, 0, metadata.sizeBytes());
verify(
split.getMode() == Mode.SINGLE_FILE_OR_SUBRANGE,
"%s.createForSubrangeOfFile must return a source in mode %s",
split,
Mode.SINGLE_FILE_OR_SUBRANGE);
// The split is NOT in FILEPATTERN mode, so we can call its split without fear
// of recursion. This will break a single file into multiple splits when the file is
// splittable and larger than the desired bundle size.
splitResults.addAll(split.split(desiredBundleSizeBytes, options));
}
LOG.info(
"Splitting filepattern {} into bundles of size {} took {} ms "
+ "and produced {} files and {} bundles",
fileOrPattern,
desiredBundleSizeBytes,
System.currentTimeMillis() - startTime,
expandedFiles.size(),
splitResults.size());
return splitResults;
} else {
FileSystems.reportSourceLineage(getSingleFileMetadata().resourceId());
if (isSplittable()) {
@SuppressWarnings("unchecked")
List<FileBasedSource<T>> splits =
(List<FileBasedSource<T>>) super.split(desiredBundleSizeBytes, options);
return splits;
} else {
LOG.debug(
"The source for file {} is not split into sub-range based sources since "
+ "the file is not seekable",
fileOrPattern);
return ImmutableList.of(this);
}
}
} | @Test
public void testSplittingFailsOnEmptyFileExpansion() throws Exception {
PipelineOptions options = PipelineOptionsFactory.create();
String missingFilePath = tempFolder.newFolder().getAbsolutePath() + "/missing.txt";
TestFileBasedSource source = new TestFileBasedSource(missingFilePath, Long.MAX_VALUE, null);
thrown.expect(FileNotFoundException.class);
thrown.expectMessage(missingFilePath);
source.split(1234, options);
} |
public static CopyFilter getCopyFilter(Configuration conf) {
String filtersClassName = conf
.get(DistCpConstants.CONF_LABEL_FILTERS_CLASS);
if (filtersClassName != null) {
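      // A configured filter class must declare a Configuration-arg constructor;
      // any reflection failure is logged and rethrown as a RuntimeException.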
try {
Class<? extends CopyFilter> filtersClass = conf
.getClassByName(filtersClassName)
.asSubclass(CopyFilter.class);
filtersClassName = filtersClass.getName();
Constructor<? extends CopyFilter> constructor = filtersClass
.getDeclaredConstructor(Configuration.class);
return constructor.newInstance(conf);
} catch (Exception e) {
LOG.error(DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG +
filtersClassName, e);
throw new RuntimeException(
DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG +
filtersClassName, e);
}
} else {
return getDefaultCopyFilter(conf);
}
} | @Test
public void testGetCopyFilterNonExistingClass() throws Exception {
final String filterName =
"org.apache.hadoop.tools.RegexpInConfigurationWrongFilter";
Configuration configuration = new Configuration(false);
configuration.set(DistCpConstants.CONF_LABEL_FILTERS_CLASS, filterName);
intercept(RuntimeException.class,
DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filterName,
() -> CopyFilter.getCopyFilter(configuration));
} |
@Override
public void executeUpdate(final RegisterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
checkDataSource(sqlStatement, contextManager);
Map<String, DataSourcePoolProperties> propsMap = DataSourceSegmentsConverter.convert(database.getProtocolType(), sqlStatement.getStorageUnits());
if (sqlStatement.isIfNotExists()) {
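        // IF NOT EXISTS: silently skip units that are already registered or that collide with logical data source names.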
Collection<String> currentStorageUnits = getCurrentStorageUnitNames(contextManager);
Collection<String> logicalDataSourceNames = getLogicalDataSourceNames();
propsMap.keySet().removeIf(currentStorageUnits::contains);
propsMap.keySet().removeIf(logicalDataSourceNames::contains);
}
if (propsMap.isEmpty()) {
return;
}
validateHandler.validate(propsMap, getExpectedPrivileges(sqlStatement));
try {
contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().registerStorageUnits(database.getName(), propsMap);
} catch (final SQLException | ShardingSphereExternalException ex) {
throw new StorageUnitsOperateException("register", propsMap.keySet(), ex);
}
} | @Test
void assertExecuteUpdateSuccess() {
assertDoesNotThrow(() -> executor.executeUpdate(createRegisterStorageUnitStatement(), mock(ContextManager.class, RETURNS_DEEP_STUBS)));
} |
@Override
public String query(final String key) {
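    // Look up the value column by key; returns an empty string when the key is absent or the query fails.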
try (
Connection connection = dataSource.getConnection();
PreparedStatement preparedStatement = connection.prepareStatement(repositorySQL.getSelectByKeySQL())) {
preparedStatement.setString(1, key);
try (ResultSet resultSet = preparedStatement.executeQuery()) {
if (resultSet.next()) {
return resultSet.getString("value");
}
}
} catch (final SQLException ex) {
log.error("Get {} data by key: {} failed", getType(), key, ex);
}
return "";
} | @Test
void assertGetFailure() throws SQLException {
when(mockJdbcConnection.prepareStatement(repositorySQL.getSelectByKeySQL())).thenReturn(mockPreparedStatement);
when(mockPreparedStatement.executeQuery()).thenReturn(mockResultSet);
when(mockResultSet.next()).thenReturn(false);
String actual = repository.query("key");
assertThat(actual, is(""));
} |
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
throw new UnsupportedOperationException(getClass().getName() + " is only used locally!");
} | @Test(expected = UnsupportedOperationException.class)
public void testReadInternal() throws Exception {
operation.readInternal(null);
} |
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
} | @Test
public void testShowMaterializedViewFromUnknownDatabase() throws DdlException, AnalysisException {
ShowMaterializedViewsStmt stmt = new ShowMaterializedViewsStmt("emptyDb", (String) null);
expectedEx.expect(SemanticException.class);
expectedEx.expectMessage("Unknown database 'emptyDb'");
ShowExecutor.execute(stmt, ctx);
} |
@VisibleForTesting
@SuppressWarnings("WeakerAccess")
LookupResult parseResponse(@Nullable ResponseBody body) {
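    // The OTX pulse count becomes the single value; the whole JSON document becomes the multi-value map.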
if (body != null) {
try {
final JsonNode json = OBJECT_MAPPER.readTree(body.string());
return LookupResult.withoutTTL()
.single(json.path("pulse_info").path("count").asLong(0))
.multiValue(OBJECT_MAPPER.convertValue(json, MAP_TYPE_REFERENCE))
.build();
} catch (IOException e) {
LOG.warn("Couldn't parse OTX response as JSON", e);
}
}
return LookupResult.empty();
} | @Test
public void parseResponse() throws Exception {
final URL url = Resources.getResource(getClass(), "otx-IPv4-response.json");
final ResponseBody body = ResponseBody.create(null, Resources.toByteArray(url));
final LookupResult result = otxDataAdapter.parseResponse(body);
assertThat(result.singleValue()).isEqualTo(0L);
assertThat(result.multiValue()).isNotNull();
assertThat(requireNonNull(result.multiValue()).get("country_name")).isEqualTo("Ireland");
} |
public <T extends Notification> T getFromQueue() {
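    // Fetch the single oldest queued notification, delete it from the queue, and convert it;
    // returns null when the queue is empty.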
int batchSize = 1;
List<NotificationQueueDto> notificationDtos = dbClient.notificationQueueDao().selectOldest(batchSize);
if (notificationDtos.isEmpty()) {
return null;
}
dbClient.notificationQueueDao().delete(notificationDtos);
return convertToNotification(notificationDtos);
} | @Test
public void shouldGetFromQueueAndDelete() {
Notification notification = new Notification("test");
NotificationQueueDto dto = NotificationQueueDto.toNotificationQueueDto(notification);
List<NotificationQueueDto> dtos = Arrays.asList(dto);
when(notificationQueueDao.selectOldest(1)).thenReturn(dtos);
assertThat(underTest.<Notification>getFromQueue()).isNotNull();
InOrder inOrder = inOrder(notificationQueueDao);
inOrder.verify(notificationQueueDao).selectOldest(1);
inOrder.verify(notificationQueueDao).delete(dtos);
} |
public static KeyPair recoverKeyPair(byte[] encoded) throws NoSuchAlgorithmException,
InvalidKeySpecException {
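    // Resolve the algorithm name from the PKCS#8 OID, rebuild the private key,
    // then derive the matching public key from it.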
final String algo = getAlgorithmForOid(getOidFromPkcs8Encoded(encoded));
final KeySpec privKeySpec = new PKCS8EncodedKeySpec(encoded);
final KeyFactory kf = KeyFactory.getInstance(algo);
final PrivateKey priv = kf.generatePrivate(privKeySpec);
return new KeyPair(recoverPublicKey(kf, priv), priv);
} | @Test
public void recoverKeyPair_Rsa() throws Exception {
KeyPair kp = PubkeyUtils.recoverKeyPair(RSA_KEY_PKCS8);
RSAPublicKey pubKey = (RSAPublicKey) kp.getPublic();
assertEquals(RSA_KEY_N, pubKey.getModulus());
assertEquals(RSA_KEY_E, pubKey.getPublicExponent());
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testIdempotentUnknownProducerHandlingWhenRetentionLimitReached() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0));
// Send first ProduceRequest
Future<RecordMetadata> request1 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
sendIdempotentProducerResponse(0, tp0, Errors.NONE, 1000L, 10L);
sender.runOnce(); // receive the response.
assertTrue(request1.isDone());
assertEquals(1000L, request1.get().offset());
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertEquals(OptionalLong.of(1000L), transactionManager.lastAckedOffset(tp0));
// Send second ProduceRequest, a single batch with 2 records.
appendToAccumulator(tp0);
Future<RecordMetadata> request2 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(3, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertFalse(request2.isDone());
sendIdempotentProducerResponse(1, tp0, Errors.UNKNOWN_PRODUCER_ID, -1L, 1010L);
sender.runOnce(); // receive response 0, should be retried since the logStartOffset > lastAckedOffset.
sender.runOnce(); // bump epoch and retry request
// We should have reset the sequence number state of the partition because the state was lost on the broker.
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertFalse(request2.isDone());
assertTrue(client.hasInFlightRequests());
assertEquals((short) 1, transactionManager.producerIdAndEpoch().epoch);
// resend the request. Note that the expected sequence is 0, since we have lost producer state on the broker.
sendIdempotentProducerResponse(0, tp0, Errors.NONE, 1011L, 1010L);
sender.runOnce(); // receive response 1
assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertFalse(client.hasInFlightRequests());
assertTrue(request2.isDone());
assertEquals(1012L, request2.get().offset());
assertEquals(OptionalLong.of(1012L), transactionManager.lastAckedOffset(tp0));
} |
@Override
public org.apache.parquet.hadoop.api.ReadSupport.ReadContext init(
Configuration configuration, Map<String, String> keyValueMetaData, MessageType fileSchema) {
return init(new HadoopParquetConfiguration(configuration), keyValueMetaData, fileSchema);
} | @Test
public void testInitWithPartialSchema() {
GroupReadSupport s = new GroupReadSupport();
Configuration configuration = new Configuration();
Map<String, String> keyValueMetaData = new HashMap<String, String>();
MessageType fileSchema = MessageTypeParser.parseMessageType(fullSchemaStr);
MessageType partialSchema = MessageTypeParser.parseMessageType(partialSchemaStr);
configuration.set(ReadSupport.PARQUET_READ_SCHEMA, partialSchemaStr);
ReadSupport.ReadContext context = s.init(configuration, keyValueMetaData, fileSchema);
assertEquals(context.getRequestedSchema(), partialSchema);
} |
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
} | @Test
public void testSiblings() {
Reader reader = new Reader(new SwaggerConfiguration().openAPI(new OpenAPI()).openAPI31(true));
OpenAPI openAPI = reader.read(SiblingsResource.class);
String yaml = "openapi: 3.1.0\n" +
"paths:\n" +
" /test:\n" +
" get:\n" +
" operationId: getCart\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" '*/*':\n" +
" schema:\n" +
" $ref: '#/components/schemas/Pet'\n" +
"components:\n" +
" schemas:\n" +
" Category:\n" +
" description: parent\n" +
" properties:\n" +
" id:\n" +
" type: integer\n" +
" format: int64\n" +
" Pet:\n" +
" description: Pet\n" +
" properties:\n" +
" category:\n" +
" $ref: '#/components/schemas/Category'\n" +
" description: child\n";
SerializationMatchers.assertEqualsToYaml31(openAPI, yaml);
} |
@Override
public void declareResourceRequirements(ResourceRequirements resourceRequirements) {
synchronized (lock) {
checkNotClosed();
if (isConnected()) {
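            // Only submit while connected; the submission is retried with delays between 1 ms and 10 s.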
currentResourceRequirements = resourceRequirements;
triggerResourceRequirementsSubmission(
Duration.ofMillis(1L),
Duration.ofMillis(10000L),
currentResourceRequirements);
}
}
} | @Test
void testDeclareResourceRequirementsSendsRequirementsIfConnected() {
final DeclareResourceRequirementServiceConnectionManager
declareResourceRequirementServiceConnectionManager =
createResourceManagerConnectionManager();
final CompletableFuture<ResourceRequirements> declareResourceRequirementsFuture =
new CompletableFuture<>();
declareResourceRequirementServiceConnectionManager.connect(
resourceRequirements -> {
declareResourceRequirementsFuture.complete(resourceRequirements);
return CompletableFuture.completedFuture(Acknowledge.get());
});
final ResourceRequirements resourceRequirements = createResourceRequirements();
declareResourceRequirementServiceConnectionManager.declareResourceRequirements(
resourceRequirements);
assertThat(declareResourceRequirementsFuture.join()).isEqualTo(resourceRequirements);
} |
@Override
public Duration convert(String source) {
try {
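      // Accept either an ISO-8601 duration (e.g. PT10S) or the simple form (e.g. 10s);
      // any parse failure is wrapped uniformly below.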
if (ISO8601.matcher(source).matches()) {
return Duration.parse(source);
}
Matcher matcher = SIMPLE.matcher(source);
Assert.state(matcher.matches(), "'" + source + "' is not a valid duration");
long amount = Long.parseLong(matcher.group(1));
ChronoUnit unit = getUnit(matcher.group(2));
return Duration.of(amount, unit);
} catch (Exception ex) {
throw new IllegalStateException("'" + source + "' is not a valid duration", ex);
}
} | @Test
public void convertWhenSimpleSecondsShouldReturnDuration() {
assertThat(convert("10s")).isEqualTo(Duration.ofSeconds(10));
assertThat(convert("10S")).isEqualTo(Duration.ofSeconds(10));
assertThat(convert("+10s")).isEqualTo(Duration.ofSeconds(10));
assertThat(convert("-10s")).isEqualTo(Duration.ofSeconds(-10));
} |
@Override
public void encode(final ChannelHandlerContext context, final DatabasePacket message, final ByteBuf out) {
MySQLPacketPayload payload = new MySQLPacketPayload(prepareMessageHeader(out).markWriterIndex(), context.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get());
try {
message.write(payload);
// CHECKSTYLE:OFF
} catch (final RuntimeException ex) {
// CHECKSTYLE:ON
out.resetWriterIndex();
new MySQLErrPacket(new UnknownSQLException(ex).toSQLException()).write(payload);
} finally {
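            // Payloads below the maximum MySQL packet length go out as a single packet; larger ones are split.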
if (out.readableBytes() - PAYLOAD_LENGTH - SEQUENCE_LENGTH < MAX_PACKET_LENGTH) {
updateMessageHeader(out, context.channel().attr(MySQLConstants.SEQUENCE_ID_ATTRIBUTE_KEY).get().getAndIncrement());
} else {
writeMultiPackets(context, out);
}
}
} | @Test
void assertEncode() {
when(byteBuf.writeInt(anyInt())).thenReturn(byteBuf);
when(byteBuf.markWriterIndex()).thenReturn(byteBuf);
when(byteBuf.readableBytes()).thenReturn(8);
MySQLPacket actualMessage = mock(MySQLPacket.class);
context.channel().attr(MySQLConstants.SEQUENCE_ID_ATTRIBUTE_KEY).get().set(1);
new MySQLPacketCodecEngine().encode(context, actualMessage, byteBuf);
verify(byteBuf).writeInt(0);
verify(byteBuf).markWriterIndex();
verify(actualMessage).write(any(MySQLPacketPayload.class));
verify(byteBuf).setMediumLE(0, 4);
verify(byteBuf).setByte(3, 1);
} |
public Optional<Result> execute( List<RowMetaAndData> rows ) throws KettleException {
if ( rows.isEmpty() || stopped ) {
return Optional.empty();
}
Trans subtrans = this.createSubtrans();
running.add( subtrans );
parentTrans.addActiveSubTransformation( subTransName, subtrans );
// Pass parameter values
passParametersToTrans( subtrans, rows.get( 0 ) );
Result result = new Result();
result.setRows( rows );
subtrans.setPreviousResult( result );
subtrans.prepareExecution( this.parentTrans.getArguments() );
List<RowMetaAndData> rowMetaAndData = new ArrayList<>();
subtrans.getSteps().stream()
.filter( c -> c.step.getStepname().equalsIgnoreCase( subStep ) )
.findFirst()
.ifPresent( c -> c.step.addRowListener( new RowAdapter() {
@Override public void rowWrittenEvent( RowMetaInterface rowMeta, Object[] row ) {
rowMetaAndData.add( new RowMetaAndData( rowMeta, row ) );
}
} ) );
subtrans.startThreads();
subtrans.waitUntilFinished();
updateStatuses( subtrans );
running.remove( subtrans );
Result subtransResult = subtrans.getResult();
subtransResult.setRows( rowMetaAndData );
releaseBufferPermits( rows.size() );
return Optional.of( subtransResult );
} | @Test
public void testRunningZeroRowsIsEmptyOptional() throws Exception {
SubtransExecutor subtransExecutor = new SubtransExecutor( "subtransname", null, null, false, null, "", 0 );
Optional<Result> execute = subtransExecutor.execute( Collections.emptyList() );
assertFalse( execute.isPresent() );
} |
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
SourceConfig mergedConfig = clone(existingConfig);
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getTopicName())) {
mergedConfig.setTopicName(newConfig.getTopicName());
}
if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
}
if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
mergedConfig.setSchemaType(newConfig.getSchemaType());
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
}
if (newConfig.getBatchSourceConfig() != null) {
validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
} | @Test
public void testMergeDifferentUserConfig() {
SourceConfig sourceConfig = createSourceConfig();
Map<String, String> myConfig = new HashMap<>();
myConfig.put("MyKey", "MyValue");
SourceConfig newSourceConfig = createUpdatedSourceConfig("configs", myConfig);
SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig);
assertEquals(
mergedConfig.getConfigs(),
myConfig
);
mergedConfig.setConfigs(sourceConfig.getConfigs());
assertEquals(
new Gson().toJson(sourceConfig),
new Gson().toJson(mergedConfig)
);
} |
public static SnapshotRef fromJson(String json) {
Preconditions.checkArgument(
json != null && !json.isEmpty(), "Cannot parse snapshot ref from invalid JSON: %s", json);
return JsonUtil.parse(json, SnapshotRefParser::fromJson);
} | @Test
public void testFailParsingWhenMissingRequiredFields() {
String refMissingType = "{\"snapshot-id\":1}";
assertThatThrownBy(() -> SnapshotRefParser.fromJson(refMissingType))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Cannot parse missing string");
String refMissingSnapshotId = "{\"type\":\"branch\"}";
assertThatThrownBy(() -> SnapshotRefParser.fromJson(refMissingSnapshotId))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Cannot parse missing long");
} |
static PrimitiveIterator.OfInt filterTranslate(int arrayLength, IntPredicate filter, IntUnaryOperator translator) {
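        // Iterate [0, arrayLength), keep indices accepted by filter, and map each surviving index through translator.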
return new IndexIterator(0, arrayLength, filter, translator);
} | @Test
public void testFilterTranslate() {
assertEquals(IndexIterator.filterTranslate(20, value -> value < 5, Math::negateExact), 0, -1, -2, -3, -4);
} |
private TemporaryClassLoaderContext(Thread thread, ClassLoader originalContextClassLoader) {
this.thread = thread;
this.originalContextClassLoader = originalContextClassLoader;
} | @Test
void testTemporaryClassLoaderContext() {
final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
final ChildFirstClassLoader temporaryClassLoader =
new ChildFirstClassLoader(
new URL[0], contextClassLoader, new String[0], NOOP_EXCEPTION_HANDLER);
try (TemporaryClassLoaderContext ignored =
TemporaryClassLoaderContext.of(temporaryClassLoader)) {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(temporaryClassLoader);
}
assertThat(Thread.currentThread().getContextClassLoader()).isEqualTo(contextClassLoader);
} |
public boolean isForce() {
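        // Force mode comes from the optional 4th argument; parsing is case-insensitive and tolerates surrounding whitespace.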
return args.length >= 4 && parseForceParameter(args[3]);
} | @Test
void assertGetForce() {
assertFalse(new BootstrapArguments(new String[]{"3306", "test_conf", "127.0.0.1"}).isForce());
assertFalse(new BootstrapArguments(new String[]{"3306", "test_conf", "127.0.0.1", "false"}).isForce());
assertTrue(new BootstrapArguments(new String[]{"3306", "test_conf", "127.0.0.1", "true "}).isForce());
assertTrue(new BootstrapArguments(new String[]{"3306", "test_conf", "127.0.0.1", "true"}).isForce());
assertTrue(new BootstrapArguments(new String[]{"3306", "test_conf", "127.0.0.1", "TrUe"}).isForce());
} |
@Override
public AdminUserDO getUser(Long id) {
return userMapper.selectById(id);
} | @Test
public void testGetUser() {
    // mock data
    AdminUserDO dbUser = randomAdminUserDO();
    userMapper.insert(dbUser);
    // prepare parameters
    Long userId = dbUser.getId();
    // invoke
    AdminUserDO user = userService.getUser(userId);
    // assert
assertPojoEquals(dbUser, user);
} |
@Override
public void importData(JsonReader reader) throws IOException {
logger.info("Reading configuration for 1.1");
// this *HAS* to start as an object
reader.beginObject();
while (reader.hasNext()) {
JsonToken tok = reader.peek();
switch (tok) {
case NAME:
String name = reader.nextName();
// find out which member it is
if (name.equals(CLIENTS)) {
readClients(reader);
} else if (name.equals(GRANTS)) {
readGrants(reader);
} else if (name.equals(WHITELISTEDSITES)) {
readWhitelistedSites(reader);
} else if (name.equals(BLACKLISTEDSITES)) {
readBlacklistedSites(reader);
} else if (name.equals(AUTHENTICATIONHOLDERS)) {
readAuthenticationHolders(reader);
} else if (name.equals(ACCESSTOKENS)) {
readAccessTokens(reader);
} else if (name.equals(REFRESHTOKENS)) {
readRefreshTokens(reader);
} else if (name.equals(SYSTEMSCOPES)) {
readSystemScopes(reader);
    } else {
        boolean processed = false;
        for (MITREidDataServiceExtension extension : extensions) {
            if (extension.supportsVersion(THIS_VERSION)) {
                processed = extension.importExtensionData(name, reader);
                if (processed) {
                    break;
                }
            }
        }
        if (!processed) {
            // unknown token, skip it
            reader.skipValue();
        }
    }
break;
case END_OBJECT:
// the object ended, we're done here
reader.endObject();
continue;
default:
logger.debug("Found unexpected entry");
reader.skipValue();
continue;
}
}
fixObjectReferences();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.fixExtensionObjectReferences(maps);
break;
}
}
maps.clearAll();
} | @Test
public void testImportWhitelistedSites() throws IOException {
WhitelistedSite site1 = new WhitelistedSite();
site1.setId(1L);
site1.setClientId("foo");
WhitelistedSite site2 = new WhitelistedSite();
site2.setId(2L);
site2.setClientId("bar");
WhitelistedSite site3 = new WhitelistedSite();
site3.setId(3L);
site3.setClientId("baz");
//site3.setAllowedScopes(null);
String configJson = "{" +
"\"" + MITREidDataService.CLIENTS + "\": [], " +
"\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
"\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
"\"" + MITREidDataService.GRANTS + "\": [], " +
"\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
"\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
"\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
"\"" + MITREidDataService.WHITELISTEDSITES + "\": [" +
"{\"id\":1,\"clientId\":\"foo\"}," +
"{\"id\":2,\"clientId\":\"bar\"}," +
"{\"id\":3,\"clientId\":\"baz\"}" +
" ]" +
"}";
System.err.println(configJson);
JsonReader reader = new JsonReader(new StringReader(configJson));
final Map<Long, WhitelistedSite> fakeDb = new HashMap<>();
when(wlSiteRepository.save(isA(WhitelistedSite.class))).thenAnswer(new Answer<WhitelistedSite>() {
Long id = 333L;
@Override
public WhitelistedSite answer(InvocationOnMock invocation) throws Throwable {
WhitelistedSite _site = (WhitelistedSite) invocation.getArguments()[0];
if(_site.getId() == null) {
_site.setId(id++);
}
fakeDb.put(_site.getId(), _site);
return _site;
}
});
when(wlSiteRepository.getById(anyLong())).thenAnswer(new Answer<WhitelistedSite>() {
@Override
public WhitelistedSite answer(InvocationOnMock invocation) throws Throwable {
Long _id = (Long) invocation.getArguments()[0];
return fakeDb.get(_id);
}
});
dataService.importData(reader);
verify(wlSiteRepository, times(3)).save(capturedWhitelistedSites.capture());
List<WhitelistedSite> savedSites = capturedWhitelistedSites.getAllValues();
assertThat(savedSites.size(), is(3));
assertThat(savedSites.get(0).getClientId(), equalTo(site1.getClientId()));
assertThat(savedSites.get(1).getClientId(), equalTo(site2.getClientId()));
assertThat(savedSites.get(2).getClientId(), equalTo(site3.getClientId()));
} |
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
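    // Signatures are cached per DoFn class, so reflective parsing runs at most once per class.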
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
} | @Test
public void testMultipleSchemaParameters() {
DoFnSignature sig =
DoFnSignatures.getSignature(
new DoFn<String, String>() {
@ProcessElement
public void process(
@Element Row row1,
@Timestamp Instant ts,
@Element Row row2,
OutputReceiver<String> o,
@Element Integer intParameter) {}
}.getClass());
assertEquals(3, sig.processElement().getSchemaElementParameters().size());
assertEquals(0, sig.processElement().getSchemaElementParameters().get(0).index());
assertEquals(
TypeDescriptors.rows(),
sig.processElement().getSchemaElementParameters().get(0).elementT());
assertEquals(1, sig.processElement().getSchemaElementParameters().get(1).index());
assertEquals(
TypeDescriptors.rows(),
sig.processElement().getSchemaElementParameters().get(1).elementT());
assertEquals(2, sig.processElement().getSchemaElementParameters().get(2).index());
assertEquals(
TypeDescriptors.integers(),
sig.processElement().getSchemaElementParameters().get(2).elementT());
} |
public static boolean isIPv6MixedAddress(final String input) {
int splitIndex = input.lastIndexOf(':');
if (splitIndex == -1) {
return false;
}
    // the last part is an IPv4 address
boolean ipv4PartValid = isIPv4Address(input.substring(splitIndex + 1));
String ipV6Part = input.substring(ZERO, splitIndex + 1);
if (DOUBLE_COLON.equals(ipV6Part)) {
return ipv4PartValid;
}
boolean ipV6UncompressedDetected = IPV6_MIXED_UNCOMPRESSED_REGEX.matcher(ipV6Part).matches();
boolean ipV6CompressedDetected = IPV6_MIXED_COMPRESSED_REGEX.matcher(ipV6Part).matches();
return ipv4PartValid && (ipV6UncompressedDetected || ipV6CompressedDetected);
} | @Test
void isIPv6MixedAddress() {
assertTrue(InetAddressValidator.isIPv6MixedAddress("1:0:0:0:0:0:172.12.55.18"));
assertTrue(InetAddressValidator.isIPv6MixedAddress("::172.12.55.18"));
assertFalse(InetAddressValidator.isIPv6MixedAddress("2001:DB8::8:800:200C141aA"));
} |
@Override
@CacheEvict(cacheNames = RedisKeyConstants.SMS_TEMPLATE,
        allEntries = true) // allEntries evicts the whole cache, because the code field may change and targeted eviction is impractical
public void updateSmsTemplate(SmsTemplateSaveReqVO updateReqVO) {
    // validate that the template exists
    validateSmsTemplateExists(updateReqVO.getId());
    // validate the SMS channel
    SmsChannelDO channelDO = validateSmsChannel(updateReqVO.getChannelId());
    // validate that the template code is not duplicated
    validateSmsTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode());
    // validate the provider-side API template
    validateApiTemplate(updateReqVO.getChannelId(), updateReqVO.getApiTemplateId());
    // update
SmsTemplateDO updateObj = BeanUtils.toBean(updateReqVO, SmsTemplateDO.class);
updateObj.setParams(parseTemplateContentParams(updateObj.getContent()));
updateObj.setChannelCode(channelDO.getCode());
smsTemplateMapper.updateById(updateObj);
} | @Test
public void testUpdateSmsTemplate_notExists() {
    // prepare parameters
    SmsTemplateSaveReqVO reqVO = randomPojo(SmsTemplateSaveReqVO.class);
    // invoke and assert the exception
assertServiceException(() -> smsTemplateService.updateSmsTemplate(reqVO), SMS_TEMPLATE_NOT_EXISTS);
} |
static void printEntries(PrintWriter writer,
String intro,
OptionalInt screenWidth,
List<String> entries) {
if (entries.isEmpty()) {
return;
}
if (!intro.isEmpty()) {
writer.println(intro);
}
ColumnSchema columnSchema = calculateColumnSchema(screenWidth, entries);
int numColumns = columnSchema.numColumns();
int numLines = (entries.size() + numColumns - 1) / numColumns;
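        // Entries are laid out column-major: entryIndex = line + column * entriesPerColumn.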
for (int line = 0; line < numLines; line++) {
StringBuilder output = new StringBuilder();
for (int column = 0; column < numColumns; column++) {
int entryIndex = line + (column * columnSchema.entriesPerColumn());
if (entryIndex < entries.size()) {
String entry = entries.get(entryIndex);
output.append(entry);
if (column < numColumns - 1) {
int width = columnSchema.columnWidth(column);
for (int i = 0; i < width - entry.length(); i++) {
output.append(" ");
}
}
}
}
writer.println(output);
}
} | @Test
public void testPrintEntries() throws Exception {
try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
try (PrintWriter writer = new PrintWriter(new OutputStreamWriter(
stream, StandardCharsets.UTF_8))) {
LsCommandHandler.printEntries(writer, "", OptionalInt.of(18),
Arrays.asList("alphabet", "beta", "gamma", "theta", "zeta"));
}
assertEquals(String.join(String.format("%n"), Arrays.asList(
"alphabet theta",
"beta zeta",
"gamma")), stream.toString().trim());
}
} |
public void process()
throws Exception {
if (_segmentMetadata.getTotalDocs() == 0) {
LOGGER.info("Skip preprocessing empty segment: {}", _segmentMetadata.getName());
return;
}
// Segment processing has to be done with a local directory.
File indexDir = new File(_indexDirURI);
// This fixes the issue of temporary files not getting deleted after creating new inverted indexes.
removeInvertedIndexTempFiles(indexDir);
try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
// Update default columns according to the schema.
if (_schema != null) {
DefaultColumnHandler defaultColumnHandler = DefaultColumnHandlerFactory
.getDefaultColumnHandler(indexDir, _segmentMetadata, _indexLoadingConfig, _schema, segmentWriter);
defaultColumnHandler.updateDefaultColumns();
_segmentMetadata = new SegmentMetadataImpl(indexDir);
_segmentDirectory.reloadMetadata();
} else {
LOGGER.warn("Skip creating default columns for segment: {} without schema", _segmentMetadata.getName());
}
// Update single-column indices, like inverted index, json index etc.
List<IndexHandler> indexHandlers = new ArrayList<>();
      // We cannot just create all the index handlers in a random order.
      // Specifically, ForwardIndexHandler needs to be executed first. This is because it modifies the segment metadata
      // while rewriting the forward index to create a dictionary. Some other handlers (like the range one) assume that
      // the metadata has already been modified by ForwardIndexHandler.
IndexHandler forwardHandler = createHandler(StandardIndexes.forward());
indexHandlers.add(forwardHandler);
forwardHandler.updateIndices(segmentWriter);
      // Now that ForwardIndexHandler.updateIndices has run, the remaining index types can be processed in any order
_segmentMetadata = new SegmentMetadataImpl(indexDir);
_segmentDirectory.reloadMetadata();
for (IndexType<?, ?, ?> type : IndexService.getInstance().getAllIndexes()) {
if (type != StandardIndexes.forward()) {
IndexHandler handler = createHandler(type);
indexHandlers.add(handler);
handler.updateIndices(segmentWriter);
// Other IndexHandler classes may modify the segment metadata while creating a temporary forward
// index to generate their respective indexes from if the forward index was disabled. This new metadata is
// needed to construct other indexes like RangeIndex.
_segmentMetadata = _segmentDirectory.getSegmentMetadata();
}
}
// Perform post-cleanup operations on the index handlers.
for (IndexHandler handler : indexHandlers) {
handler.postUpdateIndicesCleanup(segmentWriter);
}
// Add min/max value to column metadata according to the prune mode.
ColumnMinMaxValueGeneratorMode columnMinMaxValueGeneratorMode =
_indexLoadingConfig.getColumnMinMaxValueGeneratorMode();
if (columnMinMaxValueGeneratorMode != ColumnMinMaxValueGeneratorMode.NONE) {
ColumnMinMaxValueGenerator columnMinMaxValueGenerator =
new ColumnMinMaxValueGenerator(_segmentMetadata, segmentWriter, columnMinMaxValueGeneratorMode);
columnMinMaxValueGenerator.addColumnMinMaxValue();
// NOTE: This step may modify the segment metadata. When adding new steps after this, un-comment the next line.
// _segmentMetadata = new SegmentMetadataImpl(indexDir);
}
segmentWriter.save();
}
// Startree creation will load the segment again, so we need to close and re-open the segment writer to make sure
// that the other required indices (e.g. forward index) are up-to-date.
try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
// Create/modify/remove star-trees if required.
processStarTrees(indexDir);
_segmentDirectory.reloadMetadata();
segmentWriter.save();
}
} | @Test
public void testV3CleanupH3AndTextIndices()
throws Exception {
constructV3Segment();
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(_indexDir);
assertEquals(segmentMetadata.getVersion(), SegmentVersion.v3);
// V3 use single file for all column indices.
File segmentDirectoryPath = SegmentDirectoryPaths.segmentDirectoryFor(_indexDir, SegmentVersion.v3);
File singleFileIndex = new File(segmentDirectoryPath, "columns.psf");
// There are a few indices initially. Remove them to prepare an initial state.
// Also use the schema with columns for H3 and Json index to add those columns.
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, getDefaultIndexLoadingConfig(),
_newColumnsSchemaWithH3Json)) {
processor.process();
}
segmentMetadata = new SegmentMetadataImpl(_indexDir);
assertNotNull(segmentMetadata.getColumnMetadataFor("newH3Col"));
assertNotNull(segmentMetadata.getColumnMetadataFor("newJsonCol"));
long initFileSize = singleFileIndex.length();
IndexLoadingConfig indexLoadingConfig = getDefaultIndexLoadingConfig();
indexLoadingConfig.setH3IndexConfigs(
ImmutableMap.of("newH3Col", new H3IndexConfig(ImmutableMap.of("resolutions", "5"))));
indexLoadingConfig.setJsonIndexColumns(new HashSet<>(Collections.singletonList("newJsonCol")));
// Create H3 and Json indices.
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, indexLoadingConfig, null)) {
processor.process();
}
long addedLength = 0;
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentDirectory.Reader reader = segmentDirectory.createReader()) {
addedLength += reader.getIndexFor("newH3Col", StandardIndexes.h3()).size() + 8;
addedLength += reader.getIndexFor("newJsonCol", StandardIndexes.json()).size() + 8;
}
assertEquals(singleFileIndex.length(), initFileSize + addedLength);
// Remove H3 and Json indices, and size gets back to initial.
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, getDefaultIndexLoadingConfig(),
null)) {
processor.process();
}
assertEquals(singleFileIndex.length(), initFileSize);
} |
static Map<Integer, COSObjectable> getNumberTreeAsMap(PDNumberTreeNode tree)
throws IOException
{
if (tree == null)
{
return new LinkedHashMap<>();
}
Map<Integer, COSObjectable> numbers = tree.getNumbers();
if (numbers == null)
{
numbers = new LinkedHashMap<>();
}
else
{
// must copy because the map is read only
numbers = new LinkedHashMap<>(numbers);
}
List<PDNumberTreeNode> kids = tree.getKids();
if (kids != null)
{
for (PDNumberTreeNode kid : kids)
{
numbers.putAll(getNumberTreeAsMap(kid));
}
}
return numbers;
} | @Test
void testParentTree() throws IOException
{
try (PDDocument doc = Loader
.loadPDF(new File(TARGETPDFDIR, "PDFBOX-3999-GeneralForbearance.pdf")))
{
PDStructureTreeRoot structureTreeRoot = doc.getDocumentCatalog().getStructureTreeRoot();
PDNumberTreeNode parentTree = structureTreeRoot.getParentTree();
parentTree.getValue(0);
Map<Integer, COSObjectable> numberTreeAsMap = PDFMergerUtility.getNumberTreeAsMap(parentTree);
assertEquals(31, numberTreeAsMap.size());
assertEquals(31, Collections.max(numberTreeAsMap.keySet()) + 1);
assertEquals(0, (int) Collections.min(numberTreeAsMap.keySet()));
assertEquals(31, structureTreeRoot.getParentTreeNextKey());
}
} |
public List<Flow> convertFlows(String componentName, @Nullable DbIssues.Locations issueLocations) {
if (issueLocations == null) {
return Collections.emptyList();
}
return issueLocations.getFlowList().stream()
.map(sourceFlow -> toFlow(componentName, sourceFlow))
.collect(Collectors.toCollection(LinkedList::new));
} | @Test
public void convertFlows_withNullDbLocations_returnsEmptyList() {
assertThat(flowGenerator.convertFlows(COMPONENT_NAME, null)).isEmpty();
} |
BackgroundJobRunner getBackgroundJobRunner(Job job) {
assertJobExists(job.getJobDetails());
return backgroundJobRunners.stream()
.filter(jobRunner -> jobRunner.supports(job))
.findFirst()
.orElseThrow(() -> problematicConfigurationException("Could not find a BackgroundJobRunner: either no JobActivator is registered, your Background Job Class is not registered within the IoC container or your Job does not have a default no-arg constructor."));
} | @Test
void getBackgroundJobRunnerForJobThatCannotBeRun() {
final Job job = anEnqueuedJob()
.<TestServiceThatCannotBeRun>withJobDetails(ts -> ts.doWork())
.build();
assertThatThrownBy(() -> backgroundJobServer.getBackgroundJobRunner(job))
.isInstanceOf(JobRunrException.class);
} |
public static void validatePath(String path) throws InvalidPathException {
boolean invalid = (path == null || path.isEmpty());
if (!OSUtils.isWindows()) {
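      // On non-Windows systems the path must also be absolute (start with the Alluxio separator).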
invalid = (invalid || !path.startsWith(AlluxioURI.SEPARATOR));
}
if (invalid) {
throw new InvalidPathException(ExceptionMessage.PATH_INVALID.getMessage(path));
}
} | @Test
public void validatePath() throws InvalidPathException {
// check valid paths
PathUtils.validatePath("/foo/bar");
PathUtils.validatePath("/foo/bar/");
PathUtils.validatePath("/foo/./bar/");
PathUtils.validatePath("/foo/././bar/");
PathUtils.validatePath("/foo/../bar");
PathUtils.validatePath("/foo/../bar/");
// check invalid paths
ArrayList<String> invalidPaths = new ArrayList<>();
invalidPaths.add(null);
invalidPaths.add("");
invalidPaths.add("not a path");
for (String invalidPath : invalidPaths) {
try {
PathUtils.validatePath(invalidPath);
fail("validatePath(" + invalidPath + ") did not fail");
} catch (InvalidPathException e) {
// this is expected
}
}
} |
@Override
protected void runTask() {
LOGGER.trace("Looking for scheduled jobs... ");
Instant scheduledBefore = now().plus(backgroundJobServerConfiguration().getPollInterval());
processManyJobs(previousResults -> getJobsToSchedule(scheduledBefore, previousResults),
Job::enqueue,
totalAmountOfEnqueuedJobs -> LOGGER.debug("Found {} scheduled jobs to enqueue.", totalAmountOfEnqueuedJobs));
} | @Test
void testTask() {
final Job scheduledJob = aScheduledJob().build();
when(storageProvider.getScheduledJobs(any(), any(AmountRequest.class))).thenReturn(singletonList(scheduledJob), emptyJobList());
runTask(task);
verify(storageProvider).save(jobsToSaveArgumentCaptor.capture());
assertThat(jobsToSaveArgumentCaptor.getValue().get(0)).hasStates(SCHEDULED, ENQUEUED);
} |
@Udf
public <T> boolean contains(
@UdfParameter final List<T> array,
@UdfParameter final T val
) {
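    // A null array contains nothing; List.contains handles a null search value.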
return array != null && array.contains(val);
} | @Test
public void shouldNotFindValuesInNullListElements() {
assertTrue(udf.contains(Collections.singletonList(null), null));
assertFalse(udf.contains(Collections.singletonList(null), "null"));
assertFalse(udf.contains(Collections.singletonList(null), true));
assertFalse(udf.contains(Collections.singletonList(null), false));
assertFalse(udf.contains(Collections.singletonList(null), 1.0));
assertFalse(udf.contains(Collections.singletonList(null), 100));
assertFalse(udf.contains(Collections.singletonList(null), "abc"));
assertFalse(udf.contains(Collections.singletonList(null), ""));
assertFalse(udf.contains(null, "null"));
} |
public final TraceContext decorate(TraceContext context) {
long traceId = context.traceId(), spanId = context.spanId();
E claimed = null;
int existingIndex = -1, extraLength = context.extra().size();
for (int i = 0; i < extraLength; i++) {
Object next = context.extra().get(i);
if (next instanceof Extra) {
Extra nextExtra = (Extra) next;
// Don't interfere with other instances or subtypes
if (nextExtra.factory != this) continue;
if (claimed == null && nextExtra.tryToClaim(traceId, spanId)) {
claimed = (E) nextExtra;
continue;
}
if (existingIndex == -1) {
existingIndex = i;
} else {
Platform.get().log("BUG: something added redundant extra instances %s", context, null);
return context;
}
}
}
// Easiest when there is neither existing state to assign, nor need to change context.extra()
if (claimed != null && existingIndex == -1) {
return context;
}
// If context.extra() didn't have an unclaimed extra instance, create one for this context.
if (claimed == null) {
claimed = create();
if (claimed == null) {
Platform.get().log("BUG: create() returned null", null);
return context;
}
claimed.tryToClaim(traceId, spanId);
}
TraceContext.Builder builder = context.toBuilder().clearExtra().addExtra(claimed);
for (int i = 0; i < extraLength; i++) {
Object next = context.extra().get(i);
if (i == existingIndex) {
E existing = (E) next;
// If the claimed extra instance was new or had no changes, simply assign existing to it
if (claimed.state == initialState) {
claimed.state = existing.state;
} else if (existing.state != initialState) {
claimed.mergeStateKeepingOursOnConflict(existing);
}
} else if (!next.equals(claimed)) {
builder.addExtra(next);
}
}
return builder.build();
} | @Test void decorate_forksWhenFieldsAlreadyClaimed() {
TraceContext other = TraceContext.newBuilder().traceId(98L).spanId(99L).build();
BasicMapExtra claimed = factory.decorate(other).findExtra(BasicMapExtra.class);
List<TraceContext> contexts = asList(
context.toBuilder().addExtra(claimed).build(),
context.toBuilder().addExtra(1L).addExtra(claimed).build(),
context.toBuilder().addExtra(claimed).addExtra(1L).build(),
context.toBuilder().addExtra(1L).addExtra(claimed).addExtra(2L).build()
);
for (TraceContext context : contexts) {
TraceContext ensured = factory.decorate(context);
assertThat(ensured).isNotSameAs(context);
assertThat(ensured.extra())
.isNotSameAs(context.extra())
.hasSize(context.extra().size());
assertExtraClaimed(ensured);
}
} |
@Override
public byte[] decrypt(byte[] bytes, KeyType keyType) {
		// When the BouncyCastle provider is not in use, blockSize falls back to the algorithm default
		if (this.decryptBlockSize < 0 && null == GlobalBouncyCastleProvider.INSTANCE.getProvider()) {
			// encrypted data length <= modulus length - 11
			this.decryptBlockSize = ((RSAKey) getKeyByType(keyType)).getModulus().bitLength() / 8;
		}
}
return super.decrypt(bytes, keyType);
} | @Test
public void rsaDecodeTest() {
final String PRIVATE_KEY = "MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAIL7pbQ+5KKGYRhw7jE31hmA" //
+ "f8Q60ybd+xZuRmuO5kOFBRqXGxKTQ9TfQI+aMW+0lw/kibKzaD/EKV91107xE384qOy6IcuBfaR5lv39OcoqNZ"//
+ "5l+Dah5ABGnVkBP9fKOFhPgghBknTRo0/rZFGI6Q1UHXb+4atP++LNFlDymJcPAgMBAAECgYBammGb1alndta" //
+ "xBmTtLLdveoBmp14p04D8mhkiC33iFKBcLUvvxGg2Vpuc+cbagyu/NZG+R/WDrlgEDUp6861M5BeFN0L9O4hz"//
+ "GAEn8xyTE96f8sh4VlRmBOvVdwZqRO+ilkOM96+KL88A9RKdp8V2tna7TM6oI3LHDyf/JBoXaQJBAMcVN7fKlYP" //
+ "Skzfh/yZzW2fmC0ZNg/qaW8Oa/wfDxlWjgnS0p/EKWZ8BxjR/d199L3i/KMaGdfpaWbYZLvYENqUCQQCobjsuCW"//
+ "nlZhcWajjzpsSuy8/bICVEpUax1fUZ58Mq69CQXfaZemD9Ar4omzuEAAs2/uee3kt3AvCBaeq05NyjAkBme8SwB0iK"//
+ "kLcaeGuJlq7CQIkjSrobIqUEf+CzVZPe+AorG+isS+Cw2w/2bHu+G0p5xSYvdH59P0+ZT0N+f9LFAkA6v3Ae56OrI"//
+ "wfMhrJksfeKbIaMjNLS9b8JynIaXg9iCiyOHmgkMl5gAbPoH/ULXqSKwzBw5mJ2GW1gBlyaSfV3AkA/RJC+adIjsRGg"//
+ "JOkiRjSmPpGv3FOhl9fsBPjupZBEIuoMWOC8GXK/73DHxwmfNmN7C9+sIi4RBcjEeQ5F5FHZ";
final RSA rsa = new RSA(PRIVATE_KEY, null);
final String a = "2707F9FD4288CEF302C972058712F24A5F3EC62C5A14AD2FC59DAB93503AA0FA17113A020EE4EA35EB53F" //
+ "75F36564BA1DABAA20F3B90FD39315C30E68FE8A1803B36C29029B23EB612C06ACF3A34BE815074F5EB5AA3A"//
+ "C0C8832EC42DA725B4E1C38EF4EA1B85904F8B10B2D62EA782B813229F9090E6F7394E42E6F44494BB8";
final byte[] aByte = HexUtil.decodeHex(a);
final byte[] decrypt = rsa.decrypt(aByte, KeyType.PrivateKey);
assertEquals("虎头闯杭州,多抬头看天,切勿只管种地", StrUtil.str(decrypt, CharsetUtil.CHARSET_UTF_8));
} |
public static Map<String, Integer> retrieveTags(Note note) {
HashMap<String, Integer> tagsMap = new HashMap<>();
String[] words = (note.getTitle() + " " + note.getContent()).replaceAll("\n", " ").trim()
.split(" ");
for (String word : words) {
String parsedHashtag = UrlCompleter.parseHashtag(word);
if (StringUtils.isNotEmpty(parsedHashtag)) {
int count = tagsMap.get(parsedHashtag) == null ? 0 : tagsMap.get(parsedHashtag);
tagsMap.put(parsedHashtag, ++count);
}
}
return tagsMap;
} | @Test
public void retrievesTagsFromNote() {
Map<String, Integer> tags = TagsHelper.retrieveTags(note);
assertEquals(4, tags.size());
assertTrue(tags.containsKey(TAG1.getText()) && tags.containsKey(TAG2.getText())
&& tags.containsKey(TAG3.getText()) && tags.containsKey(TAG4.getText()));
assertFalse(tags.containsKey(TAG_INVALID.getText()));
} |
@Override
public boolean retryRequest(IOException exception, int executionCount, HttpContext ctx) {
log.fine(() -> String.format("retryRequest(exception='%s', executionCount='%d', ctx='%s'",
exception.getClass().getName(), executionCount, ctx));
HttpClientContext clientCtx = HttpClientContext.adapt(ctx);
if (!predicate.test(exception, clientCtx)) {
log.fine(() -> String.format("Not retrying for '%s'", ctx));
return false;
}
if (executionCount > maxRetries) {
log.fine(() -> String.format("Max retries exceeded for '%s'", ctx));
retryFailedConsumer.onRetryFailed(exception, executionCount, clientCtx);
return false;
}
Duration delay = delaySupplier.getDelay(executionCount);
log.fine(() -> String.format("Retrying after %s for '%s'", delay, ctx));
retryConsumer.onRetry(exception, delay, executionCount, clientCtx);
sleeper.sleep(delay);
return true;
} | @Test
void retries_for_listed_exceptions_until_max_retries_exceeded() {
int maxRetries = 2;
DelayedConnectionLevelRetryHandler handler = DelayedConnectionLevelRetryHandler.Builder
.withFixedDelay(Duration.ofSeconds(2), maxRetries)
.retryForExceptions(List.of(SSLException.class, ConnectException.class))
.withSleeper(mock(Sleeper.class))
.build();
SSLException sslException = new SSLException("ssl error");
HttpClientContext ctx = new HttpClientContext();
int lastExecutionCount = maxRetries + 1;
for (int i = 1; i < lastExecutionCount; i++) {
assertTrue(handler.retryRequest(sslException, i, ctx));
}
assertFalse(handler.retryRequest(sslException, lastExecutionCount, ctx));
} |
public ColumnName getName() {
return name;
} | @Test
public void shouldReturnName() {
// Given:
final TableElement element =
new TableElement(NAME, new Type(SqlTypes.STRING), NO_COLUMN_CONSTRAINTS);
// Then:
assertThat(element.getName(), is(NAME));
} |
public static String getCheckJobIdsRootPath(final String jobId) {
return String.join("/", getJobRootPath(jobId), "check", "job_ids");
} | @Test
void assertGetCheckJobIdsPath() {
assertThat(PipelineMetaDataNode.getCheckJobIdsRootPath(jobId), is(jobCheckRootPath + "/job_ids"));
} |
public static MemberLookup createLookUp(ServerMemberManager memberManager) throws NacosException {
if (!EnvUtil.getStandaloneMode()) {
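            // Cluster mode resolves the configured lookup type; standalone mode below always uses the in-process lookup.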
String lookupType = EnvUtil.getProperty(LOOKUP_MODE_TYPE);
LookupType type = chooseLookup(lookupType);
LOOK_UP = find(type);
currentLookupType = type;
} else {
LOOK_UP = new StandaloneMemberLookup();
}
LOOK_UP.injectMemberManager(memberManager);
Loggers.CLUSTER.info("Current addressing mode selection : {}", LOOK_UP.getClass().getSimpleName());
return LOOK_UP;
} | @Test
void createLookUpStandaloneMemberLookup() throws NacosException {
EnvUtil.setIsStandalone(true);
memberLookup = LookupFactory.createLookUp(memberManager);
assertEquals(StandaloneMemberLookup.class, memberLookup.getClass());
} |
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final DelayedHttpEntityCallable<Node> command = new DelayedHttpEntityCallable<Node>(file) {
@Override
public Node call(final HttpEntity entity) throws BackgroundException {
try {
final HttpEntityEnclosingRequestBase request;
if(status.isExists()) {
request = new HttpPut(String.format("%s/api/v1/nodes/%s/revisions", session.getClient().getBasePath(), fileid.getFileId(file)));
}
else {
request = new HttpPost(String.format("%s/api/v1/deepBoxes/%s/boxes/%s/files/%s",
session.getClient().getBasePath(),
fileid.getDeepBoxNodeId(file),
fileid.getBoxNodeId(file),
fileid.getFileId(file.getParent())));
}
final Checksum checksum = status.getChecksum();
if(Checksum.NONE != checksum) {
switch(checksum.algorithm) {
case sha1:
request.addHeader(HttpHeaders.CONTENT_MD5, checksum.hash);
}
}
final MultipartEntityBuilder multipart = MultipartEntityBuilder.create();
multipart.setMode(HttpMultipartMode.BROWSER_COMPATIBLE);
multipart.setCharset(StandardCharsets.UTF_8);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
entity.writeTo(out);
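                    // The entity is buffered fully in memory to build the multipart body;
                    // large uploads pay a matching memory cost.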
if(status.isExists()) {
multipart.addBinaryBody("file", out.toByteArray(),
null == status.getMime() ? ContentType.APPLICATION_OCTET_STREAM : ContentType.create(status.getMime()), file.getName());
request.setEntity(multipart.build());
return session.getClient().getClient().execute(request, new AbstractResponseHandler<Node>() {
@Override
public Node handleEntity(final HttpEntity entity) throws IOException {
final ObjectReader reader = new JSON().getContext(null).reader(Node.class);
return reader.readValue(entity.getContent());
}
});
}
else {
multipart.addBinaryBody("files", out.toByteArray(),
null == status.getMime() ? ContentType.APPLICATION_OCTET_STREAM : ContentType.create(status.getMime()), file.getName());
request.setEntity(multipart.build());
return session.getClient().getClient().execute(request, new AbstractResponseHandler<Node>() {
@Override
public Node handleEntity(final HttpEntity entity) throws IOException {
final ObjectReader reader = new JSON().getContext(null).readerForArrayOf(Node.class);
final Node[] node = reader.readValue(entity.getContent());
return node[0];
}
});
}
}
catch(HttpResponseException e) {
throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
}
@Override
public long getContentLength() {
return -1L;
}
};
return this.write(file, status, command);
} | @Test
public void testReadWrite() throws Exception {
final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
final Path documents = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents/", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path room = new DeepboxDirectoryFeature(session, nodeid).mkdir(
new Path(documents,
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final long folderTimestamp = new DeepboxAttributesFinderFeature(session, nodeid).find(room).getModificationDate();
final byte[] content = RandomUtils.nextBytes(32769);
final Path test = new Path(room, String.format("%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file));
{
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final DeepboxWriteFeature writer = new DeepboxWriteFeature(session, nodeid);
final HttpResponseOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
}
assertTrue(new DefaultFindFeature(session).find(test));
assertEquals(folderTimestamp, new DeepboxAttributesFinderFeature(session, nodeid).find(room).getModificationDate());
PathAttributes attributes = new DeepboxAttributesFinderFeature(session, nodeid).find(test);
final String versionId = attributes.getVersionId();
assertNull(versionId);
final String nodeId = attributes.getFileId();
assertNotNull(nodeId);
assertEquals(new DeepboxAttributesFinderFeature(session, nodeid).find(test), attributes);
final byte[] compare = new byte[content.length];
final InputStream stream = new DeepboxReadFeature(session, nodeid).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
// Overwrite
{
final byte[] change = RandomUtils.nextBytes(256);
final TransferStatus status = new TransferStatus();
status.setLength(change.length);
final DeepboxWriteFeature writer = new DeepboxWriteFeature(session, nodeid);
final HttpResponseOutputStream<Node> out = writer.write(test, status.exists(true), new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(change), out);
assertEquals(nodeId, new DeepboxAttributesFinderFeature(session, nodeid).find(test).getFileId());
}
test.attributes().setCustom(Collections.emptyMap());
attributes = new DeepboxAttributesFinderFeature(session, nodeid).find(test);
assertNotNull(attributes.getFileId());
assertEquals(nodeId, new DeepboxIdProvider(session).getFileId(test));
new DeepboxDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public void addEntity(Entity entity) {
entities.add(entity);
} | @Test
void testAddEntity() {
var entity = new Skeleton(1);
world.addEntity(entity);
assertEquals(entity, world.entities.get(0));
} |
protected FixedTemplate(Object object, FixedDataSchema schema) throws TemplateOutputCastException
{
Class<?> objectClass = object.getClass();
if (objectClass == String.class)
{
String data = (String) object;
if (data.length() != schema.getSize())
{
throw new TemplateOutputCastException("Fixed size is " + schema.getSize() + ", string length is " + data.length());
}
_schema = schema;
_data = ByteString.copyAvroString(data, true);
if (_data == null)
{
throw new TemplateOutputCastException("String is not a valid representation of bytes");
}
}
else if (objectClass == ByteString.class)
{
ByteString bytes = (ByteString) object;
if (bytes.length() != schema.getSize())
{
throw new TemplateOutputCastException("Fixed size is " + schema.getSize() + ", ByteString length is " + bytes.length());
}
_schema = schema;
_data = bytes;
}
else
{
throw new TemplateOutputCastException("Fixed input " + object + " is not a string or ByteString");
}
} | @Test
public void testFixedTemplate()
{
String[] goodObjects = {
"12345",
"ABCDF"
};
ByteString[] goodByteStrings = {
ByteString.copyAvroString("qwert", false)
};
Object[] badObjects = {
"", "1", "12", "123", "1234", "1234\u0100", "123456",
1, 2.0f, 3.0, 4L, new DataMap(), new DataList()
};
ByteString[] badByteStrings = {
ByteString.copyAvroString("", false),
ByteString.copyAvroString("a", false),
ByteString.copyAvroString("ab", false),
ByteString.copyAvroString("abc", false),
ByteString.copyAvroString("abcd", false),
ByteString.copyAvroString("abcdef", false)
};
Integer lastHashCode = null;
ByteString lastByteString = null;
for (String o : goodObjects)
{
Exception exc = null;
Fixed5 fixed = null;
try
{
fixed = new Fixed5(o);
}
catch (Exception e)
{
exc = e;
}
assertNull(exc);
// equals
ByteString expectedByteString = ByteString.copyAvroString(o, false);
assertEquals(fixed.data(), expectedByteString);
assertTrue(fixed.equals(new Fixed5(expectedByteString)));
if (lastByteString != null)
{
assertFalse(fixed.equals(lastByteString));
}
assertFalse(fixed.equals(null));
assertFalse(fixed.equals(new Object()));
// hashCode
int newHashCode = fixed.hashCode();
if (lastHashCode != null)
{
assertTrue(newHashCode != lastHashCode);
}
// toString
assertEquals(expectedByteString.toString(), fixed.toString());
lastHashCode = newHashCode;
lastByteString = expectedByteString;
// clone and copy
testCopiers(fixed);
}
for (ByteString o : goodByteStrings)
{
Exception exc = null;
Fixed5 fixed = null;
try
{
fixed = new Fixed5(o);
}
catch (Exception e)
{
exc = e;
}
assertNull(exc);
// equals
assertEquals(fixed.data(), o);
assertTrue(fixed.equals(new Fixed5(o)));
if (lastByteString != null)
{
assertFalse(fixed.equals(lastByteString));
}
assertFalse(fixed.equals(null));
assertFalse(fixed.equals(new Object()));
// hashCode
int newHashCode = fixed.hashCode();
if (lastHashCode != null)
{
assertTrue(newHashCode != lastHashCode);
}
// toString
assertEquals(o.toString(), fixed.toString());
lastHashCode = newHashCode;
lastByteString = o;
// clone and copy
testCopiers(fixed);
}
for (Object o : badObjects)
{
Exception exc = null;
try
{
new Fixed5(o);
}
catch (Exception e)
{
exc = e;
}
assertTrue(exc != null);
assertTrue(exc instanceof TemplateOutputCastException);
}
for (ByteString o : badByteStrings)
{
Exception exc = null;
try
{
new Fixed5(o);
}
catch (Exception e)
{
exc = e;
}
assertTrue(exc != null);
assertTrue(exc instanceof TemplateOutputCastException);
}
} |
@Bean
public CorsFilter corsFilter() {
UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
CorsConfiguration config = jHipsterProperties.getCors();
if (!CollectionUtils.isEmpty(config.getAllowedOrigins()) || !CollectionUtils.isEmpty(config.getAllowedOriginPatterns())) {
log.debug("Registering CORS filter");
source.registerCorsConfiguration("/api/**", config);
source.registerCorsConfiguration("/management/**", config);
source.registerCorsConfiguration("/v3/api-docs", config);
source.registerCorsConfiguration("/swagger-ui/**", config);
}
return new CorsFilter(source);
} | @Test
void shouldCorsFilterOnApiPath() throws Exception {
props.getCors().setAllowedOrigins(Collections.singletonList("other.domain.com"));
props.getCors().setAllowedMethods(Arrays.asList("GET", "POST", "PUT", "DELETE"));
props.getCors().setAllowedHeaders(Collections.singletonList("*"));
props.getCors().setMaxAge(1800L);
props.getCors().setAllowCredentials(true);
MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new WebConfigurerTestController()).addFilters(webConfigurer.corsFilter()).build();
mockMvc
.perform(
options("/api/test-cors")
.header(HttpHeaders.ORIGIN, "other.domain.com")
.header(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "POST")
)
.andExpect(status().isOk())
.andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, "other.domain.com"))
.andExpect(header().string(HttpHeaders.VARY, "Origin"))
.andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, "GET,POST,PUT,DELETE"))
.andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"))
.andExpect(header().string(HttpHeaders.ACCESS_CONTROL_MAX_AGE, "1800"));
mockMvc
.perform(get("/api/test-cors").header(HttpHeaders.ORIGIN, "other.domain.com"))
.andExpect(status().isOk())
.andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, "other.domain.com"));
} |
public static String toJson(MetadataUpdate metadataUpdate) {
return toJson(metadataUpdate, false);
} | @Test
public void testSetSnapshotRefBranchToJsonAllFields() {
long snapshotId = 1L;
SnapshotRefType type = SnapshotRefType.BRANCH;
String refName = "hank";
Integer minSnapshotsToKeep = 2;
Long maxSnapshotAgeMs = 3L;
Long maxRefAgeMs = 4L;
String expected =
"{\"action\":\"set-snapshot-ref\",\"ref-name\":\"hank\",\"snapshot-id\":1,\"type\":\"branch\","
+ "\"min-snapshots-to-keep\":2,\"max-snapshot-age-ms\":3,\"max-ref-age-ms\":4}";
MetadataUpdate update =
new MetadataUpdate.SetSnapshotRef(
refName, snapshotId, type, minSnapshotsToKeep, maxSnapshotAgeMs, maxRefAgeMs);
String actual = MetadataUpdateParser.toJson(update);
assertThat(actual)
.as(
"Set snapshot ref should serialize to the correct JSON value for branch with all fields")
.isEqualTo(expected);
} |
@Override
public String convertToDatabaseColumn(Map<String, String> attribute) {
return GSON.toJson(attribute);
} | @Test
void convertToDatabaseColumn_null() {
assertEquals("null", this.converter.convertToDatabaseColumn(null));
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TopicIdPartition that = (TopicIdPartition) o;
return topicId.equals(that.topicId) &&
topicPartition.equals(that.topicPartition);
} | @Test
public void testEquals() {
assertEquals(topicIdPartition0, topicIdPartition1);
assertEquals(topicIdPartition1, topicIdPartition0);
assertEquals(topicIdPartitionWithNullTopic0, topicIdPartitionWithNullTopic1);
assertNotEquals(topicIdPartition0, topicIdPartition2);
assertNotEquals(topicIdPartition2, topicIdPartition0);
assertNotEquals(topicIdPartition0, topicIdPartitionWithNullTopic0);
assertNotEquals(topicIdPartitionWithNullTopic0, topicIdPartitionWithNullTopic2);
} |
static Future<Secret> getValidatedSecret(SecretOperator secretOperator, String namespace, String name, String... items) {
return secretOperator.getAsync(namespace, name)
.compose(secret -> validatedSecret(namespace, name, secret, items));
} | @Test
void testGetValidateSecret() {
String namespace = "ns";
String secretName = "my-secret";
Secret secret = new SecretBuilder()
.withNewMetadata()
.withName(secretName)
.withNamespace(namespace)
.endMetadata()
.withData(Map.of("key1", "value", "key2", "value", "key3", "value"))
.build();
SecretOperator secretOps = mock(SecretOperator.class);
when(secretOps.getAsync(eq(namespace), eq(secretName))).thenReturn(Future.succeededFuture(secret));
VertxUtil.getValidatedSecret(secretOps, namespace, secretName, "key1", "key2")
.onComplete(r -> {
assertThat(r.succeeded(), is(true));
assertThat(r.result(), is(secret));
});
} |
public boolean isRunning() {
return isRunning.get();
} | @Test
public void testServerStartOnCreation() {
assertTrue(application.isRunning());
} |
@Override
public List<Type> getColumnTypes()
{
return columnTypes;
} | @Test
public void testGetColumnTypes()
{
RecordSet recordSet = new JdbcRecordSet(jdbcClient, session, split, ImmutableList.of(
new JdbcColumnHandle("test", "text", JDBC_VARCHAR, VARCHAR, true, Optional.empty()),
new JdbcColumnHandle("test", "text_short", JDBC_VARCHAR, createVarcharType(32), true, Optional.empty()),
new JdbcColumnHandle("test", "value", JDBC_BIGINT, BIGINT, true, Optional.empty())));
assertEquals(recordSet.getColumnTypes(), ImmutableList.of(VARCHAR, createVarcharType(32), BIGINT));
recordSet = new JdbcRecordSet(jdbcClient, session, split, ImmutableList.of(
new JdbcColumnHandle("test", "value", JDBC_BIGINT, BIGINT, true, Optional.empty()),
new JdbcColumnHandle("test", "text", JDBC_VARCHAR, VARCHAR, true, Optional.empty())));
assertEquals(recordSet.getColumnTypes(), ImmutableList.of(BIGINT, VARCHAR));
recordSet = new JdbcRecordSet(jdbcClient, session, split, ImmutableList.of(
new JdbcColumnHandle("test", "value", JDBC_BIGINT, BIGINT, true, Optional.empty()),
new JdbcColumnHandle("test", "value", JDBC_BIGINT, BIGINT, true, Optional.empty()),
new JdbcColumnHandle("test", "text", JDBC_VARCHAR, VARCHAR, true, Optional.empty())));
assertEquals(recordSet.getColumnTypes(), ImmutableList.of(BIGINT, BIGINT, VARCHAR));
recordSet = new JdbcRecordSet(jdbcClient, session, split, ImmutableList.of());
assertEquals(recordSet.getColumnTypes(), ImmutableList.of());
} |
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
} | @Test
public void testPartitionedHours() throws Exception {
createPartitionedTable(spark, tableName, "hours(ts)");
SparkScanBuilder builder = scanBuilder();
HoursFunction.TimestampToHoursFunction function = new HoursFunction.TimestampToHoursFunction();
UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts")));
Predicate predicate =
new Predicate(
">=",
expressions(
udf, intLit(timestampStrToHourOrdinal("2017-11-22T06:02:09.243857+00:00"))));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(8);
// NOT GTEQ
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(2);
} |
public static AnnotateImagesFromBytesWithContext annotateImagesFromBytesWithContext(
List<Feature> features, long batchSize, int desiredRequestParallelism) {
return new AnnotateImagesFromBytesWithContext(features, batchSize, desiredRequestParallelism);
} | @Test
public void shouldConvertKVOfBytesToRequest() {
CloudVision.AnnotateImagesFromBytesWithContext annotateImagesFromBytesWithContext =
CloudVision.annotateImagesFromBytesWithContext(features, 1, 1);
AnnotateImageRequest request =
annotateImagesFromBytesWithContext.mapToRequest(KV.of(TEST_BYTES, null), null);
assertEquals(1, request.getFeaturesCount());
assertEquals(TEST_BYTES, request.getImage().getContent());
} |
protected Object getValidJMSHeaderValue(String headerName, Object headerValue) {
if (headerValue instanceof String) {
return headerValue;
} else if (headerValue instanceof BigInteger) {
return headerValue.toString();
} else if (headerValue instanceof BigDecimal) {
return headerValue.toString();
} else if (headerValue instanceof Number) {
return headerValue;
} else if (headerValue instanceof Character) {
return headerValue;
} else if (headerValue instanceof CharSequence) {
return headerValue.toString();
} else if (headerValue instanceof Boolean) {
return headerValue;
} else if (headerValue instanceof Date) {
if (this.endpoint.getConfiguration().isFormatDateHeadersToIso8601()) {
return ZonedDateTime.ofInstant(((Date) headerValue).toInstant(), ZoneOffset.UTC).toString();
} else {
return headerValue.toString();
}
}
return null;
} | @Test
public void testGetValidJmsHeaderValueWithIso8601DateShouldSucceed() {
when(mockJmsConfiguration.isFormatDateHeadersToIso8601()).thenReturn(true);
Object value = jmsBindingUnderTest.getValidJMSHeaderValue("foo", Date.from(instant));
assertEquals("2018-02-26T19:12:18Z", value);
} |
@Override
public void onSwipeLeft(boolean twoFingers) {} | @Test
public void testOnSwipeLeft() {
mUnderTest.onSwipeLeft(true);
Mockito.verifyZeroInteractions(mMockParentListener, mMockKeyboardDismissAction);
} |
public static Upstream selector(final List<Upstream> upstreamList, final String algorithm, final String ip) {
LoadBalancer loadBalance = ExtensionLoader.getExtensionLoader(LoadBalancer.class).getJoin(algorithm);
return loadBalance.select(upstreamList, ip);
} | @Test
public void loadBalanceUtilsOrderedWeightTest() {
List<Upstream> upstreamList =
Stream.of(10, 20, 70)
.map(weight -> Upstream.builder()
.url("upstream-" + weight)
.weight(weight)
.build())
.collect(Collectors.toList());
Map<String, Integer> countMap = new HashMap<>();
IntStream.range(0, 120).forEach(i -> {
Upstream result = LoadBalancerFactory.selector(upstreamList, LoadBalanceEnum.ROUND_ROBIN.getName(), "");
int count = countMap.getOrDefault(result.getUrl(), 0);
countMap.put(result.getUrl(), ++count);
});
assertEquals(12, countMap.get("upstream-10").intValue());
} |
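The assertion above is just proportional arithmetic: a weighted round-robin balancer should hand each upstream a share of selections proportional to its weight. A worked check for the 120 selections in the test:
// weights 10/20/70 sum to 100, so over 120 selections:
// upstream-10: 120 * 10/100 = 12 (the asserted count)
// upstream-20: 120 * 20/100 = 24
// upstream-70: 120 * 70/100 = 84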
@Override
protected Monitor createMonitor(URL url) {
URLBuilder urlBuilder = URLBuilder.from(url);
urlBuilder.setProtocol(url.getParameter(PROTOCOL_KEY, DUBBO_PROTOCOL));
if (StringUtils.isEmpty(url.getPath())) {
urlBuilder.setPath(MonitorService.class.getName());
}
String filter = url.getParameter(REFERENCE_FILTER_KEY);
if (StringUtils.isEmpty(filter)) {
filter = "";
} else {
filter = filter + ",";
}
urlBuilder.addParameters(CHECK_KEY, String.valueOf(false), REFERENCE_FILTER_KEY, filter + "-monitor");
Invoker<MonitorService> monitorInvoker = protocol.refer(MonitorService.class, urlBuilder.build());
MonitorService monitorService = proxyFactory.getProxy(monitorInvoker);
return new DubboMonitor(monitorInvoker, monitorService);
} | @Test
void testCreateMonitor() {
URL urlWithoutPath = URL.valueOf("http://10.10.10.11");
Monitor monitor = dubboMonitorFactory.createMonitor(urlWithoutPath);
assertThat(monitor, not(nullValue()));
URL urlWithFilterKey = URL.valueOf("http://10.10.10.11/").addParameter(REFERENCE_FILTER_KEY, "testFilter");
monitor = dubboMonitorFactory.createMonitor(urlWithFilterKey);
assertThat(monitor, not(nullValue()));
ArgumentCaptor<Invoker> invokerArgumentCaptor = ArgumentCaptor.forClass(Invoker.class);
verify(proxyFactory, atLeastOnce()).getProxy(invokerArgumentCaptor.capture());
Invoker invoker = invokerArgumentCaptor.getValue();
assertThat(invoker.getUrl().getParameter(REFERENCE_FILTER_KEY), containsString("testFilter"));
} |
public static String format(String source, Object... parameters) {
String current = source;
for (Object parameter : parameters) {
if (!current.contains("{}")) {
return current;
}
current = current.replaceFirst("\\{\\}", String.valueOf(parameter));
}
return current;
} | @Test
public void testFormatExtraBracket() {
String fmt = "Some string {} 2 {}";
assertEquals("Some string 1 2 {}", format(fmt, 1));
} |
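A minimal usage sketch of the format(...) helper above, covering both edge cases: surplus parameters are ignored once no "{}" remains, and surplus placeholders are left in place (as the test asserts).
String s1 = format("{} + {} = {}", 1, 2, 3); // "1 + 2 = 3"
String s2 = format("{} + {}", 1, 2, 3);      // "1 + 2"      - extra parameter ignored
String s3 = format("{} + {} = {}", 1, 2);    // "1 + 2 = {}" - unmatched placeholder kept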
public String determineRootDir(String location) {
int prefixEnd = location.indexOf(':') + 1;
int rootDirEnd = location.length();
while (rootDirEnd > prefixEnd && isPattern(location.substring(prefixEnd, rootDirEnd))) {
rootDirEnd = location.lastIndexOf('/', rootDirEnd - 2) + 1;
}
if (rootDirEnd == 0) {
rootDirEnd = prefixEnd;
}
return location.substring(0, rootDirEnd);
} | @Test
public void testDetermineRoot() {
AntPathMatcher matcher = new AntPathMatcher();
assertEquals("org/apache/camel", matcher.determineRootDir("org/apache/camel"));
assertEquals("org/apache/camel/", matcher.determineRootDir("org/apache/camel/"));
assertEquals("org/apache/camel/", matcher.determineRootDir("org/apache/camel/*.xml"));
assertEquals("WEB-INF/", matcher.determineRootDir("WEB-INF/*.xml"));
// this is not a pattern
assertEquals("org/apache/camel/mycamel.xml", matcher.determineRootDir("org/apache/camel/mycamel.xml"));
} |
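Two more cases sketch the branches the test does not cover, assuming isPattern(..) flags '*' and '?' as AntPathMatcher does: the scheme prefix survives the walk-back, and a pure pattern falls back to the prefix.
assertEquals("classpath:org/apache/camel/", matcher.determineRootDir("classpath:org/apache/camel/**/*.xml"));
assertEquals("", matcher.determineRootDir("*.xml")); // rootDirEnd == 0, so it falls back to prefixEnd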
public Group getGroup(JID jid) throws GroupNotFoundException {
JID groupJID = GroupJID.fromJID(jid);
return (groupJID instanceof GroupJID) ? getGroup(((GroupJID)groupJID).getGroupName()) : null;
} | @Test
public void aForceLookupHitWillIgnoreTheExistingCache() throws Exception {
groupCache.put(GROUP_NAME, CacheableOptional.of(cachedGroup));
doReturn(unCachedGroup).when(groupProvider).getGroup(GROUP_NAME);
final Group returnedGroup = groupManager.getGroup(GROUP_NAME, true);
assertThat(returnedGroup, is(unCachedGroup));
verify(groupProvider).getGroup(GROUP_NAME);
assertThat(groupCache.get(GROUP_NAME), is(CacheableOptional.of(unCachedGroup)));
} |
@Override
public Result apply(PathData item, int depth) throws IOException {
if (expression != null) {
return expression.apply(item, -1);
}
return Result.PASS;
} | @Test
public void apply() throws IOException {
PathData item = mock(PathData.class);
when(expr.apply(item, -1)).thenReturn(Result.PASS).thenReturn(Result.FAIL);
assertEquals(Result.PASS, test.apply(item, -1));
assertEquals(Result.FAIL, test.apply(item, -1));
verify(expr, times(2)).apply(item, -1);
verifyNoMoreInteractions(expr);
} |
@Bean("dispatcherHandler")
public DispatcherHandler dispatcherHandler() {
return new DispatcherHandler();
} | @Test
public void testDispatcherHandler() {
applicationContextRunner.run(context -> {
DispatcherHandler handler = context.getBean("dispatcherHandler", DispatcherHandler.class);
assertNotNull(handler);
}
);
} |
@Override
public void lock() {
try {
lock(-1, null, false);
} catch (InterruptedException e) {
throw new IllegalStateException();
}
} | @Test
public void testExpire() throws InterruptedException {
RLock lock = redisson.getLock("lock");
lock.lock(2, TimeUnit.SECONDS);
final long startTime = System.currentTimeMillis();
Thread t = new Thread() {
public void run() {
RLock lock1 = redisson.getLock("lock");
lock1.lock();
long spendTime = System.currentTimeMillis() - startTime;
Assertions.assertTrue(spendTime < 2020);
lock1.unlock();
}
};
t.start();
t.join();
assertThatThrownBy(() -> {
lock.unlock();
}).isInstanceOf(IllegalMonitorStateException.class);
} |
public static int getEditDistance(
String source,
String target,
boolean caseSensitive,
int changeCost,
int openGapCost,
int continueGapCost) {
if (!caseSensitive) {
source = Ascii.toLowerCase(source);
target = Ascii.toLowerCase(target);
}
int sourceLength = source.length();
int targetLength = target.length();
if (sourceLength == 0) {
return scriptCost(openGapCost, continueGapCost, targetLength);
}
if (targetLength == 0) {
return scriptCost(openGapCost, continueGapCost, sourceLength);
}
// mMatrix[i][j] = Cost of aligning source.substring(0,i) with
// target.substring(0,j), using an edit script ending with
// matched characters.
int[][] mMatrix = new int[sourceLength + 1][targetLength + 1];
// Cost of an alignment that ends with a bunch of deletions.
// dMatrix[i][j] = best found cost of changing the first i chars
// of source into the first j chars of target, ending with one
// or more deletes of source characters.
int[][] dMatrix = new int[sourceLength + 1][targetLength + 1];
// Cost of an alignment that ends with one or more insertions.
int[][] iMatrix = new int[sourceLength + 1][targetLength + 1];
mMatrix[0][0] = dMatrix[0][0] = iMatrix[0][0] = 0;
// Any edit script that changes i chars of source into zero
// chars of target will only involve deletions. So only the
// d&m Matrix entries are relevant, because dMatrix[i][0] gives
// the cost of changing an i-length string into a 0-length string,
// using an edit script ending in deletions.
for (int i = 1; i <= sourceLength; i++) {
mMatrix[i][0] = dMatrix[i][0] = scriptCost(openGapCost, continueGapCost, i);
// Make the iMatrix entries impossibly expensive, so they'll be
// ignored as inputs to min(). Use a big cost but not
// max int because that will overflow if anything's added to it.
iMatrix[i][0] = Integer.MAX_VALUE / 2;
}
for (int j = 1; j <= targetLength; j++) {
// Only the i&m Matrix entries are relevant here, because they represent
// the cost of changing a 0-length string into a j-length string, using
// an edit script ending in insertions.
mMatrix[0][j] = iMatrix[0][j] = scriptCost(openGapCost, continueGapCost, j);
// Make the dMatrix entries impossibly expensive, so they'll be
// ignored as inputs to min(). Use a big cost but not
// max int because that will overflow if anything's added to it.
dMatrix[0][j] = Integer.MAX_VALUE / 2;
}
for (int i = 1; i <= sourceLength; i++) {
char sourceI = source.charAt(i - 1);
for (int j = 1; j <= targetLength; j++) {
char targetJ = target.charAt(j - 1);
int cost = (sourceI == targetJ) ? 0 : changeCost;
// Cost of changing i chars of source into j chars of target,
// using an edit script ending in matched characters.
mMatrix[i][j] =
cost + Ints.min(mMatrix[i - 1][j - 1], iMatrix[i - 1][j - 1], dMatrix[i - 1][j - 1]);
// Cost of an edit script ending in a deletion.
dMatrix[i][j] =
Math.min(
mMatrix[i - 1][j] + openGapCost + continueGapCost,
dMatrix[i - 1][j] + continueGapCost);
// Cost of an edit script ending in an insertion.
iMatrix[i][j] =
Math.min(
mMatrix[i][j - 1] + openGapCost + continueGapCost,
iMatrix[i][j - 1] + continueGapCost);
}
}
// Return the minimum cost.
int costOfEditScriptEndingWithMatch = mMatrix[sourceLength][targetLength];
int costOfEditScriptEndingWithDelete = dMatrix[sourceLength][targetLength];
int costOfEditScriptEndingWithInsert = iMatrix[sourceLength][targetLength];
return Ints.min(
costOfEditScriptEndingWithMatch,
costOfEditScriptEndingWithDelete,
costOfEditScriptEndingWithInsert);
} | @Test
public void needlemanWunschEditDistance_matchesLevenshtein_withHugeGapCost() {
String identifier = "fooBar";
String otherIdentifier = "bazQux";
double levenshtein = LevenshteinEditDistance.getEditDistance(identifier, otherIdentifier);
double needlemanWunsch =
NeedlemanWunschEditDistance.getEditDistance(
identifier, otherIdentifier, /* caseSensitive= */ false, 1, 1000, 1000);
assertThat(needlemanWunsch).isEqualTo(levenshtein);
} |
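Setting openGapCost to 0 collapses the affine-gap model to plain Levenshtein distance, assuming scriptCost(open, cont, n) = open + n * cont as the gap-initialization code suggests, because every insert or delete then costs continueGapCost regardless of its position in a gap. A sanity-check sketch:
// unit costs reproduce Levenshtein distance: "kitten" -> "sitting" needs 3 edits
int d = NeedlemanWunschEditDistance.getEditDistance(
"kitten", "sitting", /* caseSensitive= */ true,
/* changeCost= */ 1, /* openGapCost= */ 0, /* continueGapCost= */ 1);
// d == 3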
public static void isGreaterThanOrEqualTo(int value, int minimumValue) {
isGreaterThanOrEqualTo(
value,
minimumValue,
String.format("value [%s] is less than minimum value [%s]", value, minimumValue));
} | @Test
public void testIsGreaterThanOrEqualTo1() {
Precondition.isGreaterThanOrEqualTo(1, 1);
} |
public ConfigCenterBuilder appendParameter(String key, String value) {
this.parameters = appendParameter(this.parameters, key, value);
return getThis();
} | @Test
void appendParameter() {
ConfigCenterBuilder builder = ConfigCenterBuilder.newBuilder();
builder.appendParameter("default.num", "one").appendParameter("num", "ONE");
Map<String, String> parameters = builder.build().getParameters();
Assertions.assertTrue(parameters.containsKey("default.num"));
Assertions.assertEquals("ONE", parameters.get("num"));
} |
@Override
public TaskType taskType() {
return taskType;
} | @Test
public void shouldReportTaskType() {
ProcessorStateManager stateMgr = getStateManager(Task.TaskType.STANDBY);
assertEquals(Task.TaskType.STANDBY, stateMgr.taskType());
stateMgr = getStateManager(Task.TaskType.ACTIVE);
assertEquals(Task.TaskType.ACTIVE, stateMgr.taskType());
} |
@Override
public ParamCheckResponse checkParamInfoList(List<ParamInfo> paramInfos) {
ParamCheckResponse paramCheckResponse = new ParamCheckResponse();
if (paramInfos == null) {
paramCheckResponse.setSuccess(true);
return paramCheckResponse;
}
for (ParamInfo paramInfo : paramInfos) {
paramCheckResponse = checkParamInfoFormat(paramInfo);
if (!paramCheckResponse.isSuccess()) {
return paramCheckResponse;
}
}
paramCheckResponse.setSuccess(true);
return paramCheckResponse;
} | @Test
void testCheckParamInfoForMetadata() {
ParamInfo paramInfo = new ParamInfo();
ArrayList<ParamInfo> paramInfos = new ArrayList<>();
paramInfos.add(paramInfo);
Map<String, String> metadata = new HashMap<>();
paramInfo.setMetadata(metadata);
// Max length
metadata.put("key1", "");
metadata.put("key2", buildStringLength(maxMetadataLength));
ParamCheckResponse actual = paramChecker.checkParamInfoList(paramInfos);
assertFalse(actual.isSuccess());
assertEquals(String.format("Param 'Metadata' is illegal, the param length should not exceed %d.", maxMetadataLength), actual.getMessage());
// Success
metadata.put("key2", String.format("Any key and value, only require length sum not more than %d.", maxMetadataLength));
actual = paramChecker.checkParamInfoList(paramInfos);
assertTrue(actual.isSuccess());
} |
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
// we will do the validation / topic-creation in a loop, until we have confirmed that all topics
// exist with the expected number of partitions, or some topic creation returns a fatal error.
log.debug("Starting to validate internal topics {} in partition assignor.", topics);
long currentWallClockMs = time.milliseconds();
final long deadlineMs = currentWallClockMs + retryTimeoutMs;
Set<String> topicsNotReady = new HashSet<>(topics.keySet());
final Set<String> newlyCreatedTopics = new HashSet<>();
while (!topicsNotReady.isEmpty()) {
final Set<String> tempUnknownTopics = new HashSet<>();
topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
newlyCreatedTopics.addAll(topicsNotReady);
if (!topicsNotReady.isEmpty()) {
final Set<NewTopic> newTopics = new HashSet<>();
for (final String topicName : topicsNotReady) {
if (tempUnknownTopics.contains(topicName)) {
// for the tempUnknownTopics, don't create topics for them
// we'll check again later if remaining retries > 0
continue;
}
final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
final Map<String, String> topicConfig = internalTopicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
log.debug("Going to create topic {} with {} partitions and config {}.",
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
topicConfig);
newTopics.add(
new NewTopic(
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
Optional.of(replicationFactor))
.configs(topicConfig));
}
// it's possible that some topics are not ready yet because they are
// temporarily unavailable, not because they do not exist; in that case
// the set of new topics to create may be empty and we can skip here
if (!newTopics.isEmpty()) {
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
final String topicName = createTopicResult.getKey();
try {
createTopicResult.getValue().get();
topicsNotReady.remove(topicName);
} catch (final InterruptedException fatalException) {
// this should not happen; if it ever happens it indicates a bug
Thread.currentThread().interrupt();
log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
} catch (final ExecutionException executionException) {
final Throwable cause = executionException.getCause();
if (cause instanceof TopicExistsException) {
// This topic either didn't exist earlier or its leader was not known before; just retain it for the next round of validation.
log.info(
"Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n"
+
"Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n"
+
"Error message was: {}", topicName, retryBackOffMs,
cause.toString());
} else {
log.error("Unexpected error during topic creation for {}.\n" +
"Error message was: {}", topicName, cause.toString());
if (cause instanceof UnsupportedVersionException) {
final String errorMessage = cause.getMessage();
if (errorMessage != null &&
errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
throw new StreamsException(String.format(
"Could not create topic %s, because brokers don't support configuration replication.factor=-1."
+ " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
topicName)
);
}
} else if (cause instanceof TimeoutException) {
log.error("Creating topic {} timed out.\n" +
"Error message was: {}", topicName, cause.toString());
} else {
throw new StreamsException(
String.format("Could not create topic %s.", topicName),
cause
);
}
}
}
}
}
}
if (!topicsNotReady.isEmpty()) {
currentWallClockMs = time.milliseconds();
if (currentWallClockMs >= deadlineMs) {
final String timeoutError = String.format("Could not create topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
log.error(timeoutError);
throw new TimeoutException(timeoutError);
}
log.info(
"Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
topicsNotReady,
retryBackOffMs,
deadlineMs - currentWallClockMs
);
Utils.sleep(retryBackOffMs);
}
}
log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);
return newlyCreatedTopics;
} | @Test
public void shouldExhaustRetriesOnTimeoutExceptionForMakeReady() {
mockAdminClient.timeoutNextRequest(5);
final InternalTopicManager topicManager = new InternalTopicManager(
new AutoAdvanceMockTime(time),
mockAdminClient,
new StreamsConfig(config)
);
final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
internalTopicConfig.setNumberOfPartitions(1);
try {
topicManager.makeReady(Collections.singletonMap(topic1, internalTopicConfig));
fail("Should have thrown TimeoutException.");
} catch (final TimeoutException expected) {
assertThat(expected.getMessage(), is("Could not create topics within 50 milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available."));
}
} |
@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
RowData inputRow = element.getValue();
long timestamp;
if (windowAssigner.isEventTime()) {
if (inputRow.isNullAt(rowtimeIndex)) {
// null timestamp would be dropped
numNullRowTimeRecordsDropped.inc();
return;
}
timestamp = inputRow.getTimestamp(rowtimeIndex, 3).getMillisecond();
} else {
timestamp = getProcessingTimeService().getCurrentProcessingTime();
}
timestamp = toUtcTimestampMills(timestamp, shiftTimeZone);
Collection<TimeWindow> elementWindows = windowAssigner.assignWindows(inputRow, timestamp);
collect(inputRow, elementWindows);
} | @Test
public void testTumblingWindows() throws Exception {
final TumblingWindowAssigner assigner = TumblingWindowAssigner.of(Duration.ofSeconds(3));
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
createTestHarness(assigner, shiftTimeZone);
testHarness.setup(OUT_SERIALIZER);
testHarness.open();
// process elements
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
testHarness.processElement(insertRecord("key1", 1, 20L));
testHarness.processElement(insertRecord("key2", 1, 3999L));
testHarness.processWatermark(new Watermark(999));
// append 3 fields: window_start, window_end, window_time
expectedOutput.add(insertRecord("key1", 1, 20L, localMills(0L), localMills(3000L), 2999L));
expectedOutput.add(
insertRecord("key2", 1, 3999L, localMills(3000L), localMills(6000L), 5999L));
expectedOutput.add(new Watermark(999));
ASSERTER.assertOutputEqualsSorted(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// late element would not be dropped
testHarness.processElement(insertRecord("key2", 1, 80L));
// rowtime is null, should be dropped
testHarness.processElement(insertRecord("key2", 1, ((Long) null)));
expectedOutput.add(insertRecord("key2", 1, 80L, localMills(0L), localMills(3000L), 2999L));
ASSERTER.assertOutputEqualsSorted(
"Output was not correct.", expectedOutput, testHarness.getOutput());
assertThat(
((AlignedWindowTableFunctionOperator) testHarness.getOperator())
.getNumNullRowTimeRecordsDropped()
.getCount())
.isEqualTo(1);
testHarness.close();
} |
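The window bounds in the expected output come from the usual tumbling assignment: round the (shifted) timestamp down to a multiple of the window size. A simplified sketch of that arithmetic (Flink's TimeWindow.getWindowStartWithOffset additionally handles offsets and negative timestamps):
long windowStart(long ts, long sizeMs) {
return ts - (ts % sizeMs); // no offset, non-negative timestamps
}
// windowStart(20L, 3000L)   == 0    -> window [0, 3000),    window_time 2999
// windowStart(3999L, 3000L) == 3000 -> window [3000, 6000), window_time 5999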
@Override
@SuppressWarnings("unchecked")
public CalendarEventModel apply(Map<String, Object> event, TransformerContext context) {
if (!"singleInstance"
.equals(event.get("type"))) { // support single instances for now; recurring events later
return null;
}
String calendarId = context.getProperty(CALENDAR_ID);
String title = (String) event.getOrDefault("subject", "");
String location = TransformerHelper.getOrDefault("location", "displayName", event, "");
// Notes is itemBody resource type defined as: { "content": "string", "contentType": "String"}
String notes = TransformerHelper.getOrDefault("body", "content", event, "");
List<Map<String, Object>> rawAttendees =
(List<Map<String, Object>>) event.getOrDefault("attendees", emptyList());
List<CalendarAttendeeModel> attendees = new ArrayList<>();
for (Object rawAttendee : rawAttendees) {
CalendarAttendeeModel attendee = context.transform(CalendarAttendeeModel.class, rawAttendee);
if (attendee != null) {
attendees.add(attendee);
}
}
CalendarEventModel.CalendarEventTime startTime =
context.transform(CalendarEventModel.CalendarEventTime.class, event.get("start"));
if (startTime == null) {
context.problem("Could not parse start time. Skipping event.");
return null;
}
CalendarEventModel.CalendarEventTime endTime =
context.transform(CalendarEventModel.CalendarEventTime.class, event.get("end"));
if (endTime == null) {
context.problem("Could not parse end time. Skipping event.");
return null;
}
return new CalendarEventModel(
calendarId, title, notes, attendees, location, startTime, endTime, null);
} | @SuppressWarnings("unchecked")
@Test
public void testTransform() throws IOException {
context.setProperty(CALENDAR_ID, "123");
Map<String, Object> rawEvent = mapper.readValue(SAMPLE_CALENDAR_EVENT, Map.class);
CalendarEventModel event = transformer.apply(rawEvent, context);
assertEquals("123", event.getCalendarId());
assertEquals("Some Place", event.getLocation());
assertEquals("Test Appointment 1", event.getTitle());
assertTrue(event.getNotes().length() > 5);
assertEquals(1, event.getAttendees().size());
CalendarAttendeeModel attendee = event.getAttendees().get(0);
assertEquals("Test Test1", attendee.getDisplayName());
assertEquals("foo@foo.com", attendee.getEmail());
assertFalse(attendee.getOptional());
assertEquals(18, event.getStartTime().getDateTime().getHour());
assertEquals(0, event.getStartTime().getDateTime().getMinute());
assertEquals(18, event.getEndTime().getDateTime().getHour());
assertEquals(30, event.getEndTime().getDateTime().getMinute());
} |
public ParseResult parse(File file) throws IOException, SchemaParseException {
return parse(file, null);
} | @Test
void testParseStream() throws IOException {
Schema schema = new SchemaParser().parse(new ByteArrayInputStream(SCHEMA_JSON.getBytes(StandardCharsets.UTF_16)))
.mainSchema();
assertEquals(SCHEMA_REAL, schema);
} |
@Override
public String normalise(String text) {
if (Objects.isNull(text) || text.isEmpty()) {
throw new IllegalArgumentException("Text cannot be null or empty");
}
return text.trim()
.toLowerCase()
.replaceAll("\\p{Punct}", "")
.replaceAll("\\s+", " ");
} | @Description("Normalise, when text is null, then throw IllegalArgumentException")
@Test
void normalise_WhenTextIsNull_ThenThrowIllegalArgumentException() {
// When & Then
assertThrows(IllegalArgumentException.class, () -> textNormaliser.normalise(null));
} |
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain filterChain) throws IOException {
try {
oAuthCsrfVerifier.verifyState(request, response, samlIdentityProvider, "CSRFToken");
} catch (AuthenticationException exception) {
AuthenticationError.handleError(request, response, exception.getMessage());
return;
}
if (!userSession.hasSession() || !userSession.isSystemAdministrator()) {
AuthenticationError.handleError(request, response, "User needs to be logged in as system administrator to access this page.");
return;
}
HttpServletRequest httpRequest = new HttpServletRequestWrapper(((JavaxHttpRequest) request).getDelegate()) {
@Override
public StringBuffer getRequestURL() {
return new StringBuffer(oAuth2ContextFactory.generateCallbackUrl(SamlIdentityProvider.KEY));
}
};
response.setContentType("text/html");
String htmlResponse = samlAuthenticator.getAuthenticationStatusPage(new JavaxHttpRequest(httpRequest), response);
String nonce = SamlValidationCspHeaders.addCspHeadersWithNonceToResponse(response);
htmlResponse = htmlResponse.replace("%NONCE%", nonce);
response.getWriter().print(htmlResponse);
} | @Test
public void do_filter_admin() throws IOException {
HttpServletRequest servletRequest = spy(HttpServletRequest.class);
HttpServletResponse servletResponse = mock(HttpServletResponse.class);
StringWriter stringWriter = new StringWriter();
doReturn(new PrintWriter(stringWriter)).when(servletResponse).getWriter();
FilterChain filterChain = mock(FilterChain.class);
doReturn(true).when(userSession).hasSession();
doReturn(true).when(userSession).isSystemAdministrator();
final String mockedHtmlContent = "mocked html content";
doReturn(mockedHtmlContent).when(samlAuthenticator).getAuthenticationStatusPage(any(), any());
underTest.doFilter(new JavaxHttpRequest(servletRequest), new JavaxHttpResponse(servletResponse), filterChain);
verify(samlAuthenticator).getAuthenticationStatusPage(any(), any());
verify(servletResponse).getWriter();
CSP_HEADERS.forEach(h -> verify(servletResponse).setHeader(eq(h), anyString()));
assertEquals(mockedHtmlContent, stringWriter.toString());
} |
@Override
public void publishLeaderInformation(String componentId, LeaderInformation leaderInformation) {
Preconditions.checkState(running.get());
if (!leaderLatch.hasLeadership()) {
return;
}
final String connectionInformationPath =
ZooKeeperUtils.generateConnectionInformationPath(componentId);
LOG.debug(
"Write leader information {} for component '{}' to {}.",
leaderInformation,
componentId,
ZooKeeperUtils.generateZookeeperPath(
curatorFramework.getNamespace(), connectionInformationPath));
try {
ZooKeeperUtils.writeLeaderInformationToZooKeeper(
leaderInformation,
curatorFramework,
leaderLatch::hasLeadership,
connectionInformationPath);
} catch (Exception e) {
leaderElectionListener.onError(e);
}
} | @Test
void testPublishEmptyLeaderInformation() throws Exception {
new Context() {
{
runTest(
() -> {
leaderElectionListener.await(LeaderElectionEvent.IsLeaderEvent.class);
final String componentId = "retrieved-component";
final DefaultLeaderRetrievalService defaultLeaderRetrievalService =
new DefaultLeaderRetrievalService(
new ZooKeeperLeaderRetrievalDriverFactory(
curatorFramework.asCuratorFramework(),
componentId,
ZooKeeperLeaderRetrievalDriver
.LeaderInformationClearancePolicy
.ON_LOST_CONNECTION));
final TestingListener leaderRetrievalListener = new TestingListener();
defaultLeaderRetrievalService.start(leaderRetrievalListener);
leaderElectionDriver.publishLeaderInformation(
componentId,
LeaderInformation.known(UUID.randomUUID(), "foobar"));
leaderRetrievalListener.waitForNewLeader();
leaderElectionDriver.publishLeaderInformation(
componentId, LeaderInformation.empty());
leaderRetrievalListener.waitForEmptyLeaderInformation();
assertThat(leaderRetrievalListener.getLeader())
.isEqualTo(LeaderInformation.empty());
});
}
};
} |
public static SupportedVersionRange fromMap(Map<String, Short> versionRangeMap) {
return new SupportedVersionRange(
BaseVersionRange.valueOrThrow(MIN_VERSION_KEY_LABEL, versionRangeMap),
BaseVersionRange.valueOrThrow(MAX_VERSION_KEY_LABEL, versionRangeMap));
} | @Test
public void testFromMapFailure() {
// min_version can't be < 0.
Map<String, Short> invalidWithBadMinVersion =
mkMap(mkEntry("min_version", (short) -1), mkEntry("max_version", (short) 0));
assertThrows(
IllegalArgumentException.class,
() -> SupportedVersionRange.fromMap(invalidWithBadMinVersion));
// max_version can't be < 0.
Map<String, Short> invalidWithBadMaxVersion =
mkMap(mkEntry("min_version", (short) 0), mkEntry("max_version", (short) -1));
assertThrows(
IllegalArgumentException.class,
() -> SupportedVersionRange.fromMap(invalidWithBadMaxVersion));
// min_version and max_version can't be < 0.
Map<String, Short> invalidWithBadMinMaxVersion =
mkMap(mkEntry("min_version", (short) -1), mkEntry("max_version", (short) -1));
assertThrows(
IllegalArgumentException.class,
() -> SupportedVersionRange.fromMap(invalidWithBadMinMaxVersion));
// min_version can't be > max_version.
Map<String, Short> invalidWithLowerMaxVersion =
mkMap(mkEntry("min_version", (short) 2), mkEntry("max_version", (short) 1));
assertThrows(
IllegalArgumentException.class,
() -> SupportedVersionRange.fromMap(invalidWithLowerMaxVersion));
// min_version key missing.
Map<String, Short> invalidWithMinKeyMissing =
mkMap(mkEntry("max_version", (short) 1));
assertThrows(
IllegalArgumentException.class,
() -> SupportedVersionRange.fromMap(invalidWithMinKeyMissing));
// max_version key missing.
Map<String, Short> invalidWithMaxKeyMissing =
mkMap(mkEntry("min_version", (short) 1));
assertThrows(
IllegalArgumentException.class,
() -> SupportedVersionRange.fromMap(invalidWithMaxKeyMissing));
} |
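For contrast with the failure cases, a well-formed map parses cleanly. A sketch using the same mkMap/mkEntry helpers as the test, assuming the min()/max() accessors inherited from BaseVersionRange:
Map<String, Short> valid = mkMap(mkEntry("min_version", (short) 1), mkEntry("max_version", (short) 3));
SupportedVersionRange range = SupportedVersionRange.fromMap(valid);
// range.min() == 1 && range.max() == 3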
public static boolean startsWithScheme( String vfsFileName ) {
FileSystemManager fsManager = getInstance().getFileSystemManager();
boolean found = false;
String[] schemes = fsManager.getSchemes();
for ( String scheme : schemes ) {
if ( vfsFileName.startsWith( scheme + ":" ) ) {
found = true;
break;
}
}
return found;
} | @Test
public void testStartsWithScheme() {
String fileName = "zip:file:///SavedLinkedres.zip!Calculate median and percentiles using the group by steps.ktr";
assertTrue( KettleVFS.startsWithScheme( fileName ) );
fileName = "SavedLinkedres.zip!Calculate median and percentiles using the group by steps.ktr";
assertFalse( KettleVFS.startsWithScheme( fileName ) );
} |
public SSLContext createContext(ContextAware context) throws NoSuchProviderException, NoSuchAlgorithmException,
KeyManagementException, UnrecoverableKeyException, KeyStoreException, CertificateException {
SSLContext sslContext = getProvider() != null ? SSLContext.getInstance(getProtocol(), getProvider())
: SSLContext.getInstance(getProtocol());
context.addInfo("SSL protocol '" + sslContext.getProtocol() + "' provider '" + sslContext.getProvider() + "'");
KeyManager[] keyManagers = createKeyManagers(context);
TrustManager[] trustManagers = createTrustManagers(context);
SecureRandom secureRandom = createSecureRandom(context);
sslContext.init(keyManagers, trustManagers, secureRandom);
return sslContext;
} | @Test
public void testCreateContext() throws Exception {
factoryBean.setKeyManagerFactory(keyManagerFactory);
factoryBean.setKeyStore(keyStore);
factoryBean.setTrustManagerFactory(trustManagerFactory);
factoryBean.setTrustStore(trustStore);
factoryBean.setSecureRandom(secureRandom);
assertNotNull(factoryBean.createContext(context));
assertTrue(keyManagerFactory.isFactoryCreated());
assertTrue(trustManagerFactory.isFactoryCreated());
assertTrue(keyStore.isKeyStoreCreated());
assertTrue(trustStore.isKeyStoreCreated());
assertTrue(secureRandom.isSecureRandomCreated());
// it's important that each configured component output an appropriate
// informational message to the context; i.e. this logging is not just
// for programmers, it's there for systems administrators to use in
// verifying that SSL is configured properly
assertTrue(context.hasInfoMatching(SSL_CONFIGURATION_MESSAGE_PATTERN));
assertTrue(context.hasInfoMatching(KEY_MANAGER_FACTORY_MESSAGE_PATTERN));
assertTrue(context.hasInfoMatching(TRUST_MANAGER_FACTORY_MESSAGE_PATTERN));
assertTrue(context.hasInfoMatching(KEY_STORE_MESSAGE_PATTERN));
assertTrue(context.hasInfoMatching(TRUST_STORE_MESSAGE_PATTERN));
assertTrue(context.hasInfoMatching(SECURE_RANDOM_MESSAGE_PATTERN));
} |
@Override
public double calcNormalizedEdgeDistance(double ry, double rx,
double ay, double ax,
double by, double bx) {
return calcNormalizedEdgeDistance3D(
ry, rx, 0,
ay, ax, 0,
by, bx, 0
);
} | @Test
public void testCalcNormalizedEdgeDistance() {
DistanceCalcEuclidean distanceCalc = new DistanceCalcEuclidean();
double distance = distanceCalc.calcNormalizedEdgeDistance(0, 10, 0, 0, 10, 10);
assertEquals(50, distance, 0);
} |
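The expected value 50 is the squared perpendicular distance; "normalized" here evidently means the Euclidean implementation skips the square root so distances stay cheap to compare. A worked check for the test's geometry:
// point (0,10), segment (0,0)-(10,10): the foot of the perpendicular is (5,5)
// distance^2 = (0-5)^2 + (10-5)^2 = 25 + 25 = 50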
public static UnixMountInfo parseMountInfo(String line) {
// Example mount lines:
// ramfs on /mnt/ramdisk type ramfs (rw,relatime,size=1gb)
// map -hosts on /net (autofs, nosuid, automounted, nobrowse)
UnixMountInfo.Builder builder = new UnixMountInfo.Builder();
// First get and remove the mount type if it's provided.
Matcher matcher = Pattern.compile(".* (type \\w+ ).*").matcher(line);
String lineWithoutType;
if (matcher.matches()) {
String match = matcher.group(1);
builder.setFsType(match.replace("type", "").trim());
lineWithoutType = line.replace(match, "");
} else {
lineWithoutType = line;
}
// Now parse the rest
matcher = Pattern.compile("(.*) on (.*) \\((.*)\\)").matcher(lineWithoutType);
if (!matcher.matches()) {
LOG.warn("Unable to parse output of '{}': {}", MOUNT_COMMAND, line);
return builder.build();
}
builder.setDeviceSpec(matcher.group(1));
builder.setMountPoint(matcher.group(2));
builder.setOptions(parseUnixMountOptions(matcher.group(3)));
return builder.build();
} | @Test
public void parseRamfsMountInfoWithType() throws Exception {
// Linux mount info.
UnixMountInfo info =
ShellUtils.parseMountInfo("ramfs on /mnt/ramdisk type ramfs (rw,relatime,size=1gb)");
assertEquals(Optional.of("ramfs"), info.getDeviceSpec());
assertEquals(Optional.of("/mnt/ramdisk"), info.getMountPoint());
assertEquals(Optional.of("ramfs"), info.getFsType());
assertEquals(Optional.of(Long.valueOf(Constants.GB)), info.getOptions().getSize());
} |
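The second example line in the method's own comment exercises the no-type branch; a sketch of the expected parse:
UnixMountInfo info = ShellUtils.parseMountInfo("map -hosts on /net (autofs, nosuid, automounted, nobrowse)");
// info.getDeviceSpec() -> Optional.of("map -hosts")
// info.getMountPoint() -> Optional.of("/net")
// info.getFsType()     -> Optional.empty(), since no "type ..." token is present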
public static <T> T checkFoundWithOptional(java.util.Optional<T> value, String message, Object... messageArguments) {
if (!value.isPresent()) {
throw new NotFoundException(format(message, messageArguments));
}
return value.get();
} | @Test
public void checkFoundWithOptional_throws_NotFoundException_if_empty_and_formats_message() {
String message = "foo %s";
assertThatExceptionOfType(NotFoundException.class)
.isThrownBy(() -> checkFoundWithOptional(Optional.empty(), message, "bar"))
.withMessage("foo bar");
} |
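A typical call-site sketch (the DAO names here are hypothetical): unwrap an Optional or fail with a formatted not-found error in one line.
// throws NotFoundException("project 'foo' not found") when the lookup is empty
ProjectDto project = checkFoundWithOptional(
projectDao.selectByKey(dbSession, key), // hypothetical lookup returning Optional<ProjectDto>
"project '%s' not found", key);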
public static boolean hasRestartMethods(Class<? extends ConfigInstance> configClass) {
try {
configClass.getDeclaredMethod("containsFieldsFlaggedWithRestart");
configClass.getDeclaredMethod("getChangesRequiringRestart", configClass);
return true;
} catch (NoSuchMethodException e) {
return false;
}
} | @Test
void requireThatRestartMethodsAreDetectedProperly() {
assertFalse(ReflectionUtil.hasRestartMethods(NonRestartConfig.class));
assertTrue(ReflectionUtil.hasRestartMethods(RestartConfig.class));
} |
public FEELFnResult<List> invoke(@ParameterName("list") List list, @ParameterName("position") BigDecimal position,
@ParameterName("newItem") Object newItem) {
if (list == null) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", CANNOT_BE_NULL));
}
if (position == null) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", CANNOT_BE_NULL));
}
int intPosition = position.intValue();
if (intPosition == 0 || Math.abs(intPosition) > list.size()) {
String paramProblem = String.format("%s outside valid boundaries (1-%s)", intPosition, list.size());
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", paramProblem));
}
Object e = NumberEvalHelper.coerceNumber(newItem);
List toReturn = new ArrayList(list);
int replacementPosition = intPosition > 0 ? intPosition -1 : list.size() - Math.abs(intPosition);
toReturn.set(replacementPosition, e);
return FEELFnResult.ofResult(toReturn);
} | @Test
void invokeReplaceByNegativePositionWithNotNull() {
List list = getList();
List expected = new ArrayList<>(list);
expected.set(2, "test");
FunctionTestUtil.assertResult(listReplaceFunction.invoke(list, BigDecimal.valueOf(-1), "test"), expected);
} |
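A quick sketch of the index arithmetic above, for a hypothetical four-element list: positive positions are 1-based from the front, negative positions count back from the end, and 0 or anything beyond the list size produces an InvalidParametersEvent.
// list of size 4:
// position  2 -> replacementPosition = 2 - 1    = 1 (second element)
// position -1 -> replacementPosition = 4 - |-1| = 3 (last element, as in the test)
// position  0 or |position| > 4                 -> error result, list untouched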
public void setUsers(List<?> users) {
userPasswords.clear();
userGroups.clear();
for (Iterator<?> it = users.iterator(); it.hasNext();) {
AuthenticationUser user = (AuthenticationUser)it.next();
userPasswords.put(user.getUsername(), user.getPassword());
Set<Principal> groups = new HashSet<Principal>();
if (user.getGroups() != null) {
StringTokenizer iter = new StringTokenizer(user.getGroups(), ",");
while (iter.hasMoreTokens()) {
String name = iter.nextToken().trim();
groups.add(new GroupPrincipal(name));
}
}
userGroups.put(user.getUsername(), groups);
}
} | @Test
public void testSetUsers() {
AuthenticationUser alice = new AuthenticationUser("alice", "password", "group1");
AuthenticationUser bob = new AuthenticationUser("bob", "security", "group2");
SimpleAuthenticationPlugin authenticationPlugin = new SimpleAuthenticationPlugin();
authenticationPlugin.setUsers(Arrays.asList(alice, bob));
assertFalse(authenticationPlugin.isAnonymousAccessAllowed());
Map<String, String> userPasswords = authenticationPlugin.getUserPasswords();
assertEquals(2, userPasswords.size());
assertEquals("password", userPasswords.get("alice"));
assertEquals("security", userPasswords.get("bob"));
Map<String, Set<Principal>> userGroups = authenticationPlugin.getUserGroups();
assertEquals(2, userGroups.size());
Set<Principal> aliceGroups = userGroups.get("alice");
assertNotNull(aliceGroups);
assertEquals(1, aliceGroups.size());
assertEquals("group1", aliceGroups.iterator().next().getName());
Set<Principal> bobGroups = userGroups.get("bob");
assertNotNull(bobGroups);
assertEquals(1, bobGroups.size());
assertEquals("group2", bobGroups.iterator().next().getName());
} |
@Override
public Publisher<Exchange> from(String uri) {
final String name = publishedUriToStream.computeIfAbsent(uri, camelUri -> {
try {
String uuid = context.getUuidGenerator().generateUuid();
RouteBuilder.addRoutes(context, rb -> rb.from(camelUri).to("reactive-streams:" + uuid));
return uuid;
} catch (Exception e) {
throw new IllegalStateException("Unable to create source reactive stream from direct URI: " + uri, e);
}
});
return fromStream(name);
} | @Test
public void testFrom() throws Exception {
context.start();
Publisher<Exchange> timer = crs.from("timer:reactive?period=250&repeatCount=3&includeMetadata=true");
AtomicInteger value = new AtomicInteger();
CountDownLatch latch = new CountDownLatch(3);
Flux.from(timer)
.map(exchange -> ExchangeHelper.getHeaderOrProperty(exchange, Exchange.TIMER_COUNTER, Integer.class))
.doOnNext(res -> assertEquals(value.incrementAndGet(), res.intValue()))
.doOnNext(res -> latch.countDown())
.subscribe();
assertTrue(latch.await(2, TimeUnit.SECONDS));
} |
@Override
public Num calculate(BarSeries series, Position position) {
if (position.isClosed()) {
final int exitIndex = position.getExit().getIndex();
final int entryIndex = position.getEntry().getIndex();
return series.numOf(exitIndex - entryIndex + 1);
}
return series.zero();
} | @Test
public void calculateWithNoPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
AnalysisCriterion numberOfBars = getCriterion();
assertNumEquals(0, numberOfBars.calculate(series, new BaseTradingRecord()));
} |
public void lock(long lockedByPipelineId) {
this.lockedByPipelineId = lockedByPipelineId;
locked = true;
} | @Test
void shouldBeEqualToAnotherPipelineStateIfBothDoNotHaveLockedBy() {
PipelineState pipelineState1 = new PipelineState("p");
PipelineState pipelineState2 = new PipelineState("p");
pipelineState1.lock(1);
pipelineState2.lock(1);
assertThat(pipelineState2).isEqualTo(pipelineState1);
} |
public IntersectOperator(OpChainExecutionContext opChainExecutionContext, List<MultiStageOperator> inputOperators,
DataSchema dataSchema) {
super(opChainExecutionContext, inputOperators, dataSchema);
} | @Test
public void testIntersectOperator() {
DataSchema schema = new DataSchema(new String[]{"int_col", "string_col"}, new DataSchema.ColumnDataType[]{
DataSchema.ColumnDataType.INT, DataSchema.ColumnDataType.STRING
});
Mockito.when(_leftOperator.nextBlock())
.thenReturn(OperatorTestUtil.block(schema, new Object[]{1, "AA"}, new Object[]{2, "BB"}, new Object[]{3, "CC"}))
.thenReturn(TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));
Mockito.when(_rightOperator.nextBlock()).thenReturn(
OperatorTestUtil.block(schema, new Object[]{1, "AA"}, new Object[]{2, "BB"}, new Object[]{4, "DD"}))
.thenReturn(TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));
IntersectOperator intersectOperator =
new IntersectOperator(OperatorTestUtil.getTracingContext(), ImmutableList.of(_leftOperator, _rightOperator),
schema);
TransferableBlock result = intersectOperator.nextBlock();
while (result.getType() != DataBlock.Type.ROW) {
result = intersectOperator.nextBlock();
}
List<Object[]> resultRows = result.getContainer();
List<Object[]> expectedRows = Arrays.asList(new Object[]{1, "AA"}, new Object[]{2, "BB"});
Assert.assertEquals(resultRows.size(), expectedRows.size());
for (int i = 0; i < resultRows.size(); i++) {
Assert.assertEquals(resultRows.get(i), expectedRows.get(i));
}
} |
public boolean fileIsInAllowedPath(Path path) {
if (allowedPaths.isEmpty()) {
return true;
}
final Path realFilePath = resolveRealPath(path);
if (realFilePath == null) {
return false;
}
for (Path allowedPath : allowedPaths) {
final Path realAllowedPath = resolveRealPath(allowedPath);
if (realAllowedPath != null && realFilePath.startsWith(realAllowedPath)) {
return true;
}
}
return false;
} | @Test
public void inAllowedPath() throws IOException {
final Path permittedPath = permittedTempDir.getRoot().toPath();
final Path filePath = permittedTempDir.newFile(FILE).toPath();
pathChecker = new AllowedAuxiliaryPathChecker(new TreeSet<>(Collections.singleton(permittedPath)));
assertTrue(pathChecker.fileIsInAllowedPath(filePath));
} |
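A sketch of the matching rule, assuming resolveRealPath resolves symlinks through the filesystem: an empty allow-list permits everything, otherwise the file's real path must be prefixed by one of the allowed real paths.
// allowedPaths = { /var/lib/graylog/permitted }   (hypothetical)
// /var/lib/graylog/permitted/lookup.csv -> true   (prefix match on real paths)
// /etc/passwd                           -> false  (no allowed prefix)
// allowedPaths = {}                     -> true for any path (checks disabled)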
public int generateWorkerId(final Properties props) {
Preconditions.checkArgument(workerIdGenerator.get() != null, "Worker id generator is not initialized.");
int result = workerIdGenerator.get().generate(props);
instance.setWorkerId(result);
return result;
} | @Test
void assertGenerateWorkerId() {
ComputeNodeInstanceContext context = new ComputeNodeInstanceContext(
new ComputeNodeInstance(mock(InstanceMetaData.class)), mock(WorkerIdGenerator.class), modeConfig, lockContext, eventBusContext);
assertThat(context.generateWorkerId(new Properties()), is(0));
} |
@Override
public void writeTo(ByteBuf byteBuf) throws LispWriterException {
WRITER.writeTo(byteBuf, this);
} | @Test
public void testSerialization() throws LispReaderException, LispWriterException,
LispParseError, DeserializationException {
ByteBuf byteBuf = Unpooled.buffer();
MapReferralWriter writer = new MapReferralWriter();
writer.writeTo(byteBuf, referral1);
MapReferralReader reader = new MapReferralReader();
LispMapReferral deserialized = reader.readFrom(byteBuf);
new EqualsTester().addEqualityGroup(referral1, deserialized).testEquals();
} |
@Override
@SuppressWarnings("unchecked")
public Neighbor<double[], E>[] search(double[] q, int k) {
if (k < 1) {
throw new IllegalArgumentException("Invalid k: " + k);
}
Set<Integer> candidates = getCandidates(q);
k = Math.min(k, candidates.size());
HeapSelect<Neighbor<double[], E>> heap = new HeapSelect<>(new Neighbor[k]);
for (int index : candidates) {
double[] key = keys.get(index);
if (q != key) {
double distance = MathEx.distance(q, key);
heap.add(new Neighbor<>(key, data.get(index), index, distance));
}
}
heap.sort();
return heap.toArray();
} | @Test
public void testKnn() {
System.out.println("knn");
int[] recall = new int[testx.length];
for (int i = 0; i < testx.length; i++) {
int k = 7;
Neighbor[] n1 = lsh.search(testx[i], k);
Neighbor[] n2 = naive.search(testx[i], k);
for (Neighbor m2 : n2) {
for (Neighbor m1 : n1) {
if (m1.index == m2.index) {
recall[i]++;
break;
}
}
}
}
System.out.format("q1 of recall is %d%n", MathEx.q1(recall));
System.out.format("median of recall is %d%n", MathEx.median(recall));
System.out.format("q3 of recall is %d%n", MathEx.q3(recall));
} |
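The search method above follows the standard LSH query pattern: hash q to a candidate set, compute exact distances only for those candidates, and keep the k nearest with a bounded heap. A self-contained sketch of the selection step, using a JDK PriorityQueue in place of Smile's HeapSelect; all names here are illustrative.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

final class KnnSelectSketch {
    record Neighbor(int index, double distance) {}

    static List<Neighbor> kNearest(double[] q, List<double[]> candidates, int k) {
        if (k < 1) {
            throw new IllegalArgumentException("Invalid k: " + k);
        }
        // Max-heap ordered by distance: the root is the worst of the current k.
        PriorityQueue<Neighbor> heap =
                new PriorityQueue<>(Comparator.comparingDouble(Neighbor::distance).reversed());
        for (int i = 0; i < candidates.size(); i++) {
            double d = euclidean(q, candidates.get(i));
            if (heap.size() < k) {
                heap.add(new Neighbor(i, d));
            } else if (d < heap.peek().distance()) {
                heap.poll(); // evict the current worst neighbor
                heap.add(new Neighbor(i, d));
            }
        }
        List<Neighbor> result = new ArrayList<>(heap);
        result.sort(Comparator.comparingDouble(Neighbor::distance));
        return result;
    }

    private static double euclidean(double[] x, double[] y) {
        double s = 0.0;
        for (int i = 0; i < x.length; i++) {
            double d = x[i] - y[i];
            s += d * d;
        }
        return Math.sqrt(s);
    }
}

Because LSH only scores candidates that collide with the query, recall is below 100% by construction, which is what the test above quantifies with the quartiles of the per-query recall distribution.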
@Override
public void close() throws IOException {
if (mUfsInStream.isPresent()) {
mUfsInStream.get().close();
mUfsInStream = Optional.empty();
}
} | @Test
public void readWithNullUfsStream() {
assertThrows(NullPointerException.class,
() -> new UfsFileInStream(null, 0L).close());
} |
static Set<PipelineOptionSpec> getOptionSpecs(
Class<? extends PipelineOptions> optionsInterface, boolean skipHidden) {
Iterable<Method> methods = ReflectHelpers.getClosureOfMethodsOnInterface(optionsInterface);
Multimap<String, Method> propsToGetters = getPropertyNamesToGetters(methods);
ImmutableSet.Builder<PipelineOptionSpec> setBuilder = ImmutableSet.builder();
for (Map.Entry<String, Method> propAndGetter : propsToGetters.entries()) {
String prop = propAndGetter.getKey();
Method getter = propAndGetter.getValue();
@SuppressWarnings("unchecked")
Class<? extends PipelineOptions> declaringClass =
(Class<? extends PipelineOptions>) getter.getDeclaringClass();
if (!PipelineOptions.class.isAssignableFrom(declaringClass)) {
continue;
}
if (skipHidden && declaringClass.isAnnotationPresent(Hidden.class)) {
continue;
}
setBuilder.add(PipelineOptionSpec.of(declaringClass, prop, getter));
}
return setBuilder.build();
} | @Test
public void testShouldSerialize() {
Set<PipelineOptionSpec> properties =
PipelineOptionsReflector.getOptionSpecs(JsonIgnoreOptions.class, true);
assertThat(properties, hasItem(allOf(hasName("notIgnored"), shouldSerialize())));
assertThat(properties, hasItem(allOf(hasName("ignored"), not(shouldSerialize()))));
} |
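getOptionSpecs above walks the closure of an options interface, keeps one spec per property getter, and honors a Hidden annotation at the declaring-interface level. A simplified, self-contained sketch of that discovery step; the annotation and the property-name derivation are stand-ins, not Beam's.

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Method;
import java.util.LinkedHashMap;
import java.util.Map;

final class OptionSpecSketch {
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.TYPE)
    @interface Hidden {}

    static Map<String, Method> gettersOf(Class<?> iface, boolean skipHidden) {
        Map<String, Method> getters = new LinkedHashMap<>();
        // For an interface, getMethods() includes methods inherited from
        // super-interfaces but not the implicit methods of java.lang.Object.
        for (Method m : iface.getMethods()) {
            if (skipHidden && m.getDeclaringClass().isAnnotationPresent(Hidden.class)) {
                continue;
            }
            String name = m.getName();
            if (name.startsWith("get") && name.length() > 3 && m.getParameterCount() == 0) {
                String prop = Character.toLowerCase(name.charAt(3)) + name.substring(4);
                getters.putIfAbsent(prop, m);
            }
        }
        return getters;
    }
}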
static <T> Optional<T> lookupByNameAndType(CamelContext camelContext, String name, Class<T> type) {
return Optional.ofNullable(ObjectHelper.isEmpty(name) ? null : name)
.map(n -> EndpointHelper.isReferenceParameter(n)
? EndpointHelper.resolveReferenceParameter(camelContext, n, type, false)
: camelContext.getRegistry().lookupByNameAndType(n, type));
} | @Test
void testLookupByNameAndTypeWithReferenceParameter() {
String name = "#referenceParameter";
when(camelContext.getRegistry()).thenReturn(mockRegistry);
Optional<Object> object = DynamicRouterRecipientListHelper.lookupByNameAndType(camelContext, name, Object.class);
Assertions.assertFalse(object.isPresent());
} |
@Override
public List<Operation> parse(String statement) {
CalciteParser parser = calciteParserSupplier.get();
FlinkPlannerImpl planner = validatorSupplier.get();
Optional<Operation> command = EXTENDED_PARSER.parse(statement);
if (command.isPresent()) {
return Collections.singletonList(command.get());
}
// parse the sql query
// use parseSqlList here because we need to support statement end with ';' in sql client.
SqlNodeList sqlNodeList = parser.parseSqlList(statement);
List<SqlNode> parsed = sqlNodeList.getList();
Preconditions.checkArgument(parsed.size() == 1, "only single statement supported");
return Collections.singletonList(
SqlNodeToOperationConversion.convert(planner, catalogManager, parsed.get(0))
.orElseThrow(() -> new TableException("Unsupported query: " + statement)));
} | @Test
void testParseLegalStatements() {
for (TestSpec spec : TEST_SPECS) {
if (spec.expectedSummary != null) {
Operation op = parser.parse(spec.statement).get(0);
assertThat(op.asSummaryString()).isEqualTo(spec.expectedSummary);
}
if (spec.expectedError != null) {
assertThatThrownBy(() -> parser.parse(spec.statement))
.isInstanceOf(SqlParserException.class)
.hasMessageContaining(spec.expectedError);
}
}
} |
@Override
public ConfigDef config() {
return CONFIG_DEF;
} | @Test
public void testPatternRequiredInConfig() {
Map<String, String> props = new HashMap<>();
ConfigException e = assertThrows(ConfigException.class, () -> config(props));
assertTrue(e.getMessage().contains("Missing required configuration \"pattern\""));
} |
@Override
public void renameTable(ObjectPath tablePath, String newTableName, boolean ignoreIfNotExists)
throws TableNotExistException, TableAlreadyExistException, CatalogException {
checkNotNull(tablePath, "Table path cannot be null");
checkArgument(
!isNullOrWhitespaceOnly(newTableName), "New table name cannot be null or empty");
try {
// alter_table() doesn't throw a clear exception when target table doesn't exist.
// Thus, check the table existence explicitly
if (tableExists(tablePath)) {
ObjectPath newPath = new ObjectPath(tablePath.getDatabaseName(), newTableName);
// alter_table() doesn't throw a clear exception when new table already exists.
// Thus, check the table existence explicitly
if (tableExists(newPath)) {
throw new TableAlreadyExistException(getName(), newPath);
} else {
Table hiveTable = getHiveTable(tablePath);
                    // Update the Hudi table config with the new table name
StorageDescriptor sd = hiveTable.getSd();
String location = sd.getLocation();
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setBasePath(location)
.setConf(HadoopFSUtils.getStorageConfWithCopy(hiveConf)).build();
                    // Init table with the new name
HoodieTableMetaClient.withPropertyBuilder().fromProperties(metaClient.getTableConfig().getProps())
.setTableName(newTableName)
.initTable(HadoopFSUtils.getStorageConfWithCopy(hiveConf), location);
hiveTable.setTableName(newTableName);
client.alter_table(
tablePath.getDatabaseName(), tablePath.getObjectName(), hiveTable);
}
} else if (!ignoreIfNotExists) {
throw new TableNotExistException(getName(), tablePath);
}
} catch (Exception e) {
throw new HoodieCatalogException(
String.format("Failed to rename table %s", tablePath.getFullName()), e);
}
} | @Test
public void testRenameTable() throws Exception {
Map<String, String> originOptions = new HashMap<>();
originOptions.put(FactoryUtil.CONNECTOR.key(), "hudi");
CatalogTable originTable =
new CatalogTableImpl(schema, partitions, originOptions, "hudi table");
hoodieCatalog.createTable(tablePath, originTable, false);
hoodieCatalog.renameTable(tablePath, "test1", false);
assertEquals(hoodieCatalog.getHiveTable(new ObjectPath("default", "test1")).getTableName(), "test1");
hoodieCatalog.renameTable(new ObjectPath("default", "test1"), "test", false);
} |
static Serde<List<?>> createSerde(final PersistenceSchema schema) {
final List<SimpleColumn> columns = schema.columns();
if (columns.isEmpty()) {
// No columns:
return new KsqlVoidSerde<>();
}
if (columns.size() != 1) {
throw new KsqlException("The '" + FormatFactory.KAFKA.name()
+ "' format only supports a single field. Got: " + columns);
}
final SimpleColumn singleColumn = columns.get(0);
final Class<?> javaType = SchemaConverters.sqlToJavaConverter()
.toJavaType(singleColumn.type());
return createSerde(singleColumn, javaType);
} | @Test
public void shouldThrowIfMap() {
// Given:
final PersistenceSchema schema = schemaWithFieldOfType(
SqlTypes.map(SqlTypes.STRING, SqlTypes.STRING
));
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> KafkaSerdeFactory.createSerde(schema)
);
// Then:
assertThat(e.getMessage(), containsString("The 'KAFKA' format does not support type 'MAP'"));
} |
@Override
void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception {
ObjectUtil.checkNotNull(headerBlock, "headerBlock");
ObjectUtil.checkNotNull(frame, "frame");
if (cumulation == null) {
decodeHeaderBlock(headerBlock, frame);
if (headerBlock.isReadable()) {
cumulation = alloc.buffer(headerBlock.readableBytes());
cumulation.writeBytes(headerBlock);
}
} else {
cumulation.writeBytes(headerBlock);
decodeHeaderBlock(cumulation, frame);
if (cumulation.isReadable()) {
cumulation.discardReadBytes();
} else {
releaseBuffer();
}
}
} | @Test
public void testMultipleValuesEndsWithNull() throws Exception {
ByteBuf headerBlock = Unpooled.buffer(28);
headerBlock.writeInt(1);
headerBlock.writeInt(4);
headerBlock.writeBytes(nameBytes);
headerBlock.writeInt(12);
headerBlock.writeBytes(valueBytes);
headerBlock.writeByte(0);
headerBlock.writeBytes(valueBytes);
headerBlock.writeByte(0);
decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame);
assertFalse(headerBlock.isReadable());
assertTrue(frame.isInvalid());
assertEquals(1, frame.headers().names().size());
assertTrue(frame.headers().contains(name));
assertEquals(1, frame.headers().getAll(name).size());
assertEquals(value, frame.headers().get(name));
headerBlock.release();
} |
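The decode method above is an instance of Netty's cumulation idiom for input that may arrive in fragments: decode straight from the incoming buffer while there is no leftover, otherwise append to a cumulation buffer, decode from that, and either compact it or release it. A minimal sketch of the same idiom for a toy length-prefixed format, unrelated to SPDY's header encoding; names are illustrative.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

final class CumulatingDecoderSketch {
    private ByteBuf cumulation;

    void decode(ByteBufAllocator alloc, ByteBuf in) {
        if (cumulation == null) {
            decodeFrames(in);
            if (in.isReadable()) { // keep the incomplete tail for the next call
                cumulation = alloc.buffer(in.readableBytes());
                cumulation.writeBytes(in);
            }
        } else {
            cumulation.writeBytes(in);
            decodeFrames(cumulation);
            if (cumulation.isReadable()) {
                cumulation.discardReadBytes(); // compact the consumed prefix
            } else {
                cumulation.release();
                cumulation = null;
            }
        }
    }

    // Toy frame format: [int length][length payload bytes].
    private void decodeFrames(ByteBuf buf) {
        while (buf.readableBytes() >= 4) {
            buf.markReaderIndex();
            int length = buf.readInt();
            if (buf.readableBytes() < length) {
                buf.resetReaderIndex(); // incomplete frame; wait for more data
                return;
            }
            byte[] payload = new byte[length];
            buf.readBytes(payload);
            // A real decoder would hand the payload to a frame consumer here.
        }
    }
}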