focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k) |
---|---|
@Override
public Processor<K, Change<V>, KO, SubscriptionWrapper<K>> get() {
return new UnbindChangeProcessor();
} | @Test
public void leftJoinShouldPropagateNewPrimaryKeyWithNonNullFK() {
final MockInternalNewProcessorContext<String, SubscriptionWrapper<String>> context = new MockInternalNewProcessorContext<>();
leftJoinProcessor.init(context);
context.setRecordMetadata("topic", 0, 0);
final LeftValue leftRecordValue = new LeftValue(fk1);
leftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, null), 0));
assertThat(context.forwarded().size(), is(1));
assertThat(
context.forwarded().get(0).record(),
is(new Record<>(fk1, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0))
);
} |
@Override
public boolean isTypeOf(Class<?> type) {
checkNotNull(type);
return id.isTypeOf(type);
} | @Test
public void testTypeOf() {
DiscreteResource discrete = Resources.discrete(D1, P1, VLAN1).resource();
assertThat(discrete.isTypeOf(DeviceId.class), is(false));
assertThat(discrete.isTypeOf(PortNumber.class), is(false));
assertThat(discrete.isTypeOf(VlanId.class), is(true));
} |
@Override
public boolean tryStartNewSegment(
TieredStorageSubpartitionId subpartitionId, int segmentId, int minNumBuffers) {
File filePath = dataFilePath.toFile();
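// A new segment can only start if, after reserving space for the segment's buffers
// (max(numBuffersPerSegment, minNumBuffers) * bufferSizeBytes), the remaining usable
// space on the disk still exceeds the configured reserved fraction of the total space.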
boolean canStartNewSegment =
filePath.getUsableSpace()
- (long) Math.max(numBuffersPerSegment, minNumBuffers)
* bufferSizeBytes
> (long) (filePath.getTotalSpace() * minReservedDiskSpaceFraction);
if (canStartNewSegment) {
firstBufferIndexInSegment
.get(subpartitionId.getSubpartitionId())
.put(
diskCacheManager.getBufferIndex(subpartitionId.getSubpartitionId()),
segmentId);
diskCacheManager.startSegment(subpartitionId.getSubpartitionId(), segmentId);
}
return canStartNewSegment;
} | @Test
void testStartNewSegmentSuccess() throws IOException {
String partitionFile = TempDirUtils.newFile(tempFolder, "test").toString();
File testFile = new File(partitionFile + DATA_FILE_SUFFIX);
assertThat(testFile.createNewFile()).isTrue();
try (DiskTierProducerAgent diskTierProducerAgent =
createDiskTierProducerAgent(
false,
NUM_BYTES_PER_SEGMENT,
0,
partitionFile,
new TestingPartitionFileWriter.Builder().build(),
new TieredStorageResourceRegistry())) {
assertThat(diskTierProducerAgent.tryStartNewSegment(SUBPARTITION_ID, 0, 0)).isTrue();
}
} |
@Override
public void configure(ResourceGroup group, SelectionContext<VariableMap> context)
{
Map.Entry<ResourceGroupIdTemplate, ResourceGroupSpec> entry = getMatchingSpec(group, context);
configureGroup(group, entry.getValue());
} | @Test
public void testExtractVariableConfiguration() throws IOException
{
ResourceGroupConfigurationManager<VariableMap> manager = parse("resource_groups_config_extract_variable.json");
VariableMap variableMap = new VariableMap(ImmutableMap.of("USER", "user", "domain", "prestodb", "region", "us_east", "cluster", "12"));
ResourceGroupId globalId = new ResourceGroupId("global");
manager.configure(new TestingResourceGroup(globalId), new SelectionContext<>(globalId, variableMap));
ResourceGroupId childId = new ResourceGroupId(new ResourceGroupId("global"), "prestodb:us_east:12");
TestingResourceGroup child = new TestingResourceGroup(childId);
manager.configure(child, new SelectionContext<>(childId, variableMap));
assertEquals(child.getHardConcurrencyLimit(), 3);
} |
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
try {
if(status.isExists()) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s to be replaced with %s", renamed, file));
}
new DropboxDeleteFeature(session).delete(Collections.singletonMap(renamed, status), connectionCallback, callback);
}
final RelocationResult result = new DbxUserFilesRequests(session.getClient(file)).moveV2(containerService.getKey(file), containerService.getKey(renamed));
return renamed.withAttributes(new DropboxAttributesFinderFeature(session).toAttributes(result.getMetadata()));
}
catch(DbxException e) {
throw new DropboxExceptionMappingService().map("Cannot move {0}", e, file);
}
} | @Test
public void testMoveFile() throws Exception {
final Path home = new DefaultHomeFinderService(session).find();
final Path file = new DropboxTouchFeature(session).touch(new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertTrue(new DropboxFindFeature(session).find(file));
assertTrue(new DefaultFindFeature(session).find(file));
final Path target = new DropboxMoveFeature(session).move(file, new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(new DropboxFindFeature(session).find(file));
assertTrue(new DropboxFindFeature(session).find(target));
assertTrue(new DefaultFindFeature(session).find(target));
assertNotEquals(target.attributes().getVersionId(), file.attributes().getVersionId());
assertEquals(target.attributes().getModificationDate(), file.attributes().getModificationDate());
final PathAttributes targetAttributes = new DropboxAttributesFinderFeature(session).find(target);
assertEquals(Comparison.equal, session.getHost().getProtocol().getFeature(ComparisonService.class).compare(Path.Type.file, file.attributes(), targetAttributes));
assertEquals(target.attributes(), targetAttributes);
new DropboxDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static AclOperation getDeniedOperation(final String errorMessage) {
final Matcher matcher = DENIED_OPERATION_STRING_PATTERN.matcher(errorMessage);
if (matcher.matches()) {
return AclOperation.fromString(matcher.group(1));
} else {
return AclOperation.UNKNOWN;
}
} | @Test
public void shouldReturnKnownDeniedOperationFromValidAuthorizationMessage() {
// When:
final AclOperation operation = SchemaRegistryUtil.getDeniedOperation(
"User is denied operation Write on Subject: t2-value; error code: 40301");
// Then:
assertThat(operation, is(AclOperation.WRITE));
} |
public static Combine.BinaryCombineDoubleFn ofDoubles() {
return new Max.MaxDoubleFn();
} | @Test
public void testMaxDoubleFn() {
testCombineFn(Max.ofDoubles(), Lists.newArrayList(1.0, 2.0, 3.0, 4.0), 4.0);
} |
public static Read<JmsRecord> read() {
return new AutoValue_JmsIO_Read.Builder<JmsRecord>()
.setMaxNumRecords(Long.MAX_VALUE)
.setCoder(SerializableCoder.of(JmsRecord.class))
.setCloseTimeout(DEFAULT_CLOSE_TIMEOUT)
.setRequiresDeduping(false)
.setMessageMapper(
new MessageMapper<JmsRecord>() {
@Override
public JmsRecord mapMessage(Message message) throws Exception {
TextMessage textMessage = (TextMessage) message;
Map<String, Object> properties = new HashMap<>();
@SuppressWarnings("rawtypes")
Enumeration propertyNames = textMessage.getPropertyNames();
while (propertyNames.hasMoreElements()) {
String propertyName = (String) propertyNames.nextElement();
properties.put(propertyName, textMessage.getObjectProperty(propertyName));
}
return new JmsRecord(
textMessage.getJMSMessageID(),
textMessage.getJMSTimestamp(),
textMessage.getJMSCorrelationID(),
textMessage.getJMSReplyTo(),
textMessage.getJMSDestination(),
textMessage.getJMSDeliveryMode(),
textMessage.getJMSRedelivered(),
textMessage.getJMSType(),
textMessage.getJMSExpiration(),
textMessage.getJMSPriority(),
properties,
textMessage.getText());
}
})
.build();
} | @Test
public void testSplitForQueue() throws Exception {
JmsIO.Read read = JmsIO.read().withQueue(QUEUE);
PipelineOptions pipelineOptions = PipelineOptionsFactory.create();
int desiredNumSplits = 5;
JmsIO.UnboundedJmsSource initialSource = new JmsIO.UnboundedJmsSource(read);
List<JmsIO.UnboundedJmsSource> splits = initialSource.split(desiredNumSplits, pipelineOptions);
// in the case of a queue, we have concurrent consumers by default, so the initial number
// of splits is equal to the desired number of splits
assertEquals(desiredNumSplits, splits.size());
} |
public static LogicalSchema buildSchema(
final LogicalSchema sourceSchema,
final List<Expression> partitionBys,
final FunctionRegistry functionRegistry
) {
final List<PartitionByColumn> partitionByCols =
getPartitionByColumnName(sourceSchema, partitionBys);
return buildSchema(sourceSchema, partitionBys, functionRegistry, partitionByCols);
} | @Test
public void shouldThrowIfPartitioningByMultipleExpressionsIncludingNull() {
// Given:
final List<Expression> partitionBy = ImmutableList.of(
new UnqualifiedColumnReferenceExp(COL1),
new NullLiteral()
);
// Expect / When:
final Exception e = assertThrows(
KsqlException.class,
() -> PartitionByParamsFactory.buildSchema(
SCHEMA,
partitionBy,
functionRegistry
));
// Then:
assertThat(e.getMessage(), containsString("Cannot PARTITION BY multiple columns including NULL"));
} |
@Override
public void disconnectResourceManager(
final ResourceManagerId resourceManagerId, final Exception cause) {
if (isConnectingToResourceManager(resourceManagerId)) {
reconnectToResourceManager(cause);
}
} | @Test
void testReconnectionAfterDisconnect() throws Exception {
try (final JobMaster jobMaster =
new JobMasterBuilder(jobGraph, rpcService)
.withJobMasterId(jobMasterId)
.withConfiguration(configuration)
.withHighAvailabilityServices(haServices)
.withHeartbeatServices(heartbeatServices)
.createJobMaster()) {
jobMaster.start();
final JobMasterGateway jobMasterGateway =
jobMaster.getSelfGateway(JobMasterGateway.class);
final TestingResourceManagerGateway testingResourceManagerGateway =
createAndRegisterTestingResourceManagerGateway();
final BlockingQueue<JobMasterId> registrationsQueue = new ArrayBlockingQueue<>(1);
testingResourceManagerGateway.setRegisterJobManagerFunction(
(jobMasterId, resourceID, s, jobID) -> {
registrationsQueue.offer(jobMasterId);
return CompletableFuture.completedFuture(
testingResourceManagerGateway.getJobMasterRegistrationSuccess());
});
final ResourceManagerId resourceManagerId =
testingResourceManagerGateway.getFencingToken();
notifyResourceManagerLeaderListeners(testingResourceManagerGateway);
// wait for first registration attempt
final JobMasterId firstRegistrationAttempt = registrationsQueue.take();
assertThat(firstRegistrationAttempt).isEqualTo(jobMasterId);
assertThat(registrationsQueue).isEmpty();
jobMasterGateway.disconnectResourceManager(
resourceManagerId, new FlinkException("Test exception"));
// wait for the second registration attempt after the disconnect call
assertThat(registrationsQueue.take()).isEqualTo(jobMasterId);
}
} |
public boolean shouldLog(final Logger logger, final String path, final int responseCode) {
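// Rate limits are applied per request path and per response code. When a limit is hit,
// a secondary limiter (pathLimitHit / responseCodeLimitHit) throttles the "hit rate limit"
// log line itself, so it is not emitted for every suppressed request.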
if (rateLimitersByPath.containsKey(path)) {
final RateLimiter rateLimiter = rateLimitersByPath.get(path);
if (!rateLimiter.tryAcquire()) {
if (pathLimitHit.tryAcquire()) {
logger.info("Hit rate limit for path " + path + " with limit " + rateLimiter.getRate());
}
return false;
}
}
if (rateLimitersByResponseCode.containsKey(responseCode)) {
final RateLimiter rateLimiter = rateLimitersByResponseCode.get(responseCode);
if (!rateLimiter.tryAcquire()) {
if (responseCodeLimitHit.tryAcquire()) {
logger.info("Hit rate limit for response code " + responseCode + " with limit "
+ rateLimiter.getRate());
}
return false;
}
}
return true;
} | @Test
public void shouldLog() {
// When:
assertThat(loggingRateLimiter.shouldLog(logger, PATH, 200), is(true));
// Then:
verify(rateLimiter).tryAcquire();
verify(logger, never()).info(any());
} |
public B sticky(Boolean sticky) {
this.sticky = sticky;
return getThis();
} | @Test
void sticky() {
ReferenceBuilder builder = new ReferenceBuilder();
builder.sticky(true);
Assertions.assertTrue(builder.build().getSticky());
builder.sticky(false);
Assertions.assertFalse(builder.build().getSticky());
} |
public static <K, V> Read<K, V> read() {
return new AutoValue_KafkaIO_Read.Builder<K, V>()
.setTopics(new ArrayList<>())
.setTopicPartitions(new ArrayList<>())
.setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
.setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
.setMaxNumRecords(Long.MAX_VALUE)
.setCommitOffsetsInFinalizeEnabled(false)
.setDynamicRead(false)
.setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
.setConsumerPollingTimeout(2L)
.setRedistributed(false)
.setAllowDuplicates(false)
.setRedistributeNumKeys(0)
.build();
} | @Test
public void testDeserializationWithHeaders() throws Exception {
// To assert that we continue to prefer the Deserializer API with headers in Kafka API 2.1.0
// onwards
int numElements = 1000;
String topic = "my_topic";
KafkaIO.Read<Integer, Long> reader =
KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopic("my_topic")
.withConsumerFactoryFn(
new ConsumerFactoryFn(
ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST))
.withMaxNumRecords(numElements)
.withKeyDeserializerAndCoder(
KafkaIOTest.IntegerDeserializerWithHeadersAssertor.class,
BigEndianIntegerCoder.of())
.withValueDeserializerAndCoder(
KafkaIOTest.LongDeserializerWithHeadersAssertor.class, BigEndianLongCoder.of());
PCollection<Long> input = p.apply(reader.withoutMetadata()).apply(Values.create());
addCountingAsserts(input, numElements);
p.run();
} |
public static boolean isDone(
Map<String, StepTransition> runtimeDag,
Map<String, Boolean> idStatusMap,
RestartConfig restartConfig) {
Map<String, Set<String>> parentMap = new HashMap<>();
Map<String, Set<String>> childMap = new HashMap<>();
Deque<String> deque =
prepareDagForTraversal(
runtimeDag, idStatusMap.keySet(), restartConfig, parentMap, childMap);
return isDone(idStatusMap, deque, parentMap, childMap);
} | @Test
public void testRestartIncomplete() {
Map<String, Boolean> idStatusMap = new LinkedHashMap<>();
idStatusMap.put("job_3", Boolean.FALSE);
idStatusMap.put("job_9", Boolean.TRUE);
Assert.assertFalse(
DagHelper.isDone(
runtimeDag1,
idStatusMap,
RestartConfig.builder().addRestartNode("sample-dag-test-1", 1, "job_3").build()));
} |
protected static TransferItem resolve(final Path remote, final Local local, final boolean append) {
if(local.isDirectory()) {
// Local path resolves to folder
if(remote.isDirectory()) {
if(append) {
return new TransferItem(new Path(remote, local.getName(), EnumSet.of(Path.Type.directory)), local);
}
}
// Append local name to remote target
return new TransferItem(remote, local);
}
// Local path resolves to file
if(remote.isDirectory()) {
// Append local name to remote target
return new TransferItem(new Path(remote, local.getName(), EnumSet.of(Path.Type.file)), local);
}
// Keep from input for file transfer
return new TransferItem(remote, local);
} | @Test
public void testResolveFileToFile() {
final Local temp = new FlatTemporaryFileService().create(new AlphanumericRandomStringService().random());
final Path file = new Path("/f", EnumSet.of(Path.Type.file));
final TransferItem item = UploadTransferItemFinder.resolve(file, temp, false);
assertEquals(file, item.remote);
assertEquals(temp, item.local);
} |
@Override
protected final CompletableFuture<MetricCollectionResponseBody> handleRequest(
@Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway)
throws RestHandlerException {
metricFetcher.update();
final MetricStore.ComponentMetricStore componentMetricStore =
getComponentMetricStore(request, metricFetcher.getMetricStore());
if (componentMetricStore == null || componentMetricStore.metrics == null) {
return CompletableFuture.completedFuture(
new MetricCollectionResponseBody(Collections.emptyList()));
}
final Set<String> requestedMetrics =
new HashSet<>(request.getQueryParameter(MetricsFilterParameter.class));
if (requestedMetrics.isEmpty()) {
return CompletableFuture.completedFuture(
new MetricCollectionResponseBody(getAvailableMetrics(componentMetricStore)));
} else {
final List<Metric> metrics =
getRequestedMetrics(componentMetricStore, requestedMetrics);
return CompletableFuture.completedFuture(new MetricCollectionResponseBody(metrics));
}
} | @Test
void testReturnEmptyListIfNoComponentMetricStore() throws Exception {
testMetricsHandler.returnComponentMetricStore = false;
final CompletableFuture<MetricCollectionResponseBody> completableFuture =
testMetricsHandler.handleRequest(
HandlerRequest.create(
EmptyRequestBody.getInstance(),
new TestMessageParameters(),
Collections.emptyList()),
mockDispatcherGateway);
assertThat(completableFuture).isDone();
final MetricCollectionResponseBody metricCollectionResponseBody = completableFuture.get();
assertThat(metricCollectionResponseBody.getMetrics()).isEmpty();
} |
public static boolean objectContainsFieldOrProperty(Object object, String fieldName) {
if (object == null) return false;
return objectContainsField(object, fieldName) || objectContainsProperty(object, fieldName);
} | @Test
void testObjectContainsFieldOrProperty() {
final TestObject test = new TestObject("test");
assertThat(objectContainsFieldOrProperty(test, "field")).isTrue();
assertThat(objectContainsFieldOrProperty(test, "anotherField")).isTrue();
assertThat(objectContainsFieldOrProperty(test, "doesNotExist")).isFalse();
} |
@Override
public void run() {
if (!redoService.isConnected()) {
LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task");
return;
}
try {
redoForInstances();
redoForSubscribes();
} catch (Exception e) {
LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e);
}
} | @Test
void testRunRedoDeregisterInstance() throws NacosException {
Set<InstanceRedoData> mockData = generateMockInstanceData(true, true, false);
when(redoService.findInstanceRedoData()).thenReturn(mockData);
redoTask.run();
verify(clientProxy).doDeregisterService(SERVICE, GROUP, INSTANCE);
} |
@Override
public void open(Configuration parameters) throws Exception {
this.rateLimiterTriggeredCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
this.concurrentRunThrottledCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
this.nothingToTriggerCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
this.triggerCounters =
taskNames.stream()
.map(
name ->
getRuntimeContext()
.getMetricGroup()
.addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
.counter(TableMaintenanceMetrics.TRIGGERED))
.collect(Collectors.toList());
this.nextEvaluationTimeState =
getRuntimeContext()
.getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
this.accumulatedChangesState =
getRuntimeContext()
.getListState(
new ListStateDescriptor<>(
"triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
this.lastTriggerTimesState =
getRuntimeContext()
.getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
tableLoader.open();
} | @Test
void testDataFileCount() throws Exception {
TriggerManager manager =
manager(
sql.tableLoader(TABLE_NAME), new TriggerEvaluator.Builder().dataFileCount(3).build());
try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
harness(manager)) {
testHarness.open();
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(1).build(), 0);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(2).build(), 1);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(3).build(), 2);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(5).build(), 3);
// No trigger in this case
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(1).build(), 3);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(2).build(), 4);
}
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
boolean result = false;
boolean containsNull = false;
// Spec. definition: return true if any item is true, else false if all items are false, else null
for ( final Object element : list ) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean"));
} else {
if (element != null) {
result |= (Boolean) element;
} else if (!containsNull) {
containsNull = true;
}
}
}
if (containsNull && !result) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( result );
}
} | @Test
void invokeArrayParamTypeHeterogenousArray() {
FunctionTestUtil.assertResultError(anyFunction.invoke(new Object[]{Boolean.FALSE, 1}),
InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(anyFunction.invoke(new Object[]{Boolean.TRUE, 1}),
InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(anyFunction.invoke(new Object[]{Boolean.TRUE, null, 1}),
InvalidParametersEvent.class);
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof TableMeta)) {
return false;
}
TableMeta tableMeta = (TableMeta) o;
if (!Objects.equals(tableMeta.tableName, this.tableName)) {
return false;
}
if (!Objects.equals(tableMeta.allColumns, this.allColumns)) {
return false;
}
if (!Objects.equals(tableMeta.allIndexes, this.allIndexes)) {
return false;
}
return true;
} | @Test
public void testEquals() {
assertTrue(tableMeta.equals(tableMeta2));
tableMeta2.setTableName("different_table");
assertFalse(tableMeta.equals(tableMeta2));
} |
@Override
public T deserialize(final String topicName, final byte[] record) {
return deserializer.get().deserialize(topicName, record);
} | @Test
public void shouldUseAThreadLocalDeserializer() throws InterruptedException {
final List<Deserializer<GenericRow>> serializers = new LinkedList<>();
final ThreadLocalDeserializer<GenericRow> serializer = new ThreadLocalDeserializer<>(
() -> {
final Deserializer<GenericRow> local = mock(Deserializer.class);
serializers.add(local);
expect(local.deserialize(anyString(), anyObject(byte[].class)))
.andReturn(new GenericRow())
.times(1);
replay(local);
return serializers.get(serializers.size() - 1);
}
);
for (int i = 0; i < 3; i++) {
final Thread t = new Thread(
() -> serializer.deserialize("foo", new byte[32])
);
t.start();
t.join();
assertThat(serializers.size(), equalTo(i + 1));
serializers.forEach(EasyMock::verify);
}
} |
public static List<Object> getObjects() {
FS.FileStoreAttributes.setBackground(true);
return Arrays.asList(
JGitBlameCommand.class,
CompositeBlameCommand.class,
NativeGitBlameCommand.class,
DefaultBlameStrategy.class,
ProcessWrapperFactory.class,
GitScmProvider.class,
GitIgnoreCommand.class);
} | @Test
public void getClasses() {
assertThat(GitScmSupport.getObjects()).isNotEmpty();
} |
public void init(ApplicationConfiguration applicationConfiguration)
throws ModuleNotFoundException, ProviderNotFoundException, ServiceNotProvidedException,
CycleDependencyException, ModuleConfigException, ModuleStartException {
String[] moduleNames = applicationConfiguration.moduleList();
ServiceLoader<ModuleDefine> moduleServiceLoader = ServiceLoader.load(ModuleDefine.class);
ServiceLoader<ModuleProvider> moduleProviderLoader = ServiceLoader.load(ModuleProvider.class);
HashSet<String> moduleSet = new HashSet<>(Arrays.asList(moduleNames));
for (ModuleDefine module : moduleServiceLoader) {
if (moduleSet.contains(module.name())) {
module.prepare(
this,
applicationConfiguration.getModuleConfiguration(module.name()),
moduleProviderLoader,
bootingParameters
);
loadedModules.put(module.name(), module);
moduleSet.remove(module.name());
}
}
// Finish prepare stage
isInPrepareStage = false;
if (moduleSet.size() > 0) {
throw new ModuleNotFoundException(moduleSet.toString() + " missing.");
}
BootstrapFlow bootstrapFlow = new BootstrapFlow(loadedModules);
bootstrapFlow.start(this);
bootstrapFlow.notifyAfterCompleted();
} | @Test
public void testModuleMissing() {
assertThrows(ModuleNotFoundException.class, () -> {
ApplicationConfiguration configuration = new ApplicationConfiguration();
configuration.addModule("BaseA").addProviderConfiguration("P-A", new Properties());
configuration.addModule("BaseB").addProviderConfiguration("P-B2", new Properties());
ModuleManager manager = new ModuleManager("Test");
manager.init(configuration);
});
} |
public AlarmCallback create(AlarmCallbackConfiguration configuration) throws ClassNotFoundException, AlarmCallbackConfigurationException {
AlarmCallback alarmCallback = create(configuration.getType());
alarmCallback.initialize(new Configuration(configuration.getConfiguration()));
return alarmCallback;
} | @Test
public void testCreateByClass() throws Exception {
AlarmCallback alarmCallback = alarmCallbackFactory.create(DummyAlarmCallback.class);
assertTrue(alarmCallback instanceof DummyAlarmCallback);
assertEquals(dummyAlarmCallback, alarmCallback);
} |
SqlResult execute(CreateMappingPlan plan, SqlSecurityContext ssc) {
catalog.createMapping(plan.mapping(), plan.replace(), plan.ifNotExists(), ssc);
return UpdateSqlResultImpl.createUpdateCountResult(0);
} | @Test
@Parameters({
"true",
"false"
})
public void test_dropMappingExecution(boolean ifExists) {
// given
String name = "name";
DropMappingPlan plan = new DropMappingPlan(planKey(), name, ifExists, planExecutor);
// when
SqlResult result = planExecutor.execute(plan);
// then
assertThat(result.updateCount()).isEqualTo(0);
verify(catalog).removeMapping(name, ifExists);
} |
public ModuleBuilder owner(String owner) {
this.owner = owner;
return getThis();
} | @Test
void owner() {
ModuleBuilder builder = ModuleBuilder.newBuilder();
builder.owner("owner");
Assertions.assertEquals("owner", builder.build().getOwner());
} |
@Override
protected boolean isSelectable(InstancesChangeEvent event) {
return event != null && event.getHosts() != null && event.getInstancesDiff() != null;
} | @Test
public void testSelectable() {
NamingSelectorWrapper selectorWrapper = new NamingSelectorWrapper(null, null);
assertFalse(selectorWrapper.isSelectable(null));
InstancesChangeEvent event1 = new InstancesChangeEvent(null, null, null, null, null, null);
assertFalse(selectorWrapper.isSelectable(event1));
InstancesChangeEvent event2 = new InstancesChangeEvent(null, null, null, null, null, new InstancesDiff());
assertFalse(selectorWrapper.isSelectable(event2));
InstancesChangeEvent event3 = new InstancesChangeEvent(null, null, null, null, Collections.emptyList(), null);
assertFalse(selectorWrapper.isSelectable(event3));
InstancesChangeEvent event4 = new InstancesChangeEvent(null, null, null, null, Collections.emptyList(),
new InstancesDiff());
assertTrue(selectorWrapper.isSelectable(event4));
} |
public static FileSystem write(final FileSystem fs, final Path path,
final byte[] bytes) throws IOException {
Objects.requireNonNull(path);
Objects.requireNonNull(bytes);
try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build()) {
out.write(bytes);
}
return fs;
} | @Test
public void testWriteStringNoCharSetFileSystem() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(uri, conf);
Path testPath = new Path(new Path(uri), "writestring.out");
String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
FileUtil.write(fs, testPath, write);
String read = FileUtils.readFileToString(new File(testPath.toUri()),
StandardCharsets.UTF_8);
assertEquals(write, read);
} |
private PolicerId(URI u) {
super(u.toString());
uri = u;
} | @Test
public void testEquality() {
// Create URI representing the id one
URI uriOne = URI.create(OF_SCHEME + ":" + Integer.toHexString(ONE));
// Create String representing the id one
String stringOne = OF_SCHEME + ":" + Integer.toHexString(ONE);
// Create String representing the id two
String stringTwo = OF_SCHEME + ":" + Integer.toHexString(TWO);
// Create String representing the id A
String stringA = FOO_SCHEME + ":" + A;
// Create String representing the id LA
String stringLA = FOO_SCHEME + ":" + LA;
// Create policer id one
PolicerId one = PolicerId.policerId(uriOne);
// Create policer id one
PolicerId copyOfOne = PolicerId.policerId(stringOne);
// Verify equality
assertEquals(one, copyOfOne);
// Create a different policer id
PolicerId two = PolicerId.policerId(stringTwo);
// Verify not equals
assertNotEquals(two, one);
assertNotEquals(two, copyOfOne);
// Create policer id A
PolicerId a = PolicerId.policerId(A);
// Create policer id LA
PolicerId la = PolicerId.policerId(LA);
// Verify not equals
assertNotEquals(a, la);
} |
public static ByteBuf newTxnAbortMarker(long sequenceId, long txnMostBits,
long txnLeastBits) {
return newTxnMarker(
MarkerType.TXN_ABORT, sequenceId, txnMostBits, txnLeastBits);
} | @Test
public void testTxnAbortMarker() throws IOException {
long sequenceId = 1L;
long mostBits = 1234L;
long leastBits = 2345L;
ByteBuf buf = Markers.newTxnAbortMarker(sequenceId, mostBits, leastBits);
MessageMetadata msgMetadata = Commands.parseMessageMetadata(buf);
assertEquals(msgMetadata.getMarkerType(), MarkerType.TXN_ABORT_VALUE);
assertEquals(msgMetadata.getSequenceId(), sequenceId);
assertEquals(msgMetadata.getTxnidMostBits(), mostBits);
assertEquals(msgMetadata.getTxnidLeastBits(), leastBits);
} |
public Map<String, LdapUserMapping> getUserMappings() {
if (userMappings == null) {
createUserMappings();
}
return userMappings;
} | @Test
public void getUserMappings_shouldCreateUserMappings_whenSingleLdapConfig() {
Configuration configuration = generateSingleLdapSettingsWithUserAndGroupMapping().asConfig();
LdapSettingsManager settingsManager = new LdapSettingsManager(configuration);
Map<String, LdapUserMapping> result = settingsManager.getUserMappings();
assertThat(result).hasSize(1).containsOnlyKeys("default");
assertThat(result.get("default")).usingRecursiveComparison().isEqualTo(new LdapUserMapping(configuration, "ldap"));
} |
public static ByteBuf wrappedBuffer(byte[] array) {
if (array.length == 0) {
return EMPTY_BUFFER;
}
return new UnpooledHeapByteBuf(ALLOC, array, array.length);
} | @Test
public void testHexDump() {
assertEquals("", ByteBufUtil.hexDump(EMPTY_BUFFER));
ByteBuf buffer = wrappedBuffer(new byte[]{ 0x12, 0x34, 0x56 });
assertEquals("123456", ByteBufUtil.hexDump(buffer));
buffer.release();
buffer = wrappedBuffer(new byte[]{
0x12, 0x34, 0x56, 0x78,
(byte) 0x90, (byte) 0xAB, (byte) 0xCD, (byte) 0xEF
});
assertEquals("1234567890abcdef", ByteBufUtil.hexDump(buffer));
buffer.release();
} |
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
final String json = gelfMessage.getJSON(decompressSizeLimit, charset);
final JsonNode node;
try {
node = objectMapper.readTree(json);
if (node == null) {
throw new IOException("null result");
}
} catch (final Exception e) {
log.error("Could not parse JSON, first 400 characters: " +
StringUtils.abbreviate(json, 403), e);
throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
}
try {
validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
} catch (IllegalArgumentException e) {
log.trace("Invalid GELF message <{}>", node);
throw e;
}
// Timestamp.
final double messageTimestamp = timestampValue(node);
final DateTime timestamp;
if (messageTimestamp <= 0) {
timestamp = rawMessage.getTimestamp();
} else {
// we treat this as a unix timestamp
timestamp = Tools.dateTimeFromDouble(messageTimestamp);
}
final Message message = messageFactory.createMessage(
stringValue(node, "short_message"),
stringValue(node, "host"),
timestamp
);
message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));
final String file = stringValue(node, "file");
if (file != null && !file.isEmpty()) {
message.addField("file", file);
}
final long line = longValue(node, "line");
if (line > -1) {
message.addField("line", line);
}
// Level is set by server if not specified by client.
final int level = intValue(node, "level");
if (level > -1) {
message.addField("level", level);
}
// Facility is set by server if not specified by client.
final String facility = stringValue(node, "facility");
if (facility != null && !facility.isEmpty()) {
message.addField("facility", facility);
}
// Add additional data if there is some.
final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
while (fields.hasNext()) {
final Map.Entry<String, JsonNode> entry = fields.next();
String key = entry.getKey();
// Do not index useless GELF "version" field.
if ("version".equals(key)) {
continue;
}
// Don't include GELF syntax underscore in message field key.
if (key.startsWith("_") && key.length() > 1) {
key = key.substring(1);
}
// We already set short_message and host as message and source. Do not add as fields again.
if ("short_message".equals(key) || "host".equals(key)) {
continue;
}
// Skip standard or already set fields.
if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) {
continue;
}
// Convert JSON containers to Strings, and pick a suitable number representation.
final JsonNode value = entry.getValue();
final Object fieldValue;
if (value.isContainerNode()) {
fieldValue = value.toString();
} else if (value.isFloatingPointNumber()) {
fieldValue = value.asDouble();
} else if (value.isIntegralNumber()) {
fieldValue = value.asLong();
} else if (value.isNull()) {
log.debug("Field [{}] is NULL. Skipping.", key);
continue;
} else if (value.isTextual()) {
fieldValue = value.asText();
} else {
log.debug("Field [{}] has unknown value type. Skipping.", key);
continue;
}
message.addField(key, fieldValue);
}
return message;
} | @Test
public void decodeFailsWithBlankMessage() throws Exception {
final String json = "{"
+ "\"version\": \"1.1\","
+ "\"host\": \"example.org\","
+ "\"message\": \" \""
+ "}";
final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8));
assertThatIllegalArgumentException().isThrownBy(() -> codec.decode(rawMessage))
.withNoCause()
.withMessageMatching("GELF message <[0-9a-f-]+> has empty mandatory \"message\" field.");
} |
public void process() {
LOGGER.debug("Beginning Composer lock processing");
try {
final JsonObject composer = jsonReader.readObject();
if (composer.containsKey("packages")) {
LOGGER.debug("Found packages");
final JsonArray packages = composer.getJsonArray("packages");
for (JsonObject pkg : packages.getValuesAs(JsonObject.class)) {
if (pkg.containsKey("name")) {
final String groupName = pkg.getString("name");
if (groupName.indexOf('/') >= 0 && groupName.indexOf('/') <= groupName.length() - 1) {
if (pkg.containsKey("version")) {
final String group = groupName.substring(0, groupName.indexOf('/'));
final String project = groupName.substring(groupName.indexOf('/') + 1);
String version = pkg.getString("version");
// Some version numbers begin with v - which doesn't end up matching CPE's
if (version.startsWith("v")) {
version = version.substring(1);
}
LOGGER.debug("Got package {}/{}/{}", group, project, version);
composerDependencies.add(new ComposerDependency(group, project, version));
} else {
LOGGER.debug("Group/package {} does not have a version", groupName);
}
} else {
LOGGER.debug("Got a dependency with no name");
}
}
}
}
} catch (JsonParsingException jsonpe) {
throw new ComposerException("Error parsing stream", jsonpe);
} catch (JsonException jsone) {
throw new ComposerException("Error reading stream", jsone);
} catch (IllegalStateException ise) {
throw new ComposerException("Illegal state in composer stream", ise);
} catch (ClassCastException cce) {
throw new ComposerException("Not exactly composer lock", cce);
}
} | @Test(expected = ComposerException.class)
public void testNotComposer() throws Exception {
String input = "[\"ham\",\"eggs\"]";
ComposerLockParser clp = new ComposerLockParser(new ByteArrayInputStream(input.getBytes(Charset.defaultCharset())));
clp.process();
} |
public static String processingLogStreamCreateStatement(
final ProcessingLogConfig config,
final KsqlConfig ksqlConfig
) {
return processingLogStreamCreateStatement(
config.getString(ProcessingLogConfig.STREAM_NAME),
getTopicName(config, ksqlConfig)
);
} | @Test
public void shouldBuildCorrectStreamCreateDDL() {
// Given:
serviceContext.getTopicClient().createTopic(TOPIC, 1, (short) 1);
// When:
final String statement =
ProcessingLogServerUtils.processingLogStreamCreateStatement(
config,
ksqlConfig);
// Then:
assertThat(statement, equalTo(
"CREATE STREAM PROCESSING_LOG_STREAM ("
+ "logger VARCHAR, "
+ "level VARCHAR, "
+ "time BIGINT, "
+ "message STRUCT<"
+ "type INT, "
+ "deserializationError STRUCT<target VARCHAR, errorMessage VARCHAR, recordB64 VARCHAR, cause ARRAY<VARCHAR>, `topic` VARCHAR>, "
+ "recordProcessingError STRUCT<errorMessage VARCHAR, record VARCHAR, cause ARRAY<VARCHAR>>, "
+ "productionError STRUCT<errorMessage VARCHAR>, "
+ "serializationError STRUCT<target VARCHAR, errorMessage VARCHAR, record VARCHAR, cause ARRAY<VARCHAR>, `topic` VARCHAR>, "
+ "kafkaStreamsThreadError STRUCT<errorMessage VARCHAR, threadName VARCHAR, cause ARRAY<VARCHAR>>"
+ ">"
+ ") WITH(KAFKA_TOPIC='processing_log_topic', VALUE_FORMAT='JSON', KEY_FORMAT='KAFKA');"));
} |
public static Stream<Vertex> reverseDepthFirst(Graph g) {
return reverseDepthFirst(g.getLeaves());
} | @Test
public void testDFSReverse() {
DepthFirst.reverseDepthFirst(g).forEach(v -> visitCount.incrementAndGet());
assertEquals("It should visit each node once", visitCount.get(), 3);
} |
Future<Boolean> canRoll(int podId) {
LOGGER.debugCr(reconciliation, "Determining whether broker {} can be rolled", podId);
return canRollBroker(descriptions, podId);
} | @Test
public void testNoMinIsr(VertxTestContext context) {
KSB ksb = new KSB()
.addNewTopic("A", false)
.addNewPartition(0)
.replicaOn(0, 1, 2)
.leader(0)
.isr(0, 1, 2)
.endPartition()
.endTopic()
.addNewTopic("B", false)
.addNewPartition(0)
.replicaOn(0, 1, 2)
.leader(1)
.isr(1, 0, 2)
.endPartition()
.endTopic()
.addBroker(3);
KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac());
Checkpoint a = context.checkpoint(ksb.brokers.size());
for (Integer brokerId : ksb.brokers.keySet()) {
kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
assertTrue(canRoll,
"broker " + brokerId + " should be rollable, being minisr = 1 and having two brokers in its isr");
a.flag();
})));
}
} |
@Override
public void register(PiPipeconf pipeconf) throws IllegalStateException {
checkNotNull(pipeconf);
if (pipeconfs.containsKey(pipeconf.id())) {
throw new IllegalStateException(format("Pipeconf %s is already registered", pipeconf.id()));
}
pipeconfs.put(pipeconf.id(), pipeconf);
log.info("New pipeconf registered: {} (fingerprint={})",
pipeconf.id(), HexString.toHexString(pipeconf.fingerprint()));
executor.execute(() -> attemptMergeAll(pipeconf.id()));
post(new PiPipeconfEvent(PiPipeconfEvent.Type.REGISTERED, pipeconf));
} | @Test
public void register() {
mgr.register(pipeconf);
assertTrue("PiPipeconf should be registered", mgr.pipeconfs.containsValue(pipeconf));
} |
@Override
public List<TableInfo> getTableList(Long dataSourceConfigId, String nameLike, String commentLike) {
List<TableInfo> tables = getTableList0(dataSourceConfigId, null);
return tables.stream().filter(tableInfo -> (StrUtil.isEmpty(nameLike) || tableInfo.getName().contains(nameLike))
&& (StrUtil.isEmpty(commentLike) || tableInfo.getComment().contains(commentLike)))
.collect(Collectors.toList());
} | @Test
public void testGetTableList() {
// prepare parameters
Long dataSourceConfigId = randomLongId();
// mock methods
DataSourceConfigDO dataSourceConfig = new DataSourceConfigDO().setUsername("sa").setPassword("")
.setUrl("jdbc:h2:mem:testdb");
when(dataSourceConfigService.getDataSourceConfig(eq(dataSourceConfigId)))
.thenReturn(dataSourceConfig);
// call
List<TableInfo> tables = databaseTableService.getTableList(dataSourceConfigId,
"config", "参数");
// assert
assertEquals(1, tables.size());
assertTableInfo(tables.get(0));
} |
@Override
public SelType call(String methodName, SelType[] args) {
if (args.length == 1) {
if ("dateIntToTs".equals(methodName)) {
return dateIntToTs(args[0]);
} else if ("tsToDateInt".equals(methodName)) {
return tsToDateInt(args[0]);
}
} else if (args.length == 2) {
if ("incrementDateInt".equals(methodName)) {
return incrementDateInt(args[0], args[1]);
} else if ("timeoutForDateTimeDeadline".equals(methodName)) {
return timeoutForDateTimeDeadline(args[0], args[1]);
} else if ("timeoutForDateIntDeadline".equals(methodName)) {
return timeoutForDateIntDeadline(args[0], args[1]);
}
} else if (args.length == 3) {
if ("dateIntsBetween".equals(methodName)) {
return dateIntsBetween(args[0], args[1], args[2]);
} else if ("intsBetween".equals(methodName)) {
return intsBetween(args[0], args[1], args[2]);
}
} else if (args.length == 5 && "dateIntHourToTs".equals(methodName)) {
return dateIntHourToTs(args);
}
throw new UnsupportedOperationException(
type()
+ " DO NOT support calling method: "
+ methodName
+ " with args: "
+ Arrays.toString(args));
} | @Test(expected = NumberFormatException.class)
public void testCallTsToDateIntInvalid() {
SelUtilFunc.INSTANCE.call("tsToDateInt", new SelType[] {SelString.of("123.45")});
} |
public synchronized boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleStepException {
meta = (ShapeFileReaderMeta) smi;
data = (ShapeFileReaderData) sdi;
int partnr;
boolean retval = true;
if ( data.shapeNr >= data.shapeFile.getNrShapes() ) {
setOutputDone();
return false;
}
if ( first ) {
first = false;
data.outputRowMeta = new RowMeta();
meta.getFields( data.outputRowMeta, getStepname(), null, null, this );
}
// building new row
Object[] outputRow = RowDataUtil.allocateRowData( data.outputRowMeta.size() );
int outputIndex;
// getting shape # data.shapeNr from shapefile
// Get the shape from the shapefile
//
ShapeInterface si = data.shapeFile.getShape( data.shapeNr );
switch ( si.getType() ) {
case Shape.SHAPE_TYPE_POLYLINE_M:
// PolyLimeM";
ShapePolyLineM eplm = (ShapePolyLineM) si;
partnr = 0;
for ( int j = 0; j < eplm.nrpoints; j++ ) {
// PolyLimeM, point #"+j;
for ( int k = 0; k < eplm.nrparts; k++ ) {
if ( j == eplm.part_starts[ k ] ) {
partnr++;
}
}
outputIndex = 0;
// adding the basics";
// Add the basics...
// The filename...
outputRow[ outputIndex++ ] = meta.getShapeFilename();
// The file type
outputRow[ outputIndex++ ] = data.shapeFile.getFileHeader().getShapeTypeDesc();
// The shape nr
outputRow[ outputIndex++ ] = new Long( data.shapeNr + 1 );
// The part nr
outputRow[ outputIndex++ ] = new Long( partnr );
// The nr of parts
outputRow[ outputIndex++ ] = new Long( eplm.nrparts );
// The point nr
outputRow[ outputIndex++ ] = new Long( j + 1 );
// The nr of points
outputRow[ outputIndex++ ] = new Long( eplm.nrpoints );
// The X coordinate
outputRow[ outputIndex++ ] = new Double( eplm.point[ j ].x );
// The Y coordinate
outputRow[ outputIndex++ ] = new Double( eplm.point[ j ].y );
// The measure
outputRow[ outputIndex++ ] = new Double( eplm.measures[ j ] );
// The Values in the DBF file...
// PolyLimeM, point #"+j+", add dbf data";
Object[] dbfData = si.getDbfData();
RowMetaInterface dbfMeta = si.getDbfMeta();
for ( int d = 0; d < dbfMeta.size(); d++ ) {
outputRow[ outputIndex++ ] = dbfData[ d ];
}
linesInput++;
// Put it out to the rest of the world...
try {
putRow( data.outputRowMeta, data.outputRowMeta.cloneRow( outputRow ) );
} catch ( KettleValueException e ) {
throw new KettleStepException( "Unable to clone row", e );
}
}
break;
case Shape.SHAPE_TYPE_POLYGON:
// ShapePolygon";
ShapePolygon eplg = (ShapePolygon) si;
partnr = 0;
for ( int j = 0; j < eplg.nrpoints; j++ ) {
// PolyLime, point #"+j;
for ( int k = 0; k < eplg.nrparts; k++ ) {
if ( j == eplg.part_starts[ k ] ) {
partnr++;
}
}
outputIndex = 0;
// adding the basics";
// Add the basics...
// The filename...
outputRow[ outputIndex++ ] = meta.getShapeFilename();
// The file type
outputRow[ outputIndex++ ] = data.shapeFile.getFileHeader().getShapeTypeDesc();
// The shape nr
outputRow[ outputIndex++ ] = new Long( data.shapeNr + 1 );
// The part nr
outputRow[ outputIndex++ ] = new Long( partnr );
// The nr of parts
outputRow[ outputIndex++ ] = new Long( eplg.nrparts );
// The point nr
outputRow[ outputIndex++ ] = new Long( j + 1 );
// The nr of points
outputRow[ outputIndex++ ] = new Long( eplg.nrpoints );
// The X coordinate
outputRow[ outputIndex++ ] = new Double( eplg.point[ j ].x );
// The Y coordinate
outputRow[ outputIndex++ ] = new Double( eplg.point[ j ].y );
// The measure
outputRow[ outputIndex++ ] = new Double( 0.0 );
// The Values in the DBF file...
// PolyLime, point #"+j+", add dbf data";
//
Object[] dbfData = si.getDbfData();
RowMetaInterface dbfMeta = si.getDbfMeta();
for ( int d = 0; d < dbfMeta.size(); d++ ) {
outputRow[ outputIndex++ ] = dbfData[ d ];
}
linesInput++;
// Put it out to the rest of the world...
try {
putRow( data.outputRowMeta, data.outputRowMeta.cloneRow( outputRow ) );
} catch ( KettleValueException e ) {
throw new KettleStepException( "Unable to clone row", e );
}
}
break;
case Shape.SHAPE_TYPE_POLYLINE:
// PolyLime";
ShapePolyLine epl = (ShapePolyLine) si;
partnr = 0;
for ( int j = 0; j < epl.nrpoints; j++ ) {
// PolyLime, point #"+j;
for ( int k = 0; k < epl.nrparts; k++ ) {
if ( j == epl.part_starts[ k ] ) {
partnr++;
}
}
outputIndex = 0;
// adding the basics";
// Add the basics...
// The filename...
outputRow[ outputIndex++ ] = meta.getShapeFilename();
// The file type
outputRow[ outputIndex++ ] = data.shapeFile.getFileHeader().getShapeTypeDesc();
// The shape nr
outputRow[ outputIndex++ ] = new Long( data.shapeNr + 1 );
// The part nr
outputRow[ outputIndex++ ] = new Long( partnr );
// The nr of parts
outputRow[ outputIndex++ ] = new Long( epl.nrparts );
// The point nr
outputRow[ outputIndex++ ] = new Long( j + 1 );
// The nr of points
outputRow[ outputIndex++ ] = new Long( epl.nrpoints );
// The X coordinate
outputRow[ outputIndex++ ] = new Double( epl.point[ j ].x );
// The Y coordinate
outputRow[ outputIndex++ ] = new Double( epl.point[ j ].y );
// The measure
outputRow[ outputIndex++ ] = new Double( 0.0 );
// The Values in the DBF file...
// PolyLime, point #"+j+", add dbf data";
//
Object[] dbfData = si.getDbfData();
RowMetaInterface dbfMeta = si.getDbfMeta();
for ( int d = 0; d < dbfMeta.size(); d++ ) {
outputRow[ outputIndex++ ] = dbfData[ d ];
}
linesInput++;
// Put it out to the rest of the world...
try {
putRow( data.outputRowMeta, data.outputRowMeta.cloneRow( outputRow ) );
} catch ( KettleValueException e ) {
throw new KettleStepException( "Unable to clone row", e );
}
}
break;
case Shape.SHAPE_TYPE_POINT:
// Point";
ShapePoint ep = (ShapePoint) si;
// Add the basics...
outputIndex = 0;
// The filename...
outputRow[ outputIndex++ ] = meta.getShapeFilename();
// The file type
outputRow[ outputIndex++ ] = data.shapeFile.getFileHeader().getShapeTypeDesc();
// The shape nr
outputRow[ outputIndex++ ] = new Long( data.shapeNr );
// The part nr
outputRow[ outputIndex++ ] = new Long( 0L );
// The nr of parts
outputRow[ outputIndex++ ] = new Long( 0L );
// The point nr
outputRow[ outputIndex++ ] = new Long( 0L );
// The nr of points
outputRow[ outputIndex++ ] = new Long( 0L );
// The X coordinate
outputRow[ outputIndex++ ] = new Double( ep.x );
// The Y coordinate
outputRow[ outputIndex++ ] = new Double( ep.y );
// The measure
outputRow[ outputIndex++ ] = new Double( 0.0 );
// The Values in the DBF file...
// PolyLimeM, point #"+data.shapeNr+", add dbf data";
//
Object[] dbfData = si.getDbfData();
RowMetaInterface dbfMeta = si.getDbfMeta();
for ( int d = 0; d < dbfMeta.size(); d++ ) {
outputRow[ outputIndex++ ] = dbfData[ d ];
}
linesInput++;
// Put it out to the rest of the world...
try {
putRow( data.outputRowMeta, data.outputRowMeta.cloneRow( outputRow ) );
} catch ( KettleValueException e ) {
throw new KettleStepException( "Unable to clone row", e );
}
break;
default:
System.out.println(
"Unable to parse shape type [" + Shape.getEsriTypeDesc( si.getType() ) + "] : not yet implemented." );
throw new KettleStepException(
"Unable to parse shape type [" + Shape.getEsriTypeDesc( si.getType() ) + "] : not yet implemented." );
}
// Next shape please!
data.shapeNr++;
if ( ( linesInput % Const.ROWS_UPDATE ) == 0 ) {
logBasic( "linenr " + linesInput );
}
return retval;
} | @Test
public void processRowPolyLineCloneRowTest() throws KettleException {
shapePoint = mock( ShapePoint.class );
shapePoint.x = 0;
shapePoint.y = 0;
shapePolyLine = mock( ShapePolyLine.class );
shapePolyLine.nrparts = 0;
shapePolyLine.nrpoints = 1;
shapePolyLine.point = new ShapePoint[] { shapePoint };
when( shapePolyLine.getType() ).thenReturn( Shape.SHAPE_TYPE_POLYLINE );
when( shapePolyLine.getDbfData() ).thenReturn( new Object[] { new Object() } );
when( shapePolyLine.getDbfMeta() ).thenReturn( rowMeta );
shapeFileHeader = mock( ShapeFileHeader.class );
when( shapeFileHeader.getShapeTypeDesc() ).thenReturn( "ShapeFileHeader Test" );
shapeFile = mock( ShapeFile.class );
when( shapeFile.getNrShapes() ).thenReturn( 1 );
when( shapeFile.getShape( anyInt() ) ).thenReturn( shapePolyLine );
when( shapeFile.getFileHeader() ).thenReturn( shapeFileHeader );
shapeFileReaderData = new ShapeFileReaderData();
shapeFileReaderData.outputRowMeta = rowMeta;
shapeFileReaderData.shapeFile = shapeFile;
shapeFileReaderData.shapeNr = 0;
shapeFileReader = spy( createShapeFileReader() );
shapeFileReader.first = false;
Object[] outputRow = new Object[ RowDataUtil.allocateRowData( shapeFileReaderData.outputRowMeta.size() ).length ];
try (
MockedStatic<RowDataUtil> rowDataUtilMockedStatic = mockStatic( RowDataUtil.class, withSettings().lenient() ) ) {
rowDataUtilMockedStatic.when( () -> RowDataUtil.allocateRowData( anyInt() ) ).thenReturn( outputRow );
shapeFileReader.processRow( stepMockHelper.initStepMetaInterface, shapeFileReaderData );
verify( shapeFileReader, times( 1 ) ).putRow( eq( shapeFileReaderData.outputRowMeta ), any( Object[].class ) );
// Changing the original outputRow in order to test if the outputRow was cloned
outputRow[ 0 ] = "outputRow Clone Test";
verify( shapeFileReader, times( 0 ) ).putRow( shapeFileReaderData.outputRowMeta, outputRow );
}
} |
public static List<AclEntry> filterAclEntriesByAclSpec(
List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
if (aclSpec.containsKey(existingEntry)) {
scopeDirty.add(existingEntry.getScope());
if (existingEntry.getType() == MASK) {
maskDirty.add(existingEntry.getScope());
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
} | @Test
public void testFilterAclEntriesByAclSpecDefaultMaskPreserved()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ_WRITE))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana"));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
} |
@Override
public Long createDataSourceConfig(DataSourceConfigSaveReqVO createReqVO) {
DataSourceConfigDO config = BeanUtils.toBean(createReqVO, DataSourceConfigDO.class);
validateConnectionOK(config);
// insert
dataSourceConfigMapper.insert(config);
// return
return config.getId();
} | @Test
public void testCreateDataSourceConfig_success() {
try (MockedStatic<JdbcUtils> databaseUtilsMock = mockStatic(JdbcUtils.class)) {
// prepare parameters
DataSourceConfigSaveReqVO reqVO = randomPojo(DataSourceConfigSaveReqVO.class)
.setId(null); // avoid the id being set
// mock methods
databaseUtilsMock.when(() -> JdbcUtils.isConnectionOK(eq(reqVO.getUrl()),
eq(reqVO.getUsername()), eq(reqVO.getPassword()))).thenReturn(true);
// call
Long dataSourceConfigId = dataSourceConfigService.createDataSourceConfig(reqVO);
// assert
assertNotNull(dataSourceConfigId);
// verify that the record's attributes are correct
DataSourceConfigDO dataSourceConfig = dataSourceConfigMapper.selectById(dataSourceConfigId);
assertPojoEquals(reqVO, dataSourceConfig, "id");
}
} |
public File dumpHeap()
throws MalformedObjectNameException, InstanceNotFoundException, ReflectionException,
MBeanException, IOException {
return dumpHeap(localDumpFolder);
} | @Test
public void heapDumpOnce() throws Exception {
File folder = tempFolder.newFolder();
File dump1 = MemoryMonitor.dumpHeap(folder);
assertNotNull(dump1);
assertTrue(dump1.exists());
assertThat(dump1.getParentFile(), Matchers.equalTo(folder));
} |
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof ShowStatement) {
return Optional.of(new PostgreSQLShowVariableExecutor((ShowStatement) sqlStatement));
}
return Optional.empty();
} | @Test
void assertCreateWithSelectDatabase() {
SQLStatement sqlStatement = parseSQL(PSQL_SELECT_DATABASES);
SelectStatementContext selectStatementContext = mock(SelectStatementContext.class);
when(selectStatementContext.getSqlStatement()).thenReturn((SelectStatement) sqlStatement);
Optional<DatabaseAdminExecutor> actual = new PostgreSQLAdminExecutorCreator().create(selectStatementContext, PSQL_SELECT_DATABASES, "", Collections.emptyList());
assertTrue(actual.isPresent());
} |
public abstract VoiceInstructionValue getConfigForDistance(
double distance,
String turnDescription,
String thenVoiceInstruction); | @Test
public void fixedDistanceInitialVICImperialTest() {
FixedDistanceVoiceInstructionConfig configImperial = new FixedDistanceVoiceInstructionConfig(IN_HIGHER_DISTANCE_PLURAL.imperial,
trMap, locale, 2000, 2);
compareVoiceInstructionValues(
2000,
"In 2 miles turn",
configImperial.getConfigForDistance(2100, "turn", " then")
);
compareVoiceInstructionValues(
2000,
"In 2 miles turn",
configImperial.getConfigForDistance(2000, "turn", " then")
);
assertNull(configImperial.getConfigForDistance(1999, "turn", " then"));
} |
public static CompositeData parseComposite(URI uri) throws URISyntaxException {
CompositeData rc = new CompositeData();
rc.scheme = uri.getScheme();
String ssp = stripPrefix(uri.getRawSchemeSpecificPart().trim(), "//").trim();
parseComposite(uri, rc, ssp);
rc.fragment = uri.getFragment();
return rc;
} | @Test
public void testCompositePath() throws Exception {
CompositeData data = URISupport.parseComposite(new URI("test:(path)/path"));
assertEquals("path", data.getPath());
data = URISupport.parseComposite(new URI("test:path"));
assertNull(data.getPath());
} |
@Override
public ChannelFuture resetStream(final ChannelHandlerContext ctx, int streamId, long errorCode,
ChannelPromise promise) {
final Http2Stream stream = connection().stream(streamId);
if (stream == null) {
return resetUnknownStream(ctx, streamId, errorCode, promise.unvoid());
}
return resetStream(ctx, stream, errorCode, promise);
} | @Test
public void writeRstOnClosedStreamShouldSucceed() throws Exception {
handler = newHandler();
when(stream.id()).thenReturn(STREAM_ID);
when(frameWriter.writeRstStream(eq(ctx), eq(STREAM_ID),
anyLong(), any(ChannelPromise.class))).thenReturn(future);
when(stream.state()).thenReturn(CLOSED);
when(stream.isHeadersSent()).thenReturn(true);
// The stream is "closed" but is still known about by the connection (connection().stream(..)
// will return the stream). We should still write a RST_STREAM frame in this scenario.
handler.resetStream(ctx, STREAM_ID, STREAM_CLOSED.code(), promise);
verify(frameWriter).writeRstStream(eq(ctx), eq(STREAM_ID), anyLong(), any(ChannelPromise.class));
} |
public static <E> List<E> ensureImmutable(List<E> list) {
if (list.isEmpty()) return Collections.emptyList();
// Faster to make a copy than check the type to see if it is already a singleton list
if (list.size() == 1) return Collections.singletonList(list.get(0));
if (isImmutable(list)) return list;
return Collections.unmodifiableList(new ArrayList<E>(list));
} | @Test void ensureImmutable_doesntCopyImmutableList() {
List<Object> list = ImmutableList.of("foo", "bar");
assertThat(Lists.ensureImmutable(list))
.isSameAs(list);
} |
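A minimal companion sketch (not part of the original pair, assuming the same Lists helper and AssertJ assertions as the test above): for a plain mutable list with more than one element, ensureImmutable should return an unmodifiable copy rather than the same instance.
List<Object> mutable = new ArrayList<>(Arrays.asList("foo", "bar"));
List<Object> wrapped = Lists.ensureImmutable(mutable);
// A defensive copy is made, so the result is a different, read-only instance.
assertThat(wrapped).isNotSameAs(mutable);
assertThat(wrapped).containsExactly("foo", "bar");
assertThatThrownBy(() -> wrapped.add("baz")).isInstanceOf(UnsupportedOperationException.class);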
public static <T> Collector<T, ?, Optional<T>> singleton() {
return Collectors.collectingAndThen(
Collectors.toList(),
list -> {
if (list.size() > 1) throw new IllegalArgumentException("More than one element");
return list.stream().findAny();
}
);
} | @Test
public void collector_returns_singleton() {
List<String> items = List.of("foo1", "bar", "foo2");
Optional<String> bar = items.stream().filter(s -> s.startsWith("bar")).collect(CustomCollectors.singleton());
assertEquals(Optional.of("bar"), bar);
} |
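An illustrative sketch (not from the source pair, reusing the CustomCollectors.singleton() collector defined above): no match yields Optional.empty(), while more than one match triggers the IllegalArgumentException thrown by the finisher.
List<String> items = List.of("foo1", "bar", "foo2");
// No element starts with "baz", so the collector returns an empty Optional.
assertEquals(Optional.empty(), items.stream().filter(s -> s.startsWith("baz")).collect(CustomCollectors.singleton()));
try {
    // Two elements start with "foo", so the finisher throws.
    items.stream().filter(s -> s.startsWith("foo")).collect(CustomCollectors.singleton());
    fail("expected IllegalArgumentException for more than one match");
} catch (IllegalArgumentException expected) {
    // pass
}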
boolean publishVersion() {
try {
TxnInfoPB txnInfo = new TxnInfoPB();
txnInfo.txnId = watershedTxnId;
txnInfo.combinedTxnLog = false;
txnInfo.txnType = TxnTypePB.TXN_NORMAL;
txnInfo.commitTime = finishedTimeMs / 1000;
for (long partitionId : physicalPartitionIndexMap.rowKeySet()) {
long commitVersion = commitVersionMap.get(partitionId);
Map<Long, MaterializedIndex> shadowIndexMap = physicalPartitionIndexMap.row(partitionId);
for (MaterializedIndex shadowIndex : shadowIndexMap.values()) {
Utils.publishVersion(shadowIndex.getTablets(), txnInfo, 1, commitVersion, warehouseId);
}
}
return true;
} catch (Exception e) {
LOG.error("Fail to publish version for schema change job {}: {}", jobId, e.getMessage());
return false;
}
} | @Test
public void testPublishVersion() throws AlterCancelException {
new MockUp<Utils>() {
@Mock
public void publishVersion(@NotNull List<Tablet> tablets, TxnInfoPB txnInfo, long baseVersion,
long newVersion, long warehouseId)
throws
RpcException {
throw new RpcException("publish version failed", "127.0.0.1");
}
};
new MockUp<LakeTableSchemaChangeJob>() {
@Mock
public void sendAgentTask(AgentBatchTask batchTask) {
batchTask.getAllTasks().forEach(t -> t.setFinished(true));
}
};
schemaChangeJob.runPendingJob();
Assert.assertEquals(AlterJobV2.JobState.WAITING_TXN, schemaChangeJob.getJobState());
schemaChangeJob.runWaitingTxnJob();
Assert.assertEquals(AlterJobV2.JobState.RUNNING, schemaChangeJob.getJobState());
Collection<Partition> partitions = table.getPartitions();
Assert.assertEquals(1, partitions.size());
Partition partition = partitions.stream().findFirst().orElse(null);
Assert.assertNotNull(partition);
Assert.assertEquals(1, partition.getVisibleVersion());
Assert.assertEquals(2, partition.getNextVersion());
// Disable sending publish version
partition.setNextVersion(3);
schemaChangeJob.runRunningJob();
Assert.assertEquals(AlterJobV2.JobState.FINISHED_REWRITING, schemaChangeJob.getJobState());
List<MaterializedIndex> shadowIndexes =
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW);
Assert.assertEquals(1, shadowIndexes.size());
// The partition's visible version has not caught up with the commit version of this schema change job yet.
schemaChangeJob.runFinishedRewritingJob();
Assert.assertEquals(AlterJobV2.JobState.FINISHED_REWRITING, schemaChangeJob.getJobState());
// Catch the partition's visible version up with the commit version
partition.setVisibleVersion(2, System.currentTimeMillis());
// Drop table
db.dropTable(table.getName());
Exception exception = Assert.assertThrows(AlterCancelException.class, () -> {
schemaChangeJob.runFinishedRewritingJob();
});
Assert.assertTrue(exception.getMessage().contains("Table does not exist"));
Assert.assertEquals(AlterJobV2.JobState.FINISHED_REWRITING, schemaChangeJob.getJobState());
// Add table back to database
db.registerTableUnlocked(table);
// We've mocked Utils.publishVersion to throw RpcException, so this runFinishedRewritingJob call will fail but
// should not throw any exception.
schemaChangeJob.runFinishedRewritingJob();
Assert.assertEquals(AlterJobV2.JobState.FINISHED_REWRITING, schemaChangeJob.getJobState());
// Make publish version success
new MockUp<Utils>() {
@Mock
public void publishVersion(@NotNull List<Tablet> tablets, TxnInfoPB txnInfo, long baseVersion,
long newVersion, long warehouseId) {
// nothing to do
}
};
schemaChangeJob.runFinishedRewritingJob();
Assert.assertEquals(AlterJobV2.JobState.FINISHED, schemaChangeJob.getJobState());
Assert.assertTrue(schemaChangeJob.getFinishedTimeMs() > System.currentTimeMillis() - 10_000L);
Assert.assertEquals(2, table.getBaseSchema().size());
Assert.assertEquals("c0", table.getBaseSchema().get(0).getName());
Assert.assertEquals("c1", table.getBaseSchema().get(1).getName());
Assert.assertSame(partition, table.getPartitions().stream().findFirst().get());
Assert.assertEquals(3, partition.getVisibleVersion());
Assert.assertEquals(4, partition.getNextVersion());
shadowIndexes = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW);
Assert.assertEquals(0, shadowIndexes.size());
List<MaterializedIndex> normalIndexes =
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE);
Assert.assertEquals(1, normalIndexes.size());
MaterializedIndex normalIndex = normalIndexes.get(0);
// Does not support cancel job in FINISHED state.
schemaChangeJob.cancel("test");
Assert.assertEquals(AlterJobV2.JobState.FINISHED, schemaChangeJob.getJobState());
} |
static Map<String, Comparable> prepareProperties(Map<String, Comparable> properties,
Collection<PropertyDefinition> propertyDefinitions) {
Map<String, Comparable> mappedProperties = createHashMap(propertyDefinitions.size());
for (PropertyDefinition propertyDefinition : propertyDefinitions) {
String propertyKey = propertyDefinition.key();
if (properties.containsKey(propertyKey.replace("-", ""))) {
properties.put(propertyKey, properties.remove(propertyKey.replace("-", "")));
}
if (!properties.containsKey(propertyKey)) {
if (!propertyDefinition.optional()) {
throw new InvalidConfigurationException(
String.format("Missing property '%s' on discovery strategy", propertyKey));
}
continue;
}
Comparable value = properties.get(propertyKey);
TypeConverter typeConverter = propertyDefinition.typeConverter();
Comparable mappedValue = typeConverter.convert(value);
ValueValidator validator = propertyDefinition.validator();
if (validator != null) {
validator.validate(mappedValue);
}
mappedProperties.put(propertyKey, mappedValue);
}
verifyNoUnknownProperties(mappedProperties, properties);
return mappedProperties;
} | @Test(expected = InvalidConfigurationException.class)
public void unsatisfiedRequiredProperty() {
// given
Map<String, Comparable> properties = emptyMap();
Collection<PropertyDefinition> propertyDefinitions = singletonList(PROPERTY_DEFINITION_1);
// when
prepareProperties(properties, propertyDefinitions);
// then
// throw exception
} |
public static Logger logger(Class<?> clazz) {
return getLogger(clazz);
} | @Test
void testLogger() {
Logger logger = LogUtils.logger(LogUtilsTest.class);
assertNotNull(logger);
} |
@Override
public void upgrade() {
if (clusterConfigService.get(V20161216123500_Succeeded.class) != null) {
return;
}
// The default index set must have been created first.
checkState(clusterConfigService.get(DefaultIndexSetCreated.class) != null, "The default index set hasn't been created yet. This is a bug!");
final IndexSetConfig defaultIndexSet = indexSetService.getDefault();
migrateIndexSet(defaultIndexSet, elasticsearchConfiguration.getDefaultIndexTemplateName());
final List<IndexSetConfig> allWithoutDefault = indexSetService.findAll()
.stream()
.filter(indexSetConfig -> !indexSetConfig.equals(defaultIndexSet))
.collect(Collectors.toList());
for (IndexSetConfig indexSetConfig : allWithoutDefault) {
migrateIndexSet(indexSetConfig, indexSetConfig.indexPrefix() + "-template");
}
clusterConfigService.write(V20161216123500_Succeeded.create());
} | @Test
public void migrationDoesNotRunAgainIfMigrationWasSuccessfulBefore() {
when(clusterConfigService.get(V20161216123500_Succeeded.class)).thenReturn(V20161216123500_Succeeded.create());
migration.upgrade();
verify(clusterConfigService).get(V20161216123500_Succeeded.class);
verifyNoMoreInteractions(clusterConfigService);
verifyNoMoreInteractions(indexSetService);
} |
public static void deleteIfExists(final File file)
{
try
{
Files.deleteIfExists(file.toPath());
}
catch (final IOException ex)
{
LangUtil.rethrowUnchecked(ex);
}
} | @Test
void deleteIfExistsEmptyDirectory() throws IOException
{
final Path dir = tempDir.resolve("dir");
Files.createDirectory(dir);
IoUtil.deleteIfExists(dir.toFile());
assertFalse(Files.exists(dir));
} |
static AnnotatedClusterState generatedStateFrom(final Params params) {
final ContentCluster cluster = params.cluster;
final ClusterState workingState = ClusterState.emptyState();
final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
workingState.setNodeState(nodeInfo.getNode(), nodeState);
}
takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
if (reasonToBeDown.isPresent()) {
workingState.setClusterState(State.DOWN);
}
workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
} | @Test
void maintenance_nodes_in_downed_group_are_not_affected() {
final ClusterFixture fixture = ClusterFixture
.forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3))
.bringEntireClusterUp()
.proposeStorageNodeWantedState(3, State.MAINTENANCE)
.reportStorageNodeState(4, State.DOWN);
final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.68);
final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
// 4 is down by itself, 5 is down implicitly and 3 should happily stay in Maintenance mode.
// Side note: most special cases for when a node should and should not be affected by group
// down edges are covered in GroupAvailabilityCalculatorTest and GroupAutoTakedownTest.
// We test this case explicitly since it's an assurance that code integration works as expected.
assertThat(state.toString(), equalTo("distributor:9 storage:9 .3.s:m .4.s:d .5.s:d"));
} |
@Override
public CompletableFuture<Collection<PartitionWithMetrics>> getPartitionWithMetrics(
Duration timeout, Set<ResultPartitionID> expectedPartitions) {
this.partitionsToFetch = expectedPartitions;
this.fetchPartitionsFuture = new CompletableFuture<>();
// check already fetched partitions
checkPartitionOnTaskManagerReportFinished();
return FutureUtils.orTimeout(
fetchPartitionsFuture, timeout.toMillis(), TimeUnit.MILLISECONDS, null)
.handleAsync(
(metrics, throwable) -> {
stopFetchAndRetainPartitionWithMetricsOnTaskManager();
if (throwable != null) {
if (throwable instanceof TimeoutException) {
log.warn(
"Timeout occurred after {} ms "
+ "while fetching partition(s) ({}) from task managers.",
timeout.toMillis(),
expectedPartitions);
return new ArrayList<>(fetchedPartitionsWithMetrics.values());
}
throw new CompletionException(throwable);
}
return new ArrayList<>(fetchedPartitionsWithMetrics.values());
},
getMainThreadExecutor());
} | @Test
void testGetPartitionWithMetrics() throws Exception {
JobVertex jobVertex = new JobVertex("jobVertex");
jobVertex.setInvokableClass(NoOpInvokable.class);
jobVertex.setParallelism(1);
final JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(jobVertex);
try (final JobMaster jobMaster =
new JobMasterBuilder(jobGraph, rpcService)
.withConfiguration(configuration)
.withHighAvailabilityServices(haServices)
.withHeartbeatServices(heartbeatServices)
.withBlocklistHandlerFactory(
new DefaultBlocklistHandler.Factory(Duration.ofMillis((100L))))
.createJobMaster()) {
jobMaster.start();
final JobMasterGateway jobMasterGateway =
jobMaster.getSelfGateway(JobMasterGateway.class);
DefaultShuffleMetrics shuffleMetrics1 =
new DefaultShuffleMetrics(new ResultPartitionBytes(new long[] {1, 2, 3}));
NettyShuffleDescriptor descriptor1 =
NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
Collection<PartitionWithMetrics> defaultPartitionWithMetrics1 =
Collections.singletonList(
new DefaultPartitionWithMetrics(descriptor1, shuffleMetrics1));
DefaultShuffleMetrics shuffleMetrics2 =
new DefaultShuffleMetrics(new ResultPartitionBytes(new long[] {4, 5, 6}));
NettyShuffleDescriptor descriptor2 =
NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
Collection<PartitionWithMetrics> defaultPartitionWithMetrics2 =
Collections.singletonList(
new DefaultPartitionWithMetrics(descriptor2, shuffleMetrics2));
DefaultShuffleMetrics shuffleMetrics3 =
new DefaultShuffleMetrics(new ResultPartitionBytes(new long[] {7, 8, 9}));
NettyShuffleDescriptor descriptor3 =
NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
Collection<PartitionWithMetrics> defaultPartitionWithMetrics3 =
Collections.singletonList(
new DefaultPartitionWithMetrics(descriptor3, shuffleMetrics3));
DefaultShuffleMetrics shuffleMetrics4 =
new DefaultShuffleMetrics(new ResultPartitionBytes(new long[] {10, 11}));
NettyShuffleDescriptor descriptor4 =
NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
Collection<PartitionWithMetrics> defaultPartitionWithMetrics4 =
Collections.singletonList(
new DefaultPartitionWithMetrics(descriptor4, shuffleMetrics4));
// start fetch and retain partitions and then register tm1
final TestingTaskExecutorGateway taskExecutorGateway1 =
new TestingTaskExecutorGatewayBuilder()
.setRequestPartitionWithMetricsFunction(
ignored ->
CompletableFuture.completedFuture(
defaultPartitionWithMetrics1))
.setAddress("tm1")
.createTestingTaskExecutorGateway();
registerSlotsAtJobMaster(
1,
jobMasterGateway,
jobGraph.getJobID(),
taskExecutorGateway1,
new LocalUnresolvedTaskManagerLocation());
jobMaster.startFetchAndRetainPartitionWithMetricsOnTaskManager();
verifyPartitionMetrics(
jobMaster.getPartitionWithMetricsOnTaskManagers(),
defaultPartitionWithMetrics1);
// register tm2
TestingTaskExecutorGateway taskExecutorGateway2 =
new TestingTaskExecutorGatewayBuilder()
.setRequestPartitionWithMetricsFunction(
ignored ->
CompletableFuture.completedFuture(
defaultPartitionWithMetrics2))
.setAddress("tm2")
.createTestingTaskExecutorGateway();
registerSlotsAtJobMaster(
1,
jobMasterGateway,
jobGraph.getJobID(),
taskExecutorGateway2,
new LocalUnresolvedTaskManagerLocation());
Collection<PartitionWithMetrics> expectedMetrics =
new ArrayList<>(defaultPartitionWithMetrics1);
expectedMetrics.addAll(defaultPartitionWithMetrics2);
verifyPartitionMetrics(
jobMaster.getPartitionWithMetricsOnTaskManagers(), expectedMetrics);
// register tm3, which receives the fetch request but does not respond in time.
CompletableFuture<Tuple2<JobID, Set<ResultPartitionID>>> releaseRequest =
new CompletableFuture<>();
CompletableFuture<Collection<PartitionWithMetrics>> fetchPartitionsFuture =
new CompletableFuture<>();
TestingTaskExecutorGateway taskExecutorGateway3 =
new TestingTaskExecutorGatewayBuilder()
.setRequestPartitionWithMetricsFunction(
ignored -> fetchPartitionsFuture)
.setReleasePartitionsConsumer(
(id, partitions) ->
releaseRequest.complete(Tuple2.of(id, partitions)))
.setAddress("tm3")
.createTestingTaskExecutorGateway();
registerSlotsAtJobMaster(
1,
jobMasterGateway,
jobGraph.getJobID(),
taskExecutorGateway3,
new LocalUnresolvedTaskManagerLocation());
// register tm4 which is not included in expected partitions
TestingTaskExecutorGateway taskExecutorGateway4 =
new TestingTaskExecutorGatewayBuilder()
.setRequestPartitionWithMetricsFunction(
ignored ->
CompletableFuture.completedFuture(
defaultPartitionWithMetrics4))
.setAddress("tm4")
.createTestingTaskExecutorGateway();
registerSlotsAtJobMaster(
1,
jobMasterGateway,
jobGraph.getJobID(),
taskExecutorGateway4,
new LocalUnresolvedTaskManagerLocation());
Duration timeout = Duration.ofSeconds(10);
Set<ResultPartitionID> expectedResultPartitions = new HashSet<>();
expectedResultPartitions.add(descriptor1.getResultPartitionID());
expectedResultPartitions.add(descriptor2.getResultPartitionID());
expectedResultPartitions.add(descriptor4.getResultPartitionID());
CompletableFuture<Collection<PartitionWithMetrics>> future =
jobMasterGateway.getPartitionWithMetrics(timeout, expectedResultPartitions);
expectedMetrics = new ArrayList<>(defaultPartitionWithMetrics1);
expectedMetrics.addAll(defaultPartitionWithMetrics2);
expectedMetrics.addAll(defaultPartitionWithMetrics4);
assertThat(future).succeedsWithin(timeout);
verifyPartitionMetrics(
future.get().stream()
.collect(
Collectors.toMap(
metrics ->
metrics.getPartition().getResultPartitionID(),
metrics -> metrics)),
expectedMetrics);
// fetch partition after timeout
fetchPartitionsFuture.complete(defaultPartitionWithMetrics3);
assertThat(releaseRequest.get())
.isEqualTo(
Tuple2.of(
jobGraph.getJobID(),
Collections.singleton(descriptor3.getResultPartitionID())));
// after partition fetching has finished, register tm5
CompletableFuture<Collection<PartitionWithMetrics>> requestFuture =
new CompletableFuture<>();
TestingTaskExecutorGateway taskExecutorGateway5 =
new TestingTaskExecutorGatewayBuilder()
.setRequestPartitionWithMetricsFunction(
ignored -> {
requestFuture.complete(null);
return requestFuture;
})
.setAddress("tm5")
.createTestingTaskExecutorGateway();
registerSlotsAtJobMaster(
1,
jobMasterGateway,
jobGraph.getJobID(),
taskExecutorGateway5,
new LocalUnresolvedTaskManagerLocation());
assertThatThrownBy(() -> requestFuture.get(timeout.toMillis(), TimeUnit.MILLISECONDS))
.isInstanceOf(TimeoutException.class);
}
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
PiAction piAction = (PiAction) o;
return Objects.equal(actionId, piAction.actionId) &&
Objects.equal(runtimeParams, piAction.runtimeParams);
} | @Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(piAction1, sameAsPiAction1)
.addEqualityGroup(piAction2)
.testEquals();
} |
public List<TimerangePreset> convert(final Map<Period, String> timerangeOptions) {
if (timerangeOptions == null) {
return List.of();
}
return timerangeOptions.entrySet()
.stream()
.map(entry -> new TimerangePreset(
periodConverter.apply(entry.getKey()),
entry.getValue())
)
.collect(Collectors.toList());
} | @Test
void testConversionReturnsEmptyListOnEmptyInput() {
assertThat(toTest.convert(Map.of())).isEmpty();
} |
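A one-line companion sketch (assumed, not in the source pair): the null guard at the top of convert(...) maps a null input to an empty list as well.
assertThat(toTest.convert(null)).isEmpty();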
@Override
public Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema, Properties properties) throws IOException {
// Specific to Postgres: If the updated record has TOASTED columns,
// we will need to keep the previous value for those columns
// see https://debezium.io/documentation/reference/connectors/postgresql.html#postgresql-toasted-values
Option<IndexedRecord> insertOrDeleteRecord = super.combineAndGetUpdateValue(currentValue, schema, properties);
if (insertOrDeleteRecord.isPresent()) {
mergeToastedValuesIfPresent(insertOrDeleteRecord.get(), currentValue);
}
return insertOrDeleteRecord;
} | @Test
public void testMergeWithUpdate() throws IOException {
GenericRecord updateRecord = createRecord(1, Operation.UPDATE, 100L);
PostgresDebeziumAvroPayload payload = new PostgresDebeziumAvroPayload(updateRecord, 100L);
GenericRecord existingRecord = createRecord(1, Operation.INSERT, 99L);
Option<IndexedRecord> mergedRecord = payload.combineAndGetUpdateValue(existingRecord, avroSchema);
validateRecord(mergedRecord, 1, Operation.UPDATE, 100L);
GenericRecord lateRecord = createRecord(1, Operation.UPDATE, 98L);
payload = new PostgresDebeziumAvroPayload(lateRecord, 98L);
mergedRecord = payload.combineAndGetUpdateValue(existingRecord, avroSchema);
validateRecord(mergedRecord, 1, Operation.INSERT, 99L);
} |
@Override
public WxMaPhoneNumberInfo getWxMaPhoneNumberInfo(Integer userType, String phoneCode) {
WxMaService service = getWxMaService(userType);
try {
return service.getUserService().getPhoneNoInfo(phoneCode);
} catch (WxErrorException e) {
log.error("[getPhoneNoInfo][userType({}) phoneCode({}) 获得手机号失败]", userType, phoneCode, e);
throw exception(SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR);
}
} | @Test
public void testGetWxMaPhoneNumberInfo_success() throws WxErrorException {
// Prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
String phoneCode = randomString();
// Mock the method
WxMaUserService userService = mock(WxMaUserService.class);
when(wxMaService.getUserService()).thenReturn(userService);
WxMaPhoneNumberInfo phoneNumber = randomPojo(WxMaPhoneNumberInfo.class);
when(userService.getPhoneNoInfo(eq(phoneCode))).thenReturn(phoneNumber);
// Invoke the method under test
WxMaPhoneNumberInfo result = socialClientService.getWxMaPhoneNumberInfo(userType, phoneCode);
// Assert
assertSame(phoneNumber, result);
} |
public int read(final MessageHandler handler)
{
return read(handler, Integer.MAX_VALUE);
} | @Test
void shouldCopeWithExceptionFromHandler()
{
final int msgLength = 16;
final int recordLength = HEADER_LENGTH + msgLength;
final int alignedRecordLength = align(recordLength, ALIGNMENT);
final long tail = alignedRecordLength * 2L;
final long head = 0L;
final int headIndex = (int)head;
when(buffer.getLong(HEAD_COUNTER_INDEX)).thenReturn(head);
when(buffer.getInt(typeOffset(headIndex))).thenReturn(MSG_TYPE_ID);
when(buffer.getIntVolatile(lengthOffset(headIndex))).thenReturn(recordLength);
when(buffer.getInt(typeOffset(headIndex + alignedRecordLength))).thenReturn(MSG_TYPE_ID);
when(buffer.getIntVolatile(lengthOffset(headIndex + alignedRecordLength))).thenReturn(recordLength);
final MutableInteger times = new MutableInteger();
final MessageHandler handler =
(msgTypeId, buffer, index, length) ->
{
if (times.incrementAndGet() == 2)
{
throw new RuntimeException();
}
};
try
{
ringBuffer.read(handler);
}
catch (final RuntimeException ignore)
{
assertThat(times.get(), is(2));
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer, times(1)).putLongOrdered(HEAD_COUNTER_INDEX, tail);
inOrder.verify(buffer, times(0)).setMemory(anyInt(), anyInt(), anyByte());
return;
}
fail("Should have thrown exception");
} |
@Override
public void runCheck() throws PreflightCheckException {
checkDatanodeLock(directories.getConfigurationTargetDir());
checkDatanodeLock(directories.getLogsTargetDir());
} | @Test
void testLockCreation(@TempDir Path dataDir,
@TempDir Path logsDir,
@TempDir Path configDir) throws IOException {
final Path logsDirLock = logsDir.resolve(DatanodeDirectoriesLockfileCheck.DATANODE_LOCKFILE);
final Path configDirLock = configDir.resolve(DatanodeDirectoriesLockfileCheck.DATANODE_LOCKFILE);
final PreflightCheck check = new DatanodeDirectoriesLockfileCheck(VALID_NODE_ID, new DatanodeDirectories(dataDir, logsDir, null, configDir));
check.runCheck();
Assertions.assertThat(Files.readString(logsDirLock)).isEqualTo(VALID_NODE_ID);
Assertions.assertThat(Files.readString(configDirLock)).isEqualTo(VALID_NODE_ID);
} |
public static FieldScope all() {
return FieldScopeImpl.all();
} | @Test
public void testFieldScopes_all() {
Message message = parse("o_int: 3 r_string: \"foo\"");
Message diffMessage = parse("o_int: 5 r_string: \"bar\"");
expectThat(diffMessage).withPartialScope(FieldScopes.all()).isNotEqualTo(message);
expectThat(diffMessage).ignoringFieldScope(FieldScopes.all()).isEqualTo(message);
expectFailureWhenTesting()
.that(diffMessage)
.ignoringFieldScope(FieldScopes.all())
.isNotEqualTo(message);
expectIsNotEqualToFailed();
expectThatFailure().hasMessageThat().contains("ignored: o_int");
expectThatFailure().hasMessageThat().contains("ignored: r_string");
} |
public LoadCompConf scaleParallel(double v) {
return setParallel(Math.max(1, (int) Math.ceil(parallelism * v)));
} | @Test
public void scaleParallel() {
LoadCompConf orig = new LoadCompConf.Builder()
.withId("SOME_SPOUT")
.withParallelism(1)
.withStream(new OutputStream("default", new NormalDistStats(500.0, 100.0, 300.0, 600.0), false))
.build();
assertEquals(500.0, orig.getAllEmittedAggregate(), 0.001);
LoadCompConf scaled = orig.scaleParallel(2);
// Parallelism is doubled
assertEquals(2, scaled.parallelism);
assertEquals("SOME_SPOUT", scaled.id);
//But throughput is the same
assertEquals(500.0, scaled.getAllEmittedAggregate(), 0.001);
} |
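A short sketch (not part of the original pair, built with the same builder as the test above): because scaleParallel uses Math.max(1, ceil(parallelism * v)), scaling down never drops the parallelism below 1.
LoadCompConf one = new LoadCompConf.Builder()
    .withId("SOME_SPOUT")
    .withParallelism(1)
    .withStream(new OutputStream("default", new NormalDistStats(500.0, 100.0, 300.0, 600.0), false))
    .build();
// ceil(1 * 0.1) = 1, clamped by Math.max(1, ...), so parallelism stays at 1.
assertEquals(1, one.scaleParallel(0.1).parallelism);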
@Override
public void createOrUpdate(final String path, final Object data) {
zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT);
} | @Test
public void testOnRuleChangedCreate() {
RuleData ruleData = RuleData.builder()
.id(MOCK_ID)
.name(MOCK_NAME)
.pluginName(MOCK_PLUGIN_NAME)
.selectorId(MOCK_SELECTOR_ID)
.build();
String ruleRealPath = DefaultPathConstants.buildRulePath(ruleData.getPluginName(), ruleData.getSelectorId(), ruleData.getId());
zookeeperDataChangedListener.onRuleChanged(ImmutableList.of(ruleData), DataEventTypeEnum.CREATE);
verify(zkClient, times(1)).createOrUpdate(ruleRealPath, ruleData, CreateMode.PERSISTENT);
} |
public static <T> T loadWithSecrets(Map<String, Object> map, Class<T> clazz, SourceContext sourceContext) {
return loadWithSecrets(map, clazz, secretName -> sourceContext.getSecret(secretName));
} | @Test
public void testSinkLoadWithSecrets() {
Map<String, Object> configMap = new HashMap<>();
configMap.put("notSensitive", "foo");
TestConfig testConfig = IOConfigUtils.loadWithSecrets(configMap, TestConfig.class, new TestSinkContext());
Assert.assertEquals(testConfig.notSensitive, "foo");
Assert.assertEquals(testConfig.password, "my-sink-password");
configMap = new HashMap<>();
configMap.put("notSensitive", "foo");
configMap.put("password", "another-password");
configMap.put("sensitiveLong", 5L);
testConfig = IOConfigUtils.loadWithSecrets(configMap, TestConfig.class, new TestSinkContext());
Assert.assertEquals(testConfig.notSensitive, "foo");
Assert.assertEquals(testConfig.password, "my-sink-password");
Assert.assertEquals(testConfig.sensitiveLong, 5L);
// test derived classes
configMap = new HashMap<>();
configMap.put("notSensitive", "foo");
configMap.put("sensitiveLong", 5L);
DerivedConfig derivedConfig = IOConfigUtils.loadWithSecrets(configMap, DerivedConfig.class, new TestSinkContext());
Assert.assertEquals(derivedConfig.notSensitive, "foo");
Assert.assertEquals(derivedConfig.password, "my-sink-password");
Assert.assertEquals(derivedConfig.sensitiveLong, 5L);
Assert.assertEquals(derivedConfig.moreSensitiveStuff, "more-sensitive-stuff");
configMap = new HashMap<>();
configMap.put("notSensitive", "foo");
configMap.put("sensitiveLong", 5L);
DerivedDerivedConfig derivedDerivedConfig = IOConfigUtils.loadWithSecrets(configMap, DerivedDerivedConfig.class, new TestSinkContext());
Assert.assertEquals(derivedDerivedConfig.notSensitive, "foo");
Assert.assertEquals(derivedDerivedConfig.password, "my-sink-password");
Assert.assertEquals(derivedDerivedConfig.sensitiveLong, 5L);
Assert.assertEquals(derivedDerivedConfig.moreSensitiveStuff, "more-sensitive-stuff");
Assert.assertEquals(derivedDerivedConfig.derivedDerivedSensitive, "derived-derived-sensitive");
} |
@SuppressWarnings("unchecked")
public static <E extends Enum<E>> EnumSet<E> parseEnumSet(final String key,
final String valueString,
final Class<E> enumClass,
final boolean ignoreUnknown) throws IllegalArgumentException {
// build a map of lower case string to enum values.
final Map<String, E> mapping = mapEnumNamesToValues("", enumClass);
// scan the input string and add all which match
final EnumSet<E> enumSet = noneOf(enumClass);
for (String element : getTrimmedStringCollection(valueString)) {
final String item = element.toLowerCase(Locale.ROOT);
if ("*".equals(item)) {
enumSet.addAll(mapping.values());
continue;
}
final E e = mapping.get(item);
if (e != null) {
enumSet.add(e);
} else {
// no match
// unless configured to ignore unknown values, raise an exception
checkArgument(ignoreUnknown, "%s: Unknown option value: %s in list %s."
+ " Valid options for enum class %s are: %s",
key, element, valueString,
enumClass.getName(),
mapping.keySet().stream().collect(Collectors.joining(",")));
}
}
return enumSet;
} | @Test
public void testUnknownEnumNotIgnored() throws Throwable {
intercept(IllegalArgumentException.class, "unrecognized", () ->
parseEnumSet("key", "c, unrecognized", SimpleEnum.class, false));
} |
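A small sketch (assumed, not from the source, using the same SimpleEnum test enum as above): the "*" wildcard selects every constant, and unknown entries are skipped instead of raising when ignoreUnknown is true.
// "*" expands to all constants of the enum.
assertEquals(EnumSet.allOf(SimpleEnum.class), parseEnumSet("key", "*", SimpleEnum.class, false));
// With ignoreUnknown = true, "unrecognized" is silently dropped and only the valid "c" option remains.
EnumSet<SimpleEnum> tolerant = parseEnumSet("key", "c, unrecognized", SimpleEnum.class, true);
assertEquals(1, tolerant.size());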
@Override
public void open(Map<String, Object> config, SinkContext ctx) throws Exception {
kafkaSinkConfig = PulsarKafkaConnectSinkConfig.load(config);
Objects.requireNonNull(kafkaSinkConfig.getTopic(), "Kafka topic is not set");
Preconditions.checkArgument(ctx.getSubscriptionType() == SubscriptionType.Failover
|| ctx.getSubscriptionType() == SubscriptionType.Exclusive,
"Source must run with Exclusive or Failover subscription type");
topicName = kafkaSinkConfig.getTopic();
unwrapKeyValueIfAvailable = kafkaSinkConfig.isUnwrapKeyValueIfAvailable();
sanitizeTopicName = kafkaSinkConfig.isSanitizeTopicName();
collapsePartitionedTopics = kafkaSinkConfig.isCollapsePartitionedTopics();
useOptionalPrimitives = kafkaSinkConfig.isUseOptionalPrimitives();
useIndexAsOffset = kafkaSinkConfig.isUseIndexAsOffset();
maxBatchBitsForOffset = kafkaSinkConfig.getMaxBatchBitsForOffset();
Preconditions.checkArgument(maxBatchBitsForOffset <= 20,
"Cannot use more than 20 bits for maxBatchBitsForOffset");
String kafkaConnectorFQClassName = kafkaSinkConfig.getKafkaConnectorSinkClass();
kafkaSinkConfig.getKafkaConnectorConfigProperties().forEach(props::put);
Class<?> clazz = Class.forName(kafkaConnectorFQClassName);
connector = (SinkConnector) clazz.getConstructor().newInstance();
Class<? extends Task> taskClass = connector.taskClass();
sinkContext = new PulsarKafkaSinkContext();
connector.initialize(sinkContext);
connector.start(Maps.fromProperties(props));
List<Map<String, String>> configs = connector.taskConfigs(1);
Preconditions.checkNotNull(configs);
Preconditions.checkArgument(configs.size() == 1);
// configs may contain immutable/unmodifiable maps
configs = configs.stream()
.map(HashMap::new)
.collect(Collectors.toList());
configs.forEach(x -> {
x.put(PulsarKafkaWorkerConfig.OFFSET_STORAGE_TOPIC_CONFIG, kafkaSinkConfig.getOffsetStorageTopic());
});
task = (SinkTask) taskClass.getConstructor().newInstance();
taskContext =
new PulsarKafkaSinkTaskContext(configs.get(0), ctx, task::open, kafkaName -> {
if (sanitizeTopicName) {
String pulsarTopicName = desanitizedTopicCache.getIfPresent(kafkaName);
if (log.isDebugEnabled()) {
log.debug("desanitizedTopicCache got: kafkaName: {}, pulsarTopicName: {}",
kafkaName, pulsarTopicName);
}
return pulsarTopicName != null ? pulsarTopicName : kafkaName;
} else {
return kafkaName;
}
});
task.initialize(taskContext);
task.start(configs.get(0));
maxBatchSize = kafkaSinkConfig.getBatchSize();
lingerMs = kafkaSinkConfig.getLingerTimeMs();
isRunning = true;
scheduledExecutor.scheduleWithFixedDelay(() ->
this.flushIfNeeded(true), lingerMs, lingerMs, TimeUnit.MILLISECONDS);
log.info("Kafka sink started : {}.", props);
} | @Test
public void subscriptionTypeTest() throws Exception {
try (KafkaConnectSink sink = new KafkaConnectSink()) {
log.info("Failover is allowed");
sink.open(props, context);
}
when(context.getSubscriptionType()).thenReturn(SubscriptionType.Exclusive);
try (KafkaConnectSink sink = new KafkaConnectSink()) {
log.info("Exclusive is allowed");
sink.open(props, context);
}
when(context.getSubscriptionType()).thenReturn(SubscriptionType.Key_Shared);
try (KafkaConnectSink sink = new KafkaConnectSink()) {
log.info("Key_Shared is not allowed");
sink.open(props, context);
fail("expected exception");
} catch (IllegalArgumentException iae) {
// pass
}
when(context.getSubscriptionType()).thenReturn(SubscriptionType.Shared);
try (KafkaConnectSink sink = new KafkaConnectSink()) {
log.info("Shared is not allowed");
sink.open(props, context);
fail("expected exception");
} catch (IllegalArgumentException iae) {
// pass
}
when(context.getSubscriptionType()).thenReturn(null);
try (KafkaConnectSink sink = new KafkaConnectSink()) {
log.info("Type is required");
sink.open(props, context);
fail("expected exception");
} catch (IllegalArgumentException iae) {
// pass
}
} |
@Override
public LabelsToNodesInfo getLabelsToNodes(Set<String> labels)
throws IOException {
try {
long startTime = clock.getTime();
Collection<SubClusterInfo> subClustersActive = federationFacade.getActiveSubClusters();
Class[] argsClasses = new Class[]{Set.class};
Object[] args = new Object[]{labels};
ClientMethod remoteMethod = new ClientMethod("getLabelsToNodes", argsClasses, args);
Map<SubClusterInfo, LabelsToNodesInfo> labelsToNodesInfoMap =
invokeConcurrent(subClustersActive, remoteMethod, LabelsToNodesInfo.class);
Map<NodeLabelInfo, NodeIDsInfo> labelToNodesMap = new HashMap<>();
labelsToNodesInfoMap.values().forEach(labelsToNode -> {
Map<NodeLabelInfo, NodeIDsInfo> values = labelsToNode.getLabelsToNodes();
for (Map.Entry<NodeLabelInfo, NodeIDsInfo> item : values.entrySet()) {
NodeLabelInfo key = item.getKey();
NodeIDsInfo leftValue = item.getValue();
NodeIDsInfo rightValue = labelToNodesMap.getOrDefault(key, null);
NodeIDsInfo newValue = NodeIDsInfo.add(leftValue, rightValue);
labelToNodesMap.put(key, newValue);
}
});
LabelsToNodesInfo labelsToNodesInfo = new LabelsToNodesInfo(labelToNodesMap);
if (labelsToNodesInfo != null) {
long stopTime = clock.getTime();
RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_LABELSTONODES,
TARGET_WEB_SERVICE);
routerMetrics.succeededGetLabelsToNodesRetrieved(stopTime - startTime);
return labelsToNodesInfo;
}
} catch (NotFoundException e) {
routerMetrics.incrLabelsToNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_LABELSTONODES, UNKNOWN,
TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowIOException("get all active sub cluster(s) error.", e);
} catch (YarnException e) {
routerMetrics.incrLabelsToNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_LABELSTONODES, UNKNOWN,
TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowIOException(
e, "getLabelsToNodes by labels = %s with yarn error.", StringUtils.join(labels, ","));
}
routerMetrics.incrLabelsToNodesFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_LABELSTONODES, UNKNOWN,
TARGET_WEB_SERVICE, "getLabelsToNodes Failed.");
throw RouterServerUtil.logAndReturnRunTimeException(
"getLabelsToNodes by labels = %s Failed.", StringUtils.join(labels, ","));
} | @Test
public void testGetLabelsToNodes() throws Exception {
LabelsToNodesInfo labelsToNodesInfo = interceptor.getLabelsToNodes(null);
Map<NodeLabelInfo, NodeIDsInfo> map = labelsToNodesInfo.getLabelsToNodes();
Assert.assertNotNull(map);
Assert.assertEquals(3, map.size());
NodeLabel labelX = NodeLabel.newInstance("x", false);
NodeLabelInfo nodeLabelInfoX = new NodeLabelInfo(labelX);
NodeIDsInfo nodeIDsInfoX = map.get(nodeLabelInfoX);
Assert.assertNotNull(nodeIDsInfoX);
Assert.assertEquals(2, nodeIDsInfoX.getNodeIDs().size());
Resource resourceX =
nodeIDsInfoX.getPartitionInfo().getResourceAvailable().getResource();
Assert.assertNotNull(resourceX);
Assert.assertEquals(4*10, resourceX.getVirtualCores());
Assert.assertEquals(4*20*1024, resourceX.getMemorySize());
NodeLabel labelY = NodeLabel.newInstance("y", false);
NodeLabelInfo nodeLabelInfoY = new NodeLabelInfo(labelY);
NodeIDsInfo nodeIDsInfoY = map.get(nodeLabelInfoY);
Assert.assertNotNull(nodeIDsInfoY);
Assert.assertEquals(2, nodeIDsInfoY.getNodeIDs().size());
Resource resourceY =
nodeIDsInfoY.getPartitionInfo().getResourceAvailable().getResource();
Assert.assertNotNull(resourceY);
Assert.assertEquals(4*20, resourceY.getVirtualCores());
Assert.assertEquals(4*40*1024, resourceY.getMemorySize());
} |
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("innerCoder", innerCoder)
.add("dict", dict == null ? null : "base64:" + BaseEncoding.base64().encode(dict))
.add("level", level)
.toString();
} | @Test
public void testToString() throws Exception {
assertEquals(
"ZstdCoder{innerCoder=StringUtf8Coder, dict=null, level=0}",
ZstdCoder.of(StringUtf8Coder.of(), null, 0).toString());
assertEquals(
"ZstdCoder{innerCoder=ByteArrayCoder, dict=base64:, level=1}",
ZstdCoder.of(ByteArrayCoder.of(), new byte[0], 1).toString());
assertEquals(
"ZstdCoder{innerCoder=TextualIntegerCoder, dict=base64:AA==, level=2}",
ZstdCoder.of(TextualIntegerCoder.of(), new byte[1], 2).toString());
} |
@Override
public ManagementMetadata get(EurekaInstanceConfigBean instance, int serverPort, String serverContextPath,
String managementContextPath, Integer managementPort) {
if (isRandom(managementPort)) {
return null;
}
if (managementPort == null && isRandom(serverPort)) {
return null;
}
String healthCheckUrl = getHealthCheckUrl(instance, serverPort, serverContextPath, managementContextPath,
managementPort, false);
String statusPageUrl = getStatusPageUrl(instance, serverPort, serverContextPath, managementContextPath,
managementPort);
ManagementMetadata metadata = new ManagementMetadata(healthCheckUrl, statusPageUrl,
managementPort == null ? serverPort : managementPort);
if (instance.isSecurePortEnabled()) {
metadata.setSecureHealthCheckUrl(getHealthCheckUrl(instance, serverPort, serverContextPath,
managementContextPath, managementPort, true));
}
return metadata;
} | @Test
void managementPortIsRandom() {
int serverPort = 0;
String serverContextPath = "/";
String managementContextPath = null;
Integer managementPort = 0;
ManagementMetadata actual = provider.get(INSTANCE, serverPort, serverContextPath, managementContextPath,
managementPort);
assertThat(actual).isNull();
} |
public AstNode rewrite(final AstNode node, final C context) {
return rewriter.process(node, context);
} | @Test
public void shouldRewriteInsertIntoWithPartitionBy() {
// Given:
final InsertInto ii = new InsertInto(location, sourceName, query, insertIntoProperties);
when(mockRewriter.apply(query, context)).thenReturn(rewrittenQuery);
when(expressionRewriter.apply(expression, context)).thenReturn(rewrittenExpression);
// When:
final AstNode rewritten = rewriter.rewrite(ii, context);
// Then:
assertThat(
rewritten,
equalTo(
new InsertInto(
location,
sourceName,
rewrittenQuery,
insertIntoProperties
)
)
);
} |
@Override
protected void rename(
List<HadoopResourceId> srcResourceIds,
List<HadoopResourceId> destResourceIds,
MoveOptions... moveOptions)
throws IOException {
if (moveOptions.length > 0) {
throw new UnsupportedOperationException("Support for move options is not yet implemented.");
}
for (int i = 0; i < srcResourceIds.size(); ++i) {
final Path srcPath = srcResourceIds.get(i).toPath();
final Path destPath = destResourceIds.get(i).toPath();
// this enforces src and dest file systems to match
final org.apache.hadoop.fs.FileSystem fs = srcPath.getFileSystem(configuration);
// rename in HDFS requires the target directory to exist or silently fails (BEAM-4861)
mkdirs(destPath);
boolean success = fs.rename(srcPath, destPath);
// If the failure was due to the file already existing, delete and retry (BEAM-5036).
// This should be the exceptional case, so handle here rather than incur the overhead of
// testing first
if (!success && fs.exists(srcPath) && fs.exists(destPath)) {
LOG.debug(LOG_DELETING_EXISTING_FILE, Path.getPathWithoutSchemeAndAuthority(destPath));
fs.delete(destPath, false); // not recursive
success = fs.rename(srcPath, destPath);
}
if (!success) {
if (!fs.exists(srcPath)) {
throw new FileNotFoundException(
String.format(
"Unable to rename resource %s to %s as source not found.", srcPath, destPath));
} else if (fs.exists(destPath)) {
throw new FileAlreadyExistsException(
String.format(
"Unable to rename resource %s to %s as destination already exists and couldn't be deleted.",
srcPath, destPath));
} else {
throw new IOException(
String.format(
"Unable to rename resource %s to %s. No further information provided by underlying filesystem.",
srcPath, destPath));
}
}
}
} | @Test
public void testRename() throws Exception {
create("testFileA", "testDataA".getBytes(StandardCharsets.UTF_8));
create("testFileB", "testDataB".getBytes(StandardCharsets.UTF_8));
// ensure files exist
assertArrayEquals("testDataA".getBytes(StandardCharsets.UTF_8), read("testFileA", 0));
assertArrayEquals("testDataB".getBytes(StandardCharsets.UTF_8), read("testFileB", 0));
fileSystem.rename(
ImmutableList.of(testPath("testFileA"), testPath("testFileB")),
ImmutableList.of(testPath("renameFileA"), testPath("renameFileB")));
List<MatchResult> results = fileSystem.match(ImmutableList.of(testPath("*").toString()));
assertEquals(Status.OK, Iterables.getOnlyElement(results).status());
assertThat(
Iterables.getOnlyElement(results).metadata(),
containsInAnyOrder(
Metadata.builder()
.setResourceId(testPath("renameFileA"))
.setIsReadSeekEfficient(true)
.setSizeBytes("testDataA".getBytes(StandardCharsets.UTF_8).length)
.setLastModifiedMillis(lastModified("renameFileA"))
.build(),
Metadata.builder()
.setResourceId(testPath("renameFileB"))
.setIsReadSeekEfficient(true)
.setSizeBytes("testDataB".getBytes(StandardCharsets.UTF_8).length)
.setLastModifiedMillis(lastModified("renameFileB"))
.build()));
// ensure files exist
assertArrayEquals("testDataA".getBytes(StandardCharsets.UTF_8), read("renameFileA", 0));
assertArrayEquals("testDataB".getBytes(StandardCharsets.UTF_8), read("renameFileB", 0));
} |
@Override
public Serializable getValueFromText(final String xmlMessage) {
Serializable readObject = null;
try {
XStream xstream = JMeterUtils.createXStream();
readObject = (Serializable) xstream.fromXML(xmlMessage, readObject);
} catch (Exception e) {
throw new IllegalStateException("Unable to load object instance from text", e);
}
return readObject;
} | @Test
public void getValueFromText() {
String text = "<org.apache.jmeter.protocol.jms.sampler.render.Person><name>Doe</name></org.apache.jmeter.protocol.jms.sampler.render.Person>";
Serializable object = render.getValueFromText(text);
assertObject(object, "Doe");
} |
@Override
public TableBuilder buildTable(TableIdentifier identifier, Schema schema) {
return new ViewAwareTableBuilder(identifier, schema);
} | @Test
public void testCommitExceptionWithoutMessage() {
TableIdentifier tableIdent = TableIdentifier.of("db", "tbl");
BaseTable table = (BaseTable) catalog.buildTable(tableIdent, SCHEMA).create();
TableOperations ops = table.operations();
TableMetadata metadataV1 = ops.current();
table.updateSchema().addColumn("n", Types.IntegerType.get()).commit();
ops.refresh();
try (MockedStatic<JdbcUtil> mockedStatic = Mockito.mockStatic(JdbcUtil.class)) {
mockedStatic
.when(() -> JdbcUtil.loadTable(any(), any(), any(), any()))
.thenThrow(new SQLException());
assertThatThrownBy(() -> ops.commit(ops.current(), metadataV1))
.isInstanceOf(UncheckedSQLException.class)
.hasMessageStartingWith("Unknown failure");
}
} |
@Override
public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc,
boolean addFieldName, boolean addCr ) {
String retval = "";
String fieldname = v.getName();
int length = v.getLength();
int precision = v.getPrecision();
if ( addFieldName ) {
retval += fieldname + " ";
}
int type = v.getType();
switch ( type ) {
case ValueMetaInterface.TYPE_TIMESTAMP:
case ValueMetaInterface.TYPE_DATE:
retval += "DATETIME";
break;
// Move back to Y/N for bug - [# 1538] Repository on MS ACCESS: error creating repository
case ValueMetaInterface.TYPE_BOOLEAN:
if ( supportsBooleanDataType() ) {
retval += "BIT";
} else {
retval += "CHAR(1)";
}
break;
case ValueMetaInterface.TYPE_NUMBER:
case ValueMetaInterface.TYPE_INTEGER:
case ValueMetaInterface.TYPE_BIGNUMBER:
if ( fieldname.equalsIgnoreCase( tk ) || // Technical key
fieldname.equalsIgnoreCase( pk ) // Primary key
) {
if ( useAutoinc ) {
retval += "COUNTER PRIMARY KEY";
} else {
retval += "LONG PRIMARY KEY";
}
} else {
if ( precision == 0 ) {
if ( length > 9 ) {
retval += "DOUBLE";
} else {
if ( length > 5 ) {
retval += "LONG";
} else {
retval += "INTEGER";
}
}
} else {
retval += "DOUBLE";
}
}
break;
case ValueMetaInterface.TYPE_STRING:
if ( length > 0 ) {
if ( length < 256 ) {
retval += "TEXT(" + length + ")";
} else {
retval += "MEMO";
}
} else {
retval += "TEXT";
}
break;
case ValueMetaInterface.TYPE_BINARY:
retval += " LONGBINARY";
break;
default:
retval += " UNKNOWN";
break;
}
if ( addCr ) {
retval += Const.CR;
}
return retval;
} | @Test
public void testGetFieldDefinition() {
assertEquals( "FOO DATETIME",
odbcMeta.getFieldDefinition( new ValueMetaDate( "FOO" ), "", "", false, true, false ) );
assertEquals( "DATETIME",
odbcMeta.getFieldDefinition( new ValueMetaTimestamp( "FOO" ), "", "", false, false, false ) );
assertFalse( odbcMeta.supportsBooleanDataType() );
assertEquals( "CHAR(1)",
odbcMeta.getFieldDefinition( new ValueMetaBoolean( "FOO" ), "", "", false, false, false ) );
odbcMeta.setSupportsBooleanDataType( true );
assertEquals( "BIT",
odbcMeta.getFieldDefinition( new ValueMetaBoolean( "FOO" ), "", "", false, false, false ) );
odbcMeta.setSupportsBooleanDataType( false );
// Key field Stuff
assertEquals( "COUNTER PRIMARY KEY",
odbcMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 8, 0 ), "", "FOO", true, false, false ) );
assertEquals( "LONG PRIMARY KEY",
odbcMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 10, 0 ), "FOO", "", false, false, false ) );
assertEquals( "LONG PRIMARY KEY",
odbcMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 8, 0 ), "", "FOO", false, false, false ) );
// Integer types
assertEquals( "INTEGER",
odbcMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 4, 0 ), "", "", false, false, false ) );
assertEquals( "LONG",
odbcMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 6, 0 ), "", "", false, false, false ) );
assertEquals( "LONG",
odbcMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 9, 0 ), "", "", false, false, false ) );
assertEquals( "DOUBLE",
odbcMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 10, 0 ), "", "", false, false, false ) );
// Number Types ( as written, precision != 0 )
assertEquals( "DOUBLE",
odbcMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 10, 1 ), "", "", false, false, false ) );
assertEquals( "DOUBLE",
odbcMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 3, 1 ), "", "", false, false, false ) );
assertEquals( "DOUBLE",
odbcMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 3, -5 ), "", "", false, false, false ) );
assertEquals( "DOUBLE",
odbcMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", -3, -5 ), "", "", false, false, false ) );
// String Types
assertEquals( "TEXT(255)",
odbcMeta.getFieldDefinition( new ValueMetaString( "FOO", 255, 0 ), "", "", false, false, false ) ); // Likely a bug - the maxTextFieldLength is set to 65536 - so this limitation is likely wrong
assertEquals( "TEXT(1)",
odbcMeta.getFieldDefinition( new ValueMetaString( "FOO", 1, 0 ), "", "", false, false, false ) );
assertEquals( "MEMO",
odbcMeta.getFieldDefinition( new ValueMetaString( "FOO", 256, 0 ), "", "", false, false, false ) );
assertEquals( "TEXT",
odbcMeta.getFieldDefinition( new ValueMetaString( "FOO", 0, 0 ), "", "", false, false, false ) );
assertEquals( "TEXT",
odbcMeta.getFieldDefinition( new ValueMetaString( "FOO" ), "", "", false, false, false ) );
// Other Types
assertEquals( " LONGBINARY",
odbcMeta.getFieldDefinition( new ValueMetaBinary( "FOO", 200, 1 ), "", "", false, false, false ) );
// Unknowns
assertEquals( " UNKNOWN",
odbcMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, false ) );
assertEquals( " UNKNOWN" + System.getProperty( "line.separator" ),
odbcMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, true ) );
} |
@Override
public Write.Append append(final Path file, final TransferStatus status) throws BackgroundException {
return new Write.Append(status.isExists()).withStatus(status);
} | @Test
@Ignore
public void testAppend() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
final int length = 32770;
final byte[] content = RandomUtils.nextBytes(length);
final OutputStream out = local.getOutputStream(false);
IOUtils.write(content, out);
out.close();
final Checksum checksumPart1;
final Checksum checksumPart2;
final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
{
final TransferStatus status = new TransferStatus().withLength(content.length / 2);
final BytecountStreamListener count = new BytecountStreamListener();
checksumPart1 = new IRODSUploadFeature(session).upload(
test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count,
status,
new DisabledConnectionCallback());
assertEquals(content.length / 2, count.getSent());
}
{
final TransferStatus status = new TransferStatus().withLength(content.length / 2).withOffset(content.length / 2).append(true);
checksumPart2 = new IRODSUploadFeature(session).upload(
test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(),
status,
new DisabledConnectionCallback());
assertEquals(content.length / 2, status.getOffset());
}
assertNotEquals(checksumPart1, checksumPart2);
final byte[] buffer = new byte[content.length];
final InputStream in = new IRODSReadFeature(session).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
IOUtils.readFully(in, buffer);
in.close();
assertArrayEquals(content, buffer);
new IRODSDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
session.close();
} |
public static COSName getPDFName(String aName)
{
WeakReference<COSName> weakRef = NAME_MAP.get(aName);
COSName name = weakRef != null ? weakRef.get() : null;
if (name == null)
{
// Although we use a ConcurrentHashMap, we cannot use computeIfAbsent() because the returned reference
// might be stale (even the newly created one).
// Use double checked locking to make the code thread safe.
synchronized (NAME_MAP)
{
weakRef = NAME_MAP.get(aName);
name = weakRef != null ? weakRef.get() : null;
if (name == null)
{
name = new COSName(aName);
CLEANER.register(name, () -> NAME_MAP.remove(aName));
NAME_MAP.put(aName, new WeakReference<>(name));
}
}
}
return name;
} | @Test
void PDFBox4076() throws IOException
{
String special = "中国你好!";
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (PDDocument document = new PDDocument())
{
PDPage page = new PDPage();
document.addPage(page);
document.getDocumentCatalog().getCOSObject().setString(COSName.getPDFName(special), special);
document.save(baos);
}
try (PDDocument document = Loader.loadPDF(baos.toByteArray()))
{
COSDictionary catalogDict = document.getDocumentCatalog().getCOSObject();
assertTrue(catalogDict.containsKey(special));
assertEquals(special, catalogDict.getString(special));
}
} |
@Override
public boolean onTouchEvent(MotionEvent ev) {
if (isEnabled()) return super.onTouchEvent(ev);
else return false;
} | @Test
public void testOnTouchEventDisabled() throws Exception {
mUnderTest.setEnabled(false);
Assert.assertFalse(
mUnderTest.onTouchEvent(MotionEvent.obtain(10, 10, MotionEvent.ACTION_DOWN, 1f, 1f, 0)));
} |
static void addGetCharacteristicMethod(final String characteristicVariableName,
final Characteristic characteristic,
final List<Field<?>> fields,
final ClassOrInterfaceDeclaration characteristicsTemplate) {
BlockStmt toAdd = getCharacteristicVariableDeclaration(characteristicVariableName, characteristic, fields);
toAdd.addStatement(new ReturnStmt(characteristicVariableName));
final MethodDeclaration methodDeclaration =
characteristicsTemplate.addMethod("get" + characteristicVariableName).setBody(toAdd);
methodDeclaration.setType(KiePMMLCharacteristic.class);
methodDeclaration.setModifiers(Modifier.Keyword.PRIVATE, Modifier.Keyword.STATIC);
} | @Test
void addGetCharacteristicMethod() throws IOException {
final String characteristicName = "CharacteristicName";
String expectedMethod = "get" + characteristicName;
assertThat(characteristicsTemplate.getMethodsByName(expectedMethod)).isEmpty();
KiePMMLCharacteristicsFactory.addGetCharacteristicMethod(characteristicName,
basicComplexPartialScoreFirstCharacteristic,
getFieldsFromDataDictionary(basicComplexPartialScoreDataDictionary),
characteristicsTemplate);
assertThat(characteristicsTemplate.getMethodsByName(expectedMethod)).hasSize(1);
MethodDeclaration retrieved = characteristicsTemplate.getMethodsByName(expectedMethod).get(0);
String text = getFileContent(TEST_01_SOURCE);
MethodDeclaration expected = JavaParserUtils
.parseMethod(String.format(text, characteristicName));
assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue();
List<Class<?>> imports = Arrays.asList(KiePMMLApply.class,
KiePMMLAttribute.class,
KiePMMLCharacteristic.class,
KiePMMLCharacteristics.class,
KiePMMLComplexPartialScore.class,
KiePMMLCompoundPredicate.class,
KiePMMLConstant.class,
KiePMMLFieldRef.class,
KiePMMLSimplePredicate.class,
KiePMMLSimpleSetPredicate.class,
KiePMMLTruePredicate.class,
Arrays.class,
Collections.class);
commonValidateCompilationWithImports(retrieved, imports);
} |
@Override
public void upgrade() {
final DBCollection collection = mongoConnection.getDatabase().getCollection("preflight");
final WriteResult result = collection.update(
new BasicDBObject("result", new BasicDBObject("$exists", true)),
new BasicDBObject(Map.of(
"$set", new BasicDBObject("type", "preflight_result"),
"$rename", new BasicDBObject("result", "value")
)),
false,
true);
collection.update(
new BasicDBObject("type", "preflight_password"),
new BasicDBObject("$setOnInsert", new BasicDBObject(Map.of(
"type", "preflight_password",
"value", RandomStringUtils.randomAlphabetic(PreflightConstants.INITIAL_PASSWORD_LENGTH)
))),
true,
false);
} | @Test
@MongoDBFixtures({"V20230929142900_CreateInitialPreflightPassword/old_preflight_config_structure.json"})
void testMigrateConfigCreatePassword() {
Assertions.assertThat(collection.countDocuments()).isEqualTo(1); // the old format of configuration, one doc
migration.upgrade();
Assertions.assertThat(collection.countDocuments()).isEqualTo(2); // migrated result doc plus the newly created password doc
String result = (String) collection.find(Filters.eq("type", "preflight_result")).first().get("value");
Assertions.assertThat(result).isEqualTo("FINISHED");
String password = (String) collection.find(Filters.eq("type", "preflight_password")).first().get("value");
Assertions.assertThat(password).hasSizeGreaterThanOrEqualTo(PreflightConstants.INITIAL_PASSWORD_LENGTH);
} |
@SuppressWarnings("MethodDoesntCallSuperMethod")
@Override
public DataSchema clone() {
return new DataSchema(_columnNames.clone(), _columnDataTypes.clone());
} | @Test
public void testClone() {
DataSchema dataSchema = new DataSchema(COLUMN_NAMES, COLUMN_DATA_TYPES);
DataSchema dataSchemaClone = dataSchema.clone();
Assert.assertEquals(dataSchema, dataSchemaClone);
Assert.assertEquals(dataSchema.hashCode(), dataSchemaClone.hashCode());
} |
public void removeByInstanceIdAndIdNot(String instanceId, String appSessionId) {
repository.findAllByInstanceFlow(AuthenticateLoginFlow.NAME + instanceId ).stream()
.filter(s -> !s.getId().equals(appSessionId))
.forEach(s -> removeById(s.getId()));
} | @Test
void removeByInstanceIdAndIdNotTest() {
// persist app session
AppSession session = new AppSession();
session.setId(T_APP_SESSION_ID);
session.setFlow(AuthenticateLoginFlow.NAME);
session.setState("AUTHENTICATED");
session.setUserAppId(T_USER_APP_ID);
session.setInstanceId(T_INSTANCE_ID);
session.setDeviceName(T_DEVICE_NAME);
repository.save(session);
// Given app session is created
assertTrue(repository.findById(T_APP_SESSION_ID).isPresent());
// Should not be removed when removing with same instanceId and appSessionId
service.removeByInstanceIdAndIdNot(T_INSTANCE_ID, T_APP_SESSION_ID);
assertTrue(repository.findById(T_APP_SESSION_ID).isPresent());
// Old session should be removed when removing with same instanceId and new appSessionId
service.removeByInstanceIdAndIdNot(T_INSTANCE_ID, T_APP_SESSION_ID + "1");
assertFalse(repository.findById(T_APP_SESSION_ID).isPresent());
} |
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
SourceConfig mergedConfig = clone(existingConfig);
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getTopicName())) {
mergedConfig.setTopicName(newConfig.getTopicName());
}
if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
}
if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
mergedConfig.setSchemaType(newConfig.getSchemaType());
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
}
if (newConfig.getBatchSourceConfig() != null) {
validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Function Names differ")
public void testMergeDifferentName() {
SourceConfig sourceConfig = createSourceConfig();
SourceConfig newSourceConfig = createUpdatedSourceConfig("name", "Different");
SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig);
} |
@Override
public String getMessage() {
if (!logPhi) {
return super.getMessage();
}
String answer;
if (hasHl7MessageBytes() || hasHl7AcknowledgementBytes()) {
String parentMessage = super.getMessage();
StringBuilder messageBuilder = new StringBuilder(
parentMessage.length()
+ (hasHl7MessageBytes() ? hl7MessageBytes.length : 0)
+ (hasHl7AcknowledgementBytes()
? hl7AcknowledgementBytes.length : 0));
messageBuilder.append(parentMessage);
if (hasHl7MessageBytes()) {
messageBuilder.append("\n\t{hl7Message [")
.append(hl7MessageBytes.length)
.append("] = ");
hl7Util.appendBytesAsPrintFriendlyString(messageBuilder, hl7MessageBytes, 0, hl7MessageBytes.length);
messageBuilder.append('}');
}
if (hasHl7AcknowledgementBytes()) {
messageBuilder.append("\n\t{hl7Acknowledgement [")
.append(hl7AcknowledgementBytes.length)
.append("] = ");
hl7Util.appendBytesAsPrintFriendlyString(messageBuilder, hl7AcknowledgementBytes, 0,
hl7AcknowledgementBytes.length);
messageBuilder.append('}');
}
answer = messageBuilder.toString();
} else {
answer = super.getMessage();
}
return answer;
} | @Test
public void testNullHl7Acknowledgement() {
instance = new MllpException(EXCEPTION_MESSAGE, HL7_MESSAGE_BYTES, NULL_BYTE_ARRAY, LOG_PHI_TRUE);
assertEquals(expectedMessage(HL7_MESSAGE, null), instance.getMessage());
} |
public static void mergeCacheProperties(JSONObject properties) {
if (properties == null || mCacheProperties == null || mCacheProperties.length() == 0) {
return;
}
JSONUtils.mergeJSONObject(mCacheProperties, properties);
mCacheProperties = null;
} | @Test
public void mergeCacheProperties() {
DeepLinkManager.mergeCacheProperties(null);
} |
public void validateUrl(String serverUrl) {
HttpUrl url = buildUrl(serverUrl, "/rest/api/1.0/repos");
doGet("", url, body -> buildGson().fromJson(body, RepositoryList.class));
} | @Test
public void fail_validate_url_when_validate_url_return_non_json_payload() {
server.enqueue(new MockResponse().setResponseCode(400)
.setBody("this is not a json payload"));
String serverUrl = server.url("/").toString();
assertThatThrownBy(() -> underTest.validateUrl(serverUrl))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Unable to contact Bitbucket server");
} |
@Override
public DevOpsProjectCreationContext create(AlmSettingDto almSettingDto, DevOpsProjectDescriptor devOpsProjectDescriptor) {
String url = requireNonNull(almSettingDto.getUrl(), "DevOps Platform url cannot be null");
Long gitlabProjectId = getGitlabProjectId(devOpsProjectDescriptor);
String pat = findPersonalAccessTokenOrThrow(almSettingDto);
Project gitlabProject = fetchGitlabProject(url, pat, gitlabProjectId);
String defaultBranchName = getDefaultBranchOnGitlab(url, pat, gitlabProjectId).orElse(null);
return new DevOpsProjectCreationContext(gitlabProject.getName(), gitlabProject.getPathWithNamespace(),
String.valueOf(gitlabProjectId), gitlabProject.getVisibility().equals("public"), defaultBranchName, almSettingDto, userSession);
} | @Test
void create_whenGitlabProjectIdIsInvalid_throws() {
DevOpsProjectDescriptor devOpsProjectDescriptor = mock();
when(devOpsProjectDescriptor.repositoryIdentifier()).thenReturn("invalid");
assertThatIllegalArgumentException()
.isThrownBy(() -> gitlabDevOpsProjectService.create(ALM_SETTING_DTO, devOpsProjectDescriptor))
.withMessage("GitLab project identifier must be a number, was 'invalid'");
} |
public static <T> void forward(CompletableFuture<T> source, CompletableFuture<T> target) {
source.whenComplete(forwardTo(target));
} | @Test
void testForwardNormal() throws Exception {
final CompletableFuture<String> source = new CompletableFuture<>();
final CompletableFuture<String> target = new CompletableFuture<>();
FutureUtils.forward(source, target);
assertThat(source).isNotDone();
assertThat(target).isNotDone();
source.complete("foobar");
assertThat(source).isDone();
assertThat(target).isDone();
assertThat(source.get()).isEqualTo(target.get());
} |
@Override
public ConfigHistoryInfo detailPreviousConfigHistory(Long id) {
HistoryConfigInfoMapper historyConfigInfoMapper = mapperManager.findMapper(
dataSourceService.getDataSourceType(), TableConstant.HIS_CONFIG_INFO);
MapperContext context = new MapperContext();
context.putWhereParameter(FieldConstant.ID, id);
MapperResult sqlFetchRows = historyConfigInfoMapper.detailPreviousConfigHistory(context);
try {
ConfigHistoryInfo historyInfo = jt.queryForObject(sqlFetchRows.getSql(),
sqlFetchRows.getParamList().toArray(), HISTORY_DETAIL_ROW_MAPPER);
return historyInfo;
} catch (EmptyResultDataAccessException emptyResultDataAccessException) {
return null;
} catch (DataAccessException e) {
LogUtil.FATAL_LOG.error("[detail-previous-config-history] error, id:{}", new Object[] {id}, e);
throw e;
}
} | @Test
void testDetailPreviousConfigHistory() {
long nid = 256789;
//mock query
ConfigHistoryInfo mockConfigHistoryInfo = createMockConfigHistoryInfo(0);
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {nid}), eq(HISTORY_DETAIL_ROW_MAPPER)))
.thenReturn(mockConfigHistoryInfo);
//execute & verify
ConfigHistoryInfo historyReturn = externalHistoryConfigInfoPersistService.detailPreviousConfigHistory(nid);
assertEquals(mockConfigHistoryInfo, historyReturn);
//mock exception EmptyResultDataAccessException
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {nid}), eq(HISTORY_DETAIL_ROW_MAPPER)))
.thenThrow(new EmptyResultDataAccessException(1));
ConfigHistoryInfo historyReturnNull = externalHistoryConfigInfoPersistService.detailPreviousConfigHistory(nid);
assertNull(historyReturnNull);
//mock exception CannotGetJdbcConnectionException
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {nid}), eq(HISTORY_DETAIL_ROW_MAPPER)))
.thenThrow(new CannotGetJdbcConnectionException("conn error111"));
try {
externalHistoryConfigInfoPersistService.detailPreviousConfigHistory(nid);
assertTrue(false);
} catch (Exception e) {
assertEquals("conn error111", e.getMessage());
}
} |
@VisibleForTesting
Object[] callHttpService( RowMetaInterface rowMeta, Object[] rowData ) throws KettleException {
HttpClientManager.HttpClientBuilderFacade clientBuilder = HttpClientManager.getInstance().createBuilder();
if ( data.realConnectionTimeout > -1 ) {
clientBuilder.setConnectionTimeout( data.realConnectionTimeout );
}
if ( data.realSocketTimeout > -1 ) {
clientBuilder.setSocketTimeout( data.realSocketTimeout );
}
if ( StringUtils.isNotBlank( data.realHttpLogin ) ) {
clientBuilder.setCredentials( data.realHttpLogin, data.realHttpPassword );
}
if ( StringUtils.isNotBlank( data.realProxyHost ) ) {
clientBuilder.setProxy( data.realProxyHost, data.realProxyPort );
}
CloseableHttpClient httpClient = clientBuilder.build();
// Prepare HTTP get
URI uri = null;
try {
URIBuilder uriBuilder = constructUrlBuilder( rowMeta, rowData );
uri = uriBuilder.build();
HttpGet method = new HttpGet( uri );
// Add Custom HTTP headers
if ( data.useHeaderParameters ) {
for ( int i = 0; i < data.header_parameters_nrs.length; i++ ) {
method.addHeader( data.headerParameters[ i ].getName(), data.inputRowMeta.getString( rowData,
data.header_parameters_nrs[ i ] ) );
if ( isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "HTTPDialog.Log.HeaderValue",
data.headerParameters[ i ].getName(), data.inputRowMeta
.getString( rowData, data.header_parameters_nrs[ i ] ) ) );
}
}
}
Object[] newRow = null;
if ( rowData != null ) {
newRow = rowData.clone();
}
// Execute request
CloseableHttpResponse httpResponse = null;
try {
// used for calculating the responseTime
long startTime = System.currentTimeMillis();
HttpHost target = new HttpHost( uri.getHost(), uri.getPort(), uri.getScheme() );
// Create AuthCache instance
AuthCache authCache = new BasicAuthCache();
// Generate BASIC scheme object and add it to the local
// auth cache
BasicScheme basicAuth = new BasicScheme();
authCache.put( target, basicAuth );
// Add AuthCache to the execution context
HttpClientContext localContext = HttpClientContext.create();
localContext.setAuthCache( authCache );
// Preemptive authentication
if ( StringUtils.isNotBlank( data.realProxyHost ) ) {
httpResponse = httpClient.execute( target, method, localContext );
} else {
httpResponse = httpClient.execute( method, localContext );
}
// calculate the responseTime
long responseTime = System.currentTimeMillis() - startTime;
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "HTTP.Log.ResponseTime", responseTime, uri ) );
}
int statusCode = requestStatusCode( httpResponse );
// The status code
if ( isDebug() ) {
logDebug( BaseMessages.getString( PKG, "HTTP.Log.ResponseStatusCode", "" + statusCode ) );
}
String body;
switch ( statusCode ) {
case HttpURLConnection.HTTP_UNAUTHORIZED:
throw new KettleStepException( BaseMessages
.getString( PKG, "HTTP.Exception.Authentication", data.realUrl ) );
case -1:
throw new KettleStepException( BaseMessages
.getString( PKG, "HTTP.Exception.IllegalStatusCode", data.realUrl ) );
case HttpURLConnection.HTTP_NO_CONTENT:
body = "";
break;
default:
HttpEntity entity = httpResponse.getEntity();
if ( entity != null ) {
body = StringUtils.isEmpty( meta.getEncoding() ) ? EntityUtils.toString( entity ) : EntityUtils.toString( entity, meta.getEncoding() );
} else {
body = "";
}
break;
}
Header[] headers = searchForHeaders( httpResponse );
JSONObject json = new JSONObject();
for ( Header header : headers ) {
Object previousValue = json.get( header.getName() );
if ( previousValue == null ) {
json.put( header.getName(), header.getValue() );
} else if ( previousValue instanceof List ) {
List<String> list = (List<String>) previousValue;
list.add( header.getValue() );
} else {
ArrayList<String> list = new ArrayList<String>();
list.add( (String) previousValue );
list.add( header.getValue() );
json.put( header.getName(), list );
}
}
String headerString = json.toJSONString();
int returnFieldsOffset = rowMeta.size();
if ( !Utils.isEmpty( meta.getFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, body );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResultCodeFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, Long.valueOf( statusCode ) );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResponseTimeFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, Long.valueOf( responseTime ) );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResponseHeaderFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, headerString );
}
} finally {
if ( httpResponse != null ) {
httpResponse.close();
}
// Release current connection to the connection pool once you are done
method.releaseConnection();
}
return newRow;
} catch ( UnknownHostException uhe ) {
throw new KettleException( BaseMessages.getString( PKG, "HTTP.Error.UnknownHostException", uhe.getMessage() ) );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString( PKG, "HTTP.Log.UnableGetResult", uri ), e );
}
} | @Test
public void testCallHttpServiceWasCalledWithContext() throws Exception {
try ( MockedStatic<HttpClientManager> httpClientManagerMockedStatic = mockStatic( HttpClientManager.class ) ) {
httpClientManagerMockedStatic.when( HttpClientManager::getInstance ).thenReturn( manager );
doReturn( null ).when( meta ).getEncoding();
http.callHttpService( rmi, new Object[] { 0 } );
verify( client, times( 1 ) ).execute( any( HttpGet.class ), any( HttpClientContext.class ) );
}
} |
public static Builder custom() {
return new Builder();
} | @Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalMaxConcurrent() {
BulkheadConfig.custom()
.maxConcurrentCalls(-1)
.build();
} |
@Override
public long remaining() {
return mLength - mPosition;
} | @Test
public void remaining() throws IOException, AlluxioException {
AlluxioURI ufsPath = getUfsPath();
createFile(ufsPath, CHUNK_SIZE);
try (FileInStream inStream = getStream(ufsPath)) {
assertEquals(CHUNK_SIZE, inStream.remaining());
assertEquals(0, inStream.read());
assertEquals(CHUNK_SIZE - 1, inStream.remaining());
int len = CHUNK_SIZE / 2;
assertEquals(len, inStream.read(new byte[len], 0, len));
assertEquals(CHUNK_SIZE - len - 1, inStream.remaining());
len = CHUNK_SIZE / 4;
inStream.read(ByteBuffer.allocate(len), 0, len);
assertEquals(CHUNK_SIZE / 4 - 1, inStream.remaining());
}
} |
@Override
public OFAgent removeAgent(NetworkId networkId) {
checkNotNull(networkId, ERR_NULL_NETID);
synchronized (this) {
OFAgent existing = ofAgentStore.ofAgent(networkId);
if (existing == null) {
final String error = String.format(MSG_OFAGENT, networkId, ERR_NOT_EXIST);
throw new IllegalStateException(error);
}
if (existing.state() == STARTED) {
final String error = String.format(MSG_OFAGENT, networkId, ERR_IN_USE);
throw new IllegalStateException(error);
}
log.info(String.format(MSG_OFAGENT, networkId, MSG_REMOVED));
return ofAgentStore.removeOfAgent(networkId);
}
} | @Test(expected = NullPointerException.class)
public void testRemoveNullAgent() {
target.removeAgent(null);
} |
public Set<String> getMatchKeys() {
return routerConfig.isUseRequestRouter() ? requestTags : RuleUtils.getMatchKeys();
} | @Test
public void testGetMatchKeysWhenUseRequestRouter() {
config.setUseRequestRouter(true);
Match match = new Match();
match.setHeaders(Collections.singletonMap("bar", Collections.singletonList(new MatchRule())));
Rule rule = new Rule();
rule.setMatch(match);
EntireRule entireRule = new EntireRule();
entireRule.setKind(RouterConstant.FLOW_MATCH_KIND);
entireRule.setRules(Collections.singletonList(rule));
RouterConfiguration configuration = new RouterConfiguration();
configuration.updateServiceRule("foo", Collections.singletonList(entireRule));
RuleUtils.initKeys(configuration);
DubboConfigServiceImpl dubboConfigService = new DubboConfigServiceImpl();
Set<String> headerKeys = dubboConfigService.getMatchKeys();
Assert.assertEquals(3, headerKeys.size());
// Clear the cache
RuleUtils.initMatchKeys(new RouterConfiguration());
configuration.updateServiceRule("foo", Collections.singletonList(entireRule));
RuleUtils.initKeys(configuration);
SpringConfigServiceImpl springConfigService = new SpringConfigServiceImpl();
headerKeys = springConfigService.getMatchKeys();
Assert.assertEquals(3, headerKeys.size());
} |
public String render(Object o) {
StringBuilder result = new StringBuilder(template.length());
render(o, result);
return result.toString();
} | @Test
public void canSubstituteMultipleValuesFromLists() {
Template template = new Template(
"Hello {{#getValues}}{{toString}},{{/getValues}} {{getPrivateNonStringValue}} " +
"{{#getValues}}{{toString}}.{{/getValues}} {{getPrivateValue}} ");
assertEquals("Hello 1,2,3, 3 1.2.3. World ", template.render(foo));
} |
public static Map<String, Object> toValueMap(ReferenceMap m, Map<String, ValueReference> parameters) {
final ImmutableMap.Builder<String, Object> mapBuilder = ImmutableMap.builder();
for (Map.Entry<String, Reference> entry : m.entrySet()) {
final Object value = valueOf(entry.getValue(), parameters);
if (value != null) {
mapBuilder.put(entry.getKey(), value);
}
}
return mapBuilder.build();
} | @Test
public void toValueMap() {
final Map<String, ValueReference> parameters = ImmutableMap.<String, ValueReference>builder()
.put("BOOLEAN", ValueReference.of(true))
.put("FLOAT", ValueReference.of(1.0f))
.put("INTEGER", ValueReference.of(42))
.put("STRING", ValueReference.of("String"))
.put("ENUM", ValueReference.of(TestEnum.A))
.build();
final ReferenceMap map = new ReferenceMap(ImmutableMap.<String, Reference>builder()
.put("boolean", ValueReference.of(true))
.put("param_boolean", ValueReference.createParameter("BOOLEAN"))
.put("float", ValueReference.of(1.0f))
.put("param_float", ValueReference.createParameter("FLOAT"))
.put("integer", ValueReference.of(42))
.put("param_integer", ValueReference.createParameter("INTEGER"))
.put("string", ValueReference.of("String"))
.put("param_string", ValueReference.createParameter("STRING"))
.put("enum", ValueReference.of(TestEnum.A))
.put("param_enum", ValueReference.createParameter("ENUM"))
.put("list", new ReferenceList(ImmutableList.of(
ValueReference.of(1),
ValueReference.of(2.0f),
ValueReference.of("3"),
ValueReference.of(true),
ValueReference.createParameter("STRING"))))
.put("nestedList", new ReferenceList(ImmutableList.of(
new ReferenceMap(ImmutableMap.of(
"k1", ValueReference.of("v1"),
"k2", ValueReference.of(2))),
new ReferenceMap(ImmutableMap.of(
"k1", ValueReference.of("v2"),
"k2", ValueReference.of(4))))))
.put("map", new ReferenceMap(ImmutableMap.of(
"k1", ValueReference.of("v1"),
"k2", ValueReference.of(2),
"k3", ValueReference.createParameter("INTEGER"))))
.build());
final ImmutableMap<String, Object> expectedMap = ImmutableMap.<String, Object>builder()
.put("boolean", true)
.put("param_boolean", true)
.put("float", 1.0f)
.put("param_float", 1.0f)
.put("integer", 42)
.put("param_integer", 42)
.put("string", "String")
.put("param_string", "String")
.put("enum", "A")
.put("param_enum", "A")
.put("list", ImmutableList.of(1, 2.0f, "3", true, "String"))
.put("nestedList", ImmutableList.of(
ImmutableMap.of( "k1", "v1", "k2", 2),
ImmutableMap.of( "k1", "v2", "k2", 4)
))
.put("map", ImmutableMap.of(
"k1", "v1",
"k2", 2,
"k3", 42))
.build();
final Map<String, Object> valueReferenceMap = ReferenceMapUtils.toValueMap(map, parameters);
assertThat(valueReferenceMap).isEqualTo(expectedMap);
} |
@Override
public CRTask deserialize(JsonElement json,
Type type,
JsonDeserializationContext context) throws JsonParseException {
return determineJsonElementForDistinguishingImplementers(json, context, TYPE, ARTIFACT_ORIGIN);
} | @Test
public void shouldInstantiateATaskForTypeNant() {
JsonObject jsonObject = new JsonObject();
jsonObject.addProperty("type", "nant");
taskTypeAdapter.deserialize(jsonObject, type, jsonDeserializationContext);
verify(jsonDeserializationContext).deserialize(jsonObject, CRNantTask.class);
} |