focal_method | test_case |
---|---|
public static SchemaAnnotationProcessResult process(List<SchemaAnnotationHandler> handlers,
DataSchema dataSchema, AnnotationProcessOption options)
{
return process(handlers, dataSchema, options, true);
} | @Test(dataProvider = "denormalizedSchemaTestCases_invalid")
public void testDenormalizedSchemaProcessing_invalid(String filePath, String errorMsg) throws Exception
{
DataSchema dataSchema = TestUtil.dataSchemaFromPdlInputStream(getClass().getResourceAsStream(filePath));
PegasusSchemaAnnotationHandlerImpl customAnnotationHandler = new PegasusSchemaAnnotationHandlerImpl(TEST_ANNOTATION_LABEL);
SchemaAnnotationProcessor.SchemaAnnotationProcessResult result =
SchemaAnnotationProcessor.process(Arrays.asList(customAnnotationHandler), dataSchema,
new SchemaAnnotationProcessor.AnnotationProcessOption());
System.out.println(result.getErrorMsgs());
assert(result.hasError());
assert(result.getErrorMsgs().equals(errorMsg));
} |
@Override
public void registerInstance(Service service, Instance instance, String clientId) throws NacosException {
NamingUtils.checkInstanceIsLegal(instance);
Service singleton = ServiceManager.getInstance().getSingleton(service);
if (!singleton.isEphemeral()) {
throw new NacosRuntimeException(NacosException.INVALID_PARAM,
String.format("Current service %s is persistent service, can't register ephemeral instance.",
singleton.getGroupedServiceName()));
}
Client client = clientManager.getClient(clientId);
checkClientIsLegal(client, clientId);
InstancePublishInfo instanceInfo = getPublishInfo(instance);
client.addServiceInstance(singleton, instanceInfo);
client.setLastUpdatedTime();
client.recalculateRevision();
NotifyCenter.publishEvent(new ClientOperationEvent.ClientRegisterServiceEvent(singleton, clientId));
NotifyCenter
.publishEvent(new MetadataEvent.InstanceMetadataEvent(singleton, instanceInfo.getMetadataId(), false));
} | @Test
void testRegisterPersistentInstance() throws NacosException {
assertThrows(NacosRuntimeException.class, () -> {
when(service.isEphemeral()).thenReturn(false);
// Expected exception
ephemeralClientOperationServiceImpl.registerInstance(service, instance, ipPortBasedClientId);
});
} |
public void removeDataSources() {
if (nodesToDel == null || nodesToDel.isEmpty()) {
return;
}
lock.lock();
try {
Map<String, DataSource> map = highAvailableDataSource.getDataSourceMap();
Set<String> copySet = new HashSet<String>(nodesToDel);
for (String nodeName : copySet) {
LOG.info("Start removing Node " + nodeName + ".");
if (!map.containsKey(nodeName)) {
LOG.info("Node " + nodeName + " is NOT existed in the map.");
cancelBlacklistNode(nodeName);
continue;
}
DataSource ds = map.get(nodeName);
if (ds instanceof DruidDataSource) {
DruidDataSource dds = (DruidDataSource) ds;
int activeCount = dds.getActiveCount(); // CAUTION, activeCount MAYBE changed!
if (activeCount > 0) {
LOG.warn("Node " + nodeName + " is still running [activeCount=" + activeCount
+ "], try next time.");
continue;
} else {
LOG.info("Close Node " + nodeName + " and remove it.");
try {
dds.close();
} catch (Exception e) {
LOG.error("Exception occurred while closing Node " + nodeName
+ ", just remove it.", e);
}
}
}
map.remove(nodeName); // Remove the node; a non-DruidDataSource is removed directly without closing.
cancelBlacklistNode(nodeName);
}
} catch (Exception e) {
LOG.error("Exception occurred while removing DataSources.", e);
} finally {
lock.unlock();
}
} | @Test
public void testRemoveDataSources() {
String url = "jdbc:derby:memory:foo;create=true";
String name = "foo";
addNode(url, name);
DruidDataSource ds = (DruidDataSource) haDataSource.getDataSourceMap().get(name);
updater.getNodesToDel().add(name);
haDataSource.addBlackList(name);
updater.removeDataSources();
assertTrue(haDataSource.getDataSourceMap().isEmpty());
assertFalse(haDataSource.isInBlackList(name));
assertTrue(ds.isClosed());
} |
protected static String getReverseZoneNetworkAddress(String baseIp, int range,
int index) throws UnknownHostException {
if (index < 0) {
throw new IllegalArgumentException(
String.format("Invalid index provided, must be positive: %d", index));
}
if (range < 0) {
throw new IllegalArgumentException(
String.format("Invalid range provided, cannot be negative: %d",
range));
}
return calculateIp(baseIp, range, index);
} | @Test
public void testThrowIllegalArgumentExceptionIfIndexIsNegative()
throws Exception {
exception.expect(IllegalArgumentException.class);
ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, -1);
} |
public FifoOrderingPolicyForPendingApps() {
List<Comparator<SchedulableEntity>> comparators =
new ArrayList<Comparator<SchedulableEntity>>();
comparators.add(new RecoveryComparator());
comparators.add(new PriorityComparator());
comparators.add(new FifoComparator());
this.comparator = new CompoundComparator(comparators);
this.schedulableEntities = new ConcurrentSkipListSet<S>(comparator);
} | @Test
public void testFifoOrderingPolicyForPendingApps() {
FifoOrderingPolicyForPendingApps<MockSchedulableEntity> policy =
new FifoOrderingPolicyForPendingApps<MockSchedulableEntity>();
MockSchedulableEntity r1 = new MockSchedulableEntity();
MockSchedulableEntity r2 = new MockSchedulableEntity();
assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(0);
r1.setSerial(1);
r1.setRecovering(true);
assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(-1);
r1.setRecovering(false);
r2.setSerial(2);
r2.setRecovering(true);
assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(1);
} |
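The policy above builds its ordering by chaining three comparators. A minimal, self-contained sketch of the same compound-comparator idea (the class and names are illustrative, not the YARN implementation):
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
// Apply each comparator in order and return the first non-zero result,
// mirroring how the policy layers recovery, priority, and FIFO ordering.
final class CompoundComparator<T> implements Comparator<T> {
    private final List<Comparator<T>> comparators;
    CompoundComparator(List<Comparator<T>> comparators) {
        this.comparators = new ArrayList<>(comparators);
    }
    @Override
    public int compare(T a, T b) {
        for (Comparator<T> c : comparators) {
            int result = c.compare(a, b);
            if (result != 0) {
                return result; // the first decisive comparator wins
            }
        }
        return 0; // all comparators consider the two entities equal
    }
}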
@Override
@Transactional(rollbackFor = Exception.class) // transactional: any exception rolls back the whole import
public UserImportRespVO importUserList(List<UserImportExcelVO> importUsers, boolean isUpdateSupport) {
if (CollUtil.isEmpty(importUsers)) {
throw exception(USER_IMPORT_LIST_IS_EMPTY);
}
UserImportRespVO respVO = UserImportRespVO.builder().createUsernames(new ArrayList<>())
.updateUsernames(new ArrayList<>()).failureUsernames(new LinkedHashMap<>()).build();
importUsers.forEach(importUser -> {
// Validate; record the failure reason if the row is invalid
try {
validateUserForCreateOrUpdate(null, null, importUser.getMobile(), importUser.getEmail(),
importUser.getDeptId(), null);
} catch (ServiceException ex) {
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
// If the user does not exist, insert a new record
AdminUserDO existUser = userMapper.selectByUsername(importUser.getUsername());
if (existUser == null) {
userMapper.insert(BeanUtils.toBean(importUser, AdminUserDO.class)
.setPassword(encodePassword(userInitPassword)).setPostIds(new HashSet<>())); // set the default password and an empty set of post ids
respVO.getCreateUsernames().add(importUser.getUsername());
return;
}
// If the user exists, check whether updating is allowed
if (!isUpdateSupport) {
respVO.getFailureUsernames().put(importUser.getUsername(), USER_USERNAME_EXISTS.getMsg());
return;
}
AdminUserDO updateUser = BeanUtils.toBean(importUser, AdminUserDO.class);
updateUser.setId(existUser.getId());
userMapper.updateById(updateUser);
respVO.getUpdateUsernames().add(importUser.getUsername());
});
return respVO;
} | @Test
public void testImportUserList_01() {
// Prepare parameters
UserImportExcelVO importUser = randomPojo(UserImportExcelVO.class, o -> {
});
// Mock the method to simulate a failure
doThrow(new ServiceException(DEPT_NOT_FOUND)).when(deptService).validateDeptList(any());
// Invoke
UserImportRespVO respVO = userService.importUserList(newArrayList(importUser), true);
// Assert
assertEquals(0, respVO.getCreateUsernames().size());
assertEquals(0, respVO.getUpdateUsernames().size());
assertEquals(1, respVO.getFailureUsernames().size());
assertEquals(DEPT_NOT_FOUND.getMsg(), respVO.getFailureUsernames().get(importUser.getUsername()));
} |
public static StorageInterface make(final PluginRegistry pluginRegistry,
final String pluginId,
final Map<String, Object> pluginConfiguration,
final Validator validator) {
Optional<Class<? extends StorageInterface>> optional = allStorageClasses(pluginRegistry)
.filter(clazz -> Plugin.getId(clazz).map(id -> id.equalsIgnoreCase(pluginId)).orElse(false))
.findFirst();
if (optional.isEmpty()) {
String storageIds = getLoggableStorageIds(pluginRegistry);
throw new KestraRuntimeException(String.format(
"No storage interface can be found for '%s=%s'. Supported types are: %s", KESTRA_STORAGE_TYPE_CONFIG, pluginId, storageIds
));
}
Class<? extends StorageInterface> pluginClass = optional.get();
// Storage plugins are handled like any other serializable/deserializable plugin.
StorageInterface plugin;
try {
// Make sure config is not null, otherwise deserialization result will be null too.
Map<String, Object> nonEmptyConfig = Optional.ofNullable(pluginConfiguration).orElse(Map.of());
plugin = JacksonMapper.toMap(nonEmptyConfig, pluginClass);
} catch (Exception e) {
throw new KestraRuntimeException(String.format(
"Failed to create storage '%s'. Error: %s", pluginId, e.getMessage())
);
}
// Validate configuration.
Set<ConstraintViolation<StorageInterface>> violations;
try {
violations = validator.validate(plugin);
} catch (ConstraintViolationException e) {
throw new KestraRuntimeException(String.format(
"Failed to validate configuration for storage '%s'. Error: %s", pluginId, e.getMessage())
);
}
if (!violations.isEmpty()) {
ConstraintViolationException e = new ConstraintViolationException(violations);
throw new KestraRuntimeException(String.format(
"Invalid configuration for storage '%s'. Error: '%s'", pluginId, e.getMessage()), e
);
}
try {
plugin.init();
} catch (IOException e) {
throw new KestraRuntimeException(String.format(
"Failed to initialize storage '%s'. Error: %s", pluginId, e.getMessage()), e
);
}
return plugin;
} | @Test
void shouldFailedGivenInvalidId() {
Assertions.assertThrows(KestraRuntimeException.class,
() -> StorageInterfaceFactory.make(registry, "invalid", Map.of(), validator));
} |
@Override
public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
LOG.debug("Fetching catalog {}", catalogName);
MCatalog mCat = getMCatalog(catalogName);
if (mCat == null) {
throw new NoSuchObjectException("No catalog " + catalogName);
}
return mCatToCat(mCat);
} | @Test(expected = NoSuchObjectException.class)
public void getNoSuchCatalog() throws MetaException, NoSuchObjectException {
objectStore.getCatalog("no_such_catalog");
} |
public boolean isAggregatable() {
return (state & MASK_AGG) != 0;
} | @Test
public void isAggregatable() {
LacpState state = new LacpState((byte) 0x4);
assertTrue(state.isAggregatable());
} |
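isAggregatable is a single bitmask test against the LACP state byte. A minimal sketch of the pattern, with an illustrative constant matching the 0x4 value exercised by the test:
// A state byte where bit 2 (mask 0x4) marks the aggregatable flag;
// the class and constant are illustrative, not the ONOS implementation.
final class FlagState {
    private static final byte MASK_AGG = 0x4;
    private final byte state;
    FlagState(byte state) {
        this.state = state;
    }
    boolean isAggregatable() {
        // a non-zero AND result means the aggregation bit is set
        return (state & MASK_AGG) != 0;
    }
}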
@Override
protected void doStart() throws Exception {
super.doStart();
try {
plc4XEndpoint.setupConnection();
} catch (PlcConnectionException e) {
if (LOGGER.isTraceEnabled()) {
LOGGER.error("Connection setup failed, stopping Consumer", e);
} else {
LOGGER.error("Connection setup failed, stopping Consumer");
}
doStop();
}
if (trigger == null) {
startUnTriggered();
} else {
startTriggered();
}
} | @Test
public void doStart() {
} |
public static CurlOption parse(String cmdLine) {
List<String> args = ShellWords.parse(cmdLine);
URI url = null;
HttpMethod method = HttpMethod.PUT;
List<Entry<String, String>> headers = new ArrayList<>();
Proxy proxy = NO_PROXY;
while (!args.isEmpty()) {
String arg = args.remove(0);
if (arg.equals("-X")) {
String methodArg = removeArgFor(arg, args);
method = HttpMethod.parse(methodArg);
} else if (arg.equals("-H")) {
String headerArg = removeArgFor(arg, args);
SimpleEntry<String, String> e = parseHeader(headerArg);
headers.add(e);
} else if (arg.equals("-x")) {
String proxyArg = removeArgFor(arg, args);
proxy = parseProxy(proxyArg);
} else {
if (url != null) {
throw new IllegalArgumentException("'" + cmdLine + "' was not a valid curl command");
}
url = parseUrl(arg);
}
}
if (url == null) {
throw new IllegalArgumentException("'" + cmdLine + "' was not a valid curl command");
}
return new CurlOption(proxy, method, url, headers);
} | @Test
public void must_provide_valid_proxy_protocol() {
String uri = "https://example.com -x no-such-protocol://proxy.example.com:3129";
IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> CurlOption.parse(uri));
assertThat(exception.getMessage(),
is("'no-such-protocol://proxy.example.com:3129' did not have a valid proxy protocol"));
} |
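CurlOption.parse consumes tokens from the front of the argument list, with each flag pulling its value off as the next token. A minimal sketch of that consume-from-the-front loop under illustrative names (not the Cucumber implementation):
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
final class ArgScanner {
    // A flag's value is simply the next token; failing loudly when it is
    // missing matches the parser's strictness above.
    static String removeArgFor(String arg, List<String> args) {
        if (args.isEmpty()) {
            throw new IllegalArgumentException("Missing argument for " + arg);
        }
        return args.remove(0);
    }
    public static void main(String[] argv) {
        List<String> args = new ArrayList<>(Arrays.asList("-X", "GET", "https://example.com"));
        while (!args.isEmpty()) {
            String arg = args.remove(0);
            if (arg.equals("-X")) {
                System.out.println("method = " + removeArgFor(arg, args));
            } else {
                System.out.println("url = " + arg); // any non-flag token is the URL
            }
        }
    }
}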
protected FairSchedulerQueueInfoList getChildQueues(FSQueue queue,
FairScheduler scheduler) {
// Return null to omit the 'childQueues' field from the REST API response
// when it is empty. We omit the field for consistency with
// CapacitySchedulerQueueInfo, which omits its 'queues' field when empty.
Collection<FSQueue> children = queue.getChildQueues();
if (children.isEmpty()) {
return null;
}
FairSchedulerQueueInfoList list = new FairSchedulerQueueInfoList();
for (FSQueue child : children) {
if (child instanceof FSLeafQueue) {
list.addToQueueInfoList(
new FairSchedulerLeafQueueInfo((FSLeafQueue) child, scheduler));
} else {
list.addToQueueInfoList(
new FairSchedulerQueueInfo(child, scheduler));
}
}
return list;
} | @Test
public void testEmptyChildQueues() {
FairSchedulerConfiguration fsConf = new FairSchedulerConfiguration();
RMContext rmContext = mock(RMContext.class);
PlacementManager placementManager = new PlacementManager();
SystemClock clock = SystemClock.getInstance();
FairScheduler scheduler = mock(FairScheduler.class);
when(scheduler.getConf()).thenReturn(fsConf);
when(scheduler.getConfig()).thenReturn(fsConf);
when(scheduler.getRMContext()).thenReturn(rmContext);
when(rmContext.getQueuePlacementManager()).thenReturn(placementManager);
when(scheduler.getClusterResource()).thenReturn(
Resource.newInstance(1, 1));
when(scheduler.getResourceCalculator()).thenReturn(
new DefaultResourceCalculator());
when(scheduler.getClock()).thenReturn(clock);
AllocationConfiguration allocConf = new AllocationConfiguration(scheduler);
when(scheduler.getAllocationConfiguration()).thenReturn(allocConf);
QueueManager queueManager = new QueueManager(scheduler);
queueManager.initialize();
FSQueue testQueue = queueManager.getLeafQueue("test", true);
FairSchedulerQueueInfo queueInfo =
new FairSchedulerQueueInfo(testQueue, scheduler);
Collection<FairSchedulerQueueInfo> childQueues =
queueInfo.getChildQueues();
Assert.assertNotNull(childQueues);
Assert.assertEquals("Child QueueInfo was not empty", 0, childQueues.size());
} |
public void setWriteTimeout(int writeTimeout) {
this.writeTimeout = writeTimeout;
} | @Test
public void testRetryableErrorRetryEnoughTimes() throws IOException {
List<MockLowLevelHttpResponse> responses = new ArrayList<>();
final int retries = 10;
// The underlying http library calls a response's getStatusCode method multiple times, and for
// a given response the method should always return the same value. Therefore this test cannot
// rely on a single `mockLowLevelResponse` variable reused across responses.
when(mockLowLevelRequest.execute())
.thenAnswer(
new Answer<MockLowLevelHttpResponse>() {
int n = 0;
@Override
public MockLowLevelHttpResponse answer(InvocationOnMock invocation) throws Throwable {
MockLowLevelHttpResponse response = mock(MockLowLevelHttpResponse.class);
responses.add(response);
when(response.getStatusCode()).thenReturn(n++ < retries ? 503 : 9999);
return response;
}
});
Storage.Buckets.Get result = storage.buckets().get("test");
try {
result.executeUnparsed();
fail();
} catch (IOException e) {
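// expected: the retries are exhausted and the request ultimately fails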
}
verify(mockHttpResponseInterceptor).interceptResponse(any(HttpResponse.class));
verify(mockLowLevelRequest, atLeastOnce()).addHeader(anyString(), anyString());
verify(mockLowLevelRequest, times(retries + 1)).setTimeout(anyInt(), anyInt());
verify(mockLowLevelRequest, times(retries + 1)).setWriteTimeout(anyInt());
verify(mockLowLevelRequest, times(retries + 1)).execute();
assertThat(responses, Matchers.hasSize(retries + 1));
for (MockLowLevelHttpResponse response : responses) {
verify(response, atLeastOnce()).getStatusCode();
}
expectedLogs.verifyWarn("performed 10 retries due to unsuccessful status codes");
} |
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double check
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
} | @Test
public void testDisplayDataExcludedFromOverriddenBaseClass() {
ExtendsBaseOptions options = PipelineOptionsFactory.as(ExtendsBaseOptions.class);
options.setFoo("bar");
DisplayData displayData = DisplayData.from(options);
assertThat(displayData, not(hasDisplayItem(hasNamespace(BaseOptions.class))));
} |
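The as() method above is a classic check/lock/re-check cache: read without locking, and only synchronize (and re-check) on a miss so the expensive proxy construction runs at most once per interface. A minimal sketch of the same double-checked pattern, assuming a simple factory callback (names are illustrative):
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;
final class ProxyCache {
    private final ConcurrentHashMap<Class<?>, Object> cache = new ConcurrentHashMap<>();
    <T> T getOrCreate(Class<T> iface, Supplier<T> factory) {
        Object existing = cache.get(iface); // fast path: no lock
        if (existing == null) {
            synchronized (this) {
                existing = cache.get(iface); // double check under the lock
                if (existing == null) {
                    existing = factory.get(); // expensive construction, done once
                    cache.put(iface, existing);
                }
            }
        }
        return iface.cast(existing);
    }
}
With a ConcurrentHashMap alone, computeIfAbsent would collapse this into one call; the explicit lock here mirrors the structure of the original method.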
public static int toMonths(int year, int months)
{
try {
return addExact(multiplyExact(year, 12), months);
}
catch (ArithmeticException e) {
throw new IllegalArgumentException(e);
}
} | @Test
public void testFormat()
{
assertMonths(0, "0-0");
assertMonths(toMonths(0, 0), "0-0");
assertMonths(3, "0-3");
assertMonths(-3, "-0-3");
assertMonths(toMonths(0, 3), "0-3");
assertMonths(toMonths(0, -3), "-0-3");
assertMonths(28, "2-4");
assertMonths(-28, "-2-4");
assertMonths(toMonths(2, 4), "2-4");
assertMonths(toMonths(-2, -4), "-2-4");
assertMonths(Integer.MAX_VALUE, "178956970-7");
assertMonths(Integer.MIN_VALUE + 1, "-178956970-7");
assertMonths(Integer.MIN_VALUE, "-178956970-8");
} |
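toMonths leans on the JDK's overflow-checked arithmetic rather than silently wrapping. A small sketch showing how Math.multiplyExact/addExact surface overflow as ArithmeticException, which the method rewraps as IllegalArgumentException:
// Standalone demo of the overflow behavior; Math.addExact and
// Math.multiplyExact have been in the JDK since Java 8.
public class MonthsDemo {
    static int toMonths(int year, int months) {
        try {
            return Math.addExact(Math.multiplyExact(year, 12), months);
        } catch (ArithmeticException e) {
            throw new IllegalArgumentException(e);
        }
    }
    public static void main(String[] args) {
        System.out.println(toMonths(2, 4)); // 28
        toMonths(Integer.MAX_VALUE, 0);     // throws: 2147483647 * 12 overflows int
    }
}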
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
} | @Test
public void shouldMatchNestedGenericMethodWithMultipleGenerics() {
// Given:
final ArrayType generic = ArrayType.of(GenericType.of("A"));
givenFunctions(
function(EXPECTED, -1, generic, generic)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(SqlArray.of(SqlTypes.INTEGER)), SqlArgument.of(SqlArray.of(SqlTypes.INTEGER))));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
} |
@Override
public Object clone() {
try {
SampleSaveConfiguration clone = (SampleSaveConfiguration)super.clone();
if(this.dateFormat != null) {
clone.timestampFormatter = (FastDateFormat)this.threadSafeLenientFormatter().clone();
}
return clone;
}
catch(CloneNotSupportedException e) {
throw new RuntimeException("Should not happen",e);
}
} | @Test
public void testClone() throws Exception {
SampleSaveConfiguration a = new SampleSaveConfiguration();
a.setUrl(false);
a.setAssertions(true);
a.setDefaultDelimiter();
a.setDefaultTimeStampFormat();
a.setDataType(true);
assertFalse(a.saveUrl());
assertNotNull(a.getDelimiter());
assertTrue(a.saveAssertions());
assertTrue(a.saveDataType());
// Original and clone should be equal
SampleSaveConfiguration cloneA = (SampleSaveConfiguration) a.clone();
assertNotSame(a, cloneA);
assertEquals(a, cloneA);
assertTrue(a.equals(cloneA));
assertTrue(cloneA.equals(a));
assertEquals(a.hashCode(), cloneA.hashCode());
// Change the original
a.setUrl(true);
assertFalse(a.equals(cloneA));
assertFalse(cloneA.equals(a));
assertFalse(a.hashCode() == cloneA.hashCode());
// Change the original back again
a.setUrl(false);
assertEquals(a, cloneA);
assertTrue(a.equals(cloneA));
assertTrue(cloneA.equals(a));
assertEquals(a.hashCode(), cloneA.hashCode());
} |
public static RedisSinkConfig load(String yamlFile) throws IOException {
ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
return mapper.readValue(new File(yamlFile), RedisSinkConfig.class);
} | @Test
public final void loadFromMapTest() throws IOException {
Map<String, Object> map = new HashMap<String, Object>();
map.put("redisHosts", "localhost:6379");
map.put("redisPassword", "fake@123");
map.put("redisDatabase", "1");
map.put("clientMode", "Standalone");
map.put("operationTimeout", "2000");
map.put("batchSize", "100");
map.put("batchTimeMs", "1000");
map.put("connectTimeout", "3000");
SinkContext sinkContext = Mockito.mock(SinkContext.class);
RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext);
assertNotNull(config);
assertEquals(config.getRedisHosts(), "localhost:6379");
assertEquals(config.getRedisPassword(), "fake@123");
assertEquals(config.getRedisDatabase(), Integer.parseInt("1"));
assertEquals(config.getClientMode(), "Standalone");
assertEquals(config.getOperationTimeout(), Long.parseLong("2000"));
assertEquals(config.getBatchSize(), Integer.parseInt("100"));
assertEquals(config.getBatchTimeMs(), Long.parseLong("1000"));
assertEquals(config.getConnectTimeout(), Long.parseLong("3000"));
} |
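RedisSinkConfig.load is the standard Jackson-YAML binding pattern. A minimal generic sketch, assuming jackson-dataformat-yaml is on the classpath and binding to a caller-supplied POJO class:
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import java.io.File;
import java.io.IOException;
final class YamlConfigLoader {
    // Parse a YAML file and bind it onto any config POJO; field names in the
    // YAML must match the POJO's properties, as in RedisSinkConfig above.
    static <T> T load(String yamlFile, Class<T> type) throws IOException {
        ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
        return mapper.readValue(new File(yamlFile), type);
    }
}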
@CanIgnoreReturnValue
public final Ordered containsExactlyElementsIn(@Nullable Iterable<?> expected) {
return containsExactlyElementsIn(expected, false);
} | @Test
@SuppressWarnings("ContainsExactlyElementsInWithVarArgsToExactly")
public void iterableContainsExactlyElementsInIterable() {
assertThat(asList(1, 2)).containsExactlyElementsIn(asList(1, 2));
expectFailureWhenTestingThat(asList(1, 2)).containsExactlyElementsIn(asList(1, 2, 4));
assertFailureValue("missing (1)", "4");
} |
public int getLength() {
return length;
} | @Test
public void testGetLength() {
lz4CompressData.setLength(6);
Assertions.assertEquals(lz4CompressData.getLength(), 6);
} |
protected String getSimpleTypeNodeTextValue(JsonNode jsonNode) {
if (!isSimpleTypeNode(jsonNode)) {
throw new IllegalArgumentException("Parameter does not contains a simple type");
}
return jsonNode.get(VALUE).textValue();
} | @Test
public void getSimpleTypeNodeTextValue_intNode() {
ObjectNode jsonNode = new ObjectNode(factory);
jsonNode.set(VALUE, new IntNode(10));
assertThat(expressionEvaluator.getSimpleTypeNodeTextValue(jsonNode)).isNull();
} |
public static FieldScope none() {
return FieldScopeImpl.none();
} | @Test
public void testFieldScopes_none_withAnyField() {
String typeUrl =
isProto3()
? "type.googleapis.com/com.google.common.truth.extensions.proto.SubTestMessage3"
: "type.googleapis.com/com.google.common.truth.extensions.proto.SubTestMessage2";
Message message = parse("o_int: 3 o_any_message { [" + typeUrl + "]: { r_string: \"foo\" } }");
Message diffMessage =
parse("o_int: 5 o_any_message { [" + typeUrl + "]: { r_string: \"bar\" } }");
expectThat(diffMessage).ignoringFieldScope(FieldScopes.none()).isNotEqualTo(message);
expectThat(diffMessage).withPartialScope(FieldScopes.none()).isEqualTo(message);
expectFailureWhenTesting()
.that(diffMessage)
.withPartialScope(FieldScopes.none())
.isNotEqualTo(message);
expectIsNotEqualToFailed();
expectThatFailure().hasMessageThat().contains("ignored: o_int");
expectThatFailure().hasMessageThat().contains("ignored: o_any_message");
} |
@Override
public Optional<Decision> onMemoryUsageChanged(
int numTotalRequestedBuffers, int currentPoolSize) {
return numTotalRequestedBuffers < currentPoolSize * releaseThreshold
? Optional.of(Decision.NO_ACTION)
: Optional.empty();
} | @Test
void testOnUsedMemoryBelowThreshold() {
Optional<Decision> memoryUsageChangedDecision = spillStrategy.onMemoryUsageChanged(5, 10);
assertThat(memoryUsageChangedDecision).hasValue(Decision.NO_ACTION);
} |
@Override
public String encrypt(String value) {
return encrypt(value, null);
} | @Test
public void testEncryptionForNullString() {
Encryptor encryptor = new AesEncryptor();
String b64Encrypted = encryptor.encrypt(null);
assertNull(b64Encrypted);
} |
@Override
public void deleteTag(Long id) {
// Validate that the tag exists
validateTagExists(id);
// Validate that no users are attached to the tag
validateTagHasUser(id);
// Delete
memberTagMapper.deleteById(id);
} | @Test
public void testDeleteTag_success() {
// Mock data
MemberTagDO dbTag = randomPojo(MemberTagDO.class);
tagMapper.insert(dbTag); // @Sql: first insert an existing record
// Prepare parameters
Long id = dbTag.getId();
// Invoke
tagService.deleteTag(id);
// Verify that the record no longer exists
assertNull(tagMapper.selectById(id));
} |
public Long dimInsert( RowMetaInterface inputRowMeta, Object[] row, Long technicalKey, boolean newEntry,
Long versionNr, Date dateFrom, Date dateTo ) throws KettleException {
DatabaseMeta databaseMeta = meta.getDatabaseMeta();
if ( data.prepStatementInsert == null
&& data.prepStatementUpdate == null ) { // first time: construct prepared statement
RowMetaInterface insertRowMeta = new RowMeta();
/*
* Construct the SQL statement...
*
* INSERT INTO d_customer(keyfield, versionfield, datefrom, dateto, key[], fieldlookup[], last_updated,
* last_inserted, last_version) VALUES (val_key ,val_version , val_datfrom, val_datto, keynrs[], fieldnrs[],
* last_updated, last_inserted, last_version) ;
*/
String sql = "INSERT INTO " + data.schemaTable + "( ";
if ( !isAutoIncrement() ) {
sql += databaseMeta.quoteField( meta.getKeyField() ) + ", "; // NO
// AUTOINCREMENT
insertRowMeta.addValueMeta( data.outputRowMeta.getValueMeta( inputRowMeta.size() ) ); // the first return value
// after the input
} else {
if ( databaseMeta.needsPlaceHolder() ) {
sql += "0, "; // placeholder on informix!
}
}
sql +=
databaseMeta.quoteField( meta.getVersionField() )
+ ", " + databaseMeta.quoteField( meta.getDateFrom() ) + ", "
+ databaseMeta.quoteField( meta.getDateTo() );
insertRowMeta.addValueMeta( new ValueMetaInteger( meta.getVersionField() ) );
insertRowMeta.addValueMeta( new ValueMetaDate( meta.getDateFrom() ) );
insertRowMeta.addValueMeta( new ValueMetaDate( meta.getDateTo() ) );
for ( int i = 0; i < meta.getKeyLookup().length; i++ ) {
sql += ", " + databaseMeta.quoteField( meta.getKeyLookup()[ i ] );
insertRowMeta.addValueMeta( inputRowMeta.getValueMeta( data.keynrs[ i ] ) );
}
for ( int i = 0; i < meta.getFieldLookup().length; i++ ) {
// Ignore last_version, last_updated etc, they are handled below (at the
// back of the row).
//
if ( !DimensionLookupMeta.isUpdateTypeWithoutArgument( meta.isUpdate(), meta.getFieldUpdate()[ i ] ) ) {
sql += ", " + databaseMeta.quoteField( meta.getFieldLookup()[ i ] );
insertRowMeta.addValueMeta( inputRowMeta.getValueMeta( data.fieldnrs[ i ] ) );
}
}
// Finally, the special update fields...
//
for ( int i = 0; i < meta.getFieldUpdate().length; i++ ) {
ValueMetaInterface valueMeta = null;
switch ( meta.getFieldUpdate()[ i ] ) {
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSUP:
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSERTED:
valueMeta = new ValueMetaDate( meta.getFieldLookup()[ i ] );
break;
case DimensionLookupMeta.TYPE_UPDATE_LAST_VERSION:
valueMeta = new ValueMetaBoolean( meta.getFieldLookup()[ i ] );
break;
default:
break;
}
if ( valueMeta != null ) {
sql += ", " + databaseMeta.quoteField( valueMeta.getName() );
insertRowMeta.addValueMeta( valueMeta );
}
}
sql += ") VALUES (";
if ( !isAutoIncrement() ) {
sql += "?, ";
}
sql += "?, ?, ?";
for ( int i = 0; i < data.keynrs.length; i++ ) {
sql += ", ?";
}
for ( int i = 0; i < meta.getFieldLookup().length; i++ ) {
// Ignore last_version, last_updated, etc. These are handled below...
//
if ( !DimensionLookupMeta.isUpdateTypeWithoutArgument( meta.isUpdate(), meta.getFieldUpdate()[ i ] ) ) {
sql += ", ?";
}
}
// The special update fields...
//
for ( int i = 0; i < meta.getFieldUpdate().length; i++ ) {
switch ( meta.getFieldUpdate()[ i ] ) {
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSUP:
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSERTED:
case DimensionLookupMeta.TYPE_UPDATE_LAST_VERSION:
sql += ", ?";
break;
default:
break;
}
}
sql += " )";
try {
if ( technicalKey == null && databaseMeta.supportsAutoGeneratedKeys() ) {
logDetailed( "SQL w/ return keys=[" + sql + "]" );
data.prepStatementInsert =
data.db.getConnection().prepareStatement(
databaseMeta.stripCR( sql ), Statement.RETURN_GENERATED_KEYS );
} else {
logDetailed( "SQL=[" + sql + "]" );
data.prepStatementInsert = data.db.getConnection().prepareStatement( databaseMeta.stripCR( sql ) );
}
// pstmt=con.prepareStatement(sql, new String[] { "klant_tk" } );
} catch ( SQLException ex ) {
throw new KettleDatabaseException( "Unable to prepare dimension insert :" + Const.CR + sql, ex );
}
/*
* UPDATE d_customer SET dateto = val_datnow, last_updated = <now> last_version = false WHERE keylookup[] =
* keynrs[] AND versionfield = val_version - 1 ;
*/
RowMetaInterface updateRowMeta = new RowMeta();
String sql_upd = "UPDATE " + data.schemaTable + Const.CR;
// The end of the date range
//
sql_upd += "SET " + databaseMeta.quoteField( meta.getDateTo() ) + " = ?" + Const.CR;
updateRowMeta.addValueMeta( new ValueMetaDate( meta.getDateTo() ) );
// The special update fields...
//
for ( int i = 0; i < meta.getFieldUpdate().length; i++ ) {
ValueMetaInterface valueMeta = null;
switch ( meta.getFieldUpdate()[ i ] ) {
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSUP:
case DimensionLookupMeta.TYPE_UPDATE_DATE_UPDATED:
valueMeta = new ValueMetaDate( meta.getFieldLookup()[ i ] );
break;
case DimensionLookupMeta.TYPE_UPDATE_LAST_VERSION:
valueMeta = new ValueMetaBoolean( meta.getFieldLookup()[ i ] );
break;
default:
break;
}
if ( valueMeta != null ) {
sql_upd += ", " + databaseMeta.quoteField( valueMeta.getName() ) + " = ?" + Const.CR;
updateRowMeta.addValueMeta( valueMeta );
}
}
sql_upd += "WHERE ";
for ( int i = 0; i < meta.getKeyLookup().length; i++ ) {
if ( i > 0 ) {
sql_upd += "AND ";
}
sql_upd += databaseMeta.quoteField( meta.getKeyLookup()[ i ] ) + " = ?" + Const.CR;
updateRowMeta.addValueMeta( inputRowMeta.getValueMeta( data.keynrs[ i ] ) );
}
sql_upd += "AND " + databaseMeta.quoteField( meta.getVersionField() ) + " = ? ";
updateRowMeta.addValueMeta( new ValueMetaInteger( meta.getVersionField() ) );
try {
logDetailed( "Preparing update: " + Const.CR + sql_upd + Const.CR );
data.prepStatementUpdate = data.db.getConnection().prepareStatement( databaseMeta.stripCR( sql_upd ) );
} catch ( SQLException ex ) {
throw new KettleDatabaseException( "Unable to prepare dimension update :" + Const.CR + sql_upd, ex );
}
data.insertRowMeta = insertRowMeta;
data.updateRowMeta = updateRowMeta;
}
Object[] insertRow = new Object[ data.insertRowMeta.size() ];
int insertIndex = 0;
if ( !isAutoIncrement() ) {
insertRow[ insertIndex++ ] = technicalKey;
}
// Caller is responsible for setting the proper version number depending
// on whether newEntry == true
insertRow[ insertIndex++ ] = versionNr;
switch ( data.startDateChoice ) {
case DimensionLookupMeta.START_DATE_ALTERNATIVE_NONE:
insertRow[ insertIndex++ ] = dateFrom;
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_SYSDATE:
// Use the time the step execution begins as the date-from (passed in as dateFrom).
// Previously the current system time was used; this excluded the row in the lookup
// portion of the step, which uses this 'valueDate' rather than the current time,
// and resulted in multiple inserts for what should have been one [PDI-4317].
insertRow[ insertIndex++ ] = dateFrom;
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_START_OF_TRANS:
insertRow[ insertIndex++ ] = getTrans().getStartDate();
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_NULL:
insertRow[ insertIndex++ ] = null;
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_COLUMN_VALUE:
insertRow[ insertIndex++ ] = inputRowMeta.getDate( row, data.startDateFieldIndex );
break;
default:
throw new KettleStepException( BaseMessages.getString(
PKG, "DimensionLookup.Exception.IllegalStartDateSelection", Integer.toString( data.startDateChoice ) ) );
}
insertRow[ insertIndex++ ] = dateTo;
for ( int i = 0; i < data.keynrs.length; i++ ) {
insertRow[ insertIndex++ ] = row[ data.keynrs[ i ] ];
}
for ( int i = 0; i < data.fieldnrs.length; i++ ) {
if ( data.fieldnrs[ i ] >= 0 ) {
// Ignore last_version, last_updated, etc. These are handled below...
//
insertRow[ insertIndex++ ] = row[ data.fieldnrs[ i ] ];
}
}
// The special update fields...
//
for ( int i = 0; i < meta.getFieldUpdate().length; i++ ) {
switch ( meta.getFieldUpdate()[ i ] ) {
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSUP:
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSERTED:
insertRow[ insertIndex++ ] = new Date();
break;
case DimensionLookupMeta.TYPE_UPDATE_LAST_VERSION:
insertRow[ insertIndex++ ] = Boolean.TRUE;
break; // Always the last version on insert.
default:
break;
}
}
if ( isDebug() ) {
logDebug( "rins, size=" + data.insertRowMeta.size() + ", values=" + data.insertRowMeta.getString( insertRow ) );
}
// INSERT NEW VALUE!
data.db.setValues( data.insertRowMeta, insertRow, data.prepStatementInsert );
data.db.insertRow( data.prepStatementInsert );
if ( isDebug() ) {
logDebug( "Row inserted!" );
}
if ( technicalKey == null && databaseMeta.supportsAutoGeneratedKeys() ) {
try {
RowMetaAndData keys = data.db.getGeneratedKeys( data.prepStatementInsert );
if ( keys.getRowMeta().size() > 0 ) {
technicalKey = keys.getRowMeta().getInteger( keys.getData(), 0 );
} else {
throw new KettleDatabaseException(
"Unable to retrieve value of auto-generated technical key : no value found!" );
}
} catch ( Exception e ) {
throw new KettleDatabaseException(
"Unable to retrieve value of auto-generated technical key : unexpected error: ", e );
}
}
if ( !newEntry ) { // we have to update the previous version in the dimension!
/*
* UPDATE d_customer SET dateto = val_datfrom , last_updated = <now> , last_version = false WHERE keylookup[] =
* keynrs[] AND versionfield = val_version - 1 ;
*/
Object[] updateRow = new Object[ data.updateRowMeta.size() ];
int updateIndex = 0;
switch ( data.startDateChoice ) {
case DimensionLookupMeta.START_DATE_ALTERNATIVE_NONE:
updateRow[ updateIndex++ ] = dateFrom;
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_SYSDATE:
updateRow[ updateIndex++ ] = new Date();
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_START_OF_TRANS:
updateRow[ updateIndex++ ] = getTrans().getCurrentDate();
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_NULL:
updateRow[ updateIndex++ ] = null;
break;
case DimensionLookupMeta.START_DATE_ALTERNATIVE_COLUMN_VALUE:
updateRow[ updateIndex++ ] = inputRowMeta.getDate( row, data.startDateFieldIndex );
break;
default:
throw new KettleStepException( BaseMessages.getString(
"DimensionLookup.Exception.IllegalStartDateSelection", Integer.toString( data.startDateChoice ) ) );
}
// The special update fields...
//
for ( int i = 0; i < meta.getFieldUpdate().length; i++ ) {
switch ( meta.getFieldUpdate()[ i ] ) {
case DimensionLookupMeta.TYPE_UPDATE_DATE_INSUP:
updateRow[ updateIndex++ ] = new Date();
break;
case DimensionLookupMeta.TYPE_UPDATE_LAST_VERSION:
updateRow[ updateIndex++ ] = Boolean.FALSE;
break; // Never the last version on this update
case DimensionLookupMeta.TYPE_UPDATE_DATE_UPDATED:
updateRow[ updateIndex++ ] = new Date();
break;
default:
break;
}
}
for ( int i = 0; i < data.keynrs.length; i++ ) {
updateRow[ updateIndex++ ] = row[ data.keynrs[ i ] ];
}
updateRow[ updateIndex++ ] = versionNr - 1;
if ( isRowLevel() ) {
logRowlevel( "UPDATE using rupd=" + data.updateRowMeta.getString( updateRow ) );
}
// UPDATE VALUES
// set values for update
//
data.db.setValues( data.updateRowMeta, updateRow, data.prepStatementUpdate );
if ( isDebug() ) {
logDebug( "Values set for update (" + data.updateRowMeta.size() + ")" );
}
data.db.insertRow( data.prepStatementUpdate ); // do the actual update
if ( isDebug() ) {
logDebug( "Row updated!" );
}
}
return technicalKey;
} | @Test
public void testDimInsert() throws Exception {
RowMetaInterface rowMetaInterface = mock( RowMetaInterface.class );
Object[] objects = mock( List.class ).toArray();
Date date = mock( Date.class );
dimensionLookupSpy.dimInsert( rowMetaInterface, objects, 132323L, true, null, date, date );
verify( databaseMeta, times( 0 ) ).supportsAutoGeneratedKeys();
dimensionLookupSpy.dimInsert( rowMetaInterface, objects, null, true, null, date, date );
verify( databaseMeta, times( 2 ) ).supportsAutoGeneratedKeys();
} |
@Override
public long position() {
int pos = byteBufferHeader.position();
List<ByteBuffer> messageBufferList = this.getMessageResult.getMessageBufferList();
for (ByteBuffer bb : messageBufferList) {
pos += bb.position();
}
return pos;
} | @Test
public void ManyMessageTransferPosTest() {
ByteBuffer byteBuffer = ByteBuffer.allocate(20);
byteBuffer.putInt(20);
GetMessageResult getMessageResult = new GetMessageResult();
ManyMessageTransfer manyMessageTransfer = new ManyMessageTransfer(byteBuffer,getMessageResult);
Assert.assertEquals(manyMessageTransfer.position(),4);
} |
@Override
public MeterCellId allocateMeterId(DeviceId deviceId, MeterScope meterScope) {
if (userDefinedIndexMode) {
log.warn("Unable to allocate meter id when user defined index mode is enabled");
return null;
}
MeterTableKey meterTableKey = MeterTableKey.key(deviceId, meterScope);
MeterCellId meterCellId;
long id;
// First, search for reusable key
meterCellId = firstReusableMeterId(meterTableKey);
if (meterCellId != null) {
return meterCellId;
}
// If there was no reusable meter id we have to generate a new value
// using start and end index as lower and upper bound respectively.
long startIndex = getStartIndex(meterTableKey);
long endIndex = getEndIndex(meterTableKey);
// If the device does not give us MeterFeatures fallback to queryMeters
if (startIndex == -1L || endIndex == -1L) {
// Only meaningful for OpenFlow today
long maxMeters = queryMaxMeters(deviceId);
if (maxMeters == 0L) {
return null;
} else {
// OpenFlow meter index starts from 1, ends with max
startIndex = 1L;
endIndex = maxMeters;
}
}
do {
id = meterIdGenerators.getAndIncrement(meterTableKey);
} while (id < startIndex);
if (id > endIndex) {
return null;
}
// For backward compatibility if we are using global scope,
// return a MeterId, otherwise we create a PiMeterCellId
if (meterScope.isGlobal()) {
return MeterId.meterId(id);
} else {
return PiMeterCellId.ofIndirect(PiMeterId.of(meterScope.id()), id);
}
} | @Test
public void testAllocateIdInUserDefinedIndexMode() {
initMeterStore(true);
assertNull(meterStore.allocateMeterId(did1, MeterScope.globalScope()));
} |
public static String getCertFingerPrint(Certificate cert) {
byte [] digest = null;
try {
byte[] encCertInfo = cert.getEncoded();
MessageDigest md = MessageDigest.getInstance("SHA-1");
digest = md.digest(encCertInfo);
} catch (Exception e) {
logger.error("Exception:", e);
}
if (digest != null) {
return bytesToHex(digest).toLowerCase();
}
return null;
} | @Test
public void testGetCertFingerPrintAlice() throws Exception {
X509Certificate cert = null;
try (InputStream is = Config.getInstance().getInputStreamFromFile("alice.crt")){
CertificateFactory cf = CertificateFactory.getInstance("X.509");
cert = (X509Certificate) cf.generateCertificate(is);
} catch (Exception e) {
e.printStackTrace();
}
String fp = FingerPrintUtil.getCertFingerPrint(cert);
Assert.assertEquals("0ea49f0d1f89ae839e96c3665beb4ff6d0033c33", fp);
} |
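getCertFingerPrint is digest-then-hex over the certificate's DER encoding. A minimal sketch of that step applied to arbitrary bytes; bytesToHex here is an illustrative stand-in for the utility's own helper:
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
public class FingerprintDemo {
    static String bytesToHex(byte[] bytes) {
        StringBuilder sb = new StringBuilder(bytes.length * 2);
        for (byte b : bytes) {
            sb.append(String.format("%02x", b)); // lower-case, zero-padded
        }
        return sb.toString();
    }
    public static void main(String[] args) throws Exception {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        byte[] digest = md.digest("hello".getBytes(StandardCharsets.UTF_8));
        System.out.println(bytesToHex(digest)); // 40 hex chars for SHA-1's 20 bytes
    }
}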
public boolean isAssociatedWithEnvironmentOtherThan(String environmentName) {
return !(this.environmentName == null || this.environmentName.equals(environmentName));
} | @Test
public void shouldUnderstandWhenAssociatedWithADifferentEnvironment() {
EnvironmentPipelineModel foo = new EnvironmentPipelineModel("foo", "env");
assertThat(foo.isAssociatedWithEnvironmentOtherThan("env"), is(false));
assertThat(foo.isAssociatedWithEnvironmentOtherThan("env2"), is(true));
assertThat(foo.isAssociatedWithEnvironmentOtherThan(null), is(true));
foo = new EnvironmentPipelineModel("foo");
assertThat(foo.isAssociatedWithEnvironmentOtherThan("env"), is(false));
assertThat(foo.isAssociatedWithEnvironmentOtherThan("env2"), is(false));
assertThat(foo.isAssociatedWithEnvironmentOtherThan(null), is(false));
} |
@Override
protected int compareFirst(final Path p1, final Path p2) {
if(p1.attributes().getSize() > p2.attributes().getSize()) {
return ascending ? 1 : -1;
}
else if(p1.attributes().getSize() < p2.attributes().getSize()) {
return ascending ? -1 : 1;
}
return 0;
} | @Test
public void testCompareFirst() {
assertEquals(0,
new SizeComparator(true).compareFirst(new Path("/a", EnumSet.of(Path.Type.file)), new Path("/b", EnumSet.of(Path.Type.file))));
} |
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
} | @Test
public void literal() throws ScanException {
String input = "abv";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
Assertions.assertEquals(input, nodeToStringTransformer.transform());
} |
@Override
public Map<CompoundKey, AlbumEntry> batchGet(Set<CompoundKey> ids)
{
Map<CompoundKey, AlbumEntry> result = new HashMap<>();
for (CompoundKey key : ids)
result.put(key, get(key));
return result;
} | @Test
public void testBatchGet()
{
// get keys 1-3
Set<CompoundKey> batchIds = new HashSet<>();
for (int i = 1; i <= 3; i++)
{
batchIds.add(_keys[i]);
}
Map<CompoundKey, AlbumEntry> batchEntries = _entryRes.batchGet(batchIds);
Assert.assertEquals(batchEntries.size(), 3);
for (int i = 1; i <= 3; i++)
{
Assert.assertEquals(batchEntries.get(_keys[i]), _entries[i]);
}
} |
@NonNull
public Client authenticate(@NonNull Request request) {
// https://datatracker.ietf.org/doc/html/rfc7521#section-4.2
try {
if (!CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT.equals(request.clientAssertionType())) {
throw new AuthenticationException(
"unsupported client_assertion_type='%s', expected '%s'"
.formatted(request.clientAssertionType(), CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT));
}
var processor = new DefaultJWTProcessor<>();
var keySelector =
new JWSVerificationKeySelector<>(
Set.of(JWSAlgorithm.RS256, JWSAlgorithm.ES256), jwkSource);
processor.setJWSKeySelector(keySelector);
processor.setJWTClaimsSetVerifier(
new DefaultJWTClaimsVerifier<>(
new JWTClaimsSet.Builder().audience(baseUri.toString()).build(),
Set.of(
JWTClaimNames.JWT_ID,
JWTClaimNames.EXPIRATION_TIME,
JWTClaimNames.ISSUER,
JWTClaimNames.SUBJECT)));
var claims = processor.process(request.clientAssertion(), null);
var clientId = clientIdFromAssertion(request.clientId(), claims);
return new Client(clientId);
} catch (ParseException e) {
throw new AuthenticationException("failed to parse client assertion", e);
} catch (BadJOSEException | JOSEException e) {
throw new AuthenticationException("failed to verify client assertion", e);
}
} | @Test
void authenticate() throws JOSEException {
var key = generateKey();
var jwkSource = new StaticJwkSource<>(key);
var claims =
new JWTClaimsSet.Builder()
.audience(RP_ISSUER.toString())
.subject(CLIENT_ID)
.issuer(CLIENT_ID)
.expirationTime(Date.from(Instant.now().plusSeconds(60)))
.jwtID(UUID.randomUUID().toString())
.build();
var signed = signJwt(claims, key);
var authenticator = new ClientAuthenticator(jwkSource, RP_ISSUER);
// when & then
var client =
authenticator.authenticate(
new Request(
CLIENT_ID, ClientAuthenticator.CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT, signed));
assertEquals(CLIENT_ID, client.clientId());
} |
@SuppressWarnings({"unchecked", "rawtypes"})
public static int compareTo(final Comparable thisValue, final Comparable otherValue, final OrderDirection orderDirection, final NullsOrderType nullsOrderType,
final boolean caseSensitive) {
if (null == thisValue && null == otherValue) {
return 0;
}
if (null == thisValue) {
return NullsOrderType.FIRST == nullsOrderType ? -1 : 1;
}
if (null == otherValue) {
return NullsOrderType.FIRST == nullsOrderType ? 1 : -1;
}
if (!caseSensitive && thisValue instanceof String && otherValue instanceof String) {
return compareToCaseInsensitiveString((String) thisValue, (String) otherValue, orderDirection);
}
return OrderDirection.ASC == orderDirection ? thisValue.compareTo(otherValue) : -thisValue.compareTo(otherValue);
} | @Test
void assertCompareToWhenFirstValueIsNullForOrderByDescAndNullsFirst() {
assertThat(CompareUtils.compareTo(null, 1, OrderDirection.DESC, NullsOrderType.FIRST, caseSensitive), is(-1));
} |
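CompareUtils hand-rolls null ordering so it can also honor direction and case sensitivity. For the plain NULLS FIRST case the JDK's comparator combinators express the same rule; a small sketch:
import java.util.Arrays;
import java.util.Comparator;
public class NullsOrderDemo {
    public static void main(String[] args) {
        Integer[] values = {3, null, 1};
        // nulls sort before everything; non-null values sort descending,
        // matching DESC with NullsOrderType.FIRST above.
        Arrays.sort(values, Comparator.nullsFirst(Comparator.reverseOrder()));
        System.out.println(Arrays.toString(values)); // [null, 3, 1]
    }
}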
@SuppressWarnings("deprecation")
public static List<SimpleAclRule> fromCrd(AclRule rule) {
if (rule.getOperations() != null && rule.getOperation() != null) {
throw new InvalidResourceException("Both fields `operations` and `operation` cannot be filled in at the same time");
} else if (rule.getOperations() != null) {
List<SimpleAclRule> simpleAclRules = new ArrayList<>();
for (AclOperation operation : rule.getOperations()) {
simpleAclRules.add(new SimpleAclRule(rule.getType(), SimpleAclRuleResource.fromCrd(rule.getResource()), rule.getHost(), operation));
}
return simpleAclRules;
} else {
return List.of(new SimpleAclRule(rule.getType(), SimpleAclRuleResource.fromCrd(rule.getResource()), rule.getHost(), rule.getOperation()));
}
} | @Test
public void testFromCrdWithBothOperationsAndOperationSetAtTheSameTime() {
assertThrows(InvalidResourceException.class, () -> {
AclRule rule = new AclRuleBuilder()
.withType(AclRuleType.ALLOW)
.withResource(ACL_RULE_TOPIC_RESOURCE)
.withHost("127.0.0.1")
.withOperation(AclOperation.READ)
.withOperations(AclOperation.READ, AclOperation.WRITE, AclOperation.DESCRIBECONFIGS)
.build();
SimpleAclRule.fromCrd(rule);
});
} |
@SuppressWarnings("squid:S1181")
// Yes we really do want to catch Throwable
@Override
public V apply(U input) {
int retryAttempts = 0;
while (true) {
try {
return baseFunction.apply(input);
} catch (Throwable t) {
if (!exceptionClass.isAssignableFrom(t.getClass()) || retryAttempts == maxRetries) {
Throwables.throwIfUnchecked(t);
throw new RetriesExceededException(t);
}
Tools.randomDelay(maxDelayBetweenRetries);
retryAttempts++;
}
}
} | @Test
public void testFailureAfterTwoRetries() {
new RetryingFunction<>(this::succeedAfterTwoFailures, RetryableException.class, 2, 10).apply(null);
} |
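RetryingFunction wraps a base function in a bounded retry loop. A minimal sketch of the same pattern, omitting the random delay and exception-class filtering for brevity (names are illustrative):
import java.util.function.Function;
final class Retrying<U, V> implements Function<U, V> {
    private final Function<U, V> base;
    private final int maxRetries;
    Retrying(Function<U, V> base, int maxRetries) {
        this.base = base;
        this.maxRetries = maxRetries;
    }
    @Override
    public V apply(U input) {
        int attempts = 0;
        while (true) {
            try {
                return base.apply(input); // initial attempt plus up to maxRetries retries
            } catch (RuntimeException e) {
                if (attempts++ == maxRetries) {
                    throw e; // budget exhausted: propagate the last failure
                }
            }
        }
    }
}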
public static RestSettingBuilder delete(final RestIdMatcher idMatcher) {
return single(HttpMethod.DELETE, checkNotNull(idMatcher, "ID Matcher should not be null"));
} | @Test
public void should_delete_with_response() throws Exception {
server.resource("targets",
delete("1").response(status(409))
);
running(server, () -> {
HttpResponse httpResponse = helper.deleteForResponse(remoteUrl("/targets/1"));
assertThat(httpResponse.getCode(), is(409));
});
} |
public SelType evaluate(String expr, Map<String, Object> varsMap, Extension ext)
throws Exception {
checkExprLength(expr);
selParser.ReInit(new ByteArrayInputStream(expr.getBytes()));
ASTExecute n = selParser.Execute();
try {
selEvaluator.resetWithInput(varsMap, ext);
return (SelType) n.jjtAccept(selEvaluator, null);
} finally {
selEvaluator.clearState();
}
} | @Test
public void testEvaluate() throws Exception {
SelType res = t1.evaluate("1+1;", new HashMap<>(), null);
assertEquals("LONG: 2", res.type() + ": " + res);
} |
public List<Map<String, Artifact>> getBatchStepInstancesArtifactsFromList(
String workflowId, long workflowInstanceId, Map<String, Long> stepIdToRunId) {
List<Map<String, Long>> batches = splitMap(stepIdToRunId);
List<Map<String, Artifact>> results = new ArrayList<>();
for (Map<String, Long> batch : batches) {
results.addAll(
getBatchStepInstancesArtifactsFromListLimited(workflowId, workflowInstanceId, batch));
}
return results;
} | @Test
public void testGetBatchStepInstancesArtifactsFromList() throws IOException {
MaestroStepInstanceDao stepDaoSpy = Mockito.spy(stepDao);
StepInstance siSubWf = loadObject(TEST_STEP_INSTANCE_SUBWORKFLOW, StepInstance.class);
Map<String, Long> stepIdToRunId = new LinkedHashMap<>();
int numberOfInstancesToInsert = Constants.BATCH_SIZE_ROLLUP_STEP_ARTIFACTS_QUERY * 2 + 3;
long runId = 1;
for (int i = 1; i <= numberOfInstancesToInsert; i++) {
siSubWf.setStepId("step_" + i);
siSubWf.setWorkflowRunId(runId);
stepIdToRunId.put("step_" + i, runId);
stepDaoSpy.insertOrUpsertStepInstance(siSubWf, false);
if (i == numberOfInstancesToInsert / 2) {
runId = 2;
}
}
List<Map<String, Artifact>> artifacts =
stepDaoSpy.getBatchStepInstancesArtifactsFromList(
siSubWf.getWorkflowId(), siSubWf.getWorkflowInstanceId(), stepIdToRunId);
assertEquals(numberOfInstancesToInsert, artifacts.size());
assertEquals(
"sample-dag-test-3",
artifacts.get(10).get("maestro_subworkflow").asSubworkflow().getSubworkflowId());
assertNotNull(
artifacts.get(10).get("maestro_subworkflow").asSubworkflow().getSubworkflowOverview());
Mockito.verify(stepDaoSpy, Mockito.times(3))
.getBatchStepInstancesArtifactsFromListLimited(
eq(siSubWf.getWorkflowId()), eq(siSubWf.getWorkflowInstanceId()), any());
} |
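The DAO above fans a large step map out into bounded batches via splitMap so each downstream query stays within a size limit. A minimal sketch of that partitioning step under illustrative names (not the Maestro internals):
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
final class MapBatcher {
    // Partition a map into chunks of at most batchSize entries, preserving
    // iteration order so results can be correlated with the input.
    static <K, V> List<Map<K, V>> splitMap(Map<K, V> input, int batchSize) {
        List<Map<K, V>> batches = new ArrayList<>();
        Map<K, V> current = new LinkedHashMap<>();
        for (Map.Entry<K, V> e : input.entrySet()) {
            current.put(e.getKey(), e.getValue());
            if (current.size() == batchSize) {
                batches.add(current);
                current = new LinkedHashMap<>();
            }
        }
        if (!current.isEmpty()) {
            batches.add(current); // trailing partial batch
        }
        return batches;
    }
}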
@Override
public boolean checkCredentials(String username, String password) {
if (username == null || password == null) {
return false;
}
Credentials credentials = new Credentials(username, password);
if (validCredentialsCache.contains(credentials)) {
return true;
} else if (invalidCredentialsCache.contains(credentials)) {
return false;
}
boolean isValid =
this.username.equals(username)
&& this.passwordHash.equals(
generatePasswordHash(
algorithm, salt, iterations, keyLength, password));
if (isValid) {
validCredentialsCache.add(credentials);
} else {
invalidCredentialsCache.add(credentials);
}
return isValid;
} | @Test
public void testPBKDF2WithHmacSHA512_upperCaseWithoutColon() throws Exception {
String algorithm = "PBKDF2WithHmacSHA512";
int iterations = 1000;
int keyLength = 128;
String hash =
"07:6F:E2:27:9B:CA:48:66:9B:13:9E:02:9C:AE:FC:E4:1A:2F:0F:E6:48:A3:FF:8E:D2:30:59:68:12:A6:29:34:FC:99:29:8A:98:65:AE:4B:05:7C:B6:83:A4:83:C0:32:E4:90:61:1D:DD:2E:53:17:01:FF:6A:64:48:B2:AA:22:DE:B3:BC:56:08:C6:66:EC:98:F8:96:8C:1B:DA:B2:F2:2A:6C:22:8E:19:CC:B2:62:55:3E:BE:DC:C7:58:36:9D:92:CF:D7:D2:A1:6D:8F:DC:DE:8E:E9:36:D4:E7:2D:0A:6D:A1:B8:56:0A:53:BB:17:E2:D5:DE:A0:48:51:FC:33";
hash = hash.toUpperCase().replace(":", "");
PBKDF2Authenticator PBKDF2Authenticator =
new PBKDF2Authenticator(
"/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength);
for (String username : TEST_USERNAMES) {
for (String password : TEST_PASSWORDS) {
boolean expectedIsAuthenticated =
VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
boolean actualIsAuthenticated =
PBKDF2Authenticator.checkCredentials(username, password);
assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
}
}
} |
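checkCredentials compares against a PBKDF2-derived hash. A minimal sketch of deriving such a hash with the JDK's SecretKeyFactory; the algorithm string, iteration count, and key length echo the test's constants, while the salt is supplied by the caller:
import java.security.spec.KeySpec;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
public class Pbkdf2Demo {
    // keyLength is in bits, per the PBEKeySpec contract.
    static byte[] hash(String password, byte[] salt, int iterations, int keyLength)
            throws Exception {
        KeySpec spec = new PBEKeySpec(password.toCharArray(), salt, iterations, keyLength);
        SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512");
        return factory.generateSecret(spec).getEncoded();
    }
}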
@Override
public CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>> describeGroups(
RequestContext context,
List<String> groupIds
) {
if (!isActive.get()) {
return CompletableFuture.completedFuture(DescribeGroupsRequest.getErrorDescribedGroupList(
groupIds,
Errors.COORDINATOR_NOT_AVAILABLE
));
}
final List<CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>>> futures =
new ArrayList<>(groupIds.size());
final Map<TopicPartition, List<String>> groupsByTopicPartition = new HashMap<>();
groupIds.forEach(groupId -> {
// For backwards compatibility, we support DescribeGroups for the empty group id.
if (groupId == null) {
futures.add(CompletableFuture.completedFuture(Collections.singletonList(
new DescribeGroupsResponseData.DescribedGroup()
.setGroupId(null)
.setErrorCode(Errors.INVALID_GROUP_ID.code())
)));
} else {
final TopicPartition topicPartition = topicPartitionFor(groupId);
groupsByTopicPartition
.computeIfAbsent(topicPartition, __ -> new ArrayList<>())
.add(groupId);
}
});
groupsByTopicPartition.forEach((topicPartition, groupList) -> {
CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>> future =
runtime.scheduleReadOperation(
"describe-groups",
topicPartition,
(coordinator, lastCommittedOffset) -> coordinator.describeGroups(context, groupList, lastCommittedOffset)
).exceptionally(exception -> handleOperationException(
"describe-groups",
groupList,
exception,
(error, __) -> DescribeGroupsRequest.getErrorDescribedGroupList(groupList, error)
));
futures.add(future);
});
return FutureUtils.combineFutures(futures, ArrayList::new, List::addAll);
} | @Test
public void testDescribeGroupsWhenNotStarted() throws ExecutionException, InterruptedException {
CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
GroupCoordinatorService service = new GroupCoordinatorService(
new LogContext(),
createConfig(),
runtime,
new GroupCoordinatorMetrics(),
createConfigManager()
);
CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>> future = service.describeGroups(
requestContext(ApiKeys.DESCRIBE_GROUPS),
Collections.singletonList("group-id")
);
assertEquals(
Collections.singletonList(new DescribeGroupsResponseData.DescribedGroup()
.setGroupId("group-id")
.setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
),
future.get()
);
} |
public NotFoundException(String message) {
super(STATUS, NAME, message);
} | @Test
public void testNotFoundException() throws Exception {
try {
throw new NotFoundException("/hello");
} catch (NotFoundException e) {
assertEquals(e.getStatus(), 404);
assertEquals(e.getName(), "Not Found");
}
} |
@Override
public List<DeptTreeVO> getDeptTree(Integer excludeNodeId, Boolean appendRoot, Boolean needSetTotal) {
QueryWrapper wrapper = QueryWrapper.create()
// .orderBy(SysDept::getDeep).asc()
.orderBy(SysDept::getSort).asc();
// Fetch all department records
List<SysDept> list = list(wrapper);
List<DeptTreeVO> deptTreeVOS = BeanCopyUtils.copyList(list, DeptTreeVO.class);
if (needSetTotal) {
setUseTotal(deptTreeVOS);
}
DeptTreeVO root = TreeUtils.getRoot(DeptTreeVO.class);
root.setName("根部门");
List<DeptTreeVO> trees = TreeUtils.buildTree(deptTreeVOS, root, excludeNodeId);
if (appendRoot != null && !appendRoot) {
if (trees.get(0).getChildren() == null) {
trees = new ArrayList<>();
} else {
trees = trees.get(0).getChildren();
}
}
return trees;
} | @Test
void getDeepTreeVOS() {
List<DeptTreeVO> tree = sysDeptService.getDeptTree(null, true, false);
System.out.println("树形:" + JsonUtils.toJsonString(tree));
} |
@Override
public synchronized DefaultConnectClient get(
final Optional<String> ksqlAuthHeader,
final List<Entry<String, String>> incomingRequestHeaders,
final Optional<KsqlPrincipal> userPrincipal
) {
if (defaultConnectAuthHeader == null) {
defaultConnectAuthHeader = buildDefaultAuthHeader();
}
final Map<String, Object> configWithPrefixOverrides =
ksqlConfig.valuesWithPrefixOverride(KsqlConfig.KSQL_CONNECT_PREFIX);
return new DefaultConnectClient(
ksqlConfig.getString(KsqlConfig.CONNECT_URL_PROPERTY),
buildAuthHeader(ksqlAuthHeader, incomingRequestHeaders),
requestHeadersExtension
.map(extension -> extension.getHeaders(userPrincipal))
.orElse(Collections.emptyMap()),
Optional.ofNullable(newSslContext(configWithPrefixOverrides)),
shouldVerifySslHostname(configWithPrefixOverrides),
ksqlConfig.getLong(KsqlConfig.CONNECT_REQUEST_TIMEOUT_MS)
);
} | @Test
public void shouldBuildAuthHeader() throws Exception {
// Given:
givenCustomBasicAuthHeader();
givenValidCredentialsFile();
// When:
final DefaultConnectClient connectClient =
connectClientFactory.get(Optional.empty(), Collections.emptyList(), Optional.empty());
// Then:
assertThat(connectClient.getRequestHeaders(),
arrayContaining(header(AUTH_HEADER_NAME, EXPECTED_HEADER)));
} |
@Override
@NonNull public CharSequence getKeyboardName() {
return mKeyboardName;
} | @Test
public void testKeyboardPopupCharacterStringThreeRowsConstructor() throws Exception {
AnyPopupKeyboard keyboard =
new AnyPopupKeyboard(
new DefaultAddOn(getApplicationContext(), getApplicationContext()),
getApplicationContext(),
"qwertasdfgzxc",
SIMPLE_KeyboardDimens,
"POP_KEYBOARD");
Assert.assertEquals("POP_KEYBOARD", keyboard.getKeyboardName());
Assert.assertEquals(13, keyboard.getKeys().size());
Assert.assertEquals(3, keyboard.getKeys().stream().map(k -> k.y).distinct().count());
int vGap = (int) SIMPLE_KeyboardDimens.getRowVerticalGap();
int keyHeight = (int) SIMPLE_KeyboardDimens.getNormalKeyHeight();
// NOTE: the first characters in the list are in the bottom row!
// zxc
// asdfg
// qwert
assertKeyValues(keyboard, 'z', vGap);
assertKeyValues(keyboard, 'x', vGap);
assertKeyValues(keyboard, 'c', vGap);
assertKeyValues(keyboard, 'a', vGap + keyHeight + vGap);
assertKeyValues(keyboard, 's', vGap + keyHeight + vGap);
assertKeyValues(keyboard, 'd', vGap + keyHeight + vGap);
assertKeyValues(keyboard, 'f', vGap + keyHeight + vGap);
assertKeyValues(keyboard, 'g', vGap + keyHeight + vGap);
assertKeyValues(keyboard, 'q', vGap + keyHeight + vGap + keyHeight + vGap);
assertKeyValues(keyboard, 'w', vGap + keyHeight + vGap + keyHeight + vGap);
assertKeyValues(keyboard, 'e', vGap + keyHeight + vGap + keyHeight + vGap);
assertKeyValues(keyboard, 'r', vGap + keyHeight + vGap + keyHeight + vGap);
assertKeyValues(keyboard, 't', vGap + keyHeight + vGap + keyHeight + vGap);
} |
public static Getter newThisGetter(Getter parent, Object object) {
return new ThisGetter(parent, object);
} | @Test
public void newThisGetter() {
OuterObject object = new OuterObject("name", new InnerObject("inner", 0, 1, 2, 3));
Getter innerObjectThisGetter = GetterFactory.newThisGetter(null, object);
Class<?> returnType = innerObjectThisGetter.getReturnType();
assertEquals(OuterObject.class, returnType);
} |
public void recordFailedAttempt(String username, String address) {
Log.warn("Failed admin console login attempt by "+username+" from "+address);
Long cnt = 0L;
if (attemptsPerIP.get(address) != null) {
cnt = attemptsPerIP.get(address);
}
cnt++;
attemptsPerIP.put(address, cnt);
final StringBuilder sb = new StringBuilder();
if (cnt > MAX_ATTEMPTS_PER_IP.getValue()) {
Log.warn("Login attempt limit breached for address "+address);
sb.append("Future login attempts from this address will be temporarily locked out. ");
}
cnt = 0L;
if (attemptsPerUsername.get(username) != null) {
cnt = attemptsPerUsername.get(username);
}
cnt++;
attemptsPerUsername.put(username, cnt);
if (cnt > MAX_ATTEMPTS_PER_USERNAME.getValue()) {
Log.warn("Login attempt limit breached for username "+username);
sb.append("Future login attempts for this user will be temporarily locked out. ");
}
securityAuditManager.logEvent(username, "Failed admin console login attempt", "A failed login attempt to the admin console was made from address " + address + ". " + sb);
} | @Test
public void aFailedLoginWillBeAudited() {
final String username = "test-user-b-" + StringUtils.randomString(10);
loginLimitManager.recordFailedAttempt(username, "a.b.c.e");
verify(securityAuditManager).logEvent(username, "Failed admin console login attempt", "A failed login attempt to the admin console was made from address a.b.c.e. ");
} |
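The get–check–put counting in recordFailedAttempt can be written more compactly with Map.merge; a minimal standalone sketch of the same pattern (hypothetical class and map, not the real lockout implementation):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class AttemptCounter {
    private final Map<String, Long> attemptsPerIp = new ConcurrentHashMap<>();

    // Insert 1 if the address is absent, otherwise add 1 to the existing count.
    long recordFailure(String address) {
        return attemptsPerIp.merge(address, 1L, Long::sum);
    }
}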
void checkPerm(PlainAccessResource needCheckedAccess, PlainAccessResource ownedAccess) {
permissionChecker.check(needCheckedAccess, ownedAccess);
} | @Test(expected = AclException.class)
public void checkPermAdmin() {
PlainAccessResource plainAccessResource = new PlainAccessResource();
plainAccessResource.setRequestCode(17);
plainPermissionManager.checkPerm(plainAccessResource, pubPlainAccessResource);
} |
public DateTimeFormatSpec(String format) {
Preconditions.checkArgument(StringUtils.isNotEmpty(format), "Must provide format");
if (Character.isDigit(format.charAt(0))) {
// Colon format
String[] tokens = StringUtil.split(format, COLON_SEPARATOR, COLON_FORMAT_MAX_TOKENS);
Preconditions.checkArgument(tokens.length >= COLON_FORMAT_MIN_TOKENS && tokens.length <= COLON_FORMAT_MAX_TOKENS,
"Invalid format: %s, must be of format 'size:timeUnit:timeFormat(:patternWithTz)'", format);
TimeFormat timeFormat;
try {
timeFormat = TimeFormat.valueOf(tokens[COLON_FORMAT_TIME_FORMAT_POSITION]);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Invalid time format: %s in format: %s", tokens[COLON_FORMAT_TIME_FORMAT_POSITION], format));
}
switch (timeFormat) {
case EPOCH:
String sizeStr = tokens[COLON_FORMAT_SIZE_POSITION];
try {
_size = Integer.parseInt(sizeStr);
} catch (Exception e) {
throw new IllegalArgumentException(String.format("Invalid size: %s in format: %s", sizeStr, format));
}
Preconditions.checkArgument(_size > 0, "Invalid size: %s in format: %s, must be positive", _size, format);
String timeUnitStr = tokens[COLON_FORMAT_TIME_UNIT_POSITION];
try {
_unitSpec = new DateTimeFormatUnitSpec(timeUnitStr);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Invalid time unit: %s in format: %s", timeUnitStr, format));
}
_patternSpec = DateTimeFormatPatternSpec.EPOCH;
break;
case TIMESTAMP:
_size = 1;
_unitSpec = DateTimeFormatUnitSpec.MILLISECONDS;
_patternSpec = DateTimeFormatPatternSpec.TIMESTAMP;
break;
case SIMPLE_DATE_FORMAT:
_size = 1;
_unitSpec = DateTimeFormatUnitSpec.MILLISECONDS;
String patternStr =
tokens.length > COLON_FORMAT_PATTERN_POSITION ? tokens[COLON_FORMAT_PATTERN_POSITION] : null;
try {
_patternSpec = new DateTimeFormatPatternSpec(TimeFormat.SIMPLE_DATE_FORMAT, patternStr);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Invalid SIMPLE_DATE_FORMAT pattern: %s in format: %s", patternStr, format));
}
break;
default:
throw new IllegalStateException("Unsupported time format: " + timeFormat);
}
} else {
// Pipe format
String[] tokens = StringUtil.split(format, PIPE_SEPARATOR, PIPE_FORMAT_MAX_TOKENS);
Preconditions.checkArgument(tokens.length >= PIPE_FORMAT_MIN_TOKENS && tokens.length <= PIPE_FORMAT_MAX_TOKENS,
"Invalid format: %s, must be of format 'EPOCH|<timeUnit>(|<size>)' or "
+ "'SIMPLE_DATE_FORMAT|<pattern>(|<timeZone>)' or 'TIMESTAMP'", format);
TimeFormat timeFormat;
try {
timeFormat = TimeFormat.valueOf(tokens[PIPE_FORMAT_TIME_FORMAT_POSITION]);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Invalid time format: %s in format: %s", tokens[PIPE_FORMAT_TIME_FORMAT_POSITION], format));
}
switch (timeFormat) {
case EPOCH:
if (tokens.length > PIPE_FORMAT_SIZE_POSITION) {
try {
_size = Integer.parseInt(tokens[PIPE_FORMAT_SIZE_POSITION]);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Invalid size: %s in format: %s", tokens[PIPE_FORMAT_SIZE_POSITION], format));
}
Preconditions.checkArgument(_size > 0, "Invalid size: %s in format: %s, must be positive", _size, format);
} else {
_size = 1;
}
try {
_unitSpec = tokens.length > PIPE_FORMAT_TIME_UNIT_POSITION ? new DateTimeFormatUnitSpec(
tokens[PIPE_FORMAT_TIME_UNIT_POSITION]) : DateTimeFormatUnitSpec.MILLISECONDS;
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Invalid time unit: %s in format: %s", tokens[PIPE_FORMAT_TIME_UNIT_POSITION], format));
}
_patternSpec = DateTimeFormatPatternSpec.EPOCH;
break;
case TIMESTAMP:
_size = 1;
_unitSpec = DateTimeFormatUnitSpec.MILLISECONDS;
_patternSpec = DateTimeFormatPatternSpec.TIMESTAMP;
break;
case SIMPLE_DATE_FORMAT:
_size = 1;
_unitSpec = DateTimeFormatUnitSpec.MILLISECONDS;
if (tokens.length > PIPE_FORMAT_TIME_ZONE_POSITION) {
try {
_patternSpec =
new DateTimeFormatPatternSpec(TimeFormat.SIMPLE_DATE_FORMAT, tokens[PIPE_FORMAT_PATTERN_POSITION],
tokens[PIPE_FORMAT_TIME_ZONE_POSITION]);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format("Invalid SIMPLE_DATE_FORMAT pattern: %s, time zone: %s in format: %s",
tokens[PIPE_FORMAT_PATTERN_POSITION], tokens[PIPE_FORMAT_TIME_ZONE_POSITION], format));
}
} else {
try {
String pattern =
tokens.length > PIPE_FORMAT_PATTERN_POSITION ? tokens[PIPE_FORMAT_PATTERN_POSITION] : null;
_patternSpec = new DateTimeFormatPatternSpec(TimeFormat.SIMPLE_DATE_FORMAT, pattern);
} catch (Exception e) {
throw new IllegalArgumentException(String.format("Invalid SIMPLE_DATE_FORMAT pattern: %s in format: %s",
tokens[PIPE_FORMAT_PATTERN_POSITION], format));
}
}
break;
default:
throw new IllegalStateException("Unsupported time format: " + timeFormat);
}
}
} | @Test
public void testDateTimeFormatSpec() {
DateTimeFormatSpec dateTimeFormatSpec = new DateTimeFormatSpec("5:DAYS:EPOCH");
assertEquals(dateTimeFormatSpec.getTimeFormat(), DateTimeFieldSpec.TimeFormat.EPOCH);
assertEquals(dateTimeFormatSpec.getColumnSize(), 5);
assertEquals(dateTimeFormatSpec.getColumnUnit(), TimeUnit.DAYS);
assertEquals(dateTimeFormatSpec.getColumnDateTimeTransformUnit(),
DateTimeFormatUnitSpec.DateTimeTransformUnit.DAYS);
assertNull(dateTimeFormatSpec.getSDFPattern());
assertEquals(new DateTimeFormatSpec("EPOCH|DAYS|5"), dateTimeFormatSpec);
dateTimeFormatSpec = new DateTimeFormatSpec("1:DAYS:TIMESTAMP");
assertEquals(dateTimeFormatSpec.getTimeFormat(), DateTimeFieldSpec.TimeFormat.TIMESTAMP);
assertEquals(dateTimeFormatSpec.getColumnSize(), 1);
assertEquals(dateTimeFormatSpec.getColumnUnit(), TimeUnit.MILLISECONDS);
assertEquals(dateTimeFormatSpec.getColumnDateTimeTransformUnit(),
DateTimeFormatUnitSpec.DateTimeTransformUnit.MILLISECONDS);
assertNull(dateTimeFormatSpec.getSDFPattern());
assertEquals(new DateTimeFormatSpec("TIMESTAMP"), dateTimeFormatSpec);
dateTimeFormatSpec = new DateTimeFormatSpec("1:DAYS:SIMPLE_DATE_FORMAT:yyyyMMdd");
assertEquals(dateTimeFormatSpec.getTimeFormat(), DateTimeFieldSpec.TimeFormat.SIMPLE_DATE_FORMAT);
assertEquals(dateTimeFormatSpec.getColumnSize(), 1);
assertEquals(dateTimeFormatSpec.getColumnUnit(), TimeUnit.MILLISECONDS);
assertEquals(dateTimeFormatSpec.getColumnDateTimeTransformUnit(),
DateTimeFormatUnitSpec.DateTimeTransformUnit.MILLISECONDS);
assertEquals(dateTimeFormatSpec.getSDFPattern(), "yyyyMMdd");
assertEquals(dateTimeFormatSpec.getDateTimezone(), DateTimeZone.UTC);
assertEquals(new DateTimeFormatSpec("SIMPLE_DATE_FORMAT|yyyyMMdd"), dateTimeFormatSpec);
dateTimeFormatSpec = new DateTimeFormatSpec("1:DAYS:SIMPLE_DATE_FORMAT:yyyy-MM-dd tz(CST)");
assertEquals(dateTimeFormatSpec.getTimeFormat(), DateTimeFieldSpec.TimeFormat.SIMPLE_DATE_FORMAT);
assertEquals(dateTimeFormatSpec.getColumnSize(), 1);
assertEquals(dateTimeFormatSpec.getColumnUnit(), TimeUnit.MILLISECONDS);
assertEquals(dateTimeFormatSpec.getColumnDateTimeTransformUnit(),
DateTimeFormatUnitSpec.DateTimeTransformUnit.MILLISECONDS);
assertEquals(dateTimeFormatSpec.getSDFPattern(), "yyyy-MM-dd");
assertEquals(dateTimeFormatSpec.getDateTimezone(), DateTimeZone.forTimeZone(TimeZone.getTimeZone("CST")));
assertEquals(new DateTimeFormatSpec("SIMPLE_DATE_FORMAT|yyyy-MM-dd|CST"), dateTimeFormatSpec);
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("1:DAY"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("one:DAYS:EPOCH"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("EPOCH|DAYS|one"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("1:DAY:EPOCH"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("EPOCH|DAY"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("1:DAY:EPOCH:yyyyMMdd"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("EPOCH|yyyyMMdd"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("1:DAY:SIMPLE_DATE_FORMAT:yyycMMdd"));
assertThrows(IllegalArgumentException.class, () -> new DateTimeFormatSpec("SIMPLE_DATE_FORMAT|yyycMMdd"));
} |
public Optional<User> login(String nameOrEmail, String password) {
if (nameOrEmail == null || password == null) {
return Optional.empty();
}
User user = userDAO.findByName(nameOrEmail);
if (user == null) {
user = userDAO.findByEmail(nameOrEmail);
}
if (user != null && !user.isDisabled()) {
boolean authenticated = encryptionService.authenticate(password, user.getPassword(), user.getSalt());
if (authenticated) {
performPostLoginActivities(user);
return Optional.of(user);
}
}
return Optional.empty();
} | @Test
void callingLoginShouldExecutePostLoginActivitiesForUserOnSuccessfulAuthentication() {
Mockito.when(userDAO.findByName("test")).thenReturn(normalUser);
Mockito.when(passwordEncryptionService.authenticate(Mockito.anyString(), Mockito.any(byte[].class), Mockito.any(byte[].class)))
.thenReturn(true);
Mockito.doNothing().when(postLoginActivities).executeFor(Mockito.any(User.class));
userService.login("test", "password");
Mockito.verify(postLoginActivities).executeFor(normalUser);
} |
@CheckForNull
@Override
public Set<Path> branchChangedFiles(String targetBranchName, Path rootBaseDir) {
return Optional.ofNullable(branchChangedFilesWithFileMovementDetection(targetBranchName, rootBaseDir))
.map(GitScmProvider::extractAbsoluteFilePaths)
.orElse(null);
} | @Test
public void branchChangedFiles_finds_branch_in_specific_origin() throws IOException, GitAPIException {
git.branchCreate().setName("b1").call();
git.checkout().setName("b1").call();
createAndCommitFile("file-b1");
Path worktree2 = temp.newFolder().toPath();
Git.cloneRepository()
.setURI(worktree.toString())
.setRemote("upstream")
.setDirectory(worktree2.toFile())
.call();
assertThat(newScmProvider().branchChangedFiles("upstream/master", worktree2))
.containsOnly(worktree2.resolve("file-b1"));
verifyNoInteractions(analysisWarnings);
} |
public ByteBuffer fetchOnePacket() throws IOException {
int readLen;
ByteBuffer result = defaultBuffer;
result.clear();
while (true) {
headerByteBuffer.clear();
readLen = readAll(headerByteBuffer);
if (readLen != PACKET_HEADER_LEN) {
// remote may have closed this channel
LOG.info("Failed to receive packet header, " +
"remote {} may have closed the channel.", remoteHostPortString);
return null;
}
if (packetId() != sequenceId) {
LOG.warn("receive packet sequence id[" + packetId() + "] want to get[" + sequenceId + "]");
throw new IOException("Bad packet sequence.");
}
int packetLen = packetLen();
if ((result.capacity() - result.position()) < packetLen) {
// byte buffer is not big enough; allocate a new one
ByteBuffer tmp;
if (packetLen < MAX_PHYSICAL_PACKET_LENGTH) {
// last packet; allocating just enough for this packet is fine
tmp = ByteBuffer.allocate(packetLen + result.position());
} else {
// more packets may follow; allocate room for two packets
tmp = ByteBuffer.allocate(2 * packetLen + result.position());
}
tmp.put(result.array(), 0, result.position());
result = tmp;
}
// read one physical packet
// before reading, set the limit so that exactly one packet is read
result.limit(result.position() + packetLen);
readLen = readAll(result);
if (readLen != packetLen) {
LOG.warn("Length of received packet content(" + readLen
+ ") is not equal with length in head.(" + packetLen + ")");
return null;
}
accSequenceId();
if (packetLen != MAX_PHYSICAL_PACKET_LENGTH) {
result.flip();
break;
}
}
return result;
} | @Test(expected = IOException.class)
public void testException() throws IOException {
// mock
new Expectations() {
{
channel.read((ByteBuffer) any);
minTimes = 0;
result = new IOException();
}
};
MysqlChannel channel1 = new MysqlChannel(channel);
ByteBuffer buf = channel1.fetchOnePacket();
Assert.fail("No Exception throws.");
} |
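For context on the packetId()/packetLen() helpers used in fetchOnePacket: a MySQL wire packet begins with a 4-byte header, a 3-byte little-endian payload length followed by a 1-byte sequence id. A minimal sketch of that decoding, assuming the four header bytes are already in a byte array:

// header[0..2] = payload length (little-endian), header[3] = sequence id
static int payloadLength(byte[] header) {
    return (header[0] & 0xFF)
            | ((header[1] & 0xFF) << 8)
            | ((header[2] & 0xFF) << 16);
}

static int sequenceId(byte[] header) {
    return header[3] & 0xFF;
}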
public void editConnection() {
try {
Collection<UIDatabaseConnection> connections = connectionsTable.getSelectedItems();
if ( connections != null && !connections.isEmpty() ) {
// Grab the first item in the list & send it to the database dialog
DatabaseMeta databaseMeta = ( (UIDatabaseConnection) connections.toArray()[0] ).getDatabaseMeta();
// Make sure this connection already exists and store its id for updating
ObjectId idDatabase = repository.getDatabaseID( databaseMeta.getName() );
if ( idDatabase == null ) {
MessageBox mb = new MessageBox( shell, SWT.ICON_ERROR | SWT.OK );
mb.setMessage( BaseMessages.getString(
PKG, "RepositoryExplorerDialog.Connection.Edit.DoesNotExists.Message" ) );
mb.setText( BaseMessages.getString(
PKG, "RepositoryExplorerDialog.Connection.Edit.DoesNotExists.Title" ) );
mb.open();
} else {
getDatabaseDialog().setDatabaseMeta( databaseMeta );
String dbName = getDatabaseDialog().open();
if ( dbName != null ) {
dbName = dbName.trim();
databaseMeta.setName( dbName );
databaseMeta.setDisplayName( dbName );
if ( !dbName.isEmpty() ) {
ObjectId idRenamed = repository.getDatabaseID( dbName );
if ( idRenamed == null || idRenamed.equals( idDatabase ) ) {
// renaming to a non-existing name, or updating the current one
repository.insertLogEntry( BaseMessages.getString(
PKG, "ConnectionsController.Message.UpdatingDatabase", databaseMeta.getName() ) );
repository.save( databaseMeta, Const.VERSION_COMMENT_EDIT_VERSION, null );
reloadLoadedJobsAndTransformations();
} else {
// trying to rename to an existing name - show error dialog
showAlreadyExistsMessage();
}
}
}
// We should be able to tell the difference between a cancel and an empty database name
//
// else {
// MessageBox mb = new MessageBox(shell, SWT.ICON_ERROR | SWT.OK);
// mb.setMessage(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Connection.Edit.MissingName.Message"));
// mb.setText(BaseMessages.getString(PKG, "RepositoryExplorerDialog.Connection.Edit.MissingName.Title"));
// mb.open();
// }
}
} else {
MessageBox mb = new MessageBox( shell, SWT.ICON_ERROR | SWT.OK );
mb.setMessage( BaseMessages.getString(
PKG, "RepositoryExplorerDialog.Connection.Edit.NoItemSelected.Message" ) );
mb
.setText( BaseMessages
.getString( PKG, "RepositoryExplorerDialog.Connection.Edit.NoItemSelected.Title" ) );
mb.open();
}
} catch ( KettleException e ) {
if ( mainController == null || !mainController.handleLostRepository( e ) ) {
new ErrorDialog( shell,
BaseMessages.getString( PKG, "RepositoryExplorerDialog.Connection.Create.UnexpectedError.Title" ),
BaseMessages.getString( PKG, "RepositoryExplorerDialog.Connection.Edit.UnexpectedError.Message" ), e );
}
} finally {
refreshConnectionList();
}
} | @Test
public void editConnection_NameExists_SameWithSpaces() throws Exception {
final String dbName = " name";
DatabaseMeta dbmeta = spy( new DatabaseMeta() );
dbmeta.setName( dbName );
List<UIDatabaseConnection> selectedConnection = singletonList( new UIDatabaseConnection( dbmeta, repository ) );
when( connectionsTable.<UIDatabaseConnection>getSelectedItems() ).thenReturn( selectedConnection );
when( repository.getDatabaseID( dbName ) ).thenReturn( new StringObjectId( "existing" ) );
when( databaseDialog.open() ).thenReturn( dbName );
controller.editConnection();
verify( dbmeta ).setName( dbName.trim() );
} |
@Override
public void upgrade() {
if (clusterConfigService.get(MigrationCompleted.class) != null) {
LOG.debug("Migration already completed!");
return;
}
final List<SearchPivotLimitMigration> pivotLimitMigrations = StreamSupport.stream(this.searches.find().spliterator(), false)
.flatMap(document -> {
final String searchId = document.get("_id", ObjectId.class).toHexString();
final List<Document> queries = document.get("queries", Collections.emptyList());
return EntryStream.of(queries)
.flatMap(entry -> {
final Integer queryIndex = entry.getKey();
final List<Document> searchTypes = entry.getValue().get("search_types", Collections.emptyList());
return EntryStream.of(searchTypes)
.filter(searchType -> "pivot".equals(searchType.getValue().getString("type")))
.flatMap(searchTypeEntry -> {
final Document searchType = searchTypeEntry.getValue();
final Integer searchTypeIndex = searchTypeEntry.getKey();
final boolean hasRowLimit = searchType.containsKey("row_limit");
final boolean hasColumnLimit = searchType.containsKey("column_limit");
final Optional<Integer> rowLimit = Optional.ofNullable(searchType.getInteger("row_limit"));
final Optional<Integer> columnLimit = Optional.ofNullable(searchType.getInteger("column_limit"));
if (searchTypeIndex != null && (hasRowLimit || hasColumnLimit)) {
return Stream.of(new SearchPivotLimitMigration(searchId, queryIndex, searchTypeIndex, rowLimit, columnLimit));
}
return Stream.empty();
});
});
})
.collect(Collectors.toList());
final List<WriteModel<Document>> operations = pivotLimitMigrations.stream()
.flatMap(pivotMigration -> {
final ImmutableList.Builder<WriteModel<Document>> builder = ImmutableList.builder();
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$unset", doc(pivotPath(pivotMigration) + ".row_limit", 1))
)
);
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$set", doc(pivotPath(pivotMigration) + ".row_groups.$[pivot].limit", pivotMigration.rowLimit.orElse(DEFAULT_LIMIT))),
matchValuePivots
)
);
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$unset", doc(pivotPath(pivotMigration) + ".column_limit", 1))
)
);
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$set", doc(pivotPath(pivotMigration) + ".column_groups.$[pivot].limit", pivotMigration.columnLimit.orElse(DEFAULT_LIMIT))),
matchValuePivots
)
);
return builder.build().stream();
})
.collect(Collectors.toList());
if (!operations.isEmpty()) {
LOG.debug("Updating {} search types ...", pivotLimitMigrations.size());
this.searches.bulkWrite(operations);
}
clusterConfigService.write(new MigrationCompleted(pivotLimitMigrations.size()));
} | @Test
@MongoDBFixtures("V20230113095301_MigrateGlobalPivotLimitsToGroupingsInSearchesTest_simpleSearch.json")
void migratingSimpleView() {
this.migration.upgrade();
assertThat(migrationCompleted().migratedSearchTypes()).isEqualTo(1);
final Document document = this.collection.find().first();
assertThat(document).isNotNull();
final Document searchType = getSearchTypes(document).get(0);
assertThat(limits(rowGroups(searchType))).containsExactly(10);
assertThat(limits(columnGroups(searchType))).isEmpty();
assertThatFieldsAreUnset(searchType);
} |
public static String normalize(String url) {
return normalize(url, false);
} | @Test
public void normalizeIpv6Test() {
String url = "http://[fe80::8f8:2022:a603:d180]:9439";
String normalize = URLUtil.normalize("http://[fe80::8f8:2022:a603:d180]:9439", true);
assertEquals(url, normalize);
} |
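The expectation here is that normalization keeps the square brackets delimiting an IPv6 host. java.net.URI treats bracketed IPv6 literals the same way, as this standalone check shows:

import java.net.URI;

URI uri = URI.create("http://[fe80::8f8:2022:a603:d180]:9439");
uri.getHost(); // "[fe80::8f8:2022:a603:d180]" - the brackets are part of the host
uri.getPort(); // 9439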
public static void print(Context context) {
SINGLETON.print(context, 0);
} | @Test
public void testWithException() {
Status s0 = new ErrorStatus("test0", this);
Status s1 = new InfoStatus("test1", this, new Exception("testEx"));
Status s11 = new InfoStatus("test11", this);
Status s12 = new InfoStatus("test12", this);
s1.add(s11);
s1.add(s12);
Status s2 = new InfoStatus("test2", this);
Status s21 = new InfoStatus("test21", this);
Status s211 = new WarnStatus("test211", this);
Status s22 = new InfoStatus("test22", this);
s2.add(s21);
s2.add(s22);
s21.add(s211);
Context context = new ContextBase();
context.getStatusManager().add(s0);
context.getStatusManager().add(s1);
context.getStatusManager().add(s2);
StatusPrinter.print(context);
String result = outputStream.toString();
assertTrue(result.contains("|-ERROR in " + this.getClass().getName()));
assertTrue(result.contains("+ INFO in " + this.getClass().getName()));
assertTrue(result.contains("ch.qos.logback.core.util.StatusPrinterTest.testWithException"));
} |
@Udf
public String concatWS(
@UdfParameter(description = "Separator string and values to join") final String... inputs) {
if (inputs == null || inputs.length < 2) {
throw new KsqlFunctionException("Function Concat_WS expects at least two input arguments.");
}
final String separator = inputs[0];
if (separator == null) {
return null;
}
return Arrays.stream(inputs, 1,
inputs.length)
.filter(Objects::nonNull)
.collect(Collectors.joining(separator));
} | @Test
public void shouldWorkWithEmptySeparator() {
assertThat(udf.concatWS("", "foo", "bar"), is("foobar"));
assertThat(udf.concatWS(EMPTY_BYTES, ByteBuffer.wrap(new byte[] {1}), ByteBuffer.wrap(new byte[] {2})),
is(ByteBuffer.wrap(new byte[] {1, 2})));
} |
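Two edge cases of the implementation above that this test does not exercise, shown as a usage sketch (same udf instance as in the test):

udf.concatWS(null, "foo", "bar");      // null separator -> whole result is null
udf.concatWS(",", "foo", null, "bar"); // null values are filtered out -> "foo,bar"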
@Override
public void writeAndFlush(final Object obj) {
Future future = writeQueue.enqueue(obj);
future.addListener((FutureListener) future1 -> {
if (!future1.isSuccess()) {
Throwable throwable = future1.cause();
LOGGER.error("Failed to send to "
+ NetUtils.channelToString(localAddress(), remoteAddress())
+ " for msg : " + obj
+ ", Cause by:", throwable);
}
});
} | @Test
public void testRunSuccess() throws Exception {
nettyChannel.writeAndFlush("111");
Mockito.verify(mockWriteQueue).enqueue("111");
ArgumentCaptor<GenericFutureListener> captor = ArgumentCaptor.forClass(GenericFutureListener.class);
Mockito.verify(mockFuture).addListener(captor.capture());
// Simulate the FutureListener callback
GenericFutureListener<Future<Object>> listener = captor.getValue();
listener.operationComplete((Future) mockFuture);
// Verify that no error was logged (the operation succeeded)
} |
@Override
public void configureAuthenticationConfig(AuthenticationConfig authConfig,
Optional<FunctionAuthData> functionAuthData) {
if (!functionAuthData.isPresent()) {
// if auth data is not present, the user may be relying on the anonymous role, so don't pass any auth config
authConfig.setClientAuthenticationPlugin(null);
authConfig.setClientAuthenticationParameters(null);
} else {
authConfig.setClientAuthenticationPlugin(AuthenticationToken.class.getName());
authConfig.setClientAuthenticationParameters(Paths.get(DEFAULT_SECRET_MOUNT_DIR, FUNCTION_AUTH_TOKEN)
.toUri().toString());
// if we have CA bytes, point the config at the mounted CA cert path
if (this.caBytes != null) {
authConfig.setTlsTrustCertsFilePath(String.format("%s/%s", DEFAULT_SECRET_MOUNT_DIR, FUNCTION_CA_CERT));
}
}
} | @Test
public void configureAuthenticationConfig() {
byte[] testBytes = new byte[]{0, 1, 2, 3, 4};
CoreV1Api coreV1Api = mock(CoreV1Api.class);
KubernetesSecretsTokenAuthProvider kubernetesSecretsTokenAuthProvider = new KubernetesSecretsTokenAuthProvider();
kubernetesSecretsTokenAuthProvider.initialize(coreV1Api, testBytes, (fd) -> "default");
AuthenticationConfig authenticationConfig = AuthenticationConfig.builder().build();
FunctionAuthData functionAuthData = FunctionAuthData.builder().data("foo".getBytes()).build();
kubernetesSecretsTokenAuthProvider.configureAuthenticationConfig(authenticationConfig, Optional.of(functionAuthData));
Assert.assertEquals(authenticationConfig.getClientAuthenticationPlugin(), AuthenticationToken.class.getName());
Assert.assertEquals(authenticationConfig.getClientAuthenticationParameters(), "file:///etc/auth/token");
Assert.assertEquals(authenticationConfig.getTlsTrustCertsFilePath(), "/etc/auth/ca.pem");
} |
@Override
public boolean emitMetric(SinglePointMetric metric) {
if (!shouldEmitMetric(metric)) {
return false;
}
emitted.add(metric);
return true;
} | @Test
public void testEmitMetric() {
Predicate<? super MetricKeyable> selector = ClientTelemetryUtils.getSelectorFromRequestedMetrics(
Collections.singletonList("name"));
ClientTelemetryEmitter emitter = new ClientTelemetryEmitter(selector, true);
SinglePointMetric gauge = SinglePointMetric.gauge(metricKey, Long.valueOf(1), now, Collections.emptySet());
SinglePointMetric sum = SinglePointMetric.sum(metricKey, 1.0, true, now, Collections.emptySet());
assertTrue(emitter.emitMetric(gauge));
assertTrue(emitter.emitMetric(sum));
MetricKey anotherKey = new MetricKey("io.name", Collections.emptyMap());
assertFalse(emitter.emitMetric(SinglePointMetric.gauge(anotherKey, Long.valueOf(1), now, Collections.emptySet())));
assertEquals(2, emitter.emittedMetrics().size());
assertEquals(Arrays.asList(gauge, sum), emitter.emittedMetrics());
} |
@Override
public Boolean login(Properties properties) {
if (ramContext.validate()) {
return true;
}
loadRoleName(properties);
loadAccessKey(properties);
loadSecretKey(properties);
loadRegionId(properties);
return true;
} | @Test
void testLoginWithRoleName() {
assertTrue(ramClientAuthService.login(roleProperties));
assertNull(ramContext.getAccessKey(), PropertyKeyConst.ACCESS_KEY);
assertNull(ramContext.getSecretKey(), PropertyKeyConst.SECRET_KEY);
assertEquals(PropertyKeyConst.RAM_ROLE_NAME, ramContext.getRamRoleName());
assertTrue(ramClientAuthService.login(akSkProperties));
assertNull(ramContext.getAccessKey(), PropertyKeyConst.ACCESS_KEY);
assertNull(ramContext.getSecretKey(), PropertyKeyConst.SECRET_KEY);
assertEquals(PropertyKeyConst.RAM_ROLE_NAME, ramContext.getRamRoleName());
} |
@Override
public void close() {
watchCache.forEach((key, configFileChangeListener) -> {
if (Objects.nonNull(configFileChangeListener)) {
final ConfigFile configFile = configFileService.getConfigFile(polarisConfig.getNamespace(), polarisConfig.getFileGroup(), key);
configFile.removeChangeListener(configFileChangeListener);
}
});
} | @Test
public void testClose() {
polarisSyncDataService.close();
} |
public List<AbstractResultMessage> getResultMessages() {
return resultMessages;
} | @Test
void getResultMessages() {
BatchResultMessage batchResultMessage = new BatchResultMessage();
Assertions.assertTrue(batchResultMessage.getResultMessages().isEmpty());
} |
@VisibleForTesting
static JibContainerBuilder processCommonConfiguration(
RawConfiguration rawConfiguration,
InferredAuthProvider inferredAuthProvider,
ProjectProperties projectProperties)
throws InvalidFilesModificationTimeException, InvalidAppRootException,
IncompatibleBaseImageJavaVersionException, IOException, InvalidImageReferenceException,
InvalidContainerizingModeException, MainClassInferenceException, InvalidPlatformException,
InvalidContainerVolumeException, InvalidWorkingDirectoryException,
InvalidCreationTimeException, ExtraDirectoryNotFoundException {
// Create and configure JibContainerBuilder
ModificationTimeProvider modificationTimeProvider =
createModificationTimeProvider(rawConfiguration.getFilesModificationTime());
JavaContainerBuilder javaContainerBuilder =
getJavaContainerBuilderWithBaseImage(
rawConfiguration, projectProperties, inferredAuthProvider)
.setAppRoot(getAppRootChecked(rawConfiguration, projectProperties))
.setModificationTimeProvider(modificationTimeProvider);
JibContainerBuilder jibContainerBuilder =
projectProperties.createJibContainerBuilder(
javaContainerBuilder,
getContainerizingModeChecked(rawConfiguration, projectProperties));
jibContainerBuilder
.setFormat(rawConfiguration.getImageFormat())
.setPlatforms(getPlatformsSet(rawConfiguration))
.setEntrypoint(computeEntrypoint(rawConfiguration, projectProperties, jibContainerBuilder))
.setProgramArguments(rawConfiguration.getProgramArguments().orElse(null))
.setEnvironment(rawConfiguration.getEnvironment())
.setExposedPorts(Ports.parse(rawConfiguration.getPorts()))
.setVolumes(getVolumesSet(rawConfiguration))
.setLabels(rawConfiguration.getLabels())
.setUser(rawConfiguration.getUser().orElse(null))
.setCreationTime(getCreationTime(rawConfiguration.getCreationTime(), projectProperties));
getWorkingDirectoryChecked(rawConfiguration)
.ifPresent(jibContainerBuilder::setWorkingDirectory);
// Adds all the extra files.
for (ExtraDirectoriesConfiguration extraDirectory : rawConfiguration.getExtraDirectories()) {
Path from = extraDirectory.getFrom();
if (Files.exists(from)) {
jibContainerBuilder.addFileEntriesLayer(
JavaContainerBuilderHelper.extraDirectoryLayerConfiguration(
from,
AbsoluteUnixPath.get(extraDirectory.getInto()),
extraDirectory.getIncludesList(),
extraDirectory.getExcludesList(),
rawConfiguration.getExtraDirectoryPermissions(),
modificationTimeProvider));
} else if (!from.endsWith(DEFAULT_JIB_DIR)) {
throw new ExtraDirectoryNotFoundException(from.toString(), from.toString());
}
}
return jibContainerBuilder;
} | @Test
public void testEntrypoint_warningOnExpandClasspathDependenciesForWar()
throws IOException, InvalidCreationTimeException, InvalidImageReferenceException,
IncompatibleBaseImageJavaVersionException, InvalidPlatformException,
InvalidContainerVolumeException, MainClassInferenceException, InvalidAppRootException,
InvalidWorkingDirectoryException, InvalidFilesModificationTimeException,
InvalidContainerizingModeException, ExtraDirectoryNotFoundException {
when(rawConfiguration.getExpandClasspathDependencies()).thenReturn(true);
when(projectProperties.isWarProject()).thenReturn(true);
ContainerBuildPlan buildPlan = processCommonConfiguration();
assertThat(buildPlan.getEntrypoint())
.containsExactly("java", "-jar", "/usr/local/jetty/start.jar", "--module=ee10-deploy")
.inOrder();
verify(projectProperties)
.log(
LogEvent.warn(
"mainClass, extraClasspath, jvmFlags, and expandClasspathDependencies "
+ "are ignored for WAR projects"));
} |
public static <T> Write<T> write(String jdbcUrl, String table) {
return new AutoValue_ClickHouseIO_Write.Builder<T>()
.jdbcUrl(jdbcUrl)
.table(table)
.properties(new Properties())
.maxInsertBlockSize(DEFAULT_MAX_INSERT_BLOCK_SIZE)
.initialBackoff(DEFAULT_INITIAL_BACKOFF)
.maxRetries(DEFAULT_MAX_RETRIES)
.maxCumulativeBackoff(DEFAULT_MAX_CUMULATIVE_BACKOFF)
.build()
.withInsertDeduplicate(true)
.withInsertDistributedSync(true);
} | @Test
public void testInt64() throws Exception {
Schema schema =
Schema.of(Schema.Field.of("f0", FieldType.INT64), Schema.Field.of("f1", FieldType.INT64));
Row row1 = Row.withSchema(schema).addValue(1L).addValue(2L).build();
Row row2 = Row.withSchema(schema).addValue(2L).addValue(4L).build();
Row row3 = Row.withSchema(schema).addValue(3L).addValue(6L).build();
executeSql("CREATE TABLE test_int64 (f0 Int64, f1 Int64) ENGINE=Log");
pipeline.apply(Create.of(row1, row2, row3).withRowSchema(schema)).apply(write("test_int64"));
pipeline.run().waitUntilFinish();
long sum0 = executeQueryAsLong("SELECT SUM(f0) FROM test_int64");
long sum1 = executeQueryAsLong("SELECT SUM(f1) FROM test_int64");
assertEquals(6L, sum0);
assertEquals(12L, sum1);
} |
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations()
&& (node.has("minLength") || node.has("maxLength"))
&& isApplicableType(field)) {
final Class<? extends Annotation> sizeClass
= ruleFactory.getGenerationConfig().isUseJakartaValidation()
? Size.class
: javax.validation.constraints.Size.class;
JAnnotationUse annotation = field.annotate(sizeClass);
if (node.has("minLength")) {
annotation.param("min", node.get("minLength").asInt());
}
if (node.has("maxLength")) {
annotation.param("max", node.get("maxLength").asInt());
}
}
return field;
} | @Test
public void jsrDisable() {
when(config.isIncludeJsr303Annotations()).thenReturn(false);
JFieldVar result = rule.apply("node", node, null, fieldVar, null);
assertSame(fieldVar, result);
verify(fieldVar, never()).annotate(sizeClass);
verify(annotation, never()).param(anyString(), anyInt());
} |
private boolean rename(ChannelSftp channel, Path src, Path dst)
throws IOException {
Path workDir;
try {
workDir = new Path(channel.pwd());
} catch (SftpException e) {
throw new IOException(e);
}
Path absoluteSrc = makeAbsolute(workDir, src);
Path absoluteDst = makeAbsolute(workDir, dst);
if (!exists(channel, absoluteSrc)) {
throw new IOException(String.format(E_SPATH_NOTEXIST, src));
}
if (exists(channel, absoluteDst)) {
throw new IOException(String.format(E_DPATH_EXIST, dst));
}
boolean renamed = true;
try {
final String previousCwd = channel.pwd();
channel.cd("/");
channel.rename(src.toUri().getPath(), dst.toUri().getPath());
channel.cd(previousCwd);
} catch (SftpException e) {
renamed = false;
}
return renamed;
} | @Test(expected=java.io.IOException.class)
public void testRenameNonExistFile() throws Exception {
Path file1 = new Path(localDir, name.getMethodName().toLowerCase() + "1");
Path file2 = new Path(localDir, name.getMethodName().toLowerCase() + "2");
sftpFs.rename(file1, file2);
} |
public AggregateAnalysisResult analyze(
final ImmutableAnalysis analysis,
final List<SelectExpression> finalProjection
) {
if (!analysis.getGroupBy().isPresent()) {
throw new IllegalArgumentException("Not an aggregate query");
}
final AggAnalyzer aggAnalyzer = new AggAnalyzer(analysis, functionRegistry);
aggAnalyzer.process(finalProjection);
return aggAnalyzer.result();
} | @Test
public void shouldNotCaptureNonAggregateGroupByFunction() {
// given:
givenGroupByExpressions(FUNCTION_CALL);
// When:
final AggregateAnalysisResult result = analyzer.analyze(analysis, selects);
// Then:
assertThat(result.getAggregateFunctions(), contains(REQUIRED_AGG_FUNC_CALL));
} |
public static String validateColumnName(@Nullable String columnName) {
String name = requireNonNull(columnName, "Column name cannot be null");
checkDbIdentifierCharacters(columnName, "Column name");
return name;
} | @Test
public void fail_with_NPE_if_name_is_null() {
assertThatThrownBy(() -> validateColumnName(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("Column name cannot be null");
} |
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
} | @Test
public void shouldCastDoubleNegative() {
// When:
final BigDecimal decimal = DecimalUtil.cast(-1.1, 2, 1);
// Then:
assertThat(decimal, is(new BigDecimal("-1.1")));
} |
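Two properties of the Integer overload that the test does not cover: null passes through as null, and non-null values are widened to long before delegating. A sketch (the expected 5.0 assumes the long overload rescales the same way the double overload does in the test):

DecimalUtil.cast((Integer) null, 2, 1); // -> null, no exception
DecimalUtil.cast(5, 2, 1);              // delegates to cast(5L, 2, 1); expected 5.0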
@Override
public void apply(IntentOperationContext<FlowObjectiveIntent> intentOperationContext) {
Objects.requireNonNull(intentOperationContext);
Optional<IntentData> toUninstall = intentOperationContext.toUninstall();
Optional<IntentData> toInstall = intentOperationContext.toInstall();
List<FlowObjectiveIntent> uninstallIntents = intentOperationContext.intentsToUninstall();
List<FlowObjectiveIntent> installIntents = intentOperationContext.intentsToInstall();
if (!toInstall.isPresent() && !toUninstall.isPresent()) {
intentInstallCoordinator.intentInstallSuccess(intentOperationContext);
return;
}
if (toUninstall.isPresent()) {
IntentData intentData = toUninstall.get();
trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
uninstallIntents.forEach(installable ->
trackerService.removeTrackedResources(intentData.intent().key(),
installable.resources()));
}
if (toInstall.isPresent()) {
IntentData intentData = toInstall.get();
trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
installIntents.forEach(installable ->
trackerService.addTrackedResources(intentData.key(),
installable.resources()));
}
FlowObjectiveIntentInstallationContext intentInstallationContext =
new FlowObjectiveIntentInstallationContext(intentOperationContext);
uninstallIntents.stream()
.map(intent -> buildObjectiveContexts(intent, REMOVE))
.flatMap(Collection::stream)
.forEach(context -> {
context.intentInstallationContext(intentInstallationContext);
intentInstallationContext.addContext(context);
intentInstallationContext.addPendingContext(context);
});
installIntents.stream()
.map(intent -> buildObjectiveContexts(intent, ADD))
.flatMap(Collection::stream)
.forEach(context -> {
context.intentInstallationContext(intentInstallationContext);
intentInstallationContext.addContext(context);
intentInstallationContext.addNextPendingContext(context);
});
intentInstallationContext.apply();
} | @Test
public void testFlowInstallationFailedErrorUnderThreshold() {
// And retry two times and success
intentInstallCoordinator = new TestIntentInstallCoordinator();
installer.intentInstallCoordinator = intentInstallCoordinator;
errors = ImmutableList.of(FLOWINSTALLATIONFAILED, FLOWINSTALLATIONFAILED);
installer.flowObjectiveService = new TestFailedFlowObjectiveService(errors);
context = createInstallContext();
installer.apply(context);
successContext = intentInstallCoordinator.successContext;
assertEquals(successContext, context);
} |
@Override
public void removeConfigInfo4Beta(final String dataId, final String group, final String tenant) {
final String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
tjt.execute(status -> {
try {
ConfigInfoStateWrapper configInfo = findConfigInfo4BetaState(dataId, group, tenant);
if (configInfo != null) {
ConfigInfoBetaMapper configInfoBetaMapper = mapperManager.findMapper(
dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_BETA);
jt.update(configInfoBetaMapper.delete(Arrays.asList("data_id", "group_id", "tenant_id")), dataId,
group, tenantTmp);
}
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
return Boolean.TRUE;
});
} | @Test
void testRemoveConfigInfo4Beta() {
String dataId = "dataId456789";
String group = "group4567";
String tenant = "tenant56789o0";
// mock an existing beta config
ConfigInfoStateWrapper mockedConfigInfoStateWrapper = new ConfigInfoStateWrapper();
mockedConfigInfoStateWrapper.setDataId(dataId);
mockedConfigInfoStateWrapper.setGroup(group);
mockedConfigInfoStateWrapper.setTenant(tenant);
mockedConfigInfoStateWrapper.setId(123456L);
mockedConfigInfoStateWrapper.setLastModified(System.currentTimeMillis());
when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(mockedConfigInfoStateWrapper);
externalConfigInfoBetaPersistService.removeConfigInfo4Beta(dataId, group, tenant);
// verify
Mockito.verify(jdbcTemplate, times(1)).update(anyString(), eq(dataId), eq(group), eq(tenant));
// mock the query throwing CannotGetJdbcConnectionException
when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenThrow(new CannotGetJdbcConnectionException("mock fail11111"));
try {
externalConfigInfoBetaPersistService.removeConfigInfo4Beta(dataId, group, tenant);
assertTrue(false);
} catch (Exception exception) {
assertEquals("mock fail11111", exception.getMessage());
}
} |
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public RouteContext route(final ConnectionContext connectionContext, final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database) {
RouteContext result = new RouteContext();
Optional<String> dataSourceName = findDataSourceByHint(queryContext.getHintValueContext(), database.getResourceMetaData().getStorageUnits());
if (dataSourceName.isPresent()) {
result.getRouteUnits().add(new RouteUnit(new RouteMapper(dataSourceName.get(), dataSourceName.get()), Collections.emptyList()));
return result;
}
for (Entry<ShardingSphereRule, SQLRouter> entry : routers.entrySet()) {
if (result.getRouteUnits().isEmpty() && entry.getValue() instanceof EntranceSQLRouter) {
result = ((EntranceSQLRouter) entry.getValue()).createRouteContext(queryContext, globalRuleMetaData, database, entry.getKey(), props, connectionContext);
} else if (entry.getValue() instanceof DecorateSQLRouter) {
((DecorateSQLRouter) entry.getValue()).decorateRouteContext(result, queryContext, database, entry.getKey(), props, connectionContext);
}
}
if (result.getRouteUnits().isEmpty() && 1 == database.getResourceMetaData().getStorageUnits().size()) {
String singleDataSourceName = database.getResourceMetaData().getStorageUnits().keySet().iterator().next();
result.getRouteUnits().add(new RouteUnit(new RouteMapper(singleDataSourceName, singleDataSourceName), Collections.emptyList()));
}
return result;
} | @Test
void assertRouteByHintManagerHintWithException() {
try (HintManager hintManager = HintManager.getInstance()) {
hintManager.setDataSourceName("ds-3");
QueryContext logicSQL = new QueryContext(commonSQLStatementContext, "", Collections.emptyList(), new HintValueContext(), connectionContext, metaData);
assertThrows(DataSourceHintNotExistsException.class, () -> partialSQLRouteExecutor.route(connectionContext, logicSQL, mock(RuleMetaData.class), database));
}
} |
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
selectedBundlesCache.clear();
Map<String, BrokerData> brokersData = loadData.getBrokerData();
Map<String, BundleData> loadBundleData = loadData.getBundleDataForLoadShedding();
Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
MutableObject<String> msgRateOverloadedBroker = new MutableObject<>();
MutableObject<String> msgThroughputOverloadedBroker = new MutableObject<>();
MutableObject<String> msgRateUnderloadedBroker = new MutableObject<>();
MutableObject<String> msgThroughputUnderloadedBroker = new MutableObject<>();
MutableDouble maxMsgRate = new MutableDouble(-1);
MutableDouble maxThroughput = new MutableDouble(-1);
MutableDouble minMsgRate = new MutableDouble(Integer.MAX_VALUE);
MutableDouble minThroughput = new MutableDouble(Integer.MAX_VALUE);
brokersData.forEach((broker, data) -> {
double msgRate = data.getLocalData().getMsgRateIn() + data.getLocalData().getMsgRateOut();
double throughputRate = data.getLocalData().getMsgThroughputIn()
+ data.getLocalData().getMsgThroughputOut();
if (msgRate > maxMsgRate.getValue()) {
msgRateOverloadedBroker.setValue(broker);
maxMsgRate.setValue(msgRate);
}
if (throughputRate > maxThroughput.getValue()) {
msgThroughputOverloadedBroker.setValue(broker);
maxThroughput.setValue(throughputRate);
}
if (msgRate < minMsgRate.getValue()) {
msgRateUnderloadedBroker.setValue(broker);
minMsgRate.setValue(msgRate);
}
if (throughputRate < minThroughput.getValue()) {
msgThroughputUnderloadedBroker.setValue(broker);
minThroughput.setValue(throughputRate);
}
});
// Find the difference between the two brokers based on msgRate and throughput, and check whether the
// load distribution discrepancy exceeds the threshold. If it does, try to unload bundles from the
// overloaded broker to move toward a uniform load distribution.
if (minMsgRate.getValue() <= EPS && minMsgRate.getValue() >= -EPS) {
minMsgRate.setValue(1.0);
}
if (minThroughput.getValue() <= EPS && minThroughput.getValue() >= -EPS) {
minThroughput.setValue(1.0);
}
double msgRateDifferencePercentage = ((maxMsgRate.getValue() - minMsgRate.getValue()) * 100)
/ (minMsgRate.getValue());
double msgThroughputDifferenceRate = maxThroughput.getValue() / minThroughput.getValue();
// If a threshold is exceeded, work out how much load needs to be unloaded, considering both message
// rate and throughput.
boolean isMsgRateThresholdExceeded = conf.getLoadBalancerMsgRateDifferenceShedderThreshold() > 0
&& msgRateDifferencePercentage > conf.getLoadBalancerMsgRateDifferenceShedderThreshold();
boolean isMsgThroughputThresholdExceeded = conf
.getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold() > 0
&& msgThroughputDifferenceRate > conf
.getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold();
if (isMsgRateThresholdExceeded || isMsgThroughputThresholdExceeded) {
MutableInt msgRateRequiredFromUnloadedBundles = new MutableInt(
(int) ((maxMsgRate.getValue() - minMsgRate.getValue()) * conf.getMaxUnloadPercentage()));
MutableInt msgThroughputRequiredFromUnloadedBundles = new MutableInt(
(int) ((maxThroughput.getValue() - minThroughput.getValue())
* conf.getMaxUnloadPercentage()));
if (isMsgRateThresholdExceeded) {
if (log.isDebugEnabled()) {
log.debug("Found bundles for uniform load balancing. "
+ "msgRate overloaded broker: {} with msgRate: {}, "
+ "msgRate underloaded broker: {} with msgRate: {}",
msgRateOverloadedBroker.getValue(), maxMsgRate.getValue(),
msgRateUnderloadedBroker.getValue(), minMsgRate.getValue());
}
LocalBrokerData overloadedBrokerData =
brokersData.get(msgRateOverloadedBroker.getValue()).getLocalData();
if (overloadedBrokerData.getBundles().size() > 1
&& (msgRateRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessage())) {
// Sort bundles by msgRate, then pick the bundles that best help to reduce load uniformly
// relative to the under-loaded broker
loadBundleData.entrySet().stream()
.filter(e -> overloadedBrokerData.getBundles().contains(e.getKey()))
.map((e) -> {
String bundle = e.getKey();
TimeAverageMessageData shortTermData = e.getValue().getShortTermData();
double msgRate = shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut();
return Pair.of(bundle, msgRate);
}).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft()))
.sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> {
if (conf.getMaxUnloadBundleNumPerShedding() != -1
&& selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) {
return;
}
String bundle = e.getLeft();
double bundleMsgRate = e.getRight();
if (bundleMsgRate <= (msgRateRequiredFromUnloadedBundles.getValue()
+ 1000/* delta */)) {
log.info("Found bundle to unload with msgRate {}", bundleMsgRate);
msgRateRequiredFromUnloadedBundles.add(-bundleMsgRate);
selectedBundlesCache.put(msgRateOverloadedBroker.getValue(), bundle);
}
});
}
} else {
if (log.isDebugEnabled()) {
log.debug("Found bundles for uniform load balancing. "
+ "msgThroughput overloaded broker: {} with msgThroughput {}, "
+ "msgThroughput underloaded broker: {} with msgThroughput: {}",
msgThroughputOverloadedBroker.getValue(), maxThroughput.getValue(),
msgThroughputUnderloadedBroker.getValue(), minThroughput.getValue());
}
LocalBrokerData overloadedBrokerData =
brokersData.get(msgThroughputOverloadedBroker.getValue()).getLocalData();
if (overloadedBrokerData.getBundles().size() > 1
&&
msgThroughputRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessageThroughput()) {
// Sort bundles by throughput, then pick the bundles that best help to reduce load uniformly
// relative to the under-loaded broker
loadBundleData.entrySet().stream()
.filter(e -> overloadedBrokerData.getBundles().contains(e.getKey()))
.map((e) -> {
String bundle = e.getKey();
TimeAverageMessageData shortTermData = e.getValue().getShortTermData();
double msgThroughput = shortTermData.getMsgThroughputIn()
+ shortTermData.getMsgThroughputOut();
return Pair.of(bundle, msgThroughput);
}).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft()))
.sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> {
if (conf.getMaxUnloadBundleNumPerShedding() != -1
&& selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) {
return;
}
String bundle = e.getLeft();
double msgThroughput = e.getRight();
if (msgThroughput <= (msgThroughputRequiredFromUnloadedBundles.getValue()
+ 1000/* delta */)) {
log.info("Found bundle to unload with msgThroughput {}", msgThroughput);
msgThroughputRequiredFromUnloadedBundles.add(-msgThroughput);
selectedBundlesCache.put(msgThroughputOverloadedBroker.getValue(), bundle);
}
});
}
}
}
return selectedBundlesCache;
} | @Test
public void testMaxUnloadBundleNumPerShedding(){
conf.setMaxUnloadBundleNumPerShedding(2);
int numBundles = 20;
LoadData loadData = new LoadData();
LocalBrokerData broker1 = new LocalBrokerData();
LocalBrokerData broker2 = new LocalBrokerData();
String broker2Name = "broker2";
double brokerThroughput = 0;
for (int i = 1; i <= numBundles; ++i) {
broker1.getBundles().add("bundle-" + i);
BundleData bundle = new BundleData();
TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData();
double throughput = 1 * 1024 * 1024;
timeAverageMessageData.setMsgThroughputIn(throughput);
timeAverageMessageData.setMsgThroughputOut(throughput);
bundle.setShortTermData(timeAverageMessageData);
loadData.getBundleData().put("bundle-" + i, bundle);
brokerThroughput += throughput;
}
broker1.setMsgThroughputIn(brokerThroughput);
broker1.setMsgThroughputOut(brokerThroughput);
loadData.getBrokerData().put("broker-1", new BrokerData(broker1));
loadData.getBrokerData().put(broker2Name, new BrokerData(broker2));
Multimap<String, String> bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf);
assertEquals(bundlesToUnload.size(),2);
} |
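A quick worked example of the two trigger conditions in findBundlesForUnloading, using made-up numbers:

double maxMsgRate = 1500, minMsgRate = 500;
// (1500 - 500) * 100 / 500 = 200 (percent difference)
double msgRateDifferencePercentage = (maxMsgRate - minMsgRate) * 100 / minMsgRate;

double maxThroughput = 8e6, minThroughput = 2e6;
// 8 MB/s / 2 MB/s = 4 (throughput multiplier)
double msgThroughputDifferenceRate = maxThroughput / minThroughput;

// Shedding triggers when either value exceeds its configured threshold, e.g.
// loadBalancerMsgRateDifferenceShedderThreshold = 50 would trip on the 200% above.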
public static LocalDateTime parse(CharSequence text) {
return parse(text, (DateTimeFormatter) null);
} | @Test
public void parseOffsetTest() {
final LocalDateTime localDateTime = LocalDateTimeUtil.parse("2021-07-30T16:27:27+08:00", DateTimeFormatter.ISO_OFFSET_DATE_TIME);
assertEquals("2021-07-30T16:27:27", Objects.requireNonNull(localDateTime).toString());
} |
public void delete(String host) {
db.delete(TABLE_HOSTS, COLUMN_HOST + " = ?", new String[] { host });
} | @Test
public void testDelete() {
Hosts hosts = new Hosts(RuntimeEnvironment.application, "hosts.db");
hosts.put("ni.hao", "127.0.0.1");
assertEquals("ni.hao/127.0.0.1", hosts.get("ni.hao").toString());
hosts.put("ni.hao", "127.0.0.2");
hosts.delete("ni.hao");
assertEquals(null, hosts.get("ni.hao"));
hosts.delete(null);
} |
public void setIncludeCallerData(boolean includeCallerData) {
this.includeCallerData = includeCallerData;
} | @Test
public void settingIncludeCallerDataPropertyCausedCallerDataToBeIncluded() {
asyncAppender.addAppender(listAppender);
asyncAppender.setIncludeCallerData(true);
asyncAppender.start();
asyncAppender.doAppend(builder.build(diff));
asyncAppender.stop();
// check the event
assertEquals(1, listAppender.list.size());
ILoggingEvent e = listAppender.list.get(0);
assertTrue(e.hasCallerData());
StackTraceElement ste = e.getCallerData()[0];
assertEquals(thisClassName, ste.getClassName());
} |
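Caller data is off by default in logback's AsyncAppender because computing it forces a stack-trace capture per queued event; the test opts in explicitly. A minimal programmatic sketch of the same wiring (standalone, not the test's fixture):

import ch.qos.logback.classic.AsyncAppender;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.read.ListAppender;

LoggerContext ctx = new LoggerContext();
ListAppender<ILoggingEvent> downstream = new ListAppender<>();
downstream.setContext(ctx);
downstream.start();

AsyncAppender async = new AsyncAppender();
async.setContext(ctx);
async.setIncludeCallerData(true); // default is false: caller data costs a stack capture per event
async.addAppender(downstream);
async.start();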
public static void clean(
Object func, ExecutionConfig.ClosureCleanerLevel level, boolean checkSerializable) {
clean(func, level, checkSerializable, Collections.newSetFromMap(new IdentityHashMap<>()));
} | @Test
void testSelfReferencingClean() {
final NestedSelfReferencing selfReferencing = new NestedSelfReferencing();
ClosureCleaner.clean(selfReferencing, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
} |
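The IdentityHashMap-backed set passed by clean(...) is a visited set; it is what lets the recursive cleaner terminate on self-referencing graphs like the one in this test. A minimal sketch of the pattern (illustrative only, not Flink's actual traversal):

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.Set;

static void visit(Object obj, Set<Object> visited) {
    // Identity-based "already seen" check breaks reference cycles.
    if (obj == null || !visited.add(obj)) {
        return;
    }
    for (Field f : obj.getClass().getDeclaredFields()) {
        if (f.getType().isPrimitive() || Modifier.isStatic(f.getModifiers())) {
            continue;
        }
        f.setAccessible(true); // sketch; JDK 17+ may require --add-opens
        try {
            visit(f.get(obj), visited);
        } catch (ReflectiveOperationException ignored) {
            // sketch only
        }
    }
}

// Usage: visit(root, Collections.newSetFromMap(new IdentityHashMap<>()));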
@Override
public DdlCommand create(
final String sqlExpression,
final DdlStatement ddlStatement,
final SessionConfig config
) {
return FACTORIES
.getOrDefault(ddlStatement.getClass(), (statement, cf, ci) -> {
throw new KsqlException(
"Unable to find ddl command factory for statement:"
+ statement.getClass()
+ " valid statements:"
+ FACTORIES.keySet()
);
})
.handle(
this,
new CallInfo(sqlExpression, config),
ddlStatement);
} | @Test(expected = KsqlException.class)
public void shouldThrowOnUnsupportedStatementType() {
// Given:
final ExecutableDdlStatement ddlStatement = new ExecutableDdlStatement() {
};
// Then:
commandFactories.create(sqlExpression, ddlStatement, SessionConfig.of(ksqlConfig, emptyMap()));
} |
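create(...) above is a classic map-based dispatch with a throwing default handler. A minimal standalone sketch of the pattern (hypothetical handler map, unrelated to ksqlDB's factories):

import java.util.Map;
import java.util.function.Function;

class Dispatcher {
    private static final Map<Class<?>, Function<Object, String>> HANDLERS = Map.of(
            String.class, obj -> "string:" + obj,
            Integer.class, obj -> "int:" + obj);

    static String dispatch(Object stmt) {
        return HANDLERS.getOrDefault(stmt.getClass(), obj -> {
            throw new IllegalArgumentException(
                    "No handler for " + obj.getClass() + "; valid: " + HANDLERS.keySet());
        }).apply(stmt);
    }
}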
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) {
if (readerWay.hasTag("hazmat:adr_tunnel_cat", TUNNEL_CATEGORY_NAMES)) {
HazmatTunnel code = HazmatTunnel.valueOf(readerWay.getTag("hazmat:adr_tunnel_cat"));
hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, code);
} else if (readerWay.hasTag("hazmat:tunnel_cat", TUNNEL_CATEGORY_NAMES)) {
HazmatTunnel code = HazmatTunnel.valueOf(readerWay.getTag("hazmat:tunnel_cat"));
hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, code);
} else if (readerWay.hasTag("tunnel", "yes")) {
HazmatTunnel[] codes = HazmatTunnel.values();
for (int i = codes.length - 1; i >= 0; i--) {
if (readerWay.hasTag("hazmat:" + codes[i].name(), "no")) {
hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, codes[i]);
break;
}
}
}
} | @Test
public void testADRTunnelCat() {
EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
int edgeId = 0;
ReaderWay readerWay = new ReaderWay(1);
readerWay.setTag("hazmat:adr_tunnel_cat", "A");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.A, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("hazmat:adr_tunnel_cat", "B");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.B, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("hazmat:adr_tunnel_cat", "C");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.C, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("hazmat:adr_tunnel_cat", "D");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.D, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("hazmat:adr_tunnel_cat", "E");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.E, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
} |
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final java.nio.file.Path p = session.toPath(file);
final Set<OpenOption> options = new HashSet<>();
options.add(StandardOpenOption.WRITE);
if(status.isAppend()) {
if(!status.isExists()) {
options.add(StandardOpenOption.CREATE);
}
}
else {
if(status.isExists()) {
if(file.isSymbolicLink()) {
Files.delete(p);
options.add(StandardOpenOption.CREATE);
}
else {
options.add(StandardOpenOption.TRUNCATE_EXISTING);
}
}
else {
options.add(StandardOpenOption.CREATE_NEW);
}
}
final FileChannel channel = FileChannel.open(session.toPath(file), options.stream().toArray(OpenOption[]::new));
channel.position(status.getOffset());
return new VoidStatusOutputStream(Channels.newOutputStream(channel));
}
catch(IOException e) {
throw new LocalExceptionMappingService().map("Upload {0} failed", e, file);
}
} | @Test(expected = NotfoundException.class)
public void testWriteNotFound() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path workdir = new LocalHomeFinderFeature().find();
final Path test = new Path(workdir.getAbsolute() + "/nosuchdirectory/" + UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
new LocalWriteFeature(session).write(test, new TransferStatus(), new DisabledConnectionCallback());
} |
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.GET;
} | @Test
void testMethod() {
assertThat(instance.getHttpMethod()).isEqualTo(HttpMethodWrapper.GET);
} |
@Override
public long getMax() {
if (values.length == 0) {
return 0;
}
return values[values.length - 1];
} | @Test
public void calculatesTheMaximumValue() {
assertThat(snapshot.getMax())
.isEqualTo(5);
} |
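getMax() above is O(1) only because the snapshot keeps its backing array sorted in ascending order, so the maximum is the last element. A standalone illustration of the invariant (hypothetical values):

long[] values = {1, 2, 3, 4, 5};   // stored pre-sorted
long max = values.length == 0
        ? 0                        // empty-snapshot convention from getMax()
        : values[values.length - 1];
// max == 5, matching the test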
@Override
public RSet<V> get(K key) {
String keyHash = keyHash(key);
String setName = getValuesName(keyHash);
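// Back the returned view with the per-key value set; the timeout set is consulted for cache expiry.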
return new RedissonSetMultimapValues<>(codec, commandExecutor, setName, getTimeoutSetName(), key);
} | @Test
public void testValues() throws InterruptedException {
RMultimapCache<String, String> multimap = getMultimapCache("test");
multimap.put("1", "1");
multimap.put("1", "2");
multimap.put("1", "3");
multimap.put("1", "3");
assertThat(multimap.get("1").size()).isEqualTo(3);
assertThat(multimap.get("1")).containsExactlyInAnyOrder("1", "2", "3");
assertThat(multimap.get("1").remove("3")).isTrue();
assertThat(multimap.get("1").contains("3")).isFalse();
assertThat(multimap.get("1").contains("2")).isTrue();
assertThat(multimap.get("1").containsAll(Arrays.asList("1"))).isTrue();
assertThat(multimap.get("1").containsAll(Arrays.asList("1", "2"))).isTrue();
assertThat(multimap.get("1").retainAll(Arrays.asList("1"))).isTrue();
assertThat(multimap.get("1").removeAll(Arrays.asList("1"))).isTrue();
} |
@Override
public void reset() {
resetCount++;
super.reset();
initEvaluatorMap();
initCollisionMaps();
root.recursiveReset();
resetTurboFilterList();
cancelScheduledTasks();
fireOnReset();
resetListenersExceptResetResistant();
resetStatusListeners();
} | @Test
public void evaluatorMapPostReset() {
lc.reset();
assertNotNull(lc.getObject(CoreConstants.EVALUATOR_MAP));
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 2) {
onInvalidDataReceived(device, data);
return;
}
// Read flags
int offset = 0;
final int flags = data.getIntValue(Data.FORMAT_UINT8, offset);
final int heartRateType = (flags & 0x01) == 0 ? Data.FORMAT_UINT8 : Data.FORMAT_UINT16_LE;
final int sensorContactStatus = (flags & 0x06) >> 1;
final boolean sensorContactSupported = sensorContactStatus == 2 || sensorContactStatus == 3;
final boolean sensorContactDetected = sensorContactStatus == 3;
final boolean energyExpandedPresent = (flags & 0x08) != 0;
final boolean rrIntervalsPresent = (flags & 0x10) != 0;
offset += 1;
// Validate packet length
if (data.size() < 1 + (heartRateType & 0x0F)
+ (energyExpandedPresent ? 2 : 0)
+ (rrIntervalsPresent ? 2 : 0)) {
onInvalidDataReceived(device, data);
return;
}
// Prepare data
final Boolean sensorContact = sensorContactSupported ? sensorContactDetected : null;
final int heartRate = data.getIntValue(heartRateType, offset);
offset += heartRateType & 0x0F;
Integer energyExpanded = null;
if (energyExpandedPresent) {
energyExpanded = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
offset += 2;
}
List<Integer> rrIntervals = null;
if (rrIntervalsPresent) {
final int count = (data.size() - offset) / 2;
final List<Integer> intervals = new ArrayList<>(count);
for (int i = 0; i < count; ++i) {
intervals.add(data.getIntValue(Data.FORMAT_UINT16_LE, offset));
offset += 2;
}
rrIntervals = Collections.unmodifiableList(intervals);
}
onHeartRateMeasurementReceived(device, heartRate, sensorContact, energyExpanded, rrIntervals);
} | @Test
public void onHeartRateMeasurementReceived_noContact() {
success = false;
final Data data = new Data(new byte[] {0x4, (byte) 0xFF});
response.onDataReceived(null, data);
assertTrue(response.isValid());
assertTrue(success);
assertEquals(255, heartRate);
assertNotNull(contactDetected);
assertFalse(contactDetected);
assertNull(energyExpanded);
assertNull(rrIntervals);
} |
@Override
public String toString()
{
final ToStringBuilder builder = new ToStringBuilder(null, ToStringStyle.SHORT_PREFIX_STYLE)
.append("baseUriTemplate", _baseUriTemplate)
.append("pathKeys", _pathKeys)
.append("id", _id)
.append("queryParams", _queryParams);
return builder.toString();
} | @Test
public void testID()
{
final GetRequest<?> getRequest = Mockito.mock(GetRequest.class);
Mockito.when(getRequest.getBaseUriTemplate()).thenReturn(BASE_URI_TEMPLATE);
Mockito.when(getRequest.getPathKeys()).thenReturn(PATH_KEYS);
Mockito.when(getRequest.getObjectId()).thenReturn(ID);
Mockito.when(getRequest.getQueryParamsObjects()).thenReturn(QUERY_PARAMS_OBJECTS);
final RestliRequestUriSignature signature = new RestliRequestUriSignature(getRequest, RestliRequestUriSignature.ALL_FIELDS);
Assert.assertTrue(signature.toString().contains(ID.toString()));
} |
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
} | @SuppressWarnings({"unchecked", "rawtypes"})
@Test
void testTupleSupertype() {
RichMapFunction<?, ?> function =
new RichMapFunction<String, Tuple>() {
private static final long serialVersionUID = 1L;
@Override
public Tuple map(String value) throws Exception {
return null;
}
};
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function, (TypeInformation) Types.STRING, "name", true);
assertThat(ti).isInstanceOf(MissingTypeInfo.class);
assertThatThrownBy(
() ->
TypeExtractor.getMapReturnTypes(
function, (TypeInformation) Types.STRING))
.isInstanceOf(InvalidTypesException.class);
} |
public static String generateCommandKey(String interfaceId, Method method) {
StringBuilder builder = new StringBuilder(interfaceId)
.append("#")
.append(method.getName())
.append("(");
if (method.getParameterTypes().length > 0) {
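// Append each parameter's simple type name followed by a comma; the trailing comma is removed below.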
for (Class<?> parameterType : method.getParameterTypes()) {
builder.append(parameterType.getSimpleName()).append(",");
}
builder.deleteCharAt(builder.length() - 1);
}
return builder.append(")").toString();
} | @Test
public void testGenerateCommandKey() {
for (Method method : TestCase.class.getMethods()) {
if (method.isAnnotationPresent(HystrixCommandKey.class)) {
HystrixCommandKey annotation = method.getAnnotation(HystrixCommandKey.class);
Assert.assertEquals(annotation.value(), DefaultSetterFactory.generateCommandKey("TestCase", method));
}
}
} |
@Override
public void createSubnet(Subnet osSubnet) {
checkNotNull(osSubnet, ERR_NULL_SUBNET);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getId()), ERR_NULL_SUBNET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getNetworkId()), ERR_NULL_SUBNET_NET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getCidr()), ERR_NULL_SUBNET_CIDR);
osNetworkStore.createSubnet(osSubnet);
log.info(String.format(MSG_SUBNET, osSubnet.getCidr(), MSG_CREATED));
} | @Test(expected = IllegalArgumentException.class)
public void testCreateSubnetWithNullId() {
final Subnet testSubnet = NeutronSubnet.builder()
.networkId(NETWORK_ID)
.cidr("192.168.0.0/24")
.build();
target.createSubnet(testSubnet);
} |
@Override
public TransferAction action(final Session<?> source, final Session<?> destination, final boolean resumeRequested, final boolean reloadRequested,
final TransferPrompt prompt, final ListProgressListener listener) {
if(log.isDebugEnabled()) {
log.debug(String.format("Find transfer action with prompt %s", prompt));
}
if(resumeRequested) {
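// Reuse the previously selected action when resuming; prompt only if none has been chosen yet.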
if(action.equals(TransferAction.callback)) {
return action = prompt.prompt(item);
}
return action;
}
// Prompt for synchronization.
return action = prompt.prompt(item);
} | @Test
public void testAction() throws Exception {
final Path p = new Path("t", EnumSet.of(Path.Type.directory));
Transfer t = new SyncTransfer(new Host(new TestProtocol()), new TransferItem(p, new NullLocal("p", "t") {
@Override
public boolean exists() {
return true;
}
@Override
public AttributedList<Local> list() {
return new AttributedList<>(Collections.singletonList(new NullLocal("p", "a")));
}
}));
final AtomicBoolean prompt = new AtomicBoolean();
final NullSession session = new NullSession(new Host(new TestProtocol()));
assertNull(t.action(session, null, false, false, new DisabledTransferPrompt() {
@Override
public TransferAction prompt(final TransferItem file) {
prompt.set(true);
return null;
}
}, new DisabledListProgressListener()));
assertTrue(prompt.get());
} |
@Override
public KeyValueIterator<K, V> range(final K from,
final K to) {
return new KeyValueIteratorFacade<>(inner.range(from, to));
} | @Test
public void shouldReturnPlainKeyValuePairsForRangeIterator() {
when(mockedKeyValueTimestampIterator.next())
.thenReturn(KeyValue.pair("key1", ValueAndTimestamp.make("value1", 21L)))
.thenReturn(KeyValue.pair("key2", ValueAndTimestamp.make("value2", 42L)));
when(mockedKeyValueTimestampStore.range("key1", "key2")).thenReturn(mockedKeyValueTimestampIterator);
final KeyValueIterator<String, String> iterator = readOnlyKeyValueStoreFacade.range("key1", "key2");
assertThat(iterator.next(), is(KeyValue.pair("key1", "value1")));
assertThat(iterator.next(), is(KeyValue.pair("key2", "value2")));
} |
@Override
public Path move(final Path source, final Path target, final TransferStatus status, final Delete.Callback callback,
final ConnectionCallback connectionCallback) throws BackgroundException {
if(containerService.isContainer(source)) {
if(new SimplePathPredicate(source.getParent()).test(target.getParent())) {
// Rename only
return proxy.move(source, target, status, callback, connectionCallback);
}
}
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(source) ^ new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// Moving into or from an encrypted room
final Copy copy = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to %s using copy feature %s", source, target, copy));
}
final Path c = copy.copy(source, target, status, connectionCallback, new DisabledStreamListener());
// Delete source file after copy is complete
final Delete delete = new SDSDeleteFeature(session, nodeid);
if(delete.isSupported(source)) {
log.warn(String.format("Delete source %s copied to %s", source, target));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
return c;
}
else {
return proxy.move(source, target, status, callback, connectionCallback);
}
} | @Test(expected = InteroperabilityException.class)
public void testMoveDirectoryToRoot() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path target = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
final SDSMoveFeature move = new SDSMoveFeature(session, nodeid);
assertFalse(move.isSupported(test, target));
move.move(test, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertEquals(0, session.getMetrics().get(Copy.class));
assertFalse(new SDSFindFeature(session, nodeid).find(test));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public void validateQuery(
final SessionConfig config,
final ExecutionPlan executionPlan,
final Collection<QueryMetadata> runningQueries
) {
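// Sum cache buffering over all running persistent queries and check it against the configured maximum.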
validateCacheBytesUsage(
runningQueries.stream()
.filter(q -> q instanceof PersistentQueryMetadata)
.collect(Collectors.toList()),
config,
config.getConfig(false)
.getLong(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING)
);
} | @Test
public void shouldNotThrowIfUnderLimit() {
// Given:
final SessionConfig config = configWithLimits(5, OptionalLong.of(60));
// When/Then (no throw)
queryValidator.validateQuery(config, plan, queries);
} |
public static Path getConfigHome() {
return getConfigHome(System.getProperties(), System.getenv());
} | @Test
public void testGetConfigHome_linux() {
Properties fakeProperties = new Properties();
fakeProperties.setProperty("user.home", fakeConfigHome);
fakeProperties.setProperty("os.name", "os is LiNuX");
Assert.assertEquals(
Paths.get(fakeConfigHome, ".config").resolve("google-cloud-tools-java").resolve("jib"),
XdgDirectories.getConfigHome(fakeProperties, Collections.emptyMap()));
} |
@Override
public boolean resolve(final Path file) {
if(PreferencesFactory.get().getBoolean("path.symboliclink.resolve")) {
// Follow links instead
return false;
}
// Create symbolic link only if supported by the local file system
if(feature != null) {
final Path target = file.getSymlinkTarget();
// Only create symbolic link if target is included in the download
for(TransferItem root : files) {
if(this.findTarget(target, root.remote)) {
if(log.isDebugEnabled()) {
log.debug(String.format("Resolved target %s for %s", target, file));
}
// Create symbolic link
return true;
}
}
}
// Otherwise download target file
return false;
} | @Test
public void testNotSupported() {
DownloadSymlinkResolver resolver = new DownloadSymlinkResolver(null, Collections.singletonList(
new TransferItem(new Path("/", EnumSet.of(Path.Type.directory)), new Local(System.getProperty("java.io.tmpdir")))
));
Path p = new Path("/a", EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink));
assertFalse(resolver.resolve(p));
} |
@Override
public String exec(Tuple t) throws IOException {
return SummaryData.toPrettyJSON(sumUp(getInputSchema(), t));
} | @Test
public void testEvalFunc() throws IOException {
Summary summary = new Summary();
String result = summary.exec(t(TEST_BAG));
validate(result, 1);
} |
public static long getNextScheduledTime(final String cronEntry, long currentTime) throws MessageFormatException {
long result = 0;
if (cronEntry == null || cronEntry.length() == 0) {
return result;
}
// Handle the once per minute case "* * * * *"
// starting the next event at the top of the minute.
if (cronEntry.equals("* * * * *")) {
result = currentTime + 60 * 1000;
result = result / 60000 * 60000;
return result;
}
List<String> list = tokenize(cronEntry);
List<CronEntry> entries = buildCronEntries(list);
Calendar working = Calendar.getInstance();
working.setTimeInMillis(currentTime);
working.set(Calendar.SECOND, 0);
CronEntry minutes = entries.get(MINUTES);
CronEntry hours = entries.get(HOURS);
CronEntry dayOfMonth = entries.get(DAY_OF_MONTH);
CronEntry month = entries.get(MONTH);
CronEntry dayOfWeek = entries.get(DAY_OF_WEEK);
// Start at the top of the next minute; cron is only guaranteed to be
// run on the minute.
int timeToNextMinute = 60 - working.get(Calendar.SECOND);
working.add(Calendar.SECOND, timeToNextMinute);
// If it's already too late in the day this will roll us over to tomorrow
// so we'll need to check again when done updating month and day.
int currentMinutes = working.get(Calendar.MINUTE);
if (!isCurrent(minutes, currentMinutes)) {
int nextMinutes = getNext(minutes, currentMinutes, working);
working.add(Calendar.MINUTE, nextMinutes);
}
int currentHours = working.get(Calendar.HOUR_OF_DAY);
if (!isCurrent(hours, currentHours)) {
int nextHour = getNext(hours, currentHours, working);
working.add(Calendar.HOUR_OF_DAY, nextHour);
}
// We can roll into the next month here, which might violate the cron setting
// rules, so we check once and then recheck after applying the month settings.
doUpdateCurrentDay(working, dayOfMonth, dayOfWeek);
// Start by checking if we are in the right month; if not, calculations
// need to start from the beginning of the month to ensure that we don't end
// up on the wrong day. (Can happen when DAY_OF_WEEK is set and current time
// is ahead of the day of the week to execute on).
doUpdateCurrentMonth(working, month);
// Now check day of week and day of month together since they can be specified
// together in one entry, if both "day of month" and "day of week" are restricted
// (not "*"), then either the "day of month" field (3) or the "day of week" field
// (5) must match the current day or the Calendar must be advanced.
doUpdateCurrentDay(working, dayOfMonth, dayOfWeek);
// Now we can choose the correct hour and minute of the day in question.
currentHours = working.get(Calendar.HOUR_OF_DAY);
if (!isCurrent(hours, currentHours)) {
int nextHour = getNext(hours, currentHours, working);
working.add(Calendar.HOUR_OF_DAY, nextHour);
}
currentMinutes = working.get(Calendar.MINUTE);
if (!isCurrent(minutes, currentMinutes)) {
int nextMinutes = getNext(minutes, currentMinutes, working);
working.add(Calendar.MINUTE, nextMinutes);
}
result = working.getTimeInMillis();
if (result <= currentTime) {
throw new ArithmeticException("Unable to compute next scheduled execution time.");
}
return result;
} | @Test
public void testgetNextTimeDayOfWeekVariant() throws MessageFormatException {
// using an absolute date so that result will be absolute - Monday 7 March 2011
Calendar current = Calendar.getInstance();
current.set(2011, Calendar.MARCH, 7, 9, 15, 30);
LOG.debug("start:" + current.getTime());
String test = "50 20 * * 5";
long next = CronParser.getNextScheduledTime(test, current.getTimeInMillis());
Calendar result = Calendar.getInstance();
result.setTimeInMillis(next);
LOG.debug("next:" + result.getTime());
assertEquals(0,result.get(Calendar.SECOND));
assertEquals(50,result.get(Calendar.MINUTE));
assertEquals(20,result.get(Calendar.HOUR_OF_DAY));
// expecting Friday 11th
assertEquals(11,result.get(Calendar.DAY_OF_MONTH));
assertEquals(Calendar.FRIDAY,result.get(Calendar.DAY_OF_WEEK));
assertEquals(Calendar.MARCH,result.get(Calendar.MONTH));
assertEquals(2011,result.get(Calendar.YEAR));
// Match to the day of week, but too late to run; should jump a week forward.
current = Calendar.getInstance();
current.set(2011, Calendar.MARCH, 11, 22, 0, 30);
LOG.debug("update:" + current.getTime());
next = CronParser.getNextScheduledTime(test, current.getTimeInMillis());
result = Calendar.getInstance();
result.setTimeInMillis(next);
LOG.debug("next:" + result.getTime());
//assertEquals(0,result.get(Calendar.SECOND));
assertEquals(50,result.get(Calendar.MINUTE));
assertEquals(20,result.get(Calendar.HOUR_OF_DAY));
// expecting Friday 18th
assertEquals(18,result.get(Calendar.DAY_OF_MONTH));
assertEquals(Calendar.FRIDAY,result.get(Calendar.DAY_OF_WEEK));
assertEquals(Calendar.MARCH,result.get(Calendar.MONTH));
assertEquals(2011,result.get(Calendar.YEAR));
} |
public void setSortOrder(@Nullable SortOrder sortOrder) {
if (sortOrder != null && sortOrder.scope != SortOrder.Scope.INTRA_FEED) {
throw new IllegalArgumentException("The specified sortOrder " + sortOrder
+ " is invalid. Only those with INTRA_FEED scope are allowed.");
}
this.sortOrder = sortOrder;
} | @Test
public void testSetSortOrder_OnlyIntraFeedSortAllowed() {
for (SortOrder sortOrder : SortOrder.values()) {
if (sortOrder.scope == SortOrder.Scope.INTRA_FEED) {
original.setSortOrder(sortOrder); // should be okay
} else {
assertThrows(IllegalArgumentException.class, () -> original.setSortOrder(sortOrder));
}
}
} |