focal_method | test_case |
---|---|
@Override
public void validateToken(String token) throws AccessException {
getExecuteTokenManager().validateToken(token);
} | @Test
void testValidateToken() throws AccessException {
tokenManagerDelegate.validateToken("token");
} |
public Set<Long> findCmdIds(List<Status> statusList) throws JobDoesNotExistException {
Set<Long> set = new HashSet<>();
for (Map.Entry<Long, CmdInfo> x : mInfoMap.entrySet()) {
if (statusList.isEmpty()
|| statusList.contains(getCmdStatus(
x.getValue().getJobControlId()))) {
Long key = x.getKey();
set.add(key);
}
}
return set;
} | @Test
public void testFindCmdIdsForCancel() throws Exception {
long cancelId = generateMigrateCommandForStatus(Status.CANCELED);
mSearchingCriteria.add(Status.CANCELED);
Set<Long> cancelCmdIds = mCmdJobTracker.findCmdIds(mSearchingCriteria);
Assert.assertEquals(1, cancelCmdIds.size());
Assert.assertTrue(cancelCmdIds.contains(cancelId));
} |
void refreshTopicPartitions()
throws InterruptedException, ExecutionException {
List<TopicPartition> sourceTopicPartitions = findSourceTopicPartitions();
List<TopicPartition> targetTopicPartitions = findTargetTopicPartitions();
Set<TopicPartition> sourceTopicPartitionsSet = new HashSet<>(sourceTopicPartitions);
Set<TopicPartition> knownSourceTopicPartitionsSet = new HashSet<>(knownSourceTopicPartitions);
Set<TopicPartition> upstreamTargetTopicPartitions = targetTopicPartitions.stream()
.map(x -> new TopicPartition(replicationPolicy.upstreamTopic(x.topic()), x.partition()))
.collect(Collectors.toSet());
Set<TopicPartition> missingInTarget = new HashSet<>(sourceTopicPartitions);
missingInTarget.removeAll(upstreamTargetTopicPartitions);
knownTargetTopicPartitions = targetTopicPartitions;
// Detect if topic-partitions were added or deleted from the source cluster
// or if topic-partitions are missing from the target cluster
if (!knownSourceTopicPartitionsSet.equals(sourceTopicPartitionsSet) || !missingInTarget.isEmpty()) {
Set<TopicPartition> newTopicPartitions = new HashSet<>(sourceTopicPartitions);
newTopicPartitions.removeAll(knownSourceTopicPartitionsSet);
Set<TopicPartition> deletedTopicPartitions = knownSourceTopicPartitionsSet;
deletedTopicPartitions.removeAll(sourceTopicPartitionsSet);
log.info("Found {} new topic-partitions on {}. " +
"Found {} deleted topic-partitions on {}. " +
"Found {} topic-partitions missing on {}.",
newTopicPartitions.size(), sourceAndTarget.source(),
deletedTopicPartitions.size(), sourceAndTarget.source(),
missingInTarget.size(), sourceAndTarget.target());
log.trace("Found new topic-partitions on {}: {}", sourceAndTarget.source(), newTopicPartitions);
log.trace("Found deleted topic-partitions on {}: {}", sourceAndTarget.source(), deletedTopicPartitions);
log.trace("Found missing topic-partitions on {}: {}", sourceAndTarget.target(), missingInTarget);
knownSourceTopicPartitions = sourceTopicPartitions;
computeAndCreateTopicPartitions();
context.requestTaskReconfiguration();
}
} | @Test
public void testRefreshTopicPartitions() throws Exception {
MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"),
new DefaultReplicationPolicy(), new DefaultTopicFilter(), new DefaultConfigPropertyFilter());
connector.initialize(mock(ConnectorContext.class));
connector = spy(connector);
Config topicConfig = new Config(Arrays.asList(
new ConfigEntry("cleanup.policy", "compact"),
new ConfigEntry("segment.bytes", "100")));
Map<String, Config> configs = Collections.singletonMap("topic", topicConfig);
List<TopicPartition> sourceTopicPartitions = Collections.singletonList(new TopicPartition("topic", 0));
doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions();
doReturn(Collections.emptyList()).when(connector).findTargetTopicPartitions();
doReturn(configs).when(connector).describeTopicConfigs(Collections.singleton("topic"));
doNothing().when(connector).createNewTopics(any());
connector.refreshTopicPartitions();
// if the target topic has not been created yet, refreshTopicPartitions() will call computeAndCreateTopicPartitions() again
connector.refreshTopicPartitions();
Map<String, Long> expectedPartitionCounts = new HashMap<>();
expectedPartitionCounts.put("source.topic", 1L);
Map<String, String> configMap = MirrorSourceConnector.configToMap(topicConfig);
assertEquals(2, configMap.size(), "configMap has incorrect size");
Map<String, NewTopic> expectedNewTopics = new HashMap<>();
expectedNewTopics.put("source.topic", new NewTopic("source.topic", 1, (short) 0).configs(configMap));
verify(connector, times(2)).computeAndCreateTopicPartitions();
verify(connector, times(2)).createNewTopics(eq(expectedNewTopics));
verify(connector, times(0)).createNewPartitions(any());
List<TopicPartition> targetTopicPartitions = Collections.singletonList(new TopicPartition("source.topic", 0));
doReturn(targetTopicPartitions).when(connector).findTargetTopicPartitions();
connector.refreshTopicPartitions();
// once target topic is created, refreshTopicPartitions() will NOT call computeAndCreateTopicPartitions() again
verify(connector, times(2)).computeAndCreateTopicPartitions();
} |
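A minimal sketch of the set-difference pattern refreshTopicPartitions relies on: copying into a fresh HashSet before removeAll keeps the source collections intact, and the two differences yield the added and deleted partitions (the topic names here are hypothetical). The focal method can afford to mutate knownSourceTopicPartitionsSet in place because it reassigns knownSourceTopicPartitions right afterwards; the sketch copies defensively instead.

import java.util.HashSet;
import java.util.Set;

public class PartitionDiffSketch {
    public static void main(String[] args) {
        Set<String> known = new HashSet<>(Set.of("topic-0", "topic-1"));
        Set<String> current = new HashSet<>(Set.of("topic-1", "topic-2"));

        Set<String> added = new HashSet<>(current);
        added.removeAll(known);            // [topic-2]: new on the source cluster

        Set<String> deleted = new HashSet<>(known);
        deleted.removeAll(current);        // [topic-0]: removed from the source cluster

        System.out.println("added=" + added + " deleted=" + deleted);
    }
}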
public MutableTree<K> beginWrite() {
return new MutableTree<>(this);
} | @Test
public void reverseIterationTest() {
Random random = new Random(5743);
Persistent23Tree.MutableTree<Integer> tree = new Persistent23Tree<Integer>().beginWrite();
int[] p = genPermutation(random);
TreeSet<Integer> added = new TreeSet<>();
for (int i = 0; i < ENTRIES_TO_ADD; i++) {
int size = tree.size();
Assert.assertEquals(i, size);
if ((size & 1023) == 0 || size < 100) {
Iterator<Integer> iterator = added.descendingIterator();
for (Iterator<Integer> treeItr = tree.reverseIterator();
treeItr.hasNext(); ) {
Assert.assertTrue(iterator.hasNext());
Integer key = treeItr.next();
Assert.assertEquals(iterator.next(), key);
}
Assert.assertFalse(iterator.hasNext());
iterator = added.descendingIterator();
Iterator<Integer> treeItr = tree.reverseIterator();
for (int j = 0; j < size; j++) {
Integer key = treeItr.next();
Assert.assertTrue(iterator.hasNext());
Assert.assertEquals(iterator.next(), key);
}
Assert.assertFalse(iterator.hasNext());
try {
treeItr.next();
Assert.fail();
} catch (NoSuchElementException e) {
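// expected: the reverse iterator is exhausted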
}
Assert.assertFalse(treeItr.hasNext());
}
tree.add(p[i]);
added.add(p[i]);
}
} |
@Override
public void failover(NamedNode master) {
connection.sync(RedisCommands.SENTINEL_FAILOVER, master.getName());
} | @Test
public void testFailover() throws InterruptedException {
Collection<RedisServer> masters = connection.masters();
connection.failover(masters.iterator().next());
Thread.sleep(10000);
RedisServer newMaster = connection.masters().iterator().next();
assertThat(masters.iterator().next().getPort()).isNotEqualTo(newMaster.getPort());
} |
public static ParamType getSchemaFromType(final Type type) {
return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
} | @Test
public void shouldGetPartialGenericFunction() throws NoSuchMethodException {
// Given:
final Type genericType = getClass().getMethod("partialGenericFunctionType").getGenericReturnType();
// When:
final ParamType returnType = UdfUtil.getSchemaFromType(genericType);
// Then:
assertThat(returnType, is(LambdaType.of(ImmutableList.of(ParamTypes.LONG), GenericType.of("U"))));
} |
private RemotingCommand getConsumerConnectionList(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
final GetConsumerConnectionListRequestHeader requestHeader =
(GetConsumerConnectionListRequestHeader) request.decodeCommandCustomHeader(GetConsumerConnectionListRequestHeader.class);
ConsumerGroupInfo consumerGroupInfo =
this.brokerController.getConsumerManager().getConsumerGroupInfo(requestHeader.getConsumerGroup());
if (consumerGroupInfo != null) {
ConsumerConnection bodydata = new ConsumerConnection();
bodydata.setConsumeFromWhere(consumerGroupInfo.getConsumeFromWhere());
bodydata.setConsumeType(consumerGroupInfo.getConsumeType());
bodydata.setMessageModel(consumerGroupInfo.getMessageModel());
bodydata.getSubscriptionTable().putAll(consumerGroupInfo.getSubscriptionTable());
Iterator<Map.Entry<Channel, ClientChannelInfo>> it = consumerGroupInfo.getChannelInfoTable().entrySet().iterator();
while (it.hasNext()) {
ClientChannelInfo info = it.next().getValue();
Connection connection = new Connection();
connection.setClientId(info.getClientId());
connection.setLanguage(info.getLanguage());
connection.setVersion(info.getVersion());
connection.setClientAddr(RemotingHelper.parseChannelRemoteAddr(info.getChannel()));
bodydata.getConnectionSet().add(connection);
}
byte[] body = bodydata.encode();
response.setBody(body);
response.setCode(ResponseCode.SUCCESS);
response.setRemark(null);
return response;
}
response.setCode(ResponseCode.CONSUMER_NOT_ONLINE);
response.setRemark("the consumer group[" + requestHeader.getConsumerGroup() + "] not online");
return response;
} | @Test
public void testGetConsumerConnectionList() throws RemotingCommandException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUMER_CONNECTION_LIST, null);
request.addExtField("consumerGroup", "GID-group-test");
consumerManager = mock(ConsumerManager.class);
when(brokerController.getConsumerManager()).thenReturn(consumerManager);
ConsumerGroupInfo consumerGroupInfo = new ConsumerGroupInfo("GID-group-test", ConsumeType.CONSUME_ACTIVELY, MessageModel.CLUSTERING, ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
when(consumerManager.getConsumerGroupInfo(anyString())).thenReturn(consumerGroupInfo);
RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
} |
public Value parse(String json) {
return this.delegate.parse(json);
} | @Test
public void testParseJson() throws Exception {
final JsonParser parser = new JsonParser();
final Value msgpackValue = parser.parse("{\"col1\": 1, \"col2\": \"foo\", \"col3\": [1,2,3], \"col4\": {\"a\": 1}}");
assertTrue(msgpackValue.isMapValue());
final Map<Value, Value> map = msgpackValue.asMapValue().map();
assertEquals(1, map.get(key("col1")).asIntegerValue().asInt());
assertEquals("foo", map.get(key("col2")).asStringValue().toString());
// Check array value
final Value col3Value = map.get(key("col3"));
assertTrue(col3Value.isArrayValue());
assertEquals(
Arrays.asList(1,2,3),
col3Value.asArrayValue().list().stream()
.map(v -> v.asIntegerValue().asInt())
.collect(Collectors.toList())
);
// Check map value
final Value col4Value = map.get(key("col4"));
assertTrue(col4Value.isMapValue());
final Value aOfCol4 = col4Value.asMapValue().map().get(key("a"));
assertEquals(1, aOfCol4.asIntegerValue().asInt());
} |
@Override
public EdgeExplorer createEdgeExplorer(final EdgeFilter edgeFilter) {
// re-use these objects between setBaseNode calls to prevent GC
final EdgeExplorer mainExplorer = baseGraph.createEdgeExplorer(edgeFilter);
final VirtualEdgeIterator virtualEdgeIterator = new VirtualEdgeIterator(edgeFilter, null);
return new EdgeExplorer() {
@Override
public EdgeIterator setBaseNode(int baseNode) {
if (isVirtualNode(baseNode)) {
List<EdgeIteratorState> virtualEdges = virtualEdgesAtVirtualNodes.get(baseNode - baseNodes);
return virtualEdgeIterator.reset(virtualEdges);
} else {
List<EdgeIteratorState> virtualEdges = virtualEdgesAtRealNodes.get(baseNode);
if (virtualEdges == null) {
return mainExplorer.setBaseNode(baseNode);
} else {
return virtualEdgeIterator.reset(virtualEdges);
}
}
}
};
} | @Test
public void testVirtEdges() {
initGraph(g);
EdgeIterator iter = g.createEdgeExplorer().setBaseNode(0);
iter.next();
List<EdgeIteratorState> vEdges = Collections.singletonList(iter.detach(false));
VirtualEdgeIterator vi = new VirtualEdgeIterator(EdgeFilter.ALL_EDGES, vEdges);
assertTrue(vi.next());
} |
@SuppressWarnings("unchecked")
@Override
public boolean canHandleReturnType(Class returnType) {
return rxSupportedTypes.stream()
.anyMatch(classType -> classType.isAssignableFrom(returnType));
} | @Test
public void testCheckTypes() {
assertThat(rxJava3RetryAspectExt.canHandleReturnType(Flowable.class)).isTrue();
assertThat(rxJava3RetryAspectExt.canHandleReturnType(Single.class)).isTrue();
} |
@Override
public void decryptKey(EncryptionKey key) {
assert plainKey != null;
if (!(key instanceof NormalKey)) {
throw new IllegalArgumentException("NormalKey cannot not decrypt " + key.getClass().getName());
}
NormalKey normalKey = (NormalKey) key;
normalKey.setPlainKey(unwrapKey(algorithm, normalKey.getEncryptedKey()));
} | @Test
public void testDecryptKey() {
NormalKey newKey = (NormalKey) normalKey.generateKey();
byte[] plainKey = newKey.getPlainKey();
newKey.setPlainKey(null);
normalKey.decryptKey(newKey);
assertArrayEquals(plainKey, newKey.getPlainKey());
} |
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
} | @Test(expected = SemanticException.class)
public void testShowCreateTableEmptyDb() throws SemanticException, DdlException {
ShowCreateTableStmt stmt = new ShowCreateTableStmt(new TableName("emptyDb", "testTable"),
ShowCreateTableStmt.CreateTableType.TABLE);
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.fail("No Exception throws.");
} |
@Override
public KStream<K, V> peek(final ForeachAction<? super K, ? super V> action) {
return peek(action, NamedInternal.empty());
} | @Test
public void shouldNotAllowNullActionOnPeek() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.peek(null));
assertThat(exception.getMessage(), equalTo("action can't be null"));
} |
@VisibleForTesting
void validateDeptExists(Long id) {
if (id == null) {
return;
}
DeptDO dept = deptMapper.selectById(id);
if (dept == null) {
throw exception(DEPT_NOT_FOUND);
}
} | @Test
public void testValidateDeptExists_notFound() {
// prepare parameters
Long id = randomLongId();
// call and assert that the expected exception is thrown
assertServiceException(() -> deptService.validateDeptExists(id), DEPT_NOT_FOUND);
} |
public static RLESparseResourceAllocation merge(ResourceCalculator resCalc,
Resource clusterResource, RLESparseResourceAllocation a,
RLESparseResourceAllocation b, RLEOperator operator, long start, long end)
throws PlanningException {
NavigableMap<Long, Resource> cumA =
a.getRangeOverlapping(start, end).getCumulative();
NavigableMap<Long, Resource> cumB =
b.getRangeOverlapping(start, end).getCumulative();
NavigableMap<Long, Resource> out =
merge(resCalc, clusterResource, cumA, cumB, start, end, operator);
return new RLESparseResourceAllocation(out, resCalc);
} | @Test
public void testMergeAdd() throws PlanningException {
TreeMap<Long, Resource> a = new TreeMap<>();
TreeMap<Long, Resource> b = new TreeMap<>();
setupArrays(a, b);
RLESparseResourceAllocation rleA =
new RLESparseResourceAllocation(a, new DefaultResourceCalculator());
RLESparseResourceAllocation rleB =
new RLESparseResourceAllocation(b, new DefaultResourceCalculator());
RLESparseResourceAllocation out =
RLESparseResourceAllocation.merge(new DefaultResourceCalculator(),
Resource.newInstance(100 * 128 * 1024, 100 * 32), rleA, rleB,
RLEOperator.add, 18, 45);
System.out.println(out);
long[] time = { 18, 20, 22, 30, 33, 40, 43, 45 };
int[] alloc = { 10, 15, 20, 25, 30, 40, 30 };
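// the eight timestamps in time delimit the seven step values in alloc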
validate(out, time, alloc);
} |
public static AbstractProtocolNegotiatorBuilderSingleton getSingleton() {
return SINGLETON;
} | @Test
void testSingletonInstance() {
AbstractProtocolNegotiatorBuilderSingleton singleton1 = ClusterProtocolNegotiatorBuilderSingleton.getSingleton();
AbstractProtocolNegotiatorBuilderSingleton singleton2 = ClusterProtocolNegotiatorBuilderSingleton.getSingleton();
assertSame(singleton1, singleton2);
} |
public Collection<Stream> getByName(String name) {
return nameToStream.get(name);
} | @Test
public void getByName() {
// make sure getByName always returns a collection
final Collection<Stream> streams = cacheService.getByName("nonexisting");
assertThat(streams).isNotNull().isEmpty();
} |
@Override
public Addresses loadAddresses(ClientConnectionProcessListenerRegistry listenerRunner) throws Exception {
response = discovery.discoverNodes();
List<Address> addresses = response.getPrivateMemberAddresses();
listenerRunner.onPossibleAddressesCollected(addresses);
return new Addresses(addresses);
} | @Test(expected = IllegalStateException.class)
public void testLoadAddresses_whenExceptionIsThrown() throws Exception {
ViridianAddressProvider provider = new ViridianAddressProvider(createBrokenDiscovery());
provider.loadAddresses(createListenerRunner());
} |
public String getTableName() {
return tableName;
} | @Test
public void getTableNameOutputNull() {
// Arrange
final DdlResult objectUnderTest = new DdlResult();
// Act
final String actual = objectUnderTest.getTableName();
// Assert result
Assert.assertNull(actual);
} |
@ExceptionHandler(MethodArgumentNotValidException.class)
protected ShenyuAdminResult handleMethodArgumentNotValidException(final MethodArgumentNotValidException e) {
LOG.warn("method argument not valid", e);
BindingResult bindingResult = e.getBindingResult();
String errorMsg = bindingResult.getFieldErrors().stream()
.map(f -> f.getField().concat(": ").concat(Optional.ofNullable(f.getDefaultMessage()).orElse("")))
.collect(Collectors.joining("| "));
return ShenyuAdminResult.error(String.format("Request error! invalid argument [%s]", errorMsg));
} | @Test
public void testHandleMethodArgumentNotValidException() throws InstantiationException, IllegalAccessException, NoSuchMethodException {
BindingResult bindingResult = spy(new DirectFieldBindingResult("test", "TestClass"));
MethodParameter methodParameter = spy(new SynthesizingMethodParameter(this.getClass().getMethod("setUp"), -1));
MethodArgumentNotValidException exception = spy(new MethodArgumentNotValidException(methodParameter, bindingResult));
when(exception.getBindingResult()).thenReturn(bindingResult);
List<FieldError> fieldErrors = spy(Collections.emptyList());
when(bindingResult.getFieldErrors()).thenReturn(fieldErrors);
ShenyuAdminResult result = exceptionHandlersUnderTest.handleMethodArgumentNotValidException(exception);
Assertions.assertEquals(CommonErrorCode.ERROR, result.getCode().intValue());
MatcherAssert.assertThat(result.getMessage(), containsString("Request error! invalid argument"));
} |
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
} | @Test
public void shouldGetMapSchemaFromMapClassVariadic() throws NoSuchMethodException {
final Type type = getClass().getDeclaredMethod("mapType", Map.class)
.getGenericParameterTypes()[0];
final ParamType schema = UdfUtil.getVarArgsSchemaFromType(type);
assertThat(schema, instanceOf(MapType.class));
assertThat(((MapType) schema).value(), equalTo(ParamTypes.INTEGER));
} |
public NewIssuesNotification newNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
verifyAssigneesByUuid(assigneesByUuid);
return new NewIssuesNotification(new DetailsSupplierImpl(assigneesByUuid));
} | @Test
public void newNewIssuesNotification_DetailsSupplier_getComponentNameByUuid_fails_with_NPE_if_uuid_is_null() {
treeRootHolder.setRoot(ReportComponent.builder(PROJECT, 1).setUuid("rootUuid").setName("root").build());
NewIssuesNotification underTest = this.underTest.newNewIssuesNotification(emptyMap());
DetailsSupplier detailsSupplier = readDetailsSupplier(underTest);
assertThatThrownBy(() -> detailsSupplier.getComponentNameByUuid(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("uuid can't be null");
} |
@Override
public void updateNotice(NoticeSaveReqVO updateReqVO) {
// validate that the notice exists
validateNoticeExists(updateReqVO.getId());
// update the notice
NoticeDO updateObj = BeanUtils.toBean(updateReqVO, NoticeDO.class);
noticeMapper.updateById(updateObj);
} | @Test
public void testUpdateNotice_success() {
// insert prerequisite data
NoticeDO dbNoticeDO = randomPojo(NoticeDO.class);
noticeMapper.insert(dbNoticeDO);
// prepare the update request
NoticeSaveReqVO reqVO = randomPojo(NoticeSaveReqVO.class, o -> o.setId(dbNoticeDO.getId()));
// update
noticeService.updateNotice(reqVO);
// verify the update succeeded
NoticeDO notice = noticeMapper.selectById(reqVO.getId());
assertPojoEquals(reqVO, notice);
} |
@Override
public void exportData(JsonWriter writer) throws IOException {
// version tag at the root
writer.name(THIS_VERSION);
writer.beginObject();
// clients list
writer.name(CLIENTS);
writer.beginArray();
writeClients(writer);
writer.endArray();
writer.name(GRANTS);
writer.beginArray();
writeGrants(writer);
writer.endArray();
writer.name(WHITELISTEDSITES);
writer.beginArray();
writeWhitelistedSites(writer);
writer.endArray();
writer.name(BLACKLISTEDSITES);
writer.beginArray();
writeBlacklistedSites(writer);
writer.endArray();
writer.name(AUTHENTICATIONHOLDERS);
writer.beginArray();
writeAuthenticationHolders(writer);
writer.endArray();
writer.name(ACCESSTOKENS);
writer.beginArray();
writeAccessTokens(writer);
writer.endArray();
writer.name(REFRESHTOKENS);
writer.beginArray();
writeRefreshTokens(writer);
writer.endArray();
writer.name(SYSTEMSCOPES);
writer.beginArray();
writeSystemScopes(writer);
writer.endArray();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.exportExtensionData(writer);
break;
}
}
writer.endObject(); // end mitreid-connect-1.3
} | @Test
public void testExportSystemScopes() throws IOException {
SystemScope scope1 = new SystemScope();
scope1.setId(1L);
scope1.setValue("scope1");
scope1.setDescription("Scope 1");
scope1.setRestricted(true);
scope1.setDefaultScope(false);
scope1.setIcon("glass");
SystemScope scope2 = new SystemScope();
scope2.setId(2L);
scope2.setValue("scope2");
scope2.setDescription("Scope 2");
scope2.setRestricted(false);
scope2.setDefaultScope(false);
scope2.setIcon("ball");
SystemScope scope3 = new SystemScope();
scope3.setId(3L);
scope3.setValue("scope3");
scope3.setDescription("Scope 3");
scope3.setRestricted(false);
scope3.setDefaultScope(true);
scope3.setIcon("road");
Set<SystemScope> allScopes = ImmutableSet.of(scope1, scope2, scope3);
Mockito.when(clientRepository.getAllClients()).thenReturn(new HashSet<ClientDetailsEntity>());
Mockito.when(approvedSiteRepository.getAll()).thenReturn(new HashSet<ApprovedSite>());
Mockito.when(wlSiteRepository.getAll()).thenReturn(new HashSet<WhitelistedSite>());
Mockito.when(blSiteRepository.getAll()).thenReturn(new HashSet<BlacklistedSite>());
Mockito.when(authHolderRepository.getAll()).thenReturn(new ArrayList<AuthenticationHolderEntity>());
Mockito.when(tokenRepository.getAllAccessTokens()).thenReturn(new HashSet<OAuth2AccessTokenEntity>());
Mockito.when(tokenRepository.getAllRefreshTokens()).thenReturn(new HashSet<OAuth2RefreshTokenEntity>());
Mockito.when(sysScopeRepository.getAll()).thenReturn(allScopes);
// do the data export
StringWriter stringWriter = new StringWriter();
JsonWriter writer = new JsonWriter(stringWriter);
writer.beginObject();
dataService.exportData(writer);
writer.endObject();
writer.close();
// parse the output as a JSON object for testing
JsonElement elem = new JsonParser().parse(stringWriter.toString());
JsonObject root = elem.getAsJsonObject();
// make sure the root is there
assertThat(root.has(MITREidDataService.MITREID_CONNECT_1_3), is(true));
JsonObject config = root.get(MITREidDataService.MITREID_CONNECT_1_3).getAsJsonObject();
// make sure all the root elements are there
assertThat(config.has(MITREidDataService.CLIENTS), is(true));
assertThat(config.has(MITREidDataService.GRANTS), is(true));
assertThat(config.has(MITREidDataService.WHITELISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.BLACKLISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.REFRESHTOKENS), is(true));
assertThat(config.has(MITREidDataService.ACCESSTOKENS), is(true));
assertThat(config.has(MITREidDataService.SYSTEMSCOPES), is(true));
assertThat(config.has(MITREidDataService.AUTHENTICATIONHOLDERS), is(true));
// make sure the root elements are all arrays
assertThat(config.get(MITREidDataService.CLIENTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.GRANTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.WHITELISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.BLACKLISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.REFRESHTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.ACCESSTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.SYSTEMSCOPES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.AUTHENTICATIONHOLDERS).isJsonArray(), is(true));
// check our scope list (this test)
JsonArray scopes = config.get(MITREidDataService.SYSTEMSCOPES).getAsJsonArray();
assertThat(scopes.size(), is(3));
// check each of our scopes in turn
Set<SystemScope> checked = new HashSet<>();
for (JsonElement e : scopes) {
assertThat(e.isJsonObject(), is(true));
JsonObject scope = e.getAsJsonObject();
SystemScope compare = null;
if (scope.get("value").getAsString().equals(scope1.getValue())) {
compare = scope1;
} else if (scope.get("value").getAsString().equals(scope2.getValue())) {
compare = scope2;
} else if (scope.get("value").getAsString().equals(scope3.getValue())) {
compare = scope3;
}
if (compare == null) {
fail("Could not find matching scope value: " + scope.get("value").getAsString());
} else {
assertThat(scope.get("value").getAsString(), equalTo(compare.getValue()));
assertThat(scope.get("description").getAsString(), equalTo(compare.getDescription()));
assertThat(scope.get("icon").getAsString(), equalTo(compare.getIcon()));
assertThat(scope.get("restricted").getAsBoolean(), equalTo(compare.isRestricted()));
assertThat(scope.get("defaultScope").getAsBoolean(), equalTo(compare.isDefaultScope()));
checked.add(compare);
}
}
// make sure all of our scopes were found
assertThat(checked.containsAll(allScopes), is(true));
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final List<Path> deleted = new ArrayList<Path>();
for(Path file : files.keySet()) {
boolean skip = false;
for(Path d : deleted) {
if(file.isChild(d)) {
skip = true;
break;
}
}
if(skip) {
continue;
}
deleted.add(file);
callback.delete(file);
try {
final IRODSFile f = session.getClient().getIRODSFileFactory().instanceIRODSFile(file.getAbsolute());
if(!f.exists()) {
throw new NotfoundException(String.format("%s doesn't exist", file.getAbsolute()));
}
if(f.isFile()) {
session.getClient().fileDeleteForce(f);
}
else if(f.isDirectory()) {
session.getClient().directoryDeleteForce(f);
}
}
catch(JargonException e) {
throw new IRODSExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
} | @Test(expected = NotfoundException.class)
public void testDeleteNotFound() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
assertFalse(new IRODSFindFeature(session).find(test));
new IRODSDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public boolean hasNextStage(final CaseInsensitiveString lastStageName) {
if (this.isEmpty()) {
return false;
}
return nextStage(lastStageName) != null;
} | @Test
public void shouldReturnFalseIfThePassedInStageDoesNotExist() {
PipelineConfig pipelineConfig = new PipelineConfig(new CaseInsensitiveString("pipeline"), null, completedStage(), buildingStage());
assertThat(pipelineConfig.hasNextStage(new CaseInsensitiveString("notExist")), is(false));
} |
@Override
public FSDataOutputStream create(Path f, WriteMode overwriteMode) throws IOException {
return createOutputStream(() -> originalFs.create(f, overwriteMode));
} | @Test
void testSlowOutputStreamNotClosed() throws Exception {
final LimitedConnectionsFileSystem fs =
new LimitedConnectionsFileSystem(LocalFileSystem.getSharedInstance(), 1, 0L, 1000L);
// some competing threads
final Random rnd = new Random();
final ReaderThread[] threads = new ReaderThread[10];
for (int i = 0; i < threads.length; i++) {
File file = File.createTempFile("junit", null, tempFolder);
createRandomContents(file, rnd);
Path path = new Path(file.toURI());
threads[i] = new ReaderThread(fs, path, 1, Integer.MAX_VALUE);
}
// open the stream we test
try (FSDataOutputStream out =
fs.create(
new Path(File.createTempFile("junit", null, tempFolder).toURI()),
WriteMode.OVERWRITE)) {
// start the other threads that will try to shoot this stream down
for (ReaderThread t : threads) {
t.start();
}
// read the stream slowly.
Thread.sleep(5);
for (int bytesLeft = 50; bytesLeft > 0; bytesLeft--) {
out.write(bytesLeft);
Thread.sleep(5);
}
}
// wait for clean shutdown
for (ReaderThread t : threads) {
t.sync();
}
} |
@JsonCreator
public static Duration parse(String duration) {
final Matcher matcher = DURATION_PATTERN.matcher(duration);
if (!matcher.matches()) {
throw new IllegalArgumentException("Invalid duration: " + duration);
}
final long count = Long.parseLong(matcher.group(1));
final TimeUnit unit = SUFFIXES.get(matcher.group(2));
if (unit == null) {
throw new IllegalArgumentException("Invalid duration: " + duration + ". Wrong time unit");
}
return new Duration(count, unit);
} | @Test
void unableParseWrongDurationTimeUnit() {
assertThatIllegalArgumentException().isThrownBy(() -> Duration.parse("1gs"));
} |
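DURATION_PATTERN and SUFFIXES are not shown above, so the sketch below assumes plausible shapes for them: a count group, a suffix group, and a suffix-to-TimeUnit map. It reproduces the branch the test exercises, where "1gs" matches the pattern but the suffix lookup returns null.

import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DurationParseSketch {
    // assumed shapes; the real pattern and suffix table may differ
    private static final Pattern DURATION_PATTERN = Pattern.compile("(\\d+)\\s*([a-z]+)");
    private static final Map<String, TimeUnit> SUFFIXES = Map.of(
            "ms", TimeUnit.MILLISECONDS, "s", TimeUnit.SECONDS,
            "m", TimeUnit.MINUTES, "h", TimeUnit.HOURS);

    public static void main(String[] args) {
        Matcher matcher = DURATION_PATTERN.matcher("1gs");
        if (matcher.matches()) {
            // "gs" is not a known suffix, so the unit lookup yields null and
            // the focal method throws IllegalArgumentException
            System.out.println("count=" + matcher.group(1) + " unit=" + SUFFIXES.get(matcher.group(2)));
        }
    }
}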
public Analysis analyze(Statement statement)
{
return analyze(statement, false);
} | @Test
public void testValidJoinOnClause()
{
analyze("SELECT * FROM (VALUES (2, 2)) a(x,y) JOIN (VALUES (2, 2)) b(x,y) ON TRUE");
analyze("SELECT * FROM (VALUES (2, 2)) a(x,y) JOIN (VALUES (2, 2)) b(x,y) ON 1=1");
analyze("SELECT * FROM (VALUES (2, 2)) a(x,y) JOIN (VALUES (2, 2)) b(x,y) ON a.x=b.x AND a.y=b.y");
analyze("SELECT * FROM (VALUES (2, 2)) a(x,y) JOIN (VALUES (2, 2)) b(x,y) ON NULL");
} |
public List<String> parse(final CharSequence line) {
return this.lineParser.parse( line.toString() );
} | @Test
public void testDoubleQuotes() {
final CsvLineParser parser = new CsvLineParser();
final String s = "a,\"\"\"b\"\"\",c";
final List<String> list = parser.parse(s);
assertThat(list).hasSize(3).containsExactly("a", "\"b\"", "c");
} |
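The test encodes the RFC 4180 rule that a doubled quote inside a quoted field stands for one literal quote. A tiny sketch of the unescaping step for a single, already-isolated field:

public class CsvQuoteSketch {
    public static void main(String[] args) {
        // the middle field of a,"""b""",c as it appears on the wire
        String quotedField = "\"\"\"b\"\"\"";
        // strip the enclosing quotes, then collapse doubled quotes
        String inner = quotedField.substring(1, quotedField.length() - 1);
        System.out.println(inner.replace("\"\"", "\"")); // prints "b" (quotes included)
    }
}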
@Override
public PageData<WidgetsBundle> findSystemWidgetsBundles(WidgetsBundleFilter widgetsBundleFilter, PageLink pageLink) {
if (widgetsBundleFilter.isFullSearch()) {
return DaoUtil.toPageData(
widgetsBundleRepository
.findSystemWidgetsBundlesFullSearch(
NULL_UUID,
pageLink.getTextSearch(),
DaoUtil.toPageable(pageLink)));
} else {
return DaoUtil.toPageData(
widgetsBundleRepository
.findSystemWidgetsBundles(
NULL_UUID,
pageLink.getTextSearch(),
DaoUtil.toPageable(pageLink)));
}
} | @Test
public void testFindSystemWidgetsBundles() {
createSystemWidgetBundles(30, "WB_");
widgetsBundles = widgetsBundleDao.find(TenantId.SYS_TENANT_ID);
assertEquals(30, widgetsBundles.size());
// Get first page
PageLink pageLink = new PageLink(10, 0, "WB");
PageData<WidgetsBundle> widgetsBundles1 = widgetsBundleDao.findSystemWidgetsBundles(WidgetsBundleFilter.fromTenantId(TenantId.SYS_TENANT_ID), pageLink);
assertEquals(10, widgetsBundles1.getData().size());
// Get next page
pageLink = pageLink.nextPageLink();
PageData<WidgetsBundle> widgetsBundles2 = widgetsBundleDao.findSystemWidgetsBundles(WidgetsBundleFilter.fromTenantId(TenantId.SYS_TENANT_ID), pageLink);
assertEquals(10, widgetsBundles2.getData().size());
} |
@Description("Returns a LineString from an array of points")
@ScalarFunction("ST_LineString")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stLineString(@SqlType("array(" + GEOMETRY_TYPE_NAME + ")") Block input)
{
CoordinateSequence coordinates = readPointCoordinates(input, "ST_LineString", true);
if (coordinates.size() < 2) {
return serialize(createJtsEmptyLineString());
}
return serialize(createJtsLineString(coordinates));
} | @Test
public void testSTLineString()
{
// General case, 2+ points
assertFunction("ST_LineString(array[ST_Point(1,2), ST_Point(3,4)])", GEOMETRY, "LINESTRING (1 2, 3 4)");
assertFunction("ST_LineString(array[ST_Point(1,2), ST_Point(3,4), ST_Point(5, 6)])", GEOMETRY, "LINESTRING (1 2, 3 4, 5 6)");
assertFunction("ST_LineString(array[ST_Point(1,2), ST_Point(3,4), ST_Point(5,6), ST_Point(7,8)])", GEOMETRY, "LINESTRING (1 2, 3 4, 5 6, 7 8)");
// Other ways of creating points
assertFunction("ST_LineString(array[ST_GeometryFromText('POINT (1 2)'), ST_GeometryFromText('POINT (3 4)')])", GEOMETRY, "LINESTRING (1 2, 3 4)");
// Duplicate consecutive points throws exception
assertInvalidFunction("ST_LineString(array[ST_Point(1, 2), ST_Point(1, 2)])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: consecutive duplicate points at index 2");
assertFunction("ST_LineString(array[ST_Point(1, 2), ST_Point(3, 4), ST_Point(1, 2)])", GEOMETRY, "LINESTRING (1 2, 3 4, 1 2)");
// Single point
assertFunction("ST_LineString(array[ST_Point(9,10)])", GEOMETRY, "LINESTRING EMPTY");
// Zero points
assertFunction("ST_LineString(array[])", GEOMETRY, "LINESTRING EMPTY");
// Only points can be passed
assertInvalidFunction("ST_LineString(array[ST_Point(7,8), ST_GeometryFromText('LINESTRING (1 2, 3 4)')])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: geometry is not a point: LINE_STRING at index 2");
// Null points are invalid
assertInvalidFunction("ST_LineString(array[NULL])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: null at index 1");
assertInvalidFunction("ST_LineString(array[ST_Point(1,2), NULL])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: null at index 2");
assertInvalidFunction("ST_LineString(array[ST_Point(1, 2), NULL, ST_Point(3, 4)])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: null at index 2");
assertInvalidFunction("ST_LineString(array[ST_Point(1, 2), NULL, ST_Point(3, 4), NULL])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: null at index 2");
// Empty points are invalid
assertInvalidFunction("ST_LineString(array[ST_GeometryFromText('POINT EMPTY')])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: empty point at index 1");
assertInvalidFunction("ST_LineString(array[ST_Point(1,2), ST_GeometryFromText('POINT EMPTY')])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: empty point at index 2");
assertInvalidFunction("ST_LineString(array[ST_Point(1,2), ST_GeometryFromText('POINT EMPTY'), ST_Point(3,4)])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: empty point at index 2");
assertInvalidFunction("ST_LineString(array[ST_Point(1,2), ST_GeometryFromText('POINT EMPTY'), ST_Point(3,4), ST_GeometryFromText('POINT EMPTY')])", INVALID_FUNCTION_ARGUMENT, "Invalid input to ST_LineString: empty point at index 2");
} |
public static int readVInt(ByteData arr, long position) {
byte b = arr.get(position++);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as int");
int value = b & 0x7F;
while ((b & 0x80) != 0) {
b = arr.get(position++);
value <<= 7;
value |= (b & 0x7F);
}
return value;
} | @Test(expected = EOFException.class)
public void testReadVIntEmptyInputStream() throws IOException {
InputStream is = new ByteArrayInputStream(BYTES_EMPTY);
VarInt.readVInt(is);
} |
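readVInt decodes big-endian groups of seven bits, where a set high bit (0x80) marks a continuation byte and a leading byte of exactly 0x80 is reserved as the null sentinel. Below is a self-contained sketch of a matching encoder plus a plain byte[] decoder; it covers the roundtrip only, not the null sentinel or the stream/EOF handling of the real class.

public class VarIntSketch {
    static byte[] writeVInt(int value) {
        int groups = 1;
        for (int v = value >>> 7; v != 0; v >>>= 7) {
            groups++;
        }
        byte[] out = new byte[groups];
        for (int i = groups - 1; i >= 0; i--) {
            out[i] = (byte) (value & 0x7F);   // low seven bits of this group
            value >>>= 7;
        }
        for (int i = 0; i < groups - 1; i++) {
            out[i] |= (byte) 0x80;            // continuation bit on all but the last byte
        }
        // the leading group is non-zero for multi-byte values, so a lone 0x80
        // (the null sentinel rejected by readVInt) is never produced here
        return out;
    }

    static int readVInt(byte[] arr) {
        int pos = 0;
        byte b = arr[pos++];
        int value = b & 0x7F;
        while ((b & 0x80) != 0) {
            b = arr[pos++];
            value = (value << 7) | (b & 0x7F);
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println(readVInt(writeVInt(300))); // 300, encoded as 0x82 0x2C
    }
}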
@Udf
public <T extends Comparable<? super T>> T arrayMax(@UdfParameter(
description = "Array of values from which to find the maximum") final List<T> input) {
if (input == null) {
return null;
}
T candidate = null;
for (T thisVal : input) {
if (thisVal != null) {
if (candidate == null) {
candidate = thisVal;
} else if (thisVal.compareTo(candidate) > 0) {
candidate = thisVal;
}
}
}
return candidate;
} | @Test
public void shouldFindStringMax() {
final List<String> input = Arrays.asList("foo", "food", "bar");
assertThat(udf.arrayMax(input), is("food"));
} |
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
DecimalColumnStatsDataInspector columnStatsData = decimalInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DecimalColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DecimalColumnStatsMerger merger = new DecimalColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
if (newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation && aggregateData != null
&& aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDecimalStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
DecimalColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue()) < MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) {
aggregateData.setLowValue(aggregateData.getLowValue());
} else {
aggregateData.setLowValue(newData.getLowValue());
}
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) > MetaStoreServerUtils
.decimalToDouble(newData.getHighValue())) {
aggregateData.setHighValue(aggregateData.getHighValue());
} else {
aggregateData.setHighValue(newData.getHighValue());
}
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDecimalStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDecimalStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateMultiStatsWhenOnlySomeAvailable() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3", "part4");
ColumnStatisticsData data1 = new ColStatsBuilder<>(Decimal.class).numNulls(1).numDVs(3)
.low(ONE).high(THREE).hll(1, 2, 3).kll(1, 2, 3).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(Decimal.class).numNulls(3).numDVs(1)
.low(SEVEN).high(SEVEN).hll(7).kll(7).build();
ColumnStatisticsData data4 = new ColStatsBuilder<>(Decimal.class).numNulls(2).numDVs(3)
.low(THREE).high(FIVE).hll(3, 4, 5).kll(3, 4, 5).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)),
createStatsWithInfo(data4, TABLE, COL, partitions.get(3)));
DecimalColumnStatsAggregator aggregator = new DecimalColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, false);
// hll in case of missing stats is left as null, only numDVs is updated
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Decimal.class).numNulls(8).numDVs(4)
.low(ONE).high(DecimalUtils.createThriftDecimal("9.4")).kll(1, 2, 3, 7, 3, 4, 5).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
} |
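When the bitvector estimators cannot be merged, the aggregator falls back to the density heuristic described in the comments above: average the per-partition (high - low) / ndv gap, divide the aggregate range by it, and clamp the result between the largest single-partition NDV and the sum of all NDVs. A worked sketch with illustrative numbers (not the ones from the test above); the focal method divides the density sum by the number of requested partitions, while this sketch divides by the number of partitions with stats.

public class NdvDensitySketch {
    public static void main(String[] args) {
        double[] low  = {1, 3};   // per-partition low values
        double[] high = {3, 7};   // per-partition high values
        long[]   ndv  = {3, 3};   // per-partition distinct-value counts

        double densityAvgSum = 0;
        long lowerBound = 0;
        long higherBound = 0;
        for (int i = 0; i < ndv.length; i++) {
            densityAvgSum += (high[i] - low[i]) / ndv[i]; // average gap between distinct values
            lowerBound = Math.max(lowerBound, ndv[i]);    // at least the largest partition's NDV
            higherBound += ndv[i];                        // at most all partitions disjoint
        }
        double densityAvg = densityAvgSum / ndv.length;   // 2.0 / 2 = 1.0
        long estimation = (long) ((7 - 1) / densityAvg);  // aggregate range / density = 6
        estimation = Math.max(lowerBound, Math.min(higherBound, estimation));
        System.out.println(estimation);                   // 6, already within [3, 6]
    }
}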
static int internalEncodeLogHeader(
final MutableDirectBuffer encodingBuffer,
final int offset,
final int captureLength,
final int length,
final NanoClock nanoClock)
{
if (captureLength < 0 || captureLength > length || captureLength > MAX_CAPTURE_LENGTH)
{
throw new IllegalArgumentException("invalid input: captureLength=" + captureLength + ", length=" + length);
}
int encodedLength = 0;
/*
* Stream of values:
* - capture buffer length (int)
* - total buffer length (int)
* - timestamp (long)
* - buffer (until end)
*/
encodingBuffer.putInt(offset + encodedLength, captureLength, LITTLE_ENDIAN);
encodedLength += SIZE_OF_INT;
encodingBuffer.putInt(offset + encodedLength, length, LITTLE_ENDIAN);
encodedLength += SIZE_OF_INT;
encodingBuffer.putLong(offset + encodedLength, nanoClock.nanoTime(), LITTLE_ENDIAN);
encodedLength += SIZE_OF_LONG;
return encodedLength;
} | @Test
void encodeLogHeaderThrowsIllegalArgumentExceptionIfCaptureLengthIsGreaterThanLength()
{
assertThrows(IllegalArgumentException.class,
() -> internalEncodeLogHeader(buffer, 0, 100, 80, () -> 0));
} |
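The layout comment in internalEncodeLogHeader pins down the wire format: a little-endian int capture length, an int total length, and a long nanosecond timestamp ahead of the payload. A minimal decode sketch using java.nio.ByteBuffer rather than Agrona's MutableDirectBuffer:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class LogHeaderDecodeSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
        buf.putInt(64);      // capture buffer length
        buf.putInt(128);     // total buffer length
        buf.putLong(42L);    // timestamp from the NanoClock
        buf.flip();

        int captureLength = buf.getInt();
        int length = buf.getInt();
        long timestampNs = buf.getLong();
        System.out.printf("capture=%d total=%d t=%dns%n", captureLength, length, timestampNs);
    }
}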
@Operation(summary = "Get single service")
@GetMapping(value = "{id}", produces = "application/json")
@ResponseBody
public Service getById(@PathVariable("id") Long id) {
return serviceService.getServiceById(id);
} | @Test
public void getServiceById() {
Service service = new Service();
service.setName("test");
when(serviceServiceMock.getServiceById(anyLong())).thenReturn(service);
Service result = controller.getById(1L);
assertEquals("test", result.getName());
verify(serviceServiceMock, times(1)).getServiceById(anyLong());
} |
@ConstantFunction(name = "concat_ws", argTypes = {VARCHAR, VARCHAR}, returnType = VARCHAR)
public static ConstantOperator concat_ws(ConstantOperator split, ConstantOperator... values) {
Preconditions.checkArgument(values.length > 0);
if (split.isNull()) {
return ConstantOperator.createNull(Type.VARCHAR);
}
final StringBuilder resultBuilder = new StringBuilder();
for (int i = 0; i < values.length - 1; i++) {
if (values[i].isNull()) {
continue;
}
resultBuilder.append(values[i].getVarchar()).append(split.getVarchar());
}
resultBuilder.append(values[values.length - 1].getVarchar());
return ConstantOperator.createVarchar(resultBuilder.toString());
} | @Test
public void concat_ws_with_null() {
ConstantOperator[] argWithNull = {ConstantOperator.createVarchar("star"),
ConstantOperator.createNull(Type.VARCHAR),
ConstantOperator.createVarchar("cks")};
ConstantOperator result =
ScalarOperatorFunctions.concat_ws(ConstantOperator.createVarchar("ro"), argWithNull);
assertEquals(Type.VARCHAR, result.getType());
assertEquals("starrocks", result.getVarchar());
result = ScalarOperatorFunctions.concat_ws(ConstantOperator.createVarchar(","),
ConstantOperator.createNull(Type.VARCHAR));
assertEquals("", result.getVarchar());
ConstantOperator[] argWithoutNull = {ConstantOperator.createVarchar("star"),
ConstantOperator.createVarchar("cks")};
result = ScalarOperatorFunctions.concat_ws(ConstantOperator.createNull(Type.VARCHAR), argWithoutNull);
assertTrue(result.isNull());
} |
@Override
public void prepare(ExecutorDetails exec) {
this.exec = exec;
} | @Test
public void testPreferRackWithTopoExecutors() {
INimbus iNimbus = new INimbusTest();
double compPcore = 100;
double compOnHeap = 775;
double compOffHeap = 25;
int topo1NumSpouts = 1;
int topo1NumBolts = 5;
int topo1SpoutParallelism = 100;
int topo1BoltParallelism = 200;
int topo2NumSpouts = 1;
int topo2NumBolts = 5;
int topo2SpoutParallelism = 10;
int topo2BoltParallelism = 20;
final int numRacks = 3;
final int numSupersPerRack = 10;
final int numPortsPerSuper = 6;
final int numZonesPerHost = 1;
final double numaResourceMultiplier = 1.0;
int rackStartNum = 0;
int supStartNum = 0;
long compPerRack = (topo1NumSpouts * topo1SpoutParallelism + topo1NumBolts * topo1BoltParallelism
+ topo2NumSpouts * topo2SpoutParallelism); // enough for topo1 but not topo1+topo2
long compPerSuper = compPerRack / numSupersPerRack;
double cpuPerSuper = compPcore * compPerSuper;
double memPerSuper = (compOnHeap + compOffHeap) * compPerSuper;
double topo1MaxHeapSize = memPerSuper;
double topo2MaxHeapSize = memPerSuper;
final String topoName1 = "topology1";
final String topoName2 = "topology2";
Map<String, SupervisorDetails> supMap = genSupervisorsWithRacksAndNuma(
numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum, supStartNum,
cpuPerSuper, memPerSuper, Collections.emptyMap(), numaResourceMultiplier);
TestDNSToSwitchMapping testDNSToSwitchMapping = new TestDNSToSwitchMapping(supMap.values());
Config config = new Config();
config.putAll(createGrasClusterConfig(compPcore, compOnHeap, compOffHeap, null, null));
config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, GenericResourceAwareStrategy.class.getName());
IScheduler scheduler = new ResourceAwareScheduler();
scheduler.prepare(config, new StormMetricsRegistry());
TopologyDetails td1 = genTopology(topoName1, config, topo1NumSpouts,
topo1NumBolts, topo1SpoutParallelism, topo1BoltParallelism, 0, 0, "user", topo1MaxHeapSize);
//Schedule the topo1 topology and ensure it fits on 1 rack
Topologies topologies = new Topologies(td1);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
cluster.setNetworkTopography(testDNSToSwitchMapping.getRackToHosts());
scheduler.schedule(topologies, cluster);
Set<String> assignedRacks = cluster.getAssignedRacks(td1.getId());
assertEquals(1, assignedRacks.size(), "Racks for topology=" + td1.getId() + " is " + assignedRacks);
TopologyBuilder builder = topologyBuilder(topo2NumSpouts, topo2NumBolts, topo2SpoutParallelism, topo2BoltParallelism);
TopologyDetails td2 = topoToTopologyDetails(topoName2, config, builder.createTopology(), 0, 0,"user", topo2MaxHeapSize);
//Now schedule the second topology with the first one already in place.
topologies = new Topologies(td1, td2);
cluster = new Cluster(cluster, topologies);
scheduler.schedule(topologies, cluster);
assignedRacks = cluster.getAssignedRacks(td1.getId(), td2.getId());
assertEquals(2, assignedRacks.size(), "Racks for topologies=" + td1.getId() + "/" + td2.getId() + " is " + assignedRacks);
// topo2 gets scheduled on its own rack because it is empty and available
assignedRacks = cluster.getAssignedRacks(td2.getId());
assertEquals(1, assignedRacks.size(), "Racks for topologies=" + td2.getId() + " is " + assignedRacks);
// now unassign topo2, expect only one rack to be in use; free some slots and reschedule some topo1 executors
cluster.unassign(td2.getId());
assignedRacks = cluster.getAssignedRacks(td2.getId());
assertEquals(0, assignedRacks.size(),
"After unassigning topology " + td2.getId() + ", racks for topology=" + td2.getId() + " is " + assignedRacks);
assignedRacks = cluster.getAssignedRacks(td1.getId());
assertEquals(1, assignedRacks.size(),
"After unassigning topology " + td2.getId() + ", racks for topology=" + td1.getId() + " is " + assignedRacks);
assertFalse(cluster.needsSchedulingRas(td1),
"Topology " + td1.getId() + " should be fully assigned before freeing slots");
freeSomeWorkerSlots(cluster);
assertTrue(cluster.needsSchedulingRas(td1),
"Topology " + td1.getId() + " should need scheduling after freeing slots");
// then reschedule executors
scheduler.schedule(topologies, cluster);
// only one rack should be in use by topology1
assignedRacks = cluster.getAssignedRacks(td1.getId());
assertEquals(1, assignedRacks.size(),
"After reassigning topology " + td2.getId() + ", racks for topology=" + td1.getId() + " is " + assignedRacks);
} |
@VisibleForTesting
static AvroMetadata readMetadataFromFile(ResourceId fileResource) throws IOException {
String codec = null;
String schemaString = null;
byte[] syncMarker;
try (InputStream stream = Channels.newInputStream(FileSystems.open(fileResource))) {
BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(stream, null);
// The header of an object container file begins with a four-byte magic number, followed
// by the file metadata (including the schema and codec), encoded as a map. Finally, the
// header ends with the file's 16-byte sync marker.
// See https://avro.apache.org/docs/1.7.7/spec.html#Object+Container+Files for details on
// the encoding of container files.
// Read the magic number.
byte[] magic = new byte[DataFileConstants.MAGIC.length];
decoder.readFixed(magic);
if (!Arrays.equals(magic, DataFileConstants.MAGIC)) {
throw new IOException("Missing Avro file signature: " + fileResource);
}
// Read the metadata to find the codec and schema.
ByteBuffer valueBuffer = ByteBuffer.allocate(512);
long numRecords = decoder.readMapStart();
while (numRecords > 0) {
for (long recordIndex = 0; recordIndex < numRecords; recordIndex++) {
String key = decoder.readString();
// readBytes() clears the buffer and returns a buffer where:
// - position is the start of the bytes read
// - limit is the end of the bytes read
valueBuffer = decoder.readBytes(valueBuffer);
byte[] bytes = new byte[valueBuffer.remaining()];
valueBuffer.get(bytes);
if (key.equals(DataFileConstants.CODEC)) {
codec = new String(bytes, StandardCharsets.UTF_8);
} else if (key.equals(DataFileConstants.SCHEMA)) {
schemaString = new String(bytes, StandardCharsets.UTF_8);
}
}
numRecords = decoder.mapNext();
}
if (codec == null) {
codec = DataFileConstants.NULL_CODEC;
}
// Finally, read the sync marker.
syncMarker = new byte[DataFileConstants.SYNC_SIZE];
decoder.readFixed(syncMarker);
}
checkState(schemaString != null, "No schema present in Avro file metadata %s", fileResource);
return new AvroMetadata(syncMarker, codec, schemaString);
} | @Test
public void testReadSchemaString() throws Exception {
List<Bird> expected = createRandomRecords(DEFAULT_RECORD_COUNT);
String codec = DataFileConstants.NULL_CODEC;
String filename =
generateTestFile(
codec, expected, SyncBehavior.SYNC_DEFAULT, 0, AvroCoder.of(Bird.class), codec);
Metadata fileMeta = FileSystems.matchSingleFileSpec(filename);
AvroSource.AvroMetadata metadata = AvroSource.readMetadataFromFile(fileMeta.resourceId());
// By default, parse validates the schema, which is what we want.
Schema schema = new Schema.Parser().parse(metadata.getSchemaString());
assertEquals(4, schema.getFields().size());
} |
@Override
public ExportResult<CalendarContainerResource> export(
UUID jobId, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation) {
if (!exportInformation.isPresent()) {
return exportCalendars(authData, Optional.empty());
} else {
StringPaginationToken paginationToken =
(StringPaginationToken) exportInformation.get().getPaginationData();
if (paginationToken != null && paginationToken.getToken().startsWith(CALENDAR_TOKEN_PREFIX)) {
// Next thing to export is more calendars
return exportCalendars(authData, Optional.of(paginationToken));
} else {
// Next thing to export is events
IdOnlyContainerResource idOnlyContainerResource =
(IdOnlyContainerResource) exportInformation.get().getContainerResource();
Optional<PaginationData> pageData = Optional.ofNullable(paginationToken);
return getCalendarEvents(authData,
idOnlyContainerResource.getId(),
pageData);
}
}
} | @Test
public void exportEventFirstSet() throws IOException {
setUpSingleEventResponse();
// Looking at first page, with at least one page after it
ContainerResource containerResource = new IdOnlyContainerResource(CALENDAR_ID);
ExportInformation exportInformation = new ExportInformation(null, containerResource);
eventListResponse.setNextPageToken(NEXT_TOKEN);
// Run test
ExportResult<CalendarContainerResource> result =
googleCalendarExporter.export(UUID.randomUUID(), null, Optional.of(exportInformation));
// Check results
// Verify correct methods were called
verify(calendarEvents).list(CALENDAR_ID);
verify(eventListRequest).setMaxAttendees(MAX_ATTENDEES);
verify(eventListRequest).execute();
// Check events
Collection<CalendarEventModel> actualEvents = result.getExportedData().getEvents();
assertThat(
actualEvents
.stream()
.map(CalendarEventModel::getCalendarId)
.collect(Collectors.toList()))
.containsExactly(CALENDAR_ID);
assertThat(actualEvents.stream().map(CalendarEventModel::getTitle).collect(Collectors.toList()))
.containsExactly(EVENT_DESCRIPTION);
// Check pagination token
ContinuationData continuationData = (ContinuationData) result.getContinuationData();
StringPaginationToken paginationToken =
(StringPaginationToken) continuationData.getPaginationData();
assertThat(paginationToken.getToken()).isEqualTo(EVENT_TOKEN_PREFIX + NEXT_TOKEN);
} |
public static VelocityEngine getEngine() {
try {
val props = new Properties();
props.setProperty(RuntimeConstants.INPUT_ENCODING, "UTF-8");
props.setProperty(RuntimeConstants.RESOURCE_LOADER, "classpath");
props.setProperty("resource.loader.string.class", StringResourceLoader.class.getName());
props.setProperty("resource.loader.classpath.class", ClasspathResourceLoader.class.getName());
props.setProperty(RuntimeConstants.RESOURCE_LOADERS, "classpath,string");
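    // Register both loaders so templates can be resolved from the classpath or from in-memory strings.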
val engine = new VelocityEngine();
engine.init(props);
return engine;
} catch (final Exception e) {
throw new TechnicalException("Error configuring velocity", e);
}
} | @Test
public void defaultProperties() {
val engine = VelocityEngineFactory.getEngine();
assertNotNull(engine);
assertEquals("org.apache.velocity.runtime.resource.loader.StringResourceLoader",
engine.getProperty("resource.loader.string.class"));
assertEquals("org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader",
engine.getProperty("resource.loader.classpath.class"));
assertEquals(vector("classpath"), engine.getProperty("resource.loaders"));
assertEquals("UTF-8", engine.getProperty(RuntimeConstants.INPUT_ENCODING));
} |
public static <Key extends Comparable, Value, ListType extends List<Value>> MultiMap<Key, Value, ListType> make(final boolean updatable,
final NewSubMapProvider<Value, ListType> newSubMapProvider) {
if (updatable) {
return new ChangeHandledMultiMap<>(new RawMultiMap<>(newSubMapProvider));
} else {
return new RawMultiMap<>(newSubMapProvider);
}
} | @Test
void normal() throws Exception {
assertThat(MultiMapFactory.make(false) instanceof ChangeHandledMultiMap).isFalse();
} |
public static <T> Partition<T> of(
int numPartitions,
PartitionWithSideInputsFn<? super T> partitionFn,
Requirements requirements) {
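    // Capture the user's partition function together with its declared side-input requirements.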
Contextful ctfFn =
Contextful.fn(
(T element, Contextful.Fn.Context c) ->
partitionFn.partitionFor(element, numPartitions, c),
requirements);
return new Partition<>(new PartitionDoFn<T>(numPartitions, ctfFn, partitionFn));
} | @Test
public void testPartitionGetName() {
assertEquals("Partition", Partition.of(3, new ModFn()).getName());
} |
public static int AUG_CCITT(@NonNull final byte[] data, final int offset, final int length) {
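    // CRC-16/AUG-CCITT parameters: poly 0x1021, init 0x1D0F, no input/output reflection, xorout 0x0000.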
return CRC(0x1021, 0x1D0F, data, offset, length, false, false, 0x0000);
} | @Test
public void AUG_CCITT_123456789() {
final byte[] data = "123456789".getBytes();
assertEquals(0xE5CC, CRC16.AUG_CCITT(data, 0, 9));
} |
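// Hedged cross-check, not part of the original suite: a minimal bit-by-bit
// CRC-16/AUG-CCITT reference using only the parameters visible in the focal
// method above (poly 0x1021, init 0x1D0F, no reflection, xorout 0x0000).
// The helper name augCcittReference is hypothetical.
static int augCcittReference(final byte[] data) {
    int crc = 0x1D0F;                                    // initial value
    for (final byte b : data) {
        crc ^= (b & 0xFF) << 8;                          // feed the byte into the high bits
        for (int i = 0; i < 8; i++) {
            crc = ((crc & 0x8000) != 0) ? ((crc << 1) ^ 0x1021) : (crc << 1);
            crc &= 0xFFFF;                               // keep the register 16-bit
        }
    }
    return crc;                                          // yields 0xE5CC for "123456789"
}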
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyOutOfOrder() {
assertThat(asList(1, 2, 3, 4)).containsExactly(3, 1, 4, 2);
} |
public FEELFnResult<List<Object>> invoke(@ParameterName("list") Object[] lists) {
if ( lists == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "lists", "cannot be null"));
}
final Set<Object> resultSet = new LinkedHashSet<>();
for ( final Object list : lists ) {
if ( list instanceof Collection ) {
resultSet.addAll((Collection) list);
} else {
resultSet.add(list);
}
}
// spec requires us to return a new list
return FEELFnResult.ofResult( new ArrayList<>(resultSet) );
} | @Test
void invokeSingleObjectInAnArray() {
final int[] testArray = new int[]{10};
FunctionTestUtil.assertResult(unionFunction.invoke(new Object[]{testArray}),
Collections.singletonList(testArray));
} |
@Override
public PageResult<FileConfigDO> getFileConfigPage(FileConfigPageReqVO pageReqVO) {
return fileConfigMapper.selectPage(pageReqVO);
} | @Test
public void testGetFileConfigPage() {
// mock data
FileConfigDO dbFileConfig = randomFileConfigDO().setName("芋道源码")
.setStorage(FileStorageEnum.LOCAL.getStorage());
dbFileConfig.setCreateTime(LocalDateTimeUtil.parse("2020-01-23", DatePattern.NORM_DATE_PATTERN));// matched by the query below
fileConfigMapper.insert(dbFileConfig);
// test: name does not match
fileConfigMapper.insert(cloneIgnoreId(dbFileConfig, o -> o.setName("源码")));
// test: storage does not match
fileConfigMapper.insert(cloneIgnoreId(dbFileConfig, o -> o.setStorage(FileStorageEnum.DB.getStorage())));
// test: createTime does not match
fileConfigMapper.insert(cloneIgnoreId(dbFileConfig, o -> o.setCreateTime(LocalDateTimeUtil.parse("2020-11-23", DatePattern.NORM_DATE_PATTERN))));
// prepare parameters
FileConfigPageReqVO reqVO = new FileConfigPageReqVO();
reqVO.setName("芋道");
reqVO.setStorage(FileStorageEnum.LOCAL.getStorage());
reqVO.setCreateTime((new LocalDateTime[]{buildTime(2020, 1, 1),
buildTime(2020, 1, 24)}));
// invoke
PageResult<FileConfigDO> pageResult = fileConfigService.getFileConfigPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbFileConfig, pageResult.getList().get(0));
} |
@Override
public void isEqualTo(@Nullable Object expected) {
if (sameClassMessagesWithDifferentDescriptors(actual, expected)) {
// This can happen with DynamicMessages, and it's very confusing if they both have the
// same string.
failWithoutActual(
simpleFact("Not true that messages compare equal; they have different descriptors."),
fact("expected", expected),
fact("with descriptor", ((Message) expected).getDescriptorForType()),
fact("but was", actual),
fact("with descriptor", actual.getDescriptorForType()));
} else if (notMessagesWithSameDescriptor(actual, expected)) {
super.isEqualTo(expected);
} else {
DiffResult diffResult =
makeDifferencer((Message) expected).diffMessages(actual, (Message) expected);
if (!diffResult.isMatched()) {
failWithoutActual(
simpleFact(
"Not true that messages compare equal.\n"
+ diffResult.printToString(config.reportMismatchesOnly())));
}
}
} | @Test
public void testMapWithDefaultKeysAndValues() throws InvalidProtocolBufferException {
Descriptor descriptor = getFieldDescriptor("o_int").getContainingType();
String defaultString = "";
int defaultInt32 = 0;
Message message = makeProtoMap(ImmutableMap.of(defaultString, 1, "foo", defaultInt32));
Message dynamicMessage =
DynamicMessage.parseFrom(
descriptor, message.toByteString(), ExtensionRegistry.getEmptyRegistry());
expectThat(message).isEqualTo(dynamicMessage);
} |
public static SqlType fromValue(final BigDecimal value) {
// SqlDecimal does not support negative scale:
final BigDecimal decimal = value.scale() < 0
? value.setScale(0, BigDecimal.ROUND_UNNECESSARY)
: value;
/* We can't use BigDecimal.precision() directly for all cases, since it defines
* precision differently from SQL Decimal.
* In particular, if the decimal is between -0.1 and 0.1, BigDecimal precision can be
* lower than scale, which is disallowed in SQL Decimal. For example, 0.005 in
* BigDecimal has a precision,scale of 1,3; whereas we expect 4,3.
* If the decimal is in (-1,1) but outside (-0.1,0.1), the code doesn't throw, but
* gives lower precision than expected (e.g., 0.8 has precision 1 instead of 2).
* To account for this edge case, we just take the scale and add one and use that
* for the precision instead. This works since BigDecimal defines scale as the
* number of digits to the right of the period; which is one lower than the precision for
* anything in the range (-1, 1).
* This covers the case where BigDecimal has a value of 0.
* Note: This solution differs from the SQL definition in that it returns (4, 3) for
* both "0.005" and ".005", whereas SQL expects (3, 3) for the latter. This is unavoidable
* if we use BigDecimal as an intermediate representation, since the two strings are parsed
* identically by it to have precision 1.
*/
if (decimal.compareTo(BigDecimal.ONE) < 0 && decimal.compareTo(BigDecimal.ONE.negate()) > 0) {
return SqlTypes.decimal(decimal.scale() + 1, decimal.scale());
}
return SqlTypes.decimal(decimal.precision(), Math.max(decimal.scale(), 0));
} | @Test
public void shouldGetSchemaFromDecimal2_2() {
// When:
final SqlType schema = DecimalUtil.fromValue(new BigDecimal(".12"));
// Note: this behavior is different from the SQL specification, where
// we expect precision = 2, scale = 2. This difference is because we use
// BigDecimal in our implementation, which treats precision differently.
// Then:
assertThat(schema, is(SqlTypes.decimal(3, 2)));
} |
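// Hedged illustration, a standalone snippet not taken from the original sources:
// it shows the BigDecimal corner case that fromValue() compensates for.
// The class name DecimalPrecisionDemo is hypothetical.
import java.math.BigDecimal;

public class DecimalPrecisionDemo {
    public static void main(String[] args) {
        BigDecimal d = new BigDecimal("0.005");
        // precision() reports 1 even though scale() is 3; SQL DECIMAL expects (4, 3) here.
        System.out.println(d.precision() + "," + d.scale()); // prints "1,3"
    }
}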
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStart,
final Range<Instant> windowEnd,
final Optional<Position> position
) {
try {
final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);
StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request =
inStore(stateStore.getStateStoreName()).withQuery(query);
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result =
stateStore.getKafkaStreams().query(request);
final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
result.getPartitionResults().get(partition);
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
}
try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it =
queryResult.getResult()) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
final Window wnd = next.key.window();
if (!windowStart.contains(wnd.startTime())) {
continue;
}
if (!windowEnd.contains(wnd.endTime())) {
continue;
}
final long rowTime = wnd.end();
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
next.key,
next.value,
rowTime
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIteratorWithPosition(
builder.build().iterator(), queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldReturnValueIfSessionEndsAtLowerBoundIfLowerStartBoundClosed() {
// Given:
final Range<Instant> endBounds = Range.closed(
LOWER_INSTANT,
UPPER_INSTANT
);
final Instant wstart = LOWER_INSTANT.minusMillis(1);
givenSingleSession(wstart, LOWER_INSTANT);
// When:
final KsMaterializedQueryResult<WindowedRow> result =
table.get(A_KEY, PARTITION, Range.all(), endBounds);
// Then:
final Iterator<WindowedRow> rowIterator = result.getRowIterator();
assertThat(rowIterator.hasNext(), is(true));
assertThat(rowIterator.next(), is(
WindowedRow.of(
SCHEMA,
sessionKey(wstart, LOWER_INSTANT),
A_VALUE,
LOWER_INSTANT.toEpochMilli()
)
));
assertThat(result.getPosition(), not(Optional.empty()));
assertThat(result.getPosition().get(), is(POSITION));
} |
@Override
public void unSubscribe(ConsumerConfig consumerConfig) {
String directUrl = consumerConfig.getDirectUrl();
notifyListeners.get(directUrl).remove(consumerConfig);
} | @Test
public void testUnSubscribe() {
ConsumerConfig<Object> consumerConfig = new ConsumerConfig<>();
String directUrl = "bolt://alipay.com";
consumerConfig.setDirectUrl(directUrl);
List<ProviderGroup> providerGroups = domainRegistry.subscribe(consumerConfig);
assertTrue(domainRegistry.notifyListeners.containsKey(directUrl));
assertSame(consumerConfig, domainRegistry.notifyListeners.get(directUrl).get(0));
assertEquals(1, domainRegistry.notifyListeners.get(directUrl).size());
domainRegistry.unSubscribe(consumerConfig);
assertTrue(domainRegistry.notifyListeners.containsKey(directUrl));
assertEquals(0, domainRegistry.notifyListeners.get(directUrl).size());
} |
@Override
public long getQueryCount() {
throw new UnsupportedOperationException("Queries on replicated maps are not supported.");
} | @Test(expected = UnsupportedOperationException.class)
public void testQueryCount() {
localReplicatedMapStats.getQueryCount();
} |
public SerializableFunction<T, Row> getToRowFunction() {
return toRowFunction;
} | @Test
public void testOuterOneOfProtoToRow() throws InvalidProtocolBufferException {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(OuterOneOf.getDescriptor());
SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction();
// equality doesn't work between dynamic messages and other,
// so we compare string representation
assertEquals(OUTER_ONEOF_ROW.toString(), toRow.apply(toDynamic(OUTER_ONEOF_PROTO)).toString());
} |
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String db2Type = typeDefine.getDataType().toUpperCase();
switch (db2Type) {
case DB2_BOOLEAN:
builder.sourceType(DB2_BOOLEAN);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case DB2_SMALLINT:
builder.sourceType(DB2_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case DB2_INT:
case DB2_INTEGER:
builder.sourceType(DB2_INT);
builder.dataType(BasicType.INT_TYPE);
break;
case DB2_BIGINT:
builder.sourceType(DB2_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case DB2_REAL:
builder.sourceType(DB2_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case DB2_DOUBLE:
builder.sourceType(DB2_DOUBLE);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DB2_DECFLOAT:
builder.sourceType(DB2_DECFLOAT);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DB2_DECIMAL:
builder.sourceType(
String.format(
"%s(%s,%s)",
DB2_DECIMAL, typeDefine.getPrecision(), typeDefine.getScale()));
builder.dataType(
new DecimalType(
Math.toIntExact(typeDefine.getPrecision()), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case DB2_CHARACTER:
case DB2_CHAR:
builder.sourceType(String.format("%s(%d)", DB2_CHAR, typeDefine.getLength()));
// For char/varchar this length is in bytes
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_VARCHAR:
builder.sourceType(String.format("%s(%d)", DB2_VARCHAR, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_LONG_VARCHAR:
builder.sourceType(DB2_LONG_VARCHAR);
// default length is 32700
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_CLOB:
builder.sourceType(String.format("%s(%d)", DB2_CLOB, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_GRAPHIC:
builder.sourceType(String.format("%s(%d)", DB2_GRAPHIC, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_VARGRAPHIC:
builder.sourceType(String.format("%s(%d)", DB2_VARGRAPHIC, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_DBCLOB:
builder.sourceType(String.format("%s(%d)", DB2_DBCLOB, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_XML:
builder.sourceType(DB2_XML);
builder.columnLength((long) Integer.MAX_VALUE);
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_BINARY:
builder.sourceType(String.format("%s(%d)", DB2_BINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_VARBINARY:
builder.sourceType(String.format("%s(%d)", DB2_VARBINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_BLOB:
builder.sourceType(String.format("%s(%d)", DB2_BLOB, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_DATE:
builder.sourceType(DB2_DATE);
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case DB2_TIME:
builder.sourceType(DB2_TIME);
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
break;
case DB2_TIMESTAMP:
builder.sourceType(String.format("%s(%d)", DB2_TIMESTAMP, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.DB_2, db2Type, typeDefine.getName());
}
return builder.build();
} | @Test
public void testConvertBytes() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("BINARY")
.dataType("BINARY")
.length(1L)
.build();
Column column = DB2TypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(typeDefine.getLength(), column.getColumnLength());
Assertions.assertEquals(
String.format("%s(%s)", DB2TypeConverter.DB2_BINARY, typeDefine.getLength()),
column.getSourceType());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("VARBINARY")
.dataType("VARBINARY")
.length(1L)
.build();
column = DB2TypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(typeDefine.getLength(), column.getColumnLength());
Assertions.assertEquals(
String.format("%s(%s)", DB2TypeConverter.DB2_VARBINARY, typeDefine.getLength()),
column.getSourceType());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("BLOB")
.dataType("BLOB")
.length(1L)
.build();
column = DB2TypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(
String.format("%s(%s)", DB2TypeConverter.DB2_BLOB, typeDefine.getLength()),
column.getSourceType());
} |
@Deprecated
@Override public void toXML(Object obj, OutputStream out) {
super.toXML(obj, out);
} | @Issue("JENKINS-8006") // Previously a null entry in an array caused NPE
@Test
public void emptyStack() {
assertEquals("<object-array><null/><null/></object-array>",
Run.XSTREAM.toXML(new Object[2]).replaceAll("[ \n\r\t]+", ""));
} |
public final DisposableServer bindNow() {
return bindNow(Duration.ofSeconds(45));
} | @Test
void testDisposeTimeoutLongOverflow() {
assertThatExceptionOfType(ArithmeticException.class)
.isThrownBy(() -> new TestServerTransport(Mono.just(EmbeddedChannel::new)).bindNow().disposeNow(Duration.ofMillis(Long.MAX_VALUE)));
} |
public List<PhotoAlbum> split(int numberOfNewAlbums) {
return IntStream.range(1, numberOfNewAlbums + 1)
.mapToObj(
i ->
new PhotoAlbum(
String.format("%s-pt%d", id, i),
String.format("%s (%d/%d)", id, i, numberOfNewAlbums),
description))
.collect(Collectors.toList());
} | @Test
public void splitNegative() {
PhotoAlbum originalAlbum = new PhotoAlbum("123", "MyAlbum", DESCRIPTION);
List<PhotoAlbum> actual = originalAlbum.split(-1);
Truth.assertThat(actual).isEmpty();
} |
@Override
public Config getConfig() {
return config;
} | @Test
public void testSingleConfigYAML() throws IOException {
RedissonClient r = createInstance();
String t = r.getConfig().toYAML();
Config c = Config.fromYAML(t);
assertThat(c.toYAML()).isEqualTo(t);
} |
public static String resolveRaw(String str) {
int len = str.length();
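    // Too short to contain the 4-char "RAW(" / "RAW{" prefix plus a closing character.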
if (len <= 4) {
return null;
}
int endPos = len - 1;
char last = str.charAt(endPos);
// optimize to not create new objects
if (last == ')') {
char char1 = str.charAt(0);
char char2 = str.charAt(1);
char char3 = str.charAt(2);
char char4 = str.charAt(3);
if (char1 == 'R' && char2 == 'A' && char3 == 'W' && char4 == '(') {
return str.substring(4, endPos);
}
} else if (last == '}') {
char char1 = str.charAt(0);
char char2 = str.charAt(1);
char char3 = str.charAt(2);
char char4 = str.charAt(3);
if (char1 == 'R' && char2 == 'A' && char3 == 'W' && char4 == '{') {
return str.substring(4, endPos);
}
}
// not RAW value
return null;
} | @Test
void testURIScannerType2() {
final String resolvedRaw1 = URIScanner.resolveRaw("RAW{++?w0rd}");
Assertions.assertEquals("++?w0rd", resolvedRaw1);
} |
Set<String> getRetry() {
return retry;
} | @Test
public void determineRetryWhenNotSet() {
Athena2QueryHelper helper = athena2QueryHelperWithRetry(null);
assertEquals(new HashSet<>(Collections.singletonList("never")), helper.getRetry());
} |
public <T> T sendAndReceive(String destination, Message<?> message, Type type) {
return sendAndReceive(destination, message, type, null, producer.getSendMsgTimeout(), 0);
} | @Test
public void testSendAndReceive_Async() {
try {
rocketMQTemplate.sendAndReceive(stringRequestTopic, MessageBuilder.withPayload("requestTopicASync").build(), new RocketMQLocalRequestCallback<String>() {
@Override public void onSuccess(String message) {
System.out.printf("receive string: %s %n", message);
}
@Override public void onException(Throwable e) {
e.printStackTrace();
}
});
} catch (MessagingException e) {
assertThat(e).hasMessageContaining("org.apache.rocketmq.remoting.exception.RemotingConnectException: connect to null failed");
}
try {
rocketMQTemplate.sendAndReceive(stringRequestTopic, "requestTopicAsyncWithHasKey", new RocketMQLocalRequestCallback<String>() {
@Override public void onSuccess(String message) {
System.out.printf("receive string: %s %n", message);
}
@Override public void onException(Throwable e) {
e.printStackTrace();
}
}, "order-id");
} catch (MessagingException e) {
assertThat(e).hasMessageContaining("org.apache.rocketmq.remoting.exception.RemotingConnectException: connect to null failed");
}
try {
rocketMQTemplate.sendAndReceive(stringRequestTopic, "requestTopicAsyncWithTimeout", new RocketMQLocalRequestCallback<String>() {
@Override public void onSuccess(String message) {
System.out.printf("receive string: %s %n", message);
}
@Override public void onException(Throwable e) {
e.printStackTrace();
}
}, "order-id", 5000);
} catch (MessagingException e) {
assertThat(e).hasMessageContaining("org.apache.rocketmq.remoting.exception.RemotingConnectException: connect to null failed");
}
try {
rocketMQTemplate.sendAndReceive(objectRequestTopic, "requestTopicAsyncWithTimeout", new RocketMQLocalRequestCallback<MessageExt>() {
@Override public void onSuccess(MessageExt message) {
System.out.printf("receive messageExt: %s %n", message.toString());
}
@Override public void onException(Throwable e) {
e.printStackTrace();
}
}, 5000);
} catch (MessagingException e) {
assertThat(e).hasMessageContaining("org.apache.rocketmq.remoting.exception.RemotingConnectException: connect to null failed");
}
} |
public Optional<EndpointCertificateSecrets> readEndpointCertificateSecrets(EndpointCertificateMetadata metadata) {
return Optional.of(readFromSecretStore(metadata));
} | @Test
void reads_from_correct_endpoint_certificate_store() {
MockSecretStore secretStore = new MockSecretStore();
secretStore.put("cert", 1, X509CertificateUtils.toPem(digicertCertificate));
secretStore.put("key", 1, KeyUtils.toPem(keyPair.getPrivate()));
DefaultEndpointCertificateSecretStore defaultEndpointCertificateSecretStore = new DefaultEndpointCertificateSecretStore(secretStore);
TestEndpointCertificateSecretStore zerosslStore = new TestEndpointCertificateSecretStore(X509CertificateUtils.toPem(zerosslCertificate), KeyUtils.toPem(keyPair.getPrivate()));
EndpointCertificateRetriever retriever = new EndpointCertificateRetriever(List.of(defaultEndpointCertificateSecretStore, zerosslStore));
{
Optional<EndpointCertificateSecrets> endpointCertificateSecrets = retriever.readEndpointCertificateSecrets(
new EndpointCertificateMetadata("key", "cert", 1, EndpointCertificateMetadata.Provider.digicert));
Assertions.assertTrue(endpointCertificateSecrets.isPresent());
Assertions.assertEquals("CN=digicert", X509CertificateUtils.fromPem(endpointCertificateSecrets.get().certificate()).getSubjectX500Principal().getName());
}
{
Optional<EndpointCertificateSecrets> endpointCertificateSecrets = retriever.readEndpointCertificateSecrets(
new EndpointCertificateMetadata("key", "cert", 1, EndpointCertificateMetadata.Provider.zerossl));
Assertions.assertTrue(endpointCertificateSecrets.isPresent());
Assertions.assertEquals("CN=zerossl", X509CertificateUtils.fromPem(endpointCertificateSecrets.get().certificate()).getSubjectX500Principal().getName());
}
} |
public static Builder newBuilder() {
return new Builder();
} | @Test
public void testBuildSessionEnvironment() {
String sessionName = "test";
Map<String, String> configMap = new HashMap<>();
configMap.put("key1", "value1");
configMap.put("key2", "value2");
SessionEnvironment expectedEnvironment =
new SessionEnvironment(
sessionName,
MockedEndpointVersion.V1,
new HashMap<>(),
new HashMap<>(),
"default",
configMap);
SessionEnvironment actualEnvironment =
SessionEnvironment.newBuilder()
.setSessionName(sessionName)
.setSessionEndpointVersion(MockedEndpointVersion.V1)
.addSessionConfig(configMap)
.setDefaultCatalog("default")
.build();
assertEquals(expectedEnvironment, actualEnvironment);
} |
static boolean solve(RaidRoom[] rooms)
{
if (rooms == null)
{
return false;
}
List<RaidRoom> match = null;
Integer start = null;
Integer index = null;
int known = 0;
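  // First pass: find the first identified combat room and count how many are known.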
for (int i = 0; i < rooms.length; i++)
{
if (rooms[i] == null || rooms[i].getType() != RoomType.COMBAT || rooms[i] == UNKNOWN_COMBAT)
{
continue;
}
if (start == null)
{
start = i;
}
known++;
}
if (known < 2)
{
return false;
}
if (known == rooms.length)
{
return true;
}
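  // Try each known rotation, sliding it against the identified rooms to find a consistent alignment.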
for (List rotation : ROTATIONS)
{
COMPARE:
for (int i = 0; i < rotation.size(); i++)
{
if (rooms[start] == rotation.get(i))
{
for (int j = start + 1; j < rooms.length; j++)
{
    if (rooms[j] == null || rooms[j].getType() != RoomType.COMBAT || rooms[j] == UNKNOWN_COMBAT)
{
continue;
}
if (rooms[j] != rotation.get(floorMod(i + j - start, rotation.size())))
{
break COMPARE;
}
}
if (match != null && match != rotation)
{
return false;
}
index = i - start;
match = rotation;
}
}
}
if (match == null)
{
return false;
}
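  // Fill each unidentified combat slot from the uniquely matching rotation, preserving alignment.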
for (int i = 0; i < rooms.length; i++)
{
if (rooms[i] == null)
{
continue;
}
if (rooms[i].getType() != RoomType.COMBAT || rooms[i] == UNKNOWN_COMBAT)
{
rooms[i] = match.get(floorMod(index + i, match.size()));
}
}
return true;
} | @Test
public void testSolve5()
{
RaidRoom[] rooms = new RaidRoom[]{GUARDIANS, UNKNOWN_COMBAT, SHAMANS, VASA};
RotationSolver.solve(rooms);
assertArrayEquals(new RaidRoom[]{GUARDIANS, VESPULA, SHAMANS, VASA}, rooms);
} |
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof URL)) {
return false;
}
URL other = (URL) obj;
return Objects.equals(this.getUrlAddress(), other.getUrlAddress())
&& Objects.equals(this.getUrlParam(), other.getUrlParam());
} | @Test
void testEquals() {
URL url1 = URL.valueOf(
"10.20.130.230:20880/context/path?interface=org.apache.dubbo.test.interfaceName&group=group&version=1.0.0");
URL url2 = URL.valueOf(
"10.20.130.230:20880/context/path?interface=org.apache.dubbo.test.interfaceName&group=group&version=1.0.0");
Assertions.assertEquals(url1, url2);
URL url3 = URL.valueOf(
"10.20.130.230:20881/context/path?interface=org.apache.dubbo.test.interfaceName&group=group&version=1.0.0");
Assertions.assertNotEquals(url1, url3);
URL url4 = URL.valueOf(
"10.20.130.230:20880/context/path?interface=org.apache.dubbo.test.interfaceName&weight=10&group=group&version=1.0.0");
Assertions.assertNotEquals(url1, url4);
URL url5 = URL.valueOf(
"10.20.130.230:20880/context/path?interface=org.apache.dubbo.test.interfaceName&weight=10&group=group&version=1.0.0");
Assertions.assertEquals(url4, url5);
URL url6 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=false&dubbo=2.0.2&generic=true&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=1599556506417");
URL url7 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=false&dubbo=2.0.2&generic=true&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=2299556506417");
assertEquals(url6, url7);
URL url8 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=false&dubbo=2.0.2&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=2299556506417");
assertNotEquals(url7, url8);
URL url9 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=true&dubbo=2.0.2&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=2299556506417");
assertNotEquals(url8, url9);
} |
public void refreshOptions() {
connectionBox = (XulListbox) document.getElementById( "connection-type-list" );
accessBox = (XulListbox) document.getElementById( "access-type-list" );
Object connectionKey = DataHandler.connectionNametoID.get( connectionBox.getSelectedItem() );
String databaseName = null;
try {
databaseName = PluginRegistry.getInstance().getPlugin( DatabasePluginType.class, "" + connectionKey ).getIds()[0];
} catch ( Exception e ) {
log.error( e.getLocalizedMessage(), e );
}
DatabaseInterface database = DataHandler.connectionMap.get( connectionBox.getSelectedItem() );
Object accessKey = accessBox.getSelectedItem();
int access = DatabaseMeta.getAccessType( (String) accessKey );
String fragment = null;
DataHandler dataHandler = null;
try {
dataHandler = (DataHandler) xulDomContainer.getEventHandler( "dataHandler" );
dataHandler.pushCache();
} catch ( XulException e ) {
// TODO not a critical function, but should log a problem...
}
switch ( access ) {
case DatabaseMeta.TYPE_ACCESS_JNDI:
fragment = getFragment( database, databaseName, "_jndi.xul", "common_jndi.xul" );
break;
case DatabaseMeta.TYPE_ACCESS_NATIVE:
fragment = getFragment( database, databaseName, "_native.xul", "common_native.xul" );
break;
case DatabaseMeta.TYPE_ACCESS_OCI:
fragment = getFragment( database, databaseName, "_oci.xul", "common_native.xul" );
break;
case DatabaseMeta.TYPE_ACCESS_ODBC:
fragment = getFragment( database, databaseName, "_odbc.xul", "common_odbc.xul" );
break;
case DatabaseMeta.TYPE_ACCESS_PLUGIN:
fragment = getFragment( database, databaseName, "_plugin.xul", "common_native.xul" );
break;
default:
break;
}
if ( fragment != null ) {
try {
loadDatabaseOptionsFragment( fragment.toLowerCase(), dataHandler );
} catch ( XulException e ) {
// TODO should be reporting as an error dialog; need error dialog in XUL framework
showMessage( Messages.getString( "FragmentHandler.USER.CANT_LOAD_OPTIONS", databaseName ) );
}
}
XulTextbox portBox = (XulTextbox) document.getElementById( "port-number-text" );
if ( portBox != null ) {
int port = database.getDefaultDatabasePort();
if ( port > 0 ) {
portBox.setValue( Integer.toString( port ) );
}
}
if ( dataHandler != null ) {
dataHandler.popCache();
}
} | @Test
public void testRefreshOptions() throws Exception {
XulListbox connectionBox = mock( XulListbox.class );
when( document.getElementById( "connection-type-list" ) ).thenReturn( connectionBox );
when( connectionBox.getSelectedItem() ).thenReturn( "myDb" );
XulListbox accessBox = mock( XulListbox.class );
when( document.getElementById( "access-type-list" ) ).thenReturn( accessBox );
when( accessBox.getSelectedItem() ).thenReturn( "Native" );
DataHandler dataHandler = mock( DataHandler.class );
when( xulDomContainer.getEventHandler( "dataHandler" ) ).thenReturn( dataHandler );
DatabaseInterface dbInterface = mock( DatabaseInterface.class );
when( dbInterface.getDefaultDatabasePort() ).thenReturn( 5309 );
DataHandler.connectionMap.put( "myDb", dbInterface );
XulComponent component = mock( XulComponent.class );
XulComponent parent = mock( XulComponent.class );
when( component.getParent() ).thenReturn( parent );
when( document.getElementById( "database-options-box" ) ).thenReturn( component );
XulDomContainer fragmentContainer = mock( XulDomContainer.class );
Document mockDoc = mock( Document.class );
XulComponent firstChild = mock( XulComponent.class );
when( mockDoc.getFirstChild() ).thenReturn( firstChild );
when( fragmentContainer.getDocumentRoot() ).thenReturn( mockDoc );
when( xulDomContainer.loadFragment( anyString(), any( Object.class ) ) ).thenReturn( fragmentContainer );
XulTextbox portBox = mock( XulTextbox.class );
when( document.getElementById( "port-number-text" ) ).thenReturn( portBox );
fragmentHandler.refreshOptions();
// Iterate through the other database access types
when( accessBox.getSelectedItem() ).thenReturn( "JNDI" );
fragmentHandler.refreshOptions();
when( accessBox.getSelectedItem() ).thenReturn( "ODBC" );
fragmentHandler.refreshOptions();
when( accessBox.getSelectedItem() ).thenReturn( "OCI" );
fragmentHandler.refreshOptions();
when( accessBox.getSelectedItem() ).thenReturn( "Plugin" );
fragmentHandler.refreshOptions();
} |
CacheConfig<K, V> asCacheConfig() {
return this.copy(new CacheConfig<>(), false);
} | @Test
public void serializationSucceeds_whenKVTypes_setAsClassNames() {
CacheConfig cacheConfig = newDefaultCacheConfig("test");
cacheConfig.setKeyClassName("java.lang.Integer");
cacheConfig.setValueClassName("java.lang.String");
PreJoinCacheConfig preJoinCacheConfig = new PreJoinCacheConfig(cacheConfig);
Data data = serializationService.toData(preJoinCacheConfig);
PreJoinCacheConfig deserialized = serializationService.toObject(data);
assertEquals(preJoinCacheConfig, deserialized);
assertEquals(cacheConfig, deserialized.asCacheConfig());
} |
public void createPartitionMetadataTable() {
List<String> ddl = new ArrayList<>();
if (this.isPostgres()) {
// Quotes need to be added around identifiers to preserve casing.
ddl.add(
"CREATE TABLE \""
+ tableName
+ "\"(\""
+ COLUMN_PARTITION_TOKEN
+ "\" text NOT NULL,\""
+ COLUMN_PARENT_TOKENS
+ "\" text[] NOT NULL,\""
+ COLUMN_START_TIMESTAMP
+ "\" timestamptz NOT NULL,\""
+ COLUMN_END_TIMESTAMP
+ "\" timestamptz NOT NULL,\""
+ COLUMN_HEARTBEAT_MILLIS
+ "\" BIGINT NOT NULL,\""
+ COLUMN_STATE
+ "\" text NOT NULL,\""
+ COLUMN_WATERMARK
+ "\" timestamptz NOT NULL,\""
+ COLUMN_CREATED_AT
+ "\" SPANNER.COMMIT_TIMESTAMP NOT NULL,\""
+ COLUMN_SCHEDULED_AT
+ "\" SPANNER.COMMIT_TIMESTAMP,\""
+ COLUMN_RUNNING_AT
+ "\" SPANNER.COMMIT_TIMESTAMP,\""
+ COLUMN_FINISHED_AT
+ "\" SPANNER.COMMIT_TIMESTAMP,"
+ " PRIMARY KEY (\""
+ COLUMN_PARTITION_TOKEN
+ "\")"
+ ")"
+ " TTL INTERVAL '"
+ TTL_AFTER_PARTITION_FINISHED_DAYS
+ " days' ON \""
+ COLUMN_FINISHED_AT
+ "\"");
ddl.add(
"CREATE INDEX \""
+ WATERMARK_INDEX
+ "\" on \""
+ tableName
+ "\" (\""
+ COLUMN_WATERMARK
+ "\") INCLUDE (\""
+ COLUMN_STATE
+ "\")");
ddl.add(
"CREATE INDEX \""
+ CREATED_AT_START_TIMESTAMP_INDEX
+ "\" ON \""
+ tableName
+ "\" (\""
+ COLUMN_CREATED_AT
+ "\",\""
+ COLUMN_START_TIMESTAMP
+ "\")");
} else {
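      // GoogleSQL dialect: identifiers are unquoted and TTL is expressed as a row deletion policy.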
ddl.add(
"CREATE TABLE "
+ tableName
+ " ("
+ COLUMN_PARTITION_TOKEN
+ " STRING(MAX) NOT NULL,"
+ COLUMN_PARENT_TOKENS
+ " ARRAY<STRING(MAX)> NOT NULL,"
+ COLUMN_START_TIMESTAMP
+ " TIMESTAMP NOT NULL,"
+ COLUMN_END_TIMESTAMP
+ " TIMESTAMP NOT NULL,"
+ COLUMN_HEARTBEAT_MILLIS
+ " INT64 NOT NULL,"
+ COLUMN_STATE
+ " STRING(MAX) NOT NULL,"
+ COLUMN_WATERMARK
+ " TIMESTAMP NOT NULL,"
+ COLUMN_CREATED_AT
+ " TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true),"
+ COLUMN_SCHEDULED_AT
+ " TIMESTAMP OPTIONS (allow_commit_timestamp=true),"
+ COLUMN_RUNNING_AT
+ " TIMESTAMP OPTIONS (allow_commit_timestamp=true),"
+ COLUMN_FINISHED_AT
+ " TIMESTAMP OPTIONS (allow_commit_timestamp=true),"
+ ") PRIMARY KEY ("
+ COLUMN_PARTITION_TOKEN
+ "),"
+ " ROW DELETION POLICY (OLDER_THAN("
+ COLUMN_FINISHED_AT
+ ", INTERVAL "
+ TTL_AFTER_PARTITION_FINISHED_DAYS
+ " DAY))");
ddl.add(
"CREATE INDEX "
+ WATERMARK_INDEX
+ " on "
+ tableName
+ " ("
+ COLUMN_WATERMARK
+ ") STORING ("
+ COLUMN_STATE
+ ")");
ddl.add(
"CREATE INDEX "
+ CREATED_AT_START_TIMESTAMP_INDEX
+ " ON "
+ tableName
+ " ("
+ COLUMN_CREATED_AT
+ ","
+ COLUMN_START_TIMESTAMP
+ ")");
}
OperationFuture<Void, UpdateDatabaseDdlMetadata> op =
databaseAdminClient.updateDatabaseDdl(instanceId, databaseId, ddl, null);
try {
// Initiate the request which returns an OperationFuture.
op.get(TIMEOUT_MINUTES, TimeUnit.MINUTES);
} catch (ExecutionException | TimeoutException e) {
// If the operation failed or timed out during execution, expose the cause.
if (e.getCause() != null) {
throw (SpannerException) e.getCause();
} else {
throw SpannerExceptionFactory.asSpannerException(e);
}
} catch (InterruptedException e) {
// Throw when a thread is waiting, sleeping, or otherwise occupied,
// and the thread is interrupted, either before or during the activity.
throw SpannerExceptionFactory.propagateInterrupt(e);
}
} | @Test
public void testCreatePartitionMetadataTablePostgres() throws Exception {
when(op.get(TIMEOUT_MINUTES, TimeUnit.MINUTES)).thenReturn(null);
partitionMetadataAdminDaoPostgres.createPartitionMetadataTable();
verify(databaseAdminClient, times(1))
.updateDatabaseDdl(eq(INSTANCE_ID), eq(DATABASE_ID), statements.capture(), isNull());
assertEquals(3, ((Collection<?>) statements.getValue()).size());
Iterator<String> it = statements.getValue().iterator();
assertTrue(it.next().contains("CREATE TABLE \""));
assertTrue(it.next().contains("CREATE INDEX \""));
assertTrue(it.next().contains("CREATE INDEX \""));
} |
public SmppMessage createSmppMessage(CamelContext camelContext, AlertNotification alertNotification) {
SmppMessage smppMessage = new SmppMessage(camelContext, alertNotification, configuration);
smppMessage.setHeader(SmppConstants.MESSAGE_TYPE, SmppMessageType.AlertNotification.toString());
smppMessage.setHeader(SmppConstants.SEQUENCE_NUMBER, alertNotification.getSequenceNumber());
smppMessage.setHeader(SmppConstants.COMMAND_ID, alertNotification.getCommandId());
smppMessage.setHeader(SmppConstants.COMMAND_STATUS, alertNotification.getCommandStatus());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR, alertNotification.getSourceAddr());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR_NPI, alertNotification.getSourceAddrNpi());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR_TON, alertNotification.getSourceAddrTon());
smppMessage.setHeader(SmppConstants.ESME_ADDR, alertNotification.getEsmeAddr());
smppMessage.setHeader(SmppConstants.ESME_ADDR_NPI, alertNotification.getEsmeAddrNpi());
smppMessage.setHeader(SmppConstants.ESME_ADDR_TON, alertNotification.getEsmeAddrTon());
return smppMessage;
} | @Test
public void createSmppMessageFromDeliveryReceiptWithoutShortMessageShouldNotThrowException() {
DeliverSm deliverSm = new DeliverSm();
deliverSm.setSmscDeliveryReceipt();
deliverSm.setOptionalParameters(new OptionalParameter.Short((short) 0x2153, (short) 0));
try {
SmppMessage smppMessage = binding.createSmppMessage(camelContext, deliverSm);
Map<Short, Object> optionalParameter = smppMessage.getHeader(SmppConstants.OPTIONAL_PARAMETER, Map.class);
assertEquals(Short.valueOf((short) 0), optionalParameter.get(Short.valueOf((short) 0x2153)));
} catch (Exception e) {
fail("Should not throw exception while creating smppMessage in absence of shortMessage");
}
} |
@Override
@CacheEvict(cacheNames = RedisKeyConstants.MAIL_TEMPLATE,
allEntries = true) // allEntries evicts the whole cache, because the id is not the cache key (the template code), so a targeted eviction is impractical
public void deleteMailTemplate(Long id) {
    // validate that the template exists
    validateMailTemplateExists(id);
    // delete it
mailTemplateMapper.deleteById(id);
} | @Test
public void testDeleteMailTemplate_success() {
    // mock data
    MailTemplateDO dbMailTemplate = randomPojo(MailTemplateDO.class);
    mailTemplateMapper.insert(dbMailTemplate);// @Sql: insert an existing row first
    // prepare parameters
    Long id = dbMailTemplate.getId();
    // invoke
    mailTemplateService.deleteMailTemplate(id);
    // verify the row no longer exists
assertNull(mailTemplateMapper.selectById(id));
} |
public static String convertToHtml(String input) {
return new Markdown().convert(StringEscapeUtils.escapeHtml4(input));
} | @Test
public void shouldEmphasisText() {
assertThat(Markdown.convertToHtml("This is *Sparta !!!*")).isEqualTo("This is <strong>Sparta !!!</strong>");
assertThat(Markdown.convertToHtml("This is *A*")).isEqualTo("This is <strong>A</strong>");
assertThat(Markdown.convertToHtml("This should not be * \n emphasized")).isEqualTo("This should not be * <br/> emphasized");
assertThat(Markdown.convertToHtml("This is *very* very *important*")).isEqualTo("This is <strong>very</strong> very <strong>important</strong>");
assertThat(Markdown.convertToHtml("Not * emphasized * because of whitespaces")).isEqualTo("Not * emphasized * because of whitespaces");
assertThat(Markdown.convertToHtml("Not *emphasized * because of whitespace")).isEqualTo("Not *emphasized * because of whitespace");
assertThat(Markdown.convertToHtml("Not * emphasized* because of whitespace")).isEqualTo("Not * emphasized* because of whitespace");
assertThat(Markdown.convertToHtml("emphasized*inside*word")).isEqualTo("emphasized<strong>inside</strong>word");
assertThat(Markdown.convertToHtml("*Emphasize many words*")).isEqualTo("<strong>Emphasize many words</strong>");
} |
public static Boolean not(Boolean value) {
return value == null ? null : !value;
} | @SuppressWarnings({"ConstantConditions", "SimplifiableJUnitAssertion"})
@Test
public void testNot() {
assertEquals(true, TernaryLogic.not(false));
assertEquals(false, TernaryLogic.not(true));
assertEquals(null, TernaryLogic.not(null));
} |
@Override
public SendResult send(
Message msg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
msg.setTopic(withNamespace(msg.getTopic()));
if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
return sendByAccumulator(msg, null, null);
} else {
return sendDirect(msg, null, null);
}
} | @Test
public void testSendMessageAsync_Success() throws RemotingException, InterruptedException, MQBrokerException, MQClientException {
final CountDownLatch countDownLatch = new CountDownLatch(1);
when(mQClientAPIImpl.getTopicRouteInfoFromNameServer(anyString(), anyLong())).thenReturn(createTopicRoute());
producer.send(message, new SendCallback() {
@Override
public void onSuccess(SendResult sendResult) {
assertThat(sendResult.getSendStatus()).isEqualTo(SendStatus.SEND_OK);
assertThat(sendResult.getOffsetMsgId()).isEqualTo("123");
assertThat(sendResult.getQueueOffset()).isEqualTo(456L);
countDownLatch.countDown();
}
@Override
public void onException(Throwable e) {
countDownLatch.countDown();
}
});
countDownLatch.await(defaultTimeout, TimeUnit.MILLISECONDS);
} |
public IssueQuery create(SearchRequest request) {
try (DbSession dbSession = dbClient.openSession(false)) {
final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone());
Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules());
Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet());
Collection<String> issueKeys = collectIssueKeys(dbSession, request);
if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) {
ruleUuids.add("non-existing-uuid");
}
IssueQuery.Builder builder = IssueQuery.builder()
.issueKeys(issueKeys)
.severities(request.getSeverities())
.cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories())
.impactSoftwareQualities(request.getImpactSoftwareQualities())
.impactSeverities(request.getImpactSeverities())
.statuses(request.getStatuses())
.resolutions(request.getResolutions())
.issueStatuses(request.getIssueStatuses())
.resolved(request.getResolved())
.prioritizedRule(request.getPrioritizedRule())
.rules(ruleDtos)
.ruleUuids(ruleUuids)
.assigneeUuids(request.getAssigneeUuids())
.authors(request.getAuthors())
.scopes(request.getScopes())
.languages(request.getLanguages())
.tags(request.getTags())
.types(request.getTypes())
.pciDss32(request.getPciDss32())
.pciDss40(request.getPciDss40())
.owaspAsvs40(request.getOwaspAsvs40())
.owaspAsvsLevel(request.getOwaspAsvsLevel())
.owaspTop10(request.getOwaspTop10())
.owaspTop10For2021(request.getOwaspTop10For2021())
.stigAsdR5V3(request.getStigAsdV5R3())
.casa(request.getCasa())
.sansTop25(request.getSansTop25())
.cwe(request.getCwe())
.sonarsourceSecurity(request.getSonarsourceSecurity())
.assigned(request.getAssigned())
.createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone))
.createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone))
.facetMode(request.getFacetMode())
.timeZone(timeZone)
.codeVariants(request.getCodeVariants());
List<ComponentDto> allComponents = new ArrayList<>();
boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents);
addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request);
setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone);
String sort = request.getSort();
if (!isNullOrEmpty(sort)) {
builder.sort(sort);
builder.asc(request.getAsc());
}
return builder.build();
}
} | @Test
public void creation_date_support_zoneddatetime() {
SearchRequest request = new SearchRequest()
.setCreatedAt("2013-04-16T09:08:24+0200");
IssueQuery query = underTest.create(request);
assertThat(query.createdAt()).isEqualTo(parseDateTime("2013-04-16T09:08:24+0200"));
} |
public static RestartBackoffTimeStrategy.Factory createRestartBackoffTimeStrategyFactory(
final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration,
final Configuration jobConfiguration,
final Configuration clusterConfiguration,
final boolean isCheckpointingEnabled) {
checkNotNull(jobRestartStrategyConfiguration);
checkNotNull(jobConfiguration);
checkNotNull(clusterConfiguration);
return getJobRestartStrategyFactory(jobRestartStrategyConfiguration)
.orElse(
getRestartStrategyFactoryFromConfig(jobConfiguration)
.orElse(
(getRestartStrategyFactoryFromConfig(clusterConfiguration)
.orElse(
getDefaultRestartStrategyFactory(
isCheckpointingEnabled)))));
} | @Test
void testNoRestartStrategySpecifiedInJobConfig() {
final Configuration jobConf = new Configuration();
jobConf.set(RestartStrategyOptions.RESTART_STRATEGY, NO_RESTART_STRATEGY.getMainValue());
final Configuration clusterConf = new Configuration();
clusterConf.set(RestartStrategyOptions.RESTART_STRATEGY, FIXED_DELAY.getMainValue());
final RestartBackoffTimeStrategy.Factory factory =
RestartBackoffTimeStrategyFactoryLoader.createRestartBackoffTimeStrategyFactory(
DEFAULT_JOB_LEVEL_RESTART_CONFIGURATION, jobConf, clusterConf, false);
assertThat(NoRestartBackoffTimeStrategy.NoRestartBackoffTimeStrategyFactory.INSTANCE)
.isEqualTo(factory);
} |
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
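        // Jackson needs the concrete element type to faithfully round-trip non-Serializable elements.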
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
} | @Test
public void should_clone_serializable_complex_object_with_non_serializable_nested_object() {
Map<String, List<NonSerializableObject>> map = new LinkedHashMap<>();
map.put("key1", Lists.newArrayList(new NonSerializableObject("name1")));
map.put("key2", Lists.newArrayList(
new NonSerializableObject("name2"),
new NonSerializableObject("name3")
));
Object original = new SerializableComplexObjectWithNonSerializableNestedObject(map);
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
} |
static String headerLine(CSVFormat csvFormat) {
return String.join(String.valueOf(csvFormat.getDelimiter()), csvFormat.getHeader());
} | @Test
public void givenCommentMarker_skipsLine() {
CSVFormat csvFormat = csvFormat().withCommentMarker('#');
PCollection<String> input =
pipeline.apply(
Create.of(headerLine(csvFormat), "#should skip me", "a,1,1.1", "b,2,2.2", "c,3,3.3"));
CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
CsvIOParseResult<List<String>> result = input.apply(underTest);
PAssert.that(result.getOutput())
.containsInAnyOrder(
Arrays.asList(
Arrays.asList("a", "1", "1.1"),
Arrays.asList("b", "2", "2.2"),
Arrays.asList("c", "3", "3.3")));
PAssert.that(result.getErrors()).empty();
pipeline.run();
} |
public ScopedSpan startScopedSpanWithParent(String name, @Nullable TraceContext parent) {
if (name == null) throw new NullPointerException("name == null");
TraceContext context =
parent != null ? decorateContext(parent, parent.spanId()) : newRootContext(0);
return newScopedSpan(parent, context, name);
} | @Test void startScopedSpanWithParent_resultantSpanIsLocalRoot() {
ScopedSpan span = tracer.startScopedSpanWithParent("foo", context);
try {
assertThat(span.context().spanId()).isEqualTo(span.context().localRootId()); // Sanity check
assertThat(span.context().isLocalRoot()).isTrue();
ScopedSpan child = tracer.startScopedSpanWithParent("bar", span.context());
try {
// Check we don't always make children local roots
assertThat(child.context().localRootId()).isEqualTo(span.context().localRootId());
assertThat(child.context().isLocalRoot()).isFalse();
} finally {
child.finish();
}
} finally {
span.finish();
}
} |
public static Collection<MetaDataLoaderMaterial> getMetaDataLoaderMaterials(final Collection<String> tableNames,
final GenericSchemaBuilderMaterial material, final boolean checkMetaDataEnable) {
Map<String, Collection<String>> dataSourceTableGroups = new LinkedHashMap<>();
Collection<DatabaseType> unsupportedThreeTierStorageStructureDatabaseTypes = getUnsupportedThreeTierStorageStructureDatabaseTypes(material.getStorageTypes().values());
DataNodes dataNodes = new DataNodes(material.getRules());
for (String each : tableNames) {
checkDataSourceTypeIncludeInstanceAndSetDatabaseTableMap(unsupportedThreeTierStorageStructureDatabaseTypes, dataNodes, each);
if (checkMetaDataEnable) {
addAllActualTableDataNode(material, dataSourceTableGroups, dataNodes, each);
} else {
addOneActualTableDataNode(material, dataSourceTableGroups, dataNodes, each);
}
}
Collection<MetaDataLoaderMaterial> result = new LinkedList<>();
for (Entry<String, Collection<String>> entry : dataSourceTableGroups.entrySet()) {
DatabaseType storageType = material.getStorageTypes().get(entry.getKey());
String defaultSchemaName = getDefaultSchemaNameByStorageType(storageType, material.getDefaultSchemaName());
result.add(new MetaDataLoaderMaterial(entry.getValue(), getDataSource(material, entry.getKey()), storageType, defaultSchemaName));
}
return result;
} | @Test
void assertGetSchemaMetaDataLoaderMaterialsWhenNotConfigCheckMetaDataEnableForSingleTableDataNode() {
ShardingSphereRule rule = mock(ShardingSphereRule.class);
DataNodeRuleAttribute ruleAttribute = mock(DataNodeRuleAttribute.class);
when(ruleAttribute.getDataNodesByTableName("t_single")).thenReturn(mockSingleTableDataNodes());
when(rule.getAttributes()).thenReturn(new RuleAttributes(ruleAttribute));
GenericSchemaBuilderMaterial material = new GenericSchemaBuilderMaterial(mock(DatabaseType.class), mockStorageTypes(), mockDataSourceMap(),
Arrays.asList(rule, mock(ShardingSphereRule.class)), mock(ConfigurationProperties.class), "public");
Collection<MetaDataLoaderMaterial> actual = SchemaMetaDataUtils.getMetaDataLoaderMaterials(Collections.singleton("t_single"), material, false);
assertThat(actual.size(), is(1));
Iterator<MetaDataLoaderMaterial> iterator = actual.iterator();
MetaDataLoaderMaterial firstMaterial = iterator.next();
assertThat(firstMaterial.getDefaultSchemaName(), is("public"));
assertThat(firstMaterial.getActualTableNames(), is(Collections.singletonList("t_single")));
} |
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
getRawMapping().setConf(conf);
} | @Test
public void testFileDoesNotExist() {
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, "/this/file/does/not/exist");
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals(result.get(0), NetworkTopology.DEFAULT_RACK);
assertEquals(result.get(1), NetworkTopology.DEFAULT_RACK);
} |
public PipelineTemplateConfig templateByName(CaseInsensitiveString templateName) {
    for (PipelineTemplateConfig templateConfig : this) {
        if (templateConfig.name().equals(templateName)) {
            return templateConfig;
        }
    }
    return null;
}
public void shouldReturnTemplateByName() {
PipelineTemplateConfig template1 = template("template1");
TemplatesConfig templates = new TemplatesConfig(template1, template("template2"));
assertThat(templates.templateByName(new CaseInsensitiveString("template1")), is(template1));
} |
public static void resumeConsumers(final Queue<Consumer<byte[]>> consumers) {
consumers.forEach(Consumer::resume);
} | @Test
public void resumeConsumers() {
Consumer<byte[]> consumer = mock(Consumer.class);
Queue<Consumer<byte[]>> consumers = new ConcurrentLinkedQueue<>();
consumers.add(consumer);
PulsarUtils.resumeConsumers(consumers);
verify(consumer).resume();
} |
public static Timestamp parseTimestamp(final String str) {
return PARSER.parseToTimestamp(str);
} | @Test
public void shouldParseTimestamp() {
assertThat(SqlTimeTypes.parseTimestamp("2019-03-17T10:00:00"), is(new Timestamp(1552816800000L)));
assertThat(SqlTimeTypes.parseTimestamp("2019-03-17T03:00-0700"), is(new Timestamp(1552816800000L)));
} |
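
The two assertions denote the same instant, and the zone-less literal is evidently interpreted as UTC, since 2019-03-17T10:00:00Z is exactly 1,552,816,800,000 ms after the epoch. A quick java.time check of that equivalence (a standalone sketch, not the PARSER used above):

import java.time.Instant;
import java.time.OffsetDateTime;

public class TimestampEquivalence {
    public static void main(String[] args) {
        // 2019-03-17T10:00:00Z and 2019-03-17T03:00:00-07:00 are the same instant.
        long utc = Instant.parse("2019-03-17T10:00:00Z").toEpochMilli();
        long offset = OffsetDateTime.parse("2019-03-17T03:00:00-07:00").toInstant().toEpochMilli();
        System.out.println(utc);           // 1552816800000
        System.out.println(utc == offset); // true
    }
}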
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
return new CreateStreamCommand(
outputNode.getSinkName().get(),
outputNode.getSchema(),
outputNode.getTimestampColumn(),
outputNode.getKsqlTopic().getKafkaTopicName(),
Formats.from(outputNode.getKsqlTopic()),
outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
Optional.of(outputNode.getOrReplace()),
Optional.of(false)
);
} | @Test
public void shouldBuildTimestampColumnWithFormat() {
// Given:
givenProperties(ImmutableMap.of(
CommonCreateConfigs.TIMESTAMP_NAME_PROPERTY,
new StringLiteral(quote(ELEMENT1.getName().text())),
CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY,
new StringLiteral("%s")
));
final CreateStream statement =
new CreateStream(SOME_NAME, STREAM_ELEMENTS, false, true, withProperties, false);
// When:
final CreateStreamCommand cmd = createSourceFactory.createStreamCommand(
statement,
ksqlConfig
);
// Then:
assertThat(
cmd.getTimestampColumn(),
is(Optional.of(
new TimestampColumn(ELEMENT1.getName(), Optional.of("%s")))
)
);
} |
public ControllerResult<ElectMasterResponseHeader> electMaster(final ElectMasterRequestHeader request,
final ElectPolicy electPolicy) {
final String brokerName = request.getBrokerName();
final Long brokerId = request.getBrokerId();
final ControllerResult<ElectMasterResponseHeader> result = new ControllerResult<>(new ElectMasterResponseHeader());
final ElectMasterResponseHeader response = result.getResponse();
if (!isContainsBroker(brokerName)) {
// this broker set hasn't been registered
result.setCodeAndRemark(ResponseCode.CONTROLLER_BROKER_NEED_TO_BE_REGISTERED, "Broker hasn't been registered");
return result;
}
final SyncStateInfo syncStateInfo = this.syncStateSetInfoTable.get(brokerName);
final BrokerReplicaInfo brokerReplicaInfo = this.replicaInfoTable.get(brokerName);
final Set<Long> syncStateSet = syncStateInfo.getSyncStateSet();
final Long oldMaster = syncStateInfo.getMasterBrokerId();
Set<Long> allReplicaBrokers = controllerConfig.isEnableElectUncleanMaster() ? brokerReplicaInfo.getAllBroker() : null;
Long newMaster = null;
if (syncStateInfo.isFirstTimeForElect()) {
            // If this broker set has never had a master (i.e., this is the first election),
            // elect the requesting broker as the first master
newMaster = brokerId;
}
// elect by policy
if (newMaster == null || newMaster == -1) {
            // assignedBrokerId is set when a specific broker must be elected by force (designated election)
Long assignedBrokerId = request.getDesignateElect() ? brokerId : null;
newMaster = electPolicy.elect(brokerReplicaInfo.getClusterName(), brokerReplicaInfo.getBrokerName(), syncStateSet, allReplicaBrokers, oldMaster, assignedBrokerId);
}
if (newMaster != null && newMaster.equals(oldMaster)) {
// old master still valid, change nothing
            String err = String.format("The old master %s is still alive, no need to elect a new master for broker %s", oldMaster, brokerReplicaInfo.getBrokerName());
LOGGER.warn("{}", err);
// the master still exist
response.setMasterEpoch(syncStateInfo.getMasterEpoch());
response.setSyncStateSetEpoch(syncStateInfo.getSyncStateSetEpoch());
response.setMasterBrokerId(oldMaster);
response.setMasterAddress(brokerReplicaInfo.getBrokerAddress(oldMaster));
result.setBody(new ElectMasterResponseBody(syncStateSet).encode());
result.setCodeAndRemark(ResponseCode.CONTROLLER_MASTER_STILL_EXIST, err);
return result;
}
// a new master is elected
if (newMaster != null) {
final int masterEpoch = syncStateInfo.getMasterEpoch();
final int syncStateSetEpoch = syncStateInfo.getSyncStateSetEpoch();
final HashSet<Long> newSyncStateSet = new HashSet<>();
newSyncStateSet.add(newMaster);
response.setMasterBrokerId(newMaster);
response.setMasterAddress(brokerReplicaInfo.getBrokerAddress(newMaster));
response.setMasterEpoch(masterEpoch + 1);
response.setSyncStateSetEpoch(syncStateSetEpoch + 1);
ElectMasterResponseBody responseBody = new ElectMasterResponseBody(newSyncStateSet);
BrokerMemberGroup brokerMemberGroup = buildBrokerMemberGroup(brokerReplicaInfo);
if (null != brokerMemberGroup) {
responseBody.setBrokerMemberGroup(brokerMemberGroup);
}
result.setBody(responseBody.encode());
final ElectMasterEvent event = new ElectMasterEvent(brokerName, newMaster);
result.addEvent(event);
LOGGER.info("Elect new master {} for broker {}", newMaster, brokerName);
return result;
}
        // If the election failed and electMaster was triggered by the controller (identifiable by a null or -1 brokerId),
        // we still need to apply an ElectMasterEvent to tell the state machine
        // that the master was shut down and no new master was elected.
if (request.getBrokerId() == null || request.getBrokerId() == -1) {
final ElectMasterEvent event = new ElectMasterEvent(false, brokerName);
result.addEvent(event);
            result.setCodeAndRemark(ResponseCode.CONTROLLER_MASTER_NOT_AVAILABLE, "Old master has gone down and no new master could be elected");
} else {
result.setCodeAndRemark(ResponseCode.CONTROLLER_ELECT_MASTER_FAILED, "Failed to elect a new master");
}
LOGGER.warn("Failed to elect a new master for broker {}", brokerName);
return result;
} | @Test
public void testElectMasterPreferHigherOffsetWhenEpochEquals() {
mockMetaData();
final ElectMasterRequestHeader request = ElectMasterRequestHeader.ofControllerTrigger(DEFAULT_BROKER_NAME);
ElectPolicy electPolicy = new DefaultElectPolicy(this.heartbeatManager::isBrokerActive, this.heartbeatManager::getBrokerLiveInfo);
mockHeartbeatDataHigherOffset();
final ControllerResult<ElectMasterResponseHeader> cResult = this.replicasInfoManager.electMaster(request,
electPolicy);
final ElectMasterResponseHeader response = cResult.getResponse();
assertEquals(DEFAULT_IP[2], response.getMasterAddress());
assertEquals(3L, response.getMasterBrokerId().longValue());
assertEquals(2, response.getMasterEpoch().intValue());
} |
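
The election proceeds in stages: a first-time bootstrap, then the pluggable policy over the in-sync set, with all replicas considered only when unclean election is enabled (note the null allReplicaBrokers otherwise). A minimal sketch of that clean-vs-unclean fallback, with hypothetical names rather than the real ElectPolicy API:

import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;

final class ElectFallbackSketch {

    // Prefer a live broker from the in-sync set; consult all replicas only
    // when unclean election is allowed (mirroring the null allReplicaBrokers above).
    static Optional<Long> elect(Set<Long> syncStateSet, Set<Long> allReplicas,
                                boolean uncleanAllowed, Predicate<Long> isAlive) {
        Optional<Long> inSync = syncStateSet.stream().filter(isAlive).findFirst();
        if (inSync.isPresent() || !uncleanAllowed || allReplicas == null) {
            return inSync;
        }
        return allReplicas.stream().filter(isAlive).findFirst();
    }
}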
public EvaluationResult evaluate(Condition condition, Measure measure) {
checkArgument(SUPPORTED_METRIC_TYPE.contains(condition.getMetric().getType()), "Conditions on MetricType %s are not supported", condition.getMetric().getType());
Comparable measureComparable = parseMeasure(measure);
if (measureComparable == null) {
return new EvaluationResult(Measure.Level.OK, null);
}
return evaluateCondition(condition, measureComparable)
.orElseGet(() -> new EvaluationResult(Measure.Level.OK, measureComparable));
} | @Test
@UseDataProvider("unsupportedMetricTypes")
public void fail_when_metric_is_not_supported(MetricType metricType) {
Metric metric = createMetric(metricType);
Measure measure = newMeasureBuilder().create("3.14159265358");
assertThatThrownBy(() -> underTest.evaluate(createCondition(metric, LESS_THAN, "1.60217657"), measure))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(String.format("Conditions on MetricType %s are not supported", metricType));
} |
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch,
Timer timer) {
metadata.addTransientTopics(topicsForPartitions(timestampsToSearch.keySet()));
try {
Map<TopicPartition, ListOffsetData> fetchedOffsets = fetchOffsetsByTimes(timestampsToSearch,
timer, true).fetchedOffsets;
return buildOffsetsForTimesResult(timestampsToSearch, fetchedOffsets);
} finally {
metadata.clearTransientTopics();
}
} | @Test
public void testGetOffsetsForTimesTimeout() {
buildFetcher();
assertThrows(TimeoutException.class, () -> offsetFetcher.offsetsForTimes(
Collections.singletonMap(new TopicPartition(topicName, 2), 1000L), time.timer(100L)));
} |
public static <T extends Classifiable<T>> double accuracy(MetricTarget<T> target, ConfusionMatrix<T> cm) {
if (target.getOutputTarget().isPresent()) {
return accuracy(target.getOutputTarget().get(), cm);
} else {
return accuracy(target.getAverageTarget().get(), cm);
}
} | @Test
public void testAccuracy() {
List<Prediction<Label>> predictions = Arrays.asList(
mkPrediction("a", "a"),
mkPrediction("c", "b"),
mkPrediction("b", "b"),
mkPrediction("b", "c")
);
ImmutableOutputInfo<Label> domain = mkDomain(predictions);
LabelConfusionMatrix cm = new LabelConfusionMatrix(domain, predictions);
assertEquals(1d, ConfusionMetrics.accuracy(label("a"), cm));
assertEquals(0.5, ConfusionMetrics.accuracy(label("b"), cm));
assertEquals(0d, cm.tp(label("c")));
assertEquals(0d, ConfusionMetrics.accuracy(label("c"), cm));
assertEquals(0.5, ConfusionMetrics.accuracy(Average.MICRO, cm));
assertEquals(0.5, ConfusionMetrics.accuracy(Average.MACRO, cm));
} |
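
The expected values follow directly from the four predictions: two of the four are correct (a→a and b→b), so the micro average is 2/4 = 0.5; per label, a is always classified correctly (1.0), b once out of twice (0.5), and c never (0.0, consistent with tp(c) = 0), giving a macro average of (1.0 + 0.5 + 0.0) / 3 = 0.5.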
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) {
return new IteratorStreamMergedResult(queryResults);
}
Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
selectStatementContext.setIndexes(columnLabelIndexMap);
MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
return decorate(queryResults, selectStatementContext, mergedResult);
} | @Test
void assertBuildGroupByMemoryMergedResultWithAggregationOnlyWithSQLServerLimit() throws SQLException {
final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "SQLServer"));
ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class));
SQLServerSelectStatement selectStatement = (SQLServerSelectStatement) buildSelectStatement(new SQLServerSelectStatement());
ProjectionsSegment projectionsSegment = new ProjectionsSegment(0, 0);
projectionsSegment.getProjections().add(new AggregationProjectionSegment(0, 0, AggregationType.COUNT, "COUNT(*)"));
selectStatement.setProjections(projectionsSegment);
selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralRowNumberValueSegment(0, 0, 1L, true), null));
SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(),
selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createSQLServerDatabase(), mock(ConnectionContext.class));
assertThat(actual, instanceOf(TopAndRowNumberDecoratorMergedResult.class));
assertThat(((TopAndRowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(GroupByMemoryMergedResult.class));
} |
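
Note how the merger short-circuits: with a single query result and no aggregation rewrite needed, it returns a streaming IteratorStreamMergedResult; otherwise it builds a memory-merged result and wraps it in pagination decorators, which is why the SQLServer TOP/ROW_NUMBER test above expects a TopAndRowNumberDecoratorMergedResult around a GroupByMemoryMergedResult.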
@SuppressWarnings("all")
public static Boolean toBooleanObject(String str) {
String formatStr = (str == null ? StringUtils.EMPTY : str).toLowerCase();
if (TRUE_SET.contains(formatStr)) {
return true;
} else if (FALSE_SET.contains(formatStr)) {
return false;
} else {
return null;
}
} | @Test
void testToBooleanObject() {
assertTrue(ConvertUtils.toBooleanObject("T"));
assertTrue(ConvertUtils.toBooleanObject("t"));
assertTrue(ConvertUtils.toBooleanObject("Y"));
assertTrue(ConvertUtils.toBooleanObject("y"));
assertFalse(ConvertUtils.toBooleanObject("f"));
assertFalse(ConvertUtils.toBooleanObject("F"));
assertFalse(ConvertUtils.toBooleanObject("n"));
assertFalse(ConvertUtils.toBooleanObject("N"));
assertNull(ConvertUtils.toBooleanObject("a"));
assertTrue(ConvertUtils.toBooleanObject("on"));
assertTrue(ConvertUtils.toBooleanObject("oN"));
assertTrue(ConvertUtils.toBooleanObject("On"));
assertTrue(ConvertUtils.toBooleanObject("ON"));
assertFalse(ConvertUtils.toBooleanObject("No"));
assertFalse(ConvertUtils.toBooleanObject("NO"));
assertNull(ConvertUtils.toBooleanObject("an"));
assertNull(ConvertUtils.toBooleanObject("aN"));
assertNull(ConvertUtils.toBooleanObject("oa"));
assertNull(ConvertUtils.toBooleanObject("Oa"));
assertNull(ConvertUtils.toBooleanObject("Na"));
assertNull(ConvertUtils.toBooleanObject("na"));
assertNull(ConvertUtils.toBooleanObject("aO"));
assertNull(ConvertUtils.toBooleanObject("ao"));
assertFalse(ConvertUtils.toBooleanObject("off"));
assertFalse(ConvertUtils.toBooleanObject("ofF"));
assertFalse(ConvertUtils.toBooleanObject("oFf"));
assertFalse(ConvertUtils.toBooleanObject("oFF"));
assertFalse(ConvertUtils.toBooleanObject("Off"));
assertFalse(ConvertUtils.toBooleanObject("OfF"));
assertFalse(ConvertUtils.toBooleanObject("OFf"));
assertFalse(ConvertUtils.toBooleanObject("OFF"));
assertTrue(ConvertUtils.toBooleanObject("yes"));
assertTrue(ConvertUtils.toBooleanObject("yeS"));
assertTrue(ConvertUtils.toBooleanObject("yEs"));
assertTrue(ConvertUtils.toBooleanObject("yES"));
assertTrue(ConvertUtils.toBooleanObject("Yes"));
assertTrue(ConvertUtils.toBooleanObject("YeS"));
assertTrue(ConvertUtils.toBooleanObject("YEs"));
assertTrue(ConvertUtils.toBooleanObject("YES"));
assertNull(ConvertUtils.toBooleanObject("ono"));
assertNull(ConvertUtils.toBooleanObject("aes"));
assertNull(ConvertUtils.toBooleanObject("aeS"));
assertNull(ConvertUtils.toBooleanObject("aEs"));
assertNull(ConvertUtils.toBooleanObject("aES"));
assertNull(ConvertUtils.toBooleanObject("yas"));
assertNull(ConvertUtils.toBooleanObject("yaS"));
assertNull(ConvertUtils.toBooleanObject("Yas"));
assertNull(ConvertUtils.toBooleanObject("YaS"));
assertNull(ConvertUtils.toBooleanObject("yea"));
assertNull(ConvertUtils.toBooleanObject("yEa"));
assertNull(ConvertUtils.toBooleanObject("Yea"));
assertNull(ConvertUtils.toBooleanObject("YEa"));
assertNull(ConvertUtils.toBooleanObject("aff"));
assertNull(ConvertUtils.toBooleanObject("afF"));
assertNull(ConvertUtils.toBooleanObject("aFf"));
assertNull(ConvertUtils.toBooleanObject("aFF"));
assertNull(ConvertUtils.toBooleanObject("oaf"));
assertNull(ConvertUtils.toBooleanObject("oaF"));
assertNull(ConvertUtils.toBooleanObject("Oaf"));
assertNull(ConvertUtils.toBooleanObject("OaF"));
assertNull(ConvertUtils.toBooleanObject("Ofa"));
assertNull(ConvertUtils.toBooleanObject("ofa"));
assertNull(ConvertUtils.toBooleanObject("OFa"));
assertNull(ConvertUtils.toBooleanObject("oFa"));
assertTrue(ConvertUtils.toBooleanObject("true"));
assertTrue(ConvertUtils.toBooleanObject("truE"));
assertTrue(ConvertUtils.toBooleanObject("trUe"));
assertTrue(ConvertUtils.toBooleanObject("trUE"));
assertTrue(ConvertUtils.toBooleanObject("tRue"));
assertTrue(ConvertUtils.toBooleanObject("tRuE"));
assertTrue(ConvertUtils.toBooleanObject("tRUe"));
assertTrue(ConvertUtils.toBooleanObject("tRUE"));
assertTrue(ConvertUtils.toBooleanObject("True"));
assertTrue(ConvertUtils.toBooleanObject("TruE"));
assertTrue(ConvertUtils.toBooleanObject("TrUe"));
assertTrue(ConvertUtils.toBooleanObject("TrUE"));
assertTrue(ConvertUtils.toBooleanObject("TRue"));
assertTrue(ConvertUtils.toBooleanObject("TRuE"));
assertTrue(ConvertUtils.toBooleanObject("TRUe"));
assertTrue(ConvertUtils.toBooleanObject("TRUE"));
assertNull(ConvertUtils.toBooleanObject("Xrue"));
assertNull(ConvertUtils.toBooleanObject("XruE"));
assertNull(ConvertUtils.toBooleanObject("XrUe"));
assertNull(ConvertUtils.toBooleanObject("XrUE"));
assertNull(ConvertUtils.toBooleanObject("XRue"));
assertNull(ConvertUtils.toBooleanObject("XRuE"));
assertNull(ConvertUtils.toBooleanObject("XRUe"));
assertNull(ConvertUtils.toBooleanObject("XRUE"));
assertNull(ConvertUtils.toBooleanObject("tXue"));
assertNull(ConvertUtils.toBooleanObject("tXuE"));
assertNull(ConvertUtils.toBooleanObject("tXUe"));
assertNull(ConvertUtils.toBooleanObject("tXUE"));
assertNull(ConvertUtils.toBooleanObject("TXue"));
assertNull(ConvertUtils.toBooleanObject("TXuE"));
assertNull(ConvertUtils.toBooleanObject("TXUe"));
assertNull(ConvertUtils.toBooleanObject("TXUE"));
assertNull(ConvertUtils.toBooleanObject("trXe"));
assertNull(ConvertUtils.toBooleanObject("trXE"));
assertNull(ConvertUtils.toBooleanObject("tRXe"));
assertNull(ConvertUtils.toBooleanObject("tRXE"));
assertNull(ConvertUtils.toBooleanObject("TrXe"));
assertNull(ConvertUtils.toBooleanObject("TrXE"));
assertNull(ConvertUtils.toBooleanObject("TRXe"));
assertNull(ConvertUtils.toBooleanObject("TRXE"));
assertNull(ConvertUtils.toBooleanObject("truX"));
assertNull(ConvertUtils.toBooleanObject("trUX"));
assertNull(ConvertUtils.toBooleanObject("tRuX"));
assertNull(ConvertUtils.toBooleanObject("tRUX"));
assertNull(ConvertUtils.toBooleanObject("TruX"));
assertNull(ConvertUtils.toBooleanObject("TrUX"));
assertNull(ConvertUtils.toBooleanObject("TRuX"));
assertNull(ConvertUtils.toBooleanObject("TRUX"));
assertFalse(ConvertUtils.toBooleanObject("false"));
assertFalse(ConvertUtils.toBooleanObject("falsE"));
assertFalse(ConvertUtils.toBooleanObject("falSe"));
assertFalse(ConvertUtils.toBooleanObject("falSE"));
assertFalse(ConvertUtils.toBooleanObject("faLse"));
assertFalse(ConvertUtils.toBooleanObject("faLsE"));
assertFalse(ConvertUtils.toBooleanObject("faLSe"));
assertFalse(ConvertUtils.toBooleanObject("faLSE"));
assertFalse(ConvertUtils.toBooleanObject("fAlse"));
assertFalse(ConvertUtils.toBooleanObject("fAlsE"));
assertFalse(ConvertUtils.toBooleanObject("fAlSe"));
assertFalse(ConvertUtils.toBooleanObject("fAlSE"));
assertFalse(ConvertUtils.toBooleanObject("fALse"));
assertFalse(ConvertUtils.toBooleanObject("fALsE"));
assertFalse(ConvertUtils.toBooleanObject("fALSe"));
assertFalse(ConvertUtils.toBooleanObject("fALSE"));
assertFalse(ConvertUtils.toBooleanObject("False"));
assertFalse(ConvertUtils.toBooleanObject("FalsE"));
assertFalse(ConvertUtils.toBooleanObject("FalSe"));
assertFalse(ConvertUtils.toBooleanObject("FalSE"));
assertFalse(ConvertUtils.toBooleanObject("FaLse"));
assertFalse(ConvertUtils.toBooleanObject("FaLsE"));
assertFalse(ConvertUtils.toBooleanObject("FaLSe"));
assertFalse(ConvertUtils.toBooleanObject("FaLSE"));
assertFalse(ConvertUtils.toBooleanObject("FAlse"));
assertFalse(ConvertUtils.toBooleanObject("FAlsE"));
assertFalse(ConvertUtils.toBooleanObject("FAlSe"));
assertFalse(ConvertUtils.toBooleanObject("FAlSE"));
assertFalse(ConvertUtils.toBooleanObject("FALse"));
assertFalse(ConvertUtils.toBooleanObject("FALsE"));
assertFalse(ConvertUtils.toBooleanObject("FALSe"));
assertFalse(ConvertUtils.toBooleanObject("FALSE"));
assertNull(ConvertUtils.toBooleanObject("Xalse"));
assertNull(ConvertUtils.toBooleanObject("XalsE"));
assertNull(ConvertUtils.toBooleanObject("XalSe"));
assertNull(ConvertUtils.toBooleanObject("XalSE"));
assertNull(ConvertUtils.toBooleanObject("XaLse"));
assertNull(ConvertUtils.toBooleanObject("XaLsE"));
assertNull(ConvertUtils.toBooleanObject("XaLSe"));
assertNull(ConvertUtils.toBooleanObject("XaLSE"));
assertNull(ConvertUtils.toBooleanObject("XAlse"));
assertNull(ConvertUtils.toBooleanObject("XAlsE"));
assertNull(ConvertUtils.toBooleanObject("XAlSe"));
assertNull(ConvertUtils.toBooleanObject("XAlSE"));
assertNull(ConvertUtils.toBooleanObject("XALse"));
assertNull(ConvertUtils.toBooleanObject("XALsE"));
assertNull(ConvertUtils.toBooleanObject("XALSe"));
assertNull(ConvertUtils.toBooleanObject("XALSE"));
assertNull(ConvertUtils.toBooleanObject("fXlse"));
assertNull(ConvertUtils.toBooleanObject("fXlsE"));
assertNull(ConvertUtils.toBooleanObject("fXlSe"));
assertNull(ConvertUtils.toBooleanObject("fXlSE"));
assertNull(ConvertUtils.toBooleanObject("fXLse"));
assertNull(ConvertUtils.toBooleanObject("fXLsE"));
assertNull(ConvertUtils.toBooleanObject("fXLSe"));
assertNull(ConvertUtils.toBooleanObject("fXLSE"));
assertNull(ConvertUtils.toBooleanObject("FXlse"));
assertNull(ConvertUtils.toBooleanObject("FXlsE"));
assertNull(ConvertUtils.toBooleanObject("FXlSe"));
assertNull(ConvertUtils.toBooleanObject("FXlSE"));
assertNull(ConvertUtils.toBooleanObject("FXLse"));
assertNull(ConvertUtils.toBooleanObject("FXLsE"));
assertNull(ConvertUtils.toBooleanObject("FXLSe"));
assertNull(ConvertUtils.toBooleanObject("FXLSE"));
assertNull(ConvertUtils.toBooleanObject("faXse"));
assertNull(ConvertUtils.toBooleanObject("faXsE"));
assertNull(ConvertUtils.toBooleanObject("faXSe"));
assertNull(ConvertUtils.toBooleanObject("faXSE"));
assertNull(ConvertUtils.toBooleanObject("fAXse"));
assertNull(ConvertUtils.toBooleanObject("fAXsE"));
assertNull(ConvertUtils.toBooleanObject("fAXSe"));
assertNull(ConvertUtils.toBooleanObject("fAXSE"));
assertNull(ConvertUtils.toBooleanObject("FaXse"));
assertNull(ConvertUtils.toBooleanObject("FaXsE"));
assertNull(ConvertUtils.toBooleanObject("FaXSe"));
assertNull(ConvertUtils.toBooleanObject("FaXSE"));
assertNull(ConvertUtils.toBooleanObject("FAXse"));
assertNull(ConvertUtils.toBooleanObject("FAXsE"));
assertNull(ConvertUtils.toBooleanObject("FAXSe"));
assertNull(ConvertUtils.toBooleanObject("FAXSE"));
assertNull(ConvertUtils.toBooleanObject("falXe"));
assertNull(ConvertUtils.toBooleanObject("falXE"));
assertNull(ConvertUtils.toBooleanObject("faLXe"));
assertNull(ConvertUtils.toBooleanObject("faLXE"));
assertNull(ConvertUtils.toBooleanObject("fAlXe"));
assertNull(ConvertUtils.toBooleanObject("fAlXE"));
assertNull(ConvertUtils.toBooleanObject("fALXe"));
assertNull(ConvertUtils.toBooleanObject("fALXE"));
assertNull(ConvertUtils.toBooleanObject("FalXe"));
assertNull(ConvertUtils.toBooleanObject("FalXE"));
assertNull(ConvertUtils.toBooleanObject("FaLXe"));
assertNull(ConvertUtils.toBooleanObject("FaLXE"));
assertNull(ConvertUtils.toBooleanObject("FAlXe"));
assertNull(ConvertUtils.toBooleanObject("FAlXE"));
assertNull(ConvertUtils.toBooleanObject("FALXe"));
assertNull(ConvertUtils.toBooleanObject("FALXE"));
assertNull(ConvertUtils.toBooleanObject("falsX"));
assertNull(ConvertUtils.toBooleanObject("falSX"));
assertNull(ConvertUtils.toBooleanObject("faLsX"));
assertNull(ConvertUtils.toBooleanObject("faLSX"));
assertNull(ConvertUtils.toBooleanObject("fAlsX"));
assertNull(ConvertUtils.toBooleanObject("fAlSX"));
assertNull(ConvertUtils.toBooleanObject("fALsX"));
assertNull(ConvertUtils.toBooleanObject("fALSX"));
assertNull(ConvertUtils.toBooleanObject("FalsX"));
assertNull(ConvertUtils.toBooleanObject("FalSX"));
assertNull(ConvertUtils.toBooleanObject("FaLsX"));
assertNull(ConvertUtils.toBooleanObject("FaLSX"));
assertNull(ConvertUtils.toBooleanObject("FAlsX"));
assertNull(ConvertUtils.toBooleanObject("FAlSX"));
assertNull(ConvertUtils.toBooleanObject("FALsX"));
assertNull(ConvertUtils.toBooleanObject("FALSX"));
assertNull(ConvertUtils.toBooleanObject(null));
} |
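
TRUE_SET and FALSE_SET are not shown, but the assertions pin down their contents exactly; a plausible reconstruction inferred from the test (an assumption, not the actual source):

import java.util.Set;

final class BooleanSpellings {
    // Inferred from the assertions: all lowercase, matched after toLowerCase().
    static final Set<String> TRUE_SET = Set.of("true", "t", "yes", "y", "on");
    static final Set<String> FALSE_SET = Set.of("false", "f", "no", "n", "off");
}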
public static List<InetSocketAddress> getJobMasterRpcAddresses(AlluxioConfiguration conf) {
// First check whether job rpc addresses are explicitly configured.
if (conf.isSet(PropertyKey.JOB_MASTER_RPC_ADDRESSES)) {
return parseInetSocketAddresses(
conf.getList(PropertyKey.JOB_MASTER_RPC_ADDRESSES));
}
int jobRpcPort =
NetworkAddressUtils.getPort(NetworkAddressUtils.ServiceType.JOB_MASTER_RPC, conf);
// Fall back on explicitly configured regular master rpc addresses.
if (conf.isSet(PropertyKey.MASTER_RPC_ADDRESSES)) {
List<InetSocketAddress> addrs =
parseInetSocketAddresses(conf.getList(PropertyKey.MASTER_RPC_ADDRESSES));
return overridePort(addrs, jobRpcPort);
}
// Fall back on server-side journal configuration.
return overridePort(getEmbeddedJournalAddresses(conf, ServiceType.JOB_MASTER_RAFT), jobRpcPort);
} | @Test
public void getJobMasterRpcAddresses() {
AlluxioConfiguration conf =
createConf(ImmutableMap.of(PropertyKey.JOB_MASTER_RPC_ADDRESSES, "host1:99,host2:100"));
assertEquals(
Arrays.asList(InetSocketAddress.createUnresolved("host1", 99),
InetSocketAddress.createUnresolved("host2", 100)),
ConfigurationUtils.getJobMasterRpcAddresses(conf));
} |
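
Both fallbacks keep the configured hosts but swap in the job-master RPC port. A sketch of what the overridePort helper plausibly does (its implementation is not shown, so treat this as an assumption):

import java.net.InetSocketAddress;
import java.util.List;
import java.util.stream.Collectors;

final class PortOverrideSketch {

    // Keep each configured host, but replace its port with the job RPC port.
    static List<InetSocketAddress> overridePort(List<InetSocketAddress> addrs, int port) {
        return addrs.stream()
                .map(addr -> InetSocketAddress.createUnresolved(addr.getHostString(), port))
                .collect(Collectors.toList());
    }
}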
public SeaTunnelRow reconvert(InternalRow record) throws IOException {
if (isMultiTable) {
String tableId = record.getString(1);
return rowSerializationMap.get(tableId).reconvert(record);
}
return rowSerialization.reconvert(record);
} | @Test
public void testWriteConverter() throws IOException {
initSchema();
initData();
MultiTableManager multiTableManager =
new MultiTableManager(new CatalogTable[] {catalogTable1});
SeaTunnelRow seaTunnelRow = multiTableManager.reconvert(specificInternalRow1);
for (int i = 0; i < seaTunnelRow.getFields().length; i++) {
Object[] values = seaTunnelRow.getFields();
Object[] actual = seaTunnelRow1.getFields();
for (int v = 0; v < values.length; v++) {
if (values[v] instanceof Object[]) {
Assertions.assertArrayEquals((Object[]) values[v], (Object[]) actual[v]);
} else {
Assertions.assertEquals(values[v], actual[v]);
}
}
}
} |
@Override
public AppResponse process(Flow flow, RdaSessionRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException, SharedServiceClientException {
var authAppSession = appSessionService.getSession(request.getAuthSessionId());
        if (!isAppSessionAuthenticated(authAppSession) || !request.getUserAppId().equals(authAppSession.getUserAppId())) {
return new NokResponse();
}
AppAuthenticator appAuthenticator = appAuthenticatorService.findByUserAppId(request.getUserAppId());
if (!isAppAuthenticatorActivated(appAuthenticator)) return new NokResponse();
appSession = new AppSession();
appSession.setAction("upgrade_app");
appSession.setFlow(UpgradeLoginLevel.NAME);
appSession.setRdaAction("app");
appSession.setUserAppId(appAuthenticator.getUserAppId());
appSession.setDeviceName(appAuthenticator.getDeviceName());
appSession.setInstanceId(appAuthenticator.getInstanceId());
appSession.setAccountId(appAuthenticator.getAccountId());
        digidClient.remoteLog("844", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(), lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName(), lowerUnderscore(HIDDEN), true));
return new RdaSessionResponse(appSession.getId(), appSession.getAction());
} | @Test
void processOk() throws FlowNotDefinedException, SharedServiceClientException, IOException, NoSuchAlgorithmException {
//given
AppAuthenticator appAuthenticator = new AppAuthenticator();
appAuthenticator.setUserAppId("userAppId");
appAuthenticator.setInstanceId("123456");
appAuthenticator.setDeviceName("deviceName");
appAuthenticator.setAccountId(1L);
appAuthenticator.setActivatedAt(ZonedDateTime.now());
AppSession session = new AppSession();
session.setState(State.AUTHENTICATED.name());
session.setUserAppId("userAppId");
when(appAuthenticatorService.findByUserAppId(any())).thenReturn(appAuthenticator);
when(appSessionService.getSession(any())).thenReturn(session);
RdaSessionResponse appResponse = (RdaSessionResponse) initRda.process(mockedFlow, request);
assertEquals("upgrade_app", appResponse.getAction());
} |
@Override
public void initialize(ServiceConfiguration config) throws IOException, IllegalArgumentException {
String prefix = (String) config.getProperty(CONF_TOKEN_SETTING_PREFIX);
if (null == prefix) {
prefix = "";
}
this.confTokenSecretKeySettingName = prefix + CONF_TOKEN_SECRET_KEY;
this.confTokenPublicKeySettingName = prefix + CONF_TOKEN_PUBLIC_KEY;
this.confTokenAuthClaimSettingName = prefix + CONF_TOKEN_AUTH_CLAIM;
this.confTokenPublicAlgSettingName = prefix + CONF_TOKEN_PUBLIC_ALG;
this.confTokenAudienceClaimSettingName = prefix + CONF_TOKEN_AUDIENCE_CLAIM;
this.confTokenAudienceSettingName = prefix + CONF_TOKEN_AUDIENCE;
this.confTokenAllowedClockSkewSecondsSettingName = prefix + CONF_TOKEN_ALLOWED_CLOCK_SKEW_SECONDS;
// we need to fetch the algorithm before we fetch the key
this.publicKeyAlg = getPublicKeyAlgType(config);
this.validationKey = getValidationKey(config);
this.roleClaim = getTokenRoleClaim(config);
this.audienceClaim = getTokenAudienceClaim(config);
this.audience = getTokenAudience(config);
long allowedSkew = getConfTokenAllowedClockSkewSeconds(config);
this.parser = Jwts.parserBuilder()
.setAllowedClockSkewSeconds(allowedSkew)
.setSigningKey(this.validationKey)
.build();
if (audienceClaim != null && audience == null) {
throw new IllegalArgumentException("Token Audience Claim [" + audienceClaim
+ "] configured, but Audience stands for this broker not.");
}
} | @Test
public void testTrimAuthSecretKeyFilePath() throws Exception {
String space = " ";
SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256);
File secretKeyFile = File.createTempFile("pulsar-test-secret-key-", ".key");
secretKeyFile.deleteOnExit();
Files.write(Paths.get(secretKeyFile.toString()), secretKey.getEncoded());
AuthenticationProviderToken provider = new AuthenticationProviderToken();
Properties properties = new Properties();
String secretKeyFileUri = secretKeyFile.toURI().toString() + space;
properties.setProperty(AuthenticationProviderToken.CONF_TOKEN_SECRET_KEY, secretKeyFileUri);
ServiceConfiguration conf = new ServiceConfiguration();
conf.setProperties(properties);
provider.initialize(conf);
} |
public static ValueLabel formatPacketRate(long packets) {
return new ValueLabel(packets, PACKETS_UNIT).perSec();
} | @Test
public void formatPacketRateMega() {
vl = TopoUtils.formatPacketRate(9_000_000);
assertEquals(AM_WL, "8.58 Mpps", vl.toString());
} |
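
The expected label reveals binary scaling: 9,000,000 / 2^20 ≈ 8.58, so the formatter divides by 1024 per unit step rather than 1000 when producing the Mpps label.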
@Override
public Path move(final Path source, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
Path target;
if(source.attributes().getCustom().containsKey(KEY_DELETE_MARKER)) {
            // Delete markers cannot be copied; recreate the marker at the target instead
target = new Path(renamed);
target.attributes().setVersionId(null);
delete.delete(Collections.singletonMap(target, status), connectionCallback, callback);
try {
// Find version id of moved delete marker
final Path bucket = containerService.getContainer(renamed);
final VersionOrDeleteMarkersChunk marker = session.getClient().listVersionedObjectsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(renamed),
String.valueOf(Path.DELIMITER), 1, null, null, false);
if(marker.getItems().length == 1) {
final BaseVersionOrDeleteMarker markerObject = marker.getItems()[0];
target.attributes().withVersionId(markerObject.getVersionId()).setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
else {
throw new NotfoundException(String.format("Unable to find delete marker %s", renamed.getName()));
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, renamed);
}
}
else {
try {
target = proxy.copy(source, renamed, status.withLength(source.attributes().getSize()), connectionCallback, new DisabledStreamListener());
// Copy source path and nullify version id to add a delete marker
delete.delete(Collections.singletonMap(new Path(source).withAttributes(new PathAttributes(source.attributes()).withVersionId(null)), status),
connectionCallback, callback);
}
catch(NotfoundException e) {
if(source.getType().contains(Path.Type.placeholder)) {
// No placeholder object to copy, create a new one at the target
target = session.getFeature(Directory.class).mkdir(renamed, new TransferStatus().withRegion(source.attributes().getRegion()));
}
else {
throw e;
}
}
}
return target;
} | @Test
public void testMoveVirtualHost() throws Exception {
final S3AccessControlListFeature acl = new S3AccessControlListFeature(virtualhost);
final Path test = new S3TouchFeature(virtualhost, acl).touch(new Path(new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertTrue(new S3FindFeature(virtualhost, acl).find(test));
final Path renamed = new Path(new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
new S3MoveFeature(virtualhost, acl).move(test, renamed, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(new S3FindFeature(virtualhost, acl).find(test));
assertTrue(new S3FindFeature(virtualhost, acl).find(renamed));
new S3DefaultDeleteFeature(virtualhost).delete(Collections.singletonList(renamed), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static Future<?> runInThread(String groupId, final Runnable runnable) {
return GROUP_THREAD_POOLS.getOrDefault(groupId, GlobalThreadPoolHolder.INSTANCE).submit(runnable);
} | @Test
public void testRunThread() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
ThreadPoolsFactory.runInThread(GROUP_ID_001, () -> latch.countDown());
latch.await();
} |
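
Unknown group ids fall back to the shared global pool via getOrDefault(groupId, GlobalThreadPoolHolder.INSTANCE), so a submission never fails just because the group was not registered first.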
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
} | @Test
public void kGroupedStreamAnonymousStoreTypedMaterializedCountShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic")
.groupByKey()
.count(Materialized.as(Materialized.StoreType.IN_MEMORY));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000003\n" +
" Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
} |
@Override
public void report() {
try {
tryReport();
} catch (ConcurrentModificationException | NoSuchElementException ignored) {
            // tryReport() iterates over the various maps without synchronization, which
            // may throw a ConcurrentModificationException or NoSuchElementException
            // if a metric is concurrently added or removed.
}
} | @Test
void testOnlyMeterRegistered() {
reporter.notifyOfAddedMetric(new MeterView(new SimpleCounter()), "metric", metricGroup);
reporter.report();
assertThat(testLoggerResource.getMessages())
.noneMatch(logOutput -> logOutput.contains("-- Counter"))
.noneMatch(logOutput -> logOutput.contains("-- Gauge"))
.noneMatch(logOutput -> logOutput.contains("-- Histogram"))
.anyMatch(logOutput -> logOutput.contains("-- Meter"));
} |