Columns: focal_method (string, length 13 to 60.9k), test_case (string, length 25 to 109k)
@Override public Type type() { return type; }
@Test public void testTypeNotNull() { assertThrows(SchemaBuilderException.class, () -> SchemaBuilder.type(null)); }
static void clusterIdCommand(PrintStream stream, Admin adminClient) throws Exception { String clusterId = adminClient.describeCluster().clusterId().get(); if (clusterId != null) { stream.println("Cluster ID: " + clusterId); } else { stream.println("No cluster ID found. The Kafka version is probably too old."); } }
@Test public void testClusterTooOldToHaveId() throws Exception { Admin adminClient = new MockAdminClient.Builder(). clusterId(null). build(); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ClusterTool.clusterIdCommand(new PrintStream(stream), adminClient); assertEquals("No cluster ID found. The Kafka version is probably too old.\n", stream.toString()); }
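A complementary happy-path check pairs naturally with the test above; a minimal sketch, assuming MockAdminClient echoes whatever non-null cluster id it is built with (the id value here is made up):
@Test public void testClusterIdPresent() throws Exception {
    Admin adminClient = new MockAdminClient.Builder().clusterId("test-cluster-id").build(); // hypothetical id
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ClusterTool.clusterIdCommand(new PrintStream(stream), adminClient);
    assertEquals("Cluster ID: test-cluster-id\n", stream.toString());
}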
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final FileChannel channel = FileChannel.open(session.toPath(file), StandardOpenOption.READ); channel.position(status.getOffset()); return Channels.newInputStream(channel); } catch(IOException e) { throw new LocalExceptionMappingService().map("Download {0} failed", e, file); } }
@Test(expected = NotfoundException.class) public void testReadNotFound() throws Exception { final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname())); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final TransferStatus status = new TransferStatus(); final Path workdir = new LocalHomeFinderFeature().find(); new LocalReadFeature(session).read(new Path(workdir, "nosuchname", EnumSet.of(Path.Type.file)), status, new DisabledConnectionCallback()); }
public DoubleArrayAsIterable usingTolerance(double tolerance) { return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject()); }
@Test public void usingTolerance_containsExactly_primitiveDoubleArray_success() { assertThat(array(1.1, TOLERABLE_2POINT2, 3.3)) .usingTolerance(DEFAULT_TOLERANCE) .containsExactly(array(2.2, 1.1, 3.3)); }
@Override public void onCommitFailure(GlobalTransaction tx, Throwable cause) { LOGGER.warn("Failed to commit transaction[" + tx.getXid() + "]", cause); TIMER.newTimeout(new CheckTimerTask(tx, GlobalStatus.Committed), SCHEDULE_INTERVAL_SECONDS, TimeUnit.SECONDS); }
@Test void onCommitFailure() throws Exception { RootContext.bind(DEFAULT_XID); GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate(); FailureHandler failureHandler = new DefaultFailureHandlerImpl(); failureHandler.onCommitFailure(tx, new MyRuntimeException("").getCause()); // get the timer via reflection Class<?> c = Class.forName("org.apache.seata.tm.api.DefaultFailureHandlerImpl"); Field field = c.getDeclaredField("TIMER"); field.setAccessible(true); HashedWheelTimer timer = (HashedWheelTimer) field.get(failureHandler); // assert timer pending count: the first time it is 1 Long pendingTimeout = timer.pendingTimeouts(); Assertions.assertEquals(1L, pendingTimeout); // set globalStatus globalStatus = GlobalStatus.Committed; Thread.sleep(25 * 1000L); pendingTimeout = timer.pendingTimeouts(); LOGGER.info("pendingTimeout {}", pendingTimeout); // all timers are done Assertions.assertEquals(0L, pendingTimeout); }
@Override public VirtualNetwork createVirtualNetwork(TenantId tenantId) { checkNotNull(tenantId, TENANT_NULL); return store.addNetwork(tenantId); }
@Test(expected = NullPointerException.class) public void testCreateNullVirtualNetwork() { manager.createVirtualNetwork(null); }
static boolean allowCodeRange(int prev, int next) { if ((prev + 1) != next) { return false; } int prevH = (prev >> 8) & 0xFF; int prevL = prev & 0xFF; int nextH = (next >> 8) & 0xFF; int nextL = next & 0xFF; return prevH == nextH && prevL < nextL; }
@Test void testAllowCodeRange() { // Denied progressions (negative) assertFalse(ToUnicodeWriter.allowCodeRange(0x000F, 0x0007)); assertFalse(ToUnicodeWriter.allowCodeRange(0x00FF, 0x0000)); assertFalse(ToUnicodeWriter.allowCodeRange(0x03FF, 0x0300)); assertFalse(ToUnicodeWriter.allowCodeRange(0x0401, 0x0400)); assertFalse(ToUnicodeWriter.allowCodeRange(0xFFFF, 0x0000)); // Denied progressions (non sequential) assertFalse(ToUnicodeWriter.allowCodeRange(0x0000, 0x0000)); assertFalse(ToUnicodeWriter.allowCodeRange(0x0000, 0x000F)); assertFalse(ToUnicodeWriter.allowCodeRange(0x0000, 0x007F)); assertFalse(ToUnicodeWriter.allowCodeRange(0x0000, 0x00FF)); assertFalse(ToUnicodeWriter.allowCodeRange(0x0007, 0x000F)); assertFalse(ToUnicodeWriter.allowCodeRange(0x007F, 0x00FF)); assertFalse(ToUnicodeWriter.allowCodeRange(0x00FF, 0x00FF)); // Denied progressions (overflow) assertFalse(ToUnicodeWriter.allowCodeRange(0x00FF, 0x0100)); assertFalse(ToUnicodeWriter.allowCodeRange(0x01FF, 0x0200)); assertFalse(ToUnicodeWriter.allowCodeRange(0x03FF, 0x0400)); assertFalse(ToUnicodeWriter.allowCodeRange(0x07FF, 0x0800)); assertFalse(ToUnicodeWriter.allowCodeRange(0x0FFF, 0x1000)); assertFalse(ToUnicodeWriter.allowCodeRange(0x1FFF, 0x2000)); assertFalse(ToUnicodeWriter.allowCodeRange(0x3FFF, 0x4000)); assertFalse(ToUnicodeWriter.allowCodeRange(0x7FFF, 0x8000)); // Allowed progressions (positive, sequential, and w/o overflow) assertTrue(ToUnicodeWriter.allowCodeRange(0x00, 0x01)); assertTrue(ToUnicodeWriter.allowCodeRange(0x01, 0x02)); assertTrue(ToUnicodeWriter.allowCodeRange(0x03, 0x04)); assertTrue(ToUnicodeWriter.allowCodeRange(0x07, 0x08)); assertTrue(ToUnicodeWriter.allowCodeRange(0x0E, 0x0F)); assertTrue(ToUnicodeWriter.allowCodeRange(0x1F, 0x20)); assertTrue(ToUnicodeWriter.allowCodeRange(0x3F, 0x40)); assertTrue(ToUnicodeWriter.allowCodeRange(0x7F, 0x80)); assertTrue(ToUnicodeWriter.allowCodeRange(0xFE, 0xFF)); assertTrue(ToUnicodeWriter.allowCodeRange(0x03FE, 0x03FF)); assertTrue(ToUnicodeWriter.allowCodeRange(0x0400, 0x0401)); assertTrue(ToUnicodeWriter.allowCodeRange(0xFFFE, 0xFFFF)); }
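The predicate only admits immediate successors that stay inside one 256-code block: next must equal prev + 1 and the high bytes must match, which is exactly why every 0x..FF boundary case above is rejected. A minimal equivalent restatement for 16-bit code points (the helper name is illustrative):
static boolean sameBlockSuccessor(int prev, int next) {
    // prev + 1 == next forbids gaps; equal high bytes forbid crossing an ...FF boundary,
    // and together they imply the low byte strictly increases.
    return next == prev + 1 && (prev >> 8) == (next >> 8);
}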
@Override public int compareTo(DateTimeStamp dateTimeStamp) { return comparator.compare(this,dateTimeStamp); }
@Test void compareToTransitivity() { DateTimeStamp stamp1 = new DateTimeStamp("2021-09-01T11:12:13.111-0100", 100); DateTimeStamp stamp2 = new DateTimeStamp((String)null, 200); DateTimeStamp stamp3 = new DateTimeStamp("2021-08-31T11:12:13.111-0100", 300); assertTrue(stamp1.compareTo(stamp2) < 0); assertTrue(stamp2.compareTo(stamp3) < 0); assertTrue(stamp1.compareTo(stamp3) < 0); }
public static Table getTableMeta(DataSource ds, String tableName) { return getTableMeta(ds, null, null, tableName); }
@Test public void getTableMetaTest() { final Table table = MetaUtil.getTableMeta(ds, "user"); assertEquals(CollectionUtil.newHashSet("id"), table.getPkNames()); }
@Override public void remove(NamedNode master) { connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName()); }
@Test public void testRemove() { Collection<RedisServer> masters = connection.masters(); connection.remove(masters.iterator().next()); }
@Override protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) { ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT); assert shenyuContext != null; DivideRuleHandle ruleHandle = buildRuleHandle(rule); if (ruleHandle.getHeaderMaxSize() > 0) { long headerSize = exchange.getRequest().getHeaders().values() .stream() .flatMap(Collection::stream) .mapToLong(header -> header.getBytes(StandardCharsets.UTF_8).length) .sum(); if (headerSize > ruleHandle.getHeaderMaxSize()) { LOG.error("request header is too large"); Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.REQUEST_HEADER_TOO_LARGE); return WebFluxResultUtils.result(exchange, error); } } if (ruleHandle.getRequestMaxSize() > 0) { if (exchange.getRequest().getHeaders().getContentLength() > ruleHandle.getRequestMaxSize()) { LOG.error("request entity is too large"); Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.REQUEST_ENTITY_TOO_LARGE); return WebFluxResultUtils.result(exchange, error); } } List<Upstream> upstreamList = UpstreamCacheManager.getInstance().findUpstreamListBySelectorId(selector.getId()); if (CollectionUtils.isEmpty(upstreamList)) { LOG.error("divide upstream configuration error: {}", selector); Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.CANNOT_FIND_HEALTHY_UPSTREAM_URL); return WebFluxResultUtils.result(exchange, error); } String ip = Objects.requireNonNull(exchange.getRequest().getRemoteAddress()).getAddress().getHostAddress(); Upstream upstream = LoadBalancerFactory.selector(upstreamList, ruleHandle.getLoadBalance(), ip); if (Objects.isNull(upstream)) { LOG.error("divide has no upstream"); Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.CANNOT_FIND_HEALTHY_UPSTREAM_URL); return WebFluxResultUtils.result(exchange, error); } // set the http url if (CollectionUtils.isNotEmpty(exchange.getRequest().getHeaders().get(Constants.SPECIFY_DOMAIN))) { upstream.setUrl(exchange.getRequest().getHeaders().get(Constants.SPECIFY_DOMAIN).get(0)); } // set domain String domain = upstream.buildDomain(); exchange.getAttributes().put(Constants.HTTP_DOMAIN, domain); // set the http timeout exchange.getAttributes().put(Constants.HTTP_TIME_OUT, ruleHandle.getTimeout()); exchange.getAttributes().put(Constants.HTTP_RETRY, ruleHandle.getRetry()); // set retry strategy stuff exchange.getAttributes().put(Constants.RETRY_STRATEGY, StringUtils.defaultString(ruleHandle.getRetryStrategy(), RetryEnum.CURRENT.getName())); exchange.getAttributes().put(Constants.LOAD_BALANCE, StringUtils.defaultString(ruleHandle.getLoadBalance(), LoadBalanceEnum.RANDOM.getName())); exchange.getAttributes().put(Constants.DIVIDE_SELECTOR_ID, selector.getId()); if (ruleHandle.getLoadBalance().equals(P2C)) { return chain.execute(exchange).doOnSuccess(e -> responseTrigger(upstream )).doOnError(throwable -> responseTrigger(upstream)); } else if (ruleHandle.getLoadBalance().equals(SHORTEST_RESPONSE)) { beginTime = System.currentTimeMillis(); return chain.execute(exchange).doOnSuccess(e -> successResponseTrigger(upstream )); } return chain.execute(exchange); }
@Test public void doPostExecuteTest() { when(chain.execute(postExchange)).thenReturn(Mono.empty()); Mono<Void> result = dividePlugin.doExecute(postExchange, chain, selectorData, ruleData); StepVerifier.create(result).expectSubscription().verifyComplete(); }
public final static Object getObject(String commandPart, Gateway gateway) { if (isEmpty(commandPart) || isEnd(commandPart)) { throw new Py4JException("Command Part is Empty or is the End of Command Part"); } else { switch (commandPart.charAt(0)) { case BOOLEAN_TYPE: return getBoolean(commandPart); case DOUBLE_TYPE: return getDouble(commandPart); case LONG_TYPE: return getLong(commandPart); case INTEGER_TYPE: try { return getInteger(commandPart); } catch (NumberFormatException e) { return getLong(commandPart); } case BYTES_TYPE: return getBytes(commandPart); case NULL_TYPE: return getNull(commandPart); case VOID: return getNull(commandPart); case REFERENCE_TYPE: return getReference(commandPart, gateway); case STRING_TYPE: return getString(commandPart); case DECIMAL_TYPE: return getDecimal(commandPart); case PYTHON_PROXY_TYPE: return getPythonProxy(commandPart, gateway); default: throw new Py4JException("Command Part is unknown: " + commandPart); } } }
@Test public void testGetObject() { Gateway gateway = new Gateway(null); Object obj1 = new Object(); gateway.putObject("o123", obj1); assertEquals(1, Protocol.getObject("i1", null)); assertEquals(true, Protocol.getObject("bTrue", null)); assertEquals(1.234, (Double) Protocol.getObject("d1.234", null), 0.001); assertEquals(obj1, Protocol.getObject("ro123", gateway)); assertEquals("Hello\nWorld\t", Protocol.getObject("sHello\\nWorld\t", null)); assertEquals(123l, Protocol.getObject("L123", null)); assertEquals(new BigDecimal("-14.456"), Protocol.getObject("D-14.456", null)); assertNull(Protocol.getObject("n", null)); try { Protocol.getObject(null, null); fail(); } catch (Py4JException e) { assertTrue(true); } try { Protocol.getObject("", null); fail(); } catch (Py4JException e) { assertTrue(true); } try { Protocol.getObject("e", null); fail(); } catch (Py4JException e) { assertTrue(true); } try { Protocol.getObject("z123", null); fail(); } catch (Py4JException e) { assertTrue(true); } }
public void recordOffset(TopicPartition partition, long partitionLastOffset) { lastProcessedOffset.put(partition, partitionLastOffset); }
@Order(1) @Test @DisplayName("Tests whether the cache can record a single offset") void updateOffsetsSinglePartition() { final TopicPartition topic1 = new TopicPartition("topic1", 1); assertDoesNotThrow(() -> offsetCache.recordOffset(topic1, 1)); assertDoesNotThrow(() -> offsetCache.recordOffset(topic1, 2)); assertDoesNotThrow(() -> offsetCache.recordOffset(topic1, 2), "The cache should not throw exceptions for duplicate records"); }
public static <T> void maybeMergeOptions(Properties props, String key, OptionSet options, OptionSpec<T> spec) { if (options.has(spec) || !props.containsKey(key)) { T value = options.valueOf(spec); if (value == null) { props.remove(key); } else { props.put(key, value.toString()); } } }
@Test public void testMaybeMergeOptionsDefaultValueIfNotExist() { setUpOptions(); OptionSet options = parser.parse(); CommandLineUtils.maybeMergeOptions(props, "skey", options, stringOpt); CommandLineUtils.maybeMergeOptions(props, "ikey", options, intOpt); CommandLineUtils.maybeMergeOptions(props, "sokey", options, stringOptOptionalArg); CommandLineUtils.maybeMergeOptions(props, "iokey", options, intOptOptionalArg); CommandLineUtils.maybeMergeOptions(props, "sondkey", options, stringOptOptionalArgNoDefault); CommandLineUtils.maybeMergeOptions(props, "iondkey", options, intOptOptionalArgNoDefault); assertEquals("default-string", props.get("skey")); assertEquals("100", props.get("ikey")); assertEquals("default-string-2", props.get("sokey")); assertEquals("200", props.get("iokey")); assertNull(props.get("sondkey")); assertNull(props.get("iondkey")); }
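The precedence maybeMergeOptions implements, restated: a value resolved from the option wins when the option was passed explicitly or when the key is absent from the properties, and a null resolution removes the key (which is why sondkey and iondkey end up null above). A minimal sketch of the same logic (all names illustrative):
static void merge(Properties props, String key, boolean explicitlyPassed, String resolved) {
    if (explicitlyPassed || !props.containsKey(key)) {
        if (resolved == null) {
            props.remove(key); // an optional-arg option with no default resolves to null
        } else {
            props.put(key, resolved);
        }
    }
}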
@Override public int getOrder() { return PluginEnum.MODIFY_RESPONSE.getCode(); }
@Test public void testGetOrder() { assertEquals(modifyResponsePlugin.getOrder(), PluginEnum.MODIFY_RESPONSE.getCode()); }
@SuppressWarnings("unchecked") @Override public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( MoveApplicationAcrossQueuesRequest request) throws YarnException { ApplicationId applicationId = request.getApplicationId(); UserGroupInformation callerUGI = getCallerUgi(applicationId, AuditConstants.MOVE_APP_REQUEST); RMApp application = verifyUserAccessForRMApp(applicationId, callerUGI, AuditConstants.MOVE_APP_REQUEST, ApplicationAccessType.MODIFY_APP, true); String targetQueue = request.getTargetQueue(); if (!accessToTargetQueueAllowed(callerUGI, application, targetQueue)) { RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "Target queue doesn't exist or user" + " doesn't have permissions to submit to target queue: " + targetQueue, "ClientRMService", AuditConstants.UNAUTHORIZED_USER, applicationId); throw RPCUtil.getRemoteException(new AccessControlException("User " + callerUGI.getShortUserName() + " cannot submit applications to" + " target queue or the target queue doesn't exist: " + targetQueue + " while moving " + applicationId)); } // Moves only allowed when app is in a state that means it is tracked by // the scheduler. Introducing SUBMITTED state also to this list as there // could be a corner scenario that app may not be in Scheduler in SUBMITTED // state. if (!ACTIVE_APP_STATES.contains(application.getState())) { String msg = "App in " + application.getState() + " state cannot be moved."; RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService", msg); throw new YarnException(msg); } try { this.rmAppManager.moveApplicationAcrossQueue( application.getApplicationId(), request.getTargetQueue()); } catch (YarnException ex) { RMAuditLogger.logFailure(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService", ex.getMessage()); throw ex; } RMAuditLogger.logSuccess(callerUGI.getShortUserName(), AuditConstants.MOVE_APP_REQUEST, "ClientRMService" , applicationId); return recordFactory .newRecordInstance(MoveApplicationAcrossQueuesResponse.class); }
@Test public void testMoveApplicationSubmitTargetQueue() throws Exception { // move the application as the owner ApplicationId applicationId = getApplicationId(1); UserGroupInformation aclUGI = UserGroupInformation.getCurrentUser(); QueueACLsManager queueACLsManager = getQueueAclManager("allowed_queue", QueueACL.SUBMIT_APPLICATIONS, aclUGI); ApplicationACLsManager appAclsManager = getAppAclManager(); ClientRMService rmService = createClientRMServiceForMoveApplicationRequest( applicationId, aclUGI.getShortUserName(), appAclsManager, queueACLsManager); // move as the owner queue in the acl MoveApplicationAcrossQueuesRequest moveAppRequest = MoveApplicationAcrossQueuesRequest. newInstance(applicationId, "allowed_queue"); rmService.moveApplicationAcrossQueues(moveAppRequest); // move as the owner queue not in the acl moveAppRequest = MoveApplicationAcrossQueuesRequest.newInstance( applicationId, "not_allowed"); try { rmService.moveApplicationAcrossQueues(moveAppRequest); Assert.fail("The request should fail with an AccessControlException"); } catch (YarnException rex) { Assert.assertTrue("AccessControlException is expected", rex.getCause() instanceof AccessControlException); } // ACL is owned by "moveuser", move is performed as a different user aclUGI = UserGroupInformation.createUserForTesting("moveuser", new String[]{}); queueACLsManager = getQueueAclManager("move_queue", QueueACL.SUBMIT_APPLICATIONS, aclUGI); appAclsManager = getAppAclManager(); ClientRMService rmService2 = createClientRMServiceForMoveApplicationRequest(applicationId, aclUGI.getShortUserName(), appAclsManager, queueACLsManager); // access to the queue not OK: user not allowed in this queue MoveApplicationAcrossQueuesRequest moveAppRequest2 = MoveApplicationAcrossQueuesRequest. newInstance(applicationId, "move_queue"); try { rmService2.moveApplicationAcrossQueues(moveAppRequest2); Assert.fail("The request should fail with an AccessControlException"); } catch (YarnException rex) { Assert.assertTrue("AccessControlException is expected", rex.getCause() instanceof AccessControlException); } // execute the move as the acl owner // access to the queue OK: user allowed in this queue aclUGI.doAs(new PrivilegedExceptionAction<Object>() { @Override public Object run() throws Exception { return rmService2.moveApplicationAcrossQueues(moveAppRequest2); } }); }
@Override public List<Instance> selectInstances(String serviceName, boolean healthy) throws NacosException { return selectInstances(serviceName, new ArrayList<>(), healthy); }
@Test void testSelectInstancesWithHealthyFlag() throws NacosException { //given Instance healthyInstance = new Instance(); healthyInstance.setHealthy(true); Instance instance1 = new Instance(); instance1.setHealthy(false); Instance instance2 = new Instance(); instance2.setHealthy(true); instance2.setEnabled(false); Instance instance3 = new Instance(); instance3.setHealthy(true); instance3.setWeight(0.0); List<Instance> hosts = new ArrayList<>(); hosts.add(healthyInstance); hosts.add(instance1); hosts.add(instance2); hosts.add(instance3); ServiceInfo info = new ServiceInfo(); info.setHosts(hosts); String serviceName = "service1"; String groupName = "group1"; List<String> clusterList = Arrays.asList("cluster1", "cluster2"); when(proxy.queryInstancesOfService(serviceName, groupName, "cluster1,cluster2", false)).thenReturn(info); //when List<Instance> instances = client.selectInstances(serviceName, groupName, clusterList, true, false); //then assertEquals(1, instances.size()); assertSame(healthyInstance, instances.get(0)); }
@Override public Type classify(final Throwable e) { final Type type = e instanceof MissingSourceTopicException ? Type.USER : Type.UNKNOWN; if (type == Type.USER) { LOG.info( "Classified error as USER error based on missing topic. Query ID: {} Exception: {}", queryId, e); } return type; }
@Test public void shouldClassifyMissingTopicAsUserError() { // Given: final Exception e = new MissingSourceTopicException("foo"); // When: final Type type = new MissingTopicClassifier("").classify(e); // Then: assertThat(type, is(Type.USER)); }
public static CoordinatorRecord newConsumerGroupEpochTombstoneRecord( String groupId ) { return new CoordinatorRecord( new ApiMessageAndVersion( new ConsumerGroupMetadataKey() .setGroupId(groupId), (short) 3 ), null // Tombstone. ); }
@Test public void testNewConsumerGroupEpochTombstoneRecord() { CoordinatorRecord expectedRecord = new CoordinatorRecord( new ApiMessageAndVersion( new ConsumerGroupMetadataKey() .setGroupId("group-id"), (short) 3), null); assertEquals(expectedRecord, newConsumerGroupEpochTombstoneRecord( "group-id" )); }
@Override public PageResult<ProductBrandDO> getBrandPage(ProductBrandPageReqVO pageReqVO) { return brandMapper.selectPage(pageReqVO); }
@Test public void testGetBrandPage() { // mock data ProductBrandDO dbBrand = randomPojo(ProductBrandDO.class, o -> { // will be matched by the query below o.setName("芋道源码"); o.setStatus(CommonStatusEnum.ENABLE.getStatus()); o.setCreateTime(buildTime(2022, 2, 1)); }); brandMapper.insert(dbBrand); // name does not match brandMapper.insert(cloneIgnoreId(dbBrand, o -> o.setName("源码"))); // status does not match brandMapper.insert(cloneIgnoreId(dbBrand, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()))); // createTime does not match brandMapper.insert(cloneIgnoreId(dbBrand, o -> o.setCreateTime(buildTime(2022, 3, 1)))); // prepare parameters ProductBrandPageReqVO reqVO = new ProductBrandPageReqVO(); reqVO.setName("芋道"); reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus()); reqVO.setCreateTime((new LocalDateTime[]{buildTime(2022, 1, 1), buildTime(2022, 2, 25)})); // invoke PageResult<ProductBrandDO> pageResult = brandService.getBrandPage(reqVO); // assert assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbBrand, pageResult.getList().get(0)); }
@Override public Long createMailTemplate(MailTemplateSaveReqVO createReqVO) { // validate that the code is unique validateCodeUnique(null, createReqVO.getCode()); // insert MailTemplateDO template = BeanUtils.toBean(createReqVO, MailTemplateDO.class) .setParams(parseTemplateContentParams(createReqVO.getContent())); mailTemplateMapper.insert(template); return template.getId(); }
@Test public void testCreateMailTemplate_success() { // prepare parameters MailTemplateSaveReqVO reqVO = randomPojo(MailTemplateSaveReqVO.class) .setId(null); // prevent id from being set // invoke Long mailTemplateId = mailTemplateService.createMailTemplate(reqVO); // assert assertNotNull(mailTemplateId); // verify that the record's attributes are correct MailTemplateDO mailTemplate = mailTemplateMapper.selectById(mailTemplateId); assertPojoEquals(reqVO, mailTemplate, "id"); }
public void handleSnapshot(MetadataImage image, KRaftMigrationOperationConsumer operationConsumer) { handleTopicsSnapshot(image.topics(), operationConsumer); handleConfigsSnapshot(image.configs(), operationConsumer); handleClientQuotasSnapshot(image.clientQuotas(), image.scram(), operationConsumer); handleProducerIdSnapshot(image.producerIds(), operationConsumer); handleAclsSnapshot(image.acls(), operationConsumer); handleDelegationTokenSnapshot(image.delegationTokens(), operationConsumer); }
@Test public void testExtraneousZkPartitions() { CapturingTopicMigrationClient topicClient = new CapturingTopicMigrationClient() { @Override public void iterateTopics(EnumSet<TopicVisitorInterest> interests, TopicVisitor visitor) { Map<Integer, List<Integer>> assignments = new HashMap<>(); assignments.put(0, Arrays.asList(2, 3, 4)); assignments.put(1, Arrays.asList(3, 4, 5)); assignments.put(2, Arrays.asList(2, 4, 5)); assignments.put(3, Arrays.asList(1, 2, 3)); // This one is not in KRaft visitor.visitTopic("foo", TopicsImageTest.FOO_UUID, assignments); // Skip partition 1, visit 3 (the extra one) IntStream.of(0, 2, 3).forEach(partitionId -> visitor.visitPartition( new TopicIdPartition(TopicsImageTest.FOO_UUID, new TopicPartition("foo", partitionId)), TopicsImageTest.IMAGE1.getPartition(TopicsImageTest.FOO_UUID, partitionId) ) ); } }; CapturingConfigMigrationClient configClient = new CapturingConfigMigrationClient(); CapturingMigrationClient migrationClient = CapturingMigrationClient.newBuilder() .setBrokersInZk(0) .setTopicMigrationClient(topicClient) .setConfigMigrationClient(configClient) .build(); KRaftMigrationZkWriter writer = new KRaftMigrationZkWriter(migrationClient, __ -> { }); MetadataImage image = new MetadataImage( MetadataProvenance.EMPTY, FeaturesImage.EMPTY, ClusterImage.EMPTY, TopicsImageTest.IMAGE1, // This includes "foo" with 3 partitions ConfigurationsImage.EMPTY, ClientQuotasImage.EMPTY, ProducerIdsImage.EMPTY, AclsImage.EMPTY, ScramImage.EMPTY, DelegationTokenImage.EMPTY ); writer.handleSnapshot(image, (opType, opLog, operation) -> operation.apply(ZkMigrationLeadershipState.EMPTY) ); assertEquals(topicClient.updatedTopics.get("foo").size(), 3); assertEquals(topicClient.deletedTopicPartitions.get("foo"), Collections.singleton(3)); assertEquals(topicClient.updatedTopicPartitions.get("foo"), Collections.singleton(1)); }
@Override public LSInput resolveResource(String type, String namespaceURI, String publicId, String systemId, String baseURI) { log.debug("Resolving resource with systemId: {}", systemId); InputStream resourceStream = null; if (LOCAL_RESOLUTIONS.containsKey(systemId)) { log.debug("Got a local resolution, loading it from classloader"); resourceStream = this.getClass().getClassLoader().getResourceAsStream(LOCAL_RESOLUTIONS.get(systemId)); } else if (baseResourceURL != null && !systemId.startsWith("http://") && !systemId.startsWith("https://")) { // relative reference: starts with neither absolute scheme log.debug("Got a baseResourceURL defined and relative reference, loading it from URL"); String sanitizedSystemId = systemId; if (systemId.startsWith("./")) { sanitizedSystemId = systemId.substring(2); } try { URL resourceURL = new URL(baseResourceURL + (baseResourceURL.endsWith("/") ? "" : "/") + sanitizedSystemId); resourceStream = resourceURL.openStream(); } catch (Exception e) { log.error("Failed to open stream on {}/{}", baseResourceURL, sanitizedSystemId, e); } } if (resourceStream != null) { Input input = new Input(); input.setSystemId(systemId); input.setPublicId(publicId); input.setBaseURI(baseURI); input.setCharacterStream(new InputStreamReader(resourceStream)); return input; } // Let default behaviour happen! return null; }
@Test void testResolveResourceWithNonExistentResource() { String systemId = "non-existent.xsd"; LSInput lsInput = resolver.resolveResource(null, null, null, systemId, null); assertNull(lsInput); }
public void encode(final ByteBuf in, final ByteBuf out, final int length) { // Write the preamble length to the output buffer for (int i = 0;; i ++) { int b = length >>> i * 7; if ((b & 0xFFFFFF80) != 0) { out.writeByte(b & 0x7f | 0x80); } else { out.writeByte(b); break; } } int inIndex = in.readerIndex(); final int baseIndex = inIndex; int hashTableSize = MathUtil.findNextPositivePowerOfTwo(length); hashTableSize = Math.min(hashTableSize, MAX_HT_SIZE); final short[] table = getHashTable(hashTableSize); final int shift = Integer.numberOfLeadingZeros(hashTableSize) + 1; int nextEmit = inIndex; if (length - inIndex >= MIN_COMPRESSIBLE_BYTES) { int nextHash = hash(in, ++inIndex, shift); outer: while (true) { int skip = 32; int candidate; int nextIndex = inIndex; do { inIndex = nextIndex; int hash = nextHash; int bytesBetweenHashLookups = skip++ >> 5; nextIndex = inIndex + bytesBetweenHashLookups; // We need at least 4 remaining bytes to read the hash if (nextIndex > length - 4) { break outer; } nextHash = hash(in, nextIndex, shift); // equivalent to Short.toUnsignedInt // use unsigned short cast to avoid loss precision when 32767 <= length <= 65355 candidate = baseIndex + ((int) table[hash]) & 0xffff; table[hash] = (short) (inIndex - baseIndex); } while (in.getInt(inIndex) != in.getInt(candidate)); encodeLiteral(in, out, inIndex - nextEmit); int insertTail; do { int base = inIndex; int matched = 4 + findMatchingLength(in, candidate + 4, inIndex + 4, length); inIndex += matched; int offset = base - candidate; encodeCopy(out, offset, matched); in.readerIndex(in.readerIndex() + matched); insertTail = inIndex - 1; nextEmit = inIndex; if (inIndex >= length - 4) { break outer; } int prevHash = hash(in, insertTail, shift); table[prevHash] = (short) (inIndex - baseIndex - 1); int currentHash = hash(in, insertTail + 1, shift); candidate = baseIndex + table[currentHash]; table[currentHash] = (short) (inIndex - baseIndex); } while (in.getInt(insertTail + 1) == in.getInt(candidate)); nextHash = hash(in, insertTail + 2, shift); ++inIndex; } } // If there are any remaining characters, write them out as a literal if (nextEmit < length) { encodeLiteral(in, out, length - nextEmit); } }
@Test public void encodeShortTextIsLiteral() throws Exception { ByteBuf in = Unpooled.wrappedBuffer(new byte[] { 0x6e, 0x65, 0x74, 0x74, 0x79 }); ByteBuf out = Unpooled.buffer(7); snappy.encode(in, out, 5); ByteBuf expected = Unpooled.wrappedBuffer(new byte[] { 0x05, // preamble length 0x04 << 2, // literal tag + length 0x6e, 0x65, 0x74, 0x74, 0x79 // "netty" }); assertEquals(expected, out, "Encoded literal was invalid"); in.release(); out.release(); expected.release(); }
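The preamble loop in encode() writes the uncompressed length as a little-endian base-128 varint (7 payload bits per byte, continuation bit 0x80), which is why the 5-byte input yields the single preamble byte 0x05 in the expected buffer above. A self-contained sketch of the same scheme (the helper name is made up):
static void writeVarint(java.io.ByteArrayOutputStream out, int length) {
    while ((length & 0xFFFFFF80) != 0) { // more than 7 bits remain
        out.write((length & 0x7F) | 0x80); // emit 7 bits with the continuation flag set
        length >>>= 7;
    }
    out.write(length); // final byte, continuation flag clear
}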
@Override public void remove(NamedNode master) { connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName()); }
@Test public void testRemove() { Collection<RedisServer> masters = connection.masters(); connection.remove(masters.iterator().next()); }
@Override public Set<String> getTopicClusterList( final String topic) throws InterruptedException, MQBrokerException, MQClientException, RemotingException { return this.defaultMQAdminExtImpl.getTopicClusterList(topic); }
@Test public void testGetTopicClusterList() throws InterruptedException, RemotingException, MQClientException, MQBrokerException { Set<String> result = defaultMQAdminExt.getTopicClusterList("unit-test"); assertThat(result.size()).isEqualTo(0); }
public Result combine(Result other) { return new Result(this.isPass() && other.isPass(), this.isDescend() && other.isDescend()); }
@Test public void equalsPass() { Result one = Result.PASS; Result two = Result.PASS.combine(Result.PASS); assertEquals(one, two); }
static SearchProtocol.SearchRequest convertFromQuery(Query query, int hits, String serverId, double requestTimeout) { var builder = SearchProtocol.SearchRequest.newBuilder().setHits(hits).setOffset(query.getOffset()) .setTimeout((int) (requestTimeout * 1000)); var documentDb = query.getModel().getDocumentDb(); if (documentDb != null) { builder.setDocumentType(documentDb); } GrowableByteBuffer scratchPad = threadLocalBuffer.get(); builder.setQueryTreeBlob(serializeQueryTree(query.getModel().getQueryTree(), scratchPad)); if (query.getGroupingSessionCache() || query.getRanking().getQueryCache()) { // TODO verify that the session key is included whenever rank properties would have been builder.setSessionKey(query.getSessionId(serverId).toString()); } if (query.properties().getBoolean(Model.ESTIMATE)) { builder.setHits(0); } if (GroupingExecutor.hasGroupingList(query)) { List<Grouping> groupingList = GroupingExecutor.getGroupingList(query); scratchPad.clear(); BufferSerializer gbuf = new BufferSerializer(scratchPad); gbuf.putInt(null, groupingList.size()); for (Grouping g : groupingList) { g.serialize(gbuf); } gbuf.getBuf().flip(); builder.setGroupingBlob(ByteString.copyFrom(gbuf.getBuf().getByteBuffer())); } if (query.getGroupingSessionCache()) { builder.setCacheGrouping(true); } int traceLevel = getTraceLevelForBackend(query); builder.setTraceLevel(traceLevel); builder.setProfileDepth(query.getTrace().getProfileDepth()); if (traceLevel > 0) { mergeToSearchRequestFromProfiling(query.getTrace().getProfiling(), builder); } mergeToSearchRequestFromRanking(query.getRanking(), scratchPad, builder); return builder.build(); }
@Test void only_set_profiling_parameters_are_serialized_in_search_request() { var q = new Query("?query=test&trace.level=1&" + "trace.profiling.matching.depth=3"); var req = ProtobufSerialization.convertFromQuery(q, 1, "serverId", 0.5); assertEquals(3, req.getProfiling().getMatch().getDepth()); assertFalse(req.getProfiling().hasFirstPhase()); assertFalse(req.getProfiling().hasSecondPhase()); }
public static void checkLiteralOverflowInBinaryStyle(BigDecimal value, ScalarType scalarType) throws AnalysisException { int realPrecision = getRealPrecision(value); int realScale = getRealScale(value); BigInteger underlyingInt = value.setScale(scalarType.getScalarScale(), RoundingMode.HALF_UP).unscaledValue(); int numBytes = scalarType.getPrimitiveType().getTypeSize(); // In BE, Overflow checking uses maximum/minimum binary values instead of maximum/minimum decimal values. // for instance: if PrimitiveType is decimal32, then 2147483647/-2147483648 is used instead of // 999999999/-999999999. BigInteger maxBinary = BigInteger.ONE.shiftLeft(numBytes * 8 - 1).subtract(BigInteger.ONE); BigInteger minBinary = BigInteger.ONE.shiftLeft(numBytes * 8 - 1).negate(); if (underlyingInt.compareTo(minBinary) < 0 || underlyingInt.compareTo(maxBinary) > 0) { String errMsg = String.format( "Typed decimal literal(%s) is overflow, value='%s' (precision=%d, scale=%d)", scalarType.toString(), value.toPlainString(), realPrecision, realScale); throw new AnalysisException(errMsg); } }
@Test public void testCheckLiteralOverflowSuccess() throws AnalysisException { BigDecimal decimal32Values[] = { new BigDecimal("2147483.647"), new BigDecimal("2147483.6474"), new BigDecimal("2147483.6465"), new BigDecimal("2147483.0001"), new BigDecimal("0.0001"), new BigDecimal("0.0"), new BigDecimal("-2147483.648"), new BigDecimal("-2147483.6484"), new BigDecimal("-2147483.6475"), new BigDecimal("-0.0001"), }; ScalarType decimal32p4s3 = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL32, 4, 3); for (BigDecimal dec32 : decimal32Values) { try { DecimalLiteral.checkLiteralOverflowInBinaryStyle(dec32, decimal32p4s3); } catch (Exception ignored) { Assert.fail("should not throw exception"); } } BigDecimal decimal64Values[] = { new BigDecimal("9223372036854.775807"), new BigDecimal("9223372036854.7758074"), new BigDecimal("9223372036854.7758065"), new BigDecimal("-9223372036854.775808"), new BigDecimal("-9223372036854.7758084"), new BigDecimal("-9223372036854.7758079"), new BigDecimal("-0.000001"), new BigDecimal("0.000001"), new BigDecimal("0.0"), }; ScalarType decimal64p10s6 = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL64, 10, 6); for (BigDecimal dec64 : decimal64Values) { try { DecimalLiteral.checkLiteralOverflowInBinaryStyle(dec64, decimal64p10s6); } catch (Exception ignored) { Assert.fail("should not throw exception"); } } BigDecimal decimal128Values[] = { new BigDecimal("1701411834604692317316873037.15884105727"), new BigDecimal("1701411834604692317316873037.158841057274"), new BigDecimal("1701411834604692317316873037.158841057265"), new BigDecimal("-1701411834604692317316873037.15884105728"), new BigDecimal("-1701411834604692317316873037.158841057284"), new BigDecimal("-1701411834604692317316873037.158841057275"), new BigDecimal("-0.00000000001"), new BigDecimal("0.00000000001"), new BigDecimal("0.0"), }; ScalarType decimal128p36s11 = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 36, 11); for (BigDecimal dec128 : decimal128Values) { try { DecimalLiteral.checkLiteralOverflowInBinaryStyle(dec128, decimal128p36s11); } catch (Exception ignored) { Assert.fail("should not throw exception"); } } }
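Because the check is against binary rather than decimal bounds, a DECIMAL32 (4 bytes) accepts any unscaled value in [-2^31, 2^31 - 1]: 2147483.647 at scale 3 has unscaled value 2147483647, exactly the binary maximum, even though precision 4 and scale 3 would nominally cap the value at 9.999. The bound computation, isolated (illustrative helper):
static boolean fitsBinaryBounds(java.math.BigInteger unscaled, int numBytes) {
    java.math.BigInteger max = java.math.BigInteger.ONE.shiftLeft(numBytes * 8 - 1).subtract(java.math.BigInteger.ONE); // 2^(8n-1) - 1
    java.math.BigInteger min = max.add(java.math.BigInteger.ONE).negate(); // -2^(8n-1)
    return unscaled.compareTo(min) >= 0 && unscaled.compareTo(max) <= 0;
}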
@PostMapping public ResponseEntity<Calificacion> guardarCalificacion (@RequestBody Calificacion calificacion) { return ResponseEntity.status(HttpStatus.CREATED).body(calificacionService.saveCalificacion(calificacion)); }
@Test void testGuardarCalificacion() throws Exception { when(calificacionService.saveCalificacion(ArgumentMatchers.any(Calificacion.class))).thenReturn(calificacion1); mockMvc.perform(post("/calificaciones") .contentType(MediaType.APPLICATION_JSON) .content(objectMapper.writeValueAsString(calificacion1))) .andExpect(status().isCreated()) .andExpect(jsonPath("$.id", is(calificacion1.getId()))) .andExpect(jsonPath("$.usuarioId", is(calificacion1.getUsuarioId()))) .andExpect(jsonPath("$.hotelId", is(calificacion1.getHotelId()))) .andExpect(jsonPath("$.calificacion", is(calificacion1.getCalificacion()))) .andExpect(jsonPath("$.observaciones", is(calificacion1.getObservaciones()))); verify(calificacionService, times(1)).saveCalificacion(ArgumentMatchers.any(Calificacion.class)); }
public final Scheduler scheduler() { return scheduler; }
@Test public void test_scheduler() { Reactor reactor = newReactor(); assertNotNull(reactor.scheduler()); }
@Override public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return true; } try { new SFTPAttributesFinderFeature(session).find(file, listener); return true; } catch(NotfoundException e) { // We expect SSH_FXP_STATUS if the file is not found return false; } }
@Test public void testFindDirectory() throws Exception { assertTrue(new SFTPFindFeature(session).find(new SFTPHomeDirectoryService(session).find())); }
@Override public void isNotEqualTo(@Nullable Object expected) { super.isNotEqualTo(expected); }
@Test public void isNotEqualTo_WithoutToleranceParameter_Success_NotAnArray() { assertThat(array(2.2f, 3.3f, 4.4f)).isNotEqualTo(new Object()); }
public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; }
@Test public void testGetPort() { // valid assertEquals(8000, getPort("127.0.0.1:8000").intValue()); assertEquals(8080, getPort("mydomain.com:8080").intValue()); assertEquals(8080, getPort("MyDomain.com:8080").intValue()); assertEquals(1234, getPort("[::1]:1234").intValue()); assertEquals(5678, getPort("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678").intValue()); assertEquals(5678, getPort("[2001:DB8:85A3:8D3:1319:8A2E:370:7348]:5678").intValue()); assertEquals(5678, getPort("[fe80::b1da:69ca:57f7:63d8%3]:5678").intValue()); // invalid assertNull(getPort("host:-92")); assertNull(getPort("host:-9-2")); assertNull(getPort("host:92-")); assertNull(getPort("host:9-2")); }
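Since getPort returns null instead of throwing on input that does not match HOST_PORT_PATTERN, call sites must guard before unboxing; the .intValue() calls in the test are only safe because those inputs are known to be valid. A usage sketch (the fallback port is made up):
Integer port = getPort(address);
int effectivePort = (port != null) ? port : 9092; // hypothetical default; avoids a NullPointerException on unboxing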
public static int isOctal(String str) { if (str == null || str.isEmpty()) { return -1; } return str.matches("[0-7]+") ? OCTAL_RADIX : -1; }
@Test public void isOctal_Test() { Assertions.assertEquals(8, TbUtils.isOctal("4567734")); Assertions.assertEquals(-1, TbUtils.isOctal("8100110")); }
public static boolean isEmail(String email) { return isMatch(EMAIL_REGEX, email); }
@Test public void testEmail() { Assert.assertEquals(false, PatternKit.isEmail("abcd")); Assert.assertEquals(true, PatternKit.isEmail("hellokaton@gmail.com")); Assert.assertEquals(true, PatternKit.isEmail("1234@q.com")); }
public static <T> PTransform<PCollection<T>, PCollection<KV<T, Long>>> perElement() { return new PerElement<>(); }
@Test @Category(NeedsRunner.class) @SuppressWarnings("unchecked") public void testCountPerElementEmpty() { PCollection<String> input = p.apply(Create.of(NO_LINES).withCoder(StringUtf8Coder.of())); PCollection<KV<String, Long>> output = input.apply(Count.perElement()); PAssert.that(output).empty(); p.run(); }
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header) { final byte flags = header.flags(); if ((flags & UNFRAGMENTED) == UNFRAGMENTED) { delegate.onFragment(buffer, offset, length, header); } else { handleFragment(buffer, offset, length, header, flags); } }
@Test void shouldSkipOverMessagesWithLoss() { when(header.flags()) .thenReturn(FrameDescriptor.BEGIN_FRAG_FLAG) .thenReturn(FrameDescriptor.END_FRAG_FLAG) .thenReturn(FrameDescriptor.UNFRAGMENTED); final UnsafeBuffer srcBuffer = new UnsafeBuffer(new byte[2048]); final int length = 256; int offset = HEADER_LENGTH; assembler.onFragment(srcBuffer, offset, length, header); offset = BitUtil.align(offset + length + HEADER_LENGTH, FRAME_ALIGNMENT); offset = BitUtil.align(offset + length + HEADER_LENGTH, FRAME_ALIGNMENT); offset = BitUtil.align(offset + length + HEADER_LENGTH, FRAME_ALIGNMENT); assembler.onFragment(srcBuffer, offset, length, header); offset = BitUtil.align(offset + length + HEADER_LENGTH, FRAME_ALIGNMENT); assembler.onFragment(srcBuffer, offset, length, header); final ArgumentCaptor<Header> headerArg = ArgumentCaptor.forClass(Header.class); verify(delegateFragmentHandler, times(1)).onFragment( any(), eq(offset), eq(length), headerArg.capture()); final Header capturedHeader = headerArg.getValue(); assertEquals(SESSION_ID, capturedHeader.sessionId()); assertEquals(FrameDescriptor.UNFRAGMENTED, capturedHeader.flags()); }
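The dispatch in onFragment hinges on UNFRAGMENTED being both fragment bits at once, so a frame carrying BEGIN and END together is delivered directly while anything else goes through reassembly; the three stubbed flag values above walk exactly that branch. A restatement with the values spelled out (a sketch, assuming Aeron's usual BEGIN_FRAG_FLAG = 0x80 and END_FRAG_FLAG = 0x40):
static boolean isWholeMessage(byte flags) {
    final byte UNFRAGMENTED = (byte) (0x80 | 0x40); // BEGIN_FRAG_FLAG | END_FRAG_FLAG
    return (flags & UNFRAGMENTED) == UNFRAGMENTED;
}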
static void parseServerIpAndPort(MysqlConnection connection, Span span) { try { URI url = URI.create(connection.getURL().substring(5)); // strip "jdbc:" String remoteServiceName = connection.getProperties().getProperty("zipkinServiceName"); if (remoteServiceName == null || "".equals(remoteServiceName)) { String databaseName = getDatabaseName(connection); if (databaseName != null && !databaseName.isEmpty()) { remoteServiceName = "mysql-" + databaseName; } else { remoteServiceName = "mysql"; } } span.remoteServiceName(remoteServiceName); String host = getHost(connection); if (host != null) { span.remoteIpAndPort(host, url.getPort() == -1 ? 3306 : url.getPort()); } } catch (Exception e) { // remote address is optional } }
@Test void parseServerIpAndPort_ipFromHost_portFromUrl() throws SQLException { setupAndReturnPropertiesForHost("1.2.3.4"); TracingStatementInterceptor.parseServerIpAndPort(connection, span); verify(span).remoteServiceName("mysql"); verify(span).remoteIpAndPort("1.2.3.4", 5555); }
public Future<KafkaVersionChange> reconcile() { return getPods() .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
@Test public void testDowngradeWithUnsetDesiredMetadataVersion(VertxTestContext context) { VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), null), mockRos(mockUniformPods(VERSIONS.defaultVersion().version())) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.defaultVersion())); assertThat(c.to(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION))); assertThat(c.metadataVersion(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion())); async.flag(); }))); }
@Override protected ExecuteContext doBefore(ExecuteContext context) { if (!registerConfig.isOpenMigration() || RegisterDynamicConfig.INSTANCE.isNeedCloseOriginRegisterCenter()) { context.skip(null); } return context; }
@Test public void doBefore() throws NoSuchMethodException { registerConfig.setOpenMigration(true); final ExecuteContext context = interceptor .doBefore(ExecuteContext.forMemberMethod(this, String.class.getDeclaredMethod("trim"), null, null, null)); Assert.assertFalse(context.isSkip()); registerConfig.setOpenMigration(false); final ExecuteContext openContext = interceptor.doBefore(context); Assert.assertTrue(openContext.isSkip()); }
public Distance lateralDistance() { return point1.latLong().distanceTo(point2.latLong()); }
@Test public void testLateralDistance() { Point p1 = Point.builder() .latLong(LatLong.of(0.0, 0.0)) .time(EPOCH) .build(); Point p2 = Point.builder() .latLong(p1.latLong().projectOut(0.0, 10.0)) //move 10 NM North .time(EPOCH) .build(); PointPair pair = new PointPair(p1, p2); Distance expectedDist = Distance.ofNauticalMiles(10.0); Distance actualDist = pair.lateralDistance(); Distance error = actualDist.minus(expectedDist).abs(); Distance tolerance = Distance.ofNauticalMiles(0.001); assertTrue(error.isLessThan(tolerance)); }
@Override @Nullable public V put(@Nullable K key, @Nullable V value) { return put(key, value, true); }
@Test void shouldContainValue() { assertFalse(this.map.containsValue("123")); assertFalse(this.map.containsValue(null)); this.map.put(123, "123"); this.map.put(456, null); assertTrue(this.map.containsValue("123")); assertTrue(this.map.containsValue(null)); }
@Override public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException { try { if(status.isExists()) { if(log.isWarnEnabled()) { log.warn(String.format("Delete file %s to be replaced with %s", renamed, file)); } new DropboxDeleteFeature(session).delete(Collections.singletonMap(renamed, status), connectionCallback, callback); } final RelocationResult result = new DbxUserFilesRequests(session.getClient(file)).moveV2(containerService.getKey(file), containerService.getKey(renamed)); return renamed.withAttributes(new DropboxAttributesFinderFeature(session).toAttributes(result.getMetadata())); } catch(DbxException e) { throw new DropboxExceptionMappingService().map("Cannot move {0}", e, file); } }
@Test public void testMoveDirectory() throws Exception { final Path home = new DefaultHomeFinderService(session).find(); final Path directory = new DropboxDirectoryFeature(session).mkdir(new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertTrue(new DropboxFindFeature(session).find(directory)); assertTrue(new DefaultFindFeature(session).find(directory)); final Path target = new DropboxMoveFeature(session).move(directory, new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback()); assertFalse(new DropboxFindFeature(session).find(directory)); assertTrue(new DropboxFindFeature(session).find(target)); assertTrue(new DefaultFindFeature(session).find(target)); assertEquals(target.attributes(), new DropboxAttributesFinderFeature(session).find(target)); new DropboxDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@Override public void doRun() { final Instant mustBeOlderThan = Instant.now().minus(maximumSearchAge); searchDbService.getExpiredSearches(findReferencedSearchIds(), mustBeOlderThan).forEach(searchDbService::delete); }
@Test public void testForMixedReferencedNonReferencedExpiredAndNonexpiredSearches() { final ViewSummaryDTO view = ViewSummaryDTO.builder() .title("my-view") .searchId(IN_USE_SEARCH_ID) .build(); when(viewService.streamAll()).thenReturn(Stream.of(view)); final SearchSummary search1 = SearchSummary.builder() .id(IN_USE_SEARCH_ID) .createdAt(DateTime.now(DateTimeZone.UTC).minus(Duration.standardDays(30))) .build(); final SearchSummary search2 = SearchSummary.builder() .createdAt(DateTime.now(DateTimeZone.UTC).minus(Duration.standardHours(2))) .build(); final SearchSummary search3 = SearchSummary.builder() .id("This search is expired and should be deleted") .createdAt(DateTime.now(DateTimeZone.UTC).minus(Duration.standardDays(30))) .build(); when(searchDbService.findSummaries()).thenReturn(Stream.of(search1, search2, search3)); when(searchDbService.getExpiredSearches(any(), any())).thenCallRealMethod(); this.searchesCleanUpJob.doRun(); final ArgumentCaptor<String> deletedSearchId = ArgumentCaptor.forClass(String.class); verify(searchDbService, times(1)).delete(deletedSearchId.capture()); assertThat(deletedSearchId.getValue()).isEqualTo("This search is expired and should be deleted"); }
@Override protected URI getOrigin(final Path container, final Distribution.Method method) { final URI url = URI.create(String.format("%s%s", new DefaultWebUrlProvider().toUrl(origin).getUrl(), PathNormalizer.normalize(origin.getDefaultPath(), true))); if(log.isDebugEnabled()) { log.debug(String.format("Use origin %s for distribution %s", url, method)); } return url; }
@Test public void testGetOrigin() { final Host origin = new Host(new TestProtocol(), "m"); final Path container = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)); origin.setWebURL("http://w.example.net"); final CustomOriginCloudFrontDistributionConfiguration configuration = new CustomOriginCloudFrontDistributionConfiguration(origin, new DefaultX509TrustManager(), new DefaultX509KeyManager()); assertEquals("w.example.net", configuration.getOrigin(container, Distribution.CUSTOM).getHost()); origin.setWebURL(null); assertEquals("m", configuration.getOrigin(container, Distribution.CUSTOM).getHost()); origin.setWebURL("f"); assertEquals("f", configuration.getOrigin(container, Distribution.CUSTOM).getHost()); }
public static boolean columnEquals(Column base, Column other) { if (base == other) { return true; } if (!base.getName().equalsIgnoreCase(other.getName())) { return false; } if (!base.getType().equals(other.getType())) { return false; } return true; }
@Test public void testColumnEquals() { Column base = new Column("k1", Type.INT, false); Column other = new Column("k1", Type.INT, false); Assert.assertTrue(columnEquals(base, base)); Assert.assertTrue(columnEquals(base, other)); other = new Column("k2", Type.INT, false); Assert.assertFalse(columnEquals(base, other)); other = new Column("k1", Type.STRING, false); Assert.assertFalse(columnEquals(base, other)); base = new Column("k1", ScalarType.createCharType(5), false); other = new Column("k1", ScalarType.createCharType(10), false); Assert.assertFalse(columnEquals(base, other)); base = new Column("k1", ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 5, 5), false); other = new Column("k1", ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 6, 5), false); Assert.assertFalse(columnEquals(base, other)); base = new Column("k1", ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 5, 5), false); other = new Column("k1", ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 5, 4), false); Assert.assertFalse(columnEquals(base, other)); }
@Override public Page download(Request request, Task task) { if (task == null || task.getSite() == null) { throw new NullPointerException("task or site can not be null"); } CloseableHttpResponse httpResponse = null; CloseableHttpClient httpClient = getHttpClient(task.getSite()); Proxy proxy = proxyProvider != null ? proxyProvider.getProxy(request, task) : null; HttpClientRequestContext requestContext = httpUriRequestConverter.convert(request, task.getSite(), proxy); Page page = Page.fail(request); try { httpResponse = httpClient.execute(requestContext.getHttpUriRequest(), requestContext.getHttpClientContext()); page = handleResponse(request, request.getCharset() != null ? request.getCharset() : task.getSite().getCharset(), httpResponse, task); onSuccess(page, task); return page; } catch (IOException e) { onError(page, task, e); return page; } finally { if (httpResponse != null) { //ensure the connection is released back to pool EntityUtils.consumeQuietly(httpResponse.getEntity()); } if (proxyProvider != null && proxy != null) { proxyProvider.returnProxy(proxy, page, task); } } }
@Test public void test_set_site_header() throws Exception { HttpServer server = httpServer(13423); server.get(eq(header("header"), "header-webmagic")).response("ok"); Runner.running(server, new Runnable() { @Override public void run() throws Exception { HttpClientDownloader httpClientDownloader = new HttpClientDownloader(); Request request = new Request(); request.setUrl("http://127.0.0.1:13423"); Page page = httpClientDownloader.download(request, Site.me().addHeader("header","header-webmagic").toTask()); assertThat(page.getRawText()).isEqualTo("ok"); } }); }
public static PositionBound at(final Position position) { return new PositionBound(position); }
@Test public void shouldEqualSelf() { final PositionBound bound1 = PositionBound.at(Position.emptyPosition()); assertEquals(bound1, bound1); }
public static ReadAll readAll() { return new ReadAll(); }
@Test public void testReadAll() throws Exception { SolrIOTestUtils.insertTestDocuments(SOLR_COLLECTION, NUM_DOCS, solrClient); PCollection<SolrDocument> output = pipeline .apply( Create.of( SolrIO.read() .withConnectionConfiguration(connectionConfiguration) .from(SOLR_COLLECTION) .withBatchSize(101))) .apply(SolrIO.readAll()); PAssert.thatSingleton(output.apply("Count", Count.globally())).isEqualTo(NUM_DOCS); pipeline.run(); }
public ClassloaderBuilder newClassloader(String key) { return newClassloader(key, getSystemClassloader()); }
@Test public void fail_to_create_the_same_classloader_twice() throws Exception { sut.newClassloader("the-cl"); try { sut.newClassloader("the-cl"); fail(); } catch (IllegalStateException e) { assertThat(e).hasMessage("The classloader 'the-cl' already exists. Can not create it twice."); } }
@Override public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream, final ValueJoiner<? super V, ? super VO, ? extends VR> joiner, final JoinWindows windows) { return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows); }
@Test public void shouldNotAllowNullTableOnLeftJoinWithGlobalTableWithNamed() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.leftJoin( null, MockMapper.selectValueMapper(), MockValueJoiner.TOSTRING_JOINER, Named.as("name"))); assertThat(exception.getMessage(), equalTo("globalTable can't be null")); }
@Override public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return true; } try { new SFTPAttributesFinderFeature(session).find(file, listener); return true; } catch(NotfoundException e) { // We expect SSH_FXP_STATUS if the file is not found return false; } }
@Test public void testFindNotFound() throws Exception { assertFalse(new SFTPFindFeature(session).find(new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)))); }
public static DaysWindows days(int number) { return new DaysWindows(number, DEFAULT_START_DATE, DateTimeZone.UTC); }
@Test public void testTimeZone() throws Exception { Map<IntervalWindow, Set<String>> expected = new HashMap<>(); DateTimeZone timeZone = DateTimeZone.forID("America/Los_Angeles"); final List<Long> timestamps = Arrays.asList( new DateTime(2014, 1, 1, 0, 0, timeZone).getMillis(), new DateTime(2014, 1, 1, 23, 59, timeZone).getMillis(), new DateTime(2014, 1, 2, 8, 0, DateTimeZone.UTC).getMillis(), new DateTime(2014, 1, 3, 7, 59, DateTimeZone.UTC).getMillis()); expected.put( new IntervalWindow( new DateTime(2014, 1, 1, 0, 0, timeZone).toInstant(), new DateTime(2014, 1, 2, 0, 0, timeZone).toInstant()), set(timestamps.get(0), timestamps.get(1))); expected.put( new IntervalWindow( new DateTime(2014, 1, 2, 0, 0, timeZone).toInstant(), new DateTime(2014, 1, 3, 0, 0, timeZone).toInstant()), set(timestamps.get(2), timestamps.get(3))); assertEquals(expected, runWindowFn(CalendarWindows.days(1).withTimeZone(timeZone), timestamps)); }
static void quoteExternalName(StringBuilder sb, String externalName) { List<String> parts = splitByNonQuotedDots(externalName); for (int i = 0; i < parts.size(); i++) { String unescaped = unescapeQuotes(parts.get(i)); String unquoted = unquoteIfQuoted(unescaped); DIALECT.quoteIdentifier(sb, unquoted); if (i < parts.size() - 1) { sb.append("."); } } }
@Test public void quoteExternalName_with_2dots() { String externalName = "catalog.custom_schema.my_table"; StringBuilder sb = new StringBuilder(); MappingHelper.quoteExternalName(sb, externalName); assertThat(sb.toString()).isEqualTo("\"catalog\".\"custom_schema\".\"my_table\""); }
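quoteExternalName splits only on dots outside quotes, so a dot inside a quoted segment survives as part of the identifier. A usage sketch, assuming splitByNonQuotedDots treats double-quoted spans as atomic (the input is made up):
StringBuilder sb = new StringBuilder();
MappingHelper.quoteExternalName(sb, "schema.\"my.table\"");
// expected output: "schema"."my.table" (two identifiers, the embedded dot preserved)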
@Override public void bind() { thread.start(); }
@Test public void bind() throws IOException { ServerTakeHandler handler = new ServerTakeHandler(instance, o -> 1); ServerConnection connection = new SimpleServerConnection(handler); RPCServer rpcServer = new RPCServer(connection, RandomPort::getSafeRandomPort); rpcServer.bind(); while (!rpcServer.isActive()) { LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(1L)); } boolean active = rpcServer.isActive(); Assert.assertTrue(active); rpcServer.close(); }
@Override public Iterable<T> get() { return values; }
@Test public void testGetCached() throws Exception { FakeBeamFnStateClient fakeBeamFnStateClient = new FakeBeamFnStateClient( StringUtf8Coder.of(), ImmutableMap.of(key(), asList("A1", "A2", "A3", "A4", "A5", "A6"))); Cache<?, ?> cache = Caches.eternal(); { // The first side input will populate the cache. IterableSideInput<String> iterableSideInput = new IterableSideInput<>( cache, fakeBeamFnStateClient, "instructionId", key(), StringUtf8Coder.of()); assertArrayEquals( new String[] {"A1", "A2", "A3", "A4", "A5", "A6"}, Iterables.toArray(iterableSideInput.get(), String.class)); } { // The next side input will load all of its contents from the cache. IterableSideInput<String> iterableSideInput = new IterableSideInput<>( cache, requestBuilder -> { throw new IllegalStateException("Unexpected call for test."); }, "instructionId", key(), StringUtf8Coder.of()); assertArrayEquals( new String[] {"A1", "A2", "A3", "A4", "A5", "A6"}, Iterables.toArray(iterableSideInput.get(), String.class)); } }
@Override
public void run() {
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
@Test
void testJsonEmbeddedExpressionFailuresAreNotBlockers() {
    run(
        "def expected = { a: '#number', b: '#(_$.a * 2)' }",
        "def actual = [{a: 1, b: 2}, {a: 2, b: 4}]",
        "match each actual == expected"
    );
}
@SuppressWarnings("dereference.of.nullable") public static PTransform<PCollection<Failure>, PDone> getDlqTransform(String fullConfig) { List<String> strings = Splitter.on(":").limit(2).splitToList(fullConfig); checkArgument( strings.size() == 2, "Invalid config, must start with `identifier:`. %s", fullConfig); String key = strings.get(0); String config = strings.get(1).trim(); GenericDlqProvider provider = PROVIDERS.get(key); checkArgument( provider != null, "Invalid config, no DLQ provider exists with identifier `%s`.", key); return provider.newDlqTransform(config); }
@Test public void testParseFailures() { assertThrows( IllegalArgumentException.class, () -> GenericDlq.getDlqTransform("no colon present")); assertThrows(IllegalArgumentException.class, () -> GenericDlq.getDlqTransform("bad_id:xxx")); assertThrows( IllegalArgumentException.class, () -> GenericDlq.getDlqTransform(StoringDlqProvider.ID + ": not config")); }
@Override
public void deleteDiyPage(Long id) {
    // Validate that the page exists
    validateDiyPageExists(id);
    // Delete it
    diyPageMapper.deleteById(id);
}
@Test
public void testDeleteDiyPage_notExists() {
    // Prepare parameters
    Long id = randomLongId();
    // Invoke and assert the expected exception
    assertServiceException(() -> diyPageService.deleteDiyPage(id), DIY_PAGE_NOT_EXISTS);
}
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
@Test
public void parseFromDeepinTest() {
    // https://gitee.com/dromara/hutool/issues/I50YGY
    final String uaStr = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36";
    final UserAgent ua = UserAgentUtil.parse(uaStr);
    assertEquals("Linux", ua.getOs().toString());
}
@Override
public List<PortStatistics> getPortStatistics(DeviceId deviceId) {
    checkNotNull(deviceId, DEVICE_NULL);
    // TODO not supported at the moment.
    return ImmutableList.of();
}
@Test(expected = NullPointerException.class)
public void testGetPortsStatisticsByNullId() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class);

    // test the getPortStatistics() method using a null device identifier
    deviceService.getPortStatistics(null);
}
public static int incrementSequence(int sequence, int increment) {
    if (sequence > Integer.MAX_VALUE - increment)
        return increment - (Integer.MAX_VALUE - sequence) - 1;
    return sequence + increment;
}
@Test
public void testIncrementSequence() {
    assertEquals(10, DefaultRecordBatch.incrementSequence(5, 5));
    assertEquals(0, DefaultRecordBatch.incrementSequence(Integer.MAX_VALUE, 1));
    assertEquals(4, DefaultRecordBatch.incrementSequence(Integer.MAX_VALUE - 5, 10));
}
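A short worked example may help clarify the wraparound arithmetic above; the values below are illustrative and not part of the Kafka test suite:

// Sketch: producer sequence numbers wrap past Integer.MAX_VALUE back to 0.
// With sequence = Integer.MAX_VALUE - 5 and increment = 10, the wrap branch computes
// increment - (Integer.MAX_VALUE - sequence) - 1 = 10 - 5 - 1 = 4.
int next = DefaultRecordBatch.incrementSequence(Integer.MAX_VALUE - 5, 10);
// Five steps reach MAX_VALUE, the sixth wraps to 0, and the remaining four land on 4.
assert next == 4;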
@Cacheable("digid-app") public boolean digidAppSwitchEnabled(){ return isEnabled("Koppeling met DigiD app"); }
@Test void checkSwitchNotExisting() { var switchObject = createSwitch("other name", "Description van de switch A", SwitchStatus.ALL, 1, ZonedDateTime.now()); when(switchRepository.findByName("other name")).thenReturn(Optional.of(switchObject)); assertFalse(service.digidAppSwitchEnabled()); }
@Override
public String getName() {
    return LocaleFactory.localizedString("Amazon CloudFront", "S3");
}
@Test
public void testGetName() {
    final S3Session session = new S3Session(new Host(new S3Protocol(), new S3Protocol().getDefaultHostname()));
    final DistributionConfiguration configuration = new CloudFrontDistributionConfiguration(
        session, new S3LocationFeature(session), new DisabledX509TrustManager(), new DefaultX509KeyManager());
    assertEquals("Amazon CloudFront", configuration.getName());
}
public static <T> AvroSchema<T> of(SchemaDefinition<T> schemaDefinition) {
    if (schemaDefinition.getSchemaReaderOpt().isPresent() && schemaDefinition.getSchemaWriterOpt().isPresent()) {
        return new AvroSchema<>(schemaDefinition.getSchemaReaderOpt().get(),
                schemaDefinition.getSchemaWriterOpt().get(), parseSchemaInfo(schemaDefinition, SchemaType.AVRO));
    }
    ClassLoader pojoClassLoader = null;
    if (schemaDefinition.getClassLoader() != null) {
        pojoClassLoader = schemaDefinition.getClassLoader();
    } else if (schemaDefinition.getPojo() != null) {
        pojoClassLoader = schemaDefinition.getPojo().getClassLoader();
    }
    return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader);
}
@Test
public void testLogicalType() {
    AvroSchema<SchemaLogicalType> avroSchema = AvroSchema.of(SchemaDefinition.<SchemaLogicalType>builder()
            .withPojo(SchemaLogicalType.class).withJSR310ConversionEnabled(true).build());

    SchemaLogicalType schemaLogicalType = new SchemaLogicalType();
    schemaLogicalType.setTimestampMicros(System.currentTimeMillis() * 1000);
    schemaLogicalType.setTimestampMillis(Instant.parse("2019-03-26T04:39:58.469Z"));
    schemaLogicalType.setDecimal(new BigDecimal("12.34"));
    schemaLogicalType.setDate(LocalDate.now());
    schemaLogicalType.setTimeMicros(System.currentTimeMillis() * 1000);
    schemaLogicalType.setTimeMillis(LocalTime.now().truncatedTo(ChronoUnit.MILLIS));

    byte[] bytes1 = avroSchema.encode(schemaLogicalType);
    Assert.assertTrue(bytes1.length > 0);
    SchemaLogicalType object1 = avroSchema.decode(bytes1);
    assertEquals(object1, schemaLogicalType);
}
public void load() {
    if (fileHome == null || fileHome.isEmpty()) {
        return;
    }

    Map<String, Map<String, PlainAccessResource>> aclPlainAccessResourceMap = new HashMap<>();
    Map<String, String> accessKeyTable = new HashMap<>();
    List<RemoteAddressStrategy> globalWhiteRemoteAddressStrategy = new ArrayList<>();
    Map<String, List<RemoteAddressStrategy>> globalWhiteRemoteAddressStrategyMap = new HashMap<>();
    Map<String, DataVersion> dataVersionMap = new HashMap<>();

    assureAclConfigFilesExist();

    fileList = getAllAclFiles(defaultAclDir);
    if (new File(defaultAclFile).exists() && !fileList.contains(defaultAclFile)) {
        fileList.add(defaultAclFile);
    }

    for (String path : fileList) {
        final String currentFile = MixAll.dealFilePath(path);
        PlainAccessData plainAclConfData = AclUtils.getYamlDataObject(currentFile, PlainAccessData.class);
        if (plainAclConfData == null) {
            log.warn("No data in file {}", currentFile);
            continue;
        }
        log.info("Broker plain acl conf data is : {}", plainAclConfData.toString());

        List<RemoteAddressStrategy> globalWhiteRemoteAddressStrategyList = new ArrayList<>();
        List<String> globalWhiteRemoteAddressesList = plainAclConfData.getGlobalWhiteRemoteAddresses();
        if (globalWhiteRemoteAddressesList != null && !globalWhiteRemoteAddressesList.isEmpty()) {
            for (String address : globalWhiteRemoteAddressesList) {
                globalWhiteRemoteAddressStrategyList.add(remoteAddressStrategyFactory.getRemoteAddressStrategy(address));
            }
        }
        if (!globalWhiteRemoteAddressStrategyList.isEmpty()) {
            globalWhiteRemoteAddressStrategyMap.put(currentFile, globalWhiteRemoteAddressStrategyList);
            globalWhiteRemoteAddressStrategy.addAll(globalWhiteRemoteAddressStrategyList);
        }

        List<PlainAccessConfig> accounts = plainAclConfData.getAccounts();
        Map<String, PlainAccessResource> plainAccessResourceMap = new HashMap<>();
        if (accounts != null && !accounts.isEmpty()) {
            for (PlainAccessConfig plainAccessConfig : accounts) {
                PlainAccessResource plainAccessResource = buildPlainAccessResource(plainAccessConfig);
                // AccessKey can not be defined in multiple ACL files
                if (accessKeyTable.get(plainAccessResource.getAccessKey()) == null) {
                    plainAccessResourceMap.put(plainAccessResource.getAccessKey(), plainAccessResource);
                    accessKeyTable.put(plainAccessResource.getAccessKey(), currentFile);
                } else {
                    log.warn("The accessKey {} is repeated in multiple ACL files", plainAccessResource.getAccessKey());
                }
            }
        }
        if (!plainAccessResourceMap.isEmpty()) {
            aclPlainAccessResourceMap.put(currentFile, plainAccessResourceMap);
        }

        List<PlainAccessData.DataVersion> dataVersions = plainAclConfData.getDataVersion();
        DataVersion dataVersion = new DataVersion();
        if (dataVersions != null && !dataVersions.isEmpty()) {
            DataVersion firstElement = new DataVersion();
            firstElement.setCounter(new AtomicLong(dataVersions.get(0).getCounter()));
            firstElement.setTimestamp(dataVersions.get(0).getTimestamp());
            dataVersion.assignNewOne(firstElement);
        }
        dataVersionMap.put(currentFile, dataVersion);
    }

    if (dataVersionMap.containsKey(defaultAclFile)) {
        this.dataVersion.assignNewOne(dataVersionMap.get(defaultAclFile));
    }
    this.dataVersionMap = dataVersionMap;
    this.globalWhiteRemoteAddressStrategyMap = globalWhiteRemoteAddressStrategyMap;
    this.globalWhiteRemoteAddressStrategy = globalWhiteRemoteAddressStrategy;
    this.aclPlainAccessResourceMap = aclPlainAccessResourceMap;
    this.accessKeyTable = accessKeyTable;
}
@Test
public void loadTest() {
    plainPermissionManager.load();
    final Map<String, DataVersion> map = plainPermissionManager.getDataVersionMap();
    Assertions.assertThat(map).isNotEmpty();
}
public static <T> List<List<T>> split(List<T> list, int size) {
    return partition(list, size);
}
@Test
@Disabled
public void splitBenchTest() {
    final List<String> list = new ArrayList<>();
    CollUtil.padRight(list, RandomUtil.randomInt(1000_0000, 1_0000_0000), "test");

    final int size = RandomUtil.randomInt(10, 1000);
    Console.log("\nlist size: {}", list.size());
    Console.log("partition size: {}\n", size);
    final StopWatch stopWatch = new StopWatch();

    stopWatch.start("CollUtil#split");
    final List<List<String>> CollSplitResult = CollUtil.split(list, size);
    stopWatch.stop();

    stopWatch.start("ListUtil#split");
    final List<List<String>> ListSplitResult = ListUtil.split(list, size);
    stopWatch.stop();

    assertEquals(CollSplitResult, ListSplitResult);
    Console.log(stopWatch.prettyPrint());
}
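For readers unfamiliar with the API, a minimal usage sketch (hypothetical values, assuming the Hutool ListUtil entry point shown above) shows what split produces:

// Sketch: partition a 5-element list into chunks of size 2 (illustrative values).
List<List<Integer>> parts = ListUtil.split(Arrays.asList(1, 2, 3, 4, 5), 2);
// parts -> [[1, 2], [3, 4], [5]]: the last chunk holds the remainder.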
@Override
public double score(int[] truth, int[] prediction) {
    return of(truth, prediction, strategy);
}
@Test
public void test() {
    System.out.println("recall");
    int[] truth = {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };
    int[] prediction = {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };
    Recall instance = new Recall();
    double expResult = 0.8333;
    double result = instance.score(truth, prediction);
    assertEquals(expResult, result, 1E-4);
}
public static long elapsed(long started, long finished) {
    return Times.elapsed(started, finished, true);
}
@Test
void testNegativeStartAndFinishTimes() {
    long elapsed = Times.elapsed(-5, -10, false);
    assertEquals(-1, elapsed, "Elapsed time is not -1");
}
@Override
public void set(BitSet bs) {
    get(setAsync(bs));
}
@Test
public void testSet() {
    RBitSet bs = redisson.getBitSet("testbitset");
    assertThat(bs.set(3)).isFalse();
    assertThat(bs.set(5)).isFalse();
    assertThat(bs.set(5)).isTrue();
    assertThat(bs.toString()).isEqualTo("{3, 5}");

    BitSet bs1 = new BitSet();
    bs1.set(1);
    bs1.set(10);
    bs.set(bs1);

    bs = redisson.getBitSet("testbitset");
    assertThat(bs.toString()).isEqualTo("{1, 10}");

    RBitSet bs2 = redisson.getBitSet("testbitset2");
    bs2.set(new long[]{1L, 3L, 5L, 7L}, true);
    bs2 = redisson.getBitSet("testbitset2");
    assertThat(bs2.toString()).isEqualTo("{1, 3, 5, 7}");

    bs2.set(new long[]{3L, 5L}, false);
    bs2 = redisson.getBitSet("testbitset2");
    assertThat(bs2.toString()).isEqualTo("{1, 7}");
}
@Override
public final int available() {
    return size - pos;
}
@Test
public void testAvailable() {
    assertEquals(in.size - in.pos, in.available());
}
@Override
public void startWatching() {
    if (settings.getProps().valueAsBoolean(ENABLE_STOP_COMMAND.getKey())) {
        super.startWatching();
    }
}
@Test
public void startWatching_does_not_start_thread_if_stop_command_is_disabled() {
    TestAppSettings appSettings = new TestAppSettings();
    StopRequestWatcherImpl underTest = new StopRequestWatcherImpl(appSettings, scheduler, commands);

    underTest.startWatching();

    assertThat(underTest.isAlive()).isFalse();
}
public static UUID createUuid(String input) {
    if (input == null) {
        return UUID.randomUUID();
    }
    return UUID.nameUUIDFromBytes(input.getBytes(StandardCharsets.UTF_8));
}
@Test
public void testCreateUuid() {
    String expected = "7b92bd31-8478-35db-bb77-dfdb3d7260fc";
    Assert.assertEquals(expected, IdHelper.createUuid("test-uuid1").toString());
    expected = "1e98c90b-0adf-3429-9a53-c8620d70fb5d";
    Assert.assertEquals(expected, IdHelper.createUuid("test-uuid2").toString());
}
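Because nameUUIDFromBytes produces a name-based (type 3, MD5) UUID, the mapping is deterministic; a brief sketch of the resulting contract:

// Equal inputs always map to equal UUIDs.
assert IdHelper.createUuid("test-uuid1").equals(IdHelper.createUuid("test-uuid1"));
// A null input falls back to a random (type 4) UUID, so repeated calls differ
// with overwhelming probability.
assert !IdHelper.createUuid(null).equals(IdHelper.createUuid(null));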
ControllerResult<CreateTopicsResponseData> createTopics(
    ControllerRequestContext context,
    CreateTopicsRequestData request,
    Set<String> describable
) {
    Map<String, ApiError> topicErrors = new HashMap<>();
    List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);

    // Check the topic names.
    validateNewTopicNames(topicErrors, request.topics(), topicsWithCollisionChars);

    // Identify topics that already exist and mark them with the appropriate error
    request.topics().stream().filter(creatableTopic -> topicsByName.containsKey(creatableTopic.name()))
        .forEach(t -> topicErrors.put(t.name(),
            new ApiError(Errors.TOPIC_ALREADY_EXISTS, "Topic '" + t.name() + "' already exists.")));

    // Verify that the configurations for the new topics are OK, and figure out what
    // configurations should be created.
    Map<ConfigResource, Map<String, Entry<OpType, String>>> configChanges =
        computeConfigChanges(topicErrors, request.topics());

    // Try to create whatever topics are needed.
    Map<String, CreatableTopicResult> successes = new HashMap<>();
    for (CreatableTopic topic : request.topics()) {
        if (topicErrors.containsKey(topic.name())) continue;
        // Figure out what ConfigRecords should be created, if any.
        ConfigResource configResource = new ConfigResource(TOPIC, topic.name());
        Map<String, Entry<OpType, String>> keyToOps = configChanges.get(configResource);
        List<ApiMessageAndVersion> configRecords;
        if (keyToOps != null) {
            ControllerResult<ApiError> configResult =
                configurationControl.incrementalAlterConfig(configResource, keyToOps, true);
            if (configResult.response().isFailure()) {
                topicErrors.put(topic.name(), configResult.response());
                continue;
            } else {
                configRecords = configResult.records();
            }
        } else {
            configRecords = Collections.emptyList();
        }
        ApiError error;
        try {
            error = createTopic(context, topic, records, successes, configRecords, describable.contains(topic.name()));
        } catch (ApiException e) {
            error = ApiError.fromThrowable(e);
        }
        if (error.isFailure()) {
            topicErrors.put(topic.name(), error);
        }
    }

    // Create responses for all topics.
    CreateTopicsResponseData data = new CreateTopicsResponseData();
    StringBuilder resultsBuilder = new StringBuilder();
    String resultsPrefix = "";
    for (CreatableTopic topic : request.topics()) {
        ApiError error = topicErrors.get(topic.name());
        if (error != null) {
            data.topics().add(new CreatableTopicResult().
                setName(topic.name()).
                setErrorCode(error.error().code()).
                setErrorMessage(error.message()));
            resultsBuilder.append(resultsPrefix).append(topic).append(": ").
                append(error.error()).append(" (").append(error.message()).append(")");
            resultsPrefix = ", ";
            continue;
        }
        CreatableTopicResult result = successes.get(topic.name());
        data.topics().add(result);
        resultsBuilder.append(resultsPrefix).append(topic).append(": ").
            append("SUCCESS");
        resultsPrefix = ", ";
    }
    if (request.validateOnly()) {
        log.info("Validate-only CreateTopics result(s): {}", resultsBuilder);
        return ControllerResult.atomicOf(Collections.emptyList(), data);
    } else {
        log.info("CreateTopics result(s): {}", resultsBuilder);
        return ControllerResult.atomicOf(records, data);
    }
}
@Test
public void testInvalidCreateTopicsWithValidateOnlyFlag() {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
    ctx.registerBrokers(0, 1, 2);
    ctx.unfenceBrokers(0, 1, 2);
    CreateTopicsRequestData request = new CreateTopicsRequestData().setValidateOnly(true);
    request.topics().add(new CreatableTopic().setName("foo").
        setNumPartitions(1).setReplicationFactor((short) 4));
    ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.CREATE_TOPICS);
    ControllerResult<CreateTopicsResponseData> result =
        ctx.replicationControl.createTopics(requestContext, request, Collections.singleton("foo"));
    assertEquals(0, result.records().size());
    CreateTopicsResponseData expectedResponse = new CreateTopicsResponseData();
    expectedResponse.topics().add(new CreatableTopicResult().setName("foo").
        setErrorCode(INVALID_REPLICATION_FACTOR.code()).
        setErrorMessage("Unable to replicate the partition 4 time(s): The target " +
            "replication factor of 4 cannot be reached because only 3 broker(s) " +
            "are registered."));
    assertEquals(expectedResponse, result.response());
}
@Override
protected double maintain() {
    if ( ! nodeRepository().nodes().isWorking()) return 0.0;

    // Don't need to maintain spare capacity in dynamically provisioned zones; can provision more on demand.
    if (nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0;

    NodeList allNodes = nodeRepository().nodes().list();
    CapacityChecker capacityChecker = new CapacityChecker(allNodes);
    List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts();
    metric.set(ConfigServerMetrics.OVERCOMMITTED_HOSTS.baseName(), overcommittedHosts.size(), null);
    retireOvercommitedHosts(allNodes, overcommittedHosts);

    boolean success = true;
    Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
    if (failurePath.isPresent()) {
        int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
        if (spareHostCapacity == 0) {
            List<Move> mitigation = findMitigation(failurePath.get());
            if (execute(mitigation, failurePath.get())) {
                // We succeeded or are in the process of taking a step to mitigate.
                // Report with the assumption this will eventually succeed to avoid alerting before we're stuck
                spareHostCapacity++;
            } else {
                success = false;
            }
        }
        metric.set(ConfigServerMetrics.SPARE_HOST_CAPACITY.baseName(), spareHostCapacity, null);
    }
    return success ? 1.0 : 0.0;
}
@Test
public void testNoSpares() {
    var tester = new SpareCapacityMaintainerTester();
    tester.addHosts(2, new NodeResources(10, 100, 1000, 1));
    tester.addNodes(0, 2, new NodeResources(10, 100, 1000, 1), 0);
    tester.maintainer.maintain();
    assertEquals(0, tester.deployer.activations);
    assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
    assertEquals(0, tester.metric.values.get("spareHostCapacity"));
}
@Override
public void calculate() {
}
@Test
public void testCalculate() {
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), SMALL_VALUE);
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), LARGE_VALUE);
    function.calculate();

    assertThat(function.getValue()).isEqualTo(SMALL_VALUE);
}
@Override
public void transform(Message message, DataType fromType, DataType toType) {
    ProtobufSchema schema = message.getExchange().getProperty(SchemaHelper.CONTENT_SCHEMA, ProtobufSchema.class);

    if (schema == null) {
        throw new CamelExecutionException("Missing proper Protobuf schema for data type processing", message.getExchange());
    }

    try {
        byte[] marshalled;

        String contentClass = SchemaHelper.resolveContentClass(message.getExchange(), null);
        if (contentClass != null) {
            Class<?> contentType = message.getExchange().getContext().getClassResolver().resolveMandatoryClass(contentClass);
            marshalled = Protobuf.mapper().writer().forType(contentType).with(schema)
                    .writeValueAsBytes(message.getBody());
        } else {
            marshalled = Protobuf.mapper().writer().forType(JsonNode.class).with(schema)
                    .writeValueAsBytes(getBodyAsJsonNode(message, schema));
        }

        message.setBody(marshalled);

        message.setHeader(Exchange.CONTENT_TYPE, MimeType.PROTOBUF_BINARY.type());
        message.setHeader(SchemaHelper.CONTENT_SCHEMA, schema.getSource().toString());
    } catch (InvalidPayloadException | IOException | ClassNotFoundException e) {
        throw new CamelExecutionException(
                "Failed to apply Protobuf binary data type on exchange", message.getExchange(), e);
    }
}
@Test
void shouldHandleJsonString() throws Exception {
    Exchange exchange = new DefaultExchange(camelContext);

    ProtobufSchema protobufSchema = getSchema();
    exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema);
    exchange.getMessage().setBody("""
            {
              "name": "Christoph",
              "age": 32
            }
            """);

    transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);

    JSONAssert.assertEquals("""
            {
              "name": "Christoph",
              "age": 32
            }
            """,
            Json.mapper().writeValueAsString(
                    Protobuf.mapper().reader().with(protobufSchema).readTree(exchange.getMessage().getBody(byte[].class))),
            true);
}
ClientConfigurationData createClientConfiguration() {
    ClientConfigurationData initialConf = new ClientConfigurationData();

    ProxyConfiguration proxyConfig = service.getConfiguration();
    initialConf.setServiceUrl(
            proxyConfig.isTlsEnabledWithBroker() ? service.getServiceUrlTls() : service.getServiceUrl());

    // Apply all arbitrary configuration. This must be called before setting any fields annotated as
    // @Secret on the ClientConfigurationData object because of the way they are serialized.
    // See https://github.com/apache/pulsar/issues/8509 for more information.
    Map<String, Object> overrides = PropertiesUtils
            .filterAndMapProperties(proxyConfig.getProperties(), "brokerClient_");
    ClientConfigurationData clientConf = ConfigurationDataUtils
            .loadData(overrides, initialConf, ClientConfigurationData.class);

    // The proxy service does not need to clean up idle connections automatically, so disable the idle timeout (-1).
    initialConf.setConnectionMaxIdleSeconds(-1);

    clientConf.setAuthentication(this.getClientAuthentication());
    if (proxyConfig.isTlsEnabledWithBroker()) {
        clientConf.setUseTls(true);
        clientConf.setTlsHostnameVerificationEnable(proxyConfig.isTlsHostnameVerificationEnabled());
        if (proxyConfig.isBrokerClientTlsEnabledWithKeyStore()) {
            clientConf.setUseKeyStoreTls(true);
            clientConf.setTlsTrustStoreType(proxyConfig.getBrokerClientTlsTrustStoreType());
            clientConf.setTlsTrustStorePath(proxyConfig.getBrokerClientTlsTrustStore());
            clientConf.setTlsTrustStorePassword(proxyConfig.getBrokerClientTlsTrustStorePassword());
            clientConf.setTlsKeyStoreType(proxyConfig.getBrokerClientTlsKeyStoreType());
            clientConf.setTlsKeyStorePath(proxyConfig.getBrokerClientTlsKeyStore());
            clientConf.setTlsKeyStorePassword(proxyConfig.getBrokerClientTlsKeyStorePassword());
        } else {
            clientConf.setTlsTrustCertsFilePath(proxyConfig.getBrokerClientTrustCertsFilePath());
            clientConf.setTlsKeyFilePath(proxyConfig.getBrokerClientKeyFilePath());
            clientConf.setTlsCertificateFilePath(proxyConfig.getBrokerClientCertificateFilePath());
        }
        clientConf.setTlsAllowInsecureConnection(proxyConfig.isTlsAllowInsecureConnection());
    }
    return clientConf;
}
@Test
public void testCreateClientConfiguration() {
    ProxyConfiguration proxyConfiguration = new ProxyConfiguration();
    proxyConfiguration.setTlsEnabledWithBroker(true);

    String proxyUrlTls = "pulsar+ssl://proxy:6651";
    String proxyUrl = "pulsar://proxy:6650";
    ProxyService proxyService = mock(ProxyService.class);
    doReturn(proxyConfiguration).when(proxyService).getConfiguration();
    doReturn(proxyUrlTls).when(proxyService).getServiceUrlTls();
    doReturn(proxyUrl).when(proxyService).getServiceUrl();

    ProxyConnection proxyConnection = new ProxyConnection(proxyService, null);
    ClientConfigurationData clientConfiguration = proxyConnection.createClientConfiguration();
    assertEquals(clientConfiguration.getServiceUrl(), proxyUrlTls);

    proxyConfiguration.setTlsEnabledWithBroker(false);
    clientConfiguration = proxyConnection.createClientConfiguration();
    assertEquals(clientConfiguration.getServiceUrl(), proxyUrl);
}
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
    CruiseConfig configForEdit;
    CruiseConfig config;
    LOGGER.debug("[Config Save] Loading config holder");
    configForEdit = deserializeConfig(content);
    if (callback != null) callback.call(configForEdit);
    config = preprocessAndValidate(configForEdit);
    return new GoConfigHolder(config, configForEdit);
}
@Test
void shouldAllowHashCharacterInLabelTemplate() throws Exception {
    GoConfigHolder goConfigHolder = xmlLoader.loadConfigHolder(LABEL_TEMPLATE_WITH_LABEL_TEMPLATE("1.3.0-${COUNT}-${git}##"));
    assertThat(goConfigHolder.config.pipelineConfigByName(new CaseInsensitiveString("cruise")).getLabelTemplate()).isEqualTo("1.3.0-${COUNT}-${git}#");
    assertThat(goConfigHolder.configForEdit.pipelineConfigByName(new CaseInsensitiveString("cruise")).getLabelTemplate()).isEqualTo("1.3.0-${COUNT}-${git}##");
}
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}
@Test
public void testExcludeObjectNames() throws Exception {
    JmxCollector jc =
        new JmxCollector(
                "\n---\nincludeObjectNames:\n- java.lang:*\n- org.apache.cassandra.concurrent:*\nexcludeObjectNames:\n- org.apache.cassandra.concurrent:*"
                    .replace('`', '"'))
            .register(prometheusRegistry);

    // Test what should and shouldn't be present.
    assertNotNull(
        getSampleValue(
            "java_lang_OperatingSystem_ProcessCpuTime", new String[] {}, new String[] {}));

    assertNull(
        getSampleValue(
            "org_apache_cassandra_concurrent_CONSISTENCY_MANAGER_ActiveCount",
            new String[] {},
            new String[] {}));

    assertNull(
        getSampleValue(
            "org_apache_cassandra_metrics_Compaction_Value",
            new String[] {"name"},
            new String[] {"CompletedTasks"}));

    assertNull(
        getSampleValue(
            "hadoop_DataNode_replaceBlockOpMinTime",
            new String[] {"name"},
            new String[] {"DataNodeActivity-ams-hdd001-50010"}));
}
public List<Service> importServiceDefinition(String repositoryUrl, Secret repositorySecret,
        boolean disableSSLValidation, boolean mainArtifact) throws MockRepositoryImportException {
    log.info("Importing service definitions from {}", repositoryUrl);
    File localFile = null;
    Map<String, List<String>> fileProperties = null;

    if (repositoryUrl.startsWith("http")) {
        try {
            HTTPDownloader.FileAndHeaders fileAndHeaders = HTTPDownloader
                    .handleHTTPDownloadToFileAndHeaders(repositoryUrl, repositorySecret, disableSSLValidation);
            localFile = fileAndHeaders.getLocalFile();
            fileProperties = fileAndHeaders.getResponseHeaders();
        } catch (IOException ioe) {
            throw new MockRepositoryImportException(repositoryUrl + " cannot be downloaded", ioe);
        }
    } else {
        // Simply build localFile from repository url.
        localFile = new File(repositoryUrl);
    }

    RelativeReferenceURLBuilder referenceURLBuilder = RelativeReferenceURLBuilderFactory
            .getRelativeReferenceURLBuilder(fileProperties);
    String artifactName = referenceURLBuilder.getFileName(repositoryUrl, fileProperties);

    // Initialize a reference resolver to the folder of this repositoryUrl.
    ReferenceResolver referenceResolver =
            new ReferenceResolver(repositoryUrl, repositorySecret, disableSSLValidation, referenceURLBuilder);
    return importServiceDefinition(localFile, referenceResolver, new ArtifactInfo(artifactName, mainArtifact));
}
@Test
void testImportServiceDefinitionFromGitLabURL() {
    List<Service> services = null;
    try {
        services = service.importServiceDefinition(
                "https://gitlab.com/api/v4/projects/53583367/repository/files/complex-example%2Fopenapi.yaml/raw?head=main",
                null, true, true);
    } catch (MockRepositoryImportException mrie) {
        fail("No MockRepositoryImportException should have been thrown");
    }

    assertNotNull(services);
    assertEquals(1, services.size());

    // Inspect Service own attributes.
    Service importedSvc = services.get(0);
    assertEquals("OpenAPI Car API", importedSvc.getName());
    assertEquals("1.0.0", importedSvc.getVersion());
    assertEquals("openapi.yaml", importedSvc.getSourceArtifact());

    // Inspect and check resources.
    List<Resource> resources = resourceRepository.findByServiceId(importedSvc.getId());
    assertEquals(10, resources.size());

    // Now inspect operations.
    assertEquals(1, importedSvc.getOperations().size());
    assertEquals("GET /owner/{owner}/car", importedSvc.getOperations().get(0).getName());
    assertEquals(DispatchStyles.URI_PARTS, importedSvc.getOperations().get(0).getDispatcher());
    assertEquals(3, importedSvc.getOperations().get(0).getResourcePaths().size());

    // Inspect and check requests.
    List<Request> requests = requestRepository
            .findByOperationId(IdBuilder.buildOperationId(importedSvc, importedSvc.getOperations().get(0)));
    assertEquals(3, requests.size());
    for (Request request : requests) {
        assertEquals("openapi.yaml", request.getSourceArtifact());
    }

    // Inspect and check responses.
    List<Response> responses = responseRepository
            .findByOperationId(IdBuilder.buildOperationId(importedSvc, importedSvc.getOperations().get(0)));
    assertEquals(3, responses.size());
    for (Response response : responses) {
        assertEquals("openapi.yaml", response.getSourceArtifact());
        switch (response.getName()) {
            case "laurent":
                assertEquals("/owner=0", response.getDispatchCriteria());
                assertEquals("[{\"model\":\"BMW X5\",\"year\":2018},{\"model\":\"Tesla Model 3\",\"year\":2020}]",
                        response.getContent());
                break;
            case "maxime":
                assertEquals("/owner=1", response.getDispatchCriteria());
                assertEquals("[{\"model\":\"Volkswagen Golf\",\"year\":2017}]", response.getContent());
                break;
            case "NOT_FOUND":
                assertEquals("/owner=999999999", response.getDispatchCriteria());
                assertEquals("{\"error\":\"Could not find owner\"}", response.getContent());
                break;
            default:
                fail("Unknown response message");
        }
    }
}
public void popNode() {
    current.finishSpecifying();
    unexpandedInputs.remove(current);
    current = current.getEnclosingNode();
    checkState(current != null, "Can't pop the root node of a TransformHierarchy");
}
@Test
public void popWithoutPushFails() {
    thrown.expect(IllegalStateException.class);
    hierarchy.popNode();
}
public File getUniqueFilenameForClass(String className) throws IOException {
    // class names should be passed in the normal dalvik style, with a leading L, a trailing ;, and using
    // '/' as a separator.
    if (className.charAt(0) != 'L' || className.charAt(className.length() - 1) != ';') {
        throw new RuntimeException("Not a valid dalvik class name");
    }

    int packageElementCount = 1;
    for (int i = 1; i < className.length() - 1; i++) {
        if (className.charAt(i) == '/') {
            packageElementCount++;
        }
    }

    String[] packageElements = new String[packageElementCount];
    int elementIndex = 0;
    int elementStart = 1;
    for (int i = 1; i < className.length() - 1; i++) {
        if (className.charAt(i) == '/') {
            // if the first char after the initial L is a '/', or if there are
            // two consecutive '/'
            if (i - elementStart == 0) {
                throw new RuntimeException("Not a valid dalvik class name");
            }
            packageElements[elementIndex++] = className.substring(elementStart, i);
            elementStart = ++i;
        }
    }

    // at this point, we have added all the package elements to packageElements, but still need to add
    // the final class name. elementStart should point to the beginning of the class name

    // this will be true if the class ends in a '/', i.e. Lsome/package/className/;
    if (elementStart >= className.length() - 1) {
        throw new RuntimeException("Not a valid dalvik class name");
    }

    packageElements[elementIndex] = className.substring(elementStart, className.length() - 1);

    return addUniqueChild(top, packageElements, 0);
}
@Test
public void testBasicFunctionality() throws IOException {
    File tempDir = Files.createTempDir().getCanonicalFile();
    ClassFileNameHandler handler = new ClassFileNameHandler(tempDir, ".smali");

    File file = handler.getUniqueFilenameForClass("La/b/c/d;");
    checkFilename(tempDir, file, "a", "b", "c", "d.smali");

    file = handler.getUniqueFilenameForClass("La/b/c/e;");
    checkFilename(tempDir, file, "a", "b", "c", "e.smali");

    file = handler.getUniqueFilenameForClass("La/b/d/d;");
    checkFilename(tempDir, file, "a", "b", "d", "d.smali");

    file = handler.getUniqueFilenameForClass("La/b;");
    checkFilename(tempDir, file, "a", "b.smali");

    file = handler.getUniqueFilenameForClass("Lb;");
    checkFilename(tempDir, file, "b.smali");
}
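As a hedged sketch of the validation rules enforced above (inputs chosen for illustration, not from the test suite): a dalvik-style name must start with 'L', end with ';', and contain no empty segments.

ClassFileNameHandler handler = new ClassFileNameHandler(Files.createTempDir(), ".smali");
handler.getUniqueFilenameForClass("La/b;");        // valid: leading 'L', trailing ';', non-empty segments
// Each of the following throws RuntimeException("Not a valid dalvik class name"):
// handler.getUniqueFilenameForClass("a/b;");      // missing the leading 'L'
// handler.getUniqueFilenameForClass("La//b;");    // empty package segment
// handler.getUniqueFilenameForClass("La/b/;");    // name ends in '/', so the class name is empty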
@Override
public long add(double longitude, double latitude, V member) {
    return get(addAsync(longitude, latitude, member));
}
@Test
public void testAdd() {
    RGeo<String> geo = redisson.getGeo("test");
    assertThat(geo.add(2.51, 3.12, "city1")).isEqualTo(1);
}
public byte[] getCompressedData() {
    return compressedData;
}
@Test
public void testGetCompressedData() {
    byte[] bytes = new byte[]{'h', 'e', 'l', 'l', 'o', '!'};
    lz4CompressData.setCompressedData(bytes);
    Assertions.assertEquals(lz4CompressData.getCompressedData(), bytes);
}
private Map<String, String> filterMdc(Map<String, String> mdcPropertyMap) {
    if (includesMdcKeys.isEmpty()) {
        return mdcPropertyMap;
    }
    return mdcPropertyMap.entrySet()
        .stream()
        .filter(e -> includesMdcKeys.contains(e.getKey()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
@Test
void testFilterMdc() {
    final Set<String> includesMdcKeys = Set.of("userId", "orderId");
    Map<String, Object> map = new EventJsonLayout(jsonFormatter, timestampFormatter, throwableProxyConverter,
            DEFAULT_EVENT_ATTRIBUTES, Collections.emptyMap(), Collections.emptyMap(), includesMdcKeys, false)
        .toJsonMap(event);

    final Map<String, String> expectedMdc = Map.of(
        "userId", "18",
        "orderId", "24");
    final HashMap<String, Object> expectedFields = new HashMap<>(defaultExpectedFields);
    expectedFields.put("mdc", expectedMdc);
    assertThat(map).isEqualTo(expectedFields);
}
public static int getLevelForXp(int xp) {
    if (xp < 0) {
        throw new IllegalArgumentException("XP (" + xp + ") must not be negative");
    }

    int low = 0;
    int high = XP_FOR_LEVEL.length - 1;

    while (low <= high) {
        int mid = low + (high - low) / 2;
        int xpForLevel = XP_FOR_LEVEL[mid];

        if (xp < xpForLevel) {
            high = mid - 1;
        } else if (xp > xpForLevel) {
            low = mid + 1;
        } else {
            return mid + 1;
        }
    }

    return high + 1;
}
@Test(expected = IllegalArgumentException.class)
public void testGetLevelForNegativeXP() {
    Experience.getLevelForXp(-1);
}
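When the exact XP value is not in the lookup table, the search falls through to return high + 1, i.e. the highest level whose threshold has already been reached. A minimal sketch, assuming XP_FOR_LEVEL[0] == 0 (the threshold for level 1):

// Sketch: 0 XP hits the level-1 threshold exactly, so the lowest level is returned.
assert Experience.getLevelForXp(0) == 1;
// Any XP strictly between two thresholds maps to the lower of the two levels (high + 1).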
private static void removeQueue(
    String queueToRemove, CapacitySchedulerConfiguration proposedConf,
    Map<String, String> confUpdate) throws IOException {
    if (queueToRemove == null) {
        return;
    }
    QueuePath queuePath = new QueuePath(queueToRemove);
    if (queuePath.isRoot() || queuePath.isInvalid()) {
        throw new IOException("Can't remove queue " + queuePath.getFullPath());
    }
    String queueName = queuePath.getLeafName();
    List<String> siblingQueues = getSiblingQueues(queuePath, proposedConf);
    if (!siblingQueues.contains(queueName)) {
        throw new IOException("Queue " + queuePath.getFullPath() + " not found");
    }

    siblingQueues.remove(queueName);
    QueuePath parentPath = queuePath.getParentObject();
    proposedConf.setQueues(parentPath, siblingQueues.toArray(new String[0]));
    String queuesConfig = getQueuesConfig(parentPath);
    if (siblingQueues.isEmpty()) {
        confUpdate.put(queuesConfig, null);
        // Unset Ordering Policy of Leaf Queue converted from
        // Parent Queue after removeQueue
        String queueOrderingPolicy = getOrderingPolicyConfig(parentPath);
        proposedConf.unset(queueOrderingPolicy);
        confUpdate.put(queueOrderingPolicy, null);
    } else {
        confUpdate.put(queuesConfig, Joiner.on(',').join(siblingQueues));
    }
    for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
            ".*" + queuePath.getFullPath() + "\\..*").entrySet()) {
        proposedConf.unset(confRemove.getKey());
        confUpdate.put(confRemove.getKey(), null);
    }
}
@Test
public void testRemoveQueue() throws Exception {
    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
    updateInfo.getRemoveQueueInfo().add(A_PATH);
    Map<String, String> configurationUpdate =
        ConfigurationUpdateAssembler.constructKeyValueConfUpdate(csConfig, updateInfo);

    assertTrue(configurationUpdate.containsKey(A_CONFIG_PATH));
    assertNull(configurationUpdate.get(A_CONFIG_PATH));
    assertEquals("b", configurationUpdate.get(ROOT_QUEUES_PATH));
}
@Override
public KCell[] getRow( int rownr ) {
    // xlsx raw row numbers are 1-based index, KSheet is 0-based

    // Don't check the upper limit as not all rows may have been read!
    // If it's found that the row does not exist, the exception will be thrown at the end of this method.
    if ( rownr < 0 ) {
        // KSheet requires out of bounds here
        throw new ArrayIndexOutOfBoundsException( rownr );
    }
    if ( rownr + 1 < firstRow ) {
        // before first non-empty row
        return new KCell[0];
    }
    if ( rownr > 0 && currentRow == rownr + 1 ) {
        if ( currentRowCells != null ) {
            return currentRowCells;
        }
        // The case when the table contains the empty row(s) before the header
        // but at the same time user wants to read starting from 0 row
        return new KCell[0];
    }
    try {
        if ( currentRow >= rownr + 1 ) {
            // allow random access per api despite performance hit
            resetSheetReader();
        }
        while ( sheetReader.hasNext() ) {
            int event = sheetReader.next();
            if ( event == XMLStreamConstants.START_ELEMENT && sheetReader.getLocalName().equals( TAG_ROW ) ) {
                String rowIndicator = sheetReader.getAttributeValue( null, "r" );
                currentRow = Integer.parseInt( rowIndicator );
                if ( currentRow < rownr + 1 ) {
                    continue;
                }
                currentRowCells = parseRow();
                return currentRowCells;
            }
            if ( event == XMLStreamConstants.END_ELEMENT && sheetReader.getLocalName().equals( TAG_SHEET_DATA ) ) {
                // There are no more rows, no need to continue reading
                break;
            }
        }
    } catch ( Exception e ) {
        throw new RuntimeException( e );
    }
    // We've read all document rows, let's update the final count.
    numRows = currentRow;
    // And, as this was an invalid row to ask for, throw the proper exception!
    throw new ArrayIndexOutOfBoundsException( rownr );
}
@Test( expected = ArrayIndexOutOfBoundsException.class )
public void testEmptySheet_row1() throws Exception {
    XSSFReader reader =
        mockXSSFReader( "sheet1", SHEET_EMPTY, mock( SharedStringsTable.class ), mock( StylesTable.class ) );
    StaxPoiSheet sheet = new StaxPoiSheet( reader, "empty", "sheet1" );
    sheet.getRow( 1 );
    fail( "An exception should have been thrown!" );
}
public String abbreviate(String fqClassName) {
    StringBuilder buf = new StringBuilder(targetLength);
    if (fqClassName == null) {
        throw new IllegalArgumentException("Class name may not be null");
    }

    int inLen = fqClassName.length();
    if (inLen < targetLength) {
        return fqClassName;
    }

    int[] dotIndexesArray = new int[ClassicConstants.MAX_DOTS];
    // a.b.c contains 2 dots but 2+1 parts.
    // see also http://jira.qos.ch/browse/LBCLASSIC-110
    int[] lengthArray = new int[ClassicConstants.MAX_DOTS + 1];

    int dotCount = computeDotIndexes(fqClassName, dotIndexesArray);

    // System.out.println();
    // System.out.println("Dot count for [" + className + "] is " + dotCount);
    // if there are no dots, abbreviation is not possible
    if (dotCount == 0) {
        return fqClassName;
    }
    // printArray("dotArray: ", dotArray);
    computeLengthArray(fqClassName, dotIndexesArray, lengthArray, dotCount);
    // printArray("lengthArray: ", lengthArray);
    for (int i = 0; i <= dotCount; i++) {
        if (i == 0) {
            buf.append(fqClassName.substring(0, lengthArray[i] - 1));
        } else {
            buf.append(fqClassName.substring(dotIndexesArray[i - 1], dotIndexesArray[i - 1] + lengthArray[i]));
        }
        // System.out.println("i=" + i + ", buf=" + buf);
    }
    return buf.toString();
}
@Test
public void testTwoDot() {
    {
        TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
        String name = "com.logback.Foobar";
        assertEquals("c.l.Foobar", abbreviator.abbreviate(name));
    }
    {
        TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
        String name = "c.logback.Foobar";
        assertEquals("c.l.Foobar", abbreviator.abbreviate(name));
    }
    {
        TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
        String name = "c..Foobar";
        assertEquals("c..Foobar", abbreviator.abbreviate(name));
    }
    {
        TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
        String name = "..Foobar";
        assertEquals("..Foobar", abbreviator.abbreviate(name));
    }
}
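A usage sketch (with an assumed class name, not taken from the test suite) may make the behavior concrete: with a target length of 1, every package segment collapses to its first character while the class name itself is preserved.

TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
// Hypothetical input chosen for illustration:
String shortName = abbreviator.abbreviate("org.example.service.OrderService");
// shortName -> "o.e.s.OrderService"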
public static Constructor<?> findConstructor(Class<?> clazz, Class<?> paramType) throws NoSuchMethodException {
    Constructor<?> targetConstructor;
    try {
        targetConstructor = clazz.getConstructor(new Class<?>[] {paramType});
    } catch (NoSuchMethodException e) {
        targetConstructor = null;
        Constructor<?>[] constructors = clazz.getConstructors();
        for (Constructor<?> constructor : constructors) {
            if (Modifier.isPublic(constructor.getModifiers())
                    && constructor.getParameterTypes().length == 1
                    && constructor.getParameterTypes()[0].isAssignableFrom(paramType)) {
                targetConstructor = constructor;
                break;
            }
        }
        if (targetConstructor == null) {
            throw e;
        }
    }
    return targetConstructor;
}
@Test
void testFindConstructor() throws Exception {
    Constructor<?> constructor = ReflectUtils.findConstructor(Foo3.class, Foo2.class);
    assertNotNull(constructor);
}
public void applyConfig(ClientBwListDTO configDTO) {
    requireNonNull(configDTO, "Client filtering config must not be null");
    requireNonNull(configDTO.mode, "Config mode must not be null");
    requireNonNull(configDTO.entries, "Config entries must not be null");

    ClientSelector selector;
    switch (configDTO.mode) {
        case DISABLED:
            selector = ClientSelectors.any();
            break;
        case WHITELIST:
            selector = createSelector(configDTO.entries);
            break;
        case BLACKLIST:
            selector = ClientSelectors.inverse(createSelector(configDTO.entries));
            break;
        default:
            throw new IllegalArgumentException("Unknown client B/W list mode: " + configDTO.mode);
    }

    clientEngine.applySelector(selector);
}
@Test
public void testApplyConfig_blacklist() {
    ClientBwListDTO config = createConfig(Mode.BLACKLIST,
        new ClientBwListEntryDTO(Type.IP_ADDRESS, "127.0.0.*"),
        new ClientBwListEntryDTO(Type.IP_ADDRESS, "192.168.0.1"),
        new ClientBwListEntryDTO(Type.IP_ADDRESS, "192.168.*.42"),
        new ClientBwListEntryDTO(Type.IP_ADDRESS, "fe80:0:0:0:45c5:47ee:fe15:*"),
        new ClientBwListEntryDTO(Type.INSTANCE_NAME, "*_client"),
        new ClientBwListEntryDTO(Type.LABEL, "test*label"));
    handler.applyConfig(config);

    Client[] allowed = {
        createClient("192.168.0.101", "a_name", "random"),
        createClient("fe70:0:0:0:35c5:16ee:fe15:491a", "a_name", "random")
    };
    for (Client client : allowed) {
        assertTrue(clientEngine.isClientAllowed(client));
    }

    Client[] denied = {
        createClient("127.0.0.3", "a_name"),
        createClient("192.168.0.1", "a_name"),
        createClient("192.168.0.42", "a_name"),
        createClient("fe80:0:0:0:45c5:47ee:fe15:493a", "a_name"),
        createClient("192.168.0.101", "java_client"),
        createClient("192.168.0.101", "a_name", "test_label"),
        createClient("192.168.0.101", "a_name", "testlabel")
    };
    for (Client client : denied) {
        assertFalse(clientEngine.isClientAllowed(client));
    }
}
@Override
public boolean isRegistered(JobID jobId) {
    return jobManagerRunners.containsKey(jobId);
}
@Test
void testIsNotRegistered() {
    assertThat(testInstance.isRegistered(new JobID())).isFalse();
}
public List<PrometheusQueryResult> queryMetric(String queryString, long startTimeMs, long endTimeMs) throws IOException {
    URI queryUri = URI.create(_prometheusEndpoint.toURI() + QUERY_RANGE_API_PATH);
    HttpPost httpPost = new HttpPost(queryUri);

    List<NameValuePair> data = new ArrayList<>();
    data.add(new BasicNameValuePair(QUERY, queryString));
    /* "start" and "end" are expected to be unix timestamp in seconds (number of seconds since the Unix epoch).
       They accept values with a decimal point (up to 64 bits).
       The samples returned are inclusive of the "end" timestamp provided. */
    data.add(new BasicNameValuePair(START, String.valueOf((double) startTimeMs / SEC_TO_MS)));
    data.add(new BasicNameValuePair(END, String.valueOf((double) endTimeMs / SEC_TO_MS)));
    // step is expected to be in seconds, and accept values with a decimal point (up to 64 bits).
    data.add(new BasicNameValuePair(STEP, String.valueOf((double) _samplingIntervalMs / SEC_TO_MS)));
    httpPost.setEntity(new UrlEncodedFormEntity(data));

    try (CloseableHttpResponse response = _httpClient.execute(httpPost)) {
        int responseCode = response.getStatusLine().getStatusCode();
        HttpEntity entity = response.getEntity();
        InputStream content = entity.getContent();
        String responseString = IOUtils.toString(content, StandardCharsets.UTF_8);
        if (responseCode != HttpServletResponse.SC_OK) {
            throw new IOException(String.format("Received non-success response code on Prometheus API HTTP call,"
                + " response code = %d, response body = %s", responseCode, responseString));
        }

        PrometheusResponse prometheusResponse = GSON.fromJson(responseString, PrometheusResponse.class);
        if (prometheusResponse == null) {
            throw new IOException(String.format(
                "No response received from Prometheus API query, response body = %s", responseString));
        }
        if (!SUCCESS.equals(prometheusResponse.status())) {
            throw new IOException(String.format(
                "Prometheus API query was not successful, response body = %s", responseString));
        }
        if (prometheusResponse.data() == null || prometheusResponse.data().result() == null) {
            throw new IOException(String.format(
                "Response from Prometheus HTTP API is malformed, response body = %s", responseString));
        }
        EntityUtils.consume(entity);
        return prometheusResponse.data().result();
    }
}
@Test(expected = IOException.class)
public void testEmptyResult() throws Exception {
    this.serverBootstrap.registerHandler(PrometheusAdapter.QUERY_RANGE_API_PATH, new HttpRequestHandler() {
        @Override
        public void handle(HttpRequest request, HttpResponse response, HttpContext context) {
            response.setStatusCode(HttpServletResponse.SC_OK);
            response.setEntity(new StringEntity(
                "{\"status\": \"success\", \"data\": {}}", StandardCharsets.UTF_8));
        }
    });
    HttpHost httpHost = this.start();

    PrometheusAdapter prometheusAdapter = new PrometheusAdapter(this.httpclient, httpHost, SAMPLING_INTERVAL_MS);
    prometheusAdapter.queryMetric(
        "kafka_server_BrokerTopicMetrics_OneMinuteRate{name=\"BytesOutPerSec\",topic=\"\"}",
        START_TIME_MS, END_TIME_MS);
}
static ContainerPopulator<TaskContainer> newContainerPopulator(CeTask task) {
    return taskContainer -> {
        taskContainer.add(task);
        taskContainer.add(AuditHousekeepingFrequencyHelper.class);
        taskContainer.add(AuditPurgeStep.class);
        taskContainer.add(new AuditPurgeComputationSteps(taskContainer));
        taskContainer.add(ComputationStepExecutor.class);
    };
}
@Test
public void newContainerPopulator() {
    CeTask task = new CeTask.Builder()
        .setUuid("TASK_UUID")
        .setType("Type")
        .build();

    AuditPurgeTaskProcessor.newContainerPopulator(task).populateContainer(container);

    Mockito.verify(container, Mockito.times(5)).add(any());
}
public EnumSet<E> get() {
    return value;
}
@SuppressWarnings("unchecked")
@Test
public void testSerializeAndDeserializeNonEmpty() throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, nonEmptyFlagWritable, nonEmptyFlagWritable.getClass(), null);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable.readObject(in, null)).get();
    assertEquals(read, nonEmptyFlag);
}
@Override
public void serialize(Asn1OutputStream out, BigInteger obj) {
    final byte[] data = obj.toByteArray();
    if (data[0] == 0) {
        out.write(data, 1, data.length - 1);
    } else {
        out.write(data);
    }
}
@Test
public void shouldSerialize() {
    assertArrayEquals(
        new byte[] { -128 },
        serialize(new BigIntegerConverter(), BigInteger.class, BigInteger.valueOf(128))
    );
}
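The point of the converter is that BigInteger.toByteArray() prepends a zero byte whenever the top bit of the magnitude is set, to keep non-negative values positive in two's complement; the serializer strips that padding. A sketch using plain JDK calls (illustrative, outside the converter):

byte[] raw = BigInteger.valueOf(128).toByteArray();
// raw is {0x00, (byte) 0x80}: the leading zero is sign padding only.
assert raw.length == 2 && raw[0] == 0 && raw[1] == (byte) 0x80;
// The converter writes just the single 0x80 byte, matching the test above.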
public static Gson instance() {
    return SingletonHolder.INSTANCE;
}
@Test
void rejectsDeserializationOfAESEncrypter() {
    final IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
            () -> Serialization.instance().fromJson("{}", AESEncrypter.class));
    assertEquals(format("Refusing to deserialize a %s in the JSON stream!", AESEncrypter.class.getName()), e.getMessage());
}
@Override
public boolean isGatheringMetrics() {
    return gatheringMetrics;
}
@Test
public void isGatheringMetricsTest() {
    JobMeta jobMetaTest = new JobMeta();
    jobMetaTest.setGatheringMetrics( true );
    assertTrue( jobMetaTest.isGatheringMetrics() );
    jobMetaTest.setGatheringMetrics( false );
    assertFalse( jobMetaTest.isGatheringMetrics() );
}