@Override protected void refresh() { Iterable<ServerConfig> dbConfigs = serverConfigRepository.findAll(); Map<String, Object> newConfigs = Maps.newHashMap(); //default cluster's configs for (ServerConfig config : dbConfigs) { if (Objects.equals(ConfigConsts.CLUSTER_NAME_DEFAULT, config.getCluster())) { newConfigs.put(config.getKey(), config.getValue()); } } //data center's configs String dataCenter = getCurrentDataCenter(); for (ServerConfig config : dbConfigs) { if (Objects.equals(dataCenter, config.getCluster())) { newConfigs.put(config.getKey(), config.getValue()); } } //cluster's config if (!Strings.isNullOrEmpty(System.getProperty(ConfigConsts.APOLLO_CLUSTER_KEY))) { String cluster = System.getProperty(ConfigConsts.APOLLO_CLUSTER_KEY); for (ServerConfig config : dbConfigs) { if (Objects.equals(cluster, config.getCluster())) { newConfigs.put(config.getKey(), config.getValue()); } } } //put to environment for (Map.Entry<String, Object> config: newConfigs.entrySet()){ String key = config.getKey(); Object value = config.getValue(); if (this.source.get(key) == null) { logger.info("Load config from DB : {} = {}", key, value); } else if (!Objects.equals(this.source.get(key), value)) { logger.info("Load config from DB : {} = {}. Old value = {}", key, value, this.source.get(key)); } this.source.put(key, value); } }
@Test public void testGetClusterConfig() { propertySource.refresh(); assertEquals(propertySource.getProperty(clusterConfigKey), clusterConfigValue); }
@Override public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { try { final IRODSAccount account = client.getIRODSAccount(); final Credentials credentials = host.getCredentials(); account.setUserName(credentials.getUsername()); account.setPassword(credentials.getPassword()); final AuthResponse response = client.getIRODSAccessObjectFactory().authenticateIRODSAccount(account); if(log.isDebugEnabled()) { log.debug(String.format("Connected to %s", response.getStartupResponse())); } if(!response.isSuccessful()) { throw new LoginFailureException(MessageFormat.format(LocaleFactory.localizedString( "Login {0} with username and password", "Credentials"), BookmarkNameProvider.toString(host))); } } catch(JargonException e) { throw new IRODSExceptionMappingService().map(e); } }
@Test(expected = LoginFailureException.class) public void testLoginFailure() throws Exception { final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol()))); final Profile profile = new ProfilePlistReader(factory).read( this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile")); final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials("a", "a")); final IRODSSession session = new IRODSSession(host); assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback())); assertTrue(session.isConnected()); assertNotNull(session.getClient()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); }
@Override public InetSocketAddress resolve(ServerWebExchange exchange) { List<String> xForwardedValues = extractXForwardedValues(exchange); if (!xForwardedValues.isEmpty()) { int index = Math.max(0, xForwardedValues.size() - maxTrustedIndex); return new InetSocketAddress(xForwardedValues.get(index), 0); } return defaultRemoteIpResolver.resolve(exchange); }
@Test public void trustOneFallsBackOnMultipleHeaders() { ServerWebExchange exchange = buildExchange( remoteAddressOnlyBuilder().header("X-Forwarded-For", "0.0.0.1").header("X-Forwarded-For", "0.0.0.2")); InetSocketAddress address = trustOne.resolve(exchange); assertThat(address.getHostName()).isEqualTo("0.0.0.0"); }
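A hedged companion sketch for the happy path (editor's addition, not from the source; it assumes extractXForwardedValues splits a single comma-separated X-Forwarded-For header into individual values): with maxTrustedIndex = 1 and two values, index = Math.max(0, 2 - 1) = 1, so the right-most value is chosen.

@Test
public void trustOnePicksRightmostValueFromSingleHeader() {
    // One header, two comma-separated values; maxTrustedIndex = 1 selects index 1.
    ServerWebExchange exchange = buildExchange(
            remoteAddressOnlyBuilder().header("X-Forwarded-For", "0.0.0.1, 0.0.0.2"));
    InetSocketAddress address = trustOne.resolve(exchange);
    assertThat(address.getHostName()).isEqualTo("0.0.0.2");
}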
static List<MappingField> resolveFields(Schema schema) { Map<String, MappingField> fields = new LinkedHashMap<>(); for (Schema.Field schemaField : schema.getFields()) { String name = schemaField.name(); // SQL types are nullable by default and NOT NULL is currently unsupported. Schema.Type schemaFieldType = unwrapNullableType(schemaField.schema()).getType(); QueryDataType type = AVRO_TO_SQL.getOrDefault(schemaFieldType, OBJECT); MappingField field = new MappingField(name, type); fields.putIfAbsent(field.name(), field); } return new ArrayList<>(fields.values()); }
@Test public void test_resolveFields() { // given Schema schema = SchemaBuilder.record("name") .fields() .name("boolean").type().booleanType().noDefault() .name("int").type().intType().noDefault() .name("long").type().longType().noDefault() .name("float").type().floatType().noDefault() .name("double").type().doubleType().noDefault() .name("string").type().stringType().noDefault() .name("object").type().record("object").fields().endRecord().noDefault() .endRecord(); // when List<MappingField> fields = AvroResolver.resolveFields(schema); // then assertIterableEquals(fields, new MappingField("boolean", QueryDataType.BOOLEAN), new MappingField("int", QueryDataType.INT), new MappingField("long", QueryDataType.BIGINT), new MappingField("float", QueryDataType.REAL), new MappingField("double", QueryDataType.DOUBLE), new MappingField("string", QueryDataType.VARCHAR), new MappingField("object", QueryDataType.OBJECT)); }
@Override public EurekaHttpResponse<Void> statusUpdate(String asgName, ASGStatus newStatus) { Response response = null; try { String urlPath = "asg/" + asgName + "/status"; response = jerseyClient.target(serviceUrl) .path(urlPath) .queryParam("value", newStatus.name()) .request() .header(PeerEurekaNode.HEADER_REPLICATION, "true") .put(Entity.text("")); return EurekaHttpResponse.status(response.getStatus()); } finally { if (response != null) { response.close(); } } }
@Test public void testAsgStatusUpdateReplication() throws Exception { serverMockClient.when( request() .withMethod("PUT") .withHeader(header(PeerEurekaNode.HEADER_REPLICATION, "true")) .withPath("/eureka/v2/asg/" + instanceInfo.getASGName() + "/status") ).respond( response().withStatusCode(200) ); EurekaHttpResponse<Void> response = replicationClient.statusUpdate(instanceInfo.getASGName(), ASGStatus.ENABLED); assertThat(response.getStatusCode(), is(equalTo(200))); }
public static <T extends Throwable> void checkMustEmpty(final Collection<?> values, final Supplier<T> exceptionSupplierIfUnexpected) throws T { if (!values.isEmpty()) { throw exceptionSupplierIfUnexpected.get(); } }
@Test void assertCheckMustEmptyWithMapToThrowsException() { assertThrows(SQLException.class, () -> ShardingSpherePreconditions.checkMustEmpty(Collections.singletonMap("key", "value"), SQLException::new)); }
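The test above goes through a Map overload; here is a minimal companion sketch (editor's addition) that drives the Collection variant shown in the focal method directly:

@Test
void assertCheckMustEmptyWithCollectionToThrowsException() {
    // A non-empty collection must trigger the supplied exception.
    assertThrows(SQLException.class,
            () -> ShardingSpherePreconditions.checkMustEmpty(Collections.singleton("value"), SQLException::new));
}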
@Override protected void write(final MySQLPacketPayload payload) { payload.writeIntLenenc(columnCount); }
@Test void assertWrite() { when(payload.readInt1()).thenReturn(3); MySQLFieldCountPacket actual = new MySQLFieldCountPacket(payload); assertThat(actual.getColumnCount(), is(3)); actual.write(payload); verify(payload).writeIntLenenc(3L); }
public long getValue(final K key) { final int mask = values.length - 1; int index = Hashing.hash(key, mask); long value; while (missingValue != (value = values[index])) { if (key.equals(keys[index])) { break; } index = ++index & mask; } return value; }
@Test public void getShouldReturnMissingValueWhenEmpty() { assertEquals(MISSING_VALUE, map.getValue("1")); }
public boolean devMode() { return this.environment.getBoolean(BladeConst.ENV_KEY_DEV_MODE, true); }
@Test public void testDevMode() { Blade blade = Blade.create(); blade.devMode(false); assertEquals(Boolean.FALSE, blade.devMode()); }
@VisibleForTesting static SortedMap<OffsetRange, Integer> computeOverlappingRanges(Iterable<OffsetRange> ranges) { ImmutableSortedMap.Builder<OffsetRange, Integer> rval = ImmutableSortedMap.orderedBy(OffsetRangeComparator.INSTANCE); List<OffsetRange> sortedRanges = Lists.newArrayList(ranges); if (sortedRanges.isEmpty()) { return rval.build(); } Collections.sort(sortedRanges, OffsetRangeComparator.INSTANCE); // Stores ranges in smallest 'from' and then smallest 'to' order // e.g. [2, 7), [3, 4), [3, 5), [3, 5), [3, 6), [4, 0) PriorityQueue<OffsetRange> rangesWithSameFrom = new PriorityQueue<>(OffsetRangeComparator.INSTANCE); Iterator<OffsetRange> iterator = sortedRanges.iterator(); // Stored in reverse sorted order so that when we iterate and re-add them back to // overlappingRanges they are stored in sorted order from smallest to largest range.to List<OffsetRange> rangesToProcess = new ArrayList<>(); while (iterator.hasNext()) { OffsetRange current = iterator.next(); // Skip empty ranges if (current.getFrom() == current.getTo()) { continue; } // If the current range has a different 'from' than a prior range then we must produce // ranges in [rangesWithSameFrom.from, current.from) while (!rangesWithSameFrom.isEmpty() && rangesWithSameFrom.peek().getFrom() != current.getFrom()) { rangesToProcess.addAll(rangesWithSameFrom); Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE); rangesWithSameFrom.clear(); int i = 0; long lastTo = rangesToProcess.get(i).getFrom(); // Output all the ranges that are strictly less than current.from // e.g. current.from := 7 for [3, 4), [3, 5), [3, 5), [3, 6) will produce // [3, 4) := 4 // [4, 5) := 3 // [5, 6) := 1 for (; i < rangesToProcess.size(); ++i) { if (rangesToProcess.get(i).getTo() > current.getFrom()) { break; } // Output only the first of any subsequent duplicate ranges if (i == 0 || rangesToProcess.get(i - 1).getTo() != rangesToProcess.get(i).getTo()) { rval.put( new OffsetRange(lastTo, rangesToProcess.get(i).getTo()), rangesToProcess.size() - i); lastTo = rangesToProcess.get(i).getTo(); } } // We exited the loop with 'to' > current.from, we must add the range [lastTo, // current.from) if it is non-empty if (lastTo < current.getFrom() && i != rangesToProcess.size()) { rval.put(new OffsetRange(lastTo, current.getFrom()), rangesToProcess.size() - i); } // The remaining ranges have a 'to' that is greater than 'current.from' and will overlap // with current so add them back to rangesWithSameFrom with the updated 'from' for (; i < rangesToProcess.size(); ++i) { rangesWithSameFrom.add( new OffsetRange(current.getFrom(), rangesToProcess.get(i).getTo())); } rangesToProcess.clear(); } rangesWithSameFrom.add(current); } // Process the last chunk of overlapping ranges while (!rangesWithSameFrom.isEmpty()) { // This range always represents the range with the smallest 'to' OffsetRange current = rangesWithSameFrom.remove(); rangesToProcess.addAll(rangesWithSameFrom); Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE); rangesWithSameFrom.clear(); rval.put(current, rangesToProcess.size() + 1 /* include current */); // Shorten all the remaining ranges such that they start with current.to for (OffsetRange rangeWithDifferentFrom : rangesToProcess) { // Skip any duplicates of current if (rangeWithDifferentFrom.getTo() > current.getTo()) { rangesWithSameFrom.add(new OffsetRange(current.getTo(), rangeWithDifferentFrom.getTo())); } } rangesToProcess.clear(); } return rval.build(); }
@Test public void testOverlappingFromsAndTos() { Iterable<OffsetRange> ranges = Arrays.asList(range(0, 4), range(0, 4), range(0, 4)); Map<OffsetRange, Integer> nonOverlappingRangesToNumElementsPerPosition = computeOverlappingRanges(ranges); assertEquals( ImmutableMap.builder().put(range(0, 4), 3).build(), nonOverlappingRangesToNumElementsPerPosition); assertNonEmptyRangesAndPositions(ranges, nonOverlappingRangesToNumElementsPerPosition); }
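A worked sketch of the partial-overlap case (editor's addition, reusing the range(...) helper assumed from the surrounding tests): the inputs [0, 4) and [2, 6) split into three non-overlapping pieces, and only the middle piece is covered by both.

@Test
public void testPartialOverlapSketch() {
    Iterable<OffsetRange> ranges = Arrays.asList(range(0, 4), range(2, 6));
    Map<OffsetRange, Integer> result = computeOverlappingRanges(ranges);
    assertEquals(
        ImmutableMap.builder()
            .put(range(0, 2), 1) // only [0, 4) covers [0, 2)
            .put(range(2, 4), 2) // both inputs cover [2, 4)
            .put(range(4, 6), 1) // only [2, 6) covers [4, 6)
            .build(),
        result);
}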
@Override public ParamCheckResponse checkParamInfoList(List<ParamInfo> paramInfos) { ParamCheckResponse paramCheckResponse = new ParamCheckResponse(); if (paramInfos == null) { paramCheckResponse.setSuccess(true); return paramCheckResponse; } for (ParamInfo paramInfo : paramInfos) { paramCheckResponse = checkParamInfoFormat(paramInfo); if (!paramCheckResponse.isSuccess()) { return paramCheckResponse; } } paramCheckResponse.setSuccess(true); return paramCheckResponse; }
@Test void testCheckParamInfoForPort() { ParamInfo paramInfo = new ParamInfo(); ArrayList<ParamInfo> paramInfos = new ArrayList<>(); paramInfos.add(paramInfo); // Negative port paramInfo.setPort("-1"); ParamCheckResponse actual = paramChecker.checkParamInfoList(paramInfos); assertFalse(actual.isSuccess()); assertEquals("Param 'port' is illegal, the value should be between 0 and 65535.", actual.getMessage()); // Out of range paramInfo.setPort("65536"); actual = paramChecker.checkParamInfoList(paramInfos); assertFalse(actual.isSuccess()); assertEquals("Param 'port' is illegal, the value should be between 0 and 65535.", actual.getMessage()); // Not a number paramInfo.setPort("port"); actual = paramChecker.checkParamInfoList(paramInfos); assertFalse(actual.isSuccess()); assertEquals("Param 'port' is illegal, the value should be between 0 and 65535.", actual.getMessage()); // Success paramInfo.setPort("8848"); actual = paramChecker.checkParamInfoList(paramInfos); assertTrue(actual.isSuccess()); }
@Udf public String encodeParam( @UdfParameter(description = "the value to encode") final String input) { if (input == null) { return null; } final Escaper escaper = UrlEscapers.urlFormParameterEscaper(); return escaper.escape(input); }
@Test public void shouldReturnEmptyStringForEmptyInput() { assertThat(encodeUdf.encodeParam(""), equalTo("")); }
@Override public boolean accept(RequestedField field) { return FIELD_ENTITY_MAPPER.containsKey(field.name()) && acceptsDecorator(field.decorator()); }
@Test void testPermitted() { final FieldDecorator decorator = new TitleDecorator((request, permissions) -> EntitiesTitleResponse.EMPTY_RESPONSE); Assertions.assertThat(decorator.accept(RequestedField.parse("streams"))).isTrue(); Assertions.assertThat(decorator.accept(RequestedField.parse("streams.title"))).isTrue(); Assertions.assertThat(decorator.accept(RequestedField.parse("gl2_source_input"))).isTrue(); Assertions.assertThat(decorator.accept(RequestedField.parse("gl2_source_input.title"))).isTrue(); // For IDs we have a different decorator Assertions.assertThat(decorator.accept(RequestedField.parse("streams.id"))).isFalse(); Assertions.assertThat(decorator.accept(RequestedField.parse("gl2_source_input.id"))).isFalse(); // unknown decorator Assertions.assertThat(decorator.accept(RequestedField.parse("gl2_source_input.uppercase"))).isFalse(); // other fields and entities are not supported Assertions.assertThat(decorator.accept(RequestedField.parse("http_response_code"))).isFalse(); Assertions.assertThat(decorator.accept(RequestedField.parse("http_response_code.title"))).isFalse(); }
@VisibleForTesting void handleResponse(DiscoveryResponseData response) { ResourceType resourceType = response.getResourceType(); switch (resourceType) { case NODE: handleD2NodeResponse(response); break; case D2_URI_MAP: handleD2URIMapResponse(response); break; case D2_URI: handleD2URICollectionResponse(response); break; default: throw new AssertionError("Missing case in enum switch: " + resourceType); } }
@Test public void testHandleD2URIMapResponseWithRemoval() { XdsClientImplFixture fixture = new XdsClientImplFixture(); fixture._clusterSubscriber.setData(D2_URI_MAP_UPDATE_WITH_DATA1); fixture._xdsClientImpl.handleResponse(DISCOVERY_RESPONSE_URI_MAP_DATA_WITH_REMOVAL); fixture.verifyAckSent(1); verify(fixture._resourceWatcher).onChanged(eq(D2_URI_MAP_UPDATE_WITH_DATA1)); verify(fixture._clusterSubscriber).onRemoval(); verifyZeroInteractions(fixture._serverMetricsProvider); D2URIMapUpdate actualData = (D2URIMapUpdate) fixture._clusterSubscriber.getData(); // removed resource will not overwrite the original valid data Assert.assertEquals(Objects.requireNonNull(actualData).getURIMap(), D2_URI_MAP_UPDATE_WITH_DATA1.getURIMap()); }
@Override public void close() { if (!sharedYarnClient) { yarnClient.stop(); } }
@Test void testYarnClientShutDown() { YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor(); yarnClusterDescriptor.close(); assertThat(yarnClient.isInState(Service.STATE.STARTED)).isTrue(); final YarnClient closableYarnClient = YarnClient.createYarnClient(); closableYarnClient.init(yarnConfiguration); closableYarnClient.start(); yarnClusterDescriptor = YarnTestUtils.createClusterDescriptorWithLogging( temporaryFolder.toFile().getAbsolutePath(), new Configuration(), yarnConfiguration, closableYarnClient, false); yarnClusterDescriptor.close(); assertThat(closableYarnClient.isInState(Service.STATE.STOPPED)).isTrue(); }
public Set<String> getFinalParameters() { Set<String> setFinalParams = Collections.newSetFromMap( new ConcurrentHashMap<String, Boolean>()); setFinalParams.addAll(finalParameters); return setFinalParams; }
@Test public void testGetFinalParameters() throws Exception { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); declareProperty("my.var", "x", "x", true); endConfig(); Path fileResource = new Path(CONFIG); Configuration conf = new Configuration(); Set<String> finalParameters = conf.getFinalParameters(); assertFalse("my.var already exists", finalParameters.contains("my.var")); conf.addResource(fileResource); assertEquals("my.var is undefined", "x", conf.get("my.var")); assertFalse("finalparams not copied", finalParameters.contains("my.var")); finalParameters = conf.getFinalParameters(); assertTrue("my.var is not final", finalParameters.contains("my.var")); }
public V get(K key) { lock.readLock().lock(); try { return rawMap.get(MutableObj.of(key)); } finally { lock.readLock().unlock(); } }
@Test public void getConcurrencyTest(){ final SimpleCache<String, String> cache = new SimpleCache<>(); final ConcurrencyTester tester = new ConcurrencyTester(9000); tester.test(()-> cache.get("aaa", ()-> { ThreadUtil.sleep(200); return "aaaValue"; })); assertTrue(tester.getInterval() > 0); assertEquals("aaaValue", cache.get("aaa")); IoUtil.close(tester); }
public static FunctionSegment bind(final FunctionSegment segment, final SegmentType parentSegmentType, final SQLStatementBinderContext binderContext, final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) { FunctionSegment result = new FunctionSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getFunctionName(), segment.getText()); result.setOwner(segment.getOwner()); result.getParameters().addAll(segment.getParameters().stream() .map(each -> ExpressionSegmentBinder.bind(each, parentSegmentType, binderContext, tableBinderContexts, outerTableBinderContexts)).collect(Collectors.toList())); return result; }
@Test void assertBindFunctionExpressionSegment() { FunctionSegment functionSegment = new FunctionSegment(0, 0, "CONCAT", "('%','abc','%')"); SQLStatementBinderContext binderContext = new SQLStatementBinderContext(new ShardingSphereMetaData(), DefaultDatabase.LOGIC_NAME, new MockedDatabaseType(), Collections.emptyList()); FunctionSegment actual = FunctionExpressionSegmentBinder.bind(functionSegment, SegmentType.PROJECTION, binderContext, Collections.emptyMap(), Collections.emptyMap()); assertThat(actual.getStartIndex(), is(functionSegment.getStartIndex())); assertThat(actual.getStopIndex(), is(functionSegment.getStopIndex())); assertThat(actual.getFunctionName(), is("CONCAT")); assertThat(actual.getText(), is("('%','abc','%')")); }
public int getAppTimeoutFailedRetrieved() { return numGetAppTimeoutFailedRetrieved.value(); }
@Test public void testGetAppTimeoutRetrievedFailed() { long totalBadBefore = metrics.getAppTimeoutFailedRetrieved(); badSubCluster.getAppTimeoutFailed(); Assert.assertEquals(totalBadBefore + 1, metrics.getAppTimeoutFailedRetrieved()); }
public Range<PartitionKey> handleNewSinglePartitionDesc(Map<ColumnId, Column> schema, SingleRangePartitionDesc desc, long partitionId, boolean isTemp) throws DdlException { Range<PartitionKey> range; try { range = checkAndCreateRange(schema, desc, isTemp); setRangeInternal(partitionId, isTemp, range); } catch (IllegalArgumentException e) { // Range.closedOpen may throw this if (lower > upper) throw new DdlException("Invalid key range: " + e.getMessage()); } idToDataProperty.put(partitionId, desc.getPartitionDataProperty()); idToReplicationNum.put(partitionId, desc.getReplicationNum()); idToInMemory.put(partitionId, desc.isInMemory()); idToStorageCacheInfo.put(partitionId, desc.getDataCacheInfo()); return range; }
@Test public void testFixedRange2() throws DdlException, AnalysisException { //add columns int columns = 2; Column k1 = new Column("k1", new ScalarType(PrimitiveType.INT), true, null, "", ""); Column k2 = new Column("k2", new ScalarType(PrimitiveType.BIGINT), true, null, "", ""); partitionColumns.add(k1); partitionColumns.add(k2); //add RangePartitionDescs PartitionKeyDesc p1 = new PartitionKeyDesc(new ArrayList<>(), Lists.newArrayList(new PartitionValue("20190101"), new PartitionValue("200"))); singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", p1, null)); partitionInfo = new RangePartitionInfo(partitionColumns); for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { singleRangePartitionDesc.analyze(columns, null); partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(partitionColumns), singleRangePartitionDesc, 20000L, false); } }
@VisibleForTesting public File getStorageLocation(JobID jobId, BlobKey key) throws IOException { checkNotNull(jobId); return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key); }
@Test void permanentBlobCacheTimesOutRecoveredBlobs(@TempDir Path storageDirectory) throws Exception { final JobID jobId = new JobID(); final PermanentBlobKey permanentBlobKey = TestingBlobUtils.writePermanentBlob( storageDirectory, jobId, new byte[] {1, 2, 3, 4}); final File blobFile = BlobUtils.getStorageLocation(storageDirectory.toFile(), jobId, permanentBlobKey); final Configuration configuration = new Configuration(); final long cleanupInterval = 1L; configuration.set(BlobServerOptions.CLEANUP_INTERVAL, cleanupInterval); try (final PermanentBlobCache permanentBlobCache = new PermanentBlobCache( configuration, storageDirectory.toFile(), new VoidBlobStore(), null)) { CommonTestUtils.waitUntilCondition(() -> !blobFile.exists()); } }
public static String substVars(String val, PropertyContainer pc1) throws ScanException { return substVars(val, pc1, null); }
@Test public void jackrabbit_standalone() throws ScanException { String r = OptionHelper.substVars("${jackrabbit.log:-${repo:-jackrabbit}/log/jackrabbit.log}", context); assertEquals("jackrabbit/log/jackrabbit.log", r); }
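For contrast with the nested default above, a minimal sketch (editor's addition, assuming 'undefined.key' is not set in the context) showing that a single-level ':-' default resolves directly:

@Test
public void singleLevelDefault() throws ScanException {
    String r = OptionHelper.substVars("${undefined.key:-fallback}", context);
    assertEquals("fallback", r);
}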
@Override public final boolean readBoolean() throws EOFException { final int ch = read(); if (ch < 0) { throw new EOFException(); } return (ch != 0); }
@Test public void testReadBooleanPosition() throws Exception { boolean read1 = in.readBoolean(0); boolean read2 = in.readBoolean(1); assertFalse(read1); assertTrue(read2); }
public static Builder withSchema(Schema schema) { return new Builder(schema); }
@Test(expected = IllegalArgumentException.class) public void testLogicalTypeWithInvalidInputValueByFieldIndex() { Schema schema = Schema.builder().addLogicalTypeField("char", FixedBytes.of(10)).build(); byte[] byteArrayWithLengthFive = {1, 2, 3, 4, 5}; Row.withSchema(schema).addValues(byteArrayWithLengthFive).build(); }
@Override public boolean supportsSelectForUpdate() { return false; }
@Test void assertSupportsSelectForUpdate() { assertFalse(metaData.supportsSelectForUpdate()); }
public KsqlTopic getPrimarySourceTopic() { return primarySourceTopic; }
@Test public void shouldExtractPrimaryTopicFromJoinSelect() { // Given: final Statement statement = givenStatement(String.format( "SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", STREAM_TOPIC_1, STREAM_TOPIC_2 )); // When: extractor.process(statement, null); // Then: assertThat(extractor.getPrimarySourceTopic(), is(TOPIC_1)); }
@Bean("EsClient") public EsClient provide(Configuration config) { Settings.Builder esSettings = Settings.builder(); // mandatory property defined by bootstrap process esSettings.put("cluster.name", config.get(CLUSTER_NAME.getKey()).get()); boolean clusterEnabled = config.getBoolean(CLUSTER_ENABLED.getKey()).orElse(false); boolean searchNode = !clusterEnabled || SEARCH.equals(NodeType.parse(config.get(CLUSTER_NODE_TYPE.getKey()).orElse(null))); List<HttpHost> httpHosts; if (clusterEnabled && !searchNode) { httpHosts = getHttpHosts(config); LOGGER.info("Connected to remote Elasticsearch: [{}]", displayedAddresses(httpHosts)); } else { // defaults provided in: // * in org.sonar.process.ProcessProperties.Property.SEARCH_HOST // * in org.sonar.process.ProcessProperties.Property.SEARCH_PORT HostAndPort host = HostAndPort.fromParts(config.get(SEARCH_HOST.getKey()).get(), config.getInt(SEARCH_PORT.getKey()).get()); httpHosts = Collections.singletonList(toHttpHost(host, config)); LOGGER.info("Connected to local Elasticsearch: [{}]", displayedAddresses(httpHosts)); } return new EsClient(config.get(CLUSTER_SEARCH_PASSWORD.getKey()).orElse(null), config.get(CLUSTER_ES_HTTP_KEYSTORE.getKey()).orElse(null), config.get(CLUSTER_ES_HTTP_KEYSTORE_PASSWORD.getKey()).orElse(null), httpHosts.toArray(new HttpHost[0])); }
@Test public void es_client_provider_must_throw_IAE_when_incorrect_port_is_used_when_search_disabled() { settings.setProperty(CLUSTER_ENABLED.getKey(), true); settings.setProperty(CLUSTER_NODE_TYPE.getKey(), "application"); settings.setProperty(CLUSTER_SEARCH_HOSTS.getKey(), format("%s:100000,%s:8081", localhostHostname, localhostHostname)); assertThatThrownBy(() -> underTest.provide(settings.asConfig())) .isInstanceOf(IllegalArgumentException.class) .hasMessage(format("Port number out of range: %s:100000", localhostHostname)); }
public Map<String, Long> getEtlFilePaths(String outputPath, BrokerDesc brokerDesc) throws Exception { Map<String, Long> filePathToSize = Maps.newHashMap(); List<TBrokerFileStatus> fileStatuses = Lists.newArrayList(); String etlFilePaths = outputPath + "/*"; try { if (brokerDesc.hasBroker()) { BrokerUtil.parseFile(etlFilePaths, brokerDesc, fileStatuses); } else { HdfsUtil.parseFile(etlFilePaths, brokerDesc, fileStatuses); } } catch (UserException e) { throw new Exception(e); } for (TBrokerFileStatus fstatus : fileStatuses) { if (fstatus.isDir) { continue; } filePathToSize.put(fstatus.getPath(), fstatus.getSize()); } LOG.debug("get spark etl file paths. files map: {}", filePathToSize); return filePathToSize; }
@Test public void testGetEtlFilePaths(@Mocked TFileBrokerService.Client client, @Mocked GlobalStateMgr globalStateMgr, @Injectable BrokerMgr brokerMgr) throws Exception { // list response TBrokerListResponse response = new TBrokerListResponse(); TBrokerOperationStatus status = new TBrokerOperationStatus(); status.statusCode = TBrokerOperationStatusCode.OK; response.opStatus = status; List<TBrokerFileStatus> files = Lists.newArrayList(); String filePath = "hdfs://127.0.0.1:10000/starrocks/jobs/1/label6/9/label6.10.11.12.0.666666.parquet"; files.add(new TBrokerFileStatus(filePath, false, 10, false)); response.files = files; FsBroker fsBroker = new FsBroker("127.0.0.1", 99999); new MockUp<ThriftConnectionPool<TFileBrokerService.Client>>() { @Mock public TFileBrokerService.Client borrowObject(TNetworkAddress address, int timeoutMs) throws Exception { return client; } @Mock public void returnObject(TNetworkAddress address, TFileBrokerService.Client object) { } @Mock public void invalidateObject(TNetworkAddress address, TFileBrokerService.Client object) { } }; new Expectations() { { globalStateMgr.getBrokerMgr(); result = brokerMgr; brokerMgr.getBroker(anyString, anyString); result = fsBroker; } }; try (MockedStatic<ThriftRPCRequestExecutor> thriftConnectionPoolMockedStatic = Mockito.mockStatic(ThriftRPCRequestExecutor.class)) { thriftConnectionPoolMockedStatic.when(() -> ThriftRPCRequestExecutor.call(Mockito.any(), Mockito.any(), Mockito.any())) .thenReturn(response); BrokerDesc brokerDesc = new BrokerDesc(broker, Maps.newHashMap()); SparkEtlJobHandler handler = new SparkEtlJobHandler(); Map<String, Long> filePathToSize = handler.getEtlFilePaths(etlOutputPath, brokerDesc); Assert.assertTrue(filePathToSize.containsKey(filePath)); Assert.assertEquals(10, (long) filePathToSize.get(filePath)); } }
public static AlterReplicaTask alterLakeTablet(long backendId, long dbId, long tableId, long partitionId, long rollupIndexId, long rollupTabletId, long baseTabletId, long version, long jobId, long txnId) { return new AlterReplicaTask(backendId, dbId, tableId, partitionId, rollupIndexId, rollupTabletId, baseTabletId, -1, -1, -1, version, jobId, AlterJobV2.JobType.SCHEMA_CHANGE, TTabletType.TABLET_TYPE_LAKE, txnId, null, Collections.emptyList(), null); }
@Test public void testAlterLakeTablet() { AlterReplicaTask task = AlterReplicaTask.alterLakeTablet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); Assert.assertEquals(1, task.getBackendId()); Assert.assertEquals(2, task.getDbId()); Assert.assertEquals(3, task.getTableId()); Assert.assertEquals(4, task.getPartitionId()); Assert.assertEquals(5, task.getIndexId()); Assert.assertEquals(6, task.getTabletId()); Assert.assertEquals(7, task.getBaseTabletId()); Assert.assertEquals(8, task.getVersion()); Assert.assertEquals(9, task.getJobId()); Assert.assertEquals(AlterJobV2.JobType.SCHEMA_CHANGE, task.getJobType()); TAlterTabletReqV2 request = task.toThrift(); Assert.assertEquals(7, request.base_tablet_id); Assert.assertEquals(6, request.new_tablet_id); Assert.assertEquals(8, request.alter_version); Assert.assertEquals(10, request.txn_id); Assert.assertFalse(request.isSetMaterialized_view_params()); Assert.assertEquals(TTabletType.TABLET_TYPE_LAKE, request.tablet_type); }
@Override public boolean revokeToken(String clientId, String accessToken) { // Query first to make sure the clientId matches OAuth2AccessTokenDO accessTokenDO = oauth2TokenService.getAccessToken(accessToken); if (accessTokenDO == null || ObjectUtil.notEqual(clientId, accessTokenDO.getClientId())) { return false; } // Then delete the token return oauth2TokenService.removeAccessToken(accessToken) != null; }
@Test public void testRevokeToken_clientIdError() { // Prepare parameters String clientId = randomString(); String accessToken = randomString(); // Mock methods OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class); when(oauth2TokenService.getAccessToken(eq(accessToken))).thenReturn(accessTokenDO); // Invoke and assert assertFalse(oauth2GrantService.revokeToken(clientId, accessToken)); }
public static <T> Deduplicate.Values<T> values() { return new Deduplicate.Values<>(DEFAULT_TIME_DOMAIN, DEFAULT_DURATION); }
@Test @Category({NeedsRunner.class, UsesTestStream.class}) public void testEventTime() { Instant base = new Instant(0); TestStream<String> values = TestStream.create(StringUtf8Coder.of()) .advanceWatermarkTo(base) .addElements( TimestampedValue.of("k1", base), TimestampedValue.of("k2", base.plus(Duration.standardSeconds(10))), TimestampedValue.of("k3", base.plus(Duration.standardSeconds(20))), TimestampedValue.of("maybedup", base.plus(Duration.standardSeconds(59)))) .advanceWatermarkTo(base.plus(Duration.standardMinutes(1))) .addElements( TimestampedValue.of("k1", base.plus(Duration.standardSeconds(30))), TimestampedValue.of("k2", base.plus(Duration.standardSeconds(40))), TimestampedValue.of("k3", base.plus(Duration.standardSeconds(50)))) .advanceWatermarkTo( base.plus(Duration.standardMinutes(1)).plus(Deduplicate.DEFAULT_DURATION)) .addElements(TimestampedValue.of("maybedup", base.plus(Duration.standardSeconds(59)))) .advanceWatermarkToInfinity(); PCollection<String> distinctValues = p.apply(values).apply(Deduplicate.<String>values().withTimeDomain(TimeDomain.EVENT_TIME)); PAssert.that(distinctValues) .satisfies( (Iterable<String> input) -> { assertEquals(1, Iterables.frequency(input, "k1")); assertEquals(1, Iterables.frequency(input, "k2")); assertEquals(1, Iterables.frequency(input, "k3")); assertTrue( Iterables.frequency(input, "maybedup") == 1 || Iterables.frequency(input, "maybedup") == 2); return null; }); p.run(); }
public static String bytes2HexString(final byte[] bytes) { if (bytes == null) return null; int len = bytes.length; if (len <= 0) return null; char[] ret = new char[len << 1]; for (int i = 0, j = 0; i < len; i++) { ret[j++] = HEX_DIGITS[bytes[i] >>> 4 & 0x0f]; ret[j++] = HEX_DIGITS[bytes[i] & 0x0f]; } return new String(ret); }
@Test public void bytes2HexString() throws Exception { Assert.assertEquals( hexString, ConvertKit.bytes2HexString(mBytes) ); }
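A worked sketch (editor's addition, assuming HEX_DIGITS is the uppercase hex alphabet): each byte becomes two characters, high nibble first, and an empty array returns null via the len <= 0 guard.

@Test
public void bytes2HexStringWorkedExample() {
    // 0x01 -> "01"; 0xAB is negative as a byte but still masks to nibbles 0xA and 0xB.
    Assert.assertEquals("01AB", ConvertKit.bytes2HexString(new byte[]{0x01, (byte) 0xAB}));
    Assert.assertNull(ConvertKit.bytes2HexString(new byte[0]));
}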
public static Object getInjectValue(String string, boolean decrypt) { Matcher m = pattern.matcher(string); StringBuffer sb = new StringBuffer(); // Parse the content inside pattern "${}" when this pattern is found while (m.find()) { // Get parsing result Object value = getValue(m.group(1), decrypt); // Return directly when the parsing result don't need to be cast to String if (!(value instanceof String)) { return value; } String valueStr = (String)value; if(valueStr.contains("\\$")) { m.appendReplacement(sb, (String)value); } else { m.appendReplacement(sb, Matcher.quoteReplacement((String)value)); } } return m.appendTail(sb).toString(); }
@Test @Ignore public void testGetInjectValueIssue744() { Object oldConfigValue = null; try { oldConfigValue = ConfigInjection.getInjectValue(value, true); } catch (Exception ce) { // expected exception since no valuemap defined yet. assertTrue(ce instanceof ConfigException); } assertNull(oldConfigValue); Map<String, Object> newValueMap = new HashMap<>(); newValueMap.put(configKey, configValue); Config.getInstance().putInConfigCache(valueMapKey, newValueMap); Object newConfigValue = ConfigInjection.getInjectValue(value, true); assertNotNull(newConfigValue); assertEquals(configValue, newConfigValue); }
public static void disablePushConsumption(DefaultMqPushConsumerWrapper wrapper, Set<String> topics) { Set<String> subscribedTopic = wrapper.getSubscribedTopics(); if (subscribedTopic.stream().anyMatch(topics::contains)) { suspendPushConsumer(wrapper); return; } resumePushConsumer(wrapper); }
@Test public void testDisablePushConsumptionNoTopic() { pushConsumerWrapper.setProhibition(true); RocketMqPushConsumerController.disablePushConsumption(pushConsumerWrapper, prohibitionTopics); Assert.assertFalse(pushConsumerWrapper.isProhibition()); }
@Nullable public Span currentSpan() { TraceContext context = currentTraceContext.get(); if (context == null) return null; // Returns a lazy span to reduce overhead when tracer.currentSpan() is invoked just to see if // one exists, or when the result is never used. return new LazySpan(this, context); }
@Test void currentSpan_decoratesExternalContext() { try (Scope scope = currentTraceContext.newScope(context)) { assertThat(tracer.currentSpan().context()) .isNotSameAs(context) .extracting(TraceContext::localRootId) .isEqualTo(context.spanId()); } }
public void logResponse(Config config, HttpRequest request, Response response) { long startTime = request.getStartTime(); long elapsedTime = request.getEndTime() - startTime; response.setResponseTime(elapsedTime); StringBuilder sb = new StringBuilder(); String uri = request.getUrl(); HttpLogModifier responseModifier = logModifier(config, uri); sb.append("response time in milliseconds: ").append(elapsedTime).append('\n'); sb.append(requestCount).append(" < ").append(response.getStatus()); logHeaders(requestCount, " < ", sb, responseModifier, response.getHeaders()); ResourceType rt = response.getResourceType(); if (rt == null || rt.isBinary()) { // don't log body } else { logBody(config, responseModifier, sb, uri, response.getBody(), false, rt); } sb.append('\n'); logger.debug("{}", sb); }
@Test void testResponseLoggingJson() { setup("json", "{a: 1}", "application/json"); httpRequestBuilder.path("/json"); Response response = handle(); match(response.getBodyAsString(), "{a: 1}"); match(response.getContentType(), "application/json"); httpLogger.logResponse(config, request, response); String logs = logAppender.collect(); assertTrue(logs.contains("{a: 1}")); assertTrue(logs.contains("Content-Type: application/json")); }
@Override public String toString() { StringBuilder builder = new StringBuilder("AfterEach.inOrder("); Joiner.on(", ").appendTo(builder, subTriggers); builder.append(")"); return builder.toString(); }
@Test public void testToString() { Trigger trigger = AfterEach.inOrder( StubTrigger.named("t1"), StubTrigger.named("t2"), StubTrigger.named("t3")); assertEquals("AfterEach.inOrder(t1, t2, t3)", trigger.toString()); }
public String getValue() { return value; }
@Test public void shouldHandleIntegerValueAsAString() throws Exception { final ConfigurationValue configurationValue = new ConfigurationValue(1); assertThat(configurationValue.getValue(), is("1")); }
@VisibleForTesting void generateScript(File localScript) throws IOException { if (verbose) { LOG.info("Generating script at: " + localScript.getAbsolutePath()); } String halrJarPath = HadoopArchiveLogsRunner.class.getProtectionDomain() .getCodeSource().getLocation().getPath(); String harJarPath = HadoopArchives.class.getProtectionDomain() .getCodeSource().getLocation().getPath(); String classpath = halrJarPath + File.pathSeparator + harJarPath; FileWriterWithEncoding fw = null; try { fw = FileWriterWithEncoding.builder() .setFile(localScript) .setCharset(StandardCharsets.UTF_8) .get(); fw.write("#!/bin/bash\nset -e\nset -x\n"); int containerCount = 1; for (AppInfo context : eligibleApplications) { fw.write("if [ \"$YARN_SHELL_ID\" == \""); fw.write(Integer.toString(containerCount)); fw.write("\" ]; then\n\tappId=\""); fw.write(context.getAppId()); fw.write("\"\n\tuser=\""); fw.write(context.getUser()); fw.write("\"\n\tworkingDir=\""); fw.write(context.getWorkingDir().toString()); fw.write("\"\n\tremoteRootLogDir=\""); fw.write(context.getRemoteRootLogDir().toString()); fw.write("\"\n\tsuffix=\""); fw.write(context.getSuffix()); fw.write("\"\nel"); containerCount++; } fw.write("se\n\techo \"Unknown Mapping!\"\n\texit 1\nfi\n"); fw.write("export HADOOP_CLIENT_OPTS=\"-Xmx"); fw.write(Long.toString(memory)); fw.write("m\"\n"); fw.write("export HADOOP_CLASSPATH="); fw.write(classpath); fw.write("\n\"$HADOOP_HOME\"/bin/hadoop "); fw.write(HadoopArchiveLogsRunner.class.getName()); fw.write(" -appId \"$appId\" -user \"$user\" -workingDir "); fw.write("\"$workingDir\""); fw.write(" -remoteRootLogDir "); fw.write("\"$remoteRootLogDir\""); fw.write(" -suffix "); fw.write("\"$suffix\""); if (!proxy) { fw.write(" -noProxy\n"); } fw.write("\n"); } finally { if (fw != null) { fw.close(); } } }
@Test(timeout = 10000) public void testGenerateScript() throws Exception { _testGenerateScript(false); _testGenerateScript(true); }
@Override public boolean isUsedInLabelTemplate(PipelineConfig pipelineConfig) { CaseInsensitiveString materialName = getName(); return materialName != null && pipelineConfig.getLabelTemplate().toLowerCase().contains(String.format("${%s}", materialName.toLower())); }
@Test void shouldReturnFalseIfMaterialNameIsNotDefined() { AbstractMaterialConfig material = new TestMaterialConfig("test"); PipelineConfig pipelineConfig = new PipelineConfig(new CaseInsensitiveString("blah"), "${COUNT}-${test}-test", "", false, null, new BaseCollection<>()); assertThat(material.isUsedInLabelTemplate(pipelineConfig)).isFalse(); }
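A hedged positive-case sketch (editor's addition; it assumes AbstractMaterialConfig exposes a setName mutator, which this excerpt does not show): once the material is named 'test', the ${test} token in the template matches case-insensitively.

@Test
void shouldReturnTrueIfMaterialNameAppearsInLabelTemplate() {
    AbstractMaterialConfig material = new TestMaterialConfig("test");
    material.setName(new CaseInsensitiveString("test"));
    PipelineConfig pipelineConfig = new PipelineConfig(new CaseInsensitiveString("blah"), "${COUNT}-${test}-test", "", false, null, new BaseCollection<>());
    assertThat(material.isUsedInLabelTemplate(pipelineConfig)).isTrue();
}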
@GetMapping("edit") public String getProductEditPage() { return "catalogue/products/edit"; }
@Test void getProductEditPage_ReturnsProductEditPage() { // given // when var result = this.controller.getProductEditPage(); // then assertEquals("catalogue/products/edit", result); verifyNoInteractions(this.productsRestClient); }
@Override public PiAction mapTreatment(TrafficTreatment treatment, PiTableId piTableId) throws PiInterpreterException { if (FORWARDING_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapForwardingTreatment(treatment, piTableId); } else if (PRE_NEXT_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapPreNextTreatment(treatment, piTableId); } else if (ACL_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapAclTreatment(treatment, piTableId); } else if (NEXT_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapNextTreatment(treatment, piTableId); } else if (E_NEXT_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapEgressNextTreatment(treatment, piTableId); } else { throw new PiInterpreterException(format( "Treatment mapping not supported for table '%s'", piTableId)); } }
@Test public void testRoutingV4TreatmentEmpty() throws Exception { TrafficTreatment treatment = DefaultTrafficTreatment.emptyTreatment(); PiAction mappedAction = interpreter.mapTreatment( treatment, FabricConstants.FABRIC_INGRESS_FORWARDING_ROUTING_V4); PiAction expectedAction = PiAction.builder() .withId(FabricConstants.FABRIC_INGRESS_FORWARDING_NOP_ROUTING_V4) .build(); assertEquals(expectedAction, mappedAction); }
public static SqlDecimal widen(final SqlType t0, final SqlType t1) { final SqlDecimal lDecimal = DecimalUtil.toSqlDecimal(t0); final SqlDecimal rDecimal = DecimalUtil.toSqlDecimal(t1); final int wholePrecision = Math.max( lDecimal.getPrecision() - lDecimal.getScale(), rDecimal.getPrecision() - rDecimal.getScale() ); final int scale = Math.max(lDecimal.getScale(), rDecimal.getScale()); return SqlTypes.decimal(wholePrecision + scale, scale); }
@Test public void shouldWidenDecimalAndDecimal() { assertThat( "first contained", DecimalUtil.widen( SqlTypes.decimal(1, 0), SqlTypes.decimal(14, 3) ), is(SqlTypes.decimal(14, 3)) ); assertThat( "second contained", DecimalUtil.widen( SqlTypes.decimal(14, 3), SqlTypes.decimal(1, 0) ), is(SqlTypes.decimal(14, 3)) ); assertThat( "fractional", DecimalUtil.widen( SqlTypes.decimal(14, 14), SqlTypes.decimal(1, 1) ), is(SqlTypes.decimal(14, 14)) ); assertThat( "overlap", DecimalUtil.widen( SqlTypes.decimal(14, 4), SqlTypes.decimal(14, 5) ), is(SqlTypes.decimal(15, 5)) ); }
@Override public EntityStatementJWS establishIdpTrust(URI issuer) { var trustedFederationStatement = fetchTrustedFederationStatement(issuer); // the federation statement from the master will establish trust in the JWKS and the issuer URL // of the idp, // we still need to fetch the entity configuration directly afterward to get the full // entity statement return fetchTrustedEntityConfiguration(issuer, trustedFederationStatement.body().jwks()); }
@Test void establishTrust_expiredFedmasterConfig() { var client = new FederationMasterClientImpl(FEDERATION_MASTER, federationApiClient, clock); var issuer = URI.create("https://idp-tk.example.com"); var fedmasterKeypair = ECKeyGenerator.example(); var fedmasterEntityConfigurationJws = expiredFedmasterConfiguration(fedmasterKeypair); when(federationApiClient.fetchEntityConfiguration(FEDERATION_MASTER)) .thenReturn(fedmasterEntityConfigurationJws); // when var e = assertThrows(FederationException.class, () -> client.establishIdpTrust(issuer)); // then assertEquals( "entity statement of 'https://fedmaster.example.com' expired or not yet valid", e.getMessage()); }
public long getEarliestMsgStoretime(final String addr, final MessageQueue mq, final long timeoutMillis) throws RemotingException, MQBrokerException, InterruptedException { GetEarliestMsgStoretimeRequestHeader requestHeader = new GetEarliestMsgStoretimeRequestHeader(); requestHeader.setTopic(mq.getTopic()); requestHeader.setQueueId(mq.getQueueId()); requestHeader.setBrokerName(mq.getBrokerName()); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_EARLIEST_MSG_STORETIME, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { GetEarliestMsgStoretimeResponseHeader responseHeader = (GetEarliestMsgStoretimeResponseHeader) response.decodeCommandCustomHeader(GetEarliestMsgStoretimeResponseHeader.class); return responseHeader.getTimestamp(); } default: break; } throw new MQBrokerException(response.getCode(), response.getRemark(), addr); }
@Test public void testGetEarliestMsgStoretime() throws Exception { doAnswer((Answer<RemotingCommand>) mock -> { RemotingCommand request = mock.getArgument(1); final RemotingCommand response = RemotingCommand.createResponseCommand(GetEarliestMsgStoretimeResponseHeader.class); final GetEarliestMsgStoretimeResponseHeader responseHeader = (GetEarliestMsgStoretimeResponseHeader) response.readCustomHeader(); responseHeader.setTimestamp(100L); response.makeCustomHeaderToNet(); response.setCode(ResponseCode.SUCCESS); response.setOpaque(request.getOpaque()); return response; }).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong()); long t = mqClientAPI.getEarliestMsgStoretime(brokerAddr, new MessageQueue(topic, brokerName, 0), 10000); assertThat(t).isEqualTo(100L); }
public static SqlType fromValue(final BigDecimal value) { // SqlDecimal does not support negative scale: final BigDecimal decimal = value.scale() < 0 ? value.setScale(0, BigDecimal.ROUND_UNNECESSARY) : value; /* We can't use BigDecimal.precision() directly for all cases, since it defines * precision differently from SQL Decimal. * In particular, if the decimal is between -0.1 and 0.1, BigDecimal precision can be * lower than scale, which is disallowed in SQL Decimal. For example, 0.005 in * BigDecimal has a precision,scale of 1,3; whereas we expect 4,3. * If the decimal is in (-1,1) but outside (-0.1,0.1), the code doesn't throw, but * gives lower precision than expected (e.g., 0.8 has precision 1 instead of 2). * To account for this edge case, we just take the scale and add one and use that * for the precision instead. This works since BigDecimal defines scale as the * number of digits to the right of the period; which is one lower than the precision for * anything in the range (-1, 1). * This covers the case where BigDecimal has a value of 0. * Note: This solution differs from the SQL definition in that it returns (4, 3) for * both "0.005" and ".005", whereas SQL expects (3, 3) for the latter. This is unavoidable * if we use BigDecimal as an intermediate representation, since the two strings are parsed * identically by it to have precision 1. */ if (decimal.compareTo(BigDecimal.ONE) < 0 && decimal.compareTo(BigDecimal.ONE.negate()) > 0) { return SqlTypes.decimal(decimal.scale() + 1, decimal.scale()); } return SqlTypes.decimal(decimal.precision(), Math.max(decimal.scale(), 0)); }
@Test public void shouldConvertFromBigDecimalWithNegativeScale() { assertThat( DecimalUtil.fromValue(new BigDecimal("1e3")), is(SqlTypes.decimal(4, 0)) ); }
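Worked sketches (editor's addition) of the edge cases the long comment in fromValue spells out: values in (-1, 1), including zero, take precision = scale + 1.

@Test
public void shouldConvertFromBigDecimalInUnitInterval() {
    assertThat(DecimalUtil.fromValue(new BigDecimal("0.005")), is(SqlTypes.decimal(4, 3)));
    assertThat(DecimalUtil.fromValue(new BigDecimal("0.8")), is(SqlTypes.decimal(2, 1)));
    assertThat(DecimalUtil.fromValue(BigDecimal.ZERO), is(SqlTypes.decimal(1, 0)));
}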
public Set<String> getFieldNames() { return Collections.unmodifiableSet(fields.keySet()); }
@Test public void testGetFieldNames() throws Exception { assertTrue("Missing fields in set!", symmetricDifference(message.getFieldNames(), Sets.newHashSet("_id", "timestamp", "source", "message")).isEmpty()); message.addField("testfield", "testvalue"); assertTrue("Missing fields in set!", symmetricDifference(message.getFieldNames(), Sets.newHashSet("_id", "timestamp", "source", "message", "testfield")).isEmpty()); }
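Because getFieldNames wraps the key set in Collections.unmodifiableSet, mutation through the returned set must fail; a short companion sketch (editor's addition; assertThrows requires JUnit 4.13+):

@Test
public void testGetFieldNamesIsUnmodifiable() {
    assertThrows(UnsupportedOperationException.class, () -> message.getFieldNames().add("newfield"));
}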
@Override public InMemoryReaderIterator iterator() throws IOException { return new InMemoryReaderIterator(); }
@Test public void testDynamicSplit() throws Exception { List<Integer> elements = Arrays.asList(33, 44, 55, 66, 77, 88); // Should initially read elements at indices: 44@1, 55@2, 66@3, 77@4 Coder<Integer> coder = BigEndianIntegerCoder.of(); InMemoryReader<Integer> inMemoryReader = new InMemoryReader<>(encodedElements(elements, coder), 1, 4, coder); // Unstarted iterator. try (InMemoryReader<Integer>.InMemoryReaderIterator iterator = inMemoryReader.iterator()) { assertNull(iterator.requestDynamicSplit(ReaderTestUtils.splitRequestAtIndex(3L))); } // Illegal proposed split position. try (InMemoryReader<Integer>.InMemoryReaderIterator iterator = inMemoryReader.iterator()) { assertNull(iterator.requestDynamicSplit(ReaderTestUtils.splitRequestAtIndex(3L))); // Poke the iterator so that we can test dynamic splitting. assertTrue(iterator.start()); assertNull( iterator.requestDynamicSplit(toDynamicSplitRequest(new ApproximateSplitRequest()))); assertNull(iterator.requestDynamicSplit(ReaderTestUtils.splitRequestAtIndex(null))); } // Successful update. try (InMemoryReader<Integer>.InMemoryReaderIterator iterator = inMemoryReader.iterator()) { // Poke the iterator so that we can test dynamic splitting. assertTrue(iterator.start()); NativeReader.DynamicSplitResult dynamicSplitResult = iterator.requestDynamicSplit(ReaderTestUtils.splitRequestAtIndex(3L)); Assert.assertEquals( ReaderTestUtils.positionAtIndex(3L), ReaderTestUtils.positionFromSplitResult(dynamicSplitResult)); assertEquals(3, iterator.tracker.getStopPosition().longValue()); assertEquals(44, iterator.getCurrent().intValue()); assertTrue(iterator.advance()); assertEquals(55, iterator.getCurrent().intValue()); assertFalse(iterator.advance()); } // Proposed split position is before the current position, no update. try (InMemoryReader<Integer>.InMemoryReaderIterator iterator = inMemoryReader.iterator()) { // Poke the iterator so that we can test dynamic splitting. assertTrue(iterator.start()); assertEquals(44, iterator.getCurrent().intValue()); assertTrue(iterator.advance()); assertEquals(55, iterator.getCurrent().intValue()); assertTrue(iterator.advance()); // Returns true => we promised to return 66. // Now we have to refuse the split. assertNull(iterator.requestDynamicSplit(ReaderTestUtils.splitRequestAtIndex(3L))); assertEquals(4, iterator.tracker.getStopPosition().longValue()); assertEquals(66, iterator.getCurrent().intValue()); assertFalse(iterator.advance()); } // Proposed split position is after the current stop (end) position, no update. try (InMemoryReader<Integer>.InMemoryReaderIterator iterator = inMemoryReader.iterator()) { // Poke the iterator so that we can test dynamic splitting. assertTrue(iterator.start()); assertNull(iterator.requestDynamicSplit(ReaderTestUtils.splitRequestAtIndex(5L))); assertEquals(4, iterator.tracker.getStopPosition().longValue()); } }
public void setAllDisabled( boolean value ) { for ( PermissionsCheckboxes permissionsCheckboxes : ALL_PERMISSIONS ) { permissionsCheckboxes.permissionCheckbox.setDisabled( value ); } }
@Test public void testSetAllEnabledEnablesAll() { boolean disabled = false; permissionsCheckboxHandler.setAllDisabled( disabled ); verify( readCheckbox, times( 1 ) ).setDisabled( disabled ); verify( writeCheckbox, times( 1 ) ).setDisabled( disabled ); verify( deleteCheckbox, times( 1 ) ).setDisabled( disabled ); verify( manageCheckbox, times( 1 ) ).setDisabled( disabled ); }
public boolean compareIndexSetting(String tableName, Map<String, Object> settings) { if ((CollectionUtils.isEmpty(settings) || CollectionUtils.isEmpty((Map) settings.get("index"))) && Objects.isNull(indexSettingStructures.get(tableName))) { return true; } return indexSettingStructures.containsKey(tableName) && indexSettingStructures.get(tableName). equals(new UpdatableIndexSettings((Map<String, Object>) settings.get("index"))); }
@Test public void compareIndexSetting() { IndexStructures structures = new IndexStructures(); HashMap<String, Object> settings = new HashMap<>(); HashMap<String, Object> indexSettings = new HashMap<>(); settings.put("index", indexSettings); indexSettings.put("number_of_replicas", "1"); indexSettings.put("number_of_shards", "1"); structures.putStructure("test", new Mappings(), settings); HashMap<String, Object> settings2 = new HashMap<>(); HashMap<String, Object> indexSettings2 = new HashMap<>(); settings2.put("index", indexSettings2); indexSettings2.put("number_of_replicas", "1"); indexSettings2.put("number_of_shards", "1"); Assertions.assertTrue(structures.compareIndexSetting( "test", settings2 )); HashMap<String, Object> settings3 = new HashMap<>(); HashMap<String, Object> indexSettings3 = new HashMap<>(); settings3.put("index", indexSettings3); indexSettings3.put("number_of_replicas", "1"); indexSettings3.put("number_of_shards", "2"); Assertions.assertFalse(structures.compareIndexSetting( "test", settings3 )); }
static void populateOutputFields(final PMML4Result toUpdate, final ProcessingDTO processingDTO) { logger.debug("populateOutputFields {} {}", toUpdate, processingDTO); for (KiePMMLOutputField outputField : processingDTO.getOutputFields()) { Object variableValue = outputField.evaluate(processingDTO); if (variableValue != null) { String variableName = outputField.getName(); toUpdate.addResultVariable(variableName, variableValue); processingDTO.addKiePMMLNameValue(new KiePMMLNameValue(variableName, variableValue)); } } }
@Test void populatePredictedOutputField2() { KiePMMLNameValue kiePMMLNameValue = new KiePMMLNameValue("targetField", 54346.32454); KiePMMLOutputField outputField = KiePMMLOutputField.builder(OUTPUT_NAME, Collections.emptyList()) .withResultFeature(RESULT_FEATURE.PREDICTED_VALUE) .withTargetField(kiePMMLNameValue.getName()) .build(); KiePMMLTestingModel kiePMMLModel = testingModelBuilder(outputField).build(); ProcessingDTO processingDTO = buildProcessingDTOWithNameValues(kiePMMLModel, kiePMMLNameValue); PMML4Result toUpdate = new PMML4Result(); PostProcess.populateOutputFields(toUpdate, processingDTO); assertThat(toUpdate.getResultVariables()).isNotEmpty(); assertThat(toUpdate.getResultVariables()).containsKey(OUTPUT_NAME); assertThat(toUpdate.getResultVariables().get(OUTPUT_NAME)).isEqualTo(kiePMMLNameValue.getValue()); }
@Override public CompletableFuture<Void> applyExperimentTreatment(final Account account, final Device device) { return idleDeviceNotificationScheduler.scheduleNotification(account, device, PREFERRED_NOTIFICATION_TIME); }
@Test void applyExperimentTreatment() { final Account account = mock(Account.class); final Device device = mock(Device.class); experiment.applyExperimentTreatment(account, device); verify(idleDeviceNotificationScheduler) .scheduleNotification(account, device, NotifyIdleDevicesWithMessagesExperiment.PREFERRED_NOTIFICATION_TIME); }
public String generateNacosServiceName(String rawServiceName) { if (rawServiceName.contains(Constants.DEFAULT_GROUP)) { return rawServiceName; } return Constants.DEFAULT_GROUP + AddressServerConstants.GROUP_SERVICE_NAME_SEP + rawServiceName; }
@Test void testGenerateNacosServiceName() { AddressServerGeneratorManager manager = new AddressServerGeneratorManager(); final String containDefault = manager.generateNacosServiceName("DEFAULT_GROUP@@test"); assertEquals("DEFAULT_GROUP@@test", containDefault); final String product = manager.generateNacosServiceName("product"); assertEquals("DEFAULT_GROUP@@product", product); }
@SuppressWarnings("unchecked") public static List<String> createRobotVariablesFromCamelExchange(Exchange exchange, boolean allowContextMapAll) throws TypeConversionException, NoTypeConversionAvailableException { Map<String, Object> variablesMap = ExchangeHelper.createVariableMap(exchange, allowContextMapAll); List<String> variableKeyValuePairList = new ArrayList<>(); for (Map.Entry<String, Object> variableEntry : variablesMap.entrySet()) { if (ROBOT_VAR_CAMEL_BODY.equals(variableEntry.getKey())) { String bodyVariable = variableEntry.getKey() + ROBOT_VAR_FIELD_SEPERATOR + exchange.getContext().getTypeConverter().mandatoryConvertTo(String.class, variableEntry.getValue()); variableKeyValuePairList.add(bodyVariable); } else if (ROBOT_VAR_CAMEL_HEADERS.equals(variableEntry.getKey())) { // here the param is the headers map createStringValueOfVariablesFromMap(variableKeyValuePairList, ObjectHelper.cast(Map.class, variableEntry.getValue()), exchange, new StringBuilder(), ROBOT_VAR_CAMEL_HEADERS, true); } else if (ROBOT_VAR_CAMEL_VARIABLES.equals(variableEntry.getKey())) { // here the param is the variables map createStringValueOfVariablesFromMap(variableKeyValuePairList, exchange.getVariables(), exchange, new StringBuilder(), ROBOT_VAR_CAMEL_VARIABLES, true); } else if (ROBOT_CAMEL_EXCHANGE_NAME.equals(variableEntry.getKey())) { // here the param is the camel exchange createStringValueOfVariablesFromMap(variableKeyValuePairList, exchange.getProperties(), ObjectHelper.cast(Exchange.class, variableEntry.getValue()), new StringBuilder(), ROBOT_VAR_CAMEL_PROPERTIES, true); } } return variableKeyValuePairList; }
@SuppressWarnings("unchecked") @Test public void testCreateRobotVariablesFromCamelExchange() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMinimumMessageCount(1); Map<String, Object> headers = new HashMap<>(); headers.put("stringKey", "str1"); headers.put("numericIntKey", 1); headers.put("numericBigDecimalKey", new BigDecimal(2)); Map<String, Object> inner1 = new HashMap<>(); inner1.put("innerStringKey", "str1"); inner1.put("innerNumericIntKey", 1); inner1.put("innerNumericBigDecimalKey", new BigDecimal(2)); headers.put("inner", inner1); exchange.getIn().setHeaders(headers); exchange.setProperty("stringKey", "str1"); exchange.setProperty("numericIntKey", 1); exchange.setProperty("numericBigDecimalKey", new BigDecimal(2)); exchange.setProperty("inner", inner1); Exchange responseExchange = template.send("direct:setVariableCamelExchange", exchange); List<String> camelRobotVariables = ObjectHelper.cast(List.class, responseExchange.getIn().getHeader(RobotFrameworkCamelConstants.CAMEL_ROBOT_VARIABLES)); for (String camelRobotVariable : camelRobotVariables) { if (!camelRobotVariable.contains("headers") && !camelRobotVariable.contains("properties") && camelRobotVariable.contains("body")) { assertEquals("body:Hello Robot", camelRobotVariable, "Body variable content should be [body:<body_value>]"); } if (camelRobotVariable.contains("headers") && camelRobotVariable.contains("stringKey")) { assertEquals("headers.stringKey:str1", camelRobotVariable, "Header variable content should be [headers.stringKey:<header_value>]"); } if (camelRobotVariable.contains("headers") && camelRobotVariable.contains("numericIntKey")) { assertEquals("headers.numericIntKey:1", camelRobotVariable, "Header variable content should be [headers.numericIntKey:<header_value>]"); } if (camelRobotVariable.contains("headers") && camelRobotVariable.contains("numericBigDecimalKey")) { assertEquals("headers.numericBigDecimalKey:2", camelRobotVariable, "Header variable content should be [headers.numericBigDecimalKey:<header_value>]"); } if (camelRobotVariable.contains("headers") && camelRobotVariable.contains("innerStringKey")) { assertEquals("headers.inner.innerStringKey:str1", camelRobotVariable, "Header variable content should be [headers.inner.innerStringKey:<header_value>]"); } if (camelRobotVariable.contains("headers") && camelRobotVariable.contains("innerNumericIntKey")) { assertEquals("headers.inner.innerNumericIntKey:1", camelRobotVariable, "Header variable content should be [headers.inner.innerNumericIntKey:<header_value>]"); } if (camelRobotVariable.contains("headers") && camelRobotVariable.contains("innerNumericBigDecimalKey")) { assertEquals("headers.inner.innerNumericBigDecimalKey:2", camelRobotVariable, "Header variable content should be [headers.inner.innerNumericBigDecimalKey:<header_value>]"); } if (camelRobotVariable.contains("properties") && camelRobotVariable.contains("stringKey")) { assertEquals("properties.stringKey:str1", camelRobotVariable, "Property variable content should be [properties.stringKey:<property_value>]"); } if (camelRobotVariable.contains("properties") && camelRobotVariable.contains("numericIntKey")) { assertEquals("properties.numericIntKey:1", camelRobotVariable, "Property variable content should be [properties.numericIntKey:<property_value>]"); } if (camelRobotVariable.contains("properties") && camelRobotVariable.contains("numericBigDecimalKey")) { assertEquals("properties.numericBigDecimalKey:2", camelRobotVariable, "Property variable content should be [properties.numericBigDecimalKey:<property_value>]"); } if (camelRobotVariable.contains("properties") && camelRobotVariable.contains("innerStringKey")) { assertEquals("properties.inner.innerStringKey:str1", camelRobotVariable, "Property variable content should be [properties.inner.innerStringKey:<property_value>]"); } if (camelRobotVariable.contains("properties") && camelRobotVariable.contains("innerNumericIntKey")) { assertEquals("properties.inner.innerNumericIntKey:1", camelRobotVariable, "Property variable content should be [properties.inner.innerNumericIntKey:<property_value>]"); } if (camelRobotVariable.contains("properties") && camelRobotVariable.contains("innerNumericBigDecimalKey")) { assertEquals("properties.inner.innerNumericBigDecimalKey:2", camelRobotVariable, "Property variable content should be [properties.inner.innerNumericBigDecimalKey:<property_value>]"); } } MockEndpoint.assertIsSatisfied(context); Exchange exchange = mock.getExchanges().get(0); assertEquals(0, (int) ObjectHelper.cast(Integer.class, exchange.getIn().getHeader(RobotFrameworkCamelConstants.CAMEL_ROBOT_RETURN_CODE))); }
public static List<Event> computeEventDiff(final Params params) { final List<Event> events = new ArrayList<>(); emitPerNodeDiffEvents(createBaselineParams(params), events); emitWholeClusterDiffEvent(createBaselineParams(params), events); emitDerivedBucketSpaceStatesDiffEvents(params, events); return events; }
@Test void may_have_merges_pending_down_edge_event_emitted_if_derived_bucket_space_state_differs_from_baseline() { EventFixture f = EventFixture.createForNodes(3) .clusterStateBefore("distributor:3 storage:3") .derivedClusterStateBefore("default", "distributor:3 storage:3 .1.s:m") .derivedStorageNodeReasonBefore("default", 1, NodeStateReason.MAY_HAVE_MERGES_PENDING) .clusterStateAfter("distributor:3 storage:3") .derivedClusterStateAfter("default", "distributor:3 storage:3"); List<Event> events = f.computeEventDiff(); assertThat(events.size(), equalTo(2)); assertThat(events, hasItem(allOf( eventForNode(storageNode(1)), nodeEventForBucketSpace("default"), nodeEventWithDescription("Altered node state in cluster state from 'M' to 'U'")))); assertThat(events, hasItem(allOf( eventForNode(storageNode(1)), nodeEventForBucketSpace("default"), nodeEventWithDescription("Node no longer has merges pending")))); }
public KafkaConfiguration getConfiguration() { return configuration; }
@Test public void testPropertiesSet() { String uri = "kafka:mytopic?brokers=broker1:12345,broker2:12566&partitioner=com.class.Party"; KafkaEndpoint endpoint = context.getEndpoint(uri, KafkaEndpoint.class); assertEquals("broker1:12345,broker2:12566", endpoint.getConfiguration().getBrokers()); assertEquals("mytopic", endpoint.getConfiguration().getTopic()); assertEquals("com.class.Party", endpoint.getConfiguration().getPartitioner()); }
public static String convertFreshnessToCron(IntervalFreshness intervalFreshness) { switch (intervalFreshness.getTimeUnit()) { case SECOND: return validateAndConvertCron( intervalFreshness, SECOND_CRON_UPPER_BOUND, SECOND_CRON_EXPRESSION_TEMPLATE); case MINUTE: return validateAndConvertCron( intervalFreshness, MINUTE_CRON_UPPER_BOUND, MINUTE_CRON_EXPRESSION_TEMPLATE); case HOUR: return validateAndConvertCron( intervalFreshness, HOUR_CRON_UPPER_BOUND, HOUR_CRON_EXPRESSION_TEMPLATE); case DAY: return validateAndConvertDayCron(intervalFreshness); default: throw new ValidationException( String.format( "Unknown freshness time unit: %s.", intervalFreshness.getTimeUnit())); } }
@Test
void testConvertHourFreshnessToCronExpression() {
    // verify illegal freshness
    assertThatThrownBy(() -> convertFreshnessToCron(IntervalFreshness.ofHour("24")))
            .isInstanceOf(ValidationException.class)
            .hasMessageContaining(
                    "In full refresh mode, freshness must be less than 24 when the time unit is HOUR.");
    assertThatThrownBy(() -> convertFreshnessToCron(IntervalFreshness.ofHour("14")))
            .isInstanceOf(ValidationException.class)
            .hasMessageContaining(
                    "In full refresh mode, only freshness that are factors of 24 are currently supported when the time unit is HOUR.");
    String actual1 = convertFreshnessToCron(IntervalFreshness.ofHour("12"));
    assertThat(actual1).isEqualTo("0 0 0/12 * * ? *");
    String actual2 = convertFreshnessToCron(IntervalFreshness.ofHour("4"));
    assertThat(actual2).isEqualTo("0 0 0/4 * * ? *");
    String actual3 = convertFreshnessToCron(IntervalFreshness.ofHour("1"));
    assertThat(actual3).isEqualTo("0 0 0/1 * * ? *");
}
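// A minimal sketch of what the HOUR branch of validateAndConvertCron is implied to do by the
// test above: the freshness must be below the 24-hour upper bound and evenly divide 24, and is
// then interpolated into a Quartz cron template. The helper name, exception type and template
// literal here are illustrative assumptions, not the actual Flink implementation.
static String hourFreshnessToCronSketch(int freshness) {
    if (freshness >= 24) {
        throw new IllegalArgumentException(
                "In full refresh mode, freshness must be less than 24 when the time unit is HOUR.");
    }
    if (24 % freshness != 0) {
        throw new IllegalArgumentException(
                "In full refresh mode, only freshness that are factors of 24 are currently supported when the time unit is HOUR.");
    }
    // e.g. freshness = 4 yields "0 0 0/4 * * ? *": at second 0, minute 0, every 4 hours
    return String.format("0 0 0/%d * * ? *", freshness);
}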
public PublisherAgreement getPublisherAgreement(UserData user) {
    var eclipseToken = checkEclipseToken(user);
    var personId = user.getEclipsePersonId();
    if (StringUtils.isEmpty(personId)) {
        return null;
    }
    checkApiUrl();
    var urlTemplate = eclipseApiUrl + "openvsx/publisher_agreement/{personId}";
    var uriVariables = Map.of("personId", personId);
    var headers = new HttpHeaders();
    headers.setBearerAuth(eclipseToken.accessToken);
    headers.setAccept(Arrays.asList(MediaType.APPLICATION_JSON));
    var request = new HttpEntity<>(headers);
    try {
        var json = restTemplate.exchange(urlTemplate, HttpMethod.GET, request, String.class, uriVariables);
        return parseAgreementResponse(json);
    } catch (RestClientException exc) {
        HttpStatusCode status = HttpStatus.INTERNAL_SERVER_ERROR;
        if (exc instanceof HttpStatusCodeException) {
            status = ((HttpStatusCodeException) exc).getStatusCode();
            // The endpoint yields 404 if the specified user has not signed a publisher agreement
            if (status == HttpStatus.NOT_FOUND)
                return null;
        }
        var url = UriComponentsBuilder.fromUriString(urlTemplate).build(uriVariables);
        logger.error("Get request failed with URL: " + url, exc);
        throw new ErrorResultException("Request for retrieving publisher agreement failed: " + exc.getMessage(), status);
    }
}
@Test public void testGetPublisherAgreementNotFound() throws Exception { var user = mockUser(); user.setEclipsePersonId("test"); var urlTemplate = "https://test.openvsx.eclipse.org/openvsx/publisher_agreement/{personId}"; Mockito.when(restTemplate.exchange(eq(urlTemplate), eq(HttpMethod.GET), any(HttpEntity.class), eq(String.class), eq(Map.of("personId", "test")))) .thenThrow(new HttpClientErrorException(HttpStatus.NOT_FOUND)); var agreement = eclipse.getPublisherAgreement(user); assertThat(agreement).isNull(); }
public List<ColumnMatchResult<?>> getMismatchedColumns(List<Column> columns, ChecksumResult controlChecksum, ChecksumResult testChecksum) { return columns.stream() .flatMap(column -> columnValidators.get(column.getCategory()).get().validate(column, controlChecksum, testChecksum).stream()) .filter(columnMatchResult -> !columnMatchResult.isMatched()) .collect(toImmutableList()); }
@Test
public void testFloatingPointArray() {
    List<Column> columns = ImmutableList.of(DOUBLE_ARRAY_COLUMN, REAL_ARRAY_COLUMN);
    Map<String, Object> stableCounts = new HashMap<>(10);
    stableCounts.put("double_array$nan_count", 2L);
    stableCounts.put("double_array$pos_inf_count", 3L);
    stableCounts.put("double_array$neg_inf_count", 4L);
    stableCounts.put("double_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xb}));
    stableCounts.put("double_array$cardinality_sum", 10L);
    stableCounts.put("real_array$nan_count", 2L);
    stableCounts.put("real_array$pos_inf_count", 3L);
    stableCounts.put("real_array$neg_inf_count", 4L);
    stableCounts.put("real_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xd}));
    stableCounts.put("real_array$cardinality_sum", 10L);
    Map<String, Object> emptyTableCounts = new HashMap<>(12);
    emptyTableCounts.put("double_array$nan_count", null);
    emptyTableCounts.put("double_array$pos_inf_count", null);
    emptyTableCounts.put("double_array$neg_inf_count", null);
    emptyTableCounts.put("double_array$cardinality_checksum", null);
    emptyTableCounts.put("double_array$cardinality_sum", null);
    emptyTableCounts.put("double_array$sum", null);
    emptyTableCounts.put("real_array$nan_count", null);
    emptyTableCounts.put("real_array$pos_inf_count", null);
    emptyTableCounts.put("real_array$neg_inf_count", null);
    emptyTableCounts.put("real_array$cardinality_checksum", null);
    emptyTableCounts.put("real_array$cardinality_sum", null);
    emptyTableCounts.put("real_array$sum", null);
    final ChecksumResult controlChecksum = new ChecksumResult(
            5,
            ImmutableMap.<String, Object>builder()
                    .putAll(stableCounts)
                    .put("double_array$sum", 1.0)
                    .put("real_array$sum", 1.0)
                    .build());
    // Matched
    ChecksumResult testChecksum = new ChecksumResult(
            5,
            ImmutableMap.<String, Object>builder()
                    .putAll(stableCounts)
                    .put("double_array$sum", 1 + RELATIVE_ERROR_MARGIN)
                    .put("real_array$sum", 1 - RELATIVE_ERROR_MARGIN + RELATIVE_ERROR_MARGIN * RELATIVE_ERROR_MARGIN)
                    .build());
    assertTrue(checksumValidator.getMismatchedColumns(columns, controlChecksum, testChecksum).isEmpty());
    // Matched two empty tables
    final ChecksumResult testEmptyChecksum = new ChecksumResult(0, emptyTableCounts);
    assertTrue(checksumValidator.getMismatchedColumns(columns, testEmptyChecksum, testEmptyChecksum).isEmpty());
    // Mismatched empty and non-empty (number of rows differs).
    assertThatThrownBy(() -> checksumValidator.getMismatchedColumns(columns, controlChecksum, testEmptyChecksum))
            .isInstanceOf(IllegalArgumentException.class);
    // Mismatched
    testChecksum = new ChecksumResult(
            5,
            ImmutableMap.<String, Object>builder()
                    .put("double_array$sum", 1.0)
                    .put("double_array$nan_count", 0L)
                    .put("double_array$pos_inf_count", 3L)
                    .put("double_array$neg_inf_count", 4L)
                    .put("double_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xb}))
                    .put("double_array$cardinality_sum", 10L)
                    .put("real_array$sum", 1.0)
                    .put("real_array$nan_count", 2L)
                    .put("real_array$pos_inf_count", 0L)
                    .put("real_array$neg_inf_count", 4L)
                    .put("real_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xd}))
                    .put("real_array$cardinality_sum", 10L)
                    .build());
    assertMismatchedColumns(columns, controlChecksum, testChecksum, DOUBLE_ARRAY_COLUMN, REAL_ARRAY_COLUMN);
    testChecksum = new ChecksumResult(
            5,
            ImmutableMap.<String, Object>builder()
                    .put("double_array$sum", 1.0)
                    .put("double_array$nan_count", 2L)
                    .put("double_array$pos_inf_count", 3L)
                    .put("double_array$neg_inf_count", 0L)
                    .put("double_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xb}))
                    .put("double_array$cardinality_sum", 10L)
                    .put("real_array$sum", 1 - RELATIVE_ERROR_MARGIN)
                    .put("real_array$nan_count", 2L)
                    .put("real_array$pos_inf_count", 3L)
                    .put("real_array$neg_inf_count", 4L)
                    .put("real_array$cardinality_checksum", new SqlVarbinary(new byte[] {0xd}))
                    .put("real_array$cardinality_sum", 10L)
                    .build());
    List<ColumnMatchResult<?>> mismatchedColumns =
            assertMismatchedColumns(columns, controlChecksum, testChecksum, DOUBLE_ARRAY_COLUMN, REAL_ARRAY_COLUMN);
    assertEquals(mismatchedColumns.get(1).getMessage(), Optional.of("relative error: 1.0000500025000149E-4"));
}
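// A hand-check of the relative error reported in the final assertion above, assuming the
// validator computes it as |control - test| / ((|control| + |test|) / 2) and that
// RELATIVE_ERROR_MARGIN = 1e-4; both the formula and the margin value are assumptions
// inferred from the expected message, not taken from the snippet.
static void relativeErrorHandCheck() {
    double control = 1.0;
    double test = 1.0 - 1e-4; // 0.9999
    double relativeError = Math.abs(control - test) / ((Math.abs(control) + Math.abs(test)) / 2);
    System.out.println(relativeError); // roughly 1.0000500025E-4, consistent with the message above
}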
public static AlluxioSinkConfig load(String yamlFile) throws IOException { ObjectMapper mapper = new ObjectMapper(new YAMLFactory()); return mapper.readValue(new File(yamlFile), AlluxioSinkConfig.class); }
@Test public final void loadFromMapTest() throws IOException { Map<String, Object> map = new HashMap<>(); map.put("alluxioMasterHost", "localhost"); map.put("alluxioMasterPort", "19998"); map.put("alluxioDir", "pulsar"); map.put("filePrefix", "TopicA"); map.put("fileExtension", ".txt"); map.put("lineSeparator", "\n"); map.put("rotationRecords", "100"); map.put("rotationInterval", "-1"); AlluxioSinkConfig config = AlluxioSinkConfig.load(map); assertNotNull(config); assertEquals("localhost", config.getAlluxioMasterHost()); assertEquals(Integer.parseInt("19998"), config.getAlluxioMasterPort()); assertEquals("pulsar", config.getAlluxioDir()); assertEquals("TopicA", config.getFilePrefix()); assertEquals(".txt", config.getFileExtension()); assertEquals("\n".charAt(0), config.getLineSeparator()); assertEquals(Long.parseLong("100"), config.getRotationRecords()); assertEquals(Long.parseLong("-1"), config.getRotationInterval()); }
static String strip(final String line) { return new Parser(line).parse(); }
@Test
public void shouldReturnLineWithCommentInSingleQuotesAsIs() {
    // Given:
    final String line = "no comment here '-- even this is not a comment'...";
    // Then:
    assertThat(CommentStripper.strip(line), is(sameInstance(line)));
}
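// The focal method above delegates entirely to an internal Parser, so the quote-handling rule
// it encodes is not visible here. Below is a self-contained, illustrative sketch of quote-aware
// comment stripping (an assumption about the intent, not the actual ksqlDB parser): a "--"
// sequence starts a comment unless it appears inside single quotes.
static String stripSketch(String line) {
    boolean inQuotes = false;
    for (int i = 0; i < line.length(); i++) {
        char c = line.charAt(i);
        if (c == '\'') {
            inQuotes = !inQuotes; // toggle quoted state
        } else if (!inQuotes && c == '-' && i + 1 < line.length() && line.charAt(i + 1) == '-') {
            return line.substring(0, i); // drop the comment through end of line
        }
    }
    return line;
}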
public static Object[] realize(Object[] objs, Class<?>[] types) { if (objs.length != types.length) { throw new IllegalArgumentException("args.length != types.length"); } Object[] dests = new Object[objs.length]; for (int i = 0; i < objs.length; i++) { dests[i] = realize(objs[i], types[i]); } return dests; }
@Test void testRealizeCollectionWithNullElement() { LinkedList<String> listStr = new LinkedList<>(); listStr.add("arrayValue"); listStr.add(null); HashSet<String> setStr = new HashSet<>(); setStr.add("setValue"); setStr.add(null); Object listResult = PojoUtils.realize(listStr, LinkedList.class); assertEquals(LinkedList.class, listResult.getClass()); assertEquals(listResult, listStr); Object setResult = PojoUtils.realize(setStr, HashSet.class); assertEquals(HashSet.class, setResult.getClass()); assertEquals(setResult, setStr); }
public Statement buildStatement(final ParserRuleContext parseTree) { return build(Optional.of(getSources(parseTree)), parseTree); }
@Test
public void shouldSupportExplicitEmitFinalForCsas() {
    // Given:
    final SingleStatementContext stmt = givenQuery("CREATE STREAM X AS SELECT * FROM TEST1 EMIT FINAL;");
    // When:
    final Query result = ((QueryContainer) builder.buildStatement(stmt)).getQuery();
    // Then:
    assertThat("Should be push", result.isPullQuery(), is(false));
    assertThat(result.getRefinement().get().getOutputRefinement(), is(OutputRefinement.FINAL));
}
static Properties resolveProducerProperties(Map<String, String> options, Object keySchema, Object valueSchema) { Properties properties = from(options); withSerdeProducerProperties(true, options, keySchema, properties); withSerdeProducerProperties(false, options, valueSchema, properties); return properties; }
@Test public void test_producerProperties_absentFormat() { assertThat(resolveProducerProperties(emptyMap())) .containsExactlyEntriesOf(Map.of(KEY_SERIALIZER, ByteArraySerializer.class.getCanonicalName())); }
public void validateIdentity(WorkflowInstance toRestart) {
    // instance id will not always match for foreach
    if (initiator.getType() == Initiator.Type.FOREACH) {
        Checks.checkTrue(
                getRestartWorkflowId().equals(toRestart.getWorkflowId()),
                "Cannot restart a FOREACH iteration [%s] as it does not match run request [%s]",
                toRestart.getWorkflowId(),
                getRestartWorkflowId());
    } else {
        Checks.checkTrue(
                getRestartWorkflowId().equals(toRestart.getWorkflowId())
                        && getRestartInstanceId() == toRestart.getWorkflowInstanceId(),
                "Cannot restart a workflow instance %s as it does not match run request %s",
                toRestart.getIdentity(),
                getWorkflowIdentity());
    }
}
@Test public void testValidateIdentityForManualRun() { RestartConfig config = RestartConfig.builder().addRestartNode("foo", 1, "bar").build(); RunRequest runRequest = RunRequest.builder() .currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE) .restartConfig(config) .requester(User.create("tester")) .build(); WorkflowInstance instance = new WorkflowInstance(); instance.setWorkflowId("foo"); instance.setWorkflowInstanceId(1); runRequest.validateIdentity(instance); instance.setWorkflowInstanceId(2); AssertHelper.assertThrows( "Mismatch instance identity", IllegalArgumentException.class, "Cannot restart a workflow instance ", () -> runRequest.validateIdentity(instance)); }
public void command(String primaryCommand, SecureConfig config, String... allArguments) {
    terminal.writeLine("");
    final Optional<CommandLine> commandParseResult;
    try {
        commandParseResult = Command.parse(primaryCommand, allArguments);
    } catch (InvalidCommandException e) {
        terminal.writeLine(String.format("ERROR: %s", e.getMessage()));
        return;
    }
    if (commandParseResult.isEmpty()) {
        printHelp();
        return;
    }
    final CommandLine commandLine = commandParseResult.get();
    switch (commandLine.getCommand()) {
        case CREATE: {
            if (commandLine.hasOption(CommandOptions.HELP)) {
                terminal.writeLine("Creates a new keystore. For example: 'bin/logstash-keystore create'");
                return;
            }
            if (secretStoreFactory.exists(config.clone())) {
                terminal.write("A Logstash keystore already exists. Overwrite ? [y/N] ");
                if (isYes(terminal.readLine())) {
                    create(config);
                }
            } else {
                create(config);
            }
            break;
        }
        case LIST: {
            if (commandLine.hasOption(CommandOptions.HELP)) {
                terminal.writeLine("List all secret identifiers from the keystore. For example: "
                        + "`bin/logstash-keystore list`. Note - only the identifiers will be listed, not the secrets.");
                return;
            }
            Collection<SecretIdentifier> ids = secretStoreFactory.load(config).list();
            List<String> keys = ids.stream().filter(id -> !id.equals(LOGSTASH_MARKER)).map(id -> id.getKey()).collect(Collectors.toList());
            Collections.sort(keys);
            keys.forEach(terminal::writeLine);
            break;
        }
        case ADD: {
            if (commandLine.hasOption(CommandOptions.HELP)) {
                terminal.writeLine("Add secrets to the keystore. For example: "
                        + "`bin/logstash-keystore add my-secret`, at the prompt enter your secret. You will use the identifier ${my-secret} in your Logstash configuration.");
                return;
            }
            if (commandLine.getArguments().isEmpty()) {
                terminal.writeLine("ERROR: You must supply an identifier to add. (e.g. bin/logstash-keystore add my-secret)");
                return;
            }
            if (secretStoreFactory.exists(config.clone())) {
                final SecretStore secretStore = secretStoreFactory.load(config);
                for (String argument : commandLine.getArguments()) {
                    final SecretIdentifier id = new SecretIdentifier(argument);
                    final byte[] existingValue = secretStore.retrieveSecret(id);
                    if (existingValue != null) {
                        SecretStoreUtil.clearBytes(existingValue);
                        terminal.write(String.format("%s already exists. Overwrite ? [y/N] ", argument));
                        if (!isYes(terminal.readLine())) {
                            continue;
                        }
                    }
                    final String enterValueMessage = String.format("Enter value for %s: ", argument);
                    char[] secret = null;
                    while (secret == null) {
                        terminal.write(enterValueMessage);
                        final char[] readSecret = terminal.readSecret();
                        if (readSecret == null || readSecret.length == 0) {
                            terminal.writeLine("ERROR: Value cannot be empty");
                            continue;
                        }
                        if (!ASCII_ENCODER.canEncode(CharBuffer.wrap(readSecret))) {
                            terminal.writeLine("ERROR: Value must contain only ASCII characters");
                            continue;
                        }
                        secret = readSecret;
                    }
                    add(secretStore, id, SecretStoreUtil.asciiCharToBytes(secret));
                }
            } else {
                terminal.writeLine("ERROR: Logstash keystore not found. Use 'create' command to create one.");
            }
            break;
        }
        case REMOVE: {
            if (commandLine.hasOption(CommandOptions.HELP)) {
                terminal.writeLine("Remove secrets from the keystore. For example: "
                        + "`bin/logstash-keystore remove my-secret`");
                return;
            }
            if (commandLine.getArguments().isEmpty()) {
                terminal.writeLine("ERROR: You must supply a value to remove. (e.g. bin/logstash-keystore remove my-secret)");
                return;
            }
            final SecretStore secretStore = secretStoreFactory.load(config);
            for (String argument : commandLine.getArguments()) {
                SecretIdentifier id = new SecretIdentifier(argument);
                if (secretStore.containsSecret(id)) {
                    secretStore.purgeSecret(id);
                    terminal.writeLine(String.format("Removed '%s' from the Logstash keystore.", id.getKey()));
                } else {
                    terminal.writeLine(String.format("ERROR: '%s' does not exist in the Logstash keystore.", argument));
                }
            }
            break;
        }
    }
}
@Test public void testHelpRemove() { cli.command("remove", null, "--help"); assertThat(terminal.out).containsIgnoringCase("Remove secrets from the keystore"); }
@Override
public void initialize(ServiceConfiguration config) throws IOException, IllegalArgumentException {
    String prefix = (String) config.getProperty(CONF_TOKEN_SETTING_PREFIX);
    if (null == prefix) {
        prefix = "";
    }
    this.confTokenSecretKeySettingName = prefix + CONF_TOKEN_SECRET_KEY;
    this.confTokenPublicKeySettingName = prefix + CONF_TOKEN_PUBLIC_KEY;
    this.confTokenAuthClaimSettingName = prefix + CONF_TOKEN_AUTH_CLAIM;
    this.confTokenPublicAlgSettingName = prefix + CONF_TOKEN_PUBLIC_ALG;
    this.confTokenAudienceClaimSettingName = prefix + CONF_TOKEN_AUDIENCE_CLAIM;
    this.confTokenAudienceSettingName = prefix + CONF_TOKEN_AUDIENCE;
    this.confTokenAllowedClockSkewSecondsSettingName = prefix + CONF_TOKEN_ALLOWED_CLOCK_SKEW_SECONDS;
    // we need to fetch the algorithm before we fetch the key
    this.publicKeyAlg = getPublicKeyAlgType(config);
    this.validationKey = getValidationKey(config);
    this.roleClaim = getTokenRoleClaim(config);
    this.audienceClaim = getTokenAudienceClaim(config);
    this.audience = getTokenAudience(config);
    long allowedSkew = getConfTokenAllowedClockSkewSeconds(config);
    this.parser = Jwts.parserBuilder()
            .setAllowedClockSkewSeconds(allowedSkew)
            .setSigningKey(this.validationKey)
            .build();
    if (audienceClaim != null && audience == null) {
        throw new IllegalArgumentException("Token Audience Claim [" + audienceClaim
                + "] configured, but the Audience for this broker is not set.");
    }
}
@Test(expectedExceptions = IOException.class) public void testInitializeWhenSecretKeyIsValidPathOrBase64() throws IOException { Properties properties = new Properties(); properties.setProperty(AuthenticationProviderToken.CONF_TOKEN_SECRET_KEY, "secret_key_file_not_exist"); ServiceConfiguration conf = new ServiceConfiguration(); conf.setProperties(properties); new AuthenticationProviderToken().initialize(conf); }
public static String[] getTrimmedStrings(String str){ if (null == str || str.trim().isEmpty()) { return emptyStringArray; } return str.trim().split("\\s*[,\n]\\s*"); }
@Test (timeout = 30000) public void testGetTrimmedStrings() throws Exception { String compactDirList = "/spindle1/hdfs,/spindle2/hdfs,/spindle3/hdfs"; String spacedDirList = "/spindle1/hdfs, /spindle2/hdfs, /spindle3/hdfs"; String pathologicalDirList1 = " /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs "; String pathologicalDirList2 = " /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs , "; String emptyList1 = ""; String emptyList2 = " "; String[] expectedArray = {"/spindle1/hdfs", "/spindle2/hdfs", "/spindle3/hdfs"}; String[] emptyArray = {}; assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(compactDirList)); assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(spacedDirList)); assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(pathologicalDirList1)); assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(pathologicalDirList2)); assertArrayEquals(emptyArray, StringUtils.getTrimmedStrings(emptyList1)); String[] estring = StringUtils.getTrimmedStrings(emptyList2); assertArrayEquals(emptyArray, estring); }
static CommandLineOptions parse(Iterable<String> options) {
    CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
    List<String> expandedOptions = new ArrayList<>();
    expandParamsFiles(options, expandedOptions);
    Iterator<String> it = expandedOptions.iterator();
    while (it.hasNext()) {
        String option = it.next();
        if (!option.startsWith("-")) {
            optionsBuilder.filesBuilder().add(option).addAll(it);
            break;
        }
        String flag;
        String value;
        int idx = option.indexOf('=');
        if (idx >= 0) {
            flag = option.substring(0, idx);
            value = option.substring(idx + 1);
        } else {
            flag = option;
            value = null;
        }
        // NOTE: update usage information in UsageException when new flags are added
        switch (flag) {
            case "-i":
            case "-r":
            case "-replace":
            case "--replace":
                optionsBuilder.inPlace(true);
                break;
            case "--lines":
            case "-lines":
            case "--line":
            case "-line":
                parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
                break;
            case "--offset":
            case "-offset":
                optionsBuilder.addOffset(parseInteger(it, flag, value));
                break;
            case "--length":
            case "-length":
                optionsBuilder.addLength(parseInteger(it, flag, value));
                break;
            case "--aosp":
            case "-aosp":
            case "-a":
                optionsBuilder.aosp(true);
                break;
            case "--version":
            case "-version":
            case "-v":
                optionsBuilder.version(true);
                break;
            case "--help":
            case "-help":
            case "-h":
                optionsBuilder.help(true);
                break;
            case "--fix-imports-only":
                optionsBuilder.fixImportsOnly(true);
                break;
            case "--skip-sorting-imports":
                optionsBuilder.sortImports(false);
                break;
            case "--skip-removing-unused-imports":
                optionsBuilder.removeUnusedImports(false);
                break;
            case "--skip-reflowing-long-strings":
                optionsBuilder.reflowLongStrings(false);
                break;
            case "--skip-javadoc-formatting":
                optionsBuilder.formatJavadoc(false);
                break;
            case "-":
                optionsBuilder.stdin(true);
                break;
            case "-n":
            case "--dry-run":
                optionsBuilder.dryRun(true);
                break;
            case "--set-exit-if-changed":
                optionsBuilder.setExitIfChanged(true);
                break;
            case "-assume-filename":
            case "--assume-filename":
                optionsBuilder.assumeFilename(getValue(flag, it, value));
                break;
            default:
                throw new IllegalArgumentException("unexpected flag: " + flag);
        }
    }
    return optionsBuilder.build();
}
@Test public void skipJavadocFormatting() { assertThat( CommandLineOptionsParser.parse(Arrays.asList("--skip-javadoc-formatting")) .formatJavadoc()) .isFalse(); }
public static BigDecimal toNanos(Timestamp timestamp) { final BigDecimal secondsAsNanos = BigDecimal.valueOf(timestamp.getSeconds()).subtract(MIN_SECONDS).scaleByPowerOfTen(9); final BigDecimal nanos = BigDecimal.valueOf(timestamp.getNanos()); return secondsAsNanos.add(nanos); }
@Test public void testToNanosConvertTimestampMaxToNanos() { assertEquals( new BigDecimal("315537897599999999999"), TimestampUtils.toNanos(Timestamp.MAX_VALUE)); }
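// A hand-check of the expected value in the test above, assuming Cloud Spanner's documented
// timestamp bounds: MIN_SECONDS = -62135596800 (0001-01-01T00:00:00Z) and Timestamp.MAX_VALUE =
// 253402300799 seconds + 999999999 nanos (9999-12-31T23:59:59.999999999Z). Both bounds are
// assumptions taken from the Timestamp documentation, not from this snippet.
static void toNanosHandCheck() {
    java.math.BigDecimal secondsAsNanos = java.math.BigDecimal.valueOf(253402300799L)
            .subtract(java.math.BigDecimal.valueOf(-62135596800L)) // 315537897599 seconds
            .scaleByPowerOfTen(9);                                 // 315537897599000000000
    // adding the 999999999 trailing nanos gives 315537897599999999999, matching the test
    System.out.println(secondsAsNanos.add(java.math.BigDecimal.valueOf(999999999L)));
}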
public void reset() {
    lock.lock();
    try {
        clearTransactions();
        lastBlockSeenHash = null;
        lastBlockSeenHeight = -1; // Magic value for 'never'.
        lastBlockSeenTime = null;
        saveLater();
        maybeQueueOnWalletChanged();
    } finally {
        lock.unlock();
    }
}
@Test public void reset() { sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, COIN, myAddress); assertNotEquals(Coin.ZERO, wallet.getBalance(Wallet.BalanceType.ESTIMATED)); assertNotEquals(0, wallet.getTransactions(false).size()); assertNotEquals(0, wallet.getUnspents().size()); wallet.reset(); assertEquals(Coin.ZERO, wallet.getBalance(Wallet.BalanceType.ESTIMATED)); assertEquals(0, wallet.getTransactions(false).size()); assertEquals(0, wallet.getUnspents().size()); }
public static <T> T readValue(String jsonStr, Class<T> clazz) { try { return getInstance().readValue(jsonStr, clazz); } catch (JsonParseException e) { logger.error(e.getMessage(), e); } catch (JsonMappingException e) { logger.error(e.getMessage(), e); } catch (IOException e) { logger.error(e.getMessage(), e); } return null; }
@Test
public void shouldReadValueAsObject() {
    // given
    String jsonString = "{\"aaa\":\"111\",\"bbb\":\"222\"}";
    // when
    Map result = JacksonUtil.readValue(jsonString, Map.class);
    // then
    assertEquals("111", result.get("aaa"));
    assertEquals("222", result.get("bbb"));
}
public static short translateBucketAcl(GSAccessControlList acl, String userId) {
    short mode = (short) 0;
    for (GrantAndPermission gp : acl.getGrantAndPermissions()) {
        Permission perm = gp.getPermission();
        GranteeInterface grantee = gp.getGrantee();
        if (perm.equals(Permission.PERMISSION_READ)) {
            if (isUserIdInGrantee(grantee, userId)) {
                // If the bucket is readable by the user, add r and x to the owner mode.
                mode |= (short) 0500;
            }
        } else if (perm.equals(Permission.PERMISSION_WRITE)) {
            if (isUserIdInGrantee(grantee, userId)) {
                // If the bucket is writable by the user, +w to the owner mode.
                mode |= (short) 0200;
            }
        } else if (perm.equals(Permission.PERMISSION_FULL_CONTROL)) {
            if (isUserIdInGrantee(grantee, userId)) {
                // If the user has full control to the bucket, +rwx to the owner mode.
                mode |= (short) 0700;
            }
        }
    }
    return mode;
}
@Test public void translateAuthenticatedUserReadPermission() { GroupGrantee authenticatedUsersGrantee = GroupGrantee.AUTHENTICATED_USERS; mAcl.grantPermission(authenticatedUsersGrantee, Permission.PERMISSION_READ); assertEquals((short) 0500, GCSUtils.translateBucketAcl(mAcl, ID)); assertEquals((short) 0500, GCSUtils.translateBucketAcl(mAcl, OTHER_ID)); }
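// The octal constants in translateBucketAcl map onto POSIX owner bits: READ grants r-x (0500,
// presumably because listing a directory-like bucket also needs the execute bit), WRITE grants
// w (0200), and FULL_CONTROL grants rwx (0700). A quick check that the masks compose as expected:
static void aclModeBitsHandCheck() {
    short mode = 0;
    mode |= (short) 0500; // READ
    mode |= (short) 0200; // WRITE
    assert mode == (short) 0700; // READ and WRITE together equal FULL_CONTROL
}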
public Map<TopicPartition, Long> retryEndOffsets(Set<TopicPartition> partitions, Duration timeoutDuration, long retryBackoffMs) {
    try {
        return RetryUtil.retryUntilTimeout(
                () -> endOffsets(partitions),
                () -> "list offsets for topic partitions",
                timeoutDuration,
                retryBackoffMs);
    } catch (UnsupportedVersionException e) {
        // Older brokers don't support this admin method, so rethrow it without wrapping it
        throw e;
    } catch (Exception e) {
        throw ConnectUtils.maybeWrap(e, "Failed to list offsets for topic partitions");
    }
}
@Test
public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException() {
    String topicName = "myTopic";
    TopicPartition tp1 = new TopicPartition(topicName, 0);
    Set<TopicPartition> tps = Collections.singleton(tp1);
    Long offset = 1000L;
    Cluster cluster = createCluster(1, "myTopic", 1);
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
        Map<TopicPartition, Long> offsetMap = new HashMap<>();
        offsetMap.put(tp1, offset);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // This error should be treated as non-retriable and cause TopicAdmin::retryEndOffsets to fail
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.TOPIC_AUTHORIZATION_FAILED, Errors.NONE));
        // But, in case there's a bug in our logic, prepare a valid response afterward so that
        // TopicAdmin::retryEndOffsets will return successfully if we retry (which should in turn
        // cause this test to fail)
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset));
        TopicAdmin admin = new TopicAdmin(env.adminClient());
        ConnectException exception = assertThrows(ConnectException.class, () ->
                admin.retryEndOffsets(tps, Duration.ofMillis(100), 1)
        );
        Throwable cause = exception.getCause();
        assertNotNull(cause, "cause of failure should be preserved");
        assertTrue(
                cause instanceof TopicAuthorizationException,
                "cause of failure should be accurately reported; expected topic authorization error, but was " + cause
        );
    }
}
public static GrpcDataWriter create(FileSystemContext context, WorkerNetAddress address, long id, long length, RequestType type, OutStreamOptions options) throws IOException { long chunkSize = context.getClusterConf() .getBytes(PropertyKey.USER_STREAMING_WRITER_CHUNK_SIZE_BYTES); CloseableResource<BlockWorkerClient> grpcClient = context.acquireBlockWorkerClient(address); try { return new GrpcDataWriter(context, address, id, length, chunkSize, type, options, grpcClient); } catch (Exception e) { grpcClient.close(); throw e; } }
@Test(timeout = 1000 * 60) public void writeFileManyChunks() throws Exception { long checksumActual; Future<Long> checksumExpected; long length = CHUNK_SIZE * 30000 + CHUNK_SIZE / 3; try (DataWriter writer = create(Long.MAX_VALUE)) { checksumExpected = writeFile(writer, length, 10, length / 3); checksumExpected.get(); checksumActual = verifyWriteRequests(mClient, 10, length / 3); } assertEquals(checksumExpected.get().longValue(), checksumActual); }
public long get(T item) { MutableLong count = map.get(item); return count == null ? 0 : count.value; }
@Test public void testGet_returnsZeroWhenEmpty() { long count = counter.get(new Object()); assertEquals(0, count); }
@Override public KeyValueIterator<Windowed<Bytes>, byte[]> backwardFindSessions(final Bytes key, final long earliestSessionEndTime, final long latestSessionStartTime) { return wrapped().backwardFindSessions(key, earliestSessionEndTime, latestSessionStartTime); }
@Test public void shouldDelegateToUnderlyingStoreWhenBackwardFindingSessions() { store.backwardFindSessions(bytesKey, 0, 1); verify(inner).backwardFindSessions(bytesKey, 0, 1); }
@Override
public ProcResult fetchResult() throws AnalysisException {
    Preconditions.checkNotNull(globalStateMgr);
    BaseProcResult result = new BaseProcResult();
    result.setNames(TITLE_NAMES);
    List<String> dbNames = globalStateMgr.getLocalMetastore().listDbNames();
    if (dbNames == null || dbNames.isEmpty()) {
        // empty
        return result;
    }
    // get info
    List<List<Comparable>> dbInfos = new ArrayList<List<Comparable>>();
    for (String dbName : dbNames) {
        Database db = globalStateMgr.getDb(dbName);
        if (db == null) {
            continue;
        }
        List<Comparable> dbInfo = new ArrayList<Comparable>();
        Locker locker = new Locker();
        locker.lockDatabase(db, LockType.READ);
        try {
            int tableNum = db.getTables().size();
            dbInfo.add(db.getId());
            dbInfo.add(dbName);
            dbInfo.add(tableNum);
            long dataQuota = db.getDataQuota();
            Pair<Double, String> quotaUnitPair = DebugUtil.getByteUint(dataQuota);
            String readableQuota = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(quotaUnitPair.first) + " "
                    + quotaUnitPair.second;
            dbInfo.add(readableQuota);
            dbInfo.add(TimeUtils.longToTimeString(db.getLastCheckTime()));
            long replicaQuota = db.getReplicaQuota();
            dbInfo.add(replicaQuota);
        } finally {
            locker.unLockDatabase(db, LockType.READ);
        }
        dbInfos.add(dbInfo);
    }
    // order by dbId, asc
    ListComparator<List<Comparable>> comparator = new ListComparator<List<Comparable>>(0);
    Collections.sort(dbInfos, comparator);
    // set result
    for (List<Comparable> info : dbInfos) {
        List<String> row = new ArrayList<String>(info.size());
        for (Comparable comparable : info) {
            row.add(comparable.toString());
        }
        result.addRow(row);
    }
    return result;
}
@Test public void testFetchResultNormal() throws AnalysisException { new Expectations(globalStateMgr) { { globalStateMgr.getLocalMetastore().listDbNames(); minTimes = 0; result = Lists.newArrayList("db1", "db2"); globalStateMgr.getDb("db1"); minTimes = 0; result = db1; globalStateMgr.getDb("db2"); minTimes = 0; result = db2; globalStateMgr.getDb("db3"); minTimes = 0; result = null; globalStateMgr.getDb(db1.getId()); minTimes = 0; result = db1; globalStateMgr.getDb(db2.getId()); minTimes = 0; result = db2; globalStateMgr.getDb(anyLong); minTimes = 0; result = null; } }; DbsProcDir dir; ProcResult result; dir = new DbsProcDir(globalStateMgr); result = dir.fetchResult(); Assert.assertNotNull(result); Assert.assertTrue(result instanceof BaseProcResult); Assert.assertEquals( Lists.newArrayList("DbId", "DbName", "TableNum", "Quota", "LastConsistencyCheckTime", "ReplicaQuota"), result.getColumnNames()); List<List<String>> rows = Lists.newArrayList(); rows.add(Arrays.asList(String.valueOf(db1.getId()), db1.getOriginName(), "0", "8388608.000 TB", FeConstants.NULL_STRING, "9223372036854775807")); rows.add(Arrays.asList(String.valueOf(db2.getId()), db2.getOriginName(), "0", "8388608.000 TB", FeConstants.NULL_STRING, "9223372036854775807")); Assert.assertEquals(rows, result.getRows()); }
public static List<AclEntry> filterAclEntriesByAclSpec( List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException { ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class); EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class); EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class); for (AclEntry existingEntry: existingAcl) { if (aclSpec.containsKey(existingEntry)) { scopeDirty.add(existingEntry.getScope()); if (existingEntry.getType() == MASK) { maskDirty.add(existingEntry.getScope()); } } else { if (existingEntry.getType() == MASK) { providedMask.put(existingEntry.getScope(), existingEntry); } else { aclBuilder.add(existingEntry); } } } copyDefaultsIfNeeded(aclBuilder); calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); return buildAndValidateAcl(aclBuilder); }
@Test public void testFilterAclEntriesByAclSpecUnchanged() throws AclException { List<AclEntry> existing = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, USER, "bruce", ALL)) .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) .add(aclEntry(ACCESS, GROUP, "sales", ALL)) .add(aclEntry(ACCESS, MASK, ALL)) .add(aclEntry(ACCESS, OTHER, NONE)) .build(); List<AclEntry> aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, "clark"), aclEntry(ACCESS, GROUP, "execs")); assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec)); }
public static boolean isSystem(String topic, String group) { return TopicValidator.isSystemTopic(topic) || isSystemGroup(group); }
@Test public void testIsSystem_NonSystemTopicAndGroup_ReturnsFalse() { String topic = "FooTopic"; String group = "FooGroup"; boolean result = BrokerMetricsManager.isSystem(topic, group); assertThat(result).isFalse(); }
@Override @Deprecated public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier, final String... stateStoreNames) { Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); return doFlatTransformValues( toValueTransformerWithKeySupplier(valueTransformerSupplier), NamedInternal.empty(), stateStoreNames); }
@Test @SuppressWarnings("deprecation") public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValues() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.flatTransformValues((org.apache.kafka.streams.kstream.ValueTransformerSupplier<Object, Iterable<Object>>) null)); assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); }
@Override public Object getCalendarValue(final int columnIndex, final Class<?> type, final Calendar calendar) throws SQLException { return mergedResult.getCalendarValue(columnIndex, type, calendar); }
@Test void assertGetCalendarValue() throws SQLException { Calendar calendar = Calendar.getInstance(); when(mergedResult.getCalendarValue(1, Date.class, calendar)).thenReturn(new Date(0L)); assertThat(new EncryptMergedResult(database, encryptRule, selectStatementContext, mergedResult).getCalendarValue(1, Date.class, calendar), is(new Date(0L))); }
@Override public void login(String loginId) { }
@Test public void login() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); mSensorsAPI.login("abcde"); Assert.assertNull(mSensorsAPI.getLoginId()); }
int getMessageCountAndThenIncrement(String msg) {
    // don't insert null elements
    if (msg == null) {
        return 0;
    }
    Integer i;
    // LinkedHashMap is not thread-safe. See also LBCLASSIC-255
    synchronized (this) {
        i = super.get(msg);
        if (i == null) {
            i = 0;
        } else {
            i = i + 1;
        }
        super.put(msg, i);
    }
    return i;
}
@Test
public void testEldestEntriesRemoval() {
    final LRUMessageCache cache = new LRUMessageCache(2);
    assertEquals(0, cache.getMessageCountAndThenIncrement("0"));
    assertEquals(1, cache.getMessageCountAndThenIncrement("0"));
    assertEquals(0, cache.getMessageCountAndThenIncrement("1"));
    assertEquals(1, cache.getMessageCountAndThenIncrement("1"));
    // The "0" entry should have been removed.
    assertEquals(0, cache.getMessageCountAndThenIncrement("2"));
    // So a returned value of 0 is expected instead of 2.
    // The "1" entry should have been removed.
    assertEquals(0, cache.getMessageCountAndThenIncrement("0"));
    // So a returned value of 0 is expected instead of 2.
    // The "2" entry should have been removed.
    assertEquals(0, cache.getMessageCountAndThenIncrement("1"));
    // So a returned value of 0 is expected instead of 2.
    assertEquals(0, cache.getMessageCountAndThenIncrement("2"));
}
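// A minimal sketch of the backing structure the test above exercises: LRUMessageCache is
// presumably a size-capped LinkedHashMap in access order, so that inserting a third key into a
// cache of size 2 evicts the least recently used entry. The class below is an illustrative
// stand-in, not the actual logback implementation.
class LruSketch<K, V> extends java.util.LinkedHashMap<K, V> {
    private final int cacheSize;

    LruSketch(int cacheSize) {
        super(16, 0.75f, true); // accessOrder = true: each get() refreshes an entry's position
        this.cacheSize = cacheSize;
    }

    @Override
    protected boolean removeEldestEntry(java.util.Map.Entry<K, V> eldest) {
        return size() > cacheSize; // evict the eldest entry once the cap is exceeded
    }
}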
@Override public ScalarOperator visitCloneOperator(CloneOperator operator, Void context) { return shuttleIfUpdate(operator); }
@Test void testCloneOperator() { BinaryPredicateOperator binary1 = new BinaryPredicateOperator(BinaryType.EQ, new ColumnRefOperator(1, INT, "id", true), ConstantOperator.createInt(1)); CloneOperator clone = new CloneOperator(binary1); { ScalarOperator newOperator = shuttle.visitCloneOperator(clone, null); assertEquals(clone, newOperator); } { ScalarOperator newOperator = shuttle2.visitCloneOperator(clone, null); assertEquals(clone, newOperator); } }
public GsonAzureProjectList getProjects(String serverUrl, String token) { String url = String.format("%s/_apis/projects?%s", getTrimmedUrl(serverUrl), API_VERSION_3); return doGet(token, url, r -> buildGson().fromJson(r.body().charStream(), GsonAzureProjectList.class)); }
@Test public void get_projects_with_invalid_pat() { enqueueResponse(401); assertThatThrownBy(() -> underTest.getProjects(server.url("").toString(), "invalid-token")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Invalid personal access token"); assertThat(logTester.logs(Level.ERROR)).hasSize(1); assertThat(logTester.logs(Level.ERROR).iterator().next()) .contains("Unable to contact Azure DevOps server for request [" + server.url("") + "_apis/projects?api-version=3.0]: Invalid personal access token"); }
public void write(
        ByteBuffer record,
        TieredStorageSubpartitionId subpartitionId,
        Buffer.DataType dataType,
        boolean isBroadcast)
        throws IOException {
    if (isBroadcast && !isBroadcastOnly) {
        int currentPosition = record.position();
        for (int i = 0; i < numSubpartitions; ++i) {
            // As the tiered storage subpartition ID is created only for broadcast records,
            // which are fewer than normal records, the performance impact of generating new
            // TieredStorageSubpartitionId objects is expected to be manageable. If the
            // performance is significantly affected, this logic will be optimized accordingly.
            bufferAccumulator.receive(record, new TieredStorageSubpartitionId(i), dataType, isBroadcast);
            record.position(currentPosition);
        }
    } else {
        bufferAccumulator.receive(record, subpartitionId, dataType, isBroadcast);
    }
}
@TestTemplate void testTierCanNotStartNewSegment() { int numSubpartitions = 10; int bufferSize = 1024; Random random = new Random(); TestingTierProducerAgent tierProducerAgent = new TestingTierProducerAgent.Builder() .setTryStartSegmentSupplier(((subpartitionId, integer) -> false)) .build(); TieredStorageProducerClient tieredStorageProducerClient = createTieredStorageProducerClient( numSubpartitions, Collections.singletonList(tierProducerAgent)); assertThatThrownBy( () -> tieredStorageProducerClient.write( generateRandomData(bufferSize, random), new TieredStorageSubpartitionId(0), Buffer.DataType.DATA_BUFFER, isBroadcast)) .isInstanceOf(RuntimeException.class) .hasMessageContaining("Failed to choose a storage tier"); }
@Override
public void updateTag(MemberTagUpdateReqVO updateReqVO) {
    // validate that the tag exists
    validateTagExists(updateReqVO.getId());
    // validate that the name is unique
    validateTagNameUnique(updateReqVO.getId(), updateReqVO.getName());
    // update
    MemberTagDO updateObj = MemberTagConvert.INSTANCE.convert(updateReqVO);
    memberTagMapper.updateById(updateObj);
}
@Test
public void testUpdateTag_notExists() {
    // prepare parameters
    MemberTagUpdateReqVO reqVO = randomPojo(MemberTagUpdateReqVO.class);
    // call, and assert the exception
    assertServiceException(() -> tagService.updateTag(reqVO), TAG_NOT_EXISTS);
}
@Override protected CompletableFuture<EmptyResponseBody> handleRequest( @Nonnull final HandlerRequest<EmptyRequestBody> request, @Nonnull final RestfulGateway gateway) throws RestHandlerException { final String jarId = request.getPathParameter(JarIdPathParameter.class); return CompletableFuture.supplyAsync( () -> { final Path jarToDelete = jarDir.resolve(jarId); if (!Files.exists(jarToDelete)) { throw new CompletionException( new RestHandlerException( String.format( "File %s does not exist in %s.", jarId, jarDir), HttpResponseStatus.BAD_REQUEST)); } else { try { Files.delete(jarToDelete); return EmptyResponseBody.getInstance(); } catch (final IOException e) { throw new CompletionException( new RestHandlerException( String.format("Failed to delete jar %s.", jarToDelete), HttpResponseStatus.INTERNAL_SERVER_ERROR, e)); } } }, executor); }
@Test void testDeleteUnknownJar() throws Exception { final HandlerRequest<EmptyRequestBody> request = createRequest("doesnotexist.jar"); assertThatThrownBy(() -> jarDeleteHandler.handleRequest(request, restfulGateway).get()) .satisfies( e -> { final Throwable throwable = ExceptionUtils.stripCompletionException(e.getCause()); assertThat(throwable).isInstanceOf(RestHandlerException.class); final RestHandlerException restHandlerException = (RestHandlerException) throwable; assertThat(restHandlerException.getMessage()) .contains("File doesnotexist.jar does not exist in"); assertThat(restHandlerException.getHttpResponseStatus()) .isEqualTo(HttpResponseStatus.BAD_REQUEST); }); }
public void startsWith(@Nullable String string) { checkNotNull(string); if (actual == null) { failWithActual("expected a string that starts with", string); } else if (!actual.startsWith(string)) { failWithActual("expected to start with", string); } }
@Test public void stringStartsWith() { assertThat("abc").startsWith("ab"); }
MethodSpec buildFunction(AbiDefinition functionDefinition) throws ClassNotFoundException { return buildFunction(functionDefinition, true); }
@Test public void testBuildFunctionConstantMultipleValueReturn() throws Exception { AbiDefinition functionDefinition = new AbiDefinition( true, Arrays.asList( new NamedType("param1", "uint8"), new NamedType("param2", "uint32")), "functionName", Arrays.asList( new NamedType("result1", "int8"), new NamedType("result2", "int32")), "type", false); MethodSpec methodSpec = solidityFunctionWrapper.buildFunction(functionDefinition); String expected = "public org.web3j.protocol.core.RemoteFunctionCall<org.web3j.tuples.generated.Tuple2<java.math.BigInteger, java.math.BigInteger>> functionName(\n" + " java.math.BigInteger param1, java.math.BigInteger param2) {\n" + " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n" + " java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.generated.Uint8(param1), \n" + " new org.web3j.abi.datatypes.generated.Uint32(param2)), \n" + " java.util.Arrays.<org.web3j.abi.TypeReference<?>>asList(new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.generated.Int8>() {}, new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.generated.Int32>() {}));\n" + " return new org.web3j.protocol.core.RemoteFunctionCall<org.web3j.tuples.generated.Tuple2<java.math.BigInteger, java.math.BigInteger>>(function,\n" + " new java.util.concurrent.Callable<org.web3j.tuples.generated.Tuple2<java.math.BigInteger, java.math.BigInteger>>() {\n" + " @java.lang.Override\n" + " public org.web3j.tuples.generated.Tuple2<java.math.BigInteger, java.math.BigInteger> call()\n" + " throws java.lang.Exception {\n" + " java.util.List<org.web3j.abi.datatypes.Type> results = executeCallMultipleValueReturn(function);\n" + " return new org.web3j.tuples.generated.Tuple2<java.math.BigInteger, java.math.BigInteger>(\n" + " (java.math.BigInteger) results.get(0).getValue(), \n" + " (java.math.BigInteger) results.get(1).getValue());\n" + " }\n" + " });\n" + "}\n"; assertEquals((expected), methodSpec.toString()); }
public static boolean isNullOrEmpty(final CharSequence cs) { return (cs == null) || (cs.length() == 0); }
@Test public void testIsNullOrEmpty() throws IOException { assertTrue(StringUtils.isNullOrEmpty(null)); assertTrue(StringUtils.isNullOrEmpty("")); assertFalse(StringUtils.isNullOrEmpty(" ")); assertFalse(StringUtils.isNullOrEmpty("abc")); assertFalse(StringUtils.isNullOrEmpty(" a")); }
@Override public void doLimit(String sql) throws SQLException { if (!enabledLimit) { return; } String trimmedSql = sql.trim(); if (StringUtils.isEmpty(trimmedSql)) { return; } int firstTokenIndex = trimmedSql.indexOf(" "); if (-1 == firstTokenIndex) { throwException(trimmedSql); } String firstToken = trimmedSql.substring(0, firstTokenIndex).toUpperCase(); if (allowedDmlSqls.contains(firstToken)) { return; } if (!allowedDdlSqls.contains(firstToken)) { throwException(trimmedSql); } checkSqlForSecondToken(firstTokenIndex, trimmedSql); }
@Test void testDoLimitForDisabledLimit() throws SQLException { MockEnvironment environment = new MockEnvironment(); environment.setProperty("nacos.persistence.sql.derby.limit.enabled", "false"); EnvUtil.setEnvironment(environment); sqlLimiter = new SqlTypeLimiter(); sqlLimiter.doLimit("CALL SALES.TOTAL_REVENUES();"); }
public <T extends MongoEntity> MongoCollection<T> collection(String collectionName, Class<T> valueType) { return getCollection(collectionName, valueType); }
@Test void testTimestampToJodaDateTimeConversion() { final MongoCollection<TimestampTest> collection = collections.collection("timestamp-test", TimestampTest.class); final DateTime now = DateTime.now(DateTimeZone.UTC); final ObjectId objectId = new ObjectId(); final Map<String, Object> fields = Map.of( "$set", Map.of("_id", objectId), "$currentDate", Map.of("timestamp", Map.of("$type", "timestamp")) ); collection.updateOne(Filters.eq("_id", objectId), new BasicDBObject(fields), new UpdateOptions().upsert(true)); final TimestampTest timestampTest = collection.find(Filters.eq("_id", objectId)).first(); assertThat(timestampTest).isNotNull().satisfies(tt -> assertThat(tt.timestamp().getMillis()).isGreaterThanOrEqualTo( now.withMillisOfSecond(0).getMillis()) ); }
public static INodeFile valueOf(INode inode, String path ) throws FileNotFoundException { return valueOf(inode, path, false); }
@Test
public void testValueOf() throws IOException {
    final String path = "/testValueOf";
    final short replication = 3;
    { // cast from null
        final INode from = null;
        // cast to INodeFile, should fail
        try {
            INodeFile.valueOf(from, path);
            fail();
        } catch (FileNotFoundException fnfe) {
            assertTrue(fnfe.getMessage().contains("File does not exist"));
        }
        // cast to INodeDirectory, should fail
        try {
            INodeDirectory.valueOf(from, path);
            fail();
        } catch (FileNotFoundException e) {
            assertTrue(e.getMessage().contains("Directory does not exist"));
        }
    }
    { // cast from INodeFile
        final INode from = createINodeFile(replication, preferredBlockSize);
        // cast to INodeFile, should succeed
        final INodeFile f = INodeFile.valueOf(from, path);
        assertTrue(f == from);
        // cast to INodeDirectory, should fail
        try {
            INodeDirectory.valueOf(from, path);
            fail();
        } catch (PathIsNotDirectoryException e) {
            // Expected
        }
    }
    { // cast from INodeFileUnderConstruction
        final INode from = new INodeFile(
                HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
        from.asFile().toUnderConstruction("client", "machine");
        // cast to INodeFile, should succeed
        final INodeFile f = INodeFile.valueOf(from, path);
        assertTrue(f == from);
        // cast to INodeDirectory, should fail
        try {
            INodeDirectory.valueOf(from, path);
            fail();
        } catch (PathIsNotDirectoryException expected) {
            // expected
        }
    }
    { // cast from INodeDirectory
        final INode from = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L);
        // cast to INodeFile, should fail
        try {
            INodeFile.valueOf(from, path);
            fail();
        } catch (FileNotFoundException fnfe) {
            assertTrue(fnfe.getMessage().contains("Path is not a file"));
        }
        // cast to INodeDirectory, should succeed
        final INodeDirectory d = INodeDirectory.valueOf(from, path);
        assertTrue(d == from);
    }
}
static <K, V> ReadFromKafkaDoFn<K, V> create( ReadSourceDescriptors<K, V> transform, TupleTag<KV<KafkaSourceDescriptor, KafkaRecord<K, V>>> recordTag) { if (transform.isBounded()) { return new Bounded<>(transform, recordTag); } else { return new Unbounded<>(transform, recordTag); } }
@Test
public void testConstructorWithPollTimeout() {
    ReadSourceDescriptors<String, String> descriptors = makeReadSourceDescriptor(consumer);
    // default poll timeout = 2 seconds
    ReadFromKafkaDoFn<String, String> dofnInstance = ReadFromKafkaDoFn.create(descriptors, RECORDS);
    Assert.assertEquals(2L, dofnInstance.consumerPollingTimeout);
    // updated timeout = 5 seconds
    descriptors = descriptors.withConsumerPollingTimeout(5L);
    ReadFromKafkaDoFn<String, String> dofnInstanceNew = ReadFromKafkaDoFn.create(descriptors, RECORDS);
    Assert.assertEquals(5L, dofnInstanceNew.consumerPollingTimeout);
}
public static String hump2Line(String str) { Matcher matcher = CAMEL_PATTERN.matcher(str); StringBuffer sb = new StringBuffer(); if (matcher.find()) { matcher.appendReplacement(sb, "-" + matcher.group(0).toLowerCase()); while (matcher.find()) { matcher.appendReplacement(sb, "-" + matcher.group(0).toLowerCase()); } } else { matcher = LINE_PATTERN.matcher(str); while (matcher.find()) { matcher.appendReplacement(sb, matcher.group(1).toUpperCase()); } } matcher.appendTail(sb); return sb.toString(); }
@Test public void testHump2Line(){ assertThat(StringUtils.hump2Line("abc-d").equals("abcD")).isTrue(); assertThat(StringUtils.hump2Line("aBc").equals("a-bc")).isTrue(); assertThat(StringUtils.hump2Line("abc").equals("abc")).isTrue(); }
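// The two regex constants are not shown in the snippet above; plausible definitions that are
// consistent with hump2Line and the test (assumptions, not the actual source):
private static final java.util.regex.Pattern CAMEL_PATTERN = java.util.regex.Pattern.compile("[A-Z]");
private static final java.util.regex.Pattern LINE_PATTERN = java.util.regex.Pattern.compile("-(\\w)");
// With these, "aBc" becomes "a-bc" (camel case to kebab case), "abc-d" becomes "abcD" (kebab
// case back to camel case), and "abc" matches neither pattern and is returned unchanged.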
@Override public boolean setIfExists(V value) { return get(setIfExistsAsync(value)); }
@Test public void testSetIfExists() { RJsonBucket<TestType> al = redisson.getJsonBucket("test", new JacksonCodec<>(TestType.class)); TestType t = new TestType(); t.setName("name1"); al.set(t); NestedType nt2 = new NestedType(); nt2.setValue(124); nt2.setValues(Arrays.asList("t4", "t3")); assertThat(al.setIfExists("$.type", nt2)).isFalse(); NestedType nt = new NestedType(); nt.setValue(123); nt.setValues(Arrays.asList("t1", "t2")); al.set("$.type", nt); Integer n2 = al.get(new JacksonCodec<>(Integer.class), "type.value"); assertThat(n2).isEqualTo(123); assertThat(al.setIfExists("$.type", nt2)).isTrue(); Integer n3 = al.get(new JacksonCodec<>(Integer.class), "type.value"); assertThat(n3).isEqualTo(124); }
@Override public void isEqualTo(@Nullable Object expected) { super.isEqualTo(expected); }
@Test public void isEqualTo_WithoutToleranceParameter_Fail_DifferentOrder() { expectFailureWhenTestingThat(array(2.2d, 3.3d)).isEqualTo(array(3.3d, 2.2d)); }
@Override protected CompletableFuture<JobStatusInfo> handleRequest( @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway) throws RestHandlerException { JobID jobId = request.getPathParameter(JobIDPathParameter.class); return gateway.requestJobStatus(jobId, timeout).thenApply(JobStatusInfo::new); }
@Test void testRequestJobStatus() throws Exception { final JobStatusHandler jobStatusHandler = new JobStatusHandler( CompletableFuture::new, TestingUtils.TIMEOUT, Collections.emptyMap(), JobStatusInfoHeaders.getInstance()); final HandlerRequest<EmptyRequestBody> request = createRequest(new JobID()); final CompletableFuture<JobStatusInfo> response = jobStatusHandler.handleRequest( request, new TestingRestfulGateway.Builder() .setRequestJobStatusFunction( ignored -> CompletableFuture.completedFuture( JobStatus.INITIALIZING)) .build()); assertThat(JobStatus.INITIALIZING).isEqualTo(response.get().getJobStatus()); }