Columns: focal_method (string, 13 to 60.9k characters) and test_case (string, 25 to 109k characters). Each pair of lines below is one row: a focal_method followed by its test_case.
public Optional<BoolQueryBuilder> getPostFilters() { return toBoolQuery(postFilters, (e, v) -> true); }
@Test public void getPostFilters_returns_empty_when_no_declared_topAggregation() { AllFilters allFilters = randomNonEmptyAllFilters(); RequestFiltersComputer underTest = new RequestFiltersComputer(allFilters, Collections.emptySet()); assertThat(underTest.getPostFilters()).isEmpty(); }
public static Optional<String> getSystemProperty(String propertyName) { return Optional.ofNullable(getSystemProperty(propertyName, null)); }
@Test public void getSystemProperty_whenPropertyNotExistsWithDefaultValue_returnsDefaultValue() { assertThat(TsunamiConfig.getSystemProperty(TEST_PROPERTY, "Default")).isEqualTo("Default"); }
@Override protected void processOptions(LinkedList<String> args) { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER, OPTION_QUOTA_AND_USAGE, OPTION_EXCLUDE_SNAPSHOT, OPTION_ECPOLICY, OPTION_SNAPSHOT_COUNT); cf.addOptionWithValue(OPTION_TYPE); cf.parse(args); if (args.isEmpty()) { // default path is the current working directory args.add("."); } showQuotas = cf.getOpt(OPTION_QUOTA); humanReadable = cf.getOpt(OPTION_HUMAN); showQuotasAndUsageOnly = cf.getOpt(OPTION_QUOTA_AND_USAGE); excludeSnapshots = cf.getOpt(OPTION_EXCLUDE_SNAPSHOT); displayECPolicy = cf.getOpt(OPTION_ECPOLICY); showSnapshot = cf.getOpt(OPTION_SNAPSHOT_COUNT); if (showQuotas || showQuotasAndUsageOnly) { String types = cf.getOptValue(OPTION_TYPE); if (null != types) { showQuotabyType = true; storageTypes = getAndCheckStorageTypes(types); } else { showQuotabyType = false; } if (excludeSnapshots) { out.println(OPTION_QUOTA + " or " + OPTION_QUOTA_AND_USAGE + " option " + "is given, the -x option is ignored."); excludeSnapshots = false; } } if (cf.getOpt(OPTION_HEADER)) { StringBuilder headString = new StringBuilder(); if (showQuotabyType) { headString.append(QuotaUsage.getStorageTypeHeader(storageTypes)); } else { if (showQuotasAndUsageOnly) { headString.append(QuotaUsage.getHeader()); } else { headString.append(ContentSummary.getHeader(showQuotas)); } } if (displayECPolicy) { headString.append(ContentSummary.getErasureCodingPolicyHeader()); } if (showSnapshot) { headString.append(ContentSummary.getSnapshotHeader()); } headString.append("PATHNAME"); out.println(headString.toString()); } }
@Test public void processOptionsHeaderWithQuotas() { LinkedList<String> options = new LinkedList<String>(); options.add("-q"); options.add("-v"); options.add("dummy"); PrintStream out = mock(PrintStream.class); Count count = new Count(); count.out = out; count.processOptions(options); String withQuotasHeader = // <----12----> <-----15------> <-----15------> <-----15------> " QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA " + // <----12----> <----12----> <-------18-------> " DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME"; verify(out).println(withQuotasHeader); verifyNoMoreInteractions(out); }
public Order getOrderById(Long orderId) throws RestClientException { return getOrderByIdWithHttpInfo(orderId).getBody(); }
@Test public void getOrderByIdTest() { Long orderId = null; Order response = api.getOrderById(orderId); // TODO: test validations }
public static <T, PredicateT extends ProcessFunction<T, Boolean>> Filter<T> by( PredicateT predicate) { return new Filter<>(predicate); }
@Test @Category(NeedsRunner.class) public void testFilterByPredicate() { PCollection<Integer> output = p.apply(Create.of(1, 2, 3, 4, 5, 6, 7)).apply(Filter.by(new EvenFn())); PAssert.that(output).containsInAnyOrder(2, 4, 6); p.run(); }
@Override public Collection<V> range(long startTimestamp, long endTimestamp, int limit) { return get(rangeAsync(startTimestamp, endTimestamp, limit)); }
@Test public void testRange() throws InterruptedException { RTimeSeries<String, Object> t = redisson.getTimeSeries("test"); t.add(1, "10"); t.add(2, "10"); t.add(3, "30"); t.add(4, "40"); assertThat(t.range(1, 4, 2)).containsExactly("10", "10"); assertThat(t.range(1, 4, 0)).containsExactly("10", "10", "30", "40"); RTimeSeries<String, Object> t2 = redisson.getTimeSeries("test2"); t2.add(1, "10"); t2.add(2, "10", 1, TimeUnit.SECONDS); t2.add(3, "30"); t2.add(4, "40"); Thread.sleep(1200); assertThat(t2.range(1, 4, 2)).containsExactly("10", "30"); }
@Override public Reiterator<Object> get(int tag) { return new SubIterator(tag); }
@Test public void testEmpties() { TaggedReiteratorList iter = create( new String[] {}, new String[] {"a", "b", "c"}, new String[] {}, new String[] {}, new String[] {"d"}); assertEquals(iter.get(2) /*empty*/); assertEquals(iter.get(1), "a", "b", "c"); assertEquals(iter.get(2) /*empty*/); assertEquals(iter.get(0) /*empty*/); assertEquals(iter.get(2) /*empty*/); assertEquals(iter.get(4), "d"); assertEquals(iter.get(3) /*empty*/); }
public static DataMap dataSchemaToDataMap(NamedDataSchema schema) { String inputSchemaAsString = schema.toString(); try { JacksonDataCodec codec = new JacksonDataCodec(); DataMap schemaAsDataMap = codec.stringToMap(inputSchemaAsString); return schemaAsDataMap; } catch (IOException e) { // This should never occur. // UTF-8 encoding should always be valid for getBytes // codec.readMap from JSON generated from a schema should always be successful. throw new IllegalStateException(UNEXPECTED_IOEXCEPTION + inputSchemaAsString, e); } }
@Test public void testConvertDataSchemaToDataMap() throws IOException { for (String good : goodInputs) { NamedDataSchema dataSchema = (NamedDataSchema) TestUtil.dataSchemaFromString(good); DataMap mapFromSchema = Conversions.dataSchemaToDataMap(dataSchema); DataMap mapFromString = TestUtil.dataMapFromString(good); assertEquals(mapFromSchema, mapFromString); } }
public static Labels fromResource(HasMetadata resource) { Map<String, String> additionalLabels = resource.getMetadata().getLabels(); if (additionalLabels != null) { additionalLabels = additionalLabels .entrySet() .stream() .filter(entryset -> !STRIMZI_LABELS_EXCLUSION_PATTERN.matcher(entryset.getKey()).matches()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } return additionalLabels(additionalLabels); }
@Test public void testFromResourceWithoutLabels() { Kafka kafka = new KafkaBuilder() .withNewMetadata() .withName("my-kafka") .endMetadata() .withNewSpec() .withNewZookeeper() .withReplicas(3) .withNewEphemeralStorage() .endEphemeralStorage() .endZookeeper() .withNewKafka() .withReplicas(3) .withNewEphemeralStorage() .endEphemeralStorage() .endKafka() .endSpec() .build(); Labels l = Labels.fromResource(kafka); assertThat(l, is(Labels.EMPTY)); }
public void reverseReplace(TestElement el) throws InvalidVariableException { Collection<JMeterProperty> newProps = replaceValues(el.propertyIterator(), new ReplaceFunctionsWithStrings(masterFunction, variables)); setProperties(el, newProps); }
@Test public void testOverlappingMatches() throws Exception { TestPlan plan = new TestPlan(); plan.addParameter("longMatch", "servername"); plan.addParameter("shortMatch", ".+"); ValueReplacer replacer = new ValueReplacer(plan); TestElement element = new TestPlan(); element.setProperty(new StringProperty("domain", "servername.domain")); replacer.reverseReplace(element, true); String replacedDomain = element.getPropertyAsString("domain"); assertEquals("${${shortMatch}", replacedDomain); }
public static RestartBackoffTimeStrategy.Factory createRestartBackoffTimeStrategyFactory( final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration, final Configuration jobConfiguration, final Configuration clusterConfiguration, final boolean isCheckpointingEnabled) { checkNotNull(jobRestartStrategyConfiguration); checkNotNull(jobConfiguration); checkNotNull(clusterConfiguration); return getJobRestartStrategyFactory(jobRestartStrategyConfiguration) .orElse( getRestartStrategyFactoryFromConfig(jobConfiguration) .orElse( (getRestartStrategyFactoryFromConfig(clusterConfiguration) .orElse( getDefaultRestartStrategyFactory( isCheckpointingEnabled))))); }
@Test void testExponentialDelayStrategySpecifiedInClusterConfig() { final Configuration conf = new Configuration(); conf.set(RestartStrategyOptions.RESTART_STRATEGY, EXPONENTIAL_DELAY.getMainValue()); final RestartBackoffTimeStrategy.Factory factory = RestartBackoffTimeStrategyFactoryLoader.createRestartBackoffTimeStrategyFactory( DEFAULT_JOB_LEVEL_RESTART_CONFIGURATION, new Configuration(), conf, false); assertThat(factory) .isInstanceOf( ExponentialDelayRestartBackoffTimeStrategy .ExponentialDelayRestartBackoffTimeStrategyFactory.class); }
@Override public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats); DecimalColumnStatsDataInspector aggregateData = decimalInspectorFromStats(aggregateColStats); DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(newColStats); Decimal lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData)); if (lowValue != null) { aggregateData.setLowValue(lowValue); } Decimal highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData)); if (highValue != null) { aggregateData.setHighValue(highValue); } aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls())); NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator(); NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator(); List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst); aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(), ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs())); aggregateData.setNdvEstimator(ndvEstimatorsList.get(0)); KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator(); KllHistogramEstimator newKllEst = newData.getHistogramEstimator(); aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst)); aggregateColStats.getStatsData().setDecimalStats(aggregateData); }
@Test public void testMergeNonNullWithNullValues() { ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Decimal.class) .low(DECIMAL_1) .high(DECIMAL_3) .numNulls(4) .numDVs(2) .hll(1, 3, 3) .kll(1, 3, 3) .build()); ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Decimal.class) .low(null) .high(null) .numNulls(2) .numDVs(0) .build()); merger.merge(aggrObj, newObj); ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Decimal.class) .low(DECIMAL_1) .high(DECIMAL_3) .numNulls(6) .numDVs(2) .hll(1, 3, 3) .kll(1, 3, 3) .build(); assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData()); }
@Override public boolean shouldFire(TriggerStateMachine.TriggerContext context) throws Exception { return getRepeated(context).invokeShouldFire(context); }
@Test public void testShouldFire() throws Exception { setUp(FixedWindows.of(Duration.millis(10))); when(mockTrigger.shouldFire(anyTriggerContext())).thenReturn(true); assertTrue(tester.shouldFire(new IntervalWindow(new Instant(0), new Instant(10)))); when(mockTrigger.shouldFire(Mockito.any())).thenReturn(false); assertFalse(tester.shouldFire(new IntervalWindow(new Instant(0), new Instant(10)))); }
public static long hash64(byte[] data) { int len = data.length; if (len <= 32) { if (len <= 16) { return hashLen0to16(data); } else { return hashLen17to32(data); } } else if (len <= 64) { return hashLen33to64(data); } // For strings over 64 bytes we hash the end first, and then as we // loop we keep 56 bytes of state: v, w, x, y, and z. long x = fetch64(data, len - 40); long y = fetch64(data, len - 16) + fetch64(data, len - 56); long z = hashLen16(fetch64(data, len - 48) + len, fetch64(data, len - 24)); Number128 v = weakHashLen32WithSeeds(data, len - 64, len, z); Number128 w = weakHashLen32WithSeeds(data, len - 32, y + k1, x); x = x * k1 + fetch64(data, 0); // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks. len = (len - 1) & ~63; int pos = 0; do { x = rotate64(x + y + v.getLowValue() + fetch64(data, pos + 8), 37) * k1; y = rotate64(y + v.getHighValue() + fetch64(data, pos + 48), 42) * k1; x ^= w.getHighValue(); y += v.getLowValue() + fetch64(data, pos + 40); z = rotate64(z + w.getLowValue(), 33) * k1; v = weakHashLen32WithSeeds(data, pos, v.getHighValue() * k1, x + w.getLowValue()); w = weakHashLen32WithSeeds(data, pos + 32, z + w.getHighValue(), y + fetch64(data, pos + 16)); // swap z,x value long swapValue = x; x = z; z = swapValue; pos += 64; len -= 64; } while (len != 0); return hashLen16(hashLen16(v.getLowValue(), w.getLowValue()) + shiftMix(y) * k1 + z, hashLen16(v.getHighValue(), w.getHighValue()) + x); }
@Test public void hash64Test() { long hv = CityHash.hash64(StrUtil.utf8Bytes("你")); assertEquals(-4296898700418225525L, hv); hv = CityHash.hash64(StrUtil.utf8Bytes("你好")); assertEquals(-4294276205456761303L, hv); hv = CityHash.hash64(StrUtil.utf8Bytes("见到你很高兴")); assertEquals(272351505337503793L, hv); hv = CityHash.hash64(StrUtil.utf8Bytes("我们将通过生成一个大的文件的方式来检验各种方法的执行效率因为这种方式在结束的时候需要执行文件")); assertEquals(-8234735310919228703L, hv); }
public SslPrincipalMapper(String sslPrincipalMappingRules) { this.rules = parseRules(splitRules(sslPrincipalMappingRules)); }
@Test public void testSslPrincipalMapper() throws Exception { String rules = String.join(", ", "RULE:^CN=(.*?),OU=ServiceUsers.*$/$1/L", "RULE:^CN=(.*?),OU=(.*?),O=(.*?),L=(.*?),ST=(.*?),C=(.*?)$/$1@$2/L", "RULE:^cn=(.*?),ou=(.*?),dc=(.*?),dc=(.*?)$/$1@$2/U", "RULE:^.*[Cc][Nn]=([a-zA-Z0-9.]*).*$/$1/U", "DEFAULT" ); SslPrincipalMapper mapper = SslPrincipalMapper.fromRules(rules); assertEquals("duke", mapper.getName("CN=Duke,OU=ServiceUsers,O=Org,C=US")); assertEquals("duke@sme", mapper.getName("CN=Duke,OU=SME,O=mycp,L=Fulton,ST=MD,C=US")); assertEquals("DUKE@SME", mapper.getName("cn=duke,ou=sme,dc=mycp,dc=com")); assertEquals("DUKE", mapper.getName("cN=duke,OU=JavaSoft,O=Sun Microsystems")); assertEquals("OU=JavaSoft,O=Sun Microsystems,C=US", mapper.getName("OU=JavaSoft,O=Sun Microsystems,C=US")); }
@Override public WebhookDelivery call(Webhook webhook, WebhookPayload payload) { WebhookDelivery.Builder builder = new WebhookDelivery.Builder(); long startedAt = system.now(); builder .setAt(startedAt) .setPayload(payload) .setWebhook(webhook); try { HttpUrl url = HttpUrl.parse(webhook.getUrl()); if (url == null) { throw new IllegalArgumentException("Webhook URL is not valid: " + webhook.getUrl()); } builder.setEffectiveUrl(HttpUrlHelper.obfuscateCredentials(webhook.getUrl(), url)); Request request = buildHttpRequest(url, webhook, payload); try (Response response = execute(request)) { builder.setHttpStatus(response.code()); } } catch (Exception e) { builder.setError(e); } return builder .setDurationInMs((int) (system.now() - startedAt)) .build(); }
@Test public void silently_catch_error_when_url_is_incorrect() { Webhook webhook = new Webhook(WEBHOOK_UUID, PROJECT_UUID, CE_TASK_UUID, randomAlphanumeric(40), "my-webhook", "this_is_not_an_url", null); WebhookDelivery delivery = newSender(false).call(webhook, PAYLOAD); assertThat(delivery.getHttpStatus()).isEmpty(); assertThat(delivery.getDurationInMs().get()).isNotNegative(); assertThat(delivery.getError().get()).isInstanceOf(IllegalArgumentException.class); assertThat(delivery.getErrorMessage()).contains("Webhook URL is not valid: this_is_not_an_url"); assertThat(delivery.getAt()).isEqualTo(NOW); assertThat(delivery.getWebhook()).isSameAs(webhook); assertThat(delivery.getPayload()).isSameAs(PAYLOAD); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
@Test public void shouldFindVarargWithList() { // Given: givenFunctions( function(EXPECTED, 0, STRING_VARARGS) ); // When: final KsqlScalarFunction fun = udfIndex .getFunction(ImmutableList.of(SqlArgument.of(SqlArray.of(SqlTypes.STRING)))); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
@Override public void getConfig(CloudDataPlaneFilterConfig.Builder builder) { if (clientsLegacyMode) { builder.legacyMode(true); } else { var clientsCfg = clients.stream() .filter(c -> !c.certificates().isEmpty()) .map(x -> new CloudDataPlaneFilterConfig.Clients.Builder() .id(x.id()) .certificates(x.certificates().stream().map(X509CertificateUtils::toPem).toList()) .permissions(x.permissions().stream().map(Client.Permission::asString).sorted().toList())) .toList(); builder.clients(clientsCfg).legacyMode(false); } }
@Test public void it_generates_correct_config() throws IOException { Path certFile = securityFolder.resolve("foo.pem"); Element clusterElem = DomBuilderTest.parse( """ <container version='1.0'> <clients> <client id="foo" permissions="read,write"> <certificate file="%s"/> </client> </clients> </container> """ .formatted(applicationFolder.toPath().relativize(certFile).toString())); X509Certificate certificate = createCertificate(certFile); buildModel(clusterElem); CloudDataPlaneFilterConfig config = root.getConfig(CloudDataPlaneFilterConfig.class, cloudDataPlaneFilterConfigId); assertFalse(config.legacyMode()); List<CloudDataPlaneFilterConfig.Clients> clients = config.clients(); assertEquals(1, clients.size()); CloudDataPlaneFilterConfig.Clients client = clients.get(0); assertEquals("foo", client.id()); assertIterableEquals(List.of("read", "write"), client.permissions()); assertIterableEquals(List.of(X509CertificateUtils.toPem(certificate)), client.certificates()); ConnectorConfig connectorConfig = connectorConfig(); var caCerts = X509CertificateUtils.certificateListFromPem(connectorConfig.ssl().caCertificate()); assertEquals(1, caCerts.size()); assertEquals(List.of(certificate), caCerts); assertEquals(List.of("foo.bar"), connectorConfig.serverName().known()); var srvCfg = root.getConfig(ServerConfig.class, "container/http"); assertEquals("cloud-data-plane-insecure", srvCfg.defaultFilters().get(0).filterId()); assertEquals(8080, srvCfg.defaultFilters().get(0).localPort()); assertEquals("cloud-data-plane-secure", srvCfg.defaultFilters().get(1).filterId()); assertEquals(4443, srvCfg.defaultFilters().get(1).localPort()); }
public static void extract(Path source, Path destination) throws IOException { extract(source, destination, false); }
@Test public void testExtract() throws URISyntaxException, IOException { Path source = Paths.get(Resources.getResource("core/extract.tar").toURI()); Path destination = temporaryFolder.getRoot().toPath(); TarExtractor.extract(source, destination); Assert.assertTrue(Files.exists(destination.resolve("file A"))); Assert.assertTrue(Files.exists(destination.resolve("file B"))); Assert.assertTrue( Files.exists(destination.resolve("folder").resolve("nested folder").resolve("file C"))); try (Stream<String> lines = Files.lines(destination.resolve("file A"))) { String contents = lines.collect(Collectors.joining()); Assert.assertEquals("Hello", contents); } }
public void logSubscriptionRemoval(final String channel, final int streamId, final long subscriptionId) { final int length = SIZE_OF_INT * 2 + SIZE_OF_LONG + channel.length(); final int captureLength = captureLength(length); final int encodedLength = encodedLength(captureLength); final ManyToOneRingBuffer ringBuffer = this.ringBuffer; final int index = ringBuffer.tryClaim(toEventCodeId(REMOVE_SUBSCRIPTION_CLEANUP), encodedLength); if (index > 0) { try { final UnsafeBuffer buffer = (UnsafeBuffer)ringBuffer.buffer(); encodeSubscriptionRemoval(buffer, index, captureLength, length, channel, streamId, subscriptionId); } finally { ringBuffer.commit(index); } } }
@Test void logSubscriptionRemoval() { final int recordOffset = align(131, ALIGNMENT); logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, recordOffset); final String uri = "uri"; final int streamId = 42; final long id = 19; final int captureLength = uri.length() + SIZE_OF_INT * 2 + SIZE_OF_LONG; logger.logSubscriptionRemoval(uri, streamId, id); verifyLogHeader( logBuffer, recordOffset, toEventCodeId(REMOVE_SUBSCRIPTION_CLEANUP), captureLength, captureLength); assertEquals(streamId, logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH), LITTLE_ENDIAN)); assertEquals(id, logBuffer.getLong(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_INT), LITTLE_ENDIAN)); assertEquals(uri, logBuffer.getStringAscii(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_INT + SIZE_OF_LONG), LITTLE_ENDIAN)); }
public long betweenYear(boolean isReset) { final Calendar beginCal = DateUtil.calendar(begin); final Calendar endCal = DateUtil.calendar(end); int result = endCal.get(Calendar.YEAR) - beginCal.get(Calendar.YEAR); if (false == isReset) { final int beginMonthBase0 = beginCal.get(Calendar.MONTH); final int endMonthBase0 = endCal.get(Calendar.MONTH); if (beginMonthBase0 < endMonthBase0) { return result; } else if (beginMonthBase0 > endMonthBase0) { return result - 1; } else if (Calendar.FEBRUARY == beginMonthBase0 && CalendarUtil.isLastDayOfMonth(beginCal) && CalendarUtil.isLastDayOfMonth(endCal)) { /* Handle the leap-year February case: both dates fall on the last day of February, so the months are treated as equal and both are set to the 1st. */ beginCal.set(Calendar.DAY_OF_MONTH, 1); endCal.set(Calendar.DAY_OF_MONTH, 1); } endCal.set(Calendar.YEAR, beginCal.get(Calendar.YEAR)); long between = endCal.getTimeInMillis() - beginCal.getTimeInMillis(); if (between < 0) { return result - 1; } } return result; }
@Test public void betweenYearTest2() { Date start = DateUtil.parse("2000-02-29"); Date end = DateUtil.parse("2018-02-28"); long betweenYear = new DateBetween(start, end).betweenYear(false); assertEquals(18, betweenYear); }
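A minimal usage sketch of the betweenYear logic above, assuming Hutool's DateUtil and DateBetween as used in the test; the dates are hypothetical and the commented results follow from the method's month/day comparison:

    Date start = DateUtil.parse("2000-03-15");
    Date end = DateUtil.parse("2018-03-14"); // one day short of the 18-year boundary
    long full = new DateBetween(start, end).betweenYear(false); // 17: the final year is not yet complete
    long raw = new DateBetween(start, end).betweenYear(true);   // 18: only the YEAR fields are compared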
@Override public HashSlotCursor8byteKey cursor() { return new Cursor(); }
@Test(expected = AssertionError.class) @RequireAssertEnabled public void testCursor_advance_whenDisposed() { HashSlotCursor8byteKey cursor = hsa.cursor(); hsa.dispose(); cursor.advance(); }
@Override public synchronized UdfFactory ensureFunctionFactory(final UdfFactory factory) { validateFunctionName(factory.getName()); final String functionName = factory.getName().toUpperCase(); if (udafs.containsKey(functionName)) { throw new KsqlException("UdfFactory already registered as aggregate: " + functionName); } if (udtfs.containsKey(functionName)) { throw new KsqlException("UdfFactory already registered as table function: " + functionName); } final UdfFactory existing = udfs.putIfAbsent(functionName, factory); if (existing != null && !existing.matches(factory)) { throw new KsqlException("UdfFactory not compatible with existing factory." + " function: " + functionName + " existing: " + existing + ", factory: " + factory); } return existing == null ? factory : existing; }
@Test public void shouldNotThrowWhenEnsuringCompatibleUdfFactory() { // Given: functionRegistry.ensureFunctionFactory(udfFactory); // When: functionRegistry.ensureFunctionFactory(udfFactory); // Then: no exception thrown. }
public List<String> getDatacentersFor( InetAddress address, String continent, String country, Optional<String> subdivision ) { final int NUM_DATACENTERS = 3; if(this.isEmpty()) { return Collections.emptyList(); } List<String> dcsBySubnet = getDatacentersBySubnet(address); List<String> dcsByGeo = getDatacentersByGeo(continent, country, subdivision).stream() .limit(NUM_DATACENTERS) .filter(dc -> (dcsBySubnet.isEmpty() || !dc.equals(dcsBySubnet.getFirst())) && (dcsBySubnet.size() < 2 || !dc.equals(dcsBySubnet.get(1))) ).toList(); return Stream.concat( dcsBySubnet.stream().limit(dcsByGeo.isEmpty() ? NUM_DATACENTERS : NUM_DATACENTERS - 1), dcsByGeo.stream()) .limit(NUM_DATACENTERS) .toList(); }
@Test void testGetFastestDatacentersEmptySubnet() throws UnknownHostException { var v4address = Inet4Address.getByName("200.200.123.1"); var actual = basicTable.getDatacentersFor(v4address, "NA", "US", Optional.of("VA")); assertThat(actual).isEqualTo(List.of("datacenter-2", "datacenter-1")); }
@SuppressWarnings("checkstyle:HiddenField") public AwsCredentialsProvider credentialsProvider( String accessKeyId, String secretAccessKey, String sessionToken) { if (!Strings.isNullOrEmpty(accessKeyId) && !Strings.isNullOrEmpty(secretAccessKey)) { if (Strings.isNullOrEmpty(sessionToken)) { return StaticCredentialsProvider.create( AwsBasicCredentials.create(accessKeyId, secretAccessKey)); } else { return StaticCredentialsProvider.create( AwsSessionCredentials.create(accessKeyId, secretAccessKey, sessionToken)); } } if (!Strings.isNullOrEmpty(this.clientCredentialsProvider)) { return credentialsProvider(this.clientCredentialsProvider); } // Create a new credential provider for each client return DefaultCredentialsProvider.builder().build(); }
@Test public void testSessionCredentialsConfiguration() { // set access key id, secret access key, and session token AwsClientProperties awsClientProperties = new AwsClientProperties(); AwsCredentialsProvider credentialsProvider = awsClientProperties.credentialsProvider("key", "secret", "token"); assertThat(credentialsProvider.resolveCredentials()) .as("Should use session credentials if session token is set") .isInstanceOf(AwsSessionCredentials.class); assertThat(credentialsProvider.resolveCredentials().accessKeyId()) .as("The access key id should be the same as the one set by tag ACCESS_KEY_ID") .isEqualTo("key"); assertThat(credentialsProvider.resolveCredentials().secretAccessKey()) .as("The secret access key should be the same as the one set by tag SECRET_ACCESS_KEY") .isEqualTo("secret"); }
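A small sketch of how the three branches of credentialsProvider above resolve, assuming a freshly constructed AwsClientProperties with no client credentials provider configured; the key, secret, and token values are placeholders:

    AwsClientProperties props = new AwsClientProperties();
    AwsCredentialsProvider basic = props.credentialsProvider("key", "secret", null);      // StaticCredentialsProvider with AwsBasicCredentials
    AwsCredentialsProvider session = props.credentialsProvider("key", "secret", "token"); // StaticCredentialsProvider with AwsSessionCredentials
    AwsCredentialsProvider fallback = props.credentialsProvider(null, null, null);        // falls through to DefaultCredentialsProvider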
@Nonnull @Override public Result addChunk(ByteBuf buf, @Nullable SocketAddress remoteAddress) { if (!buf.isReadable(2)) { return new Result(null, false); } try { final IpfixParser.MessageDescription messageDescription = shallowParser.shallowParseMessage(buf); final long observationDomainId = messageDescription.getHeader().observationDomainId(); addTemplateKeyInCache(remoteAddress, messageDescription, observationDomainId); // TODO handle options templates // collects all data records that are now ready to be sent final Set<ShallowDataSet> packetsToSendCollection = new HashSet<>(); // the set of template records to include in the newly created message that is our "aggregate result" final Set<Integer> bufferedTemplateIdList = new HashSet<>(); if (!messageDescription.declaredTemplateIds().isEmpty()) { // if we have new templates, look for buffered data records that we have all the templates for now final Set<Integer> knownTemplateIdsList = new HashSet<>(); collectAllTemplateIds(remoteAddress, observationDomainId, knownTemplateIdsList); final Queue<ShallowDataSet> bufferedPackets = packetCache.getIfPresent(TemplateKey.idForExporter(remoteAddress, observationDomainId)); handleBufferedPackets(packetsToSendCollection, bufferedTemplateIdList, knownTemplateIdsList, bufferedPackets); } boolean packetBuffered = false; // the list of template keys to return in the result ( TODO this copies all of the templates all the time :( ) final Set<TemplateKey> templatesList = new HashSet<>(templateCache.asMap().keySet()); bufferedTemplateIdList.addAll(messageDescription.referencedTemplateIds()); LOG.debug("Finding the needed templates for the buffered and current packets"); for (int templateId : bufferedTemplateIdList) { final TemplateKey templateKey = new TemplateKey(remoteAddress, observationDomainId, templateId); final Object template = templateCache.getIfPresent(templateKey); if (template == null) { LOG.debug("Template is null, packet needs to be buffered until templates have been received."); try { final TemplateKey newTemplateKey = TemplateKey.idForExporter(remoteAddress, observationDomainId); final Queue<ShallowDataSet> bufferedPackets = packetCache.get(newTemplateKey, ConcurrentLinkedQueue::new); final byte[] bytes = ByteBufUtil.getBytes(buf); bufferedPackets.addAll(messageDescription.dataSets()); packetBuffered = true; } catch (ExecutionException ignored) { // the loader cannot fail, it only creates a new queue } } else { LOG.debug("Template [{}] has been added to template list.", templateKey); templatesList.add(templateKey); packetsToSendCollection.addAll(messageDescription.dataSets()); } } // if we have buffered this packet, don't try to process it now. we still need all the templates for it if (packetBuffered) { LOG.debug("Packet has been buffered and will not be processed now, returning result."); return new Result(null, true); } // if we didn't buffer anything but also didn't have anything queued that can be processed, don't proceed. if (packetsToSendCollection.isEmpty()) { LOG.debug("Packet has not been buffered and no packet is queued."); return new Result(null, true); } final IpfixJournal.RawIpfix.Builder journalBuilder = IpfixJournal.RawIpfix.newBuilder(); buildJournalObject(packetsToSendCollection, templatesList, journalBuilder); final IpfixJournal.RawIpfix rawIpfix = journalBuilder.build(); return getCompleteResult(rawIpfix); } catch (Exception e) { LOG.error("Unable to aggregate IPFIX message due to the following error ", e); return new Result(null, false); } }
@SuppressWarnings("unchecked") @Test public void ixFlowTest() throws IOException, URISyntaxException { final IpfixAggregator ipfixAggregator = new IpfixAggregator(); final Map<String, Object> configMap = getIxiaConfigmap(); final IpfixCodec codec = new IpfixCodec(new Configuration(configMap), ipfixAggregator, messageFactory); final List<Message> messages = new ArrayList<>(); // ixflow.pcap contains 4 packets, the first has the data templates and option templates // followed by three data sets. two sets have subtemplateList data, the third has only empty lists for domain information try (InputStream stream = Resources.getResource("ixflow.pcap").openStream()) { final Pcap pcap = Pcap.openStream(stream); pcap.loop(packet -> { if (packet.hasProtocol(Protocol.UDP)) { final UDPPacket udp = (UDPPacket) packet.getPacket(Protocol.UDP); final InetSocketAddress source = new InetSocketAddress(udp.getParentPacket().getSourceIP(), udp.getSourcePort()); byte[] payload = new byte[udp.getPayload().getReadableBytes()]; udp.getPayload().getBytes(payload); final ByteBuf buf = Unpooled.wrappedBuffer(payload); final CodecAggregator.Result result = ipfixAggregator.addChunk(buf, source); final ByteBuf ipfixRawBuf = result.getMessage(); if (ipfixRawBuf != null) { byte[] bytes = new byte[ipfixRawBuf.readableBytes()]; ipfixRawBuf.getBytes(0, bytes); messages.addAll(Objects.requireNonNull(codec.decodeMessages(new RawMessage(bytes)))); } } return true; }); } catch (IOException e) { fail("Cannot process PCAP stream"); } assertThat(messages).hasSize(3); assertThat(messages.get(0).getFields()) .doesNotContainKey("httpSession") .containsEntry("dnsRecord_0_dnsIpv4Address", "1.2.0.2") .containsEntry("dnsRecord_0_dnsIpv6Address", "0:0:0:0:0:0:0:0") .containsEntry("dnsRecord_0_dnsName", "server-1020002.example.int."); assertThat(messages.get(1).getFields()) .doesNotContainKey("httpSession") .containsEntry("dnsRecord_0_dnsIpv4Address", "1.2.14.73") .containsEntry("dnsRecord_0_dnsIpv6Address", "0:0:0:0:0:0:0:0") .containsEntry("dnsRecord_0_dnsName", "server-1020e49.example.int."); assertThat(messages.get(2).getFields()) .doesNotContainKey("httpSession") .doesNotContainKey("dnsRecord"); }
@Operation(summary = "Redirect transactionId from BVD") @GetMapping(value = {"/frontchannel/saml/v4/return_from_bvd", "/frontchannel/saml/v4/idp/return_from_bvd"}) public RedirectView redirectFromBvd(@RequestParam(value = "transactionId") String transactionId, @RequestParam(value = "status", required = false) BvdStatus status, HttpServletRequest request) throws SamlSessionException, UnsupportedEncodingException { logger.info("Receive redirect with transactionId from BVD"); return new RedirectView(assertionConsumerServiceUrlService.generateRedirectUrl(null, transactionId, request.getRequestedSessionId(), status)); }
@Test void redirectFromBvdTest() throws SamlSessionException, UnsupportedEncodingException { String redirectUrl = "redirectUrl"; httpServletRequestMock.setRequestedSessionId("sessionId"); when(assertionConsumerServiceUrlServiceMock.generateRedirectUrl(any(), anyString(), anyString(), any(BvdStatus.class))).thenReturn(redirectUrl); RedirectView result = artifactController.redirectFromBvd("transactionId", OK, httpServletRequestMock); assertNotNull(result); assertEquals(redirectUrl, result.getUrl()); verify(assertionConsumerServiceUrlServiceMock, times(1)).generateRedirectUrl(any(), anyString(), anyString(), any(BvdStatus.class)); }
@Override public PositionOutputStream create() { if (!exists()) { return createOrOverwrite(); } else { throw new AlreadyExistsException("Location already exists: %s", uri()); } }
@Test public void testCreate() { OSSURI uri = randomURI(); int dataSize = 8; byte[] data = randomData(dataSize); writeOSSData(uri, data); OutputFile out = OSSOutputFile.fromLocation(ossClient, uri.location(), aliyunProperties); assertThatThrownBy(out::create) .isInstanceOf(AlreadyExistsException.class) .hasMessageContaining("Location already exists"); }
@Override public Num calculate(BarSeries series, Position position) { return getTradeCost(series, position, series.numOf(initialAmount)); }
@Test public void fixedCostWithOnePosition() { MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70); Position position = new Position(); Num criterion; criterion = getCriterion(1000d, 0d, 0.75d).calculate(series, position); assertNumEquals(0d, criterion); position.operate(1); criterion = getCriterion(1000d, 0d, 0.75d).calculate(series, position); assertNumEquals(0.75d, criterion); position.operate(3); criterion = getCriterion(1000d, 0d, 0.75d).calculate(series, position); assertNumEquals(1.5d, criterion); position.operate(4); criterion = getCriterion(1000d, 0d, 0.75d).calculate(series, position); assertNumEquals(1.5d, criterion); }
public static Pod createPod( String name, String namespace, Labels labels, OwnerReference ownerReference, PodTemplate template, Map<String, String> defaultPodLabels, Map<String, String> podAnnotations, Affinity affinity, List<Container> initContainers, List<Container> containers, List<Volume> volumes, List<LocalObjectReference> defaultImagePullSecrets, PodSecurityContext podSecurityContext ) { return new PodBuilder() .withNewMetadata() .withName(name) .withLabels(labels.withAdditionalLabels(Util.mergeLabelsOrAnnotations(defaultPodLabels, TemplateUtils.labels(template))).toMap()) .withNamespace(namespace) .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, TemplateUtils.annotations(template))) .withOwnerReferences(ownerReference) .endMetadata() .withNewSpec() .withRestartPolicy("Never") .withServiceAccountName(name) .withEnableServiceLinks(template != null ? template.getEnableServiceLinks() : null) .withAffinity(affinity) .withInitContainers(initContainers) .withContainers(containers) .withVolumes(volumes) .withTolerations(template != null && template.getTolerations() != null ? template.getTolerations() : null) .withTerminationGracePeriodSeconds(template != null ? (long) template.getTerminationGracePeriodSeconds() : 30L) .withImagePullSecrets(imagePullSecrets(template, defaultImagePullSecrets)) .withSecurityContext(podSecurityContext) .withPriorityClassName(template != null ? template.getPriorityClassName() : null) .withSchedulerName(template != null && template.getSchedulerName() != null ? template.getSchedulerName() : "default-scheduler") .withHostAliases(template != null ? template.getHostAliases() : null) .withTopologySpreadConstraints(template != null ? template.getTopologySpreadConstraints() : null) .endSpec() .build(); }
@Test public void testCreatePodWithNullValuesAndNullTemplate() { Pod pod = WorkloadUtils.createPod( NAME, NAMESPACE, LABELS, OWNER_REFERENCE, null, Map.of("default-label", "default-value"), Map.of("extra", "annotations"), DEFAULT_AFFINITY, List.of(new ContainerBuilder().withName("init-container").build()), List.of(new ContainerBuilder().withName("container").build()), VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false), List.of(new LocalObjectReference("some-pull-secret")), DEFAULT_POD_SECURITY_CONTEXT ); assertThat(pod.getMetadata().getName(), is(NAME)); assertThat(pod.getMetadata().getNamespace(), is(NAMESPACE)); assertThat(pod.getMetadata().getLabels(), is(LABELS.withAdditionalLabels(Map.of("default-label", "default-value")).toMap())); assertThat(pod.getMetadata().getAnnotations(), is(Map.of("extra", "annotations"))); assertThat(pod.getSpec().getRestartPolicy(), is("Never")); assertThat(pod.getSpec().getServiceAccountName(), is(NAME)); assertThat(pod.getSpec().getEnableServiceLinks(), is(nullValue())); assertThat(pod.getSpec().getAffinity(), is(DEFAULT_AFFINITY)); assertThat(pod.getSpec().getInitContainers().size(), is(1)); assertThat(pod.getSpec().getInitContainers().get(0).getName(), is("init-container")); assertThat(pod.getSpec().getContainers().size(), is(1)); assertThat(pod.getSpec().getContainers().get(0).getName(), is("container")); assertThat(pod.getSpec().getVolumes(), is(VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false))); assertThat(pod.getSpec().getTolerations(), is(nullValue())); assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L)); assertThat(pod.getSpec().getImagePullSecrets(), is(List.of(new LocalObjectReference("some-pull-secret")))); assertThat(pod.getSpec().getSecurityContext(), is(DEFAULT_POD_SECURITY_CONTEXT)); assertThat(pod.getSpec().getPriorityClassName(), is(nullValue())); assertThat(pod.getSpec().getSchedulerName(), is("default-scheduler")); assertThat(pod.getSpec().getHostAliases(), is(nullValue())); assertThat(pod.getSpec().getTopologySpreadConstraints(), is(nullValue())); }
public int hash(JimfsPath path) { // Note: JimfsPath.equals() is implemented using the compare() method below; // equalityUsesCanonicalForm is taken into account there via the namesOrdering, which is set // at construction time. int hash = 31; hash = 31 * hash + getFileSystem().hashCode(); final Name root = path.root(); final ImmutableList<Name> names = path.names(); if (equalityUsesCanonicalForm) { // use hash codes of names themselves, which are based on the canonical form hash = 31 * hash + (root == null ? 0 : root.hashCode()); for (Name name : names) { hash = 31 * hash + name.hashCode(); } } else { // use hash codes from toString() form of names hash = 31 * hash + (root == null ? 0 : root.toString().hashCode()); for (Name name : names) { hash = 31 * hash + name.toString().hashCode(); } } return hash; }
@Test public void testHash_usingDisplayForm() { PathService pathService = fakePathService(PathType.unix(), false); JimfsPath path1 = new JimfsPath(pathService, null, ImmutableList.of(Name.create("FOO", "foo"))); JimfsPath path2 = new JimfsPath(pathService, null, ImmutableList.of(Name.create("FOO", "FOO"))); JimfsPath path3 = new JimfsPath( pathService, null, ImmutableList.of(Name.create("FOO", "9874238974897189741"))); assertThat(pathService.hash(path1)).isEqualTo(pathService.hash(path2)); assertThat(pathService.hash(path2)).isEqualTo(pathService.hash(path3)); }
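For contrast with the display-form test above, a hypothetical sketch of the canonical-form branch (a path service built with equalityUsesCanonicalForm set to true); it reuses the Name.create(display, canonical) helper from the test:

    PathService canonicalService = fakePathService(PathType.unix(), true);
    JimfsPath p1 = new JimfsPath(canonicalService, null, ImmutableList.of(Name.create("FOO", "foo")));
    JimfsPath p2 = new JimfsPath(canonicalService, null, ImmutableList.of(Name.create("foo", "foo")));
    // both names share the canonical form "foo", so canonicalService.hash(p1) equals canonicalService.hash(p2)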
public void decode(ByteBuf buffer) { boolean last; int statusCode; while (true) { switch(state) { case READ_COMMON_HEADER: if (buffer.readableBytes() < SPDY_HEADER_SIZE) { return; } int frameOffset = buffer.readerIndex(); int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET; int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET; buffer.skipBytes(SPDY_HEADER_SIZE); boolean control = (buffer.getByte(frameOffset) & 0x80) != 0; int version; int type; if (control) { // Decode control frame common header version = getUnsignedShort(buffer, frameOffset) & 0x7FFF; type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET); streamId = 0; // Default to session Stream-ID } else { // Decode data frame common header version = spdyVersion; // Default to expected version type = SPDY_DATA_FRAME; streamId = getUnsignedInt(buffer, frameOffset); } flags = buffer.getByte(flagsOffset); length = getUnsignedMedium(buffer, lengthOffset); // Check version first then validity if (version != spdyVersion) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SPDY Version"); } else if (!isValidFrameHeader(streamId, type, flags, length)) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid Frame Error"); } else { state = getNextState(type, length); } break; case READ_DATA_FRAME: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0)); break; } // Generate data frames that do not exceed maxChunkSize int dataLength = Math.min(maxChunkSize, length); // Wait until entire frame is readable if (buffer.readableBytes() < dataLength) { return; } ByteBuf data = buffer.alloc().buffer(dataLength); data.writeBytes(buffer, dataLength); length -= dataLength; if (length == 0) { state = State.READ_COMMON_HEADER; } last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN); delegate.readDataFrame(streamId, last, data); break; case READ_SYN_STREAM_FRAME: if (buffer.readableBytes() < 10) { return; } int offset = buffer.readerIndex(); streamId = getUnsignedInt(buffer, offset); int associatedToStreamId = getUnsignedInt(buffer, offset + 4); byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07); last = hasFlag(flags, SPDY_FLAG_FIN); boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL); buffer.skipBytes(10); length -= 10; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_STREAM Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); } break; case READ_SYN_REPLY_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_REPLY Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynReplyFrame(streamId, last); } break; case READ_RST_STREAM_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (streamId == 0 || statusCode == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid RST_STREAM Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readRstStreamFrame(streamId, statusCode); } break; case READ_SETTINGS_FRAME: if (buffer.readableBytes() < 4) { return; } boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR); 
numSettings = getUnsignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); length -= 4; // Validate frame length against number of entries. Each ID/Value entry is 8 bytes. if ((length & 0x07) != 0 || length >> 3 != numSettings) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SETTINGS Frame"); } else { state = State.READ_SETTING; delegate.readSettingsFrame(clear); } break; case READ_SETTING: if (numSettings == 0) { state = State.READ_COMMON_HEADER; delegate.readSettingsEnd(); break; } if (buffer.readableBytes() < 8) { return; } byte settingsFlags = buffer.getByte(buffer.readerIndex()); int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1); int value = getSignedInt(buffer, buffer.readerIndex() + 4); boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE); boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED); buffer.skipBytes(8); --numSettings; delegate.readSetting(id, value, persistValue, persisted); break; case READ_PING_FRAME: if (buffer.readableBytes() < 4) { return; } int pingId = getSignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); state = State.READ_COMMON_HEADER; delegate.readPingFrame(pingId); break; case READ_GOAWAY_FRAME: if (buffer.readableBytes() < 8) { return; } int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); state = State.READ_COMMON_HEADER; delegate.readGoAwayFrame(lastGoodStreamId, statusCode); break; case READ_HEADERS_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid HEADERS Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readHeadersFrame(streamId, last); } break; case READ_WINDOW_UPDATE_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (deltaWindowSize == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid WINDOW_UPDATE Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readWindowUpdateFrame(streamId, deltaWindowSize); } break; case READ_HEADER_BLOCK: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readHeaderBlockEnd(); break; } if (!buffer.isReadable()) { return; } int compressedBytes = Math.min(buffer.readableBytes(), length); ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes); headerBlock.writeBytes(buffer, compressedBytes); length -= compressedBytes; delegate.readHeaderBlock(headerBlock); break; case DISCARD_FRAME: int numBytes = Math.min(buffer.readableBytes(), length); buffer.skipBytes(numBytes); length -= numBytes; if (length == 0) { state = State.READ_COMMON_HEADER; break; } return; case FRAME_ERROR: buffer.skipBytes(buffer.readableBytes()); return; default: throw new Error("Shouldn't reach here."); } } }
@Test public void testIllegalSpdyRstStreamFrameStatusCode() throws Exception { short type = 3; byte flags = 0; int length = 8; int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; int statusCode = 0; // invalid status code ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); encodeControlFrameHeader(buf, type, flags, length); buf.writeInt(streamId); buf.writeInt(statusCode); decoder.decode(buf); verify(delegate).readFrameError(anyString()); assertFalse(buf.isReadable()); buf.release(); }
public Certificate add(CvCertificate cert) { final Certificate db = Certificate.from(cert); if (repository.countByIssuerAndSubject(db.getIssuer(), db.getSubject()) > 0) { throw new ClientException(String.format( "Certificate of subject %s and issuer %s already exists", db.getSubject(), db.getIssuer())); } // Special case for first CVCA certificate for this document type if (db.getType() == Certificate.Type.CVCA && repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) { signatureService.verify(cert, cert.getBody().getPublicKey(), cert.getBody().getPublicKey().getParams()); logger.warn("Added first CVCA certificate for {}, set trusted flag manually", db.getDocumentType()); } else { verify(cert); if (db.getType() == Certificate.Type.AT) { verifyPublicKey(cert); } } return repository.saveAndFlush(db); }
@Test public void shouldNotAddCertificateIfAlreadyExists() throws Exception { certificateRepo.save(loadCvCertificate("rdw/acc/cvca.cvcert", true)); ClientException thrown = assertThrows(ClientException.class, () -> service.add(readCvCertificate("rdw/acc/cvca.cvcert"))); assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", thrown.getMessage()); }
public static String getString(long date, String format) { Date d = new Date(date); return getString(d, format); }
@Test public void getString_dateSignature() throws ParseException { String expectedDate = "15/05/2012"; String format = "dd/MM/yyyy"; Date date = new SimpleDateFormat(format).parse(expectedDate); assertEquals(expectedDate, DateUtils.getString(date, format)); }
@Override public String toString() { if (str == null) { str = this.ip + ":" + this.port; } return str; }
@Test public void testToStringReset() { final Endpoint ep = new Endpoint("192.168.1.1", 8080); assertEquals("192.168.1.1:8080", ep.toString()); assertEquals("192.168.1.1:8080", ep.toString()); }
static void populateSchemaFromListOfRanges(Schema toPopulate, List<RangeNode> ranges) { Range range = consolidateRanges(ranges); if (range != null) { if (range.getLowEndPoint() != null) { if (range.getLowEndPoint() instanceof BigDecimal bigDecimal) { toPopulate.minimum(bigDecimal); } else { toPopulate.addExtension(DMNOASConstants.X_DMN_MINIMUM_VALUE, range.getLowEndPoint()); } toPopulate.exclusiveMinimum(range.getLowBoundary() == Range.RangeBoundary.OPEN); } if (range.getHighEndPoint() != null) { if (range.getHighEndPoint() instanceof BigDecimal bigDecimal) { toPopulate.maximum(bigDecimal); } else { toPopulate.addExtension(DMNOASConstants.X_DMN_MAXIMUM_VALUE, range.getHighEndPoint()); } toPopulate.exclusiveMaximum(range.getHighBoundary() == Range.RangeBoundary.OPEN); } } }
@Test void evaluateUnaryTestsForDateRange() { List<LocalDate> expectedDates = Arrays.asList(LocalDate.of(2022, 1, 1), LocalDate.of(2024, 1, 1)); List<String> formattedDates = expectedDates.stream() .map(toFormat -> String.format("@\"%s-0%s-0%s\"", toFormat.getYear(), toFormat.getMonthValue(), toFormat.getDayOfMonth())) .toList(); List<String> toRange = Arrays.asList(String.format("(>%s)", formattedDates.get(0)), String.format("(<=%s)", formattedDates.get(1))); List<RangeNode> ranges = getBaseNodes(toRange, RangeNode.class); Schema toPopulate = OASFactory.createObject(Schema.class); RangeNodeSchemaMapper.populateSchemaFromListOfRanges(toPopulate, ranges); assertEquals(expectedDates.get(0), toPopulate.getExtensions().get(DMNOASConstants.X_DMN_MINIMUM_VALUE)); assertTrue(toPopulate.getExclusiveMinimum()); assertEquals(expectedDates.get(1), toPopulate.getExtensions().get(DMNOASConstants.X_DMN_MAXIMUM_VALUE)); assertFalse(toPopulate.getExclusiveMaximum()); }
@PublicEvolving public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes( MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) { return getMapReturnTypes(mapInterface, inType, null, false); }
@Test void testInterface() { MapFunction<String, Boolean> mapInterface = new MapFunction<String, Boolean>() { private static final long serialVersionUID = 1L; @Override public Boolean map(String record) throws Exception { return null; } }; TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(mapInterface, BasicTypeInfo.STRING_TYPE_INFO); assertThat(ti).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO); }
public void doesNotMatch(@Nullable String regex) { checkNotNull(regex); if (actual == null) { failWithActual("expected a string that does not match", regex); } else if (actual.matches(regex)) { failWithActual("expected not to match", regex); } }
@Test public void stringDoesNotMatchStringWithFail() { expectFailureWhenTestingThat("abcaaadev").doesNotMatch(".*aaa.*"); assertFailureValue("expected not to match", ".*aaa.*"); }
public final void isNotSameInstanceAs(@Nullable Object unexpected) { if (actual == unexpected) { /* * We use actualCustomStringRepresentation() because it might be overridden to be better than * actual.toString()/unexpected.toString(). */ failWithoutActual( fact("expected not to be specific instance", actualCustomStringRepresentation())); } }
@Test public void isNotSameInstanceAsWithObjects() { Object a = new Object(); Object b = new Object(); assertThat(a).isNotSameInstanceAs(b); }
@Override public void execute(ComputationStep.Context context) { DuplicationVisitor visitor = new DuplicationVisitor(); new DepthTraversalTypeAwareCrawler(visitor).visit(treeRootHolder.getReportTreeRoot()); context.getStatistics().add("duplications", visitor.count); }
@Test public void loads_never_consider_originals_from_batch_on_same_lines_as_the_equals() { reportReader.putDuplications( FILE_2_REF, createDuplication( singleLineTextRange(LINE), createInnerDuplicate(LINE + 1), createInnerDuplicate(LINE + 2), createInProjectDuplicate(FILE_1_REF, LINE + 2)), createDuplication( singleLineTextRange(LINE), createInnerDuplicate(LINE + 2), createInnerDuplicate(LINE + 3), createInProjectDuplicate(FILE_1_REF, LINE + 2))); TestComputationStepContext context = new TestComputationStepContext(); underTest.execute(context); Component file1Component = treeRootHolder.getComponentByRef(FILE_1_REF); assertThat(duplicationRepository.getDuplications(FILE_2_REF)).containsOnly( duplication( singleLineDetailedTextBlock(1, LINE), new InnerDuplicate(singleLineTextBlock(LINE + 1)), new InnerDuplicate(singleLineTextBlock(LINE + 2)), new InProjectDuplicate(file1Component, singleLineTextBlock(LINE + 2))), duplication( singleLineDetailedTextBlock(2, LINE), new InnerDuplicate(singleLineTextBlock(LINE + 2)), new InnerDuplicate(singleLineTextBlock(LINE + 3)), new InProjectDuplicate(file1Component, singleLineTextBlock(LINE + 2)))); assertNbOfDuplications(context, 2); }
@Override public long getDelay() { return config.getLong(DELAY_IN_MILISECONDS_PROPERTY).orElse(10_000L); }
@Test public void getDelay_returnNumberIfConfigEmpty() { long delay = underTest.getDelay(); assertThat(delay).isPositive(); }
@Override public Mono<Void> deleteBatchAsync(List<String> strings, DeleteRecordOptions options) { return Mono.fromRunnable(() -> { Map<String, Record> collection = getCollection(); strings.forEach(collection::remove); }); }
@Test public void deleteBatchAsync() { List<Hotel> hotels = getHotels(); recordCollection.upsertBatchAsync(hotels, null).block(); List<String> keys = hotels.stream().map(Hotel::getId).collect(Collectors.toList()); recordCollection.deleteBatchAsync(keys, null).block(); for (String key : keys) { assertNull(recordCollection.getAsync(key, null).block()); } }
public static boolean containsMessage(@Nullable Throwable exception, String message) { if (exception == null) { return false; } if (exception.getMessage() != null && exception.getMessage().contains(message)) { return true; } return containsMessage(exception.getCause(), message); }
@Test public void testContainsNegativeWithNested() { assertThat( containsMessage( new IllegalStateException( "There is a bad state in the client", new IllegalArgumentException("RESOURCE_EXHAUSTED: Quota issues")), "401 Unauthorized")) .isFalse(); }
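A minimal sketch of the recursive cause-chain search performed by containsMessage above; the results in the comments follow directly from its null, message, and cause checks:

    Throwable nested = new IllegalStateException("client failed", new IllegalArgumentException("RESOURCE_EXHAUSTED: Quota issues"));
    containsMessage(nested, "Quota issues");     // true: matched on the nested cause
    containsMessage(nested, "client");           // true: matched on the outer message
    containsMessage(nested, "401 Unauthorized"); // false: no message in the chain contains it
    containsMessage(null, "anything");           // false: a null exception ends the recursion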
@Override public SelType call(String methodName, SelType[] args) { if (args.length == 1 && "get".equals(methodName)) { return field((SelString) args[0]); } else if (extension != null) { return extension.call(methodName, args); } throw new UnsupportedOperationException( type() + " DO NOT support calling method: " + methodName + " with args: " + Arrays.toString(args)); }
@Test(expected = UnsupportedOperationException.class) public void testInvalidCallGet() { params.call("get", new SelType[] {}); }
public static <R> R callStaticMethod( ClassLoader classLoader, String fullyQualifiedClassName, String methodName, ClassParameter<?>... classParameters) { Class<?> clazz = loadClass(classLoader, fullyQualifiedClassName); return callStaticMethod(clazz, methodName, classParameters); }
@Test public void callStaticMethodReflectively_rethrowsError() { try { ReflectionHelpers.callStaticMethod(ExampleDescendant.class, "staticThrowError"); fail("Expected exception not thrown"); } catch (RuntimeException e) { throw new RuntimeException("Incorrect exception thrown", e); } catch (TestError e) { } }
@Override public long write(Sample sample) { Validate.validState(writer != null, "No writer set! Call setWriter() first!"); StringBuilder row = new StringBuilder(); char[] specials = new char[] { separator, CSVSaveService.QUOTING_CHAR, CharUtils.CR, CharUtils.LF }; for (int i = 0; i < columnCount; i++) { String data = sample.getData(i); row.append(CSVSaveService.quoteDelimiters(data, specials)) .append(separator); } row.setLength(row.length() - 1); writer.println(row.toString()); sampleCount++; return sampleCount; }
@Test public void testWriteWithoutSample() throws Exception { try (Writer writer = new StringWriter(); CsvSampleWriter csvWriter = new CsvSampleWriter(writer, metadata)) { try { csvWriter.write(null); fail("NPE expected"); } catch (NullPointerException e) { // OK. Expected to land here } } }
@Override public void writeFiltered(List<FilteredMessage> filteredMessages) throws Exception { final var messages = filteredMessages.stream() .filter(message -> !message.destinations().get(FILTER_KEY).isEmpty()) .toList(); writes.mark(messages.size()); ignores.mark(filteredMessages.size() - messages.size()); writeMessageEntries(messages); }
@Test public void writeFiltered() throws Exception { final List<Message> messageList = buildMessages(2); output.writeFiltered(List.of( // The first message should not be written to the output because the output's filter key is not included. DefaultFilteredMessage.forDestinationKeys(messageList.get(0), Set.of("foo")), DefaultFilteredMessage.forDestinationKeys(messageList.get(1), Set.of("foo", ElasticSearchOutput.FILTER_KEY)) )); verify(messages, times(1)).bulkIndex(eq(List.of( new MessageWithIndex(wrap(messageList.get(1)), defaultIndexSet) ))); verifyNoMoreInteractions(messages); }
public boolean isSlim() { return (hasTransparencyRelative(50, 16, 2, 4) || hasTransparencyRelative(54, 20, 2, 12) || hasTransparencyRelative(42, 48, 2, 4) || hasTransparencyRelative(46, 52, 2, 12)) || (isAreaBlackRelative(50, 16, 2, 4) && isAreaBlackRelative(54, 20, 2, 12) && isAreaBlackRelative(42, 48, 2, 4) && isAreaBlackRelative(46, 52, 2, 12)); }
@Test @EnabledIf("org.jackhuang.hmcl.JavaFXLauncher#isStarted") public void testIsSlim() throws Exception { String[] names = {"alex", "ari", "efe", "kai", "makena", "noor", "steve", "sunny", "zuri"}; for (String skin : names) { assertTrue(getSkin(skin, true).isSlim()); assertFalse(getSkin(skin, false).isSlim()); } }
public ControllerResult<Void> cleanBrokerData(final CleanControllerBrokerDataRequestHeader requestHeader, final BrokerValidPredicate validPredicate) { final ControllerResult<Void> result = new ControllerResult<>(); final String clusterName = requestHeader.getClusterName(); final String brokerName = requestHeader.getBrokerName(); final String brokerControllerIdsToClean = requestHeader.getBrokerControllerIdsToClean(); Set<Long> brokerIdSet = null; if (!requestHeader.isCleanLivingBroker()) { //if SyncStateInfo.masterAddress is not empty, at least one broker with the same BrokerName is alive SyncStateInfo syncStateInfo = this.syncStateSetInfoTable.get(brokerName); if (StringUtils.isBlank(brokerControllerIdsToClean) && null != syncStateInfo && syncStateInfo.getMasterBrokerId() != null) { String remark = String.format("Broker %s is still alive, clean up failure", requestHeader.getBrokerName()); result.setCodeAndRemark(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, remark); return result; } if (StringUtils.isNotBlank(brokerControllerIdsToClean)) { try { brokerIdSet = Stream.of(brokerControllerIdsToClean.split(";")).map(idStr -> Long.valueOf(idStr)).collect(Collectors.toSet()); } catch (NumberFormatException numberFormatException) { String remark = String.format("Please set the option <brokerControllerIdsToClean> according to the format, exception: %s", numberFormatException); result.setCodeAndRemark(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, remark); return result; } for (Long brokerId : brokerIdSet) { if (validPredicate.check(clusterName, brokerName, brokerId)) { String remark = String.format("Broker [%s, %s] is still alive, clean up failure", requestHeader.getBrokerName(), brokerId); result.setCodeAndRemark(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, remark); return result; } } } } if (isContainsBroker(brokerName)) { final CleanBrokerDataEvent event = new CleanBrokerDataEvent(brokerName, brokerIdSet); result.addEvent(event); return result; } result.setCodeAndRemark(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, String.format("Broker %s is not existed,clean broker data failure.", brokerName)); return result; }
@Test public void testCleanBrokerData() { mockMetaData(); CleanControllerBrokerDataRequestHeader header1 = new CleanControllerBrokerDataRequestHeader(DEFAULT_CLUSTER_NAME, DEFAULT_BROKER_NAME, "1"); ControllerResult<Void> result1 = this.replicasInfoManager.cleanBrokerData(header1, (cluster, brokerName, brokerId) -> true); assertEquals(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, result1.getResponseCode()); CleanControllerBrokerDataRequestHeader header2 = new CleanControllerBrokerDataRequestHeader(DEFAULT_CLUSTER_NAME, DEFAULT_BROKER_NAME, null); ControllerResult<Void> result2 = this.replicasInfoManager.cleanBrokerData(header2, (cluster, brokerName, brokerId) -> true); assertEquals(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, result2.getResponseCode()); assertEquals("Broker broker-set-a is still alive, clean up failure", result2.getRemark()); CleanControllerBrokerDataRequestHeader header3 = new CleanControllerBrokerDataRequestHeader(DEFAULT_CLUSTER_NAME, DEFAULT_BROKER_NAME, "1"); ControllerResult<Void> result3 = this.replicasInfoManager.cleanBrokerData(header3, (cluster, brokerName, brokerId) -> false); assertEquals(ResponseCode.SUCCESS, result3.getResponseCode()); CleanControllerBrokerDataRequestHeader header4 = new CleanControllerBrokerDataRequestHeader(DEFAULT_CLUSTER_NAME, DEFAULT_BROKER_NAME, "1;2;3"); ControllerResult<Void> result4 = this.replicasInfoManager.cleanBrokerData(header4, (cluster, brokerName, brokerId) -> false); assertEquals(ResponseCode.SUCCESS, result4.getResponseCode()); CleanControllerBrokerDataRequestHeader header5 = new CleanControllerBrokerDataRequestHeader(DEFAULT_CLUSTER_NAME, "broker12", "1;2;3", true); ControllerResult<Void> result5 = this.replicasInfoManager.cleanBrokerData(header5, (cluster, brokerName, brokerId) -> false); assertEquals(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, result5.getResponseCode()); assertEquals("Broker broker12 is not existed,clean broker data failure.", result5.getRemark()); CleanControllerBrokerDataRequestHeader header6 = new CleanControllerBrokerDataRequestHeader(null, "broker12", "1;2;3", true); ControllerResult<Void> result6 = this.replicasInfoManager.cleanBrokerData(header6, (cluster, brokerName, brokerId) -> cluster != null); assertEquals(ResponseCode.CONTROLLER_INVALID_CLEAN_BROKER_METADATA, result6.getResponseCode()); CleanControllerBrokerDataRequestHeader header7 = new CleanControllerBrokerDataRequestHeader(null, DEFAULT_BROKER_NAME, "1;2;3", true); ControllerResult<Void> result7 = this.replicasInfoManager.cleanBrokerData(header7, (cluster, brokerName, brokerId) -> false); assertEquals(ResponseCode.SUCCESS, result7.getResponseCode()); }
public static TimeLock ofTimestamp(Instant time) { long secs = time.getEpochSecond(); if (secs < THRESHOLD) throw new IllegalArgumentException("timestamp too low: " + secs); return new TimeLock(secs); }
@Test(expected = IllegalArgumentException.class) public void ofTimestamp_tooLow() { LockTime.ofTimestamp(Instant.EPOCH.plus(365, ChronoUnit.DAYS)); }
@Override public boolean accept(final Path file, final Local local, final TransferStatus parent) throws BackgroundException { if(local.isFile()) { if(local.exists()) { // Read remote attributes final PathAttributes attributes = attribute.find(file); if(local.attributes().getSize() == attributes.getSize()) { if(Checksum.NONE != attributes.getChecksum()) { final ChecksumCompute compute = ChecksumComputeFactory.get(attributes.getChecksum().algorithm); if(compute.compute(local.getInputStream(), parent).equals(attributes.getChecksum())) { if(log.isInfoEnabled()) { log.info(String.format("Skip file %s with checksum %s", file, attributes.getChecksum())); } return false; } else { log.warn(String.format("Checksum mismatch for %s and %s", file, local)); } } else { if(log.isInfoEnabled()) { log.info(String.format("Skip file %s with local size %d", file, local.attributes().getSize())); } // No need to resume completed transfers return false; } } } } return super.accept(file, local, parent); }
@Test public void testAcceptDirectory() throws Exception { ResumeFilter f = new ResumeFilter(new DisabledDownloadSymlinkResolver(), new NullSession(new Host(new TestProtocol()))); Path p = new Path("a", EnumSet.of(Path.Type.directory)); assertTrue(f.accept(p, new NullLocal("d", "a") { @Override public boolean isDirectory() { return true; } @Override public boolean isFile() { return false; } }, new TransferStatus())); }
public boolean containsKey(final long key) { return get(key) != missingValue; }
@Test public void shouldNotContainKeyOfAMissingKey() { assertFalse(map.containsKey(1L)); }
@Override public DistroData getDatumSnapshot(String targetServer) { Member member = memberManager.find(targetServer); if (checkTargetServerStatusUnhealthy(member)) { throw new DistroException( String.format("[DISTRO] Cancel get snapshot caused by target server %s unhealthy", targetServer)); } DistroDataRequest request = new DistroDataRequest(); request.setDataOperation(DataOperation.SNAPSHOT); try { Response response = clusterRpcClientProxy .sendRequest(member, request, DistroConfig.getInstance().getLoadDataTimeoutMillis()); if (checkResponse(response)) { return ((DistroDataResponse) response).getDistroData(); } else { throw new DistroException( String.format("[DISTRO-FAILED] Get snapshot request to %s failed, code: %d, message: %s", targetServer, response.getErrorCode(), response.getMessage())); } } catch (NacosException e) { throw new DistroException("[DISTRO-FAILED] Get distro snapshot failed! ", e); } }
@Test void testGetDatumSnapshotFailure() throws NacosException { assertThrows(DistroException.class, () -> { when(memberManager.find(member.getAddress())).thenReturn(member); member.setState(NodeState.UP); when(clusterRpcClientProxy.isRunning(member)).thenReturn(true); when(clusterRpcClientProxy.sendRequest(eq(member), any(), any(Long.class))).thenReturn(response); response.setErrorInfo(ResponseCode.FAIL.getCode(), "TEST"); transportAgent.getDatumSnapshot(member.getAddress()); }); }
@Nullable public String ensureUser(String userName, String password, String firstName, String lastName, String email, Set<String> expectedRoles) { return ensureUser(userName, password, firstName, lastName, email, expectedRoles, false); }
@Test public void ensureUserWithoutExpectedRoles() throws Exception { final Permissions permissions = new Permissions(ImmutableSet.of()); final User existingUser = newUser(permissions); existingUser.setName("test-user"); existingUser.setFirstLastFullNames("Test", "User"); existingUser.setPassword("password"); existingUser.setEmail("test@example.com"); existingUser.setTimeZone(DateTimeZone.UTC); existingUser.setRoleIds(ImmutableSet.of()); // Set invalid role IDs so the user gets updated when(userService.load("test-user")).thenReturn(existingUser); when(userService.save(any(User.class))).thenReturn("new-id"); assertThat(migrationHelpers.ensureUser("test-user", "pass", "Test", "User", "test@example.com", ImmutableSet.of("54e3deadbeefdeadbeef0001", "54e3deadbeefdeadbeef0002"))) .isEqualTo("new-id"); final ArgumentCaptor<User> userArg = ArgumentCaptor.forClass(User.class); verify(userService, times(1)).save(userArg.capture()); assertThat(userArg.getValue()).satisfies(user -> { assertThat(user.getName()).describedAs("user name").isEqualTo("test-user"); assertThat(user.getFullName()).describedAs("user full-name").isEqualTo("Test User"); assertThat(user.getHashedPassword()).describedAs("user hashed password").isNotBlank(); assertThat(user.getEmail()).describedAs("user email").isEqualTo("test@example.com"); assertThat(user.isReadOnly()).describedAs("user is read-only").isFalse(); assertThat(user.getPermissions()).describedAs("user permissions") .containsOnlyElementsOf(permissions.userSelfEditPermissions("test-user")); assertThat(user.getRoleIds()).describedAs("user roles").containsOnly( "54e3deadbeefdeadbeef0001", "54e3deadbeefdeadbeef0002" ); assertThat(user.getTimeZone()).describedAs("user timezone").isEqualTo(DateTimeZone.UTC); }); }
@Override public int getType() { checkState(); return TYPE_FORWARD_ONLY; }
@Test void assertGetType() { assertThat(actualResultSet.getType(), is(ResultSet.TYPE_FORWARD_ONLY)); }
public static void main(String[] argv) throws Throwable { Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); int nRet = 0; // usage: $0 user appId locId host port app_log_dir user_dir [user_dir]* // let $x = $x/usercache for $local.dir // MKDIR $x/$user/appcache/$appid // MKDIR $x/$user/appcache/$appid/output // MKDIR $x/$user/appcache/$appid/filecache // LOAD $x/$user/appcache/$appid/appTokens try { String user = argv[0]; String appId = argv[1]; String locId = argv[2]; InetSocketAddress nmAddr = new InetSocketAddress(argv[3], Integer.parseInt(argv[4])); String tokenFileName = argv[5]; String[] sLocaldirs = Arrays.copyOfRange(argv, 6, argv.length); ArrayList<Path> localDirs = new ArrayList<>(sLocaldirs.length); for (String sLocaldir : sLocaldirs) { localDirs.add(new Path(sLocaldir)); } final String uid = UserGroupInformation.getCurrentUser().getShortUserName(); if (!user.equals(uid)) { // TODO: fail localization LOG.warn("Localization running as " + uid + " not " + user); } ContainerLocalizer localizer = new ContainerLocalizer( FileContext.getLocalFSFileContext(), user, appId, locId, tokenFileName, localDirs, RecordFactoryProvider.getRecordFactory(null)); localizer.runLocalization(nmAddr); } catch (Throwable e) { // Print traces to stdout so that they can be logged by the NM address // space in both DefaultCE and LCE cases e.printStackTrace(System.out); LOG.error("Exception in main:", e); nRet = -1; } finally { System.exit(nRet); } }
@Test public void testMain() throws Exception { ContainerLocalizerWrapper wrapper = new ContainerLocalizerWrapper(); ContainerLocalizer localizer = wrapper.setupContainerLocalizerForTest(); Random random = wrapper.random; List<Path> localDirs = wrapper.localDirs; Path tokenPath = wrapper.tokenPath; LocalizationProtocol nmProxy = wrapper.nmProxy; AbstractFileSystem spylfs = wrapper.spylfs; mockOutDownloads(localizer); // verify created cache List<Path> privCacheList = new ArrayList<Path>(); List<Path> appCacheList = new ArrayList<Path>(); for (Path p : localDirs) { Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE), appUser); Path privcache = new Path(base, ContainerLocalizer.FILECACHE); privCacheList.add(privcache); Path appDir = new Path(base, new Path(ContainerLocalizer.APPCACHE, appId)); Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE); appCacheList.add(appcache); } // mock heartbeat responses from NM ResourceLocalizationSpec rsrcA = getMockRsrc(random, LocalResourceVisibility.PRIVATE, privCacheList.get(0)); ResourceLocalizationSpec rsrcB = getMockRsrc(random, LocalResourceVisibility.PRIVATE, privCacheList.get(0)); ResourceLocalizationSpec rsrcC = getMockRsrc(random, LocalResourceVisibility.APPLICATION, appCacheList.get(0)); ResourceLocalizationSpec rsrcD = getMockRsrc(random, LocalResourceVisibility.PRIVATE, privCacheList.get(0)); when(nmProxy.heartbeat(isA(LocalizerStatus.class))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcA))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcB))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcC))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcD))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.<ResourceLocalizationSpec>emptyList())) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE, null)); LocalResource tRsrcA = rsrcA.getResource(); LocalResource tRsrcB = rsrcB.getResource(); LocalResource tRsrcC = rsrcC.getResource(); LocalResource tRsrcD = rsrcD.getResource(); doReturn( new FakeDownload(rsrcA.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcA), isA(UserGroupInformation.class)); doReturn( new FakeDownload(rsrcB.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcB), isA(UserGroupInformation.class)); doReturn( new FakeDownload(rsrcC.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcC), isA(UserGroupInformation.class)); doReturn( new FakeDownload(rsrcD.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcD), isA(UserGroupInformation.class)); // run localization localizer.runLocalization(nmAddr); for (Path p : localDirs) { Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE), appUser); Path privcache = new Path(base, ContainerLocalizer.FILECACHE); // $x/usercache/$user/filecache verify(spylfs).mkdir(eq(privcache), eq(CACHE_DIR_PERM), eq(false)); Path appDir = new Path(base, new Path(ContainerLocalizer.APPCACHE, appId)); // $x/usercache/$user/appcache/$appId/filecache Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE); verify(spylfs).mkdir(eq(appcache), eq(CACHE_DIR_PERM), eq(false)); } // verify tokens read at expected location verify(spylfs).open(tokenPath); // verify downloaded resources reported to NM verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcA.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcB.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcC.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcD.getResource()))); // verify all HB use localizerID provided verify(nmProxy, never()).heartbeat(argThat( status -> !containerId.equals(status.getLocalizerId()))); }
@Override public List<Map<String, String>> taskConfigs(int maxTasks) { if (knownConsumerGroups == null) { // If knownConsumerGroup is null, it means the initial loading has not finished. // An exception should be thrown to trigger the retry behavior in the framework. log.debug("Initial consumer loading has not yet completed"); throw new RetriableException("Timeout while loading consumer groups."); } // if the replication is disabled, known consumer group is empty, or checkpoint emission is // disabled by setting 'emit.checkpoints.enabled' to false, the interval of checkpoint emission // will be negative and no 'MirrorCheckpointTask' will be created if (!config.enabled() || knownConsumerGroups.isEmpty() || config.emitCheckpointsInterval().isNegative()) { return Collections.emptyList(); } int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); List<List<String>> groupsPartitioned = ConnectorUtils.groupPartitions(new ArrayList<>(knownConsumerGroups), numTasks); return IntStream.range(0, numTasks) .mapToObj(i -> config.taskConfigForConsumerGroups(groupsPartitioned.get(i), i)) .collect(Collectors.toList()); }
@Test public void testReplicationDisabled() { // disable the replication MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps("enabled", "false")); Set<String> knownConsumerGroups = new HashSet<>(); knownConsumerGroups.add(CONSUMER_GROUP); // MirrorCheckpointConnector as minimum to run taskConfig() MirrorCheckpointConnector connector = new MirrorCheckpointConnector(knownConsumerGroups, config); List<Map<String, String>> output = connector.taskConfigs(1); // expect no task will be created assertEquals(0, output.size(), "Replication isn't disabled"); }
public static SortOrder buildSortOrder(Table table) { return buildSortOrder(table.schema(), table.spec(), table.sortOrder()); }
@Test public void testSortOrderClusteringAllPartitionFields() { PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).day("ts").identity("category").build(); SortOrder order = SortOrder.builderFor(SCHEMA) .withOrderId(1) .asc(Expressions.day("ts")) .asc("category") .desc("id") .build(); assertThat(SortOrderUtil.buildSortOrder(SCHEMA, spec, order)) .as("Should leave the order unchanged") .isEqualTo(order); }
public List<ResContainer> makeResourcesXml(JadxArgs args) { Map<String, ICodeWriter> contMap = new HashMap<>(); for (ResourceEntry ri : resStorage.getResources()) { if (SKIP_RES_TYPES.contains(ri.getTypeName())) { continue; } String fn = getFileName(ri); ICodeWriter cw = contMap.get(fn); if (cw == null) { cw = new SimpleCodeWriter(args); cw.add("<?xml version=\"1.0\" encoding=\"utf-8\"?>"); cw.startLine("<resources>"); cw.incIndent(); contMap.put(fn, cw); } addValue(cw, ri); } List<ResContainer> files = new ArrayList<>(contMap.size()); for (Map.Entry<String, ICodeWriter> entry : contMap.entrySet()) { String fileName = entry.getKey(); ICodeWriter content = entry.getValue(); content.decIndent(); content.startLine("</resources>"); ICodeInfo codeInfo = content.finish(); files.add(ResContainer.textResource(fileName, codeInfo)); } Collections.sort(files); return files; }
@Test void testStringFormattedFalse() { ResourceStorage resStorage = new ResourceStorage(); ResourceEntry re = new ResourceEntry(2130903103, "jadx.gui.app", "string", "app_name", ""); re.setSimpleValue(new RawValue(3, 0)); re.setNamedValues(Lists.list()); resStorage.add(re); BinaryXMLStrings strings = new BinaryXMLStrings(); strings.put(0, "%s at %s"); ValuesParser vp = new ValuesParser(strings, resStorage.getResourcesNames()); ResXmlGen resXmlGen = new ResXmlGen(resStorage, vp); List<ResContainer> files = resXmlGen.makeResourcesXml(args); assertThat(files).hasSize(1); assertThat(files.get(0).getName()).isEqualTo("res/values/strings.xml"); String input = files.get(0).getText().toString(); assertThat(input).isEqualTo("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" + "<resources>\n" + " <string name=\"app_name\" formatted=\"false\">%s at %s</string>\n" + "</resources>"); }
List<AlternativeInfo> calcAlternatives(final int s, final int t) { // First, do a regular bidirectional route search checkAlreadyRun(); init(s, 0, t, 0); runAlgo(); final Path bestPath = extractPath(); if (!bestPath.isFound()) { return Collections.emptyList(); } alternatives.add(new AlternativeInfo(bestPath, 0)); final ArrayList<PotentialAlternativeInfo> potentialAlternativeInfos = new ArrayList<>(); bestWeightMapFrom.forEach((IntObjectPredicate<SPTEntry>) (v, fromSPTEntry) -> { SPTEntry toSPTEntry = bestWeightMapTo.get(v); if (toSPTEntry == null) return true; if (fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath() > bestPath.getWeight() * maxWeightFactor) return true; // This gives us a path s -> v -> t, but since we are using contraction hierarchies, // s -> v and v -> t need not be shortest paths. In fact, they can sometimes be pretty strange. // We still use this preliminary path to filter for shared path length with other alternatives, // so we don't have to work so much. Path preliminaryRoute = createPathExtractor().extract(fromSPTEntry, toSPTEntry, fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath()); double preliminaryShare = calculateShare(preliminaryRoute); if (preliminaryShare > maxShareFactor) { return true; } PotentialAlternativeInfo potentialAlternativeInfo = new PotentialAlternativeInfo(); potentialAlternativeInfo.v = v; potentialAlternativeInfo.weight = 2 * (fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath()) + preliminaryShare; potentialAlternativeInfos.add(potentialAlternativeInfo); return true; }); potentialAlternativeInfos.sort(Comparator.comparingDouble(o -> o.weight)); for (PotentialAlternativeInfo potentialAlternativeInfo : potentialAlternativeInfos) { int v = potentialAlternativeInfo.v; // Okay, now we want the s -> v -> t shortest via-path, so we route s -> v and v -> t // and glue them together. DijkstraBidirectionCH svRouter = new DijkstraBidirectionCH(graph); svRouter.setPathExtractorSupplier(this::createPathExtractor); final Path svPath = svRouter.calcPath(s, v); extraVisitedNodes += svRouter.getVisitedNodes(); DijkstraBidirectionCH vtRouter = new DijkstraBidirectionCH(graph); vtRouter.setPathExtractorSupplier(this::createPathExtractor); final Path vtPath = vtRouter.calcPath(v, t); Path path = concat(graph.getBaseGraph(), svPath, vtPath); extraVisitedNodes += vtRouter.getVisitedNodes(); double sharedDistanceWithShortest = sharedDistanceWithShortest(path); double detourLength = path.getDistance() - sharedDistanceWithShortest; double directLength = bestPath.getDistance() - sharedDistanceWithShortest; if (detourLength > directLength * maxWeightFactor) { continue; } double share = calculateShare(path); if (share > maxShareFactor) { continue; } // This is the final test we need: Discard paths that are not "locally shortest" around v. // So move a couple of nodes to the left and right from v on our path, // route, and check if v is on the shortest path. final IntIndexedContainer svNodes = svPath.calcNodes(); int vIndex = svNodes.size() - 1; if (!tTest(path, vIndex)) continue; alternatives.add(new AlternativeInfo(path, share)); if (alternatives.size() >= maxPaths) break; } return alternatives; }
@Test public void testRelaxMaximumStretch() { BaseGraph g = createTestGraph(em); PMap hints = new PMap(); hints.putObject("alternative_route.max_weight_factor", 4); hints.putObject("alternative_route.local_optimality_factor", 0.5); hints.putObject("alternative_route.max_paths", 4); RoutingCHGraph routingCHGraph = prepareCH(g); AlternativeRouteCH altDijkstra = new AlternativeRouteCH(routingCHGraph, hints); List<AlternativeRouteCH.AlternativeInfo> pathInfos = altDijkstra.calcAlternatives(5, 10); assertEquals(4, pathInfos.size()); // 4 -> 11 -> 12 is shorter than 4 -> 10 -> 12 (11 is an admissible via node), AND // 4 -> 11 -> 12 -> 10 is not too long compared to 4 -> 10 }
public static <T extends ScanTask> List<ScanTaskGroup<T>> planTaskGroups( List<T> tasks, long splitSize, int lookback, long openFileCost) { return Lists.newArrayList( planTaskGroups(CloseableIterable.withNoopClose(tasks), splitSize, lookback, openFileCost)); }
@Test public void testTaskGroupPlanningByPartition() { // When all files belong to the same partition, we should combine them together as long as the // total file size is <= split size List<PartitionScanTask> tasks = ImmutableList.of( taskWithPartition(SPEC1, PARTITION1, 64), taskWithPartition(SPEC1, PARTITION1, 128), taskWithPartition(SPEC1, PARTITION1, 64), taskWithPartition(SPEC1, PARTITION1, 128)); int count = 0; for (ScanTaskGroup<PartitionScanTask> task : TableScanUtil.planTaskGroups(tasks, 512, 10, 4, SPEC1.partitionType())) { assertThat(task.filesCount()).isEqualTo(4); assertThat(task.sizeBytes()).isEqualTo(64 + 128 + 64 + 128); count += 1; } assertThat(count).isOne(); // We have 2 files from partition 1 and 2 files from partition 2, so they should be combined // separately tasks = ImmutableList.of( taskWithPartition(SPEC1, PARTITION1, 64), taskWithPartition(SPEC1, PARTITION1, 128), taskWithPartition(SPEC1, PARTITION2, 64), taskWithPartition(SPEC1, PARTITION2, 128)); count = 0; for (ScanTaskGroup<PartitionScanTask> task : TableScanUtil.planTaskGroups(tasks, 512, 10, 4, SPEC1.partitionType())) { assertThat(task.filesCount()).isEqualTo(2); assertThat(task.sizeBytes()).isEqualTo(64 + 128); count += 1; } assertThat(count).isEqualTo(2); // Similar to the case above, but now files have different partition specs tasks = ImmutableList.of( taskWithPartition(SPEC1, PARTITION1, 64), taskWithPartition(SPEC2, PARTITION1, 128), taskWithPartition(SPEC1, PARTITION2, 64), taskWithPartition(SPEC2, PARTITION2, 128)); count = 0; for (ScanTaskGroup<PartitionScanTask> task : TableScanUtil.planTaskGroups(tasks, 512, 10, 4, SPEC1.partitionType())) { assertThat(task.filesCount()).isEqualTo(2); assertThat(task.sizeBytes()).isEqualTo(64 + 128); count += 1; } assertThat(count).isEqualTo(2); // Combining within partitions should also respect split size. In this case, the split size // is equal to the file size, so each partition will have 2 tasks. tasks = ImmutableList.of( taskWithPartition(SPEC1, PARTITION1, 128), taskWithPartition(SPEC2, PARTITION1, 128), taskWithPartition(SPEC1, PARTITION2, 128), taskWithPartition(SPEC2, PARTITION2, 128)); count = 0; for (ScanTaskGroup<PartitionScanTask> task : TableScanUtil.planTaskGroups(tasks, 128, 10, 4, SPEC1.partitionType())) { assertThat(task.filesCount()).isOne(); assertThat(task.sizeBytes()).isEqualTo(128); count += 1; } assertThat(count).isEqualTo(4); // The following should throw exception since `SPEC2` is not an intersection of partition specs // across all tasks. List<PartitionScanTask> tasks2 = ImmutableList.of( taskWithPartition(SPEC1, PARTITION1, 128), taskWithPartition(SPEC2, PARTITION2, 128)); assertThatThrownBy( () -> TableScanUtil.planTaskGroups(tasks2, 128, 10, 4, SPEC2.partitionType())) .isInstanceOf(IllegalArgumentException.class) .hasMessageStartingWith("Cannot find field"); }
public long getClientExpiredTime() { return clientExpiredTime; }
@Test void testInitConfigFormEnv() throws NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException { mockEnvironment.setProperty(ClientConstants.CLIENT_EXPIRED_TIME_CONFIG_KEY, String.valueOf(EXPIRED_TIME)); Constructor<ClientConfig> declaredConstructor = ClientConfig.class.getDeclaredConstructor(); declaredConstructor.setAccessible(true); ClientConfig clientConfig = declaredConstructor.newInstance(); assertEquals(EXPIRED_TIME, clientConfig.getClientExpiredTime()); }
@Override public Map<String, String> getMetadata(final Path file) throws BackgroundException { try { return new GoogleStorageAttributesFinderFeature(session).find(file).getMetadata(); } catch(NotfoundException e) { if(file.isDirectory()) { // No placeholder file may exist but we just have a common prefix return Collections.emptyMap(); } throw e; } }
@Test public void testGetMetadataBucket() throws Exception { final Path bucket = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Map<String, String> metadata = new GoogleStorageMetadataFeature(session).getMetadata(bucket); assertTrue(metadata.isEmpty()); }
private WorkflowRun getRun() { return PipelineRunImpl.findRun(runExternalizableId); }
@Test public void getRun_EventuallyFound() throws Exception { try (MockedStatic<QueueUtil> queueUtilMockedStatic = Mockito.mockStatic(QueueUtil.class)) { Mockito.when(QueueUtil.getRun(job, 1)).thenReturn(null).thenReturn(null).thenReturn(null).thenReturn(run); WorkflowRun workflowRun = PipelineNodeImpl.getRun(job, 1); assertEquals(workflowRun, run); Mockito.verify(QueueUtil.class, VerificationModeFactory.times(4)); QueueUtil.getRun(job, 1); // need to call again to handle verify } }
@Override public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) { var triggers = annotations.stream() .filter(te -> { for (var trigger : KoraSchedulingAnnotationProcessor.triggers) { if (te.getQualifiedName().contentEquals(trigger.canonicalName())) { return true; } } return false; }) .toArray(TypeElement[]::new); var scheduledMethods = roundEnv.getElementsAnnotatedWithAny(triggers); var scheduledTypes = scheduledMethods.stream().collect(Collectors.groupingBy(e -> { var type = (TypeElement) e.getEnclosingElement(); return type.getQualifiedName().toString(); })); for (var entry : scheduledTypes.entrySet()) { var methods = entry.getValue(); var type = (TypeElement) entry.getValue().get(0).getEnclosingElement(); try { this.generateModule(type, methods); } catch (ProcessingErrorException e) { e.printError(this.processingEnv); } catch (IOException e) { throw new RuntimeException(e); } // todo exceptions } return false; }
@Test void testScheduledJdkOnceTest() throws Exception { process(ScheduledJdkOnceTest.class); }
void push(SelType obj) { stack[++top] = obj; }
@Test public void testPush() { assertTrue(state.isStackEmpty()); state.push(SelString.of("foo")); SelType res = state.readWithOffset(0); assertEquals("STRING: foo", res.type() + ": " + res); }
private IcebergTimeObjectInspector() { super(TypeInfoFactory.stringTypeInfo); }
@Test public void testIcebergTimeObjectInspector() { IcebergTimeObjectInspector oi = IcebergTimeObjectInspector.get(); assertThat(oi.getCategory()).isEqualTo(ObjectInspector.Category.PRIMITIVE); assertThat(oi.getPrimitiveCategory()) .isEqualTo(PrimitiveObjectInspector.PrimitiveCategory.STRING); assertThat(oi.getTypeInfo()).isEqualTo(TypeInfoFactory.stringTypeInfo); assertThat(oi.getTypeName()).isEqualTo(TypeInfoFactory.stringTypeInfo.getTypeName()); assertThat(oi.getJavaPrimitiveClass()).isEqualTo(String.class); assertThat(oi.getPrimitiveWritableClass()).isEqualTo(Text.class); assertThat(oi.copyObject(null)).isNull(); assertThat(oi.getPrimitiveJavaObject(null)).isNull(); assertThat(oi.getPrimitiveWritableObject(null)).isNull(); assertThat(oi.convert(null)).isNull(); LocalTime localTime = LocalTime.now(); String time = localTime.toString(); Text text = new Text(time); assertThat(oi.getPrimitiveJavaObject(text)).isEqualTo(time); assertThat(oi.getPrimitiveWritableObject(time)).isEqualTo(text); assertThat(oi.convert(time)).isEqualTo(localTime); Text copy = (Text) oi.copyObject(text); assertThat(copy).isEqualTo(text); assertThat(copy).isNotSameAs(text); assertThat(oi.preferWritable()).isFalse(); }
public String anonymize(final ParseTree tree) { return build(tree); }
@Test public void shouldAnonymizeUDFQueriesCorrectly() { final String output = anon.anonymize("CREATE STREAM OUTPUT AS SELECT ID, " + "REDUCE(numbers, 2, (s, x) => s + x) AS reduce FROM test;"); Approvals.verify(output); }
public String generateRedirectUrl(String artifact, String transactionId, String sessionId, BvdStatus status) throws SamlSessionException, UnsupportedEncodingException { final var samlSession = findSamlSessionByArtifactOrTransactionId(artifact, transactionId); if (CANCELLED.equals(status)) samlSession.setBvdStatus(AdAuthenticationStatus.STATUS_CANCELED.label); if (ERROR.equals(status)) samlSession.setBvdStatus(AdAuthenticationStatus.STATUS_FAILED.label); if (artifact == null) artifact = samlSession.getArtifact(); if (sessionId == null || !sessionId.equals(samlSession.getHttpSessionId())) throw new SamlSessionException("Saml session found with invalid sessionId for redirect_with_artifact"); var url = new StringBuilder(samlSession.getAssertionConsumerServiceURL() + "?SAMLart=" + URLEncoder.encode(artifact, "UTF-8")); // append relay-state if (samlSession.getRelayState() != null) url.append("&RelayState=" + URLEncoder.encode(samlSession.getRelayState(), "UTF-8")); samlSession.setResolveBeforeTime(System.currentTimeMillis() + 1000 * 60 * minutesToResolve); samlSessionRepository.save(samlSession); return url.toString(); }
@Test void redirectWithIncorrectSession() { when(samlSessionRepositoryMock.findByArtifact(anyString())).thenReturn(Optional.empty()); Exception exception = assertThrows(SamlSessionException.class, () -> assertionConsumerServiceUrlService.generateRedirectUrl("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", null, "incorrectSession", null)); assertEquals("Saml session not found by artifact/transactionid for redirect_with_artifact", exception.getMessage()); }
public static DependencyVersion parseVersion(String text) { return parseVersion(text, false); }
@Test public void testParseVersion_String_boolean() { //cpe:/a:playframework:play_framework:2.1.1:rc1-2.9.x-backport String text = "2.1.1.rc1.2.9.x-backport"; boolean firstMatchOnly = false; DependencyVersion expResult; DependencyVersion result = DependencyVersionUtil.parseVersion(text, firstMatchOnly); assertNull(result); firstMatchOnly = true; expResult = DependencyVersionUtil.parseVersion("2.1.1.rc1"); result = DependencyVersionUtil.parseVersion(text, firstMatchOnly); assertEquals(expResult, result); result = DependencyVersionUtil.parseVersion("1.0.0-RC", firstMatchOnly); assertEquals(4, result.getVersionParts().size()); assertEquals("rc", result.getVersionParts().get(3)); result = DependencyVersionUtil.parseVersion("1.0.0-RC2", firstMatchOnly); assertEquals(4, result.getVersionParts().size()); assertEquals("rc2", result.getVersionParts().get(3)); }
public static String getJsonToSave(@Nullable final List<Tab> tabList) { final JsonStringWriter jsonWriter = JsonWriter.string(); jsonWriter.object(); jsonWriter.array(JSON_TABS_ARRAY_KEY); if (tabList != null) { for (final Tab tab : tabList) { tab.writeJsonOn(jsonWriter); } } jsonWriter.end(); jsonWriter.end(); return jsonWriter.done(); }
@Test public void testEmptyAndNullSave() throws JsonParserException { final List<Tab> emptyList = Collections.emptyList(); String returnedJson = TabsJsonHelper.getJsonToSave(emptyList); assertTrue(isTabsArrayEmpty(returnedJson)); final List<Tab> nullList = null; returnedJson = TabsJsonHelper.getJsonToSave(nullList); assertTrue(isTabsArrayEmpty(returnedJson)); }
@Nonnull @Override public Result addChunk(ByteBuf buffer) { final byte[] readable = new byte[buffer.readableBytes()]; buffer.readBytes(readable, buffer.readerIndex(), buffer.readableBytes()); final GELFMessage msg = new GELFMessage(readable); final ByteBuf aggregatedBuffer; switch (msg.getGELFType()) { case CHUNKED: try { chunkCounter.inc(); aggregatedBuffer = checkForCompletion(msg); if (aggregatedBuffer == null) { return VALID_EMPTY_RESULT; } } catch (IllegalArgumentException | IllegalStateException | IndexOutOfBoundsException e) { log.debug("Invalid gelf message chunk, dropping message.", e); return INVALID_RESULT; } break; case ZLIB: case GZIP: case UNCOMPRESSED: aggregatedBuffer = Unpooled.wrappedBuffer(readable); break; case UNSUPPORTED: return INVALID_RESULT; default: return INVALID_RESULT; } return new Result(aggregatedBuffer, true); }
@Test public void missingChunk() { final DateTime initialTime = new DateTime(2014, 1, 1, 1, 59, 59, 0, DateTimeZone.UTC); final InstantMillisProvider clock = new InstantMillisProvider(initialTime); DateTimeUtils.setCurrentMillisProvider(clock); // we don't want the clean up task to run automatically poolExecutor = mock(ScheduledThreadPoolExecutor.class); final MetricRegistry metricRegistry = new MetricRegistry(); aggregator = new GelfChunkAggregator(poolExecutor, metricRegistry); final GelfChunkAggregator.ChunkEvictionTask evictionTask = aggregator.new ChunkEvictionTask(); final ByteBuf[] chunks = createChunkedMessage(4096 + 512, 1024); // creates 5 chunks int i = 0; for (final ByteBuf chunk : chunks) { final CodecAggregator.Result result; // skip first chunk if (i++ == 0) { continue; } result = aggregator.addChunk(chunk); assertTrue(result.isValid()); assertNull("chunks not complete", result.getMessage()); } // move clock forward enough to evict all of the chunks clock.tick(Period.seconds(10)); evictionTask.run(); final CodecAggregator.Result result = aggregator.addChunk(chunks[0]); assertNull("message should not be complete because chunks were evicted already", result.getMessage()); assertTrue(result.isValid()); // we send all chunks but the last one comes too late assertEquals("no message is complete", 0, counterValueNamed(metricRegistry, COMPLETE_MESSAGES)); assertEquals("received 5 chunks", 5, counterValueNamed(metricRegistry, CHUNK_COUNTER)); assertEquals("last chunk creates another waiting message", 1, counterValueNamed(metricRegistry, WAITING_MESSAGES)); assertEquals("4 chunks expired", 4, counterValueNamed(metricRegistry, EXPIRED_CHUNKS)); assertEquals("one message expired", 1, counterValueNamed(metricRegistry, EXPIRED_MESSAGES)); assertEquals("no duplicate chunks", 0, counterValueNamed(metricRegistry, DUPLICATE_CHUNKS)); // reset clock for other tests DateTimeUtils.setCurrentMillisSystem(); }
public static <K, InputT, AccumT> ParDoFn create( PipelineOptions options, KvCoder<K, ?> inputElementCoder, @Nullable CloudObject cloudUserFn, @Nullable List<SideInputInfo> sideInputInfos, List<Receiver> receivers, DataflowExecutionContext<?> executionContext, DataflowOperationContext operationContext) throws Exception { AppliedCombineFn<K, InputT, AccumT, ?> combineFn; SideInputReader sideInputReader; StepContext stepContext; if (cloudUserFn == null) { combineFn = null; sideInputReader = NullSideInputReader.empty(); stepContext = null; } else { Object deserializedFn = SerializableUtils.deserializeFromByteArray( getBytes(cloudUserFn, PropertyNames.SERIALIZED_FN), "serialized combine fn"); @SuppressWarnings("unchecked") AppliedCombineFn<K, InputT, AccumT, ?> combineFnUnchecked = ((AppliedCombineFn<K, InputT, AccumT, ?>) deserializedFn); combineFn = combineFnUnchecked; sideInputReader = executionContext.getSideInputReader( sideInputInfos, combineFn.getSideInputViews(), operationContext); stepContext = executionContext.getStepContext(operationContext); } return create( options, inputElementCoder, combineFn, sideInputReader, receivers.get(0), stepContext); }
@Test public void testPartialGroupByKeyWithCombiner() throws Exception { Coder keyCoder = StringUtf8Coder.of(); Coder valueCoder = BigEndianIntegerCoder.of(); TestOutputReceiver receiver = new TestOutputReceiver( new ElementByteSizeObservableCoder( WindowedValue.getValueOnlyCoder(KvCoder.of(keyCoder, valueCoder))), counterSet, NameContextsForTests.nameContextForTest()); Combiner<WindowedValue<String>, Integer, Integer, Integer> combineFn = new TestCombiner(); ParDoFn pgbkParDoFn = new SimplePartialGroupByKeyParDoFn( GroupingTables.combining( new WindowingCoderGroupingKeyCreator(keyCoder), PairInfo.create(), combineFn, new CoderSizeEstimator(WindowedValue.getValueOnlyCoder(keyCoder)), new CoderSizeEstimator(valueCoder)), receiver); pgbkParDoFn.startBundle(receiver); pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 4))); pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("there", 5))); pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 6))); pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("joe", 7))); pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("there", 8))); pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 9))); pgbkParDoFn.finishBundle(); assertThat( receiver.outputElems, IsIterableContainingInAnyOrder.<Object>containsInAnyOrder( WindowedValue.valueInGlobalWindow(KV.of("hi", 19)), WindowedValue.valueInGlobalWindow(KV.of("there", 13)), WindowedValue.valueInGlobalWindow(KV.of("joe", 7)))); // Exact counter values depend on size of encoded data. If encoding // changes, then these expected counters should change to match. CounterUpdateExtractor<?> updateExtractor = Mockito.mock(CounterUpdateExtractor.class); counterSet.extractUpdates(false, updateExtractor); verify(updateExtractor).longSum(getObjectCounterName("test_receiver_out"), false, 3L); verify(updateExtractor) .longMean( getMeanByteCounterName("test_receiver_out"), false, LongCounterMean.ZERO.addValue(25L, 3)); verifyNoMoreInteractions(updateExtractor); }
static boolean areAllReplicasInSync(PartitionInfo partitionInfo) { return Arrays.asList(partitionInfo.inSyncReplicas()).containsAll(Arrays.asList(partitionInfo.replicas())); }
@Test public void testAreAllReplicasInSync() { // Verify: If isr is the same as replicas, all replicas are in-sync Node[] replicas = new Node[2]; replicas[0] = NODE_0; replicas[1] = NODE_1; Node[] isr = new Node[2]; isr[0] = NODE_0; isr[1] = NODE_1; PartitionInfo partitionInfo = new PartitionInfo(TP.topic(), TP.partition(), NODE_1, replicas, isr); assertTrue(ExecutionProposal.areAllReplicasInSync(partitionInfo)); // Verify: If isr is smaller than replicas, not all replicas are in-sync Node[] smallIsr = new Node[1]; smallIsr[0] = NODE_0; partitionInfo = new PartitionInfo(TP.topic(), TP.partition(), NODE_1, replicas, smallIsr); assertFalse(ExecutionProposal.areAllReplicasInSync(partitionInfo)); // Verify: If isr is greater than replicas, all replicas are in-sync Node[] greaterIsr = new Node[3]; greaterIsr[0] = NODE_0; greaterIsr[1] = NODE_1; greaterIsr[2] = NODE_2; partitionInfo = new PartitionInfo(TP.topic(), TP.partition(), NODE_1, replicas, greaterIsr); assertTrue(ExecutionProposal.areAllReplicasInSync(partitionInfo)); // Verify: If isr has the same size as replicas, but replicas are not same as in-sync replicas, then not all replicas are in-sync. Node[] isrWithDifferentBrokerIds = new Node[2]; isrWithDifferentBrokerIds[0] = NODE_0; isrWithDifferentBrokerIds[1] = NODE_2; partitionInfo = new PartitionInfo(TP.topic(), TP.partition(), NODE_1, replicas, isrWithDifferentBrokerIds); assertFalse(ExecutionProposal.areAllReplicasInSync(partitionInfo)); }
@Override public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream, final ValueJoiner<? super V, ? super VO, ? extends VR> joiner, final JoinWindows windows) { return join(otherStream, toValueJoinerWithKey(joiner), windows); }
@SuppressWarnings("deprecation") @Test public void shouldNotAllowNullStreamJoinedOnJoin() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.join( testStream, MockValueJoiner.TOSTRING_JOINER, JoinWindows.of(ofMillis(10)), (StreamJoined<String, String, String>) null)); assertThat(exception.getMessage(), equalTo("streamJoined can't be null")); }
public static ILogger getLogger(@Nonnull Class<?> clazz) { checkNotNull(clazz, "class must not be null"); return getLoggerInternal(clazz.getName()); }
@Test public void getLogger_whenNone_thenReturnNoLogger() { isolatedLoggingRule.setLoggingType(LOGGING_TYPE_NONE); assertInstanceOf(NoLogFactory.NoLogger.class, Logger.getLogger(getClass())); }
UriEndpoint createUriEndpoint(String url, boolean isWs) { return createUriEndpoint(url, isWs, connectAddress); }
@Test void createUriEndpointRelative() { String test1 = this.builder.build() .createUriEndpoint("/foo", false) .toExternalForm(); String test2 = this.builder.build() .createUriEndpoint("/foo", true) .toExternalForm(); assertThat(test1).isEqualTo("http://localhost/foo"); assertThat(test2).isEqualTo("ws://localhost/foo"); }
public static GenericData get() { return INSTANCE; }
@Test void recordGetFieldDoesntExist() throws Exception { assertThrows(AvroRuntimeException.class, () -> { Schema schema = Schema.createRecord("test", "doc", "test", false, Collections.EMPTY_LIST); GenericData.Record record = new GenericData.Record(schema); record.get("does not exist"); }); }
public void acquireReadLock(String key) { getLock(key).readLock().lock(); }
@Test public void shouldNotEnforceMutualExclusionOfReadLockForGivenName() throws InterruptedException { readWriteLock.acquireReadLock("foo"); new Thread(() -> { readWriteLock.acquireReadLock("foo"); numberOfLocks++; }).start(); Thread.sleep(1000); assertThat(numberOfLocks, is(1)); }
@Override public LispNatLcafAddress getNatLcafAddress() { return natLcafAddress; }
@Test public void testConstruction() { DefaultLispInfoReply reply = (DefaultLispInfoReply) reply1; LispIpv4Address address = new LispIpv4Address(IpAddress.valueOf("192.168.1.4")); short msUdpPortNumber1 = 80; short etrUdpPortNumber1 = 100; LispIpv4Address globalEtrRlocAddress1 = new LispIpv4Address(IpAddress.valueOf("192.168.1.1")); LispIpv4Address msRlocAddress1 = new LispIpv4Address(IpAddress.valueOf("192.168.1.2")); LispIpv4Address privateEtrRlocAddress1 = new LispIpv4Address(IpAddress.valueOf("192.168.1.3")); LispNatLcafAddress natLcafAddress = new NatAddressBuilder() .withLength((short) 0) .withMsUdpPortNumber(msUdpPortNumber1) .withEtrUdpPortNumber(etrUdpPortNumber1) .withGlobalEtrRlocAddress(globalEtrRlocAddress1) .withMsRlocAddress(msRlocAddress1) .withPrivateEtrRlocAddress(privateEtrRlocAddress1) .build(); assertThat(reply.isInfoReply(), is(true)); assertThat(reply.getNonce(), is(1L)); assertThat(reply.getKeyId(), is((short) 1)); assertThat(reply.getMaskLength(), is((byte) 1)); assertThat(reply.getPrefix(), is(address)); assertThat(reply.getNatLcafAddress(), is(natLcafAddress)); }
@VisibleForTesting public static SegmentSelectionResult processValidDocIdsMetadata(Map<String, String> taskConfigs, Map<String, SegmentZKMetadata> completedSegmentsMap, Map<String, List<ValidDocIdsMetadataInfo>> validDocIdsMetadataInfoMap) { double invalidRecordsThresholdPercent = Double.parseDouble( taskConfigs.getOrDefault(UpsertCompactionTask.INVALID_RECORDS_THRESHOLD_PERCENT, String.valueOf(DEFAULT_INVALID_RECORDS_THRESHOLD_PERCENT))); long invalidRecordsThresholdCount = Long.parseLong( taskConfigs.getOrDefault(UpsertCompactionTask.INVALID_RECORDS_THRESHOLD_COUNT, String.valueOf(DEFAULT_INVALID_RECORDS_THRESHOLD_COUNT))); List<Pair<SegmentZKMetadata, Long>> segmentsForCompaction = new ArrayList<>(); List<String> segmentsForDeletion = new ArrayList<>(); for (String segmentName : validDocIdsMetadataInfoMap.keySet()) { // check if segment is part of completed segments if (!completedSegmentsMap.containsKey(segmentName)) { LOGGER.warn("Segment {} is not found in the completed segments list, skipping it for compaction", segmentName); continue; } SegmentZKMetadata segment = completedSegmentsMap.get(segmentName); for (ValidDocIdsMetadataInfo validDocIdsMetadata : validDocIdsMetadataInfoMap.get(segmentName)) { long totalInvalidDocs = validDocIdsMetadata.getTotalInvalidDocs(); // Skip segments if the crc from zk metadata and server does not match. They may be being reloaded. if (segment.getCrc() != Long.parseLong(validDocIdsMetadata.getSegmentCrc())) { LOGGER.warn("CRC mismatch for segment: {}, (segmentZKMetadata={}, validDocIdsMetadata={})", segmentName, segment.getCrc(), validDocIdsMetadata.getSegmentCrc()); continue; } long totalDocs = validDocIdsMetadata.getTotalDocs(); double invalidRecordPercent = ((double) totalInvalidDocs / totalDocs) * 100; if (totalInvalidDocs == totalDocs) { segmentsForDeletion.add(segment.getSegmentName()); } else if (invalidRecordPercent >= invalidRecordsThresholdPercent && totalInvalidDocs >= invalidRecordsThresholdCount) { segmentsForCompaction.add(Pair.of(segment, totalInvalidDocs)); } break; } } segmentsForCompaction.sort((o1, o2) -> { if (o1.getValue() > o2.getValue()) { return -1; } else if (o1.getValue().equals(o2.getValue())) { return 0; } return 1; }); return new SegmentSelectionResult( segmentsForCompaction.stream().map(Map.Entry::getKey).collect(Collectors.toList()), segmentsForDeletion); }
@Test public void testProcessValidDocIdsMetadata() throws IOException { Map<String, String> compactionConfigs = getCompactionConfigs("1", "10"); String json = "{\"testTable__0\": [{\"totalValidDocs\": 50, \"totalInvalidDocs\": 50, " + "\"segmentName\": \"testTable__0\", \"totalDocs\": 100, \"segmentCrc\": \"1000\"}], " + "\"testTable__1\": [{\"totalValidDocs\": 0, " + "\"totalInvalidDocs\": 10, \"segmentName\": \"testTable__1\", \"totalDocs\": 10, \"segmentCrc\": \"2000\"}]}"; Map<String, List<ValidDocIdsMetadataInfo>> validDocIdsMetadataInfo = JsonUtils.stringToObject(json, new TypeReference<>() { }); // no completed segments scenario, there shouldn't be any segment selected for compaction UpsertCompactionTaskGenerator.SegmentSelectionResult segmentSelectionResult = UpsertCompactionTaskGenerator.processValidDocIdsMetadata(compactionConfigs, new HashMap<>(), validDocIdsMetadataInfo); assertEquals(segmentSelectionResult.getSegmentsForCompaction().size(), 0); // test with valid crc and thresholds segmentSelectionResult = UpsertCompactionTaskGenerator.processValidDocIdsMetadata(compactionConfigs, _completedSegmentsMap, validDocIdsMetadataInfo); assertEquals(segmentSelectionResult.getSegmentsForCompaction().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForDeletion().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForCompaction().get(0).getSegmentName(), _completedSegment.getSegmentName()); assertEquals(segmentSelectionResult.getSegmentsForDeletion().get(0), _completedSegment2.getSegmentName()); // test with a higher invalidRecordsThresholdPercent compactionConfigs = getCompactionConfigs("60", "10"); segmentSelectionResult = UpsertCompactionTaskGenerator.processValidDocIdsMetadata(compactionConfigs, _completedSegmentsMap, validDocIdsMetadataInfo); assertTrue(segmentSelectionResult.getSegmentsForCompaction().isEmpty()); assertEquals(segmentSelectionResult.getSegmentsForDeletion().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForDeletion().get(0), _completedSegment2.getSegmentName()); // test without an invalidRecordsThresholdPercent compactionConfigs = getCompactionConfigs("0", "10"); segmentSelectionResult = UpsertCompactionTaskGenerator.processValidDocIdsMetadata(compactionConfigs, _completedSegmentsMap, validDocIdsMetadataInfo); assertEquals(segmentSelectionResult.getSegmentsForDeletion().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForCompaction().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForCompaction().get(0).getSegmentName(), _completedSegment.getSegmentName()); assertEquals(segmentSelectionResult.getSegmentsForDeletion().get(0), _completedSegment2.getSegmentName()); // test without a invalidRecordsThresholdCount compactionConfigs = getCompactionConfigs("30", "0"); segmentSelectionResult = UpsertCompactionTaskGenerator.processValidDocIdsMetadata(compactionConfigs, _completedSegmentsMap, validDocIdsMetadataInfo); assertEquals(segmentSelectionResult.getSegmentsForDeletion().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForCompaction().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForCompaction().get(0).getSegmentName(), _completedSegment.getSegmentName()); assertEquals(segmentSelectionResult.getSegmentsForDeletion().get(0), _completedSegment2.getSegmentName()); // Test the case where the completedSegment from api has different crc than segment from zk metadata. json = "{\"" + _completedSegment.getSegmentName() + "\": [{\"totalValidDocs\": 50, \"totalInvalidDocs\": 50, " + "\"segmentName\": \"" + _completedSegment.getSegmentName() + "\", \"totalDocs\": 100, \"segmentCrc\": " + "\"1234567890\"}], \"" + _completedSegment2.getSegmentName() + "\": [{\"totalValidDocs\": 0, " + "\"totalInvalidDocs\": 10, \"segmentName\": \"" + _completedSegment2.getSegmentName() + "\", " + "\"segmentCrc\": \"" + _completedSegment2.getCrc() + "\", \"totalDocs\": 10}]}"; validDocIdsMetadataInfo = JsonUtils.stringToObject(json, new TypeReference<>() { }); segmentSelectionResult = UpsertCompactionTaskGenerator.processValidDocIdsMetadata(compactionConfigs, _completedSegmentsMap, validDocIdsMetadataInfo); // completedSegment is supposed to be filtered out Assert.assertEquals(segmentSelectionResult.getSegmentsForCompaction().size(), 0); // completedSegment2 is still supposed to be deleted Assert.assertEquals(segmentSelectionResult.getSegmentsForDeletion().size(), 1); assertEquals(segmentSelectionResult.getSegmentsForDeletion().get(0), _completedSegment2.getSegmentName()); // check if both the candidates for compaction are coming in sorted descending order json = "{\"" + _completedSegment.getSegmentName() + "\": [{\"totalValidDocs\": 50, \"totalInvalidDocs\": 50, " + "\"segmentName\": \"" + _completedSegment.getSegmentName() + "\", \"totalDocs\": 100, \"segmentCrc\": \"" + _completedSegment.getCrc() + "\"}], \"" + _completedSegment2.getSegmentName() + "\": " + "[{\"totalValidDocs\": 10, \"totalInvalidDocs\": 40, \"segmentName\": \"" + _completedSegment2.getSegmentName() + "\", \"segmentCrc\": \"" + _completedSegment2.getCrc() + "\", " + "\"totalDocs\": 50}]}"; validDocIdsMetadataInfo = JsonUtils.stringToObject(json, new TypeReference<>() { }); compactionConfigs = getCompactionConfigs("30", "0"); segmentSelectionResult = UpsertCompactionTaskGenerator.processValidDocIdsMetadata(compactionConfigs, _completedSegmentsMap, validDocIdsMetadataInfo); Assert.assertEquals(segmentSelectionResult.getSegmentsForCompaction().size(), 2); Assert.assertEquals(segmentSelectionResult.getSegmentsForDeletion().size(), 0); assertEquals(segmentSelectionResult.getSegmentsForCompaction().get(0).getSegmentName(), _completedSegment.getSegmentName()); assertEquals(segmentSelectionResult.getSegmentsForCompaction().get(1).getSegmentName(), _completedSegment2.getSegmentName()); }
@Nullable public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) { return parseB3SingleFormat(b3, 0, b3.length()); }
@Test void parseB3SingleFormat_middleOfString() { String input = "b3=" + traceIdHigh + traceId + "-" + spanId + ","; assertThat(parseB3SingleFormat(input, 3, input.length() - 1).context()) .isEqualToComparingFieldByField(TraceContext.newBuilder() .traceIdHigh(Long.parseUnsignedLong(traceIdHigh, 16)) .traceId(Long.parseUnsignedLong(traceId, 16)) .spanId(Long.parseUnsignedLong(spanId, 16)).build() ); }
@Override public Map<String, Object> load(String configKey) { if (targetFilePath != null) { try { Map<String, Object> raw = (Map<String, Object>) Utils.readYamlFile(targetFilePath); if (raw != null) { return (Map<String, Object>) raw.get(configKey); } } catch (Exception e) { LOG.error("Failed to load from file {}", targetFilePath); } } return null; }
@Test public void testValidFile() throws Exception { File temp = Files.createTempFile("FileLoader", ".yaml").toFile(); temp.deleteOnExit(); Map<String, Integer> testMap = new HashMap<>(); testMap.put("a", 1); testMap.put("b", 2); testMap.put("c", 3); testMap.put("d", 4); testMap.put("e", 5); Map<String, Map<String, Integer>> confMap = new HashMap<>(); confMap.put(DaemonConfig.MULTITENANT_SCHEDULER_USER_POOLS, testMap); Yaml yaml = new Yaml(); FileWriter fw = new FileWriter(temp); yaml.dump(confMap, fw); fw.flush(); fw.close(); Config conf = new Config(); conf.put(DaemonConfig.SCHEDULER_CONFIG_LOADER_URI, FILE_SCHEME_PREFIX + temp.getCanonicalPath()); FileConfigLoader loader = new FileConfigLoader(conf); Map<String, Object> result = loader.load(DaemonConfig.MULTITENANT_SCHEDULER_USER_POOLS); assertNotNull(result, "Unexpectedly returned null"); assertEquals(testMap.keySet().size(), result.keySet().size(), "Maps are a different size"); for (String key : testMap.keySet()) { Integer expectedValue = testMap.get(key); Integer returnedValue = (Integer) result.get(key); assertEquals(expectedValue, returnedValue, "Bad value for key=" + key); } }
public static <T> T retryUntilTimeout(Callable<T> callable, Supplier<String> description, Duration timeoutDuration, long retryBackoffMs) throws Exception { return retryUntilTimeout(callable, description, timeoutDuration, retryBackoffMs, Time.SYSTEM); }
@Test public void retriesEventuallySucceed() throws Exception { Mockito.when(mockCallable.call()) .thenThrow(new TimeoutException()) .thenThrow(new TimeoutException()) .thenThrow(new TimeoutException()) .thenReturn("success"); assertEquals("success", RetryUtil.retryUntilTimeout(mockCallable, testMsg, Duration.ofMillis(100), 1, mockTime)); Mockito.verify(mockCallable, Mockito.times(4)).call(); }
@SuppressWarnings("checkstyle:magicnumber") public static String getInternalBinaryName(byte[] classBytes) { try { ByteBuffer buffer = ByteBuffer.wrap(classBytes); buffer.order(ByteOrder.BIG_ENDIAN); // Skip magic number and major/minor versions buffer.position(8); int constantPoolCount = buffer.getShort() & 0xFFFF; Object[] constantPool = new Object[constantPoolCount]; // Iterate constant pool, collecting UTF8 strings (could be our class name) and looking for CONSTANT_Class tags // to identify our desired UTF8 string representing the class name. Skips appropriate bytes for all other tags. // While it is generally convention for the index referenced by a CONSTANT_Class value to already be populated in // the constant pool (forward references), it is not forbidden by JVM Spec to use backward references. // We need to skip the payload of all irrelevant tags, by the amount defined in the JVM spec (see javadoc) for (int i = 1; i < constantPoolCount; i++) { int tag = buffer.get() & 0xFF; switch (tag) { case 1: // CONSTANT_Utf8 int length = buffer.getShort() & 0xFFFF; byte[] bytes = new byte[length]; buffer.get(bytes); constantPool[i] = new String(bytes, StandardCharsets.UTF_8); break; case 7: // CONSTANT_Class constantPool[i] = buffer.getShort() & 0xFFFF; // Store index break; case 8: // CONSTANT_String case 16: // CONSTANT_MethodType case 19: // CONSTANT_Module case 20: // CONSTANT_Package skipBytes(buffer, 2); break; case 15: // CONSTANT_MethodHandle skipBytes(buffer, 3); break; case 3: // CONSTANT_Integer case 4: // CONSTANT_Float case 9: // CONSTANT_Fieldref case 10: // CONSTANT_Methodref case 11: // CONSTANT_InterfaceMethodref case 12: // CONSTANT_NameAndType case 18: // CONSTANT_InvokeDynamic case 17: // CONSTANT_Dynamic skipBytes(buffer, 4); break; case 5: // CONSTANT_Long case 6: // CONSTANT_Double skipBytes(buffer, 8); i++; break; default: throw new IllegalArgumentException("Invalid constant pool tag: " + tag); } } // Skip access flag skipBytes(buffer, 2); // Read this_class index which points to the constantPool index which holds the value of // the index to find the current classes' internal binary name int thisClassIndex = buffer.getShort() & 0xFFFF; return (String) constantPool[(int) constantPool[thisClassIndex]]; } catch (Exception e) { throw new IllegalArgumentException("Unable to local package/class names from class bytes!", e); } }
@Test public void testAllConstantTagsReadable_whenReadingInternalBinaryName() { // To read an internal binary name, we need to read (and skip values for) all constant pool bytes // in the class file, so we need to make sure all 17 (as of JDK 21) are handled correctly byte[] classBytes = generateClassFileHeaderWithAllConstants(); assertEquals("com.hazelcast.test.FakeClass", ReflectionUtils.getInternalBinaryName(classBytes)); }
public static Expression convert(Predicate[] predicates) { Expression expression = Expressions.alwaysTrue(); for (Predicate predicate : predicates) { Expression converted = convert(predicate); Preconditions.checkArgument( converted != null, "Cannot convert Spark predicate to Iceberg expression: %s", predicate); expression = Expressions.and(expression, converted); } return expression; }
@SuppressWarnings("checkstyle:MethodLength") @Test public void testV2Filters() { Map<String, String> attrMap = Maps.newHashMap(); attrMap.put("id", "id"); attrMap.put("`i.d`", "i.d"); attrMap.put("`i``d`", "i`d"); attrMap.put("`d`.b.`dd```", "d.b.dd`"); attrMap.put("a.`aa```.c", "a.aa`.c"); attrMap.forEach( (quoted, unquoted) -> { NamedReference namedReference = FieldReference.apply(quoted); org.apache.spark.sql.connector.expressions.Expression[] attrOnly = new org.apache.spark.sql.connector.expressions.Expression[] {namedReference}; LiteralValue value = new LiteralValue(1, DataTypes.IntegerType); org.apache.spark.sql.connector.expressions.Expression[] attrAndValue = new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, value}; org.apache.spark.sql.connector.expressions.Expression[] valueAndAttr = new org.apache.spark.sql.connector.expressions.Expression[] {value, namedReference}; Predicate isNull = new Predicate("IS_NULL", attrOnly); Expression expectedIsNull = Expressions.isNull(unquoted); Expression actualIsNull = SparkV2Filters.convert(isNull); Assert.assertEquals( "IsNull must match", expectedIsNull.toString(), actualIsNull.toString()); Predicate isNotNull = new Predicate("IS_NOT_NULL", attrOnly); Expression expectedIsNotNull = Expressions.notNull(unquoted); Expression actualIsNotNull = SparkV2Filters.convert(isNotNull); Assert.assertEquals( "IsNotNull must match", expectedIsNotNull.toString(), actualIsNotNull.toString()); Predicate lt1 = new Predicate("<", attrAndValue); Expression expectedLt1 = Expressions.lessThan(unquoted, 1); Expression actualLt1 = SparkV2Filters.convert(lt1); Assert.assertEquals("LessThan must match", expectedLt1.toString(), actualLt1.toString()); Predicate lt2 = new Predicate("<", valueAndAttr); Expression expectedLt2 = Expressions.greaterThan(unquoted, 1); Expression actualLt2 = SparkV2Filters.convert(lt2); Assert.assertEquals("LessThan must match", expectedLt2.toString(), actualLt2.toString()); Predicate ltEq1 = new Predicate("<=", attrAndValue); Expression expectedLtEq1 = Expressions.lessThanOrEqual(unquoted, 1); Expression actualLtEq1 = SparkV2Filters.convert(ltEq1); Assert.assertEquals( "LessThanOrEqual must match", expectedLtEq1.toString(), actualLtEq1.toString()); Predicate ltEq2 = new Predicate("<=", valueAndAttr); Expression expectedLtEq2 = Expressions.greaterThanOrEqual(unquoted, 1); Expression actualLtEq2 = SparkV2Filters.convert(ltEq2); Assert.assertEquals( "LessThanOrEqual must match", expectedLtEq2.toString(), actualLtEq2.toString()); Predicate gt1 = new Predicate(">", attrAndValue); Expression expectedGt1 = Expressions.greaterThan(unquoted, 1); Expression actualGt1 = SparkV2Filters.convert(gt1); Assert.assertEquals( "GreaterThan must match", expectedGt1.toString(), actualGt1.toString()); Predicate gt2 = new Predicate(">", valueAndAttr); Expression expectedGt2 = Expressions.lessThan(unquoted, 1); Expression actualGt2 = SparkV2Filters.convert(gt2); Assert.assertEquals( "GreaterThan must match", expectedGt2.toString(), actualGt2.toString()); Predicate gtEq1 = new Predicate(">=", attrAndValue); Expression expectedGtEq1 = Expressions.greaterThanOrEqual(unquoted, 1); Expression actualGtEq1 = SparkV2Filters.convert(gtEq1); Assert.assertEquals( "GreaterThanOrEqual must match", expectedGtEq1.toString(), actualGtEq1.toString()); Predicate gtEq2 = new Predicate(">=", valueAndAttr); Expression expectedGtEq2 = Expressions.lessThanOrEqual(unquoted, 1); Expression actualGtEq2 = SparkV2Filters.convert(gtEq2); Assert.assertEquals( 
"GreaterThanOrEqual must match", expectedGtEq2.toString(), actualGtEq2.toString()); Predicate eq1 = new Predicate("=", attrAndValue); Expression expectedEq1 = Expressions.equal(unquoted, 1); Expression actualEq1 = SparkV2Filters.convert(eq1); Assert.assertEquals("EqualTo must match", expectedEq1.toString(), actualEq1.toString()); Predicate eq2 = new Predicate("=", valueAndAttr); Expression expectedEq2 = Expressions.equal(unquoted, 1); Expression actualEq2 = SparkV2Filters.convert(eq2); Assert.assertEquals("EqualTo must match", expectedEq2.toString(), actualEq2.toString()); Predicate notEq1 = new Predicate("<>", attrAndValue); Expression expectedNotEq1 = Expressions.notEqual(unquoted, 1); Expression actualNotEq1 = SparkV2Filters.convert(notEq1); Assert.assertEquals( "NotEqualTo must match", expectedNotEq1.toString(), actualNotEq1.toString()); Predicate notEq2 = new Predicate("<>", valueAndAttr); Expression expectedNotEq2 = Expressions.notEqual(unquoted, 1); Expression actualNotEq2 = SparkV2Filters.convert(notEq2); Assert.assertEquals( "NotEqualTo must match", expectedNotEq2.toString(), actualNotEq2.toString()); Predicate eqNullSafe1 = new Predicate("<=>", attrAndValue); Expression expectedEqNullSafe1 = Expressions.equal(unquoted, 1); Expression actualEqNullSafe1 = SparkV2Filters.convert(eqNullSafe1); Assert.assertEquals( "EqualNullSafe must match", expectedEqNullSafe1.toString(), actualEqNullSafe1.toString()); Predicate eqNullSafe2 = new Predicate("<=>", valueAndAttr); Expression expectedEqNullSafe2 = Expressions.equal(unquoted, 1); Expression actualEqNullSafe2 = SparkV2Filters.convert(eqNullSafe2); Assert.assertEquals( "EqualNullSafe must match", expectedEqNullSafe2.toString(), actualEqNullSafe2.toString()); LiteralValue str = new LiteralValue(UTF8String.fromString("iceberg"), DataTypes.StringType); org.apache.spark.sql.connector.expressions.Expression[] attrAndStr = new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, str}; Predicate startsWith = new Predicate("STARTS_WITH", attrAndStr); Expression expectedStartsWith = Expressions.startsWith(unquoted, "iceberg"); Expression actualStartsWith = SparkV2Filters.convert(startsWith); Assert.assertEquals( "StartsWith must match", expectedStartsWith.toString(), actualStartsWith.toString()); Predicate in = new Predicate("IN", attrAndValue); Expression expectedIn = Expressions.in(unquoted, 1); Expression actualIn = SparkV2Filters.convert(in); Assert.assertEquals("In must match", expectedIn.toString(), actualIn.toString()); Predicate and = new And(lt1, eq1); Expression expectedAnd = Expressions.and(expectedLt1, expectedEq1); Expression actualAnd = SparkV2Filters.convert(and); Assert.assertEquals("And must match", expectedAnd.toString(), actualAnd.toString()); org.apache.spark.sql.connector.expressions.Expression[] attrAndAttr = new org.apache.spark.sql.connector.expressions.Expression[] { namedReference, namedReference }; Predicate invalid = new Predicate("<", attrAndAttr); Predicate andWithInvalidLeft = new And(invalid, eq1); Expression convertedAnd = SparkV2Filters.convert(andWithInvalidLeft); Assert.assertEquals("And must match", convertedAnd, null); Predicate or = new Or(lt1, eq1); Expression expectedOr = Expressions.or(expectedLt1, expectedEq1); Expression actualOr = SparkV2Filters.convert(or); Assert.assertEquals("Or must match", expectedOr.toString(), actualOr.toString()); Predicate orWithInvalidLeft = new Or(invalid, eq1); Expression convertedOr = SparkV2Filters.convert(orWithInvalidLeft); Assert.assertEquals("Or must 
match", convertedOr, null); Predicate not = new Not(lt1); Expression expectedNot = Expressions.not(expectedLt1); Expression actualNot = SparkV2Filters.convert(not); Assert.assertEquals("Not must match", expectedNot.toString(), actualNot.toString()); }); }
@Override public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap, String serviceInterface) { if (!shouldHandle(invokers)) { return invokers; } List<Object> targetInvokers; if (routerConfig.isUseRequestRouter()) { targetInvokers = getTargetInvokersByRequest(targetService, invokers, invocation); } else { targetInvokers = getTargetInvokersByRules(invokers, invocation, queryMap, targetService, serviceInterface); } return super.handle(targetService, targetInvokers, invocation, queryMap, serviceInterface); }
@Test public void testGetMissMatchInvokers() { // initialize the routing rule RuleInitializationUtils.initFlowMatchRule(); List<Object> invokers = new ArrayList<>(); ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0"); invokers.add(invoker1); ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1"); invokers.add(invoker2); Invocation invocation = new ApacheInvocation(); invocation.setAttachment("bar", "bar2"); Map<String, String> queryMap = new HashMap<>(); queryMap.put("side", "consumer"); queryMap.put("group", "fooGroup"); queryMap.put("version", "0.0.1"); queryMap.put("interface", "io.sermant.foo.FooTest"); DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo"); List<Object> targetInvokers = (List<Object>) flowRouteHandler.handle( DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest") , invokers, invocation, queryMap, "io.sermant.foo.FooTest"); Assert.assertEquals(1, targetInvokers.size()); Assert.assertEquals(invoker1, targetInvokers.get(0)); ConfigCache.getLabel(RouterConstant.DUBBO_CACHE_NAME).resetRouteRule(Collections.emptyMap()); }
public JWTValidator validateDate() throws ValidateException { return validateDate(DateUtil.beginOfSecond(DateUtil.date())); }
@Test public void validateDateTest() { assertThrows(ValidateException.class, () -> { final JWT jwt = JWT.create() .setPayload("id", 123) .setPayload("username", "hutool") .setExpiresAt(DateUtil.parse("2021-10-13 09:59:00")); JWTValidator.of(jwt).validateDate(DateUtil.date()); }); }
@Override public ProviderGroup getProviderGroup(String groupName) { rLock.lock(); try { return RpcConstants.ADDRESS_DIRECT_GROUP.equals(groupName) ? directUrlGroup : registryGroup; } finally { rLock.unlock(); } }
@Test public void getProviders() throws Exception { SingleGroupAddressHolder addressHolder = new SingleGroupAddressHolder(null); Assert.assertTrue(ProviderHelper.isEmpty(addressHolder.getProviderGroup(null))); Assert.assertTrue(ProviderHelper.isEmpty(addressHolder.getProviderGroup(StringUtils.EMPTY))); Assert.assertTrue(ProviderHelper.isEmpty(addressHolder.getProviderGroup(ADDRESS_DEFAULT_GROUP))); addressHolder.registryGroup.add(ProviderHelper.toProviderInfo("127.0.0.1:12200")); addressHolder.registryGroup.add(ProviderHelper.toProviderInfo("127.0.0.1:12201")); Assert.assertTrue(addressHolder.getProviderGroup(null).size() == 2); Assert.assertTrue(addressHolder.getProviderGroup(StringUtils.EMPTY).size() == 2); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DEFAULT_GROUP).size() == 2); addressHolder.directUrlGroup.add(ProviderHelper.toProviderInfo("127.0.0.1:12200")); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DIRECT_GROUP).size() == 1); Assert.assertTrue(addressHolder.getProviderGroup(null).size() == 2); Assert.assertTrue(addressHolder.getProviderGroup(StringUtils.EMPTY).size() == 2); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DEFAULT_GROUP).size() == 2); }
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) { SinkConfig mergedConfig = clone(existingConfig); if (!existingConfig.getTenant().equals(newConfig.getTenant())) { throw new IllegalArgumentException("Tenants differ"); } if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) { throw new IllegalArgumentException("Namespaces differ"); } if (!existingConfig.getName().equals(newConfig.getName())) { throw new IllegalArgumentException("Sink Names differ"); } if (!StringUtils.isEmpty(newConfig.getClassName())) { mergedConfig.setClassName(newConfig.getClassName()); } if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName() .equals(existingConfig.getSourceSubscriptionName())) { throw new IllegalArgumentException("Subscription Name cannot be altered"); } if (newConfig.getInputSpecs() == null) { newConfig.setInputSpecs(new HashMap<>()); } if (mergedConfig.getInputSpecs() == null) { mergedConfig.setInputSpecs(new HashMap<>()); } if (!StringUtils.isEmpty(newConfig.getLogTopic())) { mergedConfig.setLogTopic(newConfig.getLogTopic()); } if (newConfig.getInputs() != null) { newConfig.getInputs().forEach((topicName -> { newConfig.getInputSpecs().putIfAbsent(topicName, ConsumerConfig.builder().isRegexPattern(false).build()); })); } if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) { newConfig.getInputSpecs().put(newConfig.getTopicsPattern(), ConsumerConfig.builder() .isRegexPattern(true) .build()); } if (newConfig.getTopicToSerdeClassName() != null) { newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> { newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder() .serdeClassName(serdeClassName) .isRegexPattern(false) .build()); }); } if (newConfig.getTopicToSchemaType() != null) { newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> { newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder() .schemaType(schemaClassname) .isRegexPattern(false) .build()); }); } if (!newConfig.getInputSpecs().isEmpty()) { SinkConfig finalMergedConfig = mergedConfig; newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> { if (!existingConfig.getInputSpecs().containsKey(topicName)) { throw new IllegalArgumentException("Input Topics cannot be altered"); } if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) { throw new IllegalArgumentException( "isRegexPattern for input topic " + topicName + " cannot be altered"); } finalMergedConfig.getInputSpecs().put(topicName, consumerConfig); }); } if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees() .equals(existingConfig.getProcessingGuarantees())) { throw new IllegalArgumentException("Processing Guarantees cannot be altered"); } if (newConfig.getConfigs() != null) { mergedConfig.setConfigs(newConfig.getConfigs()); } if (newConfig.getSecrets() != null) { mergedConfig.setSecrets(newConfig.getSecrets()); } if (newConfig.getParallelism() != null) { mergedConfig.setParallelism(newConfig.getParallelism()); } if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering() .equals(existingConfig.getRetainOrdering())) { throw new IllegalArgumentException("Retain Ordering cannot be altered"); } if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering() .equals(existingConfig.getRetainKeyOrdering())) { throw new IllegalArgumentException("Retain Key Ordering cannot be altered"); } if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) { throw new IllegalArgumentException("AutoAck cannot be altered"); } if (newConfig.getResources() != null) { mergedConfig .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources())); } if (newConfig.getTimeoutMs() != null) { mergedConfig.setTimeoutMs(newConfig.getTimeoutMs()); } if (newConfig.getCleanupSubscription() != null) { mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription()); } if (!StringUtils.isEmpty(newConfig.getArchive())) { mergedConfig.setArchive(newConfig.getArchive()); } if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) { mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags()); } if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) { mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions()); } if (newConfig.getTransformFunction() != null) { mergedConfig.setTransformFunction(newConfig.getTransformFunction()); } if (newConfig.getTransformFunctionClassName() != null) { mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName()); } if (newConfig.getTransformFunctionConfig() != null) { mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig()); } return mergedConfig; }
@Test public void testMergeDifferentTimeout() { SinkConfig sinkConfig = createSinkConfig(); SinkConfig newSinkConfig = createUpdatedSinkConfig("timeoutMs", 102L); SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig); assertEquals( mergedConfig.getTimeoutMs(), Long.valueOf(102L) ); mergedConfig.setTimeoutMs(sinkConfig.getTimeoutMs()); assertEquals( new Gson().toJson(sinkConfig), new Gson().toJson(mergedConfig) ); }
@Override public void judgeContinueToExecute(final SQLStatement statement) throws SQLException { ShardingSpherePreconditions.checkState(statement instanceof CommitStatement || statement instanceof RollbackStatement, () -> new SQLFeatureNotSupportedException("Current transaction is aborted, commands ignored until end of transaction block.")); }
@Test void assertJudgeContinueToExecuteWithRollbackStatement() { assertDoesNotThrow(() -> allowedSQLStatementHandler.judgeContinueToExecute(mock(RollbackStatement.class))); }
public static HealthStateScope forMaterial(Material material) { return new HealthStateScope(ScopeType.MATERIAL, material.getAttributesForScope().toString()); }
@Test public void shouldHaveDifferentScopeWhenAutoUpdateHasChanged() { SvnMaterial mat = MaterialsMother.svnMaterial("url1"); HealthStateScope scope1 = HealthStateScope.forMaterial(mat); mat.setAutoUpdate(false); HealthStateScope scope2 = HealthStateScope.forMaterial(mat); assertThat(scope1, not(scope2)); }
@Override public List<ImportValidationFeedback> verifyRule( Object subject ) { List<ImportValidationFeedback> feedback = new ArrayList<>(); if ( !isEnabled() || !( subject instanceof JobMeta ) ) { return feedback; } JobMeta jobMeta = (JobMeta) subject; String description = jobMeta.getDescription(); if ( null != description && minLength <= description.length() ) { feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.APPROVAL, "A description is present" ) ); } else { feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.ERROR, "A description is not present or too short" ) ); } return feedback; }
@Test public void testVerifyRule_EmptyDescription_DisabledRule() { JobHasDescriptionImportRule importRule = getImportRule( 10, false ); JobMeta jobMeta = new JobMeta(); jobMeta.setDescription( "" ); List<ImportValidationFeedback> feedbackList = importRule.verifyRule( null ); assertNotNull( feedbackList ); assertTrue( feedbackList.isEmpty() ); }
public List<StatisticsFile> statisticsFiles() { return statisticsFiles; }
@Test public void testParseStatisticsFiles() throws Exception { String data = readTableMetadataInputFile("TableMetadataStatisticsFiles.json"); TableMetadata parsed = TableMetadataParser.fromJson(data); assertThat(parsed.statisticsFiles()).hasSize(1); assertThat(parsed.statisticsFiles()) .hasSize(1) .first() .isEqualTo( new GenericStatisticsFile( 3055729675574597004L, "s3://a/b/stats.puffin", 413, 42, ImmutableList.of( new GenericBlobMetadata( "ndv", 3055729675574597004L, 1, ImmutableList.of(1), ImmutableMap.of())))); }
public void onRequest(FilterRequestContext requestContext, RestLiFilterResponseContextFactory filterResponseContextFactory) { // Initiate the filter chain iterator. The RestLiCallback will be passed to the method invoker at the end of the // filter chain. _filterChainIterator.onRequest(requestContext, filterResponseContextFactory, new RestLiCallback(requestContext, filterResponseContextFactory, this)); }
@SuppressWarnings("unchecked") @Test public void testFilterInvocationRequestErrorThrowsError() throws Exception { _restLiFilterChain = new RestLiFilterChain(Arrays.asList(_filters), _mockFilterChainDispatcher, _mockFilterChainCallback); _filters[1] = new CountFilterRequestErrorThrowsError(); when(_responseHandler.buildExceptionResponseData(eq(_method), any(RestLiServiceException.class), anyMap(), anyList())) .thenReturn(_mockRestLiResponseData); when(_mockRestLiResponseData.getResponseEnvelope()).thenReturn(mock(RestLiResponseEnvelope.class)); _restLiFilterChain.onRequest(_mockFilterRequestContext, new RestLiFilterResponseContextFactory(_request, _method, _responseHandler)); verifySecondFilterRequestException(); }
@Override public boolean supportsResultSetHoldability(final int holdability) { return false; }
@Test void assertSupportsResultSetHoldability() { assertFalse(metaData.supportsResultSetHoldability(0)); }
public static Logger empty() { return EMPTY_LOGGER; }
@Test public void emptyLoggerReturnsSameInstance() { Logger logger1 = Loggers.empty(); Logger logger2 = Loggers.empty(); assertThat(logger1, sameInstance(logger2)); }
public static InetSocketAddress getBindAddress(ServiceAttributeProvider service, AlluxioConfiguration conf) { int port = getPort(service, conf); assertValidPort(port); return new InetSocketAddress(getBindHost(service, conf), port); }
@Test public void testGetBindAddress() throws Exception { for (ServiceType service : ServiceType.values()) { if (service == ServiceType.JOB_MASTER_RAFT || service == ServiceType.MASTER_RAFT) { // Skip the raft services, which don't support separate bind and connect ports. continue; } getBindAddress(service); } }
public void completeDefaults(Props props) { // init string properties for (Map.Entry<Object, Object> entry : defaults().entrySet()) { props.setDefault(entry.getKey().toString(), entry.getValue().toString()); } boolean clusterEnabled = props.valueAsBoolean(CLUSTER_ENABLED.getKey(), false); if (!clusterEnabled) { props.setDefault(SEARCH_HOST.getKey(), InetAddress.getLoopbackAddress().getHostAddress()); props.setDefault(SEARCH_PORT.getKey(), "9001"); fixPortIfZero(props, Property.SEARCH_HOST.getKey(), SEARCH_PORT.getKey()); fixEsTransportPortIfNull(props); } }
@Test public void defaults_loads_properties_defaults_from_base_and_extensions() { Props p = new Props(new Properties()); when(serviceLoaderWrapper.load()).thenReturn(ImmutableSet.of(new FakeExtension1(), new FakeExtension3())); processProperties.completeDefaults(p); assertThat(p.value("sonar.some.property")).isEqualTo("1"); assertThat(p.value("sonar.some.property2")).isEqualTo("455"); assertThat(p.value("sonar.some.property4")).isEqualTo("abc"); assertThat(p.value("sonar.some.property5")).isEqualTo("def"); assertThat(p.value("sonar.some.property5")).isEqualTo("def"); assertThat(p.value("sonar.search.port")).isEqualTo("9001"); }
public CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> releaseAcquiredRecords( String groupId, String memberId ) { log.trace("Release acquired records request for groupId: {}, memberId: {}", groupId, memberId); List<TopicIdPartition> topicIdPartitions = cachedTopicIdPartitionsInShareSession( groupId, Uuid.fromString(memberId)); if (topicIdPartitions.isEmpty()) { return CompletableFuture.completedFuture(Collections.emptyMap()); } Map<TopicIdPartition, CompletableFuture<Errors>> futuresMap = new HashMap<>(); topicIdPartitions.forEach(topicIdPartition -> { SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey(groupId, topicIdPartition)); if (sharePartition == null) { log.error("No share partition found for groupId {} topicPartition {} while releasing acquired topic partitions", groupId, topicIdPartition); futuresMap.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION)); } else { CompletableFuture<Errors> future = sharePartition.releaseAcquiredRecords(memberId).thenApply(throwable -> { if (throwable.isPresent()) { return Errors.forException(throwable.get()); } return Errors.NONE; }); futuresMap.put(topicIdPartition, future); } }); CompletableFuture<Void> allFutures = CompletableFuture.allOf( futuresMap.values().toArray(new CompletableFuture[futuresMap.size()])); return allFutures.thenApply(v -> { Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = new HashMap<>(); futuresMap.forEach((topicIdPartition, future) -> result.put(topicIdPartition, new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(topicIdPartition.partition()) .setErrorCode(future.join().code()))); return result; }); }
@Test public void testReleaseAcquiredRecordsWithIncorrectGroupId() { String groupId = "grp"; Uuid memberId = Uuid.randomUuid(); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); ShareSessionCache cache = mock(ShareSessionCache.class); ShareSession shareSession = mock(ShareSession.class); when(cache.get(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession); ImplicitLinkedHashCollection<CachedSharePartition> partitionMap = new ImplicitLinkedHashCollection<>(3); partitionMap.add(new CachedSharePartition(tp1)); when(shareSession.partitionMap()).thenReturn(partitionMap); SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); // Calling releaseAcquiredRecords with incorrect groupId. CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture = sharePartitionManager.releaseAcquiredRecords("grp-2", memberId.toString()); Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join(); assertTrue(result.isEmpty()); }