focal_method (string, 13–60.9k chars)
test_case (string, 25–109k chars)
public Optional<String> reasonAllControllersZkMigrationNotReady( MetadataVersion metadataVersion, Map<Integer, ControllerRegistration> controllers ) { if (!metadataVersion.isMigrationSupported()) { return Optional.of("The metadata.version too low at " + metadataVersion); } else if (!metadataVersion.isControllerRegistrationSupported()) { return Optional.empty(); } for (int quorumNodeId : quorumNodeIds) { ControllerRegistration registration = controllers.get(quorumNodeId); if (registration == null) { return Optional.of("No registration found for controller " + quorumNodeId); } else if (!registration.zkMigrationReady()) { return Optional.of("Controller " + quorumNodeId + " has not enabled " + "zookeeper.metadata.migration.enable"); } } return Optional.empty(); }
@Test public void testZkMigrationReadyIfControllerRegistrationNotSupported() { assertEquals(Optional.empty(), QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( MetadataVersion.IBP_3_4_IV0, Collections.emptyMap())); }
@Override public boolean needAnnounceBacklog() { return initialCredit == 0 && numCreditsAvailable == 0; }
@Test void testNeedAnnounceBacklog() throws Exception { int numCredits = 2; CreditBasedSequenceNumberingViewReader reader1 = createNetworkSequenceViewReader(numCredits); assertThat(reader1.needAnnounceBacklog()).isFalse(); reader1.addCredit(-numCredits); assertThat(reader1.needAnnounceBacklog()).isFalse(); CreditBasedSequenceNumberingViewReader reader2 = createNetworkSequenceViewReader(0); assertThat(reader2.needAnnounceBacklog()).isTrue(); reader2.addCredit(numCredits); assertThat(reader2.needAnnounceBacklog()).isFalse(); reader2.addCredit(-numCredits); assertThat(reader2.needAnnounceBacklog()).isTrue(); }
@Override public long readLong() throws EOFException { if (availableLong() < 8) { throw new EOFException(); } long result = _dataBuffer.getLong(_currentOffset); _currentOffset += 8; return result; }
@Test void testReadLong() throws EOFException { long read = _dataBufferPinotInputStream.readLong(); assertEquals(read, _byteBuffer.getLong(0)); assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), Long.BYTES); }
public static Boolean judge(final ConditionData conditionData, final String realData) { if (Objects.isNull(conditionData) || StringUtils.isBlank(conditionData.getOperator())) { return false; } PredicateJudge predicateJudge = newInstance(conditionData.getOperator()); if (!(predicateJudge instanceof BlankPredicateJudge) && StringUtils.isBlank(realData)) { return false; } return predicateJudge.judge(conditionData, realData); }
@Test public void testMatchJudge() { conditionData.setOperator(OperatorEnum.MATCH.getAlias()); conditionData.setParamValue("/http/**"); assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/**")); assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/test")); assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/test/test")); assertFalse(PredicateJudgeFactory.judge(conditionData, "/http1/**")); conditionData.setParamType(ParamTypeEnum.HEADER.getName()); assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/**")); assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/**/test")); assertFalse(PredicateJudgeFactory.judge(conditionData, "/http1/**")); }
public Long getTime() { return time; }
@Test public void testDefaultTime() { SplunkHECConfiguration config = new SplunkHECConfiguration(); assertNull(config.getTime()); }
public MapWritable() { super(); this.instance = new HashMap<Writable, Writable>(); }
@SuppressWarnings("unchecked") @Test public void testMapWritable() { Text[] keys = { new Text("key1"), new Text("key2"), new Text("Key3"), }; BytesWritable[] values = { new BytesWritable("value1".getBytes()), new BytesWritable("value2".getBytes()), new BytesWritable("value3".getBytes()) }; MapWritable inMap = new MapWritable(); for (int i = 0; i < keys.length; i++) { inMap.put(keys[i], values[i]); } MapWritable outMap = new MapWritable(inMap); assertEquals(inMap.size(), outMap.size()); for (Map.Entry<Writable, Writable> e: inMap.entrySet()) { assertTrue(outMap.containsKey(e.getKey())); assertEquals(0, ((WritableComparable) outMap.get(e.getKey())).compareTo( e.getValue())); } // Now for something a little harder... Text[] maps = { new Text("map1"), new Text("map2") }; MapWritable mapOfMaps = new MapWritable(); mapOfMaps.put(maps[0], inMap); mapOfMaps.put(maps[1], outMap); MapWritable copyOfMapOfMaps = new MapWritable(mapOfMaps); for (int i = 0; i < maps.length; i++) { assertTrue(copyOfMapOfMaps.containsKey(maps[i])); MapWritable a = (MapWritable) mapOfMaps.get(maps[i]); MapWritable b = (MapWritable) copyOfMapOfMaps.get(maps[i]); assertEquals(a.size(), b.size()); for (Writable key: a.keySet()) { assertTrue(b.containsKey(key)); // This will work because we know what we put into each set WritableComparable aValue = (WritableComparable) a.get(key); WritableComparable bValue = (WritableComparable) b.get(key); assertEquals(0, aValue.compareTo(bValue)); } } }
public static String getLocalIp(String... preferredNetworks) { InetAddress address = getLocalAddress(preferredNetworks); if (null != address) { String hostAddress = address.getHostAddress(); if (address instanceof Inet6Address) { if (hostAddress.contains("%")) { hostAddress = hostAddress.substring(0, hostAddress.indexOf("%")); } } return hostAddress; } return localIP(); }
@Test public void testGetLocalIp() { assertThat(NetUtil.getLocalIp()).isNotNull(); }
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception { CruiseConfig configForEdit; CruiseConfig config; LOGGER.debug("[Config Save] Loading config holder"); configForEdit = deserializeConfig(content); if (callback != null) callback.call(configForEdit); config = preprocessAndValidate(configForEdit); return new GoConfigHolder(config, configForEdit); }
@Test void shouldFailValidation_WhenSameMaterialUsedBy2ConfigRepos() { assertThatThrownBy(() -> xmlLoader.loadConfigHolder(configWithConfigRepos( """ <config-repos> <config-repo pluginId="myplugin" id="id1"> <git url="https://github.com/tomzo/gocd-indep-config-part.git" /> </config-repo > <config-repo pluginId="myotherplugin" id="id2"> <git url="https://github.com/tomzo/gocd-indep-config-part.git" /> </config-repo > </config-repos> """ ))) .isInstanceOf(GoConfigInvalidException.class); }
public boolean equals(@NonNull String id1, @NonNull String id2) { return compare(id1, id2) == 0; }
@Test public void testEqualsCaseSensitiveEmailAddress() { IdStrategy idStrategy = new IdStrategy.CaseSensitiveEmailAddress(); assertFalse(idStrategy.equals("john.smith@acme.org", "John.Smith@acme.org")); assertFalse(idStrategy.equals("john.smith@acme.org", "John.Smith@ACME.org")); assertFalse(idStrategy.equals("john.smith@acme.org", "John.Smith@ACME.org")); assertFalse(idStrategy.equals("john.smith@acme.org", "John.Smith@acme.ORG")); assertFalse(idStrategy.equals("John@smith@acme.org", "john@Smith@acme.ORG")); String sameUser = "john.smith@acme.org"; assertTrue(idStrategy.equals(sameUser, sameUser)); assertTrue(idStrategy.equals("John.Smith@ACME.org", "John.Smith@acme.org")); assertTrue(idStrategy.equals("John.Smith@acme.ORG", "John.Smith@acme.org")); assertTrue(idStrategy.equals("john@smith@ACME.org", "john@smith@acme.org")); }
public static List<MetricsPacket.Builder> toMetricsPackets(String jsonString) { List<MetricsPacket.Builder> packets = new ArrayList<>(); var mapper = objectMapper(); try (JsonParser jp = mapper.createParser(jsonString)) { while (jp.nextToken() != null) { YamasJsonModel jsonModel = jp.readValueAs(YamasJsonModel.class); packets.add(toMetricsPacketBuilder(jsonModel)); } return packets; } catch (IOException e) { log.log(WARNING, "Could not create metrics packet from string:\n" + jsonString, e); return List.of(); } }
@Test public void empty_json_string_yields_empty_packet_list() { List<MetricsPacket.Builder> builders = toMetricsPackets(""); assertTrue(builders.isEmpty()); }
@Override public void fetchSegmentToLocal(URI downloadURI, File dest) throws Exception { // Create a RoundRobinURIProvider to round robin IP addresses when retry uploading. Otherwise may always try to // download from a same broken host as: 1) DNS may not RR the IP addresses 2) OS cache the DNS resolution result. RoundRobinURIProvider uriProvider = new RoundRobinURIProvider(List.of(downloadURI), true); int retryCount = getRetryCount(uriProvider); _logger.info("Retry downloading for {} times. retryCount from pinot server config: {}, number of IP addresses for " + "download URI: {}", retryCount, _retryCount, uriProvider.numAddresses()); RetryPolicies.exponentialBackoffRetryPolicy(retryCount, _retryWaitMs, _retryDelayScaleFactor).attempt(() -> { URI uri = uriProvider.next(); try { String hostName = downloadURI.getHost(); int port = downloadURI.getPort(); // If the original download address is specified as host name, need add a "HOST" HTTP header to the HTTP // request. Otherwise, if the download address is a LB address, when the LB be configured as "disallow direct // access by IP address", downloading will fail. List<Header> httpHeaders = new LinkedList<>(); if (!InetAddresses.isInetAddress(hostName)) { httpHeaders.add(new BasicHeader(HttpHeaders.HOST, hostName + ":" + port)); } int statusCode = _httpClient.downloadFile(uri, dest, _authProvider, httpHeaders); _logger.info("Downloaded segment from: {} to: {} of size: {}; Response status code: {}", uri, dest, dest.length(), statusCode); return true; } catch (HttpErrorStatusException e) { int statusCode = e.getStatusCode(); if (statusCode == HttpStatus.SC_NOT_FOUND || statusCode >= 500) { // Temporary exception // 404 is treated as a temporary exception, as the downloadURI may be backed by multiple hosts, // if singe host is down, can retry with another host. _logger.warn("Got temporary error status code: {} while downloading segment from: {} to: {}", statusCode, uri, dest, e); return false; } else { // Permanent exception _logger.error("Got permanent error status code: {} while downloading segment from: {} to: {}, won't retry", statusCode, uri, dest, e); throw e; } } catch (Exception e) { _logger.warn("Caught exception while downloading segment from: {} to: {}", uri, dest, e); return false; } }); }
@Test(expectedExceptions = AttemptsExceededException.class) public void testFetchSegmentToLocalAllDownloadAttemptsFailed() throws Exception { FileUploadDownloadClient client = mock(FileUploadDownloadClient.class); // All attempts failed when(client.downloadFile(any(), any(), any())).thenReturn(300); HttpSegmentFetcher segmentFetcher = getSegmentFetcher(client); List<URI> uris = List.of(new URI("http://h1:8080"), new URI("http://h2:8080")); segmentFetcher.fetchSegmentToLocal(SEGMENT_NAME, () -> uris, SEGMENT_FILE); }
@Override public SerializerAdapter serializerFor(Object object, boolean includeSchema) { Class<?> clazz = object == null ? null : object.getClass(); SerializerAdapter serializer = null; if (clazz != null) { serializer = serializersByClass.get(clazz); } if (serializer == null) { try { serializer = delegate.serializerFor(object, includeSchema); } catch (HazelcastSerializationException hse) { throw serializationException(clazz, hse); } } if (serializer == null) { throw active ? serializationException(clazz) : new HazelcastInstanceNotActiveException(); } return serializer; }
@Test public void when_doesNotFind_then_Delegates() { // Given DelegatingSerializationService service = new DelegatingSerializationService(emptyMap(), DELEGATE); // When // Then assertThat(service.serializerFor(TYPE_ID).getImpl()).isInstanceOf(ValueSerializer.class); assertThat(service.serializerFor(new Value(), false).getImpl()).isInstanceOf(ValueSerializer.class); }
@Override public void doPush(String clientId, Subscriber subscriber, PushDataWrapper data) { pushService.pushDataWithoutCallback(subscriber, handleClusterData(replaceServiceInfoName(data, subscriber), subscriber)); }
@Test void testDoPush() { pushExecutor.doPush(rpcClientId, subscriber, pushData); verify(pushService).pushDataWithoutCallback(eq(subscriber), any(ServiceInfo.class)); }
public static String describe(List<org.apache.iceberg.expressions.Expression> exprs) { return exprs.stream().map(Spark3Util::describe).collect(Collectors.joining(", ")); }
@Test public void testDescribeSortOrder() { Schema schema = new Schema( required(1, "data", Types.StringType.get()), required(2, "time", Types.TimestampType.withoutZone())); assertThat(Spark3Util.describe(buildSortOrder("Identity", schema, 1))) .as("Sort order isn't correct.") .isEqualTo("data DESC NULLS FIRST"); assertThat(Spark3Util.describe(buildSortOrder("bucket[1]", schema, 1))) .as("Sort order isn't correct.") .isEqualTo("bucket(1, data) DESC NULLS FIRST"); assertThat(Spark3Util.describe(buildSortOrder("truncate[3]", schema, 1))) .as("Sort order isn't correct.") .isEqualTo("truncate(data, 3) DESC NULLS FIRST"); assertThat(Spark3Util.describe(buildSortOrder("year", schema, 2))) .as("Sort order isn't correct.") .isEqualTo("years(time) DESC NULLS FIRST"); assertThat(Spark3Util.describe(buildSortOrder("month", schema, 2))) .as("Sort order isn't correct.") .isEqualTo("months(time) DESC NULLS FIRST"); assertThat(Spark3Util.describe(buildSortOrder("day", schema, 2))) .as("Sort order isn't correct.") .isEqualTo("days(time) DESC NULLS FIRST"); assertThat(Spark3Util.describe(buildSortOrder("hour", schema, 2))) .as("Sort order isn't correct.") .isEqualTo("hours(time) DESC NULLS FIRST"); assertThat(Spark3Util.describe(buildSortOrder("unknown", schema, 1))) .as("Sort order isn't correct.") .isEqualTo("unknown(data) DESC NULLS FIRST"); // multiple sort orders SortOrder multiOrder = SortOrder.builderFor(schema).asc("time", NULLS_FIRST).asc("data", NULLS_LAST).build(); assertThat(Spark3Util.describe(multiOrder)) .as("Sort order isn't correct.") .isEqualTo("time ASC NULLS FIRST, data ASC NULLS LAST"); }
@Nullable @SuppressWarnings("checkstyle:returncount") static Metadata resolve(InternalSerializationService ss, Object target, boolean key) { try { if (target instanceof Data) { Data data = (Data) target; if (data.isPortable()) { ClassDefinition classDefinition = ss.getPortableContext().lookupClassDefinition(data); return resolvePortable(classDefinition, key); } else if (data.isCompact()) { return resolveCompact(ss.extractSchemaFromData(data), key); } else if (data.isJson()) { return null; } else { return resolveJava(ss.toObject(data).getClass(), key); } } else if (target instanceof VersionedPortable) { VersionedPortable portable = (VersionedPortable) target; ClassDefinition classDefinition = ss.getPortableContext() .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), portable.getClassVersion()); return resolvePortable(classDefinition, key); } else if (target instanceof Portable) { Portable portable = (Portable) target; ClassDefinition classDefinition = ss.getPortableContext() .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), 0); return resolvePortable(classDefinition, key); } else if (target instanceof PortableGenericRecord) { return resolvePortable(((PortableGenericRecord) target).getClassDefinition(), key); } else if (target instanceof CompactGenericRecord) { return resolveCompact(((CompactGenericRecord) target).getSchema(), key); } else if (ss.isCompactSerializable(target)) { Schema schema = ss.extractSchemaFromObject(target); return resolveCompact(schema, key); } else if (target instanceof HazelcastJsonValue) { return null; } else { return resolveJava(target.getClass(), key); } } catch (Exception e) { return null; } }
@Test public void test_java() { InternalSerializationService ss = new DefaultSerializationServiceBuilder().build(); Metadata metadata = SampleMetadataResolver.resolve(ss, new Value(), key); assertThat(metadata.options()).containsExactly( entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, JAVA_FORMAT), entry(key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS, Value.class.getName()) ); metadata = SampleMetadataResolver.resolve(ss, ss.toData(new Value()), key); assertThat(metadata.options()).containsExactly( entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, JAVA_FORMAT), entry(key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS, Value.class.getName()) ); }
@Override public final int hashCode() { return delegate.hashCode(); }
@Test public void requireThatHashCodeIsImplemented() { assertEquals(newLazySet(null).hashCode(), newLazySet(null).hashCode()); }
@Override public String format(final Schema schema) { final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema); return options.contains(Option.AS_COLUMN_LIST) ? stripTopLevelStruct(converted) : converted; }
@Test public void shouldEscapeReservedWords() { // Given: final Schema structSchema = SchemaBuilder.struct() .field("COL1", Schema.STRING_SCHEMA) .field("COL2", SchemaBuilder .struct() .field("COL3", Schema.STRING_SCHEMA) .build()) .build(); final Predicate<String> escaper = name -> !name.equalsIgnoreCase("COL1"); final SqlSchemaFormatter formatter = new SqlSchemaFormatter(escaper); // Then: assertThat(formatter.format(structSchema), is( "STRUCT<" + "COL1 VARCHAR, " + "`COL2` STRUCT<`COL3` VARCHAR>" + ">")); }
public int getSize() { return size; }
@Test public void testArraySizeConstructor() { ByteArrayHashIndex obj = new ByteArrayHashIndex( new RowMeta(), 1 ); assertEquals( 1, obj.getSize() ); obj = new ByteArrayHashIndex( new RowMeta(), 2 ); assertEquals( 2, obj.getSize() ); obj = new ByteArrayHashIndex( new RowMeta(), 3 ); assertEquals( 4, obj.getSize() ); obj = new ByteArrayHashIndex( new RowMeta(), 12 ); assertEquals( 16, obj.getSize() ); obj = new ByteArrayHashIndex( new RowMeta(), 99 ); assertEquals( 128, obj.getSize() ); }
@Override public List<RemoteInstance> queryRemoteNodes() { List<RemoteInstance> remoteInstances = new ArrayList<>(20); try { List<ServiceInstance<RemoteInstance>> serviceInstances = serviceCache.getInstances(); serviceInstances.forEach(serviceInstance -> { RemoteInstance instance = serviceInstance.getPayload(); if (instance.getAddress().equals(selfAddress)) { instance.getAddress().setSelf(true); } else { instance.getAddress().setSelf(false); } remoteInstances.add(instance); }); ClusterHealthStatus healthStatus = OAPNodeChecker.isHealth(remoteInstances); if (healthStatus.isHealth()) { this.healthChecker.health(); } else { this.healthChecker.unHealth(healthStatus.getReason()); } } catch (Throwable e) { this.healthChecker.unHealth(e); throw new ServiceQueryException(e.getMessage()); } if (log.isDebugEnabled()) { remoteInstances.forEach(instance -> log.debug("Zookeeper cluster instance: {}", instance)); } return remoteInstances; }
@Test public void queryRemoteNodes() { }
public int getHealth(int wizard) { return wizards[wizard].getHealth(); }
@Test void testGetHealth() { var wizardNumber = 0; var bytecode = new int[8]; bytecode[0] = LITERAL.getIntValue(); bytecode[1] = wizardNumber; bytecode[2] = LITERAL.getIntValue(); bytecode[3] = 50; // health amount bytecode[4] = SET_HEALTH.getIntValue(); bytecode[5] = LITERAL.getIntValue(); bytecode[6] = wizardNumber; bytecode[7] = GET_HEALTH.getIntValue(); var vm = new VirtualMachine(); vm.execute(bytecode); assertEquals(Integer.valueOf(50), vm.getStack().pop()); }
@Override public void setMonochrome(boolean monochrome) { formats = monochrome ? monochrome() : ansi(); }
@Test void should_skip_missing_location_strings() { Feature feature = TestFeatureParser.parse("path/test.feature", "" + "Feature: feature name\n" + " Scenario: scenario name\n" + " Given first step\n" + " When second step\n" + " Then third step\n"); ByteArrayOutputStream out = new ByteArrayOutputStream(); Runtime.builder() .withFeatureSupplier(new StubFeatureSupplier(feature)) .withAdditionalPlugins(new PrettyFormatter(out)) .withRuntimeOptions(new RuntimeOptionsBuilder().setMonochrome().build()) .withBackendSupplier(new StubBackendSupplier( new StubStepDefinition("first step", "path/step_definitions.java:3"), new StubStepDefinition("second step", (String) null), new StubStepDefinition("third step", "path/step_definitions.java:11"))) .build() .run(); assertThat(out, bytes(equalToCompressingWhiteSpace("" + "\n" + "Scenario: scenario name # path/test.feature:2\n" + " Given first step # path/step_definitions.java:3\n" + " When second step\n" + " Then third step # path/step_definitions.java:11\n"))); }
public boolean isUnresolved() { return inetSocketAddress.isUnresolved(); }
@Test public void testIsUnresolved() throws Exception { final InetSocketAddress inetSocketAddress = new InetSocketAddress(Inet4Address.getLoopbackAddress(), 12345); final ResolvableInetSocketAddress address = new ResolvableInetSocketAddress(inetSocketAddress); assertThat(address.isUnresolved()).isEqualTo(inetSocketAddress.isUnresolved()); }
@Override public Path find() throws BackgroundException { return this.find(Context.files); }
@Test public void testFindWithUsername() throws Exception { final NextcloudHomeFeature feature = new NextcloudHomeFeature(new Host(new NextcloudProtocol(), new Credentials("u"))); assertEquals(new Path("/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs)); assertEquals(new Path("/remote.php/dav/files/u", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files)); assertEquals(new Path("/remote.php/dav/meta", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.meta)); assertEquals(new Path("/remote.php/dav/versions/u", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.versions)); }
protected List<MavenArtifact> processResponse(Dependency dependency, HttpURLConnection conn) throws IOException { final List<MavenArtifact> result = new ArrayList<>(); try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8); JsonParser parser = objectReader.getFactory().createParser(streamReader)) { if (init(parser) && parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT) { // at least one result do { final FileImpl file = objectReader.readValue(parser); checkHashes(dependency, file.getChecksums()); final Matcher pathMatcher = PATH_PATTERN.matcher(file.getPath()); if (!pathMatcher.matches()) { throw new IllegalStateException("Cannot extract the Maven information from the path " + "retrieved in Artifactory " + file.getPath()); } final String groupId = pathMatcher.group("groupId").replace('/', '.'); final String artifactId = pathMatcher.group("artifactId"); final String version = pathMatcher.group("version"); result.add(new MavenArtifact(groupId, artifactId, version, file.getDownloadUri(), MavenArtifact.derivePomUrl(artifactId, version, file.getDownloadUri()))); } while (parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT); } else { throw new FileNotFoundException("Artifact " + dependency + " not found in Artifactory"); } } return result; }
@Test public void shouldProcessCorrectlyArtifactoryAnswerMisMatchSha256() throws IOException { // Given Dependency dependency = new Dependency(); dependency.setSha1sum("c5b4c491aecb72e7c32a78da0b5c6b9cda8dee0f"); dependency.setSha256sum("512b4bf6927f4864acc419b8c5109c23361c30ed1f5798170248d33040de068f"); dependency.setMd5sum("2d1dd0fc21ee96bccfab4353d5379649"); final HttpURLConnection urlConnection = mock(HttpURLConnection.class); final byte[] payload = payloadWithSha256().getBytes(StandardCharsets.UTF_8); when(urlConnection.getInputStream()).thenReturn(new ByteArrayInputStream(payload)); // When try { searcher.processResponse(dependency, urlConnection); fail("SHA256 mismatching should throw an exception!"); } catch (FileNotFoundException e) { // Then assertEquals("Artifact found by API is not matching the SHA-256 of the artifact (repository hash is 512b4bf6927f4864acc419b8c5109c23361c30ed1f5798170248d33040de068e while actual is 512b4bf6927f4864acc419b8c5109c23361c30ed1f5798170248d33040de068f) !", e.getMessage()); } }
@VisibleForTesting protected int compactionTaskLimit() { if (Config.lake_compaction_max_tasks >= 0) { return Config.lake_compaction_max_tasks; } WarehouseManager manager = GlobalStateMgr.getCurrentState().getWarehouseMgr(); Warehouse warehouse = manager.getCompactionWarehouse(); List<ComputeNode> aliveComputeNodes = manager.getAliveComputeNodes(warehouse.getId()); return aliveComputeNodes.size() * 16; }
@Test public void testCompactionTaskLimit() { CompactionScheduler compactionScheduler = new CompactionScheduler(null, null, null, null, ""); int defaultValue = Config.lake_compaction_max_tasks; // explicitly set config to a value bigger than default -1 Config.lake_compaction_max_tasks = 10; Assert.assertEquals(10, compactionScheduler.compactionTaskLimit()); // reset config to default value Config.lake_compaction_max_tasks = defaultValue; Backend b1 = new Backend(10001L, "192.168.0.1", 9050); ComputeNode c1 = new ComputeNode(10001L, "192.168.0.2", 9050); ComputeNode c2 = new ComputeNode(10001L, "192.168.0.3", 9050); new MockUp<WarehouseManager>() { @Mock public List<ComputeNode> getAliveComputeNodes(long warehouseId) { return Arrays.asList(b1, c1, c2); } @Mock public Warehouse getCompactionWarehouse() { return new DefaultWarehouse(WarehouseManager.DEFAULT_WAREHOUSE_ID, WarehouseManager.DEFAULT_WAREHOUSE_NAME); } }; Assert.assertEquals(3 * 16, compactionScheduler.compactionTaskLimit()); }
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj instanceof DefaultPatchDescription) { final DefaultPatchDescription that = (DefaultPatchDescription) obj; return this.getClass() == that.getClass() && Objects.equals(this.deviceId, that.deviceId) && Objects.equals(this.ifaceName, that.ifaceName) && Objects.equals(this.peerName, that.peerName); } return false; }
@Test public void testEquals() { new EqualsTester() .addEqualityGroup(defaultPatchDescription1, sameAsDefaultPatchDescription1) .addEqualityGroup(defaultPatchDescription2) .addEqualityGroup(defaultPatchDescription3) .addEqualityGroup(defaultPatchDescription4) .addEqualityGroup(defaultPatchDescriptionNoDeviceId) .testEquals(); }
boolean convertDeviceProfileForVersion330(JsonNode profileData) { boolean isUpdated = false; if (profileData.has("alarms") && !profileData.get("alarms").isNull()) { JsonNode alarms = profileData.get("alarms"); for (JsonNode alarm : alarms) { if (alarm.has("createRules")) { JsonNode createRules = alarm.get("createRules"); for (AlarmSeverity severity : AlarmSeverity.values()) { if (createRules.has(severity.name())) { JsonNode spec = createRules.get(severity.name()).get("condition").get("spec"); if (convertDeviceProfileAlarmRulesForVersion330(spec)) { isUpdated = true; } } } } if (alarm.has("clearRule") && !alarm.get("clearRule").isNull()) { JsonNode spec = alarm.get("clearRule").get("condition").get("spec"); if (convertDeviceProfileAlarmRulesForVersion330(spec)) { isUpdated = true; } } } } return isUpdated; }
@Test void convertDeviceProfileAlarmRulesForVersion330AlarmNodeNull() throws JsonProcessingException { JsonNode spec = JacksonUtil.toJsonNode("{ \"alarms\" : null }"); JsonNode expected = JacksonUtil.toJsonNode("{ \"alarms\" : null }"); assertThat(service.convertDeviceProfileForVersion330(spec)).isFalse(); assertThat(spec.toPrettyString()).isEqualTo(expected.toPrettyString()); }
public String getName() { return name; }
@Test public void testBooleanAssumption() { GoldFish goldFish = new GoldFish("Windows Jelly", 1); assumeTrue(System.getProperty("os.name").contains("Windows")); assertThat(goldFish.getName(), equalToIgnoringCase("Windows Jelly")); }
public static String getRemoteIp(HttpServletRequest request) { String remoteIp = RequestContextHolder.getContext().getBasicContext().getAddressContext().getSourceIp(); if (StringUtils.isBlank(remoteIp)) { remoteIp = RequestContextHolder.getContext().getBasicContext().getAddressContext().getRemoteIp(); } if (StringUtils.isBlank(remoteIp)) { remoteIp = WebUtils.getRemoteIp(request); } return remoteIp; }
@Test void testGetRemoteIpFromRequest() { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1"); assertEquals("127.0.0.1", RequestUtil.getRemoteIp(request)); Mockito.when(request.getHeader(eq(X_REAL_IP))).thenReturn("127.0.0.2"); assertEquals("127.0.0.2", RequestUtil.getRemoteIp(request)); Mockito.when(request.getHeader(eq(X_FORWARDED_FOR))).thenReturn("127.0.0.3"); assertEquals("127.0.0.3", RequestUtil.getRemoteIp(request)); Mockito.when(request.getHeader(eq(X_FORWARDED_FOR))).thenReturn("127.0.0.3, 127.0.0.4"); assertEquals("127.0.0.3", RequestUtil.getRemoteIp(request)); Mockito.when(request.getHeader(eq(X_FORWARDED_FOR))).thenReturn(""); assertEquals("127.0.0.2", RequestUtil.getRemoteIp(request)); Mockito.when(request.getHeader(eq(X_REAL_IP))).thenReturn(""); assertEquals("127.0.0.1", RequestUtil.getRemoteIp(request)); }
@Override public boolean isEmpty() { return topicNames.isEmpty(); }
@Test public void testIsEmpty() { Set<String> topicNames = Collections.emptySet(); Set<Uuid> topicIds = new TopicIds(topicNames, TopicsImage.EMPTY); assertEquals(topicNames.size(), topicIds.size()); }
public static UserAgent parse(String userAgentString) { return UserAgentParser.parse(userAgentString); }
@Test public void parseWindows10WithChromeTest() { final String uaStr = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36"; final UserAgent ua = UserAgentUtil.parse(uaStr); assertEquals("Chrome", ua.getBrowser().toString()); assertEquals("70.0.3538.102", ua.getVersion()); assertEquals("Webkit", ua.getEngine().toString()); assertEquals("537.36", ua.getEngineVersion()); assertEquals("Windows 10 or Windows Server 2016", ua.getOs().toString()); assertEquals("10.0", ua.getOsVersion()); assertEquals("Windows", ua.getPlatform().toString()); assertFalse(ua.isMobile()); }
static CounterResult fromJson(String json) { return JsonUtil.parse(json, CounterResultParser::fromJson); }
@Test public void missingFields() { assertThatThrownBy(() -> CounterResultParser.fromJson("{}")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse missing string: unit"); assertThatThrownBy(() -> CounterResultParser.fromJson("{\"unit\":\"bytes\"}")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse missing long: value"); }
public String orderClause(AmountRequest amountRequest) { return orderClause(amountRequest, ORDER_TERM_TO_SQL_STRING); }
@Test void mapWithSomeIllegalStuff1() { final AmountRequest pageRequest = new AmountRequest("createdAt:ASC,\"delete * from jobtable\"updatedAt:DESC", 2); assertThat(amountMapper.orderClause(pageRequest)).isEqualTo(" ORDER BY createdAt ASC"); }
public Set<TaskId> activeTasks() { return unmodifiableSet(assignedActiveTasks.taskIds()); }
@Test public void shouldNotModifyActiveView() { final ClientState clientState = new ClientState(1); final Set<TaskId> taskIds = clientState.activeTasks(); assertThrows(UnsupportedOperationException.class, () -> taskIds.add(TASK_0_0)); assertThat(clientState, hasActiveTasks(0)); }
@Override protected Optional<ErrorResponse> filter(DiscFilterRequest req) { var certs = req.getClientCertificateChain(); log.fine(() -> "Certificate chain contains %d elements".formatted(certs.size())); if (certs.isEmpty()) { log.fine("Missing client certificate"); return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized")); } if (legacyMode) { log.fine("Legacy mode validation complete"); ClientPrincipal.attachToRequest(req, Set.of(), Set.of(READ, WRITE)); return Optional.empty(); } var permission = Permission.getRequiredPermission(req).orElse(null); if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden")); var clientCert = certs.get(0); var clientIds = new TreeSet<String>(); var permissions = new TreeSet<Permission>(); for (Client c : allowedClients) { if (!c.permissions().contains(permission)) continue; if (!c.certificates().contains(clientCert)) continue; clientIds.add(c.id()); permissions.addAll(c.permissions()); } if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden")); ClientPrincipal.attachToRequest(req, clientIds, permissions); return Optional.empty(); }
@Test void fails_on_missing_certificate_in_legacy_mode() { var req = FilterTestUtils.newRequestBuilder().build(); var responseHandler = new MockResponseHandler(); newFilterWithLegacyMode().filter(req, responseHandler); assertNotNull(responseHandler.getResponse()); assertEquals(UNAUTHORIZED, responseHandler.getResponse().getStatus()); }
@Override public void configure(Map<String, ?> props) { final SimpleConfig config = new SimpleConfig(CONFIG_DEF, props); casts = parseFieldTypes(config.getList(SPEC_CONFIG)); wholeValueCastType = casts.get(WHOLE_VALUE_CAST); schemaUpdateCache = new SynchronizedCache<>(new LRUCache<>(16)); replaceNullWithDefault = config.getBoolean(REPLACE_NULL_WITH_DEFAULT_CONFIG); }
@Test public void testConfigInvalidSchemaType() { assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:faketype"))); }
@Override public boolean isOff(Ability ability) { return false; }
@Test public void claimsEveryAbilityIsOn() { Ability random = DefaultBot.getDefaultBuilder() .name("randomsomethingrandom").build(); toggle = new DefaultToggle(); defaultBot = new DefaultBot(null, EMPTY, db, toggle); defaultBot.onRegister(); assertFalse(toggle.isOff(random)); }
@Override public CompletableFuture<RegistrationResponse> registerJobMaster( final JobMasterId jobMasterId, final ResourceID jobManagerResourceId, final String jobManagerAddress, final JobID jobId, final Time timeout) { checkNotNull(jobMasterId); checkNotNull(jobManagerResourceId); checkNotNull(jobManagerAddress); checkNotNull(jobId); if (!jobLeaderIdService.containsJob(jobId)) { try { jobLeaderIdService.addJob(jobId); } catch (Exception e) { ResourceManagerException exception = new ResourceManagerException( "Could not add the job " + jobId + " to the job id leader service.", e); onFatalError(exception); log.error("Could not add job {} to job leader id service.", jobId, e); return FutureUtils.completedExceptionally(exception); } } log.info( "Registering job manager {}@{} for job {}.", jobMasterId, jobManagerAddress, jobId); CompletableFuture<JobMasterId> jobMasterIdFuture; try { jobMasterIdFuture = jobLeaderIdService.getLeaderId(jobId); } catch (Exception e) { // we cannot check the job leader id so let's fail // TODO: Maybe it's also ok to skip this check in case that we cannot check the leader // id ResourceManagerException exception = new ResourceManagerException( "Cannot obtain the " + "job leader id future to verify the correct job leader.", e); onFatalError(exception); log.debug( "Could not obtain the job leader id future to verify the correct job leader."); return FutureUtils.completedExceptionally(exception); } CompletableFuture<JobMasterGateway> jobMasterGatewayFuture = getRpcService().connect(jobManagerAddress, jobMasterId, JobMasterGateway.class); CompletableFuture<RegistrationResponse> registrationResponseFuture = jobMasterGatewayFuture.thenCombineAsync( jobMasterIdFuture, (JobMasterGateway jobMasterGateway, JobMasterId leadingJobMasterId) -> { if (Objects.equals(leadingJobMasterId, jobMasterId)) { return registerJobMasterInternal( jobMasterGateway, jobId, jobManagerAddress, jobManagerResourceId); } else { final String declineMessage = String.format( "The leading JobMaster id %s did not match the received JobMaster id %s. " + "This indicates that a JobMaster leader change has happened.", leadingJobMasterId, jobMasterId); log.debug(declineMessage); return new RegistrationResponse.Failure( new FlinkException(declineMessage)); } }, getMainThreadExecutor(jobId)); // handle exceptions which might have occurred in one of the futures inputs of combine return registrationResponseFuture.handleAsync( (RegistrationResponse registrationResponse, Throwable throwable) -> { if (throwable != null) { if (log.isDebugEnabled()) { log.debug( "Registration of job manager {}@{} failed.", jobMasterId, jobManagerAddress, throwable); } else { log.info( "Registration of job manager {}@{} failed.", jobMasterId, jobManagerAddress); } return new RegistrationResponse.Failure(throwable); } else { return registrationResponse; } }, ioExecutor); }
@Test void testJobMasterBecomesUnreachableTriggersDisconnect() throws Exception { final JobID jobId = new JobID(); final ResourceID jobMasterResourceId = ResourceID.generate(); final CompletableFuture<ResourceManagerId> disconnectFuture = new CompletableFuture<>(); final TestingJobMasterGateway jobMasterGateway = new TestingJobMasterGatewayBuilder() .setAddress(UUID.randomUUID().toString()) .setResourceManagerHeartbeatFunction( resourceId -> FutureUtils.completedExceptionally( new RecipientUnreachableException( "sender", "recipient", "task executor is unreachable"))) .setDisconnectResourceManagerConsumer(disconnectFuture::complete) .build(); rpcService.registerGateway(jobMasterGateway.getAddress(), jobMasterGateway); final LeaderRetrievalService jobMasterLeaderRetrievalService = new SettableLeaderRetrievalService( jobMasterGateway.getAddress(), jobMasterGateway.getFencingToken().toUUID()); highAvailabilityServices.setJobMasterLeaderRetrieverFunction( requestedJobId -> { assertThat(requestedJobId).isEqualTo(jobId); return jobMasterLeaderRetrievalService; }); runHeartbeatTargetBecomesUnreachableTest( (ignore) -> {}, resourceManagerGateway -> { final CompletableFuture<RegistrationResponse> registrationFuture = resourceManagerGateway.registerJobMaster( jobMasterGateway.getFencingToken(), jobMasterResourceId, jobMasterGateway.getAddress(), jobId, TIMEOUT); assertThatFuture(registrationFuture) .eventuallySucceeds() .isInstanceOf(RegistrationResponse.Success.class); }, resourceManagerResourceId -> assertThatFuture(disconnectFuture) .eventuallySucceeds() .isEqualTo(resourceManagerId)); }
@Override public Map<String, Object> unbox() { if (val == null) { return null; } Map<String, Object> ret = new HashMap<>(); for (Map.Entry<SelString, SelType> entry : val.entrySet()) { switch (entry.getValue().type()) { case STRING: case LONG: case DOUBLE: case BOOLEAN: ret.put(entry.getKey().getInternalVal(), entry.getValue().getInternalVal()); break; case STRING_ARRAY: case LONG_ARRAY: case DOUBLE_ARRAY: case BOOLEAN_ARRAY: case MAP: ret.put(entry.getKey().getInternalVal(), entry.getValue().unbox()); break; default: throw new UnsupportedOperationException( "Invalid type, not support having map entry value " + entry); } } return ret; }
@Test public void testUnbox() { Map<String, Object> map = orig.unbox(); assertEquals("{foo=bar, num=123}", String.valueOf(map)); }
@Override public Optional<String> resolveQueryFailure(QueryStats controlQueryStats, QueryException queryException, Optional<QueryObjectBundle> test) { return mapMatchingPrestoException(queryException, CONTROL_CHECKSUM, ImmutableSet.of(COMPILER_ERROR, GENERATED_BYTECODE_TOO_LARGE), e -> Optional.of("Checksum query too large")); }
@Test public void testResolveCompilerError() { assertEquals( getFailureResolver().resolveQueryFailure( CONTROL_QUERY_STATS, new PrestoQueryException( new RuntimeException(), false, CONTROL_CHECKSUM, Optional.of(COMPILER_ERROR), EMPTY_STATS), Optional.empty()), Optional.of("Checksum query too large")); }
@VisibleForTesting OutputBufferMemoryManager getMemoryManager() { return memoryManager; }
@Test public void testSharedBufferBlocking() { SettableFuture<?> blockedFuture = SettableFuture.create(); MockMemoryReservationHandler reservationHandler = new MockMemoryReservationHandler(blockedFuture); AggregatedMemoryContext memoryContext = newRootAggregatedMemoryContext(reservationHandler, 0L); Page page = createPage(1); long pageSize = PAGES_SERDE.serialize(page).getRetainedSizeInBytes(); // create a buffer that can only hold two pages BroadcastOutputBuffer buffer = createBroadcastBuffer(createInitialEmptyOutputBuffers(BROADCAST), new DataSize(pageSize * 2, BYTE), memoryContext, directExecutor()); OutputBufferMemoryManager memoryManager = buffer.getMemoryManager(); // adding the first page will block as no memory is available (MockMemoryReservationHandler will return a future that is not done) enqueuePage(buffer, page); // more memory is available blockedFuture.set(null); memoryManager.onMemoryAvailable(); assertTrue(memoryManager.getBufferBlockedFuture().isDone(), "buffer shouldn't be blocked"); // we should be able to add one more page after more memory is available addPage(buffer, page); // the buffer is full now enqueuePage(buffer, page); }
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } final P4MeterModel other = (P4MeterModel) obj; return Objects.equals(this.id, other.id) && Objects.equals(this.meterType, other.meterType) && Objects.equals(this.unit, other.unit) && Objects.equals(this.table, other.table) && Objects.equals(this.size, other.size); }
@Test public void testEquals() { new EqualsTester() .addEqualityGroup(P4_METER_MODEL_1, SAME_AS_P4_METER_MODEL_1) .addEqualityGroup(P4_METER_MODEL_2) .addEqualityGroup(P4_METER_MODEL_3) .testEquals(); }
public boolean createDataConnection(DataConnectionCatalogEntry dl, boolean replace, boolean ifNotExists) { if (replace) { dataConnectionStorage.put(dl.name(), dl); listeners.forEach(TableListener::onTableChanged); return true; } else { boolean added = dataConnectionStorage.putIfAbsent(dl.name(), dl); if (!added && !ifNotExists) { throw QueryException.error("Data connection already exists: " + dl.name()); } if (!added) { // report only updates to listener listeners.forEach(TableListener::onTableChanged); } return added; } }
@Test public void when_createsDuplicateDataConnection_then_throws() { // given DataConnectionCatalogEntry dataConnectionCatalogEntry = dataConnection(); given(relationsStorage.putIfAbsent(eq(dataConnectionCatalogEntry.name()), isA(DataConnectionCatalogEntry.class))).willReturn(false); // when // then assertThatThrownBy(() -> dataConnectionResolver.createDataConnection(dataConnectionCatalogEntry, false, false)) .isInstanceOf(QueryException.class) .hasMessageContaining("Data connection already exists: " + dataConnectionCatalogEntry.name()); }
<T extends PipelineOptions> T as(Class<T> iface) { checkNotNull(iface); checkArgument(iface.isInterface(), "Not an interface: %s", iface); T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { synchronized (this) { // double check existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { Registration<T> registration = PipelineOptionsFactory.CACHE .get() .validateWellFormed(iface, computedProperties.knownInterfaces); List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors(); Class<T> proxyClass = registration.getProxyClass(); existingOption = InstanceBuilder.ofType(proxyClass) .fromClass(proxyClass) .withArg(InvocationHandler.class, this) .build(); computedProperties = computedProperties.updated(iface, existingOption, propertyDescriptors); } } } return existingOption; }
@Test public void testPrimitiveIntegerFromJsonOptions() throws Exception { String optionsJson = "{\"options\":{\"appName\":\"ProxyInvocationHandlerTest\",\"optionsId\":1,\"int\":\"100\"},\"display_data\":[{\"namespace\":\"org.apache.beam.sdk.options.ProxyInvocationHandlerTest$DisplayDataOptions\",\"key\":\"int\",\"type\":\"INTEGER\",\"value\":100},{\"namespace\":\"org.apache.beam.sdk.options.ApplicationNameOptions\",\"key\":\"appName\",\"type\":\"STRING\",\"value\":\"ProxyInvocationHandlerTest\"}]}"; PrimitiveIntOptions options = MAPPER.readValue(optionsJson, PipelineOptions.class).as(PrimitiveIntOptions.class); int value = options.getInt(); assertEquals(100, value); }
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { var requestIdGenerator = platform.getContainer().getOptionalComponentByType(RequestIdGenerator.class); if (requestIdGenerator.isEmpty()) { chain.doFilter(request, response); } else { String requestId = requestIdGenerator.get().generate(); try (RequestIdMDCStorage mdcStorage = new RequestIdMDCStorage(requestId)) { request.setAttribute("ID", requestId); chain.doFilter(request, response); } } }
@Test public void filter_put_id_in_MDC_and_remove_it_after_chain_throws_exception() throws IOException, ServletException { RuntimeException exception = new RuntimeException("Simulating chain failing"); String requestId = "request id"; when(requestIdGenerator.generate()).thenReturn(requestId); doAnswer(invocation -> { assertThat(MDC.get("HTTP_REQUEST_ID")).isEqualTo(requestId); throw exception; }) .when(filterChain) .doFilter(servletRequest, servletResponse); try { underTest.doFilter(servletRequest, servletResponse, filterChain); fail("A runtime exception should have been raised"); } catch (RuntimeException e) { assertThat(e).isEqualTo(exception); } finally { assertThat(MDC.get("HTTP_REQUEST_ID")).isNull(); } }
public boolean canRunAppAM(Resource amResource) { if (Math.abs(maxAMShare - -1.0f) < 0.0001) { return true; } Resource maxAMResource = computeMaxAMResource(); getMetrics().setMaxAMShare(maxAMResource); Resource ifRunAMResource = Resources.add(amResourceUsage, amResource); return Resources.fitsIn(ifRunAMResource, maxAMResource); }
@Test public void testCanRunAppAMReturnsTrue() { conf.set(YarnConfiguration.RESOURCE_TYPES, CUSTOM_RESOURCE); ResourceUtils.resetResourceTypes(conf); resourceManager = new MockRM(conf); resourceManager.start(); scheduler = (FairScheduler) resourceManager.getResourceScheduler(); Resource maxShare = Resource.newInstance(1024 * 8, 4, ImmutableMap.of(CUSTOM_RESOURCE, 10L)); // Add a node to increase available memory and vcores in scheduler's // root queue metrics addNodeToScheduler(Resource.newInstance(4096, 10, ImmutableMap.of(CUSTOM_RESOURCE, 25L))); FSLeafQueue queue = setupQueue(maxShare); //Min(availableMemory, maxShareMemory (maxResourceOverridden)) // --> Min(4096, 8192) = 4096 //Min(availableVCores, maxShareVCores (maxResourceOverridden)) // --> Min(10, 4) = 4 //Min(available test1, maxShare test1 (maxResourceOverridden)) // --> Min(25, 10) = 10 //MaxAMResource: (4096 MB memory, 4 vcores, 10 test1) * MAX_AM_SHARE // --> 2048 MB memory, 2 vcores, 5 test1 Resource expectedAMShare = Resource.newInstance(2048, 2, ImmutableMap.of(CUSTOM_RESOURCE, 5L)); Resource appAMResource = Resource.newInstance(2048, 2, ImmutableMap.of(CUSTOM_RESOURCE, 3L)); Map<String, Long> customResourceValues = verifyQueueMetricsForCustomResources(queue); boolean result = queue.canRunAppAM(appAMResource); assertTrue("AM should have been allocated!", result); verifyAMShare(queue, expectedAMShare, customResourceValues); }
@SuppressWarnings("unchecked") public static void validateFormat(Object offsetData) { if (offsetData == null) return; if (!(offsetData instanceof Map)) throw new DataException("Offsets must be specified as a Map"); validateFormat((Map<Object, Object>) offsetData); }
@Test public void testValidateFormatMapWithNonPrimitiveKeys() { Map<Object, Object> offsetData = Collections.singletonMap("key", new Object()); DataException e = assertThrows(DataException.class, () -> OffsetUtils.validateFormat(offsetData)); assertThat(e.getMessage(), containsString("Offsets may only contain primitive types as values")); Map<Object, Object> offsetData2 = Collections.singletonMap("key", new ArrayList<>()); e = assertThrows(DataException.class, () -> OffsetUtils.validateFormat(offsetData2)); assertThat(e.getMessage(), containsString("Offsets may only contain primitive types as values")); }
void start(Iterable<ShardCheckpoint> checkpoints) { LOG.info( "Pool {} - starting for stream {} consumer {}. Checkpoints = {}", poolId, read.getStreamName(), consumerArn, checkpoints); for (ShardCheckpoint shardCheckpoint : checkpoints) { checkState( !state.containsKey(shardCheckpoint.getShardId()), "Duplicate shard id %s", shardCheckpoint.getShardId()); ShardState shardState = new ShardState( initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory); state.put(shardCheckpoint.getShardId(), shardState); } }
@Test public void poolReSubscribesAndReadsManyEvents() throws Exception { kinesis = new EFOStubbedKinesisAsyncClient(1); kinesis.stubSubscribeToShard("shard-000", eventsWithRecords(18, 300)); kinesis.stubSubscribeToShard("shard-000", eventsWithoutRecords(318, 3)); kinesis.stubSubscribeToShard("shard-001", eventsWithRecords(75, 200)); kinesis.stubSubscribeToShard("shard-001", eventsWithoutRecords(275, 3)); KinesisReaderCheckpoint initialCheckpoint = initialLatestCheckpoint(ImmutableList.of("shard-000", "shard-001")); pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis); pool.start(initialCheckpoint); PoolAssertion.assertPool(pool) .givesCheckPointedRecords( ShardAssertion.shard("shard-000") .gives(KinesisRecordView.generate("shard-000", 18, 300)) .withLastCheckpointSequenceNumber(320), ShardAssertion.shard("shard-001") .gives(KinesisRecordView.generate("shard-001", 75, 200)) .withLastCheckpointSequenceNumber(277)); assertThat(kinesis.subscribeRequestsSeen()) .containsExactlyInAnyOrder( subscribeLatest("shard-000"), subscribeLatest("shard-001"), subscribeAfterSeqNumber("shard-000", "317"), subscribeAfterSeqNumber("shard-001", "274"), subscribeAfterSeqNumber("shard-000", "320"), subscribeAfterSeqNumber("shard-001", "277")); }
@Override public void start() { try { createAndStartGrpcServer(); } catch (final IOException e) { throw new IllegalStateException("Failed to start the grpc server", e); } }
@Test void testGracefulShutdown() { // The server takes 2s seconds to shutdown final TestServer server = new TestServer(2000); when(this.factory.createServer()).thenReturn(server); // And we give it 5s to shutdown final GrpcServerLifecycle lifecycle = new GrpcServerLifecycle(this.factory, ofMillis(5000), this.eventPublisher); lifecycle.start(); verify(this.eventPublisher).publishEvent(ArgumentMatchers.any(GrpcServerStartedEvent.class)); assertFalse(server.isShutdown()); assertFalse(server.isTerminated()); // So it should finish within 5.1 seconds assertTimeout(ofMillis(5100), (Executable) lifecycle::stop); verify(this.eventPublisher).publishEvent(ArgumentMatchers.any(GrpcServerShutdownEvent.class)); verify(this.eventPublisher).publishEvent(ArgumentMatchers.any(GrpcServerTerminatedEvent.class)); assertTrue(server.isShutdown()); assertTrue(server.isTerminated()); }
@Override public RowData nextRecord(RowData reuse) { // return the next row row.setRowId(this.nextRow++); return row; }
@Test void testReadFileWithTypes(@TempDir File folder) throws IOException { String file = new File(folder, "testOrc").getPath(); int rowSize = 1024; prepareReadFileWithTypes(file, rowSize); // second test read. FileInputSplit split = createSplits(new Path(file), 1)[0]; int cnt = 0; Map<String, Object> partSpec = new HashMap<>(); partSpec.put("f5", true); partSpec.put("f6", new Date(562423)); partSpec.put("f7", LocalDateTime.of(1999, 1, 1, 1, 1)); partSpec.put("f8", 6.6); partSpec.put("f9", null); partSpec.put("f10", null); partSpec.put("f11", null); partSpec.put("f12", null); partSpec.put("f13", null); try (OrcColumnarRowSplitReader reader = createReader( new int[] {2, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, new DataType[] { DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.TIMESTAMP(), DataTypes.TINYINT(), DataTypes.SMALLINT(), DataTypes.BOOLEAN(), DataTypes.DATE(), DataTypes.TIMESTAMP(), DataTypes.DOUBLE(), DataTypes.DOUBLE(), DataTypes.INT(), DataTypes.STRING(), DataTypes.TIMESTAMP(), DataTypes.DECIMAL(5, 3) }, partSpec, split)) { // read and count all rows while (!reader.reachedEnd()) { RowData row = reader.nextRecord(null); if (cnt == rowSize - 1) { assertThat(row.isNullAt(0)).isTrue(); assertThat(row.isNullAt(1)).isTrue(); assertThat(row.isNullAt(2)).isTrue(); assertThat(row.isNullAt(3)).isTrue(); assertThat(row.isNullAt(4)).isTrue(); } else { assertThat(row.isNullAt(0)).isFalse(); assertThat(row.isNullAt(1)).isFalse(); assertThat(row.isNullAt(2)).isFalse(); assertThat(row.isNullAt(3)).isFalse(); assertThat(row.isNullAt(4)).isFalse(); assertThat(row.getTimestamp(0, 9)) .isEqualTo(TimestampData.fromTimestamp(toTimestamp(cnt))); assertThat(row.getFloat(1)).isEqualTo((float) cnt); assertThat(row.getDouble(2)).isEqualTo(cnt); assertThat(row.getByte(3)).isEqualTo((byte) cnt); assertThat(row.getShort(4)).isEqualTo((short) cnt); } assertThat(row.getBoolean(5)).isTrue(); assertThat(toSQLDate(row.getInt(6)).toString()) .isEqualTo(new Date(562423).toString()); assertThat(row.getTimestamp(7, 9).toLocalDateTime()) .isEqualTo(LocalDateTime.of(1999, 1, 1, 1, 1)); assertThat(row.getDouble(8)).isEqualTo(6.6); assertThat(row.isNullAt(9)).isTrue(); assertThat(row.isNullAt(10)).isTrue(); assertThat(row.isNullAt(11)).isTrue(); assertThat(row.isNullAt(12)).isTrue(); assertThat(row.isNullAt(13)).isTrue(); cnt++; } } // check that all rows have been read assertThat(cnt).isEqualTo(rowSize); }
@Override public double getMean() { if (values.length == 0) { return 0; } double sum = 0; for (long value : values) { sum += value; } return sum / values.length; }
@Test public void calculatesAMeanOfZeroForAnEmptySnapshot() { final Snapshot emptySnapshot = new UniformSnapshot(new long[]{}); assertThat(emptySnapshot.getMean()) .isZero(); }
static void filterProperties(Message message, Set<String> namesToClear) { List<Object> retainedProperties = messagePropertiesBuffer(); try { filterProperties(message, namesToClear, retainedProperties); } finally { retainedProperties.clear(); // ensure no object references are held due to any exception } }
@Test void filterProperties_message_handlesOnSetException() throws JMSException { Message message = mock(Message.class); when(message.getPropertyNames()).thenReturn( Collections.enumeration(Collections.singletonList("JMS_SQS_DeduplicationId"))); when(message.getObjectProperty("JMS_SQS_DeduplicationId")).thenReturn(""); doThrow(new IllegalArgumentException()).when(message).setObjectProperty(anyString(), eq("")); assertThatCode(() -> PropertyFilter.filterProperties(message, Collections.singleton("b3"))).doesNotThrowAnyException(); }
public OpenConfigChannelHandler addConfig(OpenConfigConfigOfChannelHandler config) { modelObject.config(config.getModelObject()); return this; }
@Test public void testAddConfig() { // test Handler OpenConfigChannelHandler channel = new OpenConfigChannelHandler(1, parent); // call addConfig OpenConfigConfigOfChannelHandler configOfChannel = new OpenConfigConfigOfChannelHandler(channel); // expected ModelObject DefaultChannel modelObject = new DefaultChannel(); modelObject.index(1); DefaultConfig config = new DefaultConfig(); modelObject.config(config); assertEquals("[NG]addConfig:ModelObject(Config added) is not an expected one.\n", modelObject, channel.getModelObject()); }
@VisibleForTesting protected void flattenMap(GenericRow record, List<String> columns) { for (String column : columns) { Object value = record.getValue(column); if (value instanceof Map) { Map<String, Object> map = (Map) value; List<String> mapColumns = new ArrayList<>(); for (Map.Entry<String, Object> entry : new ArrayList<>(map.entrySet())) { String flattenName = concat(column, entry.getKey()); Object nestedValue = entry.getValue(); record.putValue(flattenName, nestedValue); if (nestedValue instanceof Map || nestedValue instanceof Collection || isNonPrimitiveArray(nestedValue)) { mapColumns.add(flattenName); } } flattenMap(record, mapColumns); } else if (value instanceof Collection) { Collection collection = (Collection) value; if (_fieldsToUnnest.contains(column)) { for (Object inner : collection) { if (inner instanceof Map) { Map<String, Object> innerMap = (Map<String, Object>) inner; flattenMap(column, innerMap, new ArrayList<>(innerMap.keySet())); } } } else if (shallConvertToJson(collection)) { try { // convert the collection to JSON string String jsonString = JsonFunctions.jsonFormat(collection); record.putValue(column, jsonString); } catch (JsonProcessingException e) { throw new RuntimeException( String.format("Caught exception while converting value to JSON string %s", value), e); } } } else if (isNonPrimitiveArray(value)) { Object[] array = (Object[]) value; if (_fieldsToUnnest.contains(column)) { for (Object inner : array) { if (inner instanceof Map) { Map<String, Object> innerMap = (Map<String, Object>) inner; flattenMap(column, innerMap, new ArrayList<>(innerMap.keySet())); } } } else if (shallConvertToJson(array)) { try { // convert the array to JSON string String jsonString = JsonFunctions.jsonFormat(array); record.putValue(column, jsonString); } catch (JsonProcessingException e) { throw new RuntimeException( String.format("Caught exception while converting value to JSON string %s", value), e); } } } } }
@Test public void testFlattenMap() { ComplexTypeTransformer transformer = new ComplexTypeTransformer(new ArrayList<>(), "."); // test flatten root-level tuples GenericRow genericRow = new GenericRow(); genericRow.putValue("a", 1L); Map<String, Object> map1 = new HashMap<>(); genericRow.putValue("map1", map1); map1.put("b", "v"); Map<String, Object> innerMap1 = new HashMap<>(); innerMap1.put("aa", 2); innerMap1.put("bb", "u"); innerMap1.put("cc", new byte[]{1, 1}); map1.put("im1", innerMap1); Map<String, Object> map2 = new HashMap<>(); map2.put("c", 3); genericRow.putValue("map2", map2); transformer.transform(genericRow); Assert.assertEquals(genericRow.getValue("a"), 1L); Assert.assertEquals(genericRow.getValue("map1.b"), "v"); Assert.assertEquals(genericRow.getValue("map1.im1.aa"), 2); Assert.assertEquals(genericRow.getValue("map1.im1.bb"), "u"); Assert.assertEquals(genericRow.getValue("map1.im1.cc"), new byte[]{1, 1}); Assert.assertEquals(genericRow.getValue("map2.c"), 3); // test flattening the tuple inside the collection transformer = new ComplexTypeTransformer(Arrays.asList("l1"), "."); genericRow = new GenericRow(); List<Map<String, Object>> list1 = new ArrayList<>(); list1.add(map1); genericRow.putValue("l1", list1); List<Integer> list2 = new ArrayList<>(); list2.add(2); genericRow.putValue("l2", list2); transformer.flattenMap(genericRow, new ArrayList<>(genericRow.getFieldToValueMap().keySet())); Map<String, Object> map = (Map<String, Object>) ((Collection) genericRow.getValue("l1")).iterator().next(); Assert.assertEquals(map.get("b"), "v"); Assert.assertEquals(map.get("im1.aa"), 2); Assert.assertEquals(map.get("im1.bb"), "u"); // test overriding delimiter transformer = new ComplexTypeTransformer(Arrays.asList("l1"), "_"); genericRow = new GenericRow(); innerMap1 = new HashMap<>(); innerMap1.put("aa", 2); innerMap1.put("bb", "u"); map1 = new HashMap<>(); map1.put("im1", innerMap1); list1 = new ArrayList<>(); list1.add(map1); genericRow.putValue("l1", list1); transformer.flattenMap(genericRow, new ArrayList<>(genericRow.getFieldToValueMap().keySet())); map = (Map<String, Object>) ((Collection) genericRow.getValue("l1")).iterator().next(); Assert.assertEquals(map.get("im1_aa"), 2); Assert.assertEquals(map.get("im1_bb"), "u"); }
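The flattenMap method above concatenates nested map keys with the configured delimiter, as the map1.im1.aa assertions show. Purely as an illustration of that idea, here is a minimal self-contained sketch on plain JDK maps; the class and method names are hypothetical and this is not the Pinot implementation.

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class SimpleMapFlattener {
    // Recursively copies nested map entries into a flat map, joining keys with the delimiter.
    static Map<String, Object> flatten(String prefix, Map<String, Object> source, String delimiter) {
        Map<String, Object> flat = new LinkedHashMap<>();
        for (Map.Entry<String, Object> entry : source.entrySet()) {
            String key = prefix.isEmpty() ? entry.getKey() : prefix + delimiter + entry.getKey();
            Object value = entry.getValue();
            if (value instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> nested = (Map<String, Object>) value;
                flat.putAll(flatten(key, nested, delimiter));
            } else {
                flat.put(key, value);
            }
        }
        return flat;
    }

    public static void main(String[] args) {
        Map<String, Object> inner = new HashMap<>();
        inner.put("aa", 2);
        inner.put("bb", "u");
        Map<String, Object> map1 = new HashMap<>();
        map1.put("b", "v");
        map1.put("im1", inner);
        Map<String, Object> row = new HashMap<>();
        row.put("a", 1L);
        row.put("map1", map1);
        // Prints keys such as map1.im1.aa and map1.im1.bb, mirroring the assertions in the test above.
        System.out.println(flatten("", row, "."));
    }
}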
@Override public Object toConnectRow(final Object ksqlData) { if (!(ksqlData instanceof Struct)) { return ksqlData; } final Schema schema = getSchema(); final Struct struct = new Struct(schema); Struct originalData = (Struct) ksqlData; Schema originalSchema = originalData.schema(); if (originalSchema.name() == null && schema.name() != null) { originalSchema = AvroSchemas.getAvroCompatibleConnectSchema( originalSchema, schema.name() ); originalData = ConnectSchemas.withCompatibleRowSchema(originalData, originalSchema); } validate(originalSchema, schema); copyStruct(originalData, originalSchema, struct, schema); return struct; }
@Test public void shouldTransformStructWithNestedStructs() { // Given: final Schema innerStructSchemaWithoutOptional = getInnerStructSchema(false); final Schema innerStructSchemaWithOptional = getInnerStructSchema(true); Struct innerInnerStructWithOptional = getNestedData(innerStructSchemaWithOptional); Struct innerInnerStructWithoutOptional = getNestedData(innerStructSchemaWithoutOptional); final Schema structSchemaInnerWithOptional = getStructSchemaWithNestedStruct(innerStructSchemaWithOptional, true); final Schema structSchemaInnerWithOutOptional = getStructSchemaWithNestedStruct(innerStructSchemaWithoutOptional, false); // Physical Schema retrieved from SR final Schema schema = SchemaBuilder.struct() .field("string_field", SchemaBuilder.STRING_SCHEMA) .field("struct_field", structSchemaInnerWithOutOptional) .build(); // Logical Schema created by Ksql final Schema ORIGINAL_SCHEMA = SchemaBuilder.struct() .field("string_field", SchemaBuilder.OPTIONAL_STRING_SCHEMA) .field("struct_field", structSchemaInnerWithOptional) .optional() .build(); Struct innerStructWithoutOptional = getInnerStructData(structSchemaInnerWithOutOptional, innerInnerStructWithoutOptional); Struct innerStructWithOptional = getInnerStructData(structSchemaInnerWithOptional, innerInnerStructWithOptional); final Struct struct = new Struct(ORIGINAL_SCHEMA) .put("string_field", "abc") .put("struct_field", innerStructWithOptional); // When: final Object object = new AvroSRSchemaDataTranslator(schema).toConnectRow(struct); // Then: assertThat(object, instanceOf(Struct.class)); assertThat(((Struct) object).schema(), sameInstance(schema)); assertThat(((Struct) object).get("string_field"), is("abc")); assertThat(((Struct) object).get("struct_field"), equalTo(innerStructWithoutOptional)); }
private <T> T newPlugin(Class<T> klass) { // KAFKA-8340: The thread classloader is used during static initialization and must be // set to the plugin's classloader during instantiation try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { return Utils.newInstance(klass); } catch (Throwable t) { throw new ConnectException("Instantiation error", t); } }
@Test public void shouldThrowIfPluginMissingSuperclass() { assertThrows(ConnectException.class, () -> plugins.newPlugin( TestPlugin.BAD_PACKAGING_MISSING_SUPERCLASS.className(), new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class )); }
Flux<List<ServiceInstance>> doRouter(Flux<List<ServiceInstance>> allServers, PolarisRouterContext routerContext) { ServiceInstances serviceInstances = RouterUtils.transferServersToServiceInstances(allServers, instanceTransformer); List<ServiceInstance> filteredInstances = new ArrayList<>(); if (serviceInstances.getInstances().size() > 0) { // filter instance by routers ProcessRoutersRequest processRoutersRequest = buildProcessRoutersRequest(serviceInstances, routerContext); // process request interceptors processRouterRequestInterceptors(processRoutersRequest, routerContext); // process router chain ProcessRoutersResponse processRoutersResponse = routerAPI.processRouters(processRoutersRequest); // process response interceptors processRouterResponseInterceptors(routerContext, processRoutersResponse); // transfer polaris server to ServiceInstance ServiceInstances filteredServiceInstances = processRoutersResponse.getServiceInstances(); for (Instance instance : filteredServiceInstances.getInstances()) { filteredInstances.add(new PolarisServiceInstance(instance)); } } return Flux.fromIterable(Collections.singletonList(filteredInstances)); }
@Test public void testRouter() { when(polarisRuleBasedRouterProperties.isEnabled()).thenReturn(true); try (MockedStatic<ApplicationContextAwareUtils> mockedApplicationContextAwareUtils = Mockito.mockStatic(ApplicationContextAwareUtils.class)) { mockedApplicationContextAwareUtils.when(() -> ApplicationContextAwareUtils.getProperties(anyString())) .thenReturn(testCallerService); setTransitiveMetadata(); PolarisRouterServiceInstanceListSupplier polarisSupplier = new PolarisRouterServiceInstanceListSupplier( delegate, routerAPI, requestInterceptors, Collections.singletonList(new TestRouterResponseInterceptor()), new PolarisInstanceTransformer()); ProcessRoutersResponse assembleResponse = assembleProcessRoutersResponse(); when(routerAPI.processRouters(any())).thenReturn(assembleResponse); Flux<List<ServiceInstance>> servers = polarisSupplier.doRouter(assembleServers(), assembleRouterContext()); assertThat(servers.toStream().mapToLong(List::size).sum()).isEqualTo(assembleResponse.getServiceInstances() .getInstances().size()); } }
public static Object typeConvert(String tableName ,String columnName, String value, int sqlType, String mysqlType) { if (value == null || (value.equals("") && !(isText(mysqlType) || sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.LONGVARCHAR))) { return null; } try { Object res; switch (sqlType) { case Types.INTEGER: res = Integer.parseInt(value); break; case Types.SMALLINT: res = Short.parseShort(value); break; case Types.BIT: case Types.TINYINT: res = Byte.parseByte(value); break; case Types.BIGINT: if (mysqlType.startsWith("bigint") && mysqlType.endsWith("unsigned")) { res = new BigInteger(value); } else { res = Long.parseLong(value); } break; // case Types.BIT: case Types.BOOLEAN: res = !"0".equals(value); break; case Types.DOUBLE: case Types.FLOAT: res = Double.parseDouble(value); break; case Types.REAL: res = Float.parseFloat(value); break; case Types.DECIMAL: case Types.NUMERIC: res = new BigDecimal(value); break; case Types.BINARY: case Types.VARBINARY: case Types.LONGVARBINARY: case Types.BLOB: res = value.getBytes("ISO-8859-1"); break; case Types.DATE: if (!value.startsWith("0000-00-00")) { java.util.Date date = Util.parseDate(value); if (date != null) { res = new Date(date.getTime()); } else { res = null; } } else { res = null; } break; case Types.TIME: { java.util.Date date = Util.parseDate(value); if (date != null) { res = new Time(date.getTime()); } else { res = null; } break; } case Types.TIMESTAMP: if (!value.startsWith("0000-00-00")) { java.util.Date date = Util.parseDate(value); if (date != null) { res = new Timestamp(date.getTime()); } else { res = null; } } else { res = null; } break; case Types.CLOB: default: res = value; break; } return res; } catch (Exception e) { logger.error("table: {} column: {}, failed convert type {} to {}", tableName, columnName, value, sqlType); return value; } }
@Test public void typeConvertInputNotNullNotNullNotNullNegativeNotNullOutputPositive() { // Arrange final String tableName = "foo"; final String columnName = "foo"; final String value = "1234"; final int sqlType = -5; final String mysqlType = "foo"; // Act final Object actual = JdbcTypeUtil.typeConvert(tableName, columnName, value, sqlType, mysqlType); // Assert result Assert.assertEquals(1234L, actual); }
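The generated test passes sqlType -5, which is java.sql.Types.BIGINT, so the converter takes the bigint branch and returns a Long unless the MySQL type is an unsigned bigint. A rough standalone sketch of just that branch, assuming only the JDK; the class name is made up.

import java.math.BigInteger;
import java.sql.Types;

public class BigintConvertSketch {
    // Mirrors the BIGINT branch: unsigned bigint columns need BigInteger, signed ones fit in a Long.
    static Object convertBigint(String value, String mysqlType) {
        if (mysqlType.startsWith("bigint") && mysqlType.endsWith("unsigned")) {
            return new BigInteger(value);
        }
        return Long.parseLong(value);
    }

    public static void main(String[] args) {
        System.out.println(Types.BIGINT);                 // -5, the sqlType used in the test
        System.out.println(convertBigint("1234", "foo")); // 1234 as a Long
        System.out.println(convertBigint("18446744073709551615", "bigint unsigned")); // BigInteger value
    }
}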
public static FactoryBuilder newFactoryBuilder(Propagation.Factory delegate) { return new FactoryBuilder(delegate); }
@Test void newFactory_noFields() { assertThat(BaggagePropagation.newFactoryBuilder(B3Propagation.FACTORY).build()) .isSameAs(B3Propagation.FACTORY); }
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal number) { if ( number == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "number", "cannot be null")); } return FEELFnResult.ofResult( number.abs() ); }
@Test void absFunctionNumber() { FunctionTestUtil.assertResult(absFunction.invoke(valueOf(10)), valueOf(10)); FunctionTestUtil.assertResult(absFunction.invoke(valueOf(-10)), valueOf(10)); FunctionTestUtil.assertResultError(absFunction.invoke((BigDecimal) null), InvalidParametersEvent.class); }
public List<MetadataLogEntry> previousFiles() { return previousFiles; }
@Test public void testJsonWithPreviousMetadataLog() throws Exception { long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600); String manifestList = createManifestListWithManifestFile(previousSnapshotId, null, "file:/tmp/manifest1.avro"); Snapshot previousSnapshot = new BaseSnapshot( 0, previousSnapshotId, null, previousSnapshotId, null, null, null, manifestList); long currentSnapshotId = System.currentTimeMillis(); manifestList = createManifestListWithManifestFile( currentSnapshotId, previousSnapshotId, "file:/tmp/manifest2.avro"); Snapshot currentSnapshot = new BaseSnapshot( 0, currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, null, manifestList); List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList(); long currentTimestamp = System.currentTimeMillis(); List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList(); previousMetadataLog.add( new MetadataLogEntry( currentTimestamp, "/tmp/000001-" + UUID.randomUUID() + ".metadata.json")); TableMetadata base = new TableMetadata( null, 1, UUID.randomUUID().toString(), TEST_LOCATION, 0, System.currentTimeMillis(), 3, 7, ImmutableList.of(TEST_SCHEMA), 5, ImmutableList.of(SPEC_5), SPEC_5.lastAssignedFieldId(), 3, ImmutableList.of(SORT_ORDER_3), ImmutableMap.of("property", "value"), currentSnapshotId, Arrays.asList(previousSnapshot, currentSnapshot), null, reversedSnapshotLog, ImmutableList.copyOf(previousMetadataLog), ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), ImmutableList.of()); String asJson = TableMetadataParser.toJson(base); TableMetadata metadataFromJson = TableMetadataParser.fromJson(asJson); assertThat(metadataFromJson.previousFiles()).isEqualTo(previousMetadataLog); }
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { if(!session.getClient().setFileType(FTP.BINARY_FILE_TYPE)) { throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString()); } if(status.isAppend()) { session.getClient().setRestartOffset(status.getOffset()); } final InputStream in = new DataConnectionActionExecutor(session).data(new DataConnectionAction<InputStream>() { @Override public InputStream execute() throws BackgroundException { try { return session.getClient().retrieveFileStream(file.getAbsolute()); } catch(IOException e) { throw new FTPExceptionMappingService().map(e); } } }); return new ReadReplyInputStream(in, status); } catch(IOException e) { throw new FTPExceptionMappingService().map("Download {0} failed", e, file); } }
@Test public void testDoubleCloseStream() throws Exception { final Path file = new Path(new FTPWorkdirService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new DefaultTouchFeature<>(new FTPWriteFeature(session)).touch(file, new TransferStatus()); final TransferStatus status = new TransferStatus(); status.setLength(5L); final Path workdir = new FTPWorkdirService(session).find(); final InputStream in = new FTPReadFeature(session).read(file, status, new DisabledConnectionCallback()); assertNotNull(in); // Read 226 reply in.close(); // Read timeout in.close(); new FTPDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public String getActiveVersionNodePath() { return String.join("/", key, ACTIVE_VERSION); }
@Test void assertGetActiveVersionNodePath() { assertThat(new MetaDataVersion("foo", "0", "1").getActiveVersionNodePath(), is("foo/active_version")); }
String toLogMessage(TbMsg msg) { return "\n" + "Incoming message:\n" + msg.getData() + "\n" + "Incoming metadata:\n" + JacksonUtil.toString(msg.getMetaData().getData()); }
@Test void givenMsg_whenToLog_thenReturnString() { TbLogNode node = new TbLogNode(); String data = "{\"key\": \"value\"}"; TbMsgMetaData metaData = new TbMsgMetaData(Map.of("mdKey1", "mdValue1", "mdKey2", "23")); TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID, metaData, data); String logMessage = node.toLogMessage(msg); log.info(logMessage); assertThat(logMessage).isEqualTo("\n" + "Incoming message:\n" + "{\"key\": \"value\"}\n" + "Incoming metadata:\n" + "{\"mdKey1\":\"mdValue1\",\"mdKey2\":\"23\"}"); }
public static <T> boolean isNullOrEmpty(Collection<T> collection) { if (collection == null) return true; return collection.isEmpty(); }
@Test void isNullOrEmptyIsTrueForEmptyArray() { assertThat(isNullOrEmpty(new String[]{})).isTrue(); }
@Override public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) { return execute(commands, command -> { Assert.notNull(command.getKey(), "Key must not be null!"); Assert.notNull(command.getNewKey(), "New name must not be null!"); byte[] keyBuf = toByteArray(command.getKey()); byte[] newKeyBuf = toByteArray(command.getNewKey()); if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) { return super.rename(commands); } return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf) .filter(Objects::nonNull) .zipWith( Mono.defer(() -> pTtl(command.getKey()) .filter(Objects::nonNull) .map(ttl -> Math.max(0, ttl)) .switchIfEmpty(Mono.just(0L)) ) ) .flatMap(valueAndTtl -> { return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1()); }) .thenReturn(new BooleanResponse<>(command, true)) .doOnSuccess((ignored) -> del(command.getKey())); }); }
@Test public void testRename_keyNotExist() { testInClusterReactive(connection -> { Integer originalSlot = getSlotForKey(originalKey, (RedissonReactiveRedisClusterConnection) connection); newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot), connection); if (sameSlot) { // This is a quirk of the implementation - since same-slot renames use the non-cluster version, // the result is a Redis error. This behavior matches other spring-data-redis implementations assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block()) .isInstanceOf(RedisSystemException.class); } else { Boolean response = connection.keyCommands().rename(originalKey, newKey).block(); assertThat(response).isTrue(); final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block(); assertThat(newKeyValue).isEqualTo(null); } }); }
public static ExecutableStage forGrpcPortRead( QueryablePipeline pipeline, PipelineNode.PCollectionNode inputPCollection, Set<PipelineNode.PTransformNode> initialNodes) { checkArgument( !initialNodes.isEmpty(), "%s must contain at least one %s.", GreedyStageFuser.class.getSimpleName(), PipelineNode.PTransformNode.class.getSimpleName()); // Choose the environment from an arbitrary node. The initial nodes may not be empty for this // subgraph to make any sense, there has to be at least one processor node // (otherwise the stage is gRPC Read -> gRPC Write, which doesn't do anything). Environment environment = getStageEnvironment(pipeline, initialNodes); ImmutableSet.Builder<PipelineNode.PTransformNode> fusedTransforms = ImmutableSet.builder(); fusedTransforms.addAll(initialNodes); Set<SideInputReference> sideInputs = new LinkedHashSet<>(); Set<UserStateReference> userStates = new LinkedHashSet<>(); Set<TimerReference> timers = new LinkedHashSet<>(); Set<PipelineNode.PCollectionNode> fusedCollections = new LinkedHashSet<>(); Set<PipelineNode.PCollectionNode> materializedPCollections = new LinkedHashSet<>(); Queue<PipelineNode.PCollectionNode> fusionCandidates = new ArrayDeque<>(); for (PipelineNode.PTransformNode initialConsumer : initialNodes) { fusionCandidates.addAll(pipeline.getOutputPCollections(initialConsumer)); sideInputs.addAll(pipeline.getSideInputs(initialConsumer)); userStates.addAll(pipeline.getUserStates(initialConsumer)); timers.addAll(pipeline.getTimers(initialConsumer)); } while (!fusionCandidates.isEmpty()) { PipelineNode.PCollectionNode candidate = fusionCandidates.poll(); if (fusedCollections.contains(candidate) || materializedPCollections.contains(candidate)) { // This should generally mean we get to a Flatten via multiple paths through the graph and // we've already determined what to do with the output. LOG.debug( "Skipping fusion candidate {} because it is {} in this {}", candidate, fusedCollections.contains(candidate) ? "fused" : "materialized", ExecutableStage.class.getSimpleName()); continue; } PCollectionFusibility fusibility = canFuse(pipeline, candidate, environment, fusedCollections); switch (fusibility) { case MATERIALIZE: materializedPCollections.add(candidate); break; case FUSE: // All of the consumers of the candidate PCollection can be fused into this stage. Do so. fusedCollections.add(candidate); fusedTransforms.addAll(pipeline.getPerElementConsumers(candidate)); for (PipelineNode.PTransformNode consumer : pipeline.getPerElementConsumers(candidate)) { // The outputs of every transform fused into this stage must be either materialized or // themselves fused away, so add them to the set of candidates. fusionCandidates.addAll(pipeline.getOutputPCollections(consumer)); sideInputs.addAll(pipeline.getSideInputs(consumer)); } break; default: throw new IllegalStateException( String.format( "Unknown type of %s %s", PCollectionFusibility.class.getSimpleName(), fusibility)); } } return ImmutableExecutableStage.ofFullComponents( pipeline.getComponents(), environment, inputPCollection, sideInputs, userStates, timers, fusedTransforms.build(), materializedPCollections, ExecutableStage.DEFAULT_WIRE_CODER_SETTINGS); }
@Test public void userStateIncludedInStage() { Environment env = Environments.createDockerEnvironment("common"); PTransform readTransform = PTransform.newBuilder() .putInputs("input", "impulse.out") .putOutputs("output", "read.out") .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN) .setPayload( ParDoPayload.newBuilder() .setDoFn(FunctionSpec.newBuilder()) .build() .toByteString())) .setEnvironmentId("common") .build(); PTransform parDoTransform = PTransform.newBuilder() .putInputs("input", "read.out") .putOutputs("output", "parDo.out") .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN) .setPayload( ParDoPayload.newBuilder() .setDoFn(FunctionSpec.newBuilder()) .putStateSpecs("state_spec", StateSpec.getDefaultInstance()) .build() .toByteString())) .setEnvironmentId("common") .build(); PCollection userStateMainInputPCollection = PCollection.newBuilder().setUniqueName("read.out").build(); QueryablePipeline p = QueryablePipeline.forPrimitivesIn( partialComponents .toBuilder() .putTransforms("read", readTransform) .putPcollections("read.out", userStateMainInputPCollection) .putTransforms( "user_state", PTransform.newBuilder() .putInputs("input", "impulse.out") .putOutputs("output", "user_state.out") .build()) .putPcollections( "user_state.out", PCollection.newBuilder().setUniqueName("user_state.out").build()) .putTransforms("parDo", parDoTransform) .putPcollections( "parDo.out", PCollection.newBuilder().setUniqueName("parDo.out").build()) .putEnvironments("common", env) .build()); PCollectionNode readOutput = getOnlyElement(p.getOutputPCollections(PipelineNode.pTransform("read", readTransform))); ExecutableStage subgraph = GreedyStageFuser.forGrpcPortRead( p, readOutput, ImmutableSet.of(PipelineNode.pTransform("parDo", parDoTransform))); PTransformNode parDoNode = PipelineNode.pTransform("parDo", parDoTransform); UserStateReference userStateRef = UserStateReference.of( parDoNode, "state_spec", PipelineNode.pCollection("read.out", userStateMainInputPCollection)); assertThat(subgraph.getUserStates(), contains(userStateRef)); assertThat(subgraph.getOutputPCollections(), emptyIterable()); }
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } boolean result = false; boolean containsNull = false; // Spec. definition: return true if any item is true, else false if all items are false, else null for ( final Object element : list ) { if (element != null && !(element instanceof Boolean)) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean")); } else { if (element != null) { result |= (Boolean) element; } else if (!containsNull) { containsNull = true; } } } if (containsNull && !result) { return FEELFnResult.ofResult( null ); } else { return FEELFnResult.ofResult( result ); } }
@Test void invokeListParamNull() { FunctionTestUtil.assertResultError(anyFunction.invoke((List) null), InvalidParametersEvent.class); }
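The any() implementation above follows FEEL's three-valued logic: true if any element is true, false if all are false, and null when the list mixes false with null. A minimal sketch of that rule on a plain List<Boolean>, for illustration only; the helper name is not part of the Drools API.

import java.util.Arrays;
import java.util.List;

public class ThreeValuedAnySketch {
    // Returns TRUE if any element is true, FALSE if all are false, null if nulls are present and nothing is true.
    static Boolean threeValuedAny(List<Boolean> list) {
        boolean result = false;
        boolean containsNull = false;
        for (Boolean element : list) {
            if (element == null) {
                containsNull = true;
            } else {
                result |= element;
            }
        }
        return (containsNull && !result) ? null : result;
    }

    public static void main(String[] args) {
        System.out.println(threeValuedAny(Arrays.asList(false, true, null))); // true
        System.out.println(threeValuedAny(Arrays.asList(false, false)));      // false
        System.out.println(threeValuedAny(Arrays.asList(false, null)));       // null
    }
}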
static void appendCharacterAsPrintFriendlyString(StringBuilder builder, char c) { if (CHARACTER_REPLACEMENTS.containsKey(c)) { builder.append(CHARACTER_REPLACEMENTS.get(c)); } else { builder.append(c); } }
@Test public void testAppendCharacterAsPrintFriendlyString() { StringBuilder builder = null; try { Hl7Util.appendCharacterAsPrintFriendlyString(builder, 'a'); fail("Exception should be raised with null StringBuilder argument"); } catch (NullPointerException ignoredEx) { // Eat this } builder = new StringBuilder(); Hl7Util.appendCharacterAsPrintFriendlyString(builder, MllpProtocolConstants.START_OF_BLOCK); assertEquals("<0x0B VT>", builder.toString()); builder = new StringBuilder(); Hl7Util.appendCharacterAsPrintFriendlyString(builder, MllpProtocolConstants.END_OF_BLOCK); assertEquals("<0x1C FS>", builder.toString()); builder = new StringBuilder(); Hl7Util.appendCharacterAsPrintFriendlyString(builder, MllpProtocolConstants.SEGMENT_DELIMITER); assertEquals("<0x0D CR>", builder.toString()); builder = new StringBuilder(); Hl7Util.appendCharacterAsPrintFriendlyString(builder, MllpProtocolConstants.MESSAGE_TERMINATOR); assertEquals("<0x0A LF>", builder.toString()); }
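appendCharacterAsPrintFriendlyString consults a replacement table for the MLLP framing characters, and the test pins the expected tokens such as <0x0B VT> and <0x1C FS>. As a self-contained sketch of that lookup, assuming the usual MLLP byte values, one might write the following; the map and constants here are hypothetical, not the camel-mllp ones.

import java.util.HashMap;
import java.util.Map;

public class PrintFriendlySketch {
    // Hypothetical replacement table for non-printable framing characters, keyed by the raw char value.
    static final Map<Character, String> REPLACEMENTS = new HashMap<>();
    static {
        REPLACEMENTS.put((char) 0x0B, "<0x0B VT>"); // start of block
        REPLACEMENTS.put((char) 0x1C, "<0x1C FS>"); // end of block
        REPLACEMENTS.put((char) 0x0D, "<0x0D CR>"); // segment delimiter
        REPLACEMENTS.put((char) 0x0A, "<0x0A LF>"); // message terminator
    }

    static void appendPrintFriendly(StringBuilder builder, char c) {
        builder.append(REPLACEMENTS.getOrDefault(c, String.valueOf(c)));
    }

    public static void main(String[] args) {
        StringBuilder builder = new StringBuilder();
        appendPrintFriendly(builder, (char) 0x0B);
        appendPrintFriendly(builder, 'a');
        System.out.println(builder); // <0x0B VT>a
    }
}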
public void cleanRuleDataSelf(final List<RuleData> ruleDataList) { ruleDataList.forEach(this::removeRuleData); }
@Test public void testCleanRuleDataSelf() throws NoSuchFieldException, IllegalAccessException { RuleData firstCachedRuleData = RuleData.builder().id("1").selectorId(mockSelectorId1).build(); RuleData secondCachedRuleData = RuleData.builder().id("2").selectorId(mockSelectorId2).build(); ConcurrentHashMap<String, List<RuleData>> ruleMap = getFieldByName(ruleMapStr); ruleMap.put(mockSelectorId1, Lists.newArrayList(firstCachedRuleData)); ruleMap.put(mockSelectorId2, Lists.newArrayList(secondCachedRuleData)); BaseDataCache.getInstance().cleanRuleDataSelf(Lists.newArrayList(firstCachedRuleData)); assertEquals(Lists.newArrayList(), ruleMap.get(mockSelectorId1)); assertEquals(Lists.newArrayList(secondCachedRuleData), ruleMap.get(mockSelectorId2)); }
public void close() throws IOException { try { closeAsync().get(); } catch (ExecutionException e) { if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); } else { throw new PulsarServerException(e.getCause()); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); } }
@Test public void testTlsEnabled() throws Exception { final String topicName = "persistent://prop/ns-abc/newTopic"; final String subName = "newSub"; conf.setAuthenticationEnabled(false); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setNumExecutorThreadPoolSize(5); restartBroker(); // Case 1: Access without TLS PulsarClient pulsarClient = null; try { pulsarClient = PulsarClient.builder().serviceUrl(brokerUrl.toString()).statsInterval(0, TimeUnit.SECONDS) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); @Cleanup Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) .subscribe(); } catch (Exception e) { fail("should not fail"); } finally { pulsarClient.close(); } // Case 2: Access with TLS (Allow insecure TLS connection) try { pulsarClient = PulsarClient.builder().serviceUrl(brokerUrlTls.toString()).enableTls(true) .allowTlsInsecureConnection(true).statsInterval(0, TimeUnit.SECONDS) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); @Cleanup Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) .subscribe(); } catch (Exception e) { fail("should not fail"); } finally { pulsarClient.close(); } // Case 3: Access with TLS (Disallow insecure TLS connection) try { pulsarClient = PulsarClient.builder().serviceUrl(brokerUrlTls.toString()).enableTls(true) .allowTlsInsecureConnection(false).statsInterval(0, TimeUnit.SECONDS) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); @Cleanup Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) .subscribe(); fail("should fail"); } catch (Exception e) { assertTrue(e.getMessage().contains("unable to find valid certification path to requested target")); } finally { pulsarClient.close(); } // Case 4: Access with TLS (Use trusted certificates) try { pulsarClient = PulsarClient.builder().serviceUrl(brokerUrlTls.toString()).enableTls(true) .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(BROKER_CERT_FILE_PATH) .statsInterval(0, TimeUnit.SECONDS) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); @Cleanup Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) .subscribe(); } catch (Exception e) { fail("should not fail"); } finally { pulsarClient.close(); } }
@Override public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo, List<String> partNames, boolean areAllPartsFound) throws MetaException { checkStatisticsList(colStatsWithSourceInfo); ColumnStatisticsObj statsObj = null; String colType; String colName = null; // check if all the ColumnStatisticsObjs contain stats and all the ndv are // bitvectors boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size(); NumDistinctValueEstimator ndvEstimator = null; boolean areAllNDVEstimatorsMergeable = true; for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); if (statsObj == null) { colName = cso.getColName(); colType = cso.getColType(); statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField()); LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats); } TimestampColumnStatsDataInspector columnStatsData = timestampInspectorFromStats(cso); // check if we can merge NDV estimators if (columnStatsData.getNdvEstimator() == null) { areAllNDVEstimatorsMergeable = false; break; } else { NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator(); if (ndvEstimator == null) { ndvEstimator = estimator; } else { if (!ndvEstimator.canMerge(estimator)) { areAllNDVEstimatorsMergeable = false; break; } } } } if (areAllNDVEstimatorsMergeable && ndvEstimator != null) { ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); } LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable); ColumnStatisticsData columnStatisticsData = initColumnStatisticsData(); if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) { TimestampColumnStatsDataInspector aggregateData = null; long lowerBound = 0; long higherBound = 0; double densityAvgSum = 0.0; TimestampColumnStatsMerger merger = new TimestampColumnStatsMerger(); for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(cso); lowerBound = Math.max(lowerBound, newData.getNumDVs()); higherBound += newData.getNumDVs(); if (newData.isSetLowValue() && newData.isSetHighValue()) { densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs(); } if (areAllNDVEstimatorsMergeable && ndvEstimator != null) { ndvEstimator.mergeEstimators(newData.getNdvEstimator()); } if (aggregateData == null) { aggregateData = newData.deepCopy(); } else { aggregateData.setLowValue(merger.mergeLowValue( merger.getLowValue(aggregateData), merger.getLowValue(newData))); aggregateData.setHighValue(merger.mergeHighValue( merger.getHighValue(aggregateData), merger.getHighValue(newData))); aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls())); aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs())); } } if (areAllNDVEstimatorsMergeable && ndvEstimator != null) { // if all the ColumnStatisticsObjs contain bitvectors, we do not need to // use uniform distribution assumption because we can merge bitvectors // to get a good estimation. 
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); } else { long estimation; if (useDensityFunctionForNDVEstimation && aggregateData != null && aggregateData.isSetLowValue() && aggregateData.isSetHighValue() ) { // We have estimation, lowerbound and higherbound. We use estimation // if it is between lowerbound and higherbound. double densityAvg = densityAvgSum / partNames.size(); estimation = (long) (diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / densityAvg); if (estimation < lowerBound) { estimation = lowerBound; } else if (estimation > higherBound) { estimation = higherBound; } } else { estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); } aggregateData.setNumDVs(estimation); } columnStatisticsData.setTimestampStats(aggregateData); } else { // TODO: bail out if missing stats are over a certain threshold // we need extrapolation LOG.debug("start extrapolation for {}", colName); Map<String, Integer> indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } Map<String, Double> adjustedIndexMap = new HashMap<>(); Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higherbound when useDensityFunctionForNDVEstimation is true. double densityAvgSum = 0.0; if (!areAllNDVEstimatorsMergeable) { // if not every partition uses bitvector for ndv, we just fall back to // the traditional extrapolation methods. for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); String partName = csp.getPartName(); TimestampColumnStatsData newData = cso.getStatsData().getTimestampStats(); if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) { densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs(); } adjustedIndexMap.put(partName, (double) indexMap.get(partName)); adjustedStatsMap.put(partName, cso.getStatsData()); } } else { // we first merge all the adjacent bitvectors that we could merge and // derive new partition names and index. StringBuilder pseudoPartName = new StringBuilder(); double pseudoIndexSum = 0; int length = 0; int curIndex = -1; TimestampColumnStatsDataInspector aggregateData = null; for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); String partName = csp.getPartName(); TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(cso); // newData.isSetBitVectors() should be true for sure because we // already checked it before. if (indexMap.get(partName) != curIndex) { // There is bitvector, but it is not adjacent to the previous ones. 
if (length > 0) { // we have to set ndv adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); ColumnStatisticsData csd = new ColumnStatisticsData(); csd.setTimestampStats(aggregateData); adjustedStatsMap.put(pseudoPartName.toString(), csd); if (useDensityFunctionForNDVEstimation) { densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue())) / aggregateData.getNumDVs(); } // reset everything pseudoPartName = new StringBuilder(); pseudoIndexSum = 0; length = 0; ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); } aggregateData = null; } curIndex = indexMap.get(partName); pseudoPartName.append(partName); pseudoIndexSum += curIndex; length++; curIndex++; if (aggregateData == null) { aggregateData = newData.deepCopy(); } else { aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue())); aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue())); aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); } ndvEstimator.mergeEstimators(newData.getNdvEstimator()); } if (length > 0) { // we have to set ndv adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); ColumnStatisticsData csd = new ColumnStatisticsData(); csd.setTimestampStats(aggregateData); adjustedStatsMap.put(pseudoPartName.toString(), csd); if (useDensityFunctionForNDVEstimation) { densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue())) / aggregateData.getNumDVs(); } } } extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(), adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); } LOG.debug( "Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}", colName, columnStatisticsData.getTimestampStats().getNumDVs(), partNames.size(), colStatsWithSourceInfo.size()); KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo); if (mergedKllHistogramEstimator != null) { columnStatisticsData.getTimestampStats().setHistogram(mergedKllHistogramEstimator.serialize()); } statsObj.setStatsData(columnStatisticsData); return statsObj; }
@Test public void testAggregateMultipleStatsWhenSomeNullValues() throws MetaException { List<String> partitions = Arrays.asList("part1", "part2"); long[] values1 = { TS_1.getSecondsSinceEpoch(), TS_2.getSecondsSinceEpoch() }; ColumnStatisticsData data1 = new ColStatsBuilder<>(Timestamp.class).numNulls(1).numDVs(2) .low(TS_1).high(TS_2).hll(values1).kll(values1).build(); ColumnStatisticsData data2 = new ColStatsBuilder<>(Timestamp.class).numNulls(2).numDVs(3).build(); List<ColStatsObjWithSourceInfo> statsList = Arrays.asList( createStatsWithInfo(data1, TABLE, COL, partitions.get(0)), createStatsWithInfo(data2, TABLE, COL, partitions.get(1))); TimestampColumnStatsAggregator aggregator = new TimestampColumnStatsAggregator(); ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true); ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Timestamp.class).numNulls(3).numDVs(3) .low(TS_1).high(TS_2).hll(values1).kll(values1).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); aggregator.useDensityFunctionForNDVEstimation = true; computedStatsObj = aggregator.aggregate(statsList, partitions, true); expectedStats = new ColStatsBuilder<>(Timestamp.class).numNulls(3).numDVs(4) .low(TS_1).high(TS_2).hll(values1).kll(values1).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); aggregator.useDensityFunctionForNDVEstimation = false; aggregator.ndvTuner = 1; computedStatsObj = aggregator.aggregate(statsList, partitions, true); expectedStats = new ColStatsBuilder<>(Timestamp.class).numNulls(3).numDVs(5) .low(TS_1).high(TS_2).hll(values1).kll(values1).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); }
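When useDensityFunctionForNDVEstimation is on, the aggregator clamps diff(high, low) / densityAvg into the range [lowerBound, higherBound], where lowerBound is the largest per-partition NDV and higherBound is their sum. A small arithmetic sketch of that clamping with made-up numbers, purely to illustrate the formula.

public class NdvEstimationSketch {
    // Clamps the density-based estimate into [lowerBound, higherBound], as the aggregator does.
    static long estimateNdv(long range, double densityAvg, long lowerBound, long higherBound) {
        long estimation = (long) (range / densityAvg);
        if (estimation < lowerBound) {
            return lowerBound;
        }
        if (estimation > higherBound) {
            return higherBound;
        }
        return estimation;
    }

    public static void main(String[] args) {
        // Two partitions with NDVs 2 and 3: lowerBound = max(2, 3) = 3, higherBound = 2 + 3 = 5.
        System.out.println(estimateNdv(100, 25.0, 3, 5)); // 100 / 25 = 4, already inside [3, 5]
        System.out.println(estimateNdv(100, 50.0, 3, 5)); // 2 is clamped up to the lower bound 3
        System.out.println(estimateNdv(100, 10.0, 3, 5)); // 10 is clamped down to the higher bound 5
    }
}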
private Map<String, Object> getProperties(final Map<String, Object> props) { Map<String, Object> result = new LinkedHashMap<>(props.size(), 1F); for (Entry<String, Object> entry : props.entrySet()) { if (!entry.getKey().contains(".")) { result.put(entry.getKey(), entry.getValue()); continue; } String[] complexKeys = entry.getKey().split("\\."); if (2 != complexKeys.length) { result.put(entry.getKey(), entry.getValue()); continue; } if (!result.containsKey(complexKeys[0])) { result.put(complexKeys[0], new Properties()); } ((Properties) result.get(complexKeys[0])).setProperty(complexKeys[1], entry.getValue().toString()); } return result; }
@Test void assertGetProperties() { Map<String, Object> actual = new CustomDataSourcePoolProperties( createProperties(), Arrays.asList("username", "password", "closed"), Collections.singletonList("closed"), Collections.singletonMap("username", "user")).getProperties(); assertThat(actual.size(), is(3)); assertThat(actual.get("foo"), is("bar")); assertThat(((Properties) actual.get("fooProperties")).size(), is(2)); assertThat(((Properties) actual.get("fooProperties")).getProperty("foo1"), is("fooValue1")); assertThat(((Properties) actual.get("fooProperties")).getProperty("foo2"), is("fooValue2")); assertThat(((Properties) actual.get("barProperties")).size(), is(2)); assertThat(((Properties) actual.get("barProperties")).getProperty("bar1"), is("barValue1")); assertThat(((Properties) actual.get("barProperties")).getProperty("bar2"), is("barValue2")); }
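getProperties groups keys containing exactly one dot into a nested Properties object named after the prefix and keeps every other key flat, which is what the fooProperties and barProperties assertions verify. A standalone sketch of that grouping on plain JDK types; the names are illustrative.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

public class DottedKeyGroupingSketch {
    // Keys of the form "prefix.suffix" are grouped under a Properties value named by the prefix.
    static Map<String, Object> group(Map<String, Object> props) {
        Map<String, Object> result = new LinkedHashMap<>();
        for (Map.Entry<String, Object> entry : props.entrySet()) {
            String[] parts = entry.getKey().split("\\.");
            if (parts.length != 2) {
                result.put(entry.getKey(), entry.getValue());
                continue;
            }
            Properties nested = (Properties) result.computeIfAbsent(parts[0], key -> new Properties());
            nested.setProperty(parts[1], String.valueOf(entry.getValue()));
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, Object> props = new LinkedHashMap<>();
        props.put("foo", "bar");
        props.put("fooProperties.foo1", "fooValue1");
        props.put("fooProperties.foo2", "fooValue2");
        // Prints something like {foo=bar, fooProperties={foo1=fooValue1, foo2=fooValue2}}
        System.out.println(group(props));
    }
}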
public List<SelectorData> obtainSelectorData(final String pluginName) { return SELECTOR_MAP.get(pluginName); }
@Test public void testObtainSelectorData() throws NoSuchFieldException, IllegalAccessException { SelectorData selectorData = SelectorData.builder().id("1").pluginName(mockPluginName1).build(); ConcurrentHashMap<String, List<SelectorData>> selectorMap = getFieldByName(selectorMapStr); selectorMap.put(mockPluginName1, Lists.newArrayList(selectorData)); List<SelectorData> selectorDataList = BaseDataCache.getInstance().obtainSelectorData(mockPluginName1); assertEquals(Lists.newArrayList(selectorData), selectorDataList); }
public static L3ModificationInstruction modArpTpa(IpAddress addr) { checkNotNull(addr, "Dst l3 ARP IP address cannot be null"); return new ModArpIPInstruction(L3SubType.ARP_TPA, addr); }
@Test public void testModArpTpaMethod() { final Instruction instruction = Instructions.modArpTpa(ip41); final L3ModificationInstruction.ModArpIPInstruction modArpIPInstruction = checkAndConvert(instruction, Instruction.Type.L3MODIFICATION, L3ModificationInstruction.ModArpIPInstruction.class); assertThat(modArpIPInstruction.subtype(), is(L3ModificationInstruction.L3SubType.ARP_TPA)); assertThat(modArpIPInstruction.ip(), is(ip41)); }
@Override public void upgrade() { // Only run this migration once. if (clusterConfigService.get(MigrationCompleted.class) != null) { LOG.debug("Migration already completed."); return; } final IndexSetConfig indexSetConfig = findDefaultIndexSet(); final ImmutableSet.Builder<String> completedStreamIds = ImmutableSet.builder(); final ImmutableSet.Builder<String> failedStreamIds = ImmutableSet.builder(); // Assign the "default index set" to all existing streams. Until now, there was no way to manually create // index sets, so the only one that exists is the "default" one created by an earlier migration. for (Stream stream : streamService.loadAll()) { if (isNullOrEmpty(stream.getIndexSetId())) { LOG.info("Assigning index set <{}> ({}) to stream <{}> ({})", indexSetConfig.id(), indexSetConfig.title(), stream.getId(), stream.getTitle()); stream.setIndexSetId(indexSetConfig.id()); try { streamService.save(stream); completedStreamIds.add(stream.getId()); } catch (ValidationException e) { LOG.error("Unable to save stream <{}>", stream.getId(), e); failedStreamIds.add(stream.getId()); } } } // Mark this migration as done. clusterConfigService.write(MigrationCompleted.create(indexSetConfig.id(), completedStreamIds.build(), failedStreamIds.build())); }
@Test public void upgrade() throws Exception { final Stream stream1 = mock(Stream.class); final Stream stream2 = mock(Stream.class); final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class); when(indexSetService.findAll()).thenReturn(Collections.singletonList(indexSetConfig)); when(indexSetConfig.id()).thenReturn("abc123"); when(stream1.getId()).thenReturn("stream1"); when(stream2.getId()).thenReturn("stream2"); when(streamService.loadAll()).thenReturn(Lists.newArrayList(stream1, stream2)); migration.upgrade(); verify(stream1).setIndexSetId(indexSetConfig.id()); verify(stream2).setIndexSetId(indexSetConfig.id()); verify(streamService, times(1)).save(stream1); verify(streamService, times(1)).save(stream2); verify(clusterConfigService, times(1)).write( V20161122174500_AssignIndexSetsToStreamsMigration.MigrationCompleted.create( indexSetConfig.id(), Sets.newHashSet("stream1", "stream2"), Collections.emptySet())); }
@Override public Object[] toArray() { return toArray(new Object[size]); }
@Test public void testToGenericArrayReturnsNewArrayWhenSmallArrayProvided() { final OAHashSet<Integer> set = new OAHashSet<>(8); populateSet(set, 10); final Integer[] setElementsProvided = new Integer[9]; final Object[] setElementsReturned = set.toArray(setElementsProvided); assertNotSame(setElementsProvided, setElementsReturned); final BitSet foundElements = new BitSet(10); for (Object foundElement : setElementsReturned) { foundElements.set((Integer) foundElement); } for (int i = 0; i < 10; i++) { assertTrue(foundElements.get(i)); } }
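The test relies on the java.util.Collection contract for toArray(T[]): when the supplied array is too small, a new array is allocated and returned instead. A quick demonstration of that contract with a standard HashSet, independent of OAHashSet.

import java.util.HashSet;
import java.util.Set;

public class ToArrayContractSketch {
    public static void main(String[] args) {
        Set<Integer> set = new HashSet<>();
        for (int i = 0; i < 10; i++) {
            set.add(i);
        }
        Integer[] tooSmall = new Integer[9];
        // Per the java.util.Collection contract, a too-small array forces allocation of a new one.
        Object[] returned = set.toArray(tooSmall);
        System.out.println(returned == tooSmall); // false: a fresh array was returned
        System.out.println(returned.length);      // 10: sized to the set
    }
}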
public List<CompactionTask> produce() { // get all CF files sorted by key range start (L1+) List<SstFileMetaData> sstSortedByCfAndStartingKeys = metadataSupplier.get().stream() .filter(l -> l.level() > 0) // let RocksDB deal with L0 .sorted(SST_COMPARATOR) .collect(Collectors.toList()); LOG.trace("Input files: {}", sstSortedByCfAndStartingKeys.size()); List<CompactionTask> tasks = groupIntoTasks(sstSortedByCfAndStartingKeys); tasks.sort(Comparator.<CompactionTask>comparingInt(t -> t.files.size()).reversed()); return tasks.subList(0, Math.min(tasks.size(), settings.maxManualCompactions)); }
@Test void testNotGroupingOnDifferentLevels() { SstFileMetaData sst1 = sstBuilder().setLevel(1).build(); SstFileMetaData sst2 = sstBuilder().setLevel(2).build(); assertThat(produce(configBuilder().build(), sst1, sst2)).hasSize(2); }
public static BigDecimal jsToBigNumber( Object value, String classType ) { if ( classType.equalsIgnoreCase( JS_UNDEFINED ) ) { return null; } else if ( classType.equalsIgnoreCase( JS_NATIVE_NUM ) ) { Number nb = Context.toNumber( value ); return BigDecimal.valueOf( nb.doubleValue() ); } else if ( classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ ) ) { // Is it a BigDecimal class ? return convertNativeJavaToBigDecimal( value ); } else if ( classType.equalsIgnoreCase( "java.lang.Byte" ) ) { return BigDecimal.valueOf( ( (Byte) value ).longValue() ); } else if ( classType.equalsIgnoreCase( "java.lang.Short" ) ) { return BigDecimal.valueOf( ( (Short) value ).longValue() ); } else if ( classType.equalsIgnoreCase( "java.lang.Integer" ) ) { return BigDecimal.valueOf( ( (Integer) value ).longValue() ); } else if ( classType.equalsIgnoreCase( "java.lang.Long" ) ) { return BigDecimal.valueOf( ( (Long) value ).longValue() ); } else if ( classType.equalsIgnoreCase( "java.lang.Double" ) ) { return BigDecimal.valueOf( ( (Double) value ).doubleValue() ); } else if ( classType.equalsIgnoreCase( "java.lang.String" ) ) { return BigDecimal.valueOf( ( new Long( (String) value ) ).longValue() ); } else { throw new UnsupportedOperationException( "JavaScript conversion to BigNumber not implemented for " + classType ); } }
@Test public void jsToBigNumber_String() throws Exception { assertEquals( BigDecimal.ONE, JavaScriptUtils.jsToBigNumber( "1", String.class.getName() ) ); }
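The String branch parses the value as a long and wraps it with BigDecimal.valueOf, so both sides of the assertion end up with scale 0, which matters because BigDecimal.equals compares scale as well as value. A JDK-only illustration of that point; it is not part of the Kettle code.

import java.math.BigDecimal;

public class BigDecimalScaleSketch {
    public static void main(String[] args) {
        // BigDecimal.equals compares scale as well as value, so the assertion above only passes
        // because parsing "1" as a long and wrapping it keeps the scale at 0, matching BigDecimal.ONE.
        BigDecimal fromString = BigDecimal.valueOf(Long.parseLong("1"));
        System.out.println(fromString.equals(BigDecimal.ONE));           // true
        System.out.println(new BigDecimal("1.0").equals(BigDecimal.ONE)); // false: different scale
    }
}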
public static LocalDateTime parse(CharSequence text) { return parse(text, (DateTimeFormatter) null); }
@Test public void parseTest5() { final LocalDateTime localDateTime = LocalDateTimeUtil.parse("19940121183604", "yyyyMMddHHmmss"); assertEquals("1994-01-21T18:36:04", Objects.requireNonNull(localDateTime).toString()); }
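The test exercises the overload that takes an explicit pattern; the single-argument overload shown above delegates to a default formatter. A quick standard-library sketch of the same pattern-based parse, independent of Hutool.

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class PatternParseSketch {
    public static void main(String[] args) {
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMddHHmmss");
        LocalDateTime parsed = LocalDateTime.parse("19940121183604", formatter);
        System.out.println(parsed); // 1994-01-21T18:36:04
    }
}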
@Override public Set<Permission> permissions() { return permissions; }
@Test public void immutablePermissions() { // Set<Permission> p = PERMS_ORIG; Set<Permission> p = PERMS_UNSAFE; Application app = baseBuilder.build(); Set<Permission> perms = app.permissions(); try { perms.add(JUNK_PERM); } catch (UnsupportedOperationException e) { // set is immutable } assertTrue("no write perm", app.permissions().contains(PERM_W)); assertTrue("no read perm", app.permissions().contains(PERM_R)); assertEquals("extra perms", 2, app.permissions().size()); // DONE: review - is it sufficient to expect caller to pass in ImmutableSet ? // Issue Resolved with Immutable collections used during construction. // If we just pass in a HashSet, the contents would be modifiable by // an external party. (Making the field final just means that the // reference to the set can never change; the contents may still...) // Similar reasoning can be applied to these two fields also: // List<String> features // List<String> requiredApps }
public AbilityStatus getConnectionAbility(AbilityKey abilityKey) { if (abilityTable == null || !abilityTable.containsKey(abilityKey.getName())) { return AbilityStatus.UNKNOWN; } return abilityTable.get(abilityKey.getName()) ? AbilityStatus.SUPPORTED : AbilityStatus.NOT_SUPPORTED; }
@Test void testGetConnectionAbility() { assertFalse(connection.isAbilitiesSet()); assertEquals(AbilityStatus.UNKNOWN, connection.getConnectionAbility(AbilityKey.SDK_CLIENT_TEST_1)); connection.setAbilityTable(Collections.singletonMap(AbilityKey.SERVER_TEST_2.getName(), true)); assertTrue(connection.isAbilitiesSet()); assertEquals(AbilityStatus.UNKNOWN, connection.getConnectionAbility(AbilityKey.SDK_CLIENT_TEST_1)); assertEquals(AbilityStatus.SUPPORTED, connection.getConnectionAbility(AbilityKey.SERVER_TEST_2)); connection.setAbilityTable(Collections.singletonMap(AbilityKey.SERVER_TEST_2.getName(), false)); assertEquals(AbilityStatus.NOT_SUPPORTED, connection.getConnectionAbility(AbilityKey.SERVER_TEST_2)); }
@Override public Object objectFromByteBuffer(byte[] buf, int offset, int length) throws IOException, ClassNotFoundException { try (ObjectInputStream ois = new CheckedInputStream(new ByteArrayInputStream(buf), allowList)) { return ois.readObject(); } }
@Test public void testPrimitiveArrays() throws Exception { JavaSerializationMarshaller marshaller = new JavaSerializationMarshaller(); byte[] bytes = marshaller.objectToByteBuffer(Util.EMPTY_BYTE_ARRAY); assertArrayEquals(Util.EMPTY_BYTE_ARRAY, (byte[]) marshaller.objectFromByteBuffer(bytes)); bytes = marshaller.objectToByteBuffer(new short[0]); assertArrayEquals(new short[0], (short[]) marshaller.objectFromByteBuffer(bytes)); bytes = marshaller.objectToByteBuffer(new int[0]); assertArrayEquals(new int[0], (int[]) marshaller.objectFromByteBuffer(bytes)); bytes = marshaller.objectToByteBuffer(new long[0]); assertArrayEquals(new long[0], (long[]) marshaller.objectFromByteBuffer(bytes)); bytes = marshaller.objectToByteBuffer(new float[0]); assertArrayEquals(new float[0], (float[]) marshaller.objectFromByteBuffer(bytes), 0); bytes = marshaller.objectToByteBuffer(new double[0]); assertArrayEquals(new double[0], (double[]) marshaller.objectFromByteBuffer(bytes), 0); bytes = marshaller.objectToByteBuffer(new char[0]); assertArrayEquals(new char[0], (char[]) marshaller.objectFromByteBuffer(bytes)); bytes = marshaller.objectToByteBuffer(new boolean[0]); assertArrayEquals(new boolean[0], (boolean[]) marshaller.objectFromByteBuffer(bytes)); }
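The marshaller wraps plain Java serialization, adding an allow-list check on the read side. For illustration, a minimal round trip using only java.io and no allow-list; this is the underlying mechanism, not the Infinispan API.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Arrays;

public class SerializationRoundTripSketch {
    static byte[] toBytes(Object value) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(out)) {
            oos.writeObject(value);
        }
        return out.toByteArray();
    }

    static Object fromBytes(byte[] bytes) throws IOException, ClassNotFoundException {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
            return ois.readObject();
        }
    }

    public static void main(String[] args) throws Exception {
        int[] original = new int[0];
        int[] copy = (int[]) fromBytes(toBytes(original));
        System.out.println(Arrays.equals(original, copy)); // true: an empty array survives the round trip
    }
}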
private void doMonitoring(HttpServletRequest httpRequest, HttpServletResponse httpResponse) throws IOException, ServletException { if (isRumMonitoring(httpRequest, httpResponse)) { return; } if (!isAllowed(httpRequest, httpResponse)) { return; } final Collector collector = filterContext.getCollector(); final MonitoringController monitoringController = new MonitoringController(collector, null); monitoringController.doActionIfNeededAndReport(httpRequest, httpResponse, filterConfig.getServletContext()); if ("stop".equalsIgnoreCase(HttpParameter.COLLECTOR.getParameterFrom(httpRequest))) { // we were called by a collector server that will do the aggregation over time, // the storage and the charts, so we stop the timer if it is started // and we clear the stats so that the collector server only retrieves the deltas for (final Counter counter : collector.getCounters()) { counter.clear(); } if (!collector.isStopped()) { LOG.debug( "Stopping the javamelody collector in this webapp, because a collector server from " + httpRequest.getRemoteAddr() + " wants to collect the data itself"); filterContext.stopCollector(); } } }
@Test public void testDoMonitoring() throws ServletException, IOException { monitoring(Collections.emptyMap()); monitoring(Collections.singletonMap(HttpParameter.FORMAT, "html")); monitoring(Collections.singletonMap(HttpParameter.FORMAT, "htmlbody")); setProperty(Parameter.DISABLED, Boolean.TRUE.toString()); try { setUp(); monitoring(Collections.emptyMap(), false); } finally { setProperty(Parameter.DISABLED, Boolean.FALSE.toString()); } setProperty(Parameter.NO_DATABASE, Boolean.TRUE.toString()); try { setUp(); monitoring(Collections.emptyMap()); } finally { setProperty(Parameter.NO_DATABASE, Boolean.FALSE.toString()); } setProperty(Parameter.ALLOWED_ADDR_PATTERN, "256.*"); try { setUp(); monitoring(Collections.emptyMap(), false); setProperty(Parameter.ALLOWED_ADDR_PATTERN, ".*"); setUp(); monitoring(Collections.emptyMap(), false); } finally { setProperty(Parameter.ALLOWED_ADDR_PATTERN, null); } setProperty(Parameter.AUTHORIZED_USERS, "admin:password, "); try { setUp(); monitoring(Collections.emptyMap(), false); setProperty(Parameter.AUTHORIZED_USERS, ""); setUp(); monitoring(Collections.emptyMap(), false); } finally { setProperty(Parameter.AUTHORIZED_USERS, null); } setProperty(Parameter.MONITORING_PATH, "/admin/monitoring"); try { setUp(); monitoring(Collections.emptyMap(), false); } finally { setProperty(Parameter.MONITORING_PATH, "/monitoring"); } try { setProperty(Parameter.JMX_EXPOSE_ENABLED, Boolean.TRUE.toString()); setUp(); monitoring(Collections.emptyMap()); } finally { setProperty(Parameter.JMX_EXPOSE_ENABLED, null); } }
@JsonProperty("server") public ServerFactory getServerFactory() { return server; }
@Test void hasAnHttpConfiguration() throws Exception { assertThat(configuration.getServerFactory()) .isNotNull(); }
static void setStaticGetter(final RegressionTable regressionTable, final RegressionCompilationDTO compilationDTO, final MethodDeclaration staticGetterMethod, final String variableName) { final BlockStmt regressionTableBody = staticGetterMethod.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, staticGetterMethod))); final BlockStmt newBody = new BlockStmt(); // populate maps String numericFunctionMapName = String.format(VARIABLE_NAME_TEMPLATE, NUMERIC_FUNCTION_MAP, variableName); final Map<String, Expression> numericPredictorsMap = getNumericPredictorsExpressions(regressionTable.getNumericPredictors()); createPopulatedHashMap(newBody, numericFunctionMapName, Arrays.asList(String.class.getSimpleName(), "SerializableFunction<Double, Double>"), numericPredictorsMap); final Map<String, Expression> categoricalPredictorFunctionsMap = getCategoricalPredictorsExpressions(regressionTable.getCategoricalPredictors(), newBody, variableName); String categoricalFunctionMapName = String.format(VARIABLE_NAME_TEMPLATE, CATEGORICAL_FUNCTION_MAP, variableName); createPopulatedHashMap(newBody, categoricalFunctionMapName, Arrays.asList(String.class.getSimpleName(), "SerializableFunction<String, " + "Double>") , categoricalPredictorFunctionsMap); String predictorTermsFunctionMapName = String.format(VARIABLE_NAME_TEMPLATE, PREDICTOR_TERM_FUNCTION_MAP, variableName); final Map<String, Expression> predictorTermsMap = getPredictorTermFunctions(regressionTable.getPredictorTerms()); createPopulatedHashMap(newBody, predictorTermsFunctionMapName, Arrays.asList(String.class.getSimpleName(), "SerializableFunction<Map" + "<String, " + "Object>, Double>"), predictorTermsMap); final VariableDeclarator variableDeclarator = getVariableDeclarator(regressionTableBody, TO_RETURN).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, TO_RETURN, regressionTableBody))); final MethodCallExpr initializer = variableDeclarator.getInitializer() .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, TO_RETURN, regressionTableBody))) .asMethodCallExpr(); final MethodCallExpr builder = getChainedMethodCallExprFrom("builder", initializer); builder.setArgument(0, new StringLiteralExpr(variableName)); getChainedMethodCallExprFrom("withNumericFunctionMap", initializer).setArgument(0, new NameExpr(numericFunctionMapName) { }); getChainedMethodCallExprFrom("withCategoricalFunctionMap", initializer).setArgument(0, new NameExpr(categoricalFunctionMapName)); getChainedMethodCallExprFrom("withPredictorTermsFunctionMap", initializer).setArgument(0, new NameExpr(predictorTermsFunctionMapName)); getChainedMethodCallExprFrom("withIntercept", initializer).setArgument(0, getExpressionForObject(regressionTable.getIntercept().doubleValue())); getChainedMethodCallExprFrom("withTargetField", initializer).setArgument(0, getExpressionForObject(compilationDTO.getTargetFieldName())); getChainedMethodCallExprFrom("withTargetCategory", initializer).setArgument(0, getExpressionForObject(regressionTable.getTargetCategory())); final Expression resultUpdaterExpression = getResultUpdaterExpression(compilationDTO.getDefaultNormalizationMethod()); getChainedMethodCallExprFrom("withResultUpdater", initializer).setArgument(0, resultUpdaterExpression); regressionTableBody.getStatements().forEach(newBody::addStatement); staticGetterMethod.setBody(newBody); }
@Test void setStaticGetter() throws IOException { regressionTable = getRegressionTable(3.5, "professional"); RegressionModel regressionModel = new RegressionModel(); regressionModel.setNormalizationMethod(RegressionModel.NormalizationMethod.CAUCHIT); regressionModel.addRegressionTables(regressionTable); regressionModel.setModelName(getGeneratedClassName("RegressionModel")); String targetField = "targetField"; DataField dataField = new DataField(); dataField.setName(targetField); dataField.setOpType(OpType.CATEGORICAL); DataDictionary dataDictionary = new DataDictionary(); dataDictionary.addDataFields(dataField); MiningField miningField = new MiningField(); miningField.setUsageType(MiningField.UsageType.TARGET); miningField.setName(dataField.getName()); MiningSchema miningSchema = new MiningSchema(); miningSchema.addMiningFields(miningField); regressionModel.setMiningSchema(miningSchema); PMML pmml = new PMML(); pmml.setDataDictionary(dataDictionary); pmml.addModels(regressionModel); String variableName = "variableName"; final CommonCompilationDTO<RegressionModel> source = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, regressionModel, new PMMLCompilationContextMock(), "FILENAME"); final RegressionCompilationDTO compilationDTO = RegressionCompilationDTO.fromCompilationDTORegressionTablesAndNormalizationMethod(source, new ArrayList<>(), regressionModel.getNormalizationMethod()); final MethodDeclaration staticGetterMethod = STATIC_GETTER_METHOD.clone(); KiePMMLRegressionTableFactory.setStaticGetter(regressionTable, compilationDTO, staticGetterMethod, variableName); String text = getFileContent(TEST_06_SOURCE); MethodDeclaration expected = JavaParserUtils.parseMethod(text); assertThat(staticGetterMethod.toString()).isEqualTo(expected.toString()); assertThat(JavaParserUtils.equalsNode(expected, staticGetterMethod)).isTrue(); List<Class<?>> imports = Arrays.asList(AtomicReference.class, Collections.class, Arrays.class, List.class, Map.class, KiePMMLRegressionTable.class, SerializableFunction.class); commonValidateCompilationWithImports(staticGetterMethod, imports); }
@GetMapping("/publish/list")
@Secured(action = ActionTypes.READ, resource = "nacos/admin")
public Result<List<ObjectNode>> getPublishedServiceList(@RequestParam("clientId") String clientId) throws NacosApiException {
    checkClientId(clientId);
    Client client = clientManager.getClient(clientId);
    Collection<Service> allPublishedService = client.getAllPublishedService();
    ArrayList<ObjectNode> res = new ArrayList<>();
    for (Service service : allPublishedService) {
        InstancePublishInfo instancePublishInfo = client.getInstancePublishInfo(service);
        if (instancePublishInfo instanceof BatchInstancePublishInfo) {
            List<InstancePublishInfo> instancePublishInfos =
                    ((BatchInstancePublishInfo) instancePublishInfo).getInstancePublishInfos();
            for (InstancePublishInfo publishInfo : instancePublishInfos) {
                res.add(wrapSingleInstanceNode(publishInfo, service));
            }
        } else {
            res.add(wrapSingleInstanceNode(instancePublishInfo, service));
        }
    }
    return Result.success(res);
}
@Test
void testGetPublishedServiceList() throws Exception {
    // single instance
    when(clientManager.getClient("test1")).thenReturn(connectionBasedClient);
    Service service = Service.newService("test", "test", "test");
    connectionBasedClient.addServiceInstance(service, new InstancePublishInfo("127.0.0.1", 8848));
    MockHttpServletRequestBuilder mockHttpServletRequestBuilder = MockMvcRequestBuilders.get(URL + "/publish/list")
            .param("clientId", "test1");
    mockmvc.perform(mockHttpServletRequestBuilder)
            .andExpect(MockMvcResultMatchers.jsonPath("$.data.length()").value(1));
    // batch instances
    BatchInstancePublishInfo instancePublishInfo = new BatchInstancePublishInfo();
    instancePublishInfo.setInstancePublishInfos(
            Arrays.asList(new InstancePublishInfo("127.0.0.1", 8848), new InstancePublishInfo("127.0.0.1", 8849)));
    connectionBasedClient.addServiceInstance(service, instancePublishInfo);
    mockHttpServletRequestBuilder = MockMvcRequestBuilders.get(URL + "/publish/list").param("clientId", "test1");
    mockmvc.perform(mockHttpServletRequestBuilder)
            .andExpect(MockMvcResultMatchers.jsonPath("$.data.length()").value(2));
}
@DeleteMapping("/apps/{appId}/clusters/{clusterName:.+}")
public void delete(@PathVariable("appId") String appId,
                   @PathVariable("clusterName") String clusterName,
                   @RequestParam String operator) {
    Cluster entity = clusterService.findOne(appId, clusterName);
    if (entity == null) {
        throw NotFoundException.clusterNotFound(appId, clusterName);
    }
    if (ConfigConsts.CLUSTER_NAME_DEFAULT.equals(entity.getName())) {
        throw new BadRequestException("can not delete default cluster!");
    }
    clusterService.delete(entity.getId(), operator);
}
@Test(expected = BadRequestException.class)
public void testDeleteDefaultFail() {
    Cluster cluster = new Cluster();
    cluster.setName(ConfigConsts.CLUSTER_NAME_DEFAULT);
    when(clusterService.findOne(any(String.class), any(String.class))).thenReturn(cluster);
    clusterController.delete("1", "2", "d");
}
public static StatementExecutorResponse execute(
    final ConfiguredStatement<AssertSchema> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  return AssertExecutor.execute(
      statement.getMaskedStatementText(),
      statement.getStatement(),
      executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS),
      serviceContext,
      (stmt, sc) -> assertSchema(
          sc.getSchemaRegistryClient(),
          ((AssertSchema) stmt).getSubject(),
          ((AssertSchema) stmt).getId(),
          stmt.checkExists()),
      (str, stmt) -> new AssertSchemaEntity(
          str,
          ((AssertSchema) stmt).getSubject(),
          ((AssertSchema) stmt).getId(),
          stmt.checkExists())
  );
}
@Test
public void shouldAssertSchemaBySubject() {
  // Given:
  final AssertSchema assertSchema =
      new AssertSchema(Optional.empty(), Optional.of("subjectName"), Optional.empty(), Optional.empty(), true);
  final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement
      .of(KsqlParser.PreparedStatement.of("", assertSchema), SessionConfig.of(ksqlConfig, ImmutableMap.of()));

  // When:
  final Optional<KsqlEntity> entity = AssertSchemaExecutor
      .execute(statement, mock(SessionProperties.class), engine, serviceContext).getEntity();

  // Then:
  assertThat("expected response!", entity.isPresent());
  assertThat(((AssertSchemaEntity) entity.get()).getSubject(), is(Optional.of("subjectName")));
  assertThat(((AssertSchemaEntity) entity.get()).getId(), is(Optional.empty()));
  assertThat(((AssertSchemaEntity) entity.get()).getExists(), is(true));
}
@Udf
public Map<String, String> splitToMap(
    @UdfParameter(description = "The input string to split into map entries") final String input,
    @UdfParameter(description = "The delimiter that separates one entry from the next") final String entryDelimiter,
    @UdfParameter(description = "The delimiter that separates each key from its value") final String kvDelimiter) {
  if (input == null || entryDelimiter == null || kvDelimiter == null) {
    return null;
  }

  if (entryDelimiter.isEmpty() || kvDelimiter.isEmpty() || entryDelimiter.equals(kvDelimiter)) {
    return null;
  }

  final Iterable<String> entries = Splitter.on(entryDelimiter).omitEmptyStrings().split(input);
  return StreamSupport.stream(entries.spliterator(), false)
      .filter(e -> e.contains(kvDelimiter))
      .map(kv -> Splitter.on(kvDelimiter).split(kv).iterator())
      .collect(Collectors.toMap(
          Iterator::next,
          Iterator::next,
          // on duplicate keys, keep the last value seen
          (v1, v2) -> v2));
}
@Test
public void shouldReturnNullOnNullEntryDelimiter() {
  Map<String, String> result = udf.splitToMap("foo:=apple/bar:=cherry", null, ":=");
  assertThat(result, is(nullValue()));
}
public static void setRuleMechanism(String ruleMech) {
  if (ruleMech != null
      && (!ruleMech.equalsIgnoreCase(MECHANISM_HADOOP)
          && !ruleMech.equalsIgnoreCase(MECHANISM_MIT))) {
    throw new IllegalArgumentException("Invalid rule mechanism: " + ruleMech);
  }
  ruleMechanism = ruleMech;
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidRuleMechanism() throws Exception {
  KerberosName.setRuleMechanism("INVALID_MECHANISM");
}
public String build( final String cellValue ) {
    switch ( type ) {
        case FORALL:
            return buildForAll( cellValue );
        case INDEXED:
            return buildMulti( cellValue );
        default:
            return buildSingle( cellValue );
    }
}
@Test
public void testForAllOrAndMultipleWithPrefix() {
    final String snippet = "something == this && forall(||){something == $} && forall(&&){something < $}";
    final SnippetBuilder snip = new SnippetBuilder(snippet);
    final String result = snip.build("x, y");
    assertThat(result).isEqualTo("something == this && something == x || something == y && something < x && something < y");
}
public static boolean isValidRootUrl(String url) {
    UrlValidator validator = new CustomUrlValidator();
    return validator.isValid(url);
}
@Test
public void queryIsForbidden() {
    // this URL is used as a root URL and concatenated with other path parts, so a query string is not allowed
    assertFalse(UrlHelper.isValidRootUrl("http://jenkins?param=test"));
    assertFalse(UrlHelper.isValidRootUrl("http://jenkins.com?param=test"));
}
@Override
protected void runTask() {
    LOGGER.trace("Looking for succeeded jobs that can go to the deleted state... ");
    final Instant updatedBefore = now().minus(backgroundJobServerConfiguration().getDeleteSucceededJobsAfter());
    processManyJobs(previousResults -> getSucceededJobs(updatedBefore, previousResults),
            job -> job.delete("JobRunr maintenance - deleting succeeded job"),
            this::handleTotalAmountOfSucceededJobs);
}
@Test
void testTask() {
    Job succeededJob1 = aSucceededJob().build();
    Job succeededJob2 = aSucceededJob().build();

    when(storageProvider.getJobList(eq(SUCCEEDED), any(), any())).thenReturn(asList(succeededJob1, succeededJob2), emptyJobList());

    runTask(task);

    verify(storageProvider).save(anyList());
    verify(storageProvider).publishTotalAmountOfSucceededJobs(2);

    assertThat(logAllStateChangesFilter.getStateChanges(succeededJob1)).containsExactly("SUCCEEDED->DELETED");
    assertThat(logAllStateChangesFilter.getStateChanges(succeededJob2)).containsExactly("SUCCEEDED->DELETED");
    assertThat(logAllStateChangesFilter.onProcessingIsCalled(succeededJob1)).isFalse();
    assertThat(logAllStateChangesFilter.onProcessingIsCalled(succeededJob2)).isFalse();
    assertThat(logAllStateChangesFilter.onProcessingSucceededIsCalled(succeededJob1)).isFalse();
    assertThat(logAllStateChangesFilter.onProcessingSucceededIsCalled(succeededJob2)).isFalse();
}
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    final Map<Path, List<ObjectKeyAndVersion>> map = new HashMap<>();
    final List<Path> containers = new ArrayList<>();
    for(Path file : files.keySet()) {
        if(containerService.isContainer(file)) {
            containers.add(file);
            continue;
        }
        callback.delete(file);
        final Path bucket = containerService.getContainer(file);
        if(file.getType().contains(Path.Type.upload)) {
            // In-progress multipart upload
            try {
                multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
                        bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
            }
            catch(NotfoundException ignored) {
                log.warn(String.format("Ignore failure deleting multipart upload %s", file));
            }
        }
        else {
            final List<ObjectKeyAndVersion> keys = new ArrayList<>();
            // Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys
            keys.add(new ObjectKeyAndVersion(containerService.getKey(file), file.attributes().getVersionId()));
            if(map.containsKey(bucket)) {
                map.get(bucket).addAll(keys);
            }
            else {
                map.put(bucket, keys);
            }
        }
    }
    // Iterate over all containers and delete list of keys
    for(Map.Entry<Path, List<ObjectKeyAndVersion>> entry : map.entrySet()) {
        final Path container = entry.getKey();
        final List<ObjectKeyAndVersion> keys = entry.getValue();
        this.delete(container, keys, prompt);
    }
    for(Path file : containers) {
        callback.delete(file);
        // Finally delete bucket itself
        try {
            final String bucket = containerService.getContainer(file).getName();
            session.getClient().deleteBucket(bucket);
            session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
        }
        catch(ServiceException e) {
            throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
        }
    }
}
@Test(expected = NotfoundException.class)
public void testDeleteNotFoundBucketDnsNameCompatible() throws Exception {
    final Path container = new Path(new AlphanumericRandomStringService().random().toLowerCase(),
            EnumSet.of(Path.Type.directory, Path.Type.volume));
    new S3MultipleDeleteFeature(session, new S3AccessControlListFeature(session)).delete(
            Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
@Override
public void watch(final String key, final DataChangedEventListener dataChangedEventListener) {
    Watch.Listener listener = Watch.listener(response -> {
        for (WatchEvent each : response.getEvents()) {
            Type type = getEventChangedType(each);
            if (Type.IGNORED != type) {
                dispatchEvent(dataChangedEventListener, each, type);
            }
        }
    });
    ByteSequence prefix = ByteSequence.from(key, StandardCharsets.UTF_8);
    Preconditions.checkNotNull(prefix, "prefix should not be null");
    client.getWatchClient().watch(prefix, WatchOption.newBuilder().withRange(OptionsUtil.prefixEndOf(prefix)).build(), listener);
}
@Test
void assertWatchUpdate() {
    doAnswer(invocationOnMock -> {
        Watch.Listener listener = (Watch.Listener) invocationOnMock.getArguments()[2];
        listener.onNext(buildWatchResponse(WatchEvent.EventType.PUT));
        return mock(Watch.Watcher.class);
    }).when(watch).watch(any(ByteSequence.class), any(WatchOption.class), any(Watch.Listener.class));
    repository.watch("key1", event -> {
    });
    verify(watch).watch(any(ByteSequence.class), any(WatchOption.class), any(Watch.Listener.class));
}
@VisibleForTesting
public Map<String, HashSet<String>> runTest(Set<String> inputList, Map<String, Long> sizes) {
  try {
    conf = msConf;
    testDatasizes = sizes;
    coverageList.clear();
    removeNestedStructure(inputList);
    createOutputList(inputList, "test", "test");
  } catch (Exception e) {
    LOG.error("MetaToolTask failed on ListExtTblLocs test: ", e);
  }
  return coverageList;
}
@Test
public void testGroupLocationsDummyDataSizes() {
  Set<String> inputLocations = new TreeSet<>();
  Configuration conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
  MetaToolTaskListExtTblLocs.msConf = conf;
  MetaToolTaskListExtTblLocs task = new MetaToolTaskListExtTblLocs();

  // Case 1: multiple unpartitioned external tables; without extra data the expected output is 1 location
  // (tested in testGroupLocations#1). But if there is some data at ../customLocation, we list all 3 paths.
  inputLocations.add("/warehouse/customLocation/t1");
  inputLocations.add("/warehouse/customLocation/t2");
  inputLocations.add("/warehouse/customLocation/t3");
  Map<String, Long> dataSizes = new HashMap<>();
  dataSizes.put("/warehouse/customLocation", Long.valueOf(100)); // simulate 100 bytes of extra data at customLocation
  Map<String, HashSet<String>> output = task.runTest(inputLocations, dataSizes);
  Assert.assertEquals(3, output.size());
  String expectedOutput1 = "/warehouse/customLocation/t1";
  Assert.assertTrue(output.containsKey(expectedOutput1));
  HashSet<String> coveredLocs = output.get(expectedOutput1);
  Assert.assertEquals(1, coveredLocs.size());
  Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/t1"));

  // Case 2: inputs at multiple depths
  //   ../ext/b0       - contains 1 location
  //   ../ext/p=0      - contains 1 location
  //   ../ext/b1/b2/b3 - contains 3 locations (p1, p2, p3)
  // Expected output without extra data: [../ext/b1/b2/b3 containing 3 elements, t1, p0] (tested in testGroupLocations#2).
  // Expected output with extra data at ../ext/b1/b2/b3: [p1, p2, p3, t1, p0].
  inputLocations.clear();
  dataSizes.clear();
  inputLocations.add("/warehouse/customLocation/ext/b0");
  inputLocations.add("/warehouse/customLocation/ext/p=0");
  inputLocations.add("/warehouse/customLocation/ext/b1/b2/b3/p=1");
  inputLocations.add("/warehouse/customLocation/ext/b1/b2/b3/p=2");
  inputLocations.add("/warehouse/customLocation/ext/b1/b2/b3/p=3");
  dataSizes.put("/warehouse/customLocation/ext/b1/b2/b3", Long.valueOf(100)); // simulate 100 bytes of extra data at ../b3
  output = task.runTest(inputLocations, dataSizes);
  Assert.assertEquals(5, output.size());
  expectedOutput1 = "/warehouse/customLocation/ext/b0";
  Assert.assertTrue(output.containsKey(expectedOutput1));
  coveredLocs = output.get(expectedOutput1);
  Assert.assertEquals(1, coveredLocs.size());
  Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0"));
  String expectedOutput2 = "/warehouse/customLocation/ext/p=0";
  Assert.assertTrue(output.containsKey(expectedOutput2));
  coveredLocs = output.get(expectedOutput2);
  Assert.assertEquals(1, coveredLocs.size());
  Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/p=0"));
  String expectedOutput3 = "/warehouse/customLocation/ext/b1/b2/b3/p=1";
  Assert.assertTrue(output.containsKey(expectedOutput3));
  coveredLocs = output.get(expectedOutput3);
  Assert.assertEquals(1, coveredLocs.size());
  Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/b3/p=1"));
  String expectedOutput4 = "/warehouse/customLocation/ext/b1/b2/b3/p=2";
  Assert.assertTrue(output.containsKey(expectedOutput4));
  coveredLocs = output.get(expectedOutput4);
  Assert.assertEquals(1, coveredLocs.size());
  Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/b3/p=2"));
  String expectedOutput5 = "/warehouse/customLocation/ext/b1/b2/b3/p=3";
  Assert.assertTrue(output.containsKey(expectedOutput5));
  coveredLocs = output.get(expectedOutput5);
  Assert.assertEquals(1, coveredLocs.size());
  Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/b3/p=3"));

  // Case 3: an intermediate directory has extra data
  //   ../ext    - contains 4 locations
  //   ../ext/b1 - contains 3 locations
  // Expected output without extra data: [../ext covering all locations] (tested in testGroupLocations#3).
  // We simulate extra data at ../ext/b1, so the expected output is the list of all locations.
  inputLocations.clear();
  dataSizes.clear();
  inputLocations.add("/warehouse/customLocation/ext/p=0");
  inputLocations.add("/warehouse/customLocation/ext/p=1");
  inputLocations.add("/warehouse/customLocation/ext/p=2");
  inputLocations.add("/warehouse/customLocation/ext/p=3");
  inputLocations.add("/warehouse/customLocation/ext/b1/p=4");
  inputLocations.add("/warehouse/customLocation/ext/b1/p=5");
  inputLocations.add("/warehouse/customLocation/ext/b1/p=6");
  dataSizes.put("/warehouse/customLocation/ext/b1", Long.valueOf(100)); // simulate 100 bytes of extra data at ../ext/b1
  dataSizes.put("/warehouse/customLocation/ext", Long.valueOf(100));    // since ext/b1 contains 100 bytes, ../ext also has 100 bytes
  output = task.runTest(inputLocations, dataSizes);
  Assert.assertEquals(7, output.size());
  Assert.assertTrue(output.keySet().containsAll(inputLocations));
  for (String outLoc : output.keySet()) {
    Assert.assertTrue(output.get(outLoc).contains(outLoc));
  }
}
public T get(K key) {
  T metric = metrics.get(key);
  if (metric == null) {
    // Lazily create the metric; if another thread registered one concurrently,
    // keep the instance already in the map so all callers share the same object.
    metric = factory.createInstance(key);
    metric = MoreObjects.firstNonNull(metrics.putIfAbsent(key, metric), metric);
  }
  return metric;
}
@Test
public void testCreateSeparateInstances() {
  AtomicLong foo = metricsMap.get("foo");
  AtomicLong bar = metricsMap.get("bar");
  assertThat(foo, not(sameInstance(bar)));
}
@Override
public SSLContext getIdentitySslContext() {
    return sslContext;
}
@Test
void constructs_ssl_context_from_file() throws IOException {
    File keyFile = File.createTempFile("junit", null, tempDirectory);
    KeyPair keypair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
    createPrivateKeyFile(keyFile, keypair);
    X509Certificate certificate = createCertificate(keypair);
    File certificateFile = File.createTempFile("junit", null, tempDirectory);
    createCertificateFile(certificate, certificateFile);
    File trustStoreFile = File.createTempFile("junit", null, tempDirectory);
    createTrustStoreFile(certificate, trustStoreFile);

    SiaIdentityProvider provider = new SiaIdentityProvider(
            new AthenzService("domain", "service-name"),
            keyFile.toPath(),
            certificateFile.toPath(),
            trustStoreFile.toPath());

    assertNotNull(provider.getIdentitySslContext());
}
public Duration queryTimeout() {
    return queryTimeout;
}
@Test
void queryTimeout() {
    assertThat(builder.build().queryTimeout()).isEqualTo(DEFAULT_QUERY_TIMEOUT);

    Duration queryTimeout = Duration.ofSeconds(5);
    builder.queryTimeout(queryTimeout);
    assertThat(builder.build().queryTimeout()).isEqualTo(queryTimeout);
}