Columns: focal_method (string, length 13 to 60.9k), test_case (string, length 25 to 109k)
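Each row below pairs a production method (focal_method) with one of its unit tests (test_case), both stored as raw Java source strings. As a minimal sketch of how rows with this schema could be consumed, the Java snippet below parses a JSON-Lines export of the pairs with Jackson; the file name focal_test_pairs.jsonl and the FocalTestPair record are illustrative assumptions, not part of the dataset itself.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Stream;

// Sketch: reads focal_method / test_case pairs from a hypothetical JSON-Lines export.
public final class FocalTestPairReader {

    // Holder for one dataset row; the record type is an assumption for illustration.
    public record FocalTestPair(String focalMethod, String testCase) { }

    private static final ObjectMapper MAPPER = new ObjectMapper();

    public static List<FocalTestPair> read(Path jsonlFile) throws IOException {
        try (Stream<String> lines = Files.lines(jsonlFile)) {
            return lines
                    .filter(line -> !line.isBlank())
                    .map(FocalTestPairReader::parseLine)
                    .toList();
        }
    }

    private static FocalTestPair parseLine(String line) {
        try {
            JsonNode node = MAPPER.readTree(line);
            // Field names match the two columns listed above.
            return new FocalTestPair(
                    node.path("focal_method").asText(),
                    node.path("test_case").asText());
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}

Usage would look like: List<FocalTestPair> pairs = FocalTestPairReader.read(Path.of("focal_test_pairs.jsonl"));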
public boolean isUpgrade() { return oldVersion.isLessThan(newVersion); }
@Test public void testIsUpgrade() { assertTrue(CHANGE_3_0_IV1_TO_3_3_IV0.isUpgrade()); assertFalse(CHANGE_3_3_IV0_TO_3_0_IV1.isUpgrade()); }
@Override // Camel calls this method if the endpoint isSynchronous(), as the // KafkaEndpoint creates a SynchronousDelegateProducer for it public void process(Exchange exchange) throws Exception { // is the message body a list or something that contains multiple values Message message = exchange.getIn(); if (transactionId != null) { startKafkaTransaction(exchange); } if (endpoint.getConfiguration().isUseIterator() && isIterable(message.getBody())) { processIterableSync(exchange, message); } else { processSingleMessageSync(exchange, message); } }
@Test public void processRequiresTopicInEndpointOrInHeader() throws Exception { endpoint.getConfiguration().setTopic(null); Mockito.when(exchange.getIn()).thenReturn(in); Mockito.when(exchange.getMessage()).thenReturn(in); in.setHeader(KafkaConstants.PARTITION_KEY, 4); in.setHeader(KafkaConstants.KEY, "someKey"); producer.process(exchange); verifySendMessage("sometopic", "someKey"); assertRecordMetadataExists(); }
@Override public long getMax() { if (values.length == 0) { return 0; } return values[values.length - 1]; }
@Test public void calculatesTheMaximumValue() throws Exception { assertThat(snapshot.getMax()) .isEqualTo(5); }
@Override public void run() { final Instant now = time.get(); try { final Collection<PersistentQueryMetadata> queries = engine.getPersistentQueries(); final Optional<Double> saturation = queries.stream() .collect(Collectors.groupingBy(PersistentQueryMetadata::getQueryApplicationId)) .entrySet() .stream() .map(e -> measure(now, e.getKey(), e.getValue())) .max(PersistentQuerySaturationMetrics::compareSaturation) .orElse(Optional.of(0.0)); saturation.ifPresent(s -> report(now, s)); final Set<String> appIds = queries.stream() .map(PersistentQueryMetadata::getQueryApplicationId) .collect(Collectors.toSet()); for (final String appId : Sets.difference(new HashSet<>(perKafkaStreamsStats.keySet()), appIds)) { perKafkaStreamsStats.get(appId).cleanup(reporter); perKafkaStreamsStats.remove(appId); } } catch (final RuntimeException e) { LOGGER.error("Error collecting saturation", e); throw e; } }
@Test public void shouldAddPointsForQueriesSharingRuntimes() { // Given: final Instant start = Instant.now(); when(clock.get()).thenReturn(start); givenMetrics(kafkaStreams1) .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2))) .withBlockedTime("t1", Duration.ofMinutes(2)); collector.run(); when(clock.get()).thenReturn(start.plus(WINDOW)); givenMetrics(kafkaStreams1) .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2))) .withBlockedTime("t1", Duration.ofMinutes(3)); // When: collector.run(); // Then: final DataPoint point = verifyAndGetLatestDataPoint( "node-query-saturation", ImmutableMap.of("query-id", "boom") ); assertThat((Double) point.getValue(), closeTo(.9, .01)); }
public synchronized void setLevel(Level newLevel) { if (level == newLevel) { // nothing to do; return; } if (newLevel == null && isRootLogger()) { throw new IllegalArgumentException("The level of the root logger cannot be set to null"); } level = newLevel; if (newLevel == null) { effectiveLevelInt = parent.effectiveLevelInt; newLevel = parent.getEffectiveLevel(); } else { effectiveLevelInt = newLevel.levelInt; } if (childrenList != null) { int len = childrenList.size(); for (int i = 0; i < len; i++) { Logger child = (Logger) childrenList.get(i); // tell child to handle parent levelInt change child.handleParentLevelChange(effectiveLevelInt); } } // inform listeners loggerContext.fireOnLevelChange(this, newLevel); }
@Test public void testEnabled_Info() throws Exception { root.setLevel(Level.INFO); checkLevelThreshold(loggerTest, Level.INFO); }
public static String encode(final String input) { try { final StringBuilder b = new StringBuilder(); final StringTokenizer t = new StringTokenizer(input, "/"); if(!t.hasMoreTokens()) { return input; } if(StringUtils.startsWith(input, String.valueOf(Path.DELIMITER))) { b.append(Path.DELIMITER); } while(t.hasMoreTokens()) { b.append(URLEncoder.encode(t.nextToken(), StandardCharsets.UTF_8.name())); if(t.hasMoreTokens()) { b.append(Path.DELIMITER); } } if(StringUtils.endsWith(input, String.valueOf(Path.DELIMITER))) { b.append(Path.DELIMITER); } // Because URLEncoder uses <code>application/x-www-form-urlencoded</code> we have to replace these // for proper URI percent encoding. return StringUtils.replaceEach(b.toString(), new String[]{"+", "*", "%7E", "%40"}, new String[]{"%20", "%2A", "~", "@"}); } catch(UnsupportedEncodingException e) { log.warn(String.format("Failure %s encoding input %s", e, input)); return input; } }
@Test public void testEncodeTrailingDelimiter() { assertEquals("/a/p/", URIEncoder.encode("/a/p/")); assertEquals("/p%20d/", URIEncoder.encode("/p d/")); }
public static Action resolve(Schema writer, Schema reader, GenericData data) { return resolve(Schema.applyAliases(writer, reader), reader, data, new HashMap<>()); }
@Test void resolveTime() { final Schema writeSchema = Schema.create(Schema.Type.INT); final Schema readSchema = new TimeConversions.TimeMicrosConversion().getRecommendedSchema(); // LONG Resolver.Action action = Resolver.resolve(writeSchema, readSchema); Assertions.assertNotNull(action); MatcherAssert.assertThat("Wrong class for action", action, Matchers.instanceOf(Resolver.Promote.class)); Assertions.assertEquals(action.type, Resolver.Action.Type.PROMOTE); Assertions.assertNotNull(action.logicalType); }
@Override public void validate(final SingleRule rule, final SQLStatementContext sqlStatementContext, final ShardingSphereDatabase database) { DropSchemaStatement dropSchemaStatement = (DropSchemaStatement) sqlStatementContext.getSqlStatement(); boolean containsCascade = dropSchemaStatement.isContainsCascade(); for (IdentifierValue each : dropSchemaStatement.getSchemaNames()) { String schemaName = each.getValue(); ShardingSphereSchema schema = database.getSchema(schemaName); ShardingSpherePreconditions.checkNotNull(schema, () -> new SchemaNotFoundException(schemaName)); ShardingSpherePreconditions.checkState(containsCascade || schema.getAllTableNames().isEmpty(), () -> new DropNotEmptySchemaException(schemaName)); } }
@Test void assertValidateWithNotExistedSchema() { ShardingSphereDatabase database = mockDatabase(); when(database.getSchema("not_existed_schema")).thenReturn(null); assertThrows(SchemaNotFoundException.class, () -> new SingleDropSchemaMetaDataValidator().validate(mock(SingleRule.class, RETURNS_DEEP_STUBS), createSQLStatementContext("not_existed_schema", true), database)); }
@Override @SuppressWarnings("unchecked") public int run() throws IOException { RewriteOptions options = buildOptionsOrFail(); ParquetRewriter rewriter = new ParquetRewriter(options); rewriter.processBlocks(); rewriter.close(); return 0; }
@Test(expected = FileAlreadyExistsException.class) public void testRewriteCommandWithoutOverwrite() throws IOException { File file = parquetFile(); RewriteCommand command = new RewriteCommand(createLogger()); command.inputs = Arrays.asList(file.getAbsolutePath()); File output = new File(getTempFolder(), "converted.parquet"); command.output = output.getAbsolutePath(); command.setConf(new Configuration()); Files.createFile(output.toPath()); command.run(); }
@Override public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) { if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) { return resolveRequestConfig(propertyName); } else if (propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX) && !propertyName.startsWith(KSQL_STREAMS_PREFIX)) { return resolveKsqlConfig(propertyName); } return resolveStreamsConfig(propertyName, strict); }
@Test public void shouldNotFindUnknownProducerPropertyIfStrict() { // Given: final String configName = StreamsConfig.PRODUCER_PREFIX + "custom.interceptor.config"; // Then: assertThat(resolver.resolve(configName, true), is(Optional.empty())); }
public String ldapLogin(String userId, String userPwd) { Properties searchEnv = getManagerLdapEnv(); LdapContext ctx = null; try { // Connect to the LDAP server and Authenticate with a service user of whom we know the DN and credentials ctx = new InitialLdapContext(searchEnv, null); SearchControls sc = new SearchControls(); sc.setReturningAttributes(new String[]{ldapEmailAttribute}); sc.setSearchScope(SearchControls.SUBTREE_SCOPE); EqualsFilter filter = new EqualsFilter(ldapUserIdentifyingAttribute, userId); NamingEnumeration<SearchResult> results = ctx.search(ldapBaseDn, filter.toString(), sc); if (results.hasMore()) { // get the users DN (distinguishedName) from the result SearchResult result = results.next(); NamingEnumeration<? extends Attribute> attrs = result.getAttributes().getAll(); while (attrs.hasMore()) { // Open another connection to the LDAP server with the found DN and the password searchEnv.put(Context.SECURITY_PRINCIPAL, result.getNameInNamespace()); searchEnv.put(Context.SECURITY_CREDENTIALS, userPwd); try { new InitialDirContext(searchEnv); } catch (Exception e) { log.warn("invalid ldap credentials or ldap search error", e); return null; } Attribute attr = attrs.next(); if (attr.getID().equals(ldapEmailAttribute)) { return (String) attr.get(); } } } } catch (NamingException e) { log.error("ldap search error", e); return null; } finally { try { if (ctx != null) { ctx.close(); } } catch (NamingException e) { log.error("ldap context close error", e); } } return null; }
@Test public void ldapLoginError() throws NoSuchFieldException, IllegalAccessException { changeSslEnable(false); String email2 = ldapService.ldapLogin(username, "error password"); Assertions.assertNull(email2); }
@Override public KsMaterializedQueryResult<Row> get( final GenericKey key, final int partition, final Optional<Position> position ) { try { final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key); StateQueryRequest<ValueAndTimestamp<GenericRow>> request = inStore(stateStore.getStateStoreName()) .withQuery(query) .withPartitions(ImmutableSet.of(partition)); if (position.isPresent()) { request = request.withPositionBound(PositionBound.at(position.get())); } final StateQueryResult<ValueAndTimestamp<GenericRow>> result = stateStore.getKafkaStreams().query(request); final QueryResult<ValueAndTimestamp<GenericRow>> queryResult = result.getPartitionResults().get(partition); // Some of these failures are retriable, and in the future, we may want to retry // locally before throwing. if (queryResult.isFailure()) { throw failedQueryException(queryResult); } else if (queryResult.getResult() == null) { return KsMaterializedQueryResult.rowIteratorWithPosition( Collections.emptyIterator(), queryResult.getPosition()); } else { final ValueAndTimestamp<GenericRow> row = queryResult.getResult(); return KsMaterializedQueryResult.rowIteratorWithPosition( ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp())) .iterator(), queryResult.getPosition()); } } catch (final NotUpToBoundException | MaterializationException e) { throw e; } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
@Test public void shouldThrowIfRangeQueryResultIsError() { // Given: when(kafkaStreams.query(any())).thenReturn(getErrorResult()); // When: final Exception e = assertThrows( MaterializationException.class, () -> table.get(PARTITION, A_KEY, A_KEY2) ); // Then: assertThat(e.getMessage(), containsString("Error!")); assertThat(e, (instanceOf(MaterializationException.class))); }
public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) { Set<Integer> podIdsToRestart = new HashSet<>(); List<Future<Void>> futures = new ArrayList<>(pvcs.size()); for (PersistentVolumeClaim desiredPvc : pvcs) { Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName()) .compose(currentPvc -> { if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) { // This branch handles the following conditions: // * The PVC doesn't exist yet, we should create it // * The PVC is not Bound, we should reconcile it return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc) .map((Void) null); } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) { // The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName()); return Future.succeededFuture(); } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) { // The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName())); LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.", desiredPvc.getMetadata().getName()); return Future.succeededFuture(); } else { // The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage")); Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage")); if (!currentSize.equals(desiredSize)) { // The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that) return resizePvc(kafkaStatus, currentPvc, desiredPvc); } else { // size didn't change, just reconcile return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc) .map((Void) null); } } }); futures.add(perPvcFuture); } return Future.all(futures) .map(podIdsToRestart); }
@Test public void testVolumesBoundExpandableStorageClass(VertxTestContext context) { List<PersistentVolumeClaim> pvcs = List.of( createPvc("data-pod-0"), createPvc("data-pod-1"), createPvc("data-pod-2") ); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator PvcOperator mockPvcOps = supplier.pvcOperations; when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-"))) .thenAnswer(invocation -> { String pvcName = invocation.getArgument(1); PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null); if (currentPvc != null) { PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc) .editSpec() .withNewResources() .withRequests(Map.of("storage", new Quantity("50Gi", null))) .endResources() .endSpec() .withNewStatus() .withPhase("Bound") .withCapacity(Map.of("storage", new Quantity("50Gi", null))) .endStatus() .build(); return Future.succeededFuture(pvcWithStatus); } else { return Future.succeededFuture(); } }); ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; when(mockSco.getAsync(eq(STORAGE_CLASS_NAME))).thenReturn(Future.succeededFuture(RESIZABLE_STORAGE_CLASS)); // Reconcile the PVCs PvcReconciler reconciler = new PvcReconciler( new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), mockPvcOps, mockSco ); Checkpoint async = context.checkpoint(); reconciler.resizeAndReconcilePvcs(new KafkaStatus(), pvcs) .onComplete(res -> { assertThat(res.succeeded(), is(true)); assertThat(res.result().size(), is(0)); assertThat(pvcCaptor.getAllValues().size(), is(3)); assertThat(pvcCaptor.getAllValues(), is(pvcs)); async.flag(); }); }
public static DockerContainerStatus getContainerStatus(String containerId, PrivilegedOperationExecutor privilegedOperationExecutor, Context nmContext) { try { String currentContainerStatus = executeStatusCommand(containerId, privilegedOperationExecutor, nmContext); DockerContainerStatus dockerContainerStatus = parseContainerStatus( currentContainerStatus); LOG.debug("Container Status: {} ContainerId: {}", dockerContainerStatus.getName(), containerId); return dockerContainerStatus; } catch (ContainerExecutionException e) { LOG.debug("Container Status: {} ContainerId: {}", DockerContainerStatus.NONEXISTENT.getName(), containerId); return DockerContainerStatus.NONEXISTENT; } }
@Test public void testGetContainerStatus() throws Exception { for (DockerContainerStatus status : DockerContainerStatus.values()) { when(mockExecutor.executePrivilegedOperation(eq(null), any(PrivilegedOperation.class), eq(null), any(), eq(true), eq(false))) .thenReturn(status.getName()); assertEquals(status, DockerCommandExecutor.getContainerStatus( MOCK_CONTAINER_ID, mockExecutor, nmContext)); } }
public static Context context(int ioThreads) { return new Context(ioThreads); }
@Test(expected = ZMQException.class) public void testBindInprocSameAddress() { ZMQ.Context context = ZMQ.context(1); ZMQ.Socket socket1 = context.socket(SocketType.REQ); ZMQ.Socket socket2 = context.socket(SocketType.REQ); socket1.bind("inproc://address.already.in.use"); socket2.bind("inproc://address.already.in.use"); assertThat(socket2.errno(), is(ZError.EADDRINUSE)); socket1.close(); socket2.close(); context.term(); }
@SuppressWarnings("checkstyle:HiddenField") public AwsCredentialsProvider credentialsProvider( String accessKeyId, String secretAccessKey, String sessionToken) { if (!Strings.isNullOrEmpty(accessKeyId) && !Strings.isNullOrEmpty(secretAccessKey)) { if (Strings.isNullOrEmpty(sessionToken)) { return StaticCredentialsProvider.create( AwsBasicCredentials.create(accessKeyId, secretAccessKey)); } else { return StaticCredentialsProvider.create( AwsSessionCredentials.create(accessKeyId, secretAccessKey, sessionToken)); } } if (!Strings.isNullOrEmpty(this.clientCredentialsProvider)) { return credentialsProvider(this.clientCredentialsProvider); } // Create a new credential provider for each client return DefaultCredentialsProvider.builder().build(); }
@Test public void testCreatesNewInstanceOfDefaultCredentialsConfiguration() { AwsClientProperties awsClientProperties = new AwsClientProperties(); AwsCredentialsProvider credentialsProvider = awsClientProperties.credentialsProvider(null, null, null); AwsCredentialsProvider credentialsProvider2 = awsClientProperties.credentialsProvider(null, null, null); assertThat(credentialsProvider) .as("Should create a new instance in each call") .isNotSameAs(credentialsProvider2); }
@Udf(description = "Adds a duration to a timestamp") public Timestamp timestampAdd( @UdfParameter(description = "A unit of time, for example DAY or HOUR") final TimeUnit unit, @UdfParameter(description = "An integer number of intervals to add") final Integer interval, @UdfParameter(description = "A TIMESTAMP value.") final Timestamp timestamp ) { if (unit == null || interval == null || timestamp == null) { return null; } return new Timestamp(timestamp.getTime() + unit.toMillis(interval)); }
@Test public void handleNullTimestamp() { // When: final Timestamp result = udf.timestampAdd(TimeUnit.MILLISECONDS, -300, null); // Then: assertNull(result); }
public static void tripSuggestions( List<CharSequence> suggestions, final int maxSuggestions, List<CharSequence> stringsPool) { while (suggestions.size() > maxSuggestions) { removeSuggestion(suggestions, maxSuggestions, stringsPool); } }
@Test public void testTrimSuggestionsWithRecycleBackToPool() { ArrayList<CharSequence> list = new ArrayList<>( Arrays.<CharSequence>asList( "typed", "something", "duped", new StringBuilder("duped"), "something")); Assert.assertEquals(0, mStringPool.size()); IMEUtil.tripSuggestions(list, 2, mStringPool); Assert.assertEquals(2, list.size()); Assert.assertEquals("typed", list.get(0)); Assert.assertEquals("something", list.get(1)); Assert.assertEquals(1, mStringPool.size()); Assert.assertEquals("duped", mStringPool.get(0).toString()); Assert.assertTrue(mStringPool.get(0) instanceof StringBuilder); }
@Override public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) { String propertyTypeName = getTypeName(node); JType type; if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) { type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema); } else if (node.has("existingJavaType")) { String typeName = node.path("existingJavaType").asText(); if (isPrimitive(typeName, jClassContainer.owner())) { type = primitiveType(typeName, jClassContainer.owner()); } else { type = resolveType(jClassContainer, typeName); } } else if (propertyTypeName.equals("string")) { type = jClassContainer.owner().ref(String.class); } else if (propertyTypeName.equals("number")) { type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("integer")) { type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("boolean")) { type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("array")) { type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema); } else { type = jClassContainer.owner().ref(Object.class); } if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) { type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema); } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) { type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema); } return type; }
@Test public void applyGeneratesIntegerUsingJavaTypeLongPrimitiveWhenMinimumLessThanIntegerMin() { JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName()); ObjectNode objectNode = new ObjectMapper().createObjectNode(); objectNode.put("type", "integer"); objectNode.put("minimum", Integer.MIN_VALUE - 1L); when(config.isUsePrimitives()).thenReturn(true); JType result = rule.apply("fooBar", objectNode, null, jpackage, null); assertThat(result.fullName(), is("long")); }
public void writeTo(OutputStream out) throws IOException { if (image.getImageFormat() == OciManifestTemplate.class) { ociWriteTo(out); } else { dockerWriteTo(out); } }
@Test public void testWriteTo_oci() throws InvalidImageReferenceException, IOException, LayerPropertyNotFoundException { Image testImage = Image.builder(OciManifestTemplate.class).addLayer(mockLayer1).addLayer(mockLayer2).build(); ImageTarball imageTarball = new ImageTarball( testImage, ImageReference.parse("my/image:tag"), ImmutableSet.of("tag", "another-tag", "tag3")); ByteArrayOutputStream out = new ByteArrayOutputStream(); imageTarball.writeTo(out); ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray()); try (TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(in)) { // Verifies layer with fileA was added. TarArchiveEntry headerFileALayer = tarArchiveInputStream.getNextTarEntry(); Assert.assertEquals("blobs/sha256/" + fakeDigestA.getHash(), headerFileALayer.getName()); String fileAString = CharStreams.toString( new InputStreamReader(tarArchiveInputStream, StandardCharsets.UTF_8)); Assert.assertEquals(Blobs.writeToString(Blobs.from(fileA)), fileAString); // Verifies layer with fileB was added. TarArchiveEntry headerFileBLayer = tarArchiveInputStream.getNextTarEntry(); Assert.assertEquals("blobs/sha256/" + fakeDigestB.getHash(), headerFileBLayer.getName()); String fileBString = CharStreams.toString( new InputStreamReader(tarArchiveInputStream, StandardCharsets.UTF_8)); Assert.assertEquals(Blobs.writeToString(Blobs.from(fileB)), fileBString); // Verifies container configuration was added. TarArchiveEntry headerContainerConfiguration = tarArchiveInputStream.getNextTarEntry(); Assert.assertEquals( "blobs/sha256/011212cff4d5d6b18c7d3a00a7a2701514a1fdd3ec0d250a03756f84f3d955d4", headerContainerConfiguration.getName()); JsonTemplateMapper.readJson(tarArchiveInputStream, ContainerConfigurationTemplate.class); // Verifies manifest was added. TarArchiveEntry headerManifest = tarArchiveInputStream.getNextTarEntry(); Assert.assertEquals( "blobs/sha256/1543d061159a8d6877087938bfd62681cdeff873e1fa3e1fcf12dec358c112a4", headerManifest.getName()); JsonTemplateMapper.readJson(tarArchiveInputStream, OciManifestTemplate.class); // Verifies oci-layout was added. TarArchiveEntry headerOciLayout = tarArchiveInputStream.getNextTarEntry(); Assert.assertEquals("oci-layout", headerOciLayout.getName()); String ociLayoutJson = CharStreams.toString( new InputStreamReader(tarArchiveInputStream, StandardCharsets.UTF_8)); Assert.assertEquals("{\"imageLayoutVersion\": \"1.0.0\"}", ociLayoutJson); // Verifies index.json was added. TarArchiveEntry headerIndex = tarArchiveInputStream.getNextTarEntry(); Assert.assertEquals("index.json", headerIndex.getName()); OciIndexTemplate index = JsonTemplateMapper.readJson(tarArchiveInputStream, OciIndexTemplate.class); BuildableManifestTemplate.ContentDescriptorTemplate indexManifest = index.getManifests().get(0); Assert.assertEquals( "1543d061159a8d6877087938bfd62681cdeff873e1fa3e1fcf12dec358c112a4", indexManifest.getDigest().getHash()); } }
@Override public void configure(Map<String, ?> configs) { final SimpleConfig simpleConfig = new SimpleConfig(CONFIG_DEF, configs); final String field = simpleConfig.getString(FIELD_CONFIG); final String type = simpleConfig.getString(TARGET_TYPE_CONFIG); String formatPattern = simpleConfig.getString(FORMAT_CONFIG); final String unixPrecision = simpleConfig.getString(UNIX_PRECISION_CONFIG); schemaUpdateCache = new SynchronizedCache<>(new LRUCache<>(16)); replaceNullWithDefault = simpleConfig.getBoolean(REPLACE_NULL_WITH_DEFAULT_CONFIG); if (type.equals(TYPE_STRING) && Utils.isBlank(formatPattern)) { throw new ConfigException("TimestampConverter requires format option to be specified when using string timestamps"); } SimpleDateFormat format = null; if (!Utils.isBlank(formatPattern)) { try { format = new SimpleDateFormat(formatPattern); format.setTimeZone(UTC); } catch (IllegalArgumentException e) { throw new ConfigException("TimestampConverter requires a SimpleDateFormat-compatible pattern for string timestamps: " + formatPattern, e); } } config = new Config(field, type, format, unixPrecision); }
@Test public void testConfigInvalidTargetType() { assertThrows(ConfigException.class, () -> xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "invalid"))); }
@Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { scheduledRepetitively.mark(); return delegate.scheduleAtFixedRate(new InstrumentedPeriodicRunnable(command, period, unit), initialDelay, period, unit); }
@Test public void testScheduleFixedRateCallable() throws Exception { assertThat(submitted.getCount()).isZero(); assertThat(running.getCount()).isZero(); assertThat(completed.getCount()).isZero(); assertThat(duration.getCount()).isZero(); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isZero(); assertThat(scheduledOverrun.getCount()).isZero(); assertThat(percentOfPeriod.getCount()).isZero(); CountDownLatch countDownLatch = new CountDownLatch(1); ScheduledFuture<?> theFuture = instrumentedScheduledExecutor.scheduleAtFixedRate(() -> { assertThat(submitted.getCount()).isZero(); assertThat(running.getCount()).isEqualTo(1); assertThat(scheduledOnce.getCount()).isEqualTo(0); assertThat(scheduledRepetitively.getCount()).isEqualTo(1); try { TimeUnit.MILLISECONDS.sleep(50); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } countDownLatch.countDown(); }, 10L, 10L, TimeUnit.MILLISECONDS); TimeUnit.MILLISECONDS.sleep(100); // Give some time for the task to be run countDownLatch.await(5, TimeUnit.SECONDS); // Don't cancel until it didn't complete once theFuture.cancel(true); TimeUnit.MILLISECONDS.sleep(200); // Wait while the task is cancelled assertThat(submitted.getCount()).isZero(); assertThat(running.getCount()).isZero(); assertThat(completed.getCount()).isNotEqualTo(0); assertThat(duration.getCount()).isNotEqualTo(0); assertThat(duration.getSnapshot().size()).isNotEqualTo(0); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isEqualTo(1); assertThat(scheduledOverrun.getCount()).isNotEqualTo(0); assertThat(percentOfPeriod.getCount()).isNotEqualTo(0); }
@Override public boolean isPluginDisabled(String pluginId) { if (disabledPlugins.contains(pluginId)) { return true; } return !enabledPlugins.isEmpty() && !enabledPlugins.contains(pluginId); }
@Test public void testIsPluginDisabled() throws IOException { createEnabledFile(); createDisabledFile(); PluginStatusProvider statusProvider = new DefaultPluginStatusProvider(pluginsPath); assertFalse(statusProvider.isPluginDisabled("plugin-1")); assertTrue(statusProvider.isPluginDisabled("plugin-2")); assertTrue(statusProvider.isPluginDisabled("plugin-3")); }
public static String md5Hex(String string) { return compute(string, DigestObjectPools.MD5); }
@Test public void shouldComputeForAnEmptyStringUsingMD5() { String fingerprint = ""; String digest = md5Hex(fingerprint); assertEquals(DigestUtils.md5Hex(fingerprint), digest); }
@VisibleForTesting public int getContainerReportFailedRetrieved() { return numGetContainerReportFailedRetrieved.value(); }
@Test public void testGetContainerReportFailed() { long totalBadBefore = metrics.getContainerReportFailedRetrieved(); badSubCluster.getContainerReport(); Assert.assertEquals(totalBadBefore + 1, metrics.getContainerReportFailedRetrieved()); }
public void perform(List<Exception> list, T obj) { do { try { op.operation(list); return; } catch (Exception e) { this.errors.add(e); if (this.attempts.incrementAndGet() >= this.maxAttempts || !this.test.test(e)) { this.handleError.handleIssue(obj, e); return; //return here... don't go further } try { long testDelay = (long) Math.pow(2, this.attempts.intValue()) * 1000 + RANDOM.nextInt(1000); long delay = Math.min(testDelay, this.maxDelay); Thread.sleep(delay); } catch (InterruptedException f) { //ignore } } } while (true); }
@Test void performTest() { Retry.Operation op = (l) -> { if (!l.isEmpty()) { throw l.remove(0); } }; Retry.HandleErrorIssue<Order> handleError = (o, e) -> { }; var r1 = new Retry<>(op, handleError, 3, 30000, e -> DatabaseUnavailableException.class.isAssignableFrom(e.getClass())); var r2 = new Retry<>(op, handleError, 3, 30000, e -> DatabaseUnavailableException.class.isAssignableFrom(e.getClass())); var user = new User("Jim", "ABCD"); var order = new Order(user, "book", 10f); var arr1 = new ArrayList<>(List.of(new ItemUnavailableException(), new DatabaseUnavailableException())); try { r1.perform(arr1, order); } catch (Exception e1) { LOG.error("An exception occurred", e1); } var arr2 = new ArrayList<>(List.of(new DatabaseUnavailableException(), new ItemUnavailableException())); try { r2.perform(arr2, order); } catch (Exception e1) { LOG.error("An exception occurred", e1); } //r1 stops at ItemUnavailableException, r2 retries because it encounters DatabaseUnavailableException assertTrue(arr1.size() == 1 && arr2.isEmpty()); }
@SuppressWarnings("unchecked") @Override public void execute(String mapName, Predicate predicate, Collection<Integer> partitions, Result result) { runUsingPartitionScanWithoutPaging(mapName, predicate, partitions, result); if (predicate instanceof PagingPredicateImpl pagingPredicate) { Map.Entry<Integer, Map.Entry> nearestAnchorEntry = pagingPredicate.getNearestAnchorEntry(); result.orderAndLimit(pagingPredicate, nearestAnchorEntry); } }
@Test public void execute_fail_retryable() { PartitionScanRunner runner = mock(PartitionScanRunner.class); ParallelPartitionScanExecutor executor = executor(runner); Predicate<Object, Object> predicate = Predicates.equal("attribute", 1); QueryResult queryResult = new QueryResult(IterationType.ENTRY, null, null, Long.MAX_VALUE, false); doThrow(new RetryableHazelcastException()).when(runner).run(anyString(), eq(predicate), anyInt(), isA(QueryResult.class)); List<Integer> list = asList(1, 2, 3); assertThatThrownBy(() -> executor.execute("Map", predicate, list, queryResult)) .isInstanceOf(RetryableHazelcastException.class); }
@Override public boolean put(final V value, final T t) { addToCounter(); final boolean put = map.put(value, t); addToChangeSet(value, t); fire(); return put; }
@Test void testPut() throws Exception { map.put(new Value("hello"), "test"); assertThat(changeSet.getAdded().get(new Value("hello"))).contains("test"); assertThat(timesCalled).isEqualTo(1); }
@Override public void build(final DefaultGoPublisher publisher, final EnvironmentVariableContext environmentVariableContext, TaskExtension taskExtension, ArtifactExtension artifactExtension, PluginRequestProcessorRegistry pluginRequestProcessorRegistry, Charset consoleLogCharset) { ExecutionResult executionResult = null; try { executionResult = taskExtension.execute(pluginId, (task, pluginDescriptor) -> executeTask(task, publisher, environmentVariableContext, consoleLogCharset)); } catch (Exception e) { logException(publisher, e); } finally { JobConsoleLoggerInternal.unsetContext(); } if (executionResult == null) { logError(publisher, "ExecutionResult cannot be null. Please return a success or a failure response."); } else if (!executionResult.isSuccessful()) { logError(publisher, executionResult.getMessagesForDisplay()); } }
@Test public void shouldPublishErrorMessageIfPluginThrowsAnException() { PluggableTaskBuilder taskBuilder = new PluggableTaskBuilder(runIfConfigs, cancelBuilder, pluggableTask, TEST_PLUGIN_ID, "test-directory") { @Override protected ExecutionResult executeTask(Task task, DefaultGoPublisher publisher, EnvironmentVariableContext environmentVariableContext, Charset consoleLogCharset) { throw new RuntimeException("err"); } }; assertThatThrownBy(() -> taskBuilder.build(goPublisher, variableContext, taskExtension, null, null, UTF_8)) .isInstanceOf(RuntimeException.class) .hasMessage("err"); ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class); verify(goPublisher).taggedConsumeLine(eq(DefaultGoPublisher.ERR), captor.capture()); assertThat(captor.getValue()).isEqualTo("Error: err"); }
static Logger configureHttpLogging(Level level) { // To instantiate the static HttpTransport logger field. // Fixes https://github.com/GoogleContainerTools/jib/issues/3156. new ApacheHttpTransport(); ConsoleHandler consoleHandler = new ConsoleHandler(); consoleHandler.setLevel(level); Logger logger = Logger.getLogger(HttpTransport.class.getName()); logger.setLevel(level); logger.addHandler(consoleHandler); return logger; }
@Test public void testConfigureHttpLogging() { Logger logger = JibCli.configureHttpLogging(Level.ALL); assertThat(logger.getName()).isEqualTo("com.google.api.client.http.HttpTransport"); assertThat(logger.getLevel()).isEqualTo(Level.ALL); assertThat(logger.getHandlers()).hasLength(1); Handler handler = logger.getHandlers()[0]; assertThat(handler).isInstanceOf(ConsoleHandler.class); assertThat(handler.getLevel()).isEqualTo(Level.ALL); }
@VisibleForTesting Set<? extends Watcher> getEntries() { return Sets.newHashSet(entries); }
@Test public void testResetFromWatcher() throws Exception { Timing timing = new Timing(); CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1)); try { client.start(); final WatcherRemovalFacade removerClient = (WatcherRemovalFacade) client.newWatcherRemoveCuratorFramework(); final CountDownLatch createdLatch = new CountDownLatch(1); final CountDownLatch deletedLatch = new CountDownLatch(1); Watcher watcher = new Watcher() { @Override public void process(WatchedEvent event) { if (event.getType() == Event.EventType.NodeCreated) { try { removerClient.checkExists().usingWatcher(this).forPath("/yo"); } catch (Exception e) { e.printStackTrace(); } createdLatch.countDown(); } else if (event.getType() == Event.EventType.NodeDeleted) { deletedLatch.countDown(); } } }; removerClient.checkExists().usingWatcher(watcher).forPath("/yo"); assertEquals(removerClient.getRemovalManager().getEntries().size(), 1); removerClient.create().forPath("/yo"); assertTrue(timing.awaitLatch(createdLatch)); assertEquals(removerClient.getRemovalManager().getEntries().size(), 1); removerClient.delete().forPath("/yo"); assertTrue(timing.awaitLatch(deletedLatch)); assertEquals(removerClient.getRemovalManager().getEntries().size(), 0); } finally { TestCleanState.closeAndTestClean(client); } }
public static <K,V> List<Pair<K,V>> mapToPair(Map<K,V> map) { List<Pair<K,V>> ret = new ArrayList<>(map.size()); for(Map.Entry<K,V> entry : map.entrySet()) { ret.add(Pair.of(entry.getKey(),entry.getValue())); } return ret; }
@Test public void testMapToPair() { Map<String,String> map = new HashMap<>(); for(int i = 0; i < 5; i++) { map.put(String.valueOf(i),String.valueOf(i)); } List<Pair<String, String>> pairs = FunctionalUtils.mapToPair(map); assertEquals(map.size(),pairs.size()); }
MemberMap toMemberMap() { MemberImpl[] m = new MemberImpl[size()]; int ix = 0; for (MemberInfo memberInfo : members) { m[ix++] = memberInfo.toMember(); } return MemberMap.createNew(version, m); }
@Test public void toMemberMap() { int version = 5; MemberImpl[] members = MemberMapTest.newMembers(3); MembersView view = MembersView.createNew(version, Arrays.asList(members)); MemberMap memberMap = view.toMemberMap(); assertEquals(version, memberMap.getVersion()); assertMembersViewEquals(memberMap.getMembers().toArray(new MemberImpl[0]), view); }
@Override public PageResult<ArticleCategoryDO> getArticleCategoryPage(ArticleCategoryPageReqVO pageReqVO) { return articleCategoryMapper.selectPage(pageReqVO); }
@Test @Disabled // TODO Replace the null values with the values you need, then remove the @Disabled annotation public void testGetArticleCategoryPage() { // mock data ArticleCategoryDO dbArticleCategory = randomPojo(ArticleCategoryDO.class, o -> { // to be returned by the query later o.setName(null); o.setPicUrl(null); o.setStatus(null); o.setSort(null); o.setCreateTime(null); }); articleCategoryMapper.insert(dbArticleCategory); // test: name does not match articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setName(null))); // test: picUrl does not match articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setPicUrl(null))); // test: status does not match articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setStatus(null))); // test: sort does not match articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setSort(null))); // test: createTime does not match articleCategoryMapper.insert(cloneIgnoreId(dbArticleCategory, o -> o.setCreateTime(null))); // prepare parameters ArticleCategoryPageReqVO reqVO = new ArticleCategoryPageReqVO(); reqVO.setName(null); reqVO.setStatus(null); reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28)); // invoke PageResult<ArticleCategoryDO> pageResult = articleCategoryService.getArticleCategoryPage(reqVO); // assert assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbArticleCategory, pageResult.getList().get(0)); }
static Properties readProps(List<String> producerProps, String producerConfig) throws IOException { Properties props = new Properties(); if (producerConfig != null) { props.putAll(Utils.loadProps(producerConfig)); } if (producerProps != null) for (String prop : producerProps) { String[] pieces = prop.split("="); if (pieces.length != 2) throw new IllegalArgumentException("Invalid property: " + prop); props.put(pieces[0], pieces[1]); } props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); if (props.getProperty(ProducerConfig.CLIENT_ID_CONFIG) == null) { props.put(ProducerConfig.CLIENT_ID_CONFIG, "perf-producer-client"); } return props; }
@Test public void testReadProps() throws Exception { List<String> producerProps = Collections.singletonList("bootstrap.servers=localhost:9000"); File producerConfig = createTempFile("acks=1"); Properties prop = ProducerPerformance.readProps(producerProps, producerConfig.getAbsolutePath()); assertNotNull(prop); assertEquals(5, prop.size()); Utils.delete(producerConfig); }
public TreeCache start() throws Exception { Preconditions.checkState(treeState.compareAndSet(TreeState.LATENT, TreeState.STARTED), "already started"); if (createParentNodes) { client.createContainers(root.path); } client.getConnectionStateListenable().addListener(connectionStateListener); if (client.getZookeeperClient().isConnected()) { root.wasCreated(); } return this; }
@Test public void testSyncInitialPopulation() throws Exception { cache = newTreeCacheWithListeners(client, "/test"); cache.start(); assertEvent(TreeCacheEvent.Type.INITIALIZED); client.create().forPath("/test"); client.create().forPath("/test/one", "hey there".getBytes()); assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test"); assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/one"); assertNoMoreEvents(); }
public Span nextSpan(TraceContextOrSamplingFlags extracted) { if (extracted == null) throw new NullPointerException("extracted == null"); TraceContext context = extracted.context(); if (context != null) return newChild(context); TraceIdContext traceIdContext = extracted.traceIdContext(); if (traceIdContext != null) { return _toSpan(null, decorateContext( InternalPropagation.instance.flags(extracted.traceIdContext()), traceIdContext.traceIdHigh(), traceIdContext.traceId(), 0L, 0L, 0L, extracted.extra() )); } SamplingFlags samplingFlags = extracted.samplingFlags(); List<Object> extra = extracted.extra(); TraceContext parent = currentTraceContext.get(); int flags; long traceIdHigh = 0L, traceId = 0L, localRootId = 0L, spanId = 0L; if (parent != null) { // At this point, we didn't extract trace IDs, but do have a trace in progress. Since typical // trace sampling is up front, we retain the decision from the parent. flags = InternalPropagation.instance.flags(parent); traceIdHigh = parent.traceIdHigh(); traceId = parent.traceId(); localRootId = parent.localRootId(); spanId = parent.spanId(); extra = concat(extra, parent.extra()); } else { flags = InternalPropagation.instance.flags(samplingFlags); } return _toSpan(parent, decorateContext(flags, traceIdHigh, traceId, localRootId, spanId, 0L, extra)); }
@Test void nextSpan_fromFlags_resultantSpanIsLocalRoot() { Span span = tracer.nextSpan(TraceContextOrSamplingFlags.create(SamplingFlags.SAMPLED)); assertThat(span.context().spanId()).isEqualTo(span.context().localRootId()); // Sanity check assertThat(span.context().isLocalRoot()).isTrue(); }
@Nonnull public static List<String> fastSplit(@Nonnull String input, boolean includeEmpty, char split) { StringBuilder sb = new StringBuilder(); ArrayList<String> words = new ArrayList<>(); words.ensureCapacity(input.length() / 5); char[] strArray = input.toCharArray(); for (char c : strArray) { if (c == split) { if ((includeEmpty || !sb.isEmpty())) words.add(sb.toString()); sb.setLength(0); } else { sb.append(c); } } if ((includeEmpty || !sb.isEmpty())) words.add(sb.toString()); return words; }
@Test void testFastSplit() { // Empty discarded assertEquals(List.of("a", "b", "c", "d"), StringUtil.fastSplit("a/b/c/d", false, '/')); assertEquals(List.of("a", "b", "c"), StringUtil.fastSplit("a/b/c/", false, '/')); assertEquals(List.of("b", "c", "d"), StringUtil.fastSplit("/b/c/d", false, '/')); assertEquals(Collections.emptyList(), StringUtil.fastSplit("/", false, '/')); assertEquals(Collections.emptyList(), StringUtil.fastSplit("//", false, '/')); assertEquals(Collections.emptyList(), StringUtil.fastSplit("///", false, '/')); // Empty included assertEquals(List.of("a", "b", "c", ""), StringUtil.fastSplit("a/b/c/", true, '/')); assertEquals(List.of("", "b", "c", "d"), StringUtil.fastSplit("/b/c/d", true, '/')); assertEquals(List.of("", ""), StringUtil.fastSplit("/", true, '/')); assertEquals(List.of("", "", ""), StringUtil.fastSplit("//", true, '/')); assertEquals(List.of("", "", "", ""), StringUtil.fastSplit("///", true, '/')); }
public void validate(String clientId, String clientSecret, String workspace) { Token token = validateAccessToken(clientId, clientSecret); if (token.getScopes() == null || !token.getScopes().contains("pullrequest")) { LOG.info(MISSING_PULL_REQUEST_READ_PERMISSION + String.format(SCOPE, token.getScopes())); throw new IllegalArgumentException(ERROR_BBC_SERVERS + ": " + MISSING_PULL_REQUEST_READ_PERMISSION); } try { doGet(token.getAccessToken(), buildUrl("/repositories/" + workspace), r -> null); } catch (NotFoundException | IllegalStateException e) { throw new IllegalArgumentException(e.getMessage()); } }
@Test public void validate_fails_with_IAE_if_timeout() { server.enqueue(new MockResponse().setSocketPolicy(SocketPolicy.NO_RESPONSE)); assertThatIllegalArgumentException() .isThrownBy(() -> underTest.validate("clientId", "clientSecret", "workspace")); }
private ServiceConfig() { this(CONFIG_NAME); }
@Test public void testServiceConfig() { ServiceConfig serviceConfig = (ServiceConfig) Config.getInstance().getJsonObjectConfig(CONFIG_NAME, ServiceConfig.class); List<Map<String, Object>> singletons = serviceConfig.getSingletons(); Assert.assertTrue(singletons.size() > 0); }
Object getCellValue(Cell cell, Schema.FieldType type) { ByteString cellValue = cell.getValue(); int valueSize = cellValue.size(); switch (type.getTypeName()) { case BOOLEAN: checkArgument(valueSize == 1, message("Boolean", 1)); return cellValue.toByteArray()[0] != 0; case BYTE: checkArgument(valueSize == 1, message("Byte", 1)); return cellValue.toByteArray()[0]; case INT16: checkArgument(valueSize == 2, message("Int16", 2)); return Shorts.fromByteArray(cellValue.toByteArray()); case INT32: checkArgument(valueSize == 4, message("Int32", 4)); return Ints.fromByteArray(cellValue.toByteArray()); case INT64: checkArgument(valueSize == 8, message("Int64", 8)); return Longs.fromByteArray(cellValue.toByteArray()); case FLOAT: checkArgument(valueSize == 4, message("Float", 4)); return Float.intBitsToFloat(Ints.fromByteArray(cellValue.toByteArray())); case DOUBLE: checkArgument(valueSize == 8, message("Double", 8)); return Double.longBitsToDouble(Longs.fromByteArray(cellValue.toByteArray())); case DATETIME: return DateTime.parse(cellValue.toStringUtf8()); case STRING: return cellValue.toStringUtf8(); case BYTES: return cellValue.toByteArray(); case LOGICAL_TYPE: String identifier = checkArgumentNotNull(type.getLogicalType()).getIdentifier(); throw new IllegalStateException("Unsupported logical type: " + identifier); default: throw new IllegalArgumentException( String.format("Unsupported cell value type '%s'.", type.getTypeName())); } }
@Test public void shouldParseStringType() { byte[] value = "stringValue".getBytes(UTF_8); assertEquals("stringValue", PARSER.getCellValue(cell(value), STRING)); }
public long getNumBlocksFailedToUncache() { return numBlocksFailedToUncache.longValue(); }
@Test(timeout=60000) public void testUncacheUnknownBlock() throws Exception { // Create a file Path fileName = new Path("/testUncacheUnknownBlock"); int fileLen = 4096; DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD); HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations( fileName, 0, fileLen); // Try to uncache it without caching it first setHeartbeatResponse(uncacheBlocks(locs)); GenericTestUtils.waitFor(new Supplier<Boolean>() { @Override public Boolean get() { return fsd.getNumBlocksFailedToUncache() > 0; } }, 100, 10000); }
public static boolean isRetryOrDlqTopic(String topic) { if (StringUtils.isBlank(topic)) { return false; } return topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX) || topic.startsWith(MixAll.DLQ_GROUP_TOPIC_PREFIX); }
@Test public void testIsRetryOrDlqTopicWithEmptyTopic() { String topic = ""; boolean result = BrokerMetricsManager.isRetryOrDlqTopic(topic); assertThat(result).isFalse(); }
@Override public <T> List<T> toList(DataTable dataTable, Type itemType) { requireNonNull(dataTable, "dataTable may not be null"); requireNonNull(itemType, "itemType may not be null"); if (dataTable.isEmpty()) { return emptyList(); } ListOrProblems<T> result = toListOrProblems(dataTable, itemType); if (result.hasList()) { return unmodifiableList(result.getList()); } throw listNoConverterDefined( itemType, result.getProblems()); }
@Test void convert_to_optional_list() { DataTable table = parse("", "| 11.22 |", "| 255.999 |", "| |"); List<Optional<BigDecimal>> expected = asList( Optional.of(new BigDecimal("11.22")), Optional.of(new BigDecimal("255.999")), Optional.empty()); assertEquals(expected, converter.toList(table, OPTIONAL_BIG_DECIMAL)); }
public long get(final long key) { final long[] entries = this.entries; int index = evenLongHash(key, mask); long candidateKey; while ((candidateKey = entries[index]) != missingValue) { if (candidateKey == key) { return entries[index + 1]; } index = next(index); } return missingValue; }
@Test public void getShouldReturnMissingValueWhenEmpty() { assertEquals(MISSING_VALUE, map.get(1L)); }
@Override public List<byte[]> clusterGetKeysInSlot(int slot, Integer count) { RFuture<List<byte[]>> f = executorService.readAsync((String)null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count); return syncFuture(f); }
@Test public void testClusterGetKeysInSlot() { testInCluster(connection -> { connection.flushAll(); List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10); assertThat(keys).isEmpty(); }); }
@Override public final List<? extends FileBasedSource<T>> split( long desiredBundleSizeBytes, PipelineOptions options) throws Exception { // This implementation of method split is provided to simplify subclasses. Here we // split a FileBasedSource based on a file pattern to FileBasedSources based on full single // files. For files that can be efficiently seeked, we further split FileBasedSources based on // those files to FileBasedSources based on sub ranges of single files. String fileOrPattern = fileOrPatternSpec.get(); if (mode == Mode.FILEPATTERN) { long startTime = System.currentTimeMillis(); List<Metadata> expandedFiles = FileSystems.match(fileOrPattern, emptyMatchTreatment).metadata(); List<FileBasedSource<T>> splitResults = new ArrayList<>(expandedFiles.size()); for (Metadata metadata : expandedFiles) { FileBasedSource<T> split = createForSubrangeOfFile(metadata, 0, metadata.sizeBytes()); verify( split.getMode() == Mode.SINGLE_FILE_OR_SUBRANGE, "%s.createForSubrangeOfFile must return a source in mode %s", split, Mode.SINGLE_FILE_OR_SUBRANGE); // The split is NOT in FILEPATTERN mode, so we can call its split without fear // of recursion. This will break a single file into multiple splits when the file is // splittable and larger than the desired bundle size. splitResults.addAll(split.split(desiredBundleSizeBytes, options)); } LOG.info( "Splitting filepattern {} into bundles of size {} took {} ms " + "and produced {} files and {} bundles", fileOrPattern, desiredBundleSizeBytes, System.currentTimeMillis() - startTime, expandedFiles.size(), splitResults.size()); return splitResults; } else { FileSystems.reportSourceLineage(getSingleFileMetadata().resourceId()); if (isSplittable()) { @SuppressWarnings("unchecked") List<FileBasedSource<T>> splits = (List<FileBasedSource<T>>) super.split(desiredBundleSizeBytes, options); return splits; } else { LOG.debug( "The source for file {} is not split into sub-range based sources since " + "the file is not seekable", fileOrPattern); return ImmutableList.of(this); } } }
@Test public void testReadAllSplitsOfFilePattern() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); List<String> data1 = createStringDataset(3, 50); File file1 = createFileWithData("file1", data1); List<String> data2 = createStringDataset(3, 50); createFileWithData("file2", data2); List<String> data3 = createStringDataset(3, 50); createFileWithData("file3", data3); List<String> data4 = createStringDataset(3, 50); createFileWithData("otherfile", data4); TestFileBasedSource source = new TestFileBasedSource(new File(file1.getParent(), "file*").getPath(), 64, null); List<? extends BoundedSource<String>> sources = source.split(512, null); // Not a trivial split. assertTrue(sources.size() > 1); List<String> results = new ArrayList<>(); for (BoundedSource<String> split : sources) { results.addAll(readFromSource(split, options)); } List<String> expectedResults = new ArrayList<>(); expectedResults.addAll(data1); expectedResults.addAll(data2); expectedResults.addAll(data3); assertThat(expectedResults, containsInAnyOrder(results.toArray())); }
@Override public void checkTopicAccess( final KsqlSecurityContext securityContext, final String topicName, final AclOperation operation ) { checkAccess(new CacheKey(securityContext, AuthObjectType.TOPIC, topicName, operation)); }
@Test public void shouldThrowAuthorizationExceptionWhenBackendTopicValidatorIsDenied() { // Given doThrow(KsqlTopicAuthorizationException.class).when(backendValidator) .checkTopicAccess(securityContext, TOPIC_1, AclOperation.READ); // When: assertThrows( KsqlTopicAuthorizationException.class, () -> cache.checkTopicAccess(securityContext, TOPIC_1, AclOperation.READ) ); }
public TopicStatsImpl add(TopicStats ts) { TopicStatsImpl stats = (TopicStatsImpl) ts; this.count++; this.msgRateIn += stats.msgRateIn; this.msgThroughputIn += stats.msgThroughputIn; this.msgRateOut += stats.msgRateOut; this.msgThroughputOut += stats.msgThroughputOut; this.bytesInCounter += stats.bytesInCounter; this.msgInCounter += stats.msgInCounter; this.bytesOutCounter += stats.bytesOutCounter; this.msgOutCounter += stats.msgOutCounter; this.waitingPublishers += stats.waitingPublishers; double newAverageMsgSize = (this.averageMsgSize * (this.count - 1) + stats.averageMsgSize) / this.count; this.averageMsgSize = newAverageMsgSize; this.storageSize += stats.storageSize; this.backlogSize += stats.backlogSize; this.publishRateLimitedTimes += stats.publishRateLimitedTimes; this.offloadedStorageSize += stats.offloadedStorageSize; this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges; this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize; this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes; this.ongoingTxnCount = stats.ongoingTxnCount; this.abortedTxnCount = stats.abortedTxnCount; this.committedTxnCount = stats.committedTxnCount; this.backlogQuotaLimitTime = stats.backlogQuotaLimitTime; this.backlogQuotaLimitSize = stats.backlogQuotaLimitSize; if (stats.oldestBacklogMessageAgeSeconds > this.oldestBacklogMessageAgeSeconds) { this.oldestBacklogMessageAgeSeconds = stats.oldestBacklogMessageAgeSeconds; this.oldestBacklogMessageSubscriptionName = stats.oldestBacklogMessageSubscriptionName; } stats.bucketDelayedIndexStats.forEach((k, v) -> { TopicMetricBean topicMetricBean = this.bucketDelayedIndexStats.computeIfAbsent(k, __ -> new TopicMetricBean()); topicMetricBean.name = v.name; topicMetricBean.labelsAndValues = v.labelsAndValues; topicMetricBean.value += v.value; }); List<? extends PublisherStats> publisherStats = stats.getPublishers(); for (int index = 0; index < publisherStats.size(); index++) { PublisherStats s = publisherStats.get(index); if (s.isSupportsPartialProducer() && s.getProducerName() != null) { this.publishersMap.computeIfAbsent(s.getProducerName(), key -> { final PublisherStatsImpl newStats = new PublisherStatsImpl(); newStats.setSupportsPartialProducer(true); newStats.setProducerName(s.getProducerName()); return newStats; }).add((PublisherStatsImpl) s); } else { // Add a publisher stat entry to this.publishers // if this.publishers.size() is smaller than // the input stats.publishers.size(). // Here, index == this.publishers.size() means // this.publishers.size() is smaller than the input stats.publishers.size() if (index == this.publishers.size()) { PublisherStatsImpl newStats = new PublisherStatsImpl(); newStats.setSupportsPartialProducer(false); this.publishers.add(newStats); } this.publishers.get(index) .add((PublisherStatsImpl) s); } } for (Map.Entry<String, SubscriptionStatsImpl> entry : stats.subscriptions.entrySet()) { SubscriptionStatsImpl subscriptionStats = this.subscriptions.computeIfAbsent(entry.getKey(), k -> new SubscriptionStatsImpl()); subscriptionStats.add(entry.getValue()); } for (Map.Entry<String, ReplicatorStatsImpl> entry : stats.replication.entrySet()) { ReplicatorStatsImpl replStats = this.replication.computeIfAbsent(entry.getKey(), k -> { ReplicatorStatsImpl r = new ReplicatorStatsImpl(); r.setConnected(true); return r; }); replStats.add(entry.getValue()); } if (earliestMsgPublishTimeInBacklogs != 0 && ((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs != 0) { earliestMsgPublishTimeInBacklogs = Math.min( earliestMsgPublishTimeInBacklogs, ((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs ); } else { earliestMsgPublishTimeInBacklogs = Math.max( earliestMsgPublishTimeInBacklogs, ((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs ); } return this; }
@Test public void testAdd_EarliestMsgPublishTimeInBacklogs_Earliest() { TopicStatsImpl stats1 = new TopicStatsImpl(); stats1.earliestMsgPublishTimeInBacklogs = 10L; TopicStatsImpl stats2 = new TopicStatsImpl(); stats2.earliestMsgPublishTimeInBacklogs = 20L; TopicStatsImpl aggregate = stats1.add(stats2); assertEquals(aggregate.earliestMsgPublishTimeInBacklogs, 10L); }
public CompiledPipeline.CompiledExecution buildExecution() { return buildExecution(false); }
@SuppressWarnings({"unchecked"}) @Test public void moreThan255Parents() throws Exception { final ConfigVariableExpander cve = ConfigVariableExpander.withoutSecret(EnvironmentVariableProvider.defaultProvider()); final PipelineIR pipelineIR = ConfigCompiler.configToPipelineIR( IRHelpers.toSourceWithMetadata("input {mockinput{}} filter { " + "if [foo] != \"bar\" { " + "mockfilter {} " + "mockaddfilter {} " + "if [foo] != \"bar\" { " + "mockfilter {} " + Strings.repeat("} else if [foo] != \"bar\" {" + "mockfilter {} ", 300) + " } } " + "} output {mockoutput{} }"), false, cve); final JrubyEventExtLibrary.RubyEvent testEvent = JrubyEventExtLibrary.RubyEvent.newRubyEvent(RubyUtil.RUBY, new Event()); final Map<String, Supplier<IRubyObject>> filters = new HashMap<>(); filters.put("mockfilter", () -> IDENTITY_FILTER); filters.put("mockaddfilter", () -> ADD_FIELD_FILTER); new CompiledPipeline( pipelineIR, new CompiledPipelineTest.MockPluginFactory( Collections.singletonMap("mockinput", () -> null), filters, Collections.singletonMap("mockoutput", mockOutputSupplier()) ) ).buildExecution().compute(RubyUtil.RUBY.newArray(testEvent), false, false); final Collection<JrubyEventExtLibrary.RubyEvent> outputEvents = EVENT_SINKS.get(runId); MatcherAssert.assertThat(outputEvents.size(), CoreMatchers.is(1)); MatcherAssert.assertThat(outputEvents.contains(testEvent), CoreMatchers.is(true)); }
public void generateAcknowledgementPayload( MllpSocketBuffer mllpSocketBuffer, byte[] hl7MessageBytes, String acknowledgementCode) throws MllpAcknowledgementGenerationException { generateAcknowledgementPayload(mllpSocketBuffer, hl7MessageBytes, acknowledgementCode, null); }
@Test public void testGenerateAcknowledgementPayloadFromNullMessage() { MllpSocketBuffer mllpSocketBuffer = new MllpSocketBuffer(new MllpEndpointStub()); assertThrows(MllpAcknowledgementGenerationException.class, () -> hl7util.generateAcknowledgementPayload(mllpSocketBuffer, null, "AA")); }
public static String generateNewId(String id, int targetLength) { if (id.length() <= targetLength) { return id; } if (targetLength <= 8) { throw new IllegalArgumentException("targetLength must be greater than 8"); } HashFunction hashFunction = goodFastHash(32); String hash = hashFunction.hashUnencodedChars(id).toString(); return id.substring(0, targetLength - hash.length() - 1) + "-" + hash; }
@Test public void testGenerateNewIdShouldThrowExceptionWhenTargetLengthIsNotGreaterThanEight() { String id = "long-test-id"; assertThrows(IllegalArgumentException.class, () -> generateNewId(id, 8)); }
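A small illustrative sketch of the truncation path (not from the source; the enclosing class name IdUtils is hypothetical, and Guava's hashing library must be on the classpath):

// Illustrative sketch only. Assumes generateNewId(...) above is a static method of a class
// named "IdUtils" (hypothetical name) and that Guava is available.
public class GenerateNewIdExample {
    public static void main(String[] args) {
        // An id that already fits the target length is returned unchanged.
        System.out.println(IdUtils.generateNewId("short-id", 20)); // prints "short-id"

        // A longer id is truncated and suffixed with "-" plus the 8-hex-char hash of the full id,
        // so the result is exactly targetLength characters long.
        String shortened = IdUtils.generateNewId("a-rather-long-identifier-for-something", 20);
        System.out.println(shortened.length()); // prints 20

        // A targetLength of 8 or less is rejected with IllegalArgumentException (see the test above).
    }
}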
@Override public void run() { if (!ignoreListenShutdownHook && destroyed.compareAndSet(false, true)) { if (logger.isInfoEnabled()) { logger.info("Run shutdown hook now."); } doDestroy(); } }
@Test public void testDestoryWithModuleManagedExternally() throws InterruptedException { applicationModel.getModuleModels().get(0).setLifeCycleManagedExternally(true); new Thread(() -> { applicationModel.getModuleModels().get(0).destroy(); }) .start(); TimeUnit.MILLISECONDS.sleep(10); dubboShutdownHook.run(); Assertions.assertTrue(applicationModel.isDestroyed()); }
public static Map<String, String> toStringMap(String... pairs) { Map<String, String> parameters = new HashMap<>(); if (ArrayUtils.isEmpty(pairs)) { return parameters; } if (pairs.length % 2 != 0) { throw new IllegalArgumentException("pairs must be even."); } for (int i = 0; i < pairs.length; i = i + 2) { parameters.put(pairs[i], pairs[i + 1]); } return parameters; }
@Test void testStringMap1() { assertThat(toStringMap("key", "value"), equalTo(Collections.singletonMap("key", "value"))); }
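An illustrative usage sketch (not from the source; the enclosing utility class name CollectionUtils is an assumption):

import java.util.Map;

// Illustrative sketch only; "CollectionUtils" as the enclosing class name is an assumption.
public class ToStringMapExample {
    public static void main(String[] args) {
        // Even-length varargs are read as alternating key/value pairs.
        Map<String, String> params = CollectionUtils.toStringMap("host", "127.0.0.1", "port", "20880");
        System.out.println(params); // {host=127.0.0.1, port=20880} (HashMap, so iteration order is unspecified)

        // No arguments (or null) yields an empty, mutable map.
        System.out.println(CollectionUtils.toStringMap()); // {}

        // An odd number of arguments is rejected:
        // CollectionUtils.toStringMap("only-key"); // throws IllegalArgumentException("pairs must be even.")
    }
}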
@Override public HttpRestResult<String> httpPost(String path, Map<String, String> headers, Map<String, String> paramValues, String encode, long readTimeoutMs) throws Exception { final long endTime = System.currentTimeMillis() + readTimeoutMs; String currentServerAddr = serverListMgr.getCurrentServerAddr(); int maxRetry = this.maxRetry; HttpClientConfig httpConfig = HttpClientConfig.builder() .setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue()) .setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(3000)).build(); do { try { Header newHeaders = Header.newInstance(); if (headers != null) { newHeaders.addAll(headers); } HttpRestResult<String> result = nacosRestTemplate.postForm(getUrl(currentServerAddr, path), httpConfig, newHeaders, paramValues, String.class); if (isFail(result)) { LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}", currentServerAddr, result.getCode()); } else { // Update the currently available server addr serverListMgr.updateCurrentServerAddr(currentServerAddr); return result; } } catch (ConnectException connectException) { LOGGER.error("[NACOS ConnectException httpPost] currentServerAddr: {}, err : {}", currentServerAddr, connectException.getMessage()); } catch (SocketTimeoutException socketTimeoutException) { LOGGER.error("[NACOS SocketTimeoutException httpPost] currentServerAddr: {}, err : {}", currentServerAddr, socketTimeoutException.getMessage()); } catch (Exception ex) { LOGGER.error("[NACOS Exception httpPost] currentServerAddr: " + currentServerAddr, ex); throw ex; } if (serverListMgr.getIterator().hasNext()) { currentServerAddr = serverListMgr.getIterator().next(); } else { maxRetry--; if (maxRetry < 0) { throw new ConnectException( "[NACOS HTTP-POST] The maximum number of tolerable server reconnection errors has been reached"); } serverListMgr.refreshCurrentServerAddr(); } } while (System.currentTimeMillis() <= endTime); LOGGER.error("no available server, currentServerAddr : {}", currentServerAddr); throw new ConnectException("no available server, currentServerAddr : " + currentServerAddr); }
@Test void testHttpPostFailed() throws Exception { assertThrows(ConnectException.class, () -> { when(nacosRestTemplate.<String>postForm(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class), any(Header.class), anyMap(), eq(String.class))).thenReturn(mockResult); when(mockResult.getCode()).thenReturn(HttpURLConnection.HTTP_NOT_FOUND); serverHttpAgent.httpPost("/test", Collections.emptyMap(), Collections.emptyMap(), "UTF-8", 1000); }); }
public static void generate(String cluster, OutputStream out, List<PrometheusRawMetricsProvider> metricsProviders) throws IOException { ByteBuf buf = PulsarByteBufAllocator.DEFAULT.heapBuffer(); try { SimpleTextOutputStream stream = new SimpleTextOutputStream(buf); generateSystemMetrics(stream, cluster); if (metricsProviders != null) { for (PrometheusRawMetricsProvider metricsProvider : metricsProviders) { metricsProvider.generate(stream); } } out.write(buf.array(), buf.arrayOffset(), buf.readableBytes()); } finally { buf.release(); } }
@Test public void testGenerateSystemMetricsWithSpecifyCluster() throws Exception { String defaultClusterValue = "cluster_test"; String specifyClusterValue = "lb_x"; String metricsName = "label_contains_cluster" + randomString(); Counter counter = new Counter.Builder() .name(metricsName) .labelNames(LABEL_NAME_CLUSTER) .help("x") .register(CollectorRegistry.defaultRegistry); counter.labels(specifyClusterValue).inc(); ByteArrayOutputStream out = new ByteArrayOutputStream(); PrometheusMetricsGeneratorUtils.generate(defaultClusterValue, out, Collections.emptyList()); assertTrue(out.toString().contains( String.format("%s_total{cluster=\"%s\"} 1.0", metricsName, specifyClusterValue) )); // cleanup out.close(); CollectorRegistry.defaultRegistry.unregister(counter); }
@Override public Mono<ConfirmUsernameHashResponse> confirmUsernameHash(final ConfirmUsernameHashRequest request) { final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice(); if (request.getUsernameHash().isEmpty()) { throw Status.INVALID_ARGUMENT .withDescription("Username hash must not be empty") .asRuntimeException(); } if (request.getUsernameHash().size() != AccountController.USERNAME_HASH_LENGTH) { throw Status.INVALID_ARGUMENT .withDescription(String.format("Username hash length must be %d bytes, but was actually %d", AccountController.USERNAME_HASH_LENGTH, request.getUsernameHash().size())) .asRuntimeException(); } if (request.getZkProof().isEmpty()) { throw Status.INVALID_ARGUMENT .withDescription("Zero-knowledge proof must not be empty") .asRuntimeException(); } if (request.getUsernameCiphertext().isEmpty()) { throw Status.INVALID_ARGUMENT .withDescription("Username ciphertext must not be empty") .asRuntimeException(); } if (request.getUsernameCiphertext().size() > AccountController.MAXIMUM_USERNAME_CIPHERTEXT_LENGTH) { throw Status.INVALID_ARGUMENT .withDescription(String.format("Username ciphertext length must be at most %d bytes, but was actually %d", AccountController.MAXIMUM_USERNAME_CIPHERTEXT_LENGTH, request.getUsernameCiphertext().size())) .asRuntimeException(); } try { usernameHashZkProofVerifier.verifyProof(request.getZkProof().toByteArray(), request.getUsernameHash().toByteArray()); } catch (final BaseUsernameException e) { throw Status.INVALID_ARGUMENT.withDescription("Could not verify proof").asRuntimeException(); } return rateLimiters.getUsernameSetLimiter().validateReactive(authenticatedDevice.accountIdentifier()) .then(Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier()))) .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException)) .flatMap(account -> Mono.fromFuture(() -> accountsManager.confirmReservedUsernameHash(account, request.getUsernameHash().toByteArray(), request.getUsernameCiphertext().toByteArray()))) .map(updatedAccount -> ConfirmUsernameHashResponse.newBuilder() .setUsernameHash(ByteString.copyFrom(updatedAccount.getUsernameHash().orElseThrow())) .setUsernameLinkHandle(UUIDUtil.toByteString(updatedAccount.getUsernameLinkHandle())) .build()) .onErrorMap(UsernameReservationNotFoundException.class, throwable -> Status.FAILED_PRECONDITION.asRuntimeException()) .onErrorMap(UsernameHashNotAvailableException.class, throwable -> Status.NOT_FOUND.asRuntimeException()); }
@Test void confirmUsernameHashInvalidProof() throws BaseUsernameException { final byte[] usernameHash = TestRandomUtil.nextBytes(AccountController.USERNAME_HASH_LENGTH); final byte[] usernameCiphertext = TestRandomUtil.nextBytes(32); final byte[] zkProof = TestRandomUtil.nextBytes(32); final Account account = mock(Account.class); when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI)) .thenReturn(CompletableFuture.completedFuture(Optional.of(account))); doThrow(BaseUsernameException.class).when(usernameHashZkProofVerifier).verifyProof(any(), any()); //noinspection ResultOfMethodCallIgnored GrpcTestUtils.assertStatusException(Status.INVALID_ARGUMENT, () -> authenticatedServiceStub().confirmUsernameHash(ConfirmUsernameHashRequest.newBuilder() .setUsernameHash(ByteString.copyFrom(usernameHash)) .setUsernameCiphertext(ByteString.copyFrom(usernameCiphertext)) .setZkProof(ByteString.copyFrom(zkProof)) .build())); }
@Override public final InputStream getInputStream(final int columnIndex, final String type) throws SQLException { InputStream result = getCurrentQueryResult().getInputStream(columnIndex, type); wasNull = getCurrentQueryResult().wasNull(); return result; }
@Test void assertGetInputStream() throws SQLException { QueryResult queryResult = mock(QueryResult.class); InputStream value = mock(InputStream.class); when(queryResult.getInputStream(1, "Ascii")).thenReturn(value); streamMergedResult.setCurrentQueryResult(queryResult); assertThat(streamMergedResult.getInputStream(1, "Ascii"), is(value)); }
public static List<String> parseAddressList(String addressInfo) { if (StringUtils.isBlank(addressInfo)) { return Collections.emptyList(); } List<String> addressList = new ArrayList<>(); String[] addresses = addressInfo.split(ADDRESS_SEPARATOR); for (String address : addresses) { URI uri = URI.create(address.trim()); addressList.add(uri.getAuthority()); } return addressList; }
@Test public void testNullStr() { List<String> result = AddressUtils.parseAddressList(null); assertThat(result).isNotNull(); assertThat(result).isEmpty(); }
public EmailVerifyResult verifyEmail(long accountId, EmailVerifyRequest request) { Map<String, Object> resultMap = accountClient.verifyEmail(accountId, request.getVerificationCode()); return objectMapper.convertValue(resultMap, EmailVerifyResult.class); }
@Test public void testVerifyEmail() { EmailVerifyRequest request = new EmailVerifyRequest(); request.setVerificationCode("code"); Map<String, Object> result = Map.of( "status", "OK", "error", "custom error", "remaining_attempts", "5"); when(accountClient.verifyEmail(eq(1L), eq("code"))).thenReturn(result); EmailVerifyResult emailVerifyResult = accountService.verifyEmail(1L, request); assertEquals(Status.OK, emailVerifyResult.getStatus()); assertEquals("custom error", emailVerifyResult.getError()); assertEquals(5, emailVerifyResult.getRemainingAttempts()); }
public Optional<Integer> loadInstanceWorkerId(final String instanceId) { try { String workerId = repository.query(ComputeNode.getInstanceWorkerIdNodePath(instanceId)); return Strings.isNullOrEmpty(workerId) ? Optional.empty() : Optional.of(Integer.valueOf(workerId)); } catch (final NumberFormatException ex) { log.error("Invalid worker id for instance: {}", instanceId); } return Optional.empty(); }
@Test void assertLoadInstanceWorkerId() { InstanceMetaData instanceMetaData = new ProxyInstanceMetaData("foo_instance_id", 3307); final String instanceId = instanceMetaData.getId(); new ComputeNodePersistService(repository).loadInstanceWorkerId(instanceId); verify(repository).query(ComputeNode.getInstanceWorkerIdNodePath(instanceId)); }
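A possible companion check for the invalid-value branch (illustrative, not from the source; it reuses the same mocked repository field as the test above):

// Illustrative sketch only, reusing the mocked repository from the test above.
// A non-numeric value stored under the worker-id node is swallowed (only logged),
// and the method falls back to Optional.empty().
@Test
void assertLoadInstanceWorkerIdWithInvalidValue() {
    when(repository.query(ComputeNode.getInstanceWorkerIdNodePath("foo_instance_id"))).thenReturn("not-a-number");
    assertFalse(new ComputeNodePersistService(repository).loadInstanceWorkerId("foo_instance_id").isPresent());
}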
@Override public void updateJobResourceRequirements(JobResourceRequirements jobResourceRequirements) { if (settings.getExecutionMode() == SchedulerExecutionMode.REACTIVE) { throw new UnsupportedOperationException( "Cannot change the parallelism of a job running in reactive mode."); } final Optional<VertexParallelismStore> maybeUpdateVertexParallelismStore = DefaultVertexParallelismStore.applyJobResourceRequirements( jobInformation.getVertexParallelismStore(), jobResourceRequirements); if (maybeUpdateVertexParallelismStore.isPresent()) { this.jobInformation = new JobGraphJobInformation(jobGraph, maybeUpdateVertexParallelismStore.get()); declareDesiredResources(); state.tryRun( ResourceListener.class, ResourceListener::onNewResourceRequirements, "Current state does not react to desired parallelism changes."); } }
@Test void testRequirementLowerBoundDecreaseAfterResourceScarcityBelowAvailableSlots() throws Exception { final JobGraph jobGraph = createJobGraph(); final DefaultDeclarativeSlotPool declarativeSlotPool = createDeclarativeSlotPool(jobGraph.getJobID()); final int availableSlots = 1; JobResourceRequirements initialJobResourceRequirements = createRequirementsWithEqualLowerAndUpperParallelism(PARALLELISM); final AdaptiveScheduler scheduler = prepareSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool) .setJobResourceRequirements(initialJobResourceRequirements) .build(); final SubmissionBufferingTaskManagerGateway taskManagerGateway = createSubmissionBufferingTaskManagerGateway(PARALLELISM, scheduler); startJobWithSlotsMatchingParallelism( scheduler, declarativeSlotPool, taskManagerGateway, availableSlots); // unlock job by decreasing the parallelism JobResourceRequirements newJobResourceRequirements = createRequirementsWithLowerAndUpperParallelism(availableSlots, PARALLELISM); singleThreadMainThreadExecutor.execute( () -> scheduler.updateJobResourceRequirements(newJobResourceRequirements)); awaitJobReachingParallelism(taskManagerGateway, scheduler, availableSlots); }
public static String formatSql(final AstNode root) { final StringBuilder builder = new StringBuilder(); new Formatter(builder).process(root, 0); return StringUtils.stripEnd(builder.toString(), "\n"); }
@Test public void shouldFormatCreateTableStatementWithImplicitKey() { // Given: final CreateTable createTable = new CreateTable( TEST, ELEMENTS_WITHOUT_KEY, false, false, SOME_WITH_PROPS, false); // When: final String sql = SqlFormatter.formatSql(createTable); // Then: assertThat(sql, is("CREATE TABLE TEST (`Foo` STRING, `Bar` STRING) " + "WITH (KAFKA_TOPIC='topic_test', VALUE_FORMAT='JSON');")); }
@NotNull public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) { // Check the DB first, because the code can only be used once. // During social login, when no User is bound yet, a bind-then-login flow follows, which needs the code a second time SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state); if (socialUser != null) { return socialUser; } // Request the third-party auth user AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state); Assert.notNull(authUser, "Third-party user must not be null"); // Save to the DB socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid()); if (socialUser == null) { socialUser = new SocialUserDO(); } socialUser.setType(socialType).setCode(code).setState(state) // the code + state fields must be saved so the record can be looked up later .setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken()).setRawTokenInfo((toJsonString(authUser.getToken()))) .setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar()).setRawUserInfo(toJsonString(authUser.getRawUserInfo())); if (socialUser.getId() == null) { socialUserMapper.insert(socialUser); } else { socialUserMapper.updateById(socialUser); } return socialUser; }
@Test public void testAuthSocialUser_update() { // Prepare parameters Integer socialType = SocialTypeEnum.GITEE.getType(); Integer userType = randomEle(SocialTypeEnum.values()).getType(); String code = "tudou"; String state = "yuanma"; // Mock data socialUserMapper.insert(randomPojo(SocialUserDO.class).setType(socialType).setOpenid("test_openid")); // Mock methods AuthUser authUser = randomPojo(AuthUser.class); when(socialClientService.getAuthUser(eq(socialType), eq(userType), eq(code), eq(state))).thenReturn(authUser); // Invoke SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state); // Assert assertBindSocialUser(socialType, result, authUser); assertEquals(code, result.getCode()); assertEquals(state, result.getState()); }
@Override public synchronized ScheduleResult schedule() { dropListenersFromWhenFinishedOrNewLifespansAdded(); int overallSplitAssignmentCount = 0; ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder(); List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>(); boolean anyBlockedOnPlacements = false; boolean anyBlockedOnNextSplitBatch = false; boolean anyNotBlocked = false; for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) { Lifespan lifespan = entry.getKey(); ScheduleGroup scheduleGroup = entry.getValue(); if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS || scheduleGroup.state == ScheduleGroupState.DONE) { verify(scheduleGroup.nextSplitBatchFuture == null); } else if (scheduleGroup.pendingSplits.isEmpty()) { // try to get the next batch if (scheduleGroup.nextSplitBatchFuture == null) { scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle, lifespan, splitBatchSize); long start = System.nanoTime(); addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start)); } if (scheduleGroup.nextSplitBatchFuture.isDone()) { SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture); scheduleGroup.nextSplitBatchFuture = null; scheduleGroup.pendingSplits = new HashSet<>(nextSplits.getSplits()); if (nextSplits.isLastBatch()) { if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && scheduleGroup.pendingSplits.isEmpty()) { // Add an empty split in case no splits have been produced for the source. // For source operators, they never take input, but they may produce output. // This is well handled by Presto execution engine. // However, there are certain non-source operators that may produce output without any input, // for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is (). // Scheduling an empty split kicks off necessary driver instantiation to make this work. scheduleGroup.pendingSplits.add(new Split( splitSource.getConnectorId(), splitSource.getTransactionHandle(), new EmptySplit(splitSource.getConnectorId()), lifespan, NON_CACHEABLE)); } scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS; } } else { overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture); anyBlockedOnNextSplitBatch = true; continue; } } Multimap<InternalNode, Split> splitAssignment = ImmutableMultimap.of(); if (!scheduleGroup.pendingSplits.isEmpty()) { if (!scheduleGroup.placementFuture.isDone()) { anyBlockedOnPlacements = true; continue; } if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) { scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED; } if (state == State.INITIALIZED) { state = State.SPLITS_ADDED; } // calculate placements for splits SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(scheduleGroup.pendingSplits); splitAssignment = splitPlacementResult.getAssignments(); // remove splits with successful placements splitAssignment.values().forEach(scheduleGroup.pendingSplits::remove); // AbstractSet.removeAll performs terribly here. 
overallSplitAssignmentCount += splitAssignment.size(); // if not completely placed, mark scheduleGroup as blocked on placement if (!scheduleGroup.pendingSplits.isEmpty()) { scheduleGroup.placementFuture = splitPlacementResult.getBlocked(); overallBlockedFutures.add(scheduleGroup.placementFuture); anyBlockedOnPlacements = true; } } // if no new splits will be assigned, update state and attach completion event Multimap<InternalNode, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of(); if (scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) { scheduleGroup.state = ScheduleGroupState.DONE; if (!lifespan.isTaskWide()) { InternalNode node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy).getNodeForBucket(lifespan.getId()); noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan); } } // assign the splits with successful placements overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification)); // Assert that "placement future is not done" implies "pendingSplits is not empty". // The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line. // However, there are other reasons that could lead to this. // Note that `computeAssignments` is quite broken: // 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked. // 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion. // As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here. if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state != ScheduleGroupState.DONE) { anyNotBlocked = true; } } // * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked. // If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now. // * Even if `splitSource.isFinished()` returns true, it is not necessarily safe to tear down the split source. // * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures, // which may contain recently published splits. We must not ignore those. // * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits. // Next time it invokes getNextBatch, it will realize that. However, the invocation will fail if we tear down splitSource now. // // Since grouped execution is going to support failure recovery, and scheduled splits might have to be rescheduled during retry, // we can no longer claim schedule is complete after all splits are scheduled. // Splits schedule can only be considered as finished when all lifespan executions are done // (by calling `notifyAllLifespansFinishedExecution`) if ((state == State.NO_MORE_SPLITS || state == State.FINISHED) || (!groupedExecution && lifespanAdded && scheduleGroups.isEmpty() && splitSource.isFinished())) { switch (state) { case INITIALIZED: // We have not scheduled a single split so far. // But this shouldn't be possible. See usage of EmptySplit in this method.
throw new IllegalStateException("At least 1 split should have been scheduled for this plan node"); case SPLITS_ADDED: state = State.NO_MORE_SPLITS; splitSource.close(); // fall through case NO_MORE_SPLITS: state = State.FINISHED; whenFinishedOrNewLifespanAdded.set(null); // fall through case FINISHED: return ScheduleResult.nonBlocked( true, overallNewTasks.build(), overallSplitAssignmentCount); default: throw new IllegalStateException("Unknown state"); } } if (anyNotBlocked) { return ScheduleResult.nonBlocked(false, overallNewTasks.build(), overallSplitAssignmentCount); } if (anyBlockedOnPlacements) { // In a broadcast join, output buffers of the tasks in build source stage have to // hold onto all data produced before probe side task scheduling finishes, // even if the data is acknowledged by all known consumers. This is because // new consumers may be added until the probe side task scheduling finishes. // // As a result, the following line is necessary to prevent a deadlock // where neither build nor probe can make any progress. // The build side blocks due to a full output buffer. // In the meantime the probe side split cannot be consumed since // builder side hash table construction has not finished. // // TODO: When SourcePartitionedScheduler is used as a SourceScheduler, it shouldn't need to worry about // task scheduling and creation -- these are done by the StageScheduler. overallNewTasks.addAll(finalizeTaskCreationIfNecessary()); } ScheduleResult.BlockedReason blockedReason; if (anyBlockedOnNextSplitBatch) { blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE : WAITING_FOR_SOURCE; } else { blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP; } overallBlockedFutures.add(whenFinishedOrNewLifespanAdded); return ScheduleResult.blocked( false, overallNewTasks.build(), nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)), blockedReason, overallSplitAssignmentCount); }
@Test public void testBalancedSplitAssignment() { // use private node manager so we can add a node later InMemoryNodeManager nodeManager = new InMemoryNodeManager(); nodeManager.addNode(CONNECTOR_ID, new InternalNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false), new InternalNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false), new InternalNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false)); NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService); // Schedule 15 splits - there are 3 nodes, each node should get 5 splits SubPlan firstPlan = createPlan(); SqlStageExecution firstStage = createSqlStageExecution(firstPlan, nodeTaskMap); StageScheduler firstScheduler = getSourcePartitionedScheduler(createFixedSplitSource(15, TestingSplit::createRemoteSplit), firstStage, nodeManager, nodeTaskMap, 200); ScheduleResult scheduleResult = firstScheduler.schedule(); assertEffectivelyFinished(scheduleResult, firstScheduler); assertTrue(scheduleResult.getBlocked().isDone()); assertEquals(scheduleResult.getNewTasks().size(), 3); assertEquals(firstStage.getAllTasks().size(), 3); for (RemoteTask remoteTask : firstStage.getAllTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); assertEquals(splitsInfo.getCount(), 5); } // Add new node InternalNode additionalNode = new InternalNode("other4", URI.create("http://127.0.0.1:14"), NodeVersion.UNKNOWN, false); nodeManager.addNode(CONNECTOR_ID, additionalNode); // Schedule 5 splits in another query. Since the new node does not have any splits, all 5 splits are assigned to the new node SubPlan secondPlan = createPlan(); SqlStageExecution secondStage = createSqlStageExecution(secondPlan, nodeTaskMap); StageScheduler secondScheduler = getSourcePartitionedScheduler(createFixedSplitSource(5, TestingSplit::createRemoteSplit), secondStage, nodeManager, nodeTaskMap, 200); scheduleResult = secondScheduler.schedule(); assertEffectivelyFinished(scheduleResult, secondScheduler); assertTrue(scheduleResult.getBlocked().isDone()); assertEquals(scheduleResult.getNewTasks().size(), 1); assertEquals(secondStage.getAllTasks().size(), 1); RemoteTask task = secondStage.getAllTasks().get(0); assertEquals(task.getPartitionedSplitsInfo().getCount(), 5); firstStage.abort(); secondStage.abort(); }
@Override public void unregister(String pluginId) { mainLock.runWithWriteLock( () -> Optional.ofNullable(pluginId) .map(registeredPlugins::remove) .ifPresent(plugin -> { disabledPlugins.remove(pluginId); forQuickIndexes(quickIndex -> quickIndex.removeIfPossible(plugin)); plugin.stop(); })); }
@Test public void testUnregister() { manager.register(new TestTaskAwarePlugin()); manager.unregister(TestTaskAwarePlugin.class.getSimpleName()); Assert.assertFalse(manager.isRegistered(TestTaskAwarePlugin.class.getSimpleName())); manager.register(new TestRejectedAwarePlugin()); manager.unregister(TestRejectedAwarePlugin.class.getSimpleName()); Assert.assertFalse(manager.isRegistered(TestRejectedAwarePlugin.class.getSimpleName())); manager.register(new TestShutdownAwarePlugin()); manager.unregister(TestShutdownAwarePlugin.class.getSimpleName()); Assert.assertFalse(manager.isRegistered(TestShutdownAwarePlugin.class.getSimpleName())); manager.register(new TestExecuteAwarePlugin()); manager.unregister(TestExecuteAwarePlugin.class.getSimpleName()); Assert.assertFalse(manager.isRegistered(TestExecuteAwarePlugin.class.getSimpleName())); }
@Override public void checkAuthorization( final KsqlSecurityContext securityContext, final MetaStore metaStore, final Statement statement ) { if (statement instanceof Query) { validateQuery(securityContext, metaStore, (Query)statement); } else if (statement instanceof InsertInto) { validateInsertInto(securityContext, metaStore, (InsertInto)statement); } else if (statement instanceof CreateAsSelect) { validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement); } else if (statement instanceof PrintTopic) { validatePrintTopic(securityContext, (PrintTopic)statement); } else if (statement instanceof CreateSource) { validateCreateSource(securityContext, (CreateSource)statement); } }
@Test public void shouldThrowWhenCreateAsSelectOnNewTopicWithoutKeySchemaWritePermissions() { // Given: givenSubjectAccessDenied("topic-key", AclOperation.WRITE); final Statement statement = givenStatement(String.format( "CREATE STREAM newStream WITH (kafka_topic='topic', key_format='AVRO') " + "AS SELECT * FROM %s;", KAFKA_STREAM_TOPIC) ); // When: final Exception e = assertThrows( KsqlSchemaAuthorizationException.class, () -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement) ); // Then: assertThat(e.getMessage(), containsString(String.format( "Authorization denied to Write on Schema Registry subject: [topic-key]" ))); }
@Override public int get(int key) { if (key == NO_KEY) { return val0; } final int V = ihc.get(key); assert !isPrime(V); // Never return a Prime return V; }
@Test public void testSingleThread() { final IntHashCounter intMap = new AtomicIntHashCounter(128 * 1024); final AtomicIntegerArray intArray = new AtomicIntegerArray(128 * 1024); final ConcurrentMap<Integer, AtomicInteger> integerMap = new ConcurrentHashMap<>(128 * 1024); mode1(intMap, intArray, integerMap, 1024, 64); mode1(intMap, intArray, integerMap, 256, 256); mode1(intMap, intArray, integerMap, 64, 1024); mode1(intMap, intArray, integerMap, 16, 4 * 1024); mode1(intMap, intArray, integerMap, 4, 16 * 1024); mode1(intMap, intArray, integerMap, 1, 64 * 1024); for (Entry<Integer, AtomicInteger> entry : integerMap.entrySet()) { final int key = entry.getKey(); final AtomicInteger value = entry.getValue(); Assert.assertEquals("intArray", value.intValue(), intArray.get(key)); Assert.assertEquals("intMap", value.intValue(), intMap.get(key)); } }
public static String extractPartsFromURIs(List<String> uris) { // 1st pass on collection: find a common prefix. String commonURIPath = extractCommonPrefix(uris); // 2nd pass on collection: find a common suffix. String commonURIEnd = extractCommonSuffix(uris); // 3rd pass on collection: guess the max number of part. int partsLen = 0; for (String uri : uris) { String parts = uri.substring(commonURIPath.length() + 1); if (commonURIEnd != null) { parts = parts.substring(0, parts.lastIndexOf(commonURIEnd)); } int numOfParts = parts.split("/").length; if (numOfParts > partsLen) { partsLen = numOfParts; } } if (partsLen > 0) { StringBuilder parts = new StringBuilder(); for (int i = 0; i < partsLen; i++) { parts.append("part").append(i + 1); if (i < partsLen - 1) { parts.append(" && "); } } return parts.toString(); } return ""; }
@Test void testExtractPartsFromURIs() { // Prepare a bunch of uri paths. List<String> resourcePaths = new ArrayList<>(); resourcePaths.add("/v2/pet/findByDate/2017"); resourcePaths.add("/v2/pet/findByDate/2016/12"); resourcePaths.add("/v2/pet/findByDate/2016/12/20"); // Dispatch parts in natural order. // Should be deduced to something like "/v2/pet/findByDate/{part1}/{part2}/{part3}" String dispatchCriteria = DispatchCriteriaHelper.extractPartsFromURIs(resourcePaths); assertEquals("part1 && part2 && part3", dispatchCriteria); // 2nd case: variable part is not terminating. resourcePaths = new ArrayList<>(); resourcePaths.add("/v2/pet/1/tattoos"); resourcePaths.add("/v2/pet/23/tattoos"); // Should be deduced to something like "/v2/pet/{part1}/tattoos" dispatchCriteria = DispatchCriteriaHelper.extractPartsFromURIs(resourcePaths); assertEquals("part1", dispatchCriteria); }
public static void hasText(String text, String message) { if (!StringUtil.hasText(text)) { throw new IllegalArgumentException(message); } }
@Test(expected = IllegalArgumentException.class) public void assertHasTextAndMessageIsNull() { Assert.hasText(" "); }
@Override public PostgresConnectorEmbeddedDebeziumConfiguration getConfiguration() { return configuration; }
@Test void testIfConnectorEndpointCreatedWithConfig() throws Exception { final Map<String, Object> params = new HashMap<>(); params.put("offsetStorageFileName", "/offset_test_file"); params.put("databaseHostname", "localhost"); params.put("databaseUser", "dbz"); params.put("databasePassword", "pwd"); params.put("topicPrefix", "test"); params.put("databaseServerId", 1234); params.put("schemaHistoryInternalFileFilename", "/db_history_file_test"); final String remaining = "test_name"; final String uri = "debezium?name=test_name&offsetStorageFileName=/test&" + "databaseHostname=localhost&databaseServerId=1234&databaseUser=dbz&databasePassword=pwd&" + "topicPrefix=test&schemaHistoryInternalFileFilename=/test"; try (final DebeziumComponent debeziumComponent = new DebeziumPostgresComponent(new DefaultCamelContext())) { debeziumComponent.start(); final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, remaining, params); assertNotNull(debeziumEndpoint); // test for config final PostgresConnectorEmbeddedDebeziumConfiguration configuration = (PostgresConnectorEmbeddedDebeziumConfiguration) debeziumEndpoint.getConfiguration(); assertEquals("test_name", configuration.getName()); assertEquals("/offset_test_file", configuration.getOffsetStorageFileName()); assertEquals("localhost", configuration.getDatabaseHostname()); assertEquals("dbz", configuration.getDatabaseUser()); assertEquals("pwd", configuration.getDatabasePassword()); assertEquals("test", configuration.getTopicPrefix()); assertEquals("/db_history_file_test", configuration.getSchemaHistoryInternalFileFilename()); } }
public Translation get(String locale) { locale = locale.replace("-", "_"); Translation tr = translations.get(locale); if (locale.contains("_") && tr == null) tr = translations.get(locale.substring(0, 2)); return tr; }
@Test public void testToRoundaboutString() { Translation ptMap = SINGLETON.get("pt"); assertTrue(ptMap.tr("roundabout_exit_onto", "1", "somestreet").contains("somestreet")); }
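A possible companion check for the normalization and language fallback (illustrative, not from the source; it assumes no "pt_XX" or "zz" entries are registered in the SINGLETON map used above):

// Illustrative sketch only, reusing the SINGLETON translation map from the test above.
@Test
public void testLocaleNormalizationAndLanguageFallback() {
    // "-" is normalized to "_", so "pt-XX" is looked up as "pt_XX"; assuming that exact key
    // is absent, the lookup falls back to the two-letter language entry "pt".
    assertEquals(SINGLETON.get("pt"), SINGLETON.get("pt-XX"));
    // A locale whose language has no registered translation at all yields null (assumption: no "zz" entry).
    assertNull(SINGLETON.get("zz_ZZ"));
}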
public static boolean isBlankChar(char c) { return isBlankChar((int) c); }
@Test public void isBlankCharTest(){ final char a = '\u00A0'; assertTrue(CharUtil.isBlankChar(a)); final char a2 = '\u0020'; assertTrue(CharUtil.isBlankChar(a2)); final char a3 = '\u3000'; assertTrue(CharUtil.isBlankChar(a3)); final char a4 = '\u0000'; assertTrue(CharUtil.isBlankChar(a4)); final char a5 = ' '; assertTrue(CharUtil.isBlankChar(a5)); }
@Override public void validateSmsCode(SmsCodeValidateReqDTO reqDTO) { validateSmsCode0(reqDTO.getMobile(), reqDTO.getCode(), reqDTO.getScene()); }
@Test public void validateSmsCode_success() { // Prepare parameters SmsCodeValidateReqDTO reqDTO = randomPojo(SmsCodeValidateReqDTO.class, o -> { o.setMobile("15601691300"); o.setScene(randomEle(SmsSceneEnum.values()).getScene()); }); // Mock data SqlConstants.init(DbType.MYSQL); smsCodeMapper.insert(randomPojo(SmsCodeDO.class, o -> o.setMobile(reqDTO.getMobile()) .setScene(reqDTO.getScene()).setCode(reqDTO.getCode()).setUsed(false))); // Invoke smsCodeService.validateSmsCode(reqDTO); }
@NonNull @Override public String getId() { return ID; }
@Test public void getOrganizationsWithInvalidCredentialId() throws IOException, UnirestException { Map r = new RequestBuilder(baseUrl) .crumb(crumb) .status(400) .jwtToken(getJwtToken(j.jenkins, authenticatedUser.getId(), authenticatedUser.getId())) .post("/organizations/jenkins/scm/"+ BitbucketCloudScm.ID+"/organizations/"+getApiUrlParam()+"&credentialId=foo") .build(Map.class); }
@Override public Num calculate(BarSeries series, Position position) { return criterion.calculate(series, position).dividedBy(enterAndHoldCriterion.calculate(series, position)); }
@Test public void calculateOnlyWithLossPositions() { MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70); TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(1, series), Trade.buyAt(2, series), Trade.sellAt(5, series)); AnalysisCriterion buyAndHold = getCriterion(new ReturnCriterion()); assertNumEquals(0.95 * 0.7 / 0.7, buyAndHold.calculate(series, tradingRecord)); }
public static int[] sort(int[] array) { int[] order = new int[array.length]; for (int i = 0; i < order.length; i++) { order[i] = i; } sort(array, order); return order; }
@Test public void testSortInt() { System.out.println("sort int"); int[] data1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; int[] order1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; assertArrayEquals(order1, QuickSort.sort(data1)); int[] data2 = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; int[] order2 = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; assertArrayEquals(order2, QuickSort.sort(data2)); int[] data3 = {0, 1, 2, 3, 5, 4, 6, 7, 8, 9}; int[] order3 = {0, 1, 2, 3, 5, 4, 6, 7, 8, 9}; assertArrayEquals(order3, QuickSort.sort(data3)); int[] data4 = {4, 1, 2, 3, 0, 5, 6, 7, 8, 9}; int[] order4 = {4, 1, 2, 3, 0, 5, 6, 7, 8, 9}; assertArrayEquals(order4, QuickSort.sort(data4)); }
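A short worked example of the returned permutation, consistent with the identity cases above:

// Illustrative only: the input array is sorted in place, and order[i] holds the index
// that the element now at position i occupied in the original input.
int[] data = {3, 1, 2};
int[] order = QuickSort.sort(data);
// data  -> {1, 2, 3}
// order -> {1, 2, 0}   (1 came from index 1, 2 from index 2, 3 from index 0)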
public Object evaluate( final GenericRow row, final Object defaultValue, final ProcessingLogger logger, final Supplier<String> errorMsg ) { try { return expressionEvaluator.evaluate(new Object[]{ spec.resolveArguments(row), defaultValue, logger, row }); } catch (final Exception e) { final Throwable cause = e instanceof InvocationTargetException ? e.getCause() : e; logger.error(RecordProcessingError.recordProcessingError(errorMsg.get(), cause, row)); return defaultValue; } }
@Test public void shouldLogIfEvalThrows() throws Exception { // Given: spec.addParameter( ColumnName.of("foo1"), Integer.class, 0 ); compiledExpression = new CompiledExpression( expressionEvaluator, spec.build(), EXPRESSION_TYPE, expression ); final RuntimeException e = new RuntimeException("Boom"); when(expressionEvaluator.evaluate(any())).thenThrow(new InvocationTargetException(e)); final GenericRow row = genericRow(123); // When: compiledExpression .evaluate(row, DEFAULT_VAL, processingLogger, errorMsgSupplier); // Then: verify(processingLogger).error(RecordProcessingError .recordProcessingError("It went wrong!", e, row)); }
static void unTarUsingJava(File inFile, File untarDir, boolean gzipped) throws IOException { InputStream inputStream = null; TarArchiveInputStream tis = null; try { if (gzipped) { inputStream = new GZIPInputStream(Files.newInputStream(inFile.toPath())); } else { inputStream = Files.newInputStream(inFile.toPath()); } inputStream = new BufferedInputStream(inputStream); tis = new TarArchiveInputStream(inputStream); for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) { unpackEntries(tis, entry, untarDir); entry = tis.getNextTarEntry(); } } finally { IOUtils.cleanupWithLogger(LOG, tis, inputStream); } }
@Test(timeout = 8000) public void testCreateSymbolicLinkUsingJava() throws IOException { final File simpleTar = new File(del, FILE); OutputStream os = new FileOutputStream(simpleTar); try (TarArchiveOutputStream tos = new TarArchiveOutputStream(os)) { // Files to tar final String tmpDir = "tmp/test"; File tmpDir1 = new File(tmpDir, "dir1/"); File tmpDir2 = new File(tmpDir, "dir2/"); Verify.mkdirs(tmpDir1); Verify.mkdirs(tmpDir2); java.nio.file.Path symLink = Paths.get(tmpDir1.getPath(), "sl"); // Create Symbolic Link Files.createSymbolicLink(symLink, Paths.get(tmpDir2.getPath())); assertTrue(Files.isSymbolicLink(symLink.toAbsolutePath())); // Put entries in tar file putEntriesInTar(tos, tmpDir1.getParentFile()); tos.close(); File untarFile = new File(tmpDir, "2"); // Untar using Java FileUtil.unTarUsingJava(simpleTar, untarFile, false); // Check symbolic link and other directories are there in untar file assertTrue(Files.exists(untarFile.toPath())); assertTrue(Files.exists(Paths.get(untarFile.getPath(), tmpDir))); assertTrue(Files.isSymbolicLink(Paths.get(untarFile.getPath(), symLink.toString()))); } finally { FileUtils.deleteDirectory(new File("tmp")); } }
public List<Set<String>> getActualTableNameGroups(final String actualDataSourceName, final Set<String> logicTableNames) { return logicTableNames.stream().map(each -> getActualTableNames(actualDataSourceName, each)).filter(each -> !each.isEmpty()).collect(Collectors.toList()); }
@Test void assertGetActualTableNameGroups() { List<Set<String>> actual = multiRouteContext.getActualTableNameGroups(DATASOURCE_NAME_1, new HashSet<>(Collections.singleton(LOGIC_TABLE))); assertThat(actual.size(), is(1)); assertTrue(actual.get(0).contains(ACTUAL_TABLE)); }
@UdafFactory(description = "Compute sample standard deviation of column with type Integer.", aggregateSchema = "STRUCT<SUM integer, COUNT bigint, M2 double>") public static TableUdaf<Integer, Struct, Double> stdDevInt() { return getStdDevImplementation( 0, STRUCT_INT, (agg, newValue) -> newValue + agg.getInt32(SUM), (agg, newValue) -> Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt32(SUM) + newValue)), (agg1, agg2) -> Double.valueOf( agg1.getInt32(SUM) / agg1.getInt64(COUNT) - agg2.getInt32(SUM) / agg2.getInt64(COUNT)), (agg1, agg2) -> agg1.getInt32(SUM) + agg2.getInt32(SUM), (agg, valueToRemove) -> agg.getInt32(SUM) - valueToRemove); }
@Test public void shouldAverageZeroes() { final TableUdaf<Integer, Struct, Double> udaf = stdDevInt(); Struct agg = udaf.initialize(); final int[] values = new int[] {0, 0, 0}; for (final int thisValue : values) { agg = udaf.aggregate(thisValue, agg); } final double standardDev = udaf.map(agg); assertThat(standardDev, equalTo(0.0)); }
@Override public void exists(final String path, final boolean watch, final AsyncCallback.StatCallback cb, final Object ctx) { if (!SymlinkUtil.containsSymlink(path)) { _zk.exists(path, watch, cb, ctx); } else { SymlinkStatCallback compositeCallback = new SymlinkStatCallback(path, _defaultWatcher, cb); exists0(path, watch ? compositeCallback : null, compositeCallback, ctx); } }
@Test public void testInvalidSymlinkWithExists() throws ExecutionException, InterruptedException { final CountDownLatch latch1 = new CountDownLatch(1); final CountDownLatch latch2 = new CountDownLatch(1); final AsyncCallback.StatCallback callback = new AsyncCallback.StatCallback() { @Override public void processResult(int rc, String path, Object ctx, Stat stat) { Assert.assertEquals(path, "/foo/$link"); KeeperException.Code result = KeeperException.Code.get(rc); Assert.assertEquals(result, KeeperException.Code.NONODE); latch1.countDown(); } }; final Watcher watcher = new Watcher() { @Override public void process(WatchedEvent event) { Assert.assertEquals(event.getType(), Event.EventType.NodeDataChanged); latch2.countDown(); } }; FutureCallback<None> fcb = new FutureCallback<>(); _zkClient.setSymlinkData("/foo/$link", "INVALID", fcb); fcb.get(); _zkClient.getZooKeeper().exists("/foo/$link", watcher, callback, null); latch1.await(30, TimeUnit.SECONDS); _zkClient.setSymlinkData("/foo/$link", "/foo/bar", fcb); if (!latch2.await(30, TimeUnit.SECONDS)) { Assert.fail("Exists Watch is not triggered"); } }
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(SQLSERVER_BIT); builder.dataType(SQLSERVER_BIT); break; case TINYINT: builder.columnType(SQLSERVER_TINYINT); builder.dataType(SQLSERVER_TINYINT); break; case SMALLINT: builder.columnType(SQLSERVER_SMALLINT); builder.dataType(SQLSERVER_SMALLINT); break; case INT: builder.columnType(SQLSERVER_INT); builder.dataType(SQLSERVER_INT); break; case BIGINT: builder.columnType(SQLSERVER_BIGINT); builder.dataType(SQLSERVER_BIGINT); break; case FLOAT: builder.columnType(SQLSERVER_REAL); builder.dataType(SQLSERVER_REAL); break; case DOUBLE: builder.columnType(SQLSERVER_FLOAT); builder.dataType(SQLSERVER_FLOAT); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", SQLSERVER_DECIMAL, precision, scale)); builder.dataType(SQLSERVER_DECIMAL); builder.precision(precision); builder.scale(scale); break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(MAX_NVARCHAR); builder.dataType(MAX_NVARCHAR); } else if (column.getColumnLength() <= MAX_NVARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", SQLSERVER_NVARCHAR, column.getColumnLength())); builder.dataType(SQLSERVER_NVARCHAR); builder.length(column.getColumnLength()); } else { builder.columnType(MAX_NVARCHAR); builder.dataType(MAX_NVARCHAR); builder.length(column.getColumnLength()); } break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(MAX_VARBINARY); builder.dataType(SQLSERVER_VARBINARY); } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) { builder.columnType( String.format("%s(%s)", SQLSERVER_VARBINARY, column.getColumnLength())); builder.dataType(SQLSERVER_VARBINARY); builder.length(column.getColumnLength()); } else { builder.columnType(MAX_VARBINARY); 
builder.dataType(SQLSERVER_VARBINARY); builder.length(column.getColumnLength()); } break; case DATE: builder.columnType(SQLSERVER_DATE); builder.dataType(SQLSERVER_DATE); break; case TIME: if (column.getScale() != null && column.getScale() > 0) { int timeScale = column.getScale(); if (timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to time({})", column.getName(), column.getScale(), MAX_TIME_SCALE, timeScale); } builder.columnType(String.format("%s(%s)", SQLSERVER_TIME, timeScale)); builder.scale(timeScale); } else { builder.columnType(SQLSERVER_TIME); } builder.dataType(SQLSERVER_TIME); break; case TIMESTAMP: if (column.getScale() != null && column.getScale() > 0) { int timestampScale = column.getScale(); if (timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType( String.format("%s(%s)", SQLSERVER_DATETIME2, timestampScale)); builder.scale(timestampScale); } else { builder.columnType(SQLSERVER_DATETIME2); } builder.dataType(SQLSERVER_DATETIME2); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.SQLSERVER, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
@Test public void testReconvertDatetime() { Column column = PhysicalColumn.builder() .name("test") .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE) .build(); BasicTypeDefine typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals( SqlServerTypeConverter.SQLSERVER_DATETIME2, typeDefine.getColumnType()); Assertions.assertEquals( SqlServerTypeConverter.SQLSERVER_DATETIME2, typeDefine.getDataType()); column = PhysicalColumn.builder() .name("test") .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE) .scale(3) .build(); typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals( String.format( "%s(%s)", SqlServerTypeConverter.SQLSERVER_DATETIME2, column.getScale()), typeDefine.getColumnType()); Assertions.assertEquals( SqlServerTypeConverter.SQLSERVER_DATETIME2, typeDefine.getDataType()); Assertions.assertEquals(column.getScale(), typeDefine.getScale()); column = PhysicalColumn.builder() .name("test") .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE) .scale(9) .build(); typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals( String.format("%s(%s)", SqlServerTypeConverter.SQLSERVER_DATETIME2, 7), typeDefine.getColumnType()); Assertions.assertEquals( SqlServerTypeConverter.SQLSERVER_DATETIME2, typeDefine.getDataType()); Assertions.assertEquals(7, typeDefine.getScale()); }
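A short worked example of the decimal clamping branch above (illustrative; it assumes MAX_PRECISION is SQL Server's documented decimal limit of 38, since the constant's value is not shown here):

// Illustrative sketch only; assumes MAX_PRECISION == 38 (SQL Server's decimal limit).
// A source column declared decimal(50, 45) overflows that limit, so the converter lowers the
// scale by the overflow: scale = max(0, 45 - (50 - 38)) = 33, precision = 38.
Column column = PhysicalColumn.builder()
        .name("amount")
        .dataType(new DecimalType(50, 45))
        .build();
BasicTypeDefine typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column);
// typeDefine.getColumnType() -> the SQL Server decimal type with precision 38 and scale 33, e.g. "DECIMAL(38,33)"
// typeDefine.getPrecision()  -> 38, typeDefine.getScale() -> 33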
@Override public AwsProxyResponse handle(Throwable ex) { log.error("Called exception handler for:", ex); // adding a print stack trace in case we have no appender or we are running inside SAM local, where we need the // output to go to stderr. ex.printStackTrace(); if (ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException) { return new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR)); } else { return new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR)); } }
@Test void streamHandle_InvalidRequestEventException_500State() throws IOException { ByteArrayOutputStream respStream = new ByteArrayOutputStream(); exceptionHandler.handle(new InvalidRequestEventException(INVALID_REQUEST_MESSAGE, null), respStream); assertNotNull(respStream); assertTrue(respStream.size() > 0); AwsProxyResponse resp = objectMapper.readValue(new ByteArrayInputStream(respStream.toByteArray()), AwsProxyResponse.class); assertNotNull(resp); assertEquals(500, resp.getStatusCode()); }
public Optional<MaskTable> findMaskTable(final String tableName) { return Optional.ofNullable(tables.get(tableName)); }
@Test void assertFindMaskTableWhenTableNameDoesNotExist() { assertFalse(maskRule.findMaskTable("non_existent_table").isPresent()); }
@Override public Collection<String> getXADriverClassNames() { return Arrays.asList("com.mysql.jdbc.jdbc2.optional.MysqlXADataSource", "com.mysql.cj.jdbc.MysqlXADataSource"); }
@Test void assertGetXADriverClassName() { assertThat(new MySQLXADataSourceDefinition().getXADriverClassNames(), is(Arrays.asList("com.mysql.jdbc.jdbc2.optional.MysqlXADataSource", "com.mysql.cj.jdbc.MysqlXADataSource"))); }
@Override public String toString() { if (mUriString != null) { return mUriString; } StringBuilder sb = new StringBuilder(); if (mUri.getScheme() != null) { sb.append(mUri.getScheme()); sb.append("://"); } if (hasAuthority()) { if (mUri.getScheme() == null) { sb.append("//"); } sb.append(mUri.getAuthority().toString()); } if (mUri.getPath() != null) { String path = mUri.getPath(); if (path.indexOf('/') == 0 && hasWindowsDrive(path, true) // has windows drive && mUri.getScheme() == null // but no scheme && (mUri.getAuthority() == null || mUri.getAuthority() instanceof NoAuthority)) { // or authority path = path.substring(1); // remove slash before drive } sb.append(path); } if (mUri.getQuery() != null) { sb.append("?"); sb.append(mUri.getQuery()); } mUriString = sb.toString(); return mUriString; }
@Test public void normalizeTests() { assertEquals("/", new AlluxioURI("//").toString()); assertEquals("/foo", new AlluxioURI("/foo/").toString()); assertEquals("/foo", new AlluxioURI("/foo/").toString()); assertEquals("foo", new AlluxioURI("foo/").toString()); assertEquals("foo", new AlluxioURI("foo//").toString()); assertEquals("foo/bar", new AlluxioURI("foo//bar").toString()); assertEquals("foo/boo", new AlluxioURI("foo/bar/..//boo").toString()); assertEquals("foo/boo/baz", new AlluxioURI("foo/bar/..//boo/./baz").toString()); assertEquals("../foo/boo", new AlluxioURI("../foo/bar/..//boo").toString()); assertEquals("/../foo/boo", new AlluxioURI("/.././foo/boo").toString()); assertEquals("foo/boo", new AlluxioURI("./foo/boo").toString()); assertEquals("foo://bar boo:8080/abc/c", new AlluxioURI("foo://bar boo:8080/abc///c").toString()); }
@Override public long getMin() { if (values.length == 0) { return 0; } return values[0]; }
@Test public void calculatesTheMinimumValue() { assertThat(snapshot.getMin()) .isEqualTo(1); }
public List<PrometheusQueryResult> queryMetric(String queryString, long startTimeMs, long endTimeMs) throws IOException { URI queryUri = URI.create(_prometheusEndpoint.toURI() + QUERY_RANGE_API_PATH); HttpPost httpPost = new HttpPost(queryUri); List<NameValuePair> data = new ArrayList<>(); data.add(new BasicNameValuePair(QUERY, queryString)); /* "start" and "end" are expected to be unix timestamp in seconds (number of seconds since the Unix epoch). They accept values with a decimal point (up to 64 bits). The samples returned are inclusive of the "end" timestamp provided. */ data.add(new BasicNameValuePair(START, String.valueOf((double) startTimeMs / SEC_TO_MS))); data.add(new BasicNameValuePair(END, String.valueOf((double) endTimeMs / SEC_TO_MS))); // step is expected to be in seconds, and accept values with a decimal point (up to 64 bits). data.add(new BasicNameValuePair(STEP, String.valueOf((double) _samplingIntervalMs / SEC_TO_MS))); httpPost.setEntity(new UrlEncodedFormEntity(data)); try (CloseableHttpResponse response = _httpClient.execute(httpPost)) { int responseCode = response.getStatusLine().getStatusCode(); HttpEntity entity = response.getEntity(); InputStream content = entity.getContent(); String responseString = IOUtils.toString(content, StandardCharsets.UTF_8); if (responseCode != HttpServletResponse.SC_OK) { throw new IOException(String.format("Received non-success response code on Prometheus API HTTP call," + " response code = %d, response body = %s", responseCode, responseString)); } PrometheusResponse prometheusResponse = GSON.fromJson(responseString, PrometheusResponse.class); if (prometheusResponse == null) { throw new IOException(String.format( "No response received from Prometheus API query, response body = %s", responseString)); } if (!SUCCESS.equals(prometheusResponse.status())) { throw new IOException(String.format( "Prometheus API query was not successful, response body = %s", responseString)); } if (prometheusResponse.data() == null || prometheusResponse.data().result() == null) { throw new IOException(String.format( "Response from Prometheus HTTP API is malformed, response body = %s", responseString)); } EntityUtils.consume(entity); return prometheusResponse.data().result(); } }
@Test(expected = IOException.class)
public void testEmptyStatus() throws Exception {
    this.serverBootstrap.registerHandler("/api/v1/query_range", new HttpRequestHandler() {
        @Override
        public void handle(HttpRequest request, HttpResponse response, HttpContext context) {
            response.setStatusCode(HttpServletResponse.SC_OK);
            response.setEntity(new StringEntity("{\"data\":{\"result\": []}}", StandardCharsets.UTF_8));
        }
    });
    HttpHost httpHost = this.start();
    PrometheusAdapter prometheusAdapter = new PrometheusAdapter(this.httpclient, httpHost, SAMPLING_INTERVAL_MS);
    prometheusAdapter.queryMetric(
        "kafka_server_BrokerTopicMetrics_OneMinuteRate{name=\"BytesOutPerSec\",topic=\"\"}",
        START_TIME_MS, END_TIME_MS);
}
public Duration elapsed() { return Duration.between(startTime, isRunning() ? Instant.now() : stopTime); }
@Test
public void compareTo() {
    Duration hour = Duration.ofHours(1);
    assertTrue(stopwatch.elapsed().compareTo(hour) < 0);
    assertTrue(hour.compareTo(stopwatch.elapsed()) > 0);
}
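elapsed() measures from startTime up to "now" while the stopwatch is running, or up to the recorded stopTime once it has been stopped. A self-contained sketch of that behaviour follows; the class name, stop() method, and field layout are assumptions for illustration, not the project's actual stopwatch implementation.

import java.time.Duration;
import java.time.Instant;

public class StopwatchSketch {
    private final Instant startTime = Instant.now();
    private Instant stopTime;

    public boolean isRunning() {
        return stopTime == null;
    }

    public void stop() {
        stopTime = Instant.now();
    }

    // Same shape as the focal method: a moving end point while running,
    // a frozen end point after stop().
    public Duration elapsed() {
        return Duration.between(startTime, isRunning() ? Instant.now() : stopTime);
    }

    public static void main(String[] args) {
        StopwatchSketch stopwatch = new StopwatchSketch();
        // A freshly started stopwatch has elapsed far less than an hour.
        System.out.println(stopwatch.elapsed().compareTo(Duration.ofHours(1)) < 0); // true
        stopwatch.stop();
        Duration frozen = stopwatch.elapsed(); // no longer grows once stopped
        System.out.println(frozen.compareTo(Duration.ofHours(1)) < 0); // true
    }
}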
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowDistVariableStatement sqlStatement, final ContextManager contextManager) {
    ShardingSphereMetaData metaData = contextManager.getMetaDataContexts().getMetaData();
    String variableName = sqlStatement.getName();
    if (isConfigurationKey(variableName)) {
        return Collections.singleton(new LocalDataQueryResultRow(variableName.toLowerCase(), getConfigurationValue(metaData, variableName)));
    }
    if (isTemporaryConfigurationKey(variableName)) {
        return Collections.singleton(new LocalDataQueryResultRow(variableName.toLowerCase(), getTemporaryConfigurationValue(metaData, variableName)));
    }
    return Collections.singleton(new LocalDataQueryResultRow(variableName.toLowerCase(), getConnectionSize(variableName)));
}
@Test
void assertShowPropsVariable() {
    when(contextManager.getMetaDataContexts().getMetaData().getProps())
        .thenReturn(new ConfigurationProperties(PropertiesBuilder.build(new Property("sql-show", Boolean.TRUE.toString()))));
    ShowDistVariableExecutor executor = new ShowDistVariableExecutor();
    Collection<LocalDataQueryResultRow> actual = executor.getRows(new ShowDistVariableStatement("SQL_SHOW"), contextManager);
    assertThat(actual.size(), is(1));
    LocalDataQueryResultRow row = actual.iterator().next();
    assertThat(row.getCell(1), is("sql_show"));
    assertThat(row.getCell(2), is("true"));
}
@Override
public void write(String propertyKey, @Nullable String value) {
    checkPropertyKey(propertyKey);
    try (DbSession dbSession = dbClient.openSession(false)) {
        if (value == null || value.isEmpty()) {
            dbClient.internalPropertiesDao().saveAsEmpty(dbSession, propertyKey);
        } else {
            dbClient.internalPropertiesDao().save(dbSession, propertyKey, value);
        }
        dbSession.commit();
    }
}
@Test
public void write_calls_dao_saveAsEmpty_when_value_is_empty() {
    underTest.write(SOME_KEY, EMPTY_STRING);
    verify(internalPropertiesDao).saveAsEmpty(dbSession, SOME_KEY);
    verify(dbSession).commit();
}
@Override
public ObjectNode encode(TrafficTreatment treatment, CodecContext context) {
    checkNotNull(treatment, "Traffic treatment cannot be null");
    final ObjectNode result = context.mapper().createObjectNode();
    final ArrayNode jsonInstructions = result.putArray(INSTRUCTIONS);
    final JsonCodec<Instruction> instructionCodec = context.codec(Instruction.class);
    for (final Instruction instruction : treatment.immediate()) {
        jsonInstructions.add(instructionCodec.encode(instruction, context));
    }
    if (treatment.metered() != null) {
        for (Instructions.MeterInstruction instruction : treatment.meters()) {
            jsonInstructions.add(instructionCodec.encode(instruction, context));
        }
    }
    if (treatment.tableTransition() != null) {
        jsonInstructions.add(instructionCodec.encode(treatment.tableTransition(), context));
    }
    if (treatment.clearedDeferred()) {
        result.put(CLEAR_DEFERRED, true);
    }
    final ArrayNode jsonDeferred = result.putArray(DEFERRED);
    for (final Instruction instruction : treatment.deferred()) {
        jsonDeferred.add(instructionCodec.encode(instruction, context));
    }
    return result;
}
@Test
public void testTrafficTreatmentEncode() {
    Instruction output = Instructions.createOutput(PortNumber.portNumber(0));
    Instruction modL2Src = Instructions.modL2Src(MacAddress.valueOf("11:22:33:44:55:66"));
    Instruction modL2Dst = Instructions.modL2Dst(MacAddress.valueOf("44:55:66:77:88:99"));
    MeterId meterId = MeterId.meterId(1);
    Instruction meter = Instructions.meterTraffic(meterId);
    Instruction transition = Instructions.transition(1);
    TrafficTreatment.Builder tBuilder = DefaultTrafficTreatment.builder();
    TrafficTreatment treatment = tBuilder
        .add(output)
        .add(modL2Src)
        .add(modL2Dst)
        .add(meter)
        .add(transition)
        .build();
    ObjectNode treatmentJson = trafficTreatmentCodec.encode(treatment, context);
    assertThat(treatmentJson, TrafficTreatmentJsonMatcher.matchesTrafficTreatment(treatment));
}
@Override
public void onError(QueryException error) {
    assert error != null;
    done.compareAndSet(null, error);
}
@Test
public void when_doneWithErrorWhileWaiting_then_throw_sync() throws Exception {
    initProducer(false);
    Future<?> future = spawn(() -> {
        assertThatThrownBy(() -> iterator.hasNext(1, DAYS))
            .hasMessageContaining("mock error");
    });
    sleepMillis(50); // sleep so that the thread starts blocking in `hasNext`
    producer.onError(QueryException.error("mock error"));
    future.get();
}
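onError records only the first terminal outcome: compareAndSet(null, error) leaves `done` untouched if a result or an earlier error has already been stored, so a consumer blocked in hasNext observes exactly one completion. A plain AtomicReference sketch of that first-writer-wins behaviour; the reference type and class name are assumed here, not taken from the producer implementation.

import java.util.concurrent.atomic.AtomicReference;

public class FirstErrorWinsDemo {
    public static void main(String[] args) {
        AtomicReference<Object> done = new AtomicReference<>();

        // First completion wins; the second call is a no-op because the reference is no longer null.
        done.compareAndSet(null, new RuntimeException("mock error"));
        done.compareAndSet(null, new RuntimeException("second error, ignored"));

        System.out.println(((Throwable) done.get()).getMessage()); // mock error
    }
}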
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
    CommonTokenStream tokens = new CommonTokenStream( lexer );
    FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
    ParserHelper parserHelper = new ParserHelper(eventsManager);
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler( new FEELErrorHandler() );
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
    // pre-loads the parser with symbols
    defineVariables( inputVariableTypes, inputVariables, parser );
    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
@Test
void pathExpression() {
    String inputExpression = "[ 10, 15 ].size";
    BaseNode pathBase = parse( inputExpression );
    assertThat( pathBase).isInstanceOf(PathExpressionNode.class);
    assertThat( pathBase.getText()).isEqualTo(inputExpression);
    PathExpressionNode pathExpr = (PathExpressionNode) pathBase;
    assertThat( pathExpr.getExpression()).isInstanceOf(ListNode.class);
    assertThat( pathExpr.getExpression().getText()).isEqualTo( "10, 15");
    assertThat( pathExpr.getName()).isInstanceOf(NameRefNode.class);
    assertThat( pathExpr.getName().getText()).isEqualTo("size");
}
@Override public Map<String, String> parameters() { return parameters == null ? null : Collections.unmodifiableMap(parameters); }
@Test
public void testParameters() {
    Map<String, String> expectedParameters = new HashMap<>();
    expectedParameters.put("foo", "val");
    expectedParameters.put("bar", "baz");
    Schema schema = SchemaBuilder.string().parameter("foo", "val").parameter("bar", "baz").build();
    assertTypeAndDefault(schema, Schema.Type.STRING, false, null);
    assertMetadata(schema, null, null, null, expectedParameters);
    schema = SchemaBuilder.string().parameters(expectedParameters).build();
    assertTypeAndDefault(schema, Schema.Type.STRING, false, null);
    assertMetadata(schema, null, null, null, expectedParameters);
}
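The null-or-unmodifiable contract of parameters() means callers read the parameter map but cannot mutate it through the returned reference. A small illustration of the unmodifiable-view behaviour using only the JDK (the class below is a standalone demo, not part of the schema API):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class UnmodifiableParamsDemo {
    public static void main(String[] args) {
        Map<String, String> parameters = new HashMap<>();
        parameters.put("foo", "val");

        // Same pattern as parameters(): expose a read-only view of the backing map.
        Map<String, String> view = Collections.unmodifiableMap(parameters);
        System.out.println(view.get("foo")); // val

        try {
            view.put("bar", "baz"); // writes through the view are rejected
        } catch (UnsupportedOperationException e) {
            System.out.println("view is read-only");
        }
    }
}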
@Override
public void refreshLogRetentionSettings() throws IOException {
    UserGroupInformation user = checkAcls("refreshLogRetentionSettings");
    try {
        loginUGI.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                aggLogDelService.refreshLogRetentionSettings();
                return null;
            }
        });
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    HSAuditLogger.logSuccess(user.getShortUserName(), "refreshLogRetentionSettings", "HSAdminServer");
}
@Test
public void testRefreshLogRetentionSettings() throws Exception {
    String[] args = new String[1];
    args[0] = "-refreshLogRetentionSettings";
    hsAdminClient.run(args);
    verify(alds).refreshLogRetentionSettings();
}
public void ignoreSynchronous() { pending.get().clear(); }
@Test
public void ignoreSynchronous() {
    var dispatcher = new EventDispatcher<Integer, Integer>(Runnable::run);
    dispatcher.pending.get().add(CompletableFuture.completedFuture(null));
    dispatcher.ignoreSynchronous();
    assertThat(dispatcher.pending.get()).isEmpty();
}
public CompletionService<MultiHttpRequestResponse> executeGet(List<String> urls, @Nullable Map<String, String> requestHeaders, int timeoutMs) {
    List<Pair<String, String>> urlsAndRequestBodies = new ArrayList<>();
    urls.forEach(url -> urlsAndRequestBodies.add(Pair.of(url, "")));
    return execute(urlsAndRequestBodies, requestHeaders, timeoutMs, "GET", HttpGet::new);
}
@Test
public void testMultiGet() {
    List<String> urls = Arrays.asList(
        "http://localhost:" + String.valueOf(_portStart) + URI_PATH,
        "http://localhost:" + String.valueOf(_portStart + 1) + URI_PATH,
        "http://localhost:" + String.valueOf(_portStart + 2) + URI_PATH,
        // 2nd request to the same server
        "http://localhost:" + String.valueOf(_portStart) + URI_PATH,
        "http://localhost:" + String.valueOf(_portStart + 3) + URI_PATH);
    MultiHttpRequest mget =
        new MultiHttpRequest(Executors.newCachedThreadPool(), new PoolingHttpClientConnectionManager());
    // timeout value needs to be less than 5000ms set above for
    // third server
    final int requestTimeoutMs = 1000;
    CompletionService<MultiHttpRequestResponse> completionService = mget.executeGet(urls, null, requestTimeoutMs);
    TestResult result = collectResult(completionService, urls.size());
    Assert.assertEquals(result.getSuccess(), 2);
    Assert.assertEquals(result.getErrors(), 2);
    Assert.assertEquals(result.getTimeouts(), 1);
}
@Override
public void onIssue(Component component, DefaultIssue issue) {
    if (issue.authorLogin() != null) {
        return;
    }
    loadScmChangesets(component);
    Optional<String> scmAuthor = guessScmAuthor(issue, component);
    if (scmAuthor.isPresent()) {
        if (scmAuthor.get().length() <= IssueDto.AUTHOR_MAX_SIZE) {
            issueUpdater.setNewAuthor(issue, scmAuthor.get(), changeContext);
        } else {
            LOGGER.debug("SCM account '{}' is too long to be stored as issue author", scmAuthor.get());
        }
    }
    if (issue.assignee() == null) {
        UserIdDto userId = scmAuthor.map(scmAccountToUser::getNullable).orElse(defaultAssignee.loadDefaultAssigneeUserId());
        issueUpdater.setNewAssignee(issue, userId, changeContext);
    }
}
@Test
void assign_to_default_assignee_if_no_scm_on_issue_locations() {
    addScmUser("john", buildUserId("u123", "John C"));
    Changeset changeset = Changeset.newChangesetBuilder()
        .setAuthor("john")
        .setDate(123456789L)
        .setRevision("rev-1")
        .build();
    scmInfoRepository.setScmInfo(FILE_REF, changeset, changeset);
    DefaultIssue issue = newIssueOnLines(3);
    underTest.onIssue(FILE, issue);
    assertThat(issue.assignee()).isEqualTo("u123");
    assertThat(issue.assigneeLogin()).isEqualTo("John C");
}