focal_method (string, 13 to 60.9k chars) | test_case (string, 25 to 109k chars)
public static String getPath(ApplicationId id) { return getPath(id, false); }
@Test void testGetPathApplicationIdString() { assertEquals("/proxy/application_6384623_0005", ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623L, 5), null)); assertEquals("/proxy/application_6384623_0005/static/app", ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623L, 5), "/static/app")); assertEquals("/proxy/application_6384623_0005/", ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623L, 5), "/")); assertEquals("/proxy/application_6384623_0005/some/path", ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623L, 5), "some/path")); }
public synchronized <K, V> KStream<K, V> stream(final String topic) { return stream(Collections.singleton(topic)); }
@Test public void shouldThrowWhenSubscribedToATopicWithSetAndUnsetResetPolicies() { builder.stream("topic", Consumed.with(AutoOffsetReset.EARLIEST)); builder.stream("topic"); assertThrows(TopologyException.class, builder::build); }
@GET @Path("/{connector}/tasks-config") @Operation(deprecated = true, summary = "Get the configuration of all tasks for the specified connector") public Map<ConnectorTaskId, Map<String, String>> getTasksConfig( final @PathParam("connector") String connector) throws Throwable { log.warn("The 'GET /connectors/{connector}/tasks-config' endpoint is deprecated and will be removed in the next major release. " + "Please use the 'GET /connectors/{connector}/tasks' endpoint instead."); FutureCallback<Map<ConnectorTaskId, Map<String, String>>> cb = new FutureCallback<>(); herder.tasksConfig(connector, cb); return requestHandler.completeRequest(cb); }
@Test public void testGetTasksConfig() throws Throwable { final ConnectorTaskId connectorTask0 = new ConnectorTaskId(CONNECTOR_NAME, 0); final Map<String, String> connectorTask0Configs = new HashMap<>(); connectorTask0Configs.put("connector-task0-config0", "123"); connectorTask0Configs.put("connector-task0-config1", "456"); final ConnectorTaskId connectorTask1 = new ConnectorTaskId(CONNECTOR_NAME, 1); final Map<String, String> connectorTask1Configs = new HashMap<>(); connectorTask1Configs.put("connector-task1-config0", "321"); connectorTask1Configs.put("connector-task1-config1", "654"); final ConnectorTaskId connector2Task0 = new ConnectorTaskId(CONNECTOR2_NAME, 0); final Map<String, String> connector2Task0Configs = Collections.singletonMap("connector2-task0-config0", "789"); final Map<ConnectorTaskId, Map<String, String>> expectedTasksConnector = new HashMap<>(); expectedTasksConnector.put(connectorTask0, connectorTask0Configs); expectedTasksConnector.put(connectorTask1, connectorTask1Configs); final Map<ConnectorTaskId, Map<String, String>> expectedTasksConnector2 = new HashMap<>(); expectedTasksConnector2.put(connector2Task0, connector2Task0Configs); final ArgumentCaptor<Callback<Map<ConnectorTaskId, Map<String, String>>>> cb1 = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb1, expectedTasksConnector).when(herder).tasksConfig(eq(CONNECTOR_NAME), cb1.capture()); final ArgumentCaptor<Callback<Map<ConnectorTaskId, Map<String, String>>>> cb2 = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb2, expectedTasksConnector2).when(herder).tasksConfig(eq(CONNECTOR2_NAME), cb2.capture()); Map<ConnectorTaskId, Map<String, String>> tasksConfig = connectorsResource.getTasksConfig(CONNECTOR_NAME); assertEquals(expectedTasksConnector, tasksConfig); Map<ConnectorTaskId, Map<String, String>> tasksConfig2 = connectorsResource.getTasksConfig(CONNECTOR2_NAME); assertEquals(expectedTasksConnector2, tasksConfig2); }
@Override public void start() { this.all = registry.meter(name(getName(), "all")); this.trace = registry.meter(name(getName(), "trace")); this.debug = registry.meter(name(getName(), "debug")); this.info = registry.meter(name(getName(), "info")); this.warn = registry.meter(name(getName(), "warn")); this.error = registry.meter(name(getName(), "error")); super.start(); }
@Test public void usesRegistryFromProperty() { SharedMetricRegistries.add("something_else", registry); System.setProperty(InstrumentedAppender.REGISTRY_PROPERTY_NAME, "something_else"); final InstrumentedAppender shared = new InstrumentedAppender(); shared.start(); when(event.getLevel()).thenReturn(Level.INFO); shared.doAppend(event); assertThat(SharedMetricRegistries.names()).contains("something_else"); assertThat(registry.meter(METRIC_NAME_PREFIX + ".info").getCount()) .isEqualTo(1); }
@Udf public String rpad( @UdfParameter(description = "String to be padded") final String input, @UdfParameter(description = "Target length") final Integer targetLen, @UdfParameter(description = "Padding string") final String padding) { if (input == null) { return null; } if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) { return null; } final StringBuilder sb = new StringBuilder(targetLen + padding.length()); sb.append(input); final int padChars = Math.max(targetLen - input.length(), 0); for (int i = 0; i < padChars; i += padding.length()) { sb.append(padding); } sb.setLength(targetLen); return sb.toString(); }
@Test public void shouldReturnNullForNegativeLengthString() { final String result = udf.rpad("foo", -1, "bar"); assertThat(result, is(nullValue())); }
public static <T, K> AggregateOperation1<T, Map<K, List<T>>, Map<K, List<T>>> groupingBy( FunctionEx<? super T, ? extends K> keyFn ) { checkSerializable(keyFn, "keyFn"); return groupingBy(keyFn, toList()); }
@Test public void when_groupingBy_withSameKey() { Entry<String, Integer> entryA = entry("a", 1); validateOpWithoutDeduct( groupingBy(entryKey()), identity(), entryA, entryA, asMap("a", singletonList(entryA)), asMap("a", asList(entryA, entryA)), asMap("a", asList(entryA, entryA)) ); }
public static Optional<DynamicTableSink> getDynamicTableSink( ContextResolvedTable contextResolvedTable, LogicalTableModify tableModify, CatalogManager catalogManager) { final FlinkContext context = ShortcutUtils.unwrapContext(tableModify.getCluster()); CatalogBaseTable catalogBaseTable = contextResolvedTable.getTable(); // only consider DynamicTableSink if (catalogBaseTable instanceof CatalogTable) { ResolvedCatalogTable resolvedTable = contextResolvedTable.getResolvedTable(); Optional<Catalog> optionalCatalog = contextResolvedTable.getCatalog(); ObjectIdentifier objectIdentifier = contextResolvedTable.getIdentifier(); boolean isTemporary = contextResolvedTable.isTemporary(); // only consider the CatalogTable that doesn't use legacy connector sink option if (!contextResolvedTable.isAnonymous() && !TableFactoryUtil.isLegacyConnectorOptions( catalogManager .getCatalog(objectIdentifier.getCatalogName()) .orElse(null), context.getTableConfig(), !context.isBatchMode(), objectIdentifier, resolvedTable, isTemporary)) { // create table dynamic table sink DynamicTableSink tableSink = ExecutableOperationUtils.createDynamicTableSink( optionalCatalog.orElse(null), () -> context.getModuleManager() .getFactory((Module::getTableSinkFactory)), objectIdentifier, resolvedTable, Collections.emptyMap(), context.getTableConfig(), context.getClassLoader(), contextResolvedTable.isTemporary()); return Optional.of(tableSink); } } return Optional.empty(); }
@Test public void testGetDynamicTableSink() { // create a table with connector = test-update-delete Map<String, String> options = new HashMap<>(); options.put("connector", "test-update-delete"); CatalogTable catalogTable = createTestCatalogTable(options); ObjectIdentifier tableId = ObjectIdentifier.of("builtin", "default", "t"); catalogManager.createTable(catalogTable, tableId, false); ContextResolvedTable resolvedTable = ContextResolvedTable.permanent( tableId, catalog, catalogManager.resolveCatalogTable(catalogTable)); LogicalTableModify tableModify = getTableModifyFromSql("DELETE FROM t"); Optional<DynamicTableSink> optionalDynamicTableSink = DeletePushDownUtils.getDynamicTableSink(resolvedTable, tableModify, catalogManager); // verify we can get the dynamic table sink assertThat(optionalDynamicTableSink).isPresent(); assertThat(optionalDynamicTableSink.get()) .isInstanceOf(TestUpdateDeleteTableFactory.SupportsDeletePushDownSink.class); // create table with connector = COLLECTION, it's legacy table sink options.put("connector", "COLLECTION"); catalogTable = createTestCatalogTable(options); tableId = ObjectIdentifier.of("builtin", "default", "t1"); catalogManager.createTable(catalogTable, tableId, false); resolvedTable = ContextResolvedTable.permanent( tableId, catalog, catalogManager.resolveCatalogTable(catalogTable)); tableModify = getTableModifyFromSql("DELETE FROM t1"); optionalDynamicTableSink = DeletePushDownUtils.getDynamicTableSink(resolvedTable, tableModify, catalogManager); // verify it should be empty since it's not an instance of DynamicTableSink but is legacy // TableSink assertThat(optionalDynamicTableSink).isEmpty(); }
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) { FunctionConfig mergedConfig = existingConfig.toBuilder().build(); if (!existingConfig.getTenant().equals(newConfig.getTenant())) { throw new IllegalArgumentException("Tenants differ"); } if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) { throw new IllegalArgumentException("Namespaces differ"); } if (!existingConfig.getName().equals(newConfig.getName())) { throw new IllegalArgumentException("Function Names differ"); } if (!StringUtils.isEmpty(newConfig.getClassName())) { mergedConfig.setClassName(newConfig.getClassName()); } if (!StringUtils.isEmpty(newConfig.getJar())) { mergedConfig.setJar(newConfig.getJar()); } if (newConfig.getInputSpecs() == null) { newConfig.setInputSpecs(new HashMap<>()); } if (mergedConfig.getInputSpecs() == null) { mergedConfig.setInputSpecs(new HashMap<>()); } if (newConfig.getInputs() != null) { newConfig.getInputs().forEach((topicName -> { newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder().isRegexPattern(false).build()); })); } if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) { newConfig.getInputSpecs().put(newConfig.getTopicsPattern(), ConsumerConfig.builder() .isRegexPattern(true) .build()); } if (newConfig.getCustomSerdeInputs() != null) { newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> { newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder() .serdeClassName(serdeClassName) .isRegexPattern(false) .build()); }); } if (newConfig.getCustomSchemaInputs() != null) { newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> { newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder() .schemaType(schemaClassname) .isRegexPattern(false) .build()); }); } if (!newConfig.getInputSpecs().isEmpty()) { newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> { if (!existingConfig.getInputSpecs().containsKey(topicName)) { throw new IllegalArgumentException("Input Topics cannot be altered"); } if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) { throw new IllegalArgumentException( "isRegexPattern for input topic " + topicName + " cannot be altered"); } mergedConfig.getInputSpecs().put(topicName, consumerConfig); }); } if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName() .equals(existingConfig.getOutputSerdeClassName())) { throw new IllegalArgumentException("Output Serde mismatch"); } if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType() .equals(existingConfig.getOutputSchemaType())) { throw new IllegalArgumentException("Output Schema mismatch"); } if (!StringUtils.isEmpty(newConfig.getLogTopic())) { mergedConfig.setLogTopic(newConfig.getLogTopic()); } if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees() .equals(existingConfig.getProcessingGuarantees())) { throw new IllegalArgumentException("Processing Guarantees cannot be altered"); } if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering() .equals(existingConfig.getRetainOrdering())) { throw new IllegalArgumentException("Retain Ordering cannot be altered"); } if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering() .equals(existingConfig.getRetainKeyOrdering())) { throw new IllegalArgumentException("Retain Key Ordering cannot be altered"); } if (!StringUtils.isEmpty(newConfig.getOutput())) { mergedConfig.setOutput(newConfig.getOutput()); } if (newConfig.getUserConfig() != null) { mergedConfig.setUserConfig(newConfig.getUserConfig()); } if (newConfig.getSecrets() != null) { mergedConfig.setSecrets(newConfig.getSecrets()); } if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) { throw new IllegalArgumentException("Runtime cannot be altered"); } if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) { throw new IllegalArgumentException("AutoAck cannot be altered"); } if (newConfig.getMaxMessageRetries() != null) { mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries()); } if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) { mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic()); } if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName() .equals(existingConfig.getSubName())) { throw new IllegalArgumentException("Subscription Name cannot be altered"); } if (newConfig.getParallelism() != null) { mergedConfig.setParallelism(newConfig.getParallelism()); } if (newConfig.getResources() != null) { mergedConfig .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources())); } if (newConfig.getWindowConfig() != null) { mergedConfig.setWindowConfig(newConfig.getWindowConfig()); } if (newConfig.getTimeoutMs() != null) { mergedConfig.setTimeoutMs(newConfig.getTimeoutMs()); } if (newConfig.getCleanupSubscription() != null) { mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription()); } if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) { mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags()); } if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) { mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions()); } if (newConfig.getProducerConfig() != null) { mergedConfig.setProducerConfig(newConfig.getProducerConfig()); } return mergedConfig; }
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Retain Key Ordering cannot be altered") public void testMergeDifferentRetainKeyOrdering() { FunctionConfig functionConfig = createFunctionConfig(); FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("retainKeyOrdering", true); FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig); }
@Override public Long createNotifyTemplate(NotifyTemplateSaveReqVO createReqVO) { // check that the notification template code is not duplicated validateNotifyTemplateCodeDuplicate(null, createReqVO.getCode()); // insert NotifyTemplateDO notifyTemplate = BeanUtils.toBean(createReqVO, NotifyTemplateDO.class); notifyTemplate.setParams(parseTemplateContentParams(notifyTemplate.getContent())); notifyTemplateMapper.insert(notifyTemplate); return notifyTemplate.getId(); }
@Test public void testCreateNotifyTemplate_success() { // prepare the request NotifyTemplateSaveReqVO reqVO = randomPojo(NotifyTemplateSaveReqVO.class, o -> o.setStatus(randomCommonStatus())) .setId(null); // prevent id from being pre-assigned // invoke Long notifyTemplateId = notifyTemplateService.createNotifyTemplate(reqVO); // assert assertNotNull(notifyTemplateId); // verify the persisted record's attributes NotifyTemplateDO notifyTemplate = notifyTemplateMapper.selectById(notifyTemplateId); assertPojoEquals(reqVO, notifyTemplate, "id"); }
@Override public Predicate<FileInfo> get() { long currentTimeMS = System.currentTimeMillis(); Interval interval = Interval.between(currentTimeMS, currentTimeMS + 1); return fileInfo -> { try { return interval.intersect(mInterval.add(mGetter.apply(fileInfo))).isValid(); } catch (RuntimeException e) { LOG.debug("Failed to filter: ", e); return false; } }; }
@Test public void testTimePredicateFactories() { FileFilter filter = FileFilter.newBuilder().setName("unmodifiedFor").setValue("1s").build(); long timestamp = System.currentTimeMillis(); FileInfo info = new FileInfo(); info.setLastModificationTimeMs(timestamp); assertFalse(FilePredicate.create(filter).get().test(info)); info.setLastModificationTimeMs(timestamp - 1000); assertTrue(FilePredicate.create(filter).get().test(info)); }
public FEELFnResult<List> invoke(@ParameterName( "list" ) Object list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } // spec requires us to return a new list final List<Object> result = new ArrayList<>(); flattenList( list, result ); return FEELFnResult.ofResult( result ); }
@Test void invokeNull() { FunctionTestUtil.assertResultError(flattenFunction.invoke(null), InvalidParametersEvent.class); }
@Override public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) { // Partition the requested config resources based on which broker they must be sent to with the // null broker being used for config resources which can be obtained from any broker final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> nodeFutures = new HashMap<>(configResources.size()); for (ConfigResource resource : configResources) { Integer broker = nodeFor(resource); nodeFutures.compute(broker, (key, value) -> { if (value == null) { value = new HashMap<>(); } value.put(resource, new KafkaFutureImpl<>()); return value; }); } final long now = time.milliseconds(); for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : nodeFutures.entrySet()) { final Integer node = entry.getKey(); Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue(); runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()), node != null ? new ConstantNodeIdProvider(node, true) : new LeastLoadedBrokerOrActiveKController()) { @Override DescribeConfigsRequest.Builder createRequest(int timeoutMs) { return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() .setResources(unified.keySet().stream() .map(config -> new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceName(config.name()) .setResourceType(config.type().id()) .setConfigurationKeys(null)) .collect(Collectors.toList())) .setIncludeSynonyms(options.includeSynonyms()) .setIncludeDocumentation(options.includeDocumentation())); } @Override void handleResponse(AbstractResponse abstractResponse) { DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse; for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) { ConfigResource configResource = entry.getKey(); DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue(); KafkaFutureImpl<Config> future = unified.get(configResource); if (future == null) { if (node != null) { log.warn("The config {} in the response from node {} is not in the request", configResource, node); } else { log.warn("The config {} in the response from the least loaded broker is not in the request", configResource); } } else { if (describeConfigsResult.errorCode() != Errors.NONE.code()) { future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode()) .exception(describeConfigsResult.errorMessage())); } else { future.complete(describeConfigResult(describeConfigsResult)); } } } completeUnrealizedFutures( unified.entrySet().stream(), configResource -> "The node response did not contain a result for config resource " + configResource); } @Override void handleFailure(Throwable throwable) { completeAllExceptionally(unified.values(), throwable); } }, now); } return new DescribeConfigsResult( nodeFutures.entrySet() .stream() .flatMap(x -> x.getValue().entrySet().stream()) .collect(Collectors.toMap( Map.Entry::getKey, Map.Entry::getValue, (oldValue, newValue) -> { // Duplicate keys should not occur, throw an exception to signal this issue throw new IllegalStateException(String.format("Duplicate key for values: %s and %s", oldValue, newValue)); }, HashMap::new )) ); }
@Test public void testDescribeClientMetricsConfigs() throws Exception { ConfigResource resource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "sub1"); ConfigResource resource1 = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "sub2"); try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(new DescribeConfigsResponse( new DescribeConfigsResponseData().setResults(asList( new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(resource.name()).setResourceType(resource.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList()), new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(resource1.name()).setResourceType(resource1.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList()))))); Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList( resource, resource1)).values(); assertEquals(new HashSet<>(asList(resource, resource1)), result.keySet()); assertNotNull(result.get(resource).get()); assertNotNull(result.get(resource1).get()); } }
@Override public void subscribe(Subscriber<? super T>[] subscribers) { if (!validate(subscribers)) { return; } @SuppressWarnings("unchecked") Subscriber<? super T>[] newSubscribers = new Subscriber[subscribers.length]; for (int i = 0; i < subscribers.length; i++) { AutoDisposingSubscriberImpl<? super T> subscriber = new AutoDisposingSubscriberImpl<>(scope, subscribers[i]); newSubscribers[i] = subscriber; } source.subscribe(newSubscribers); }
@Test public void autoDispose_withProvider() { TestSubscriber<Integer> firstSubscriber = new TestSubscriber<>(); TestSubscriber<Integer> secondSubscriber = new TestSubscriber<>(); PublishProcessor<Integer> source = PublishProcessor.create(); CompletableSubject scope = CompletableSubject.create(); ScopeProvider provider = TestUtil.makeProvider(scope); //noinspection unchecked Subscriber<Integer>[] subscribers = new Subscriber[] {firstSubscriber, secondSubscriber}; source.parallel(DEFAULT_PARALLELISM).to(autoDisposable(provider)).subscribe(subscribers); assertThat(firstSubscriber.hasSubscription()).isTrue(); assertThat(secondSubscriber.hasSubscription()).isTrue(); assertThat(source.hasSubscribers()).isTrue(); assertThat(scope.hasObservers()).isTrue(); source.onNext(1); source.onNext(2); firstSubscriber.assertValue(1); secondSubscriber.assertValue(2); source.onNext(3); source.onNext(4); assertThat(source.hasSubscribers()).isTrue(); assertThat(scope.hasObservers()).isTrue(); firstSubscriber.assertValues(1, 3); secondSubscriber.assertValues(2, 4); scope.onComplete(); source.onNext(5); source.onNext(6); firstSubscriber.assertValues(1, 3); secondSubscriber.assertValues(2, 4); assertThat(source.hasSubscribers()).isFalse(); assertThat(scope.hasObservers()).isFalse(); }
@SuppressWarnings("unchecked") @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { try { if (statement.getStatement() instanceof CreateAsSelect) { registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement); } else if (statement.getStatement() instanceof CreateSource) { registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement); } } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { throw new KsqlStatementException( ErrorMessageUtil.buildErrorMessage(e), statement.getMaskedStatementText(), e.getCause()); } // Remove schema id from SessionConfig return stripSchemaIdConfig(statement); }
@Test public void shouldThrowInconsistentValueSchemaTypeExceptionWithOverrideSchema() { // Given: final SchemaAndId schemaAndId = SchemaAndId.schemaAndId(SCHEMA.value(), AVRO_SCHEMA, 1); givenStatement("CREATE STREAM source (id int key, f1 varchar) " + "WITH (" + "kafka_topic='expectedName', " + "key_format='PROTOBUF', " + "value_format='PROTOBUF', " + "value_schema_id=1, " + "partitions=1" + ");", Pair.of(null, schemaAndId)); // When: final Exception e = assertThrows( KsqlStatementException.class, () -> injector.inject(statement) ); // Then: assertThat(e.getMessage(), containsString("Format and fetched schema type using " + "VALUE_SCHEMA_ID 1 are different. Format: [PROTOBUF], Fetched schema type: [AVRO].")); }
public Certificate getCertificate(Long id) { Optional<Certificate> cert = certificateRepository.findById(id); if (!cert.isPresent()) { throw new NotFoundException("Could not find certificate with id: " + id); } return cert.get(); }
@Test public void certificateNotFound() { Optional<Certificate> certificateOptional = Optional.empty(); when(certificateRepositoryMock.findById(anyLong())).thenReturn(certificateOptional); assertThrows(NotFoundException.class, () -> { certificateServiceMock.getCertificate(1L); }); }
public static byte[] bytes(ByteBuffer buf) { byte[] d = new byte[buf.limit()]; buf.get(d); return d; }
@Test public void testBytes() { byte[] array = new byte[10]; ByteBuffer buffer = ByteBuffer.wrap(array); byte[] bytes = Utils.bytes(buffer); assertThat(bytes, is(array)); }
public void createIndex(DBObject keys, DBObject options) { delegate.createIndex(new BasicDBObject(keys.toMap()), toIndexOptions(options)); }
@Test void createIndexWithOptions() { final var collection = jacksonCollection("simple", Simple.class); collection.createIndex(new BasicDBObject("name", 1), new BasicDBObject("sparse", true).append("unique", true)); assertThat(mongoCollection("simple").listIndexes()).containsExactlyInAnyOrder( new Document("key", new Document("_id", 1)) .append("name", "_id_") .append("v", 2), new Document("key", new Document("name", 1)) .append("name", "name_1") .append("sparse", true) .append("unique", true) .append("v", 2) ); }
@Override public void request(Payload grpcRequest, StreamObserver<Payload> responseObserver) { traceIfNecessary(grpcRequest, true); String type = grpcRequest.getMetadata().getType(); long startTime = System.nanoTime(); //server is on starting. if (!ApplicationUtils.isStarted()) { Payload payloadResponse = GrpcUtils.convert( ErrorResponse.build(NacosException.INVALID_SERVER_STATUS, "Server is starting,please try later.")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.INVALID_SERVER_STATUS, null, null, System.nanoTime() - startTime); return; } // server check. if (ServerCheckRequest.class.getSimpleName().equals(type)) { Payload serverCheckResponseP = GrpcUtils.convert(new ServerCheckResponse(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get(), true)); traceIfNecessary(serverCheckResponseP, false); responseObserver.onNext(serverCheckResponseP); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, true, 0, null, null, System.nanoTime() - startTime); return; } RequestHandler requestHandler = requestHandlerRegistry.getByRequestType(type); //no handler found. if (requestHandler == null) { Loggers.REMOTE_DIGEST.warn(String.format("[%s] No handler for request type : %s :", "grpc", type)); Payload payloadResponse = GrpcUtils .convert(ErrorResponse.build(NacosException.NO_HANDLER, "RequestHandler Not Found")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.NO_HANDLER, null, null, System.nanoTime() - startTime); return; } //check connection status. String connectionId = GrpcServerConstants.CONTEXT_KEY_CONN_ID.get(); boolean requestValid = connectionManager.checkValid(connectionId); if (!requestValid) { Loggers.REMOTE_DIGEST .warn("[{}] Invalid connection Id ,connection [{}] is un registered ,", "grpc", connectionId); Payload payloadResponse = GrpcUtils .convert(ErrorResponse.build(NacosException.UN_REGISTER, "Connection is unregistered.")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.UN_REGISTER, null, null, System.nanoTime() - startTime); return; } Object parseObj = null; try { parseObj = GrpcUtils.parse(grpcRequest); } catch (Exception e) { Loggers.REMOTE_DIGEST .warn("[{}] Invalid request receive from connection [{}] ,error={}", "grpc", connectionId, e); Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, e.getMessage())); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.BAD_GATEWAY, e.getClass().getSimpleName(), null, System.nanoTime() - startTime); return; } if (parseObj == null) { Loggers.REMOTE_DIGEST.warn("[{}] Invalid request receive ,parse request is null", connectionId); Payload payloadResponse = GrpcUtils .convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime); return; } if (!(parseObj instanceof Request)) { Loggers.REMOTE_DIGEST .warn("[{}] Invalid request receive ,parsed payload is not a request,parseObj={}", connectionId, parseObj); Payload payloadResponse = GrpcUtils .convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime); return; } Request request = (Request) parseObj; try { Connection connection = connectionManager.getConnection(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get()); RequestMeta requestMeta = new RequestMeta(); requestMeta.setClientIp(connection.getMetaInfo().getClientIp()); requestMeta.setConnectionId(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get()); requestMeta.setClientVersion(connection.getMetaInfo().getVersion()); requestMeta.setLabels(connection.getMetaInfo().getLabels()); requestMeta.setAbilityTable(connection.getAbilityTable()); connectionManager.refreshActiveTime(requestMeta.getConnectionId()); prepareRequestContext(request, requestMeta, connection); Response response = requestHandler.handleRequest(request, requestMeta); Payload payloadResponse = GrpcUtils.convert(response); traceIfNecessary(payloadResponse, false); if (response.getErrorCode() == NacosException.OVER_THRESHOLD) { RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(() -> { traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); }, 1000L, TimeUnit.MILLISECONDS); } else { traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); } MetricsMonitor.recordGrpcRequestEvent(type, response.isSuccess(), response.getErrorCode(), null, request.getModule(), System.nanoTime() - startTime); } catch (Throwable e) { Loggers.REMOTE_DIGEST .error("[{}] Fail to handle request from connection [{}] ,error message :{}", "grpc", connectionId, e); Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(e)); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, ResponseCode.FAIL.getCode(), e.getClass().getSimpleName(), request.getModule(), System.nanoTime() - startTime); } finally { RequestContextHolder.removeContext(); } }
@Test void testRequestContentError() { ApplicationUtils.setStarted(true); Mockito.when(requestHandlerRegistry.getByRequestType(Mockito.anyString())).thenReturn(mockHandler); Mockito.when(connectionManager.checkValid(Mockito.any())).thenReturn(true); StreamObserver<Payload> streamObserver = new StreamObserver<Payload>() { @Override public void onNext(Payload payload) { System.out.println("Receive data from server: " + payload); Object res = GrpcUtils.parse(payload); assertTrue(res instanceof ErrorResponse); ErrorResponse errorResponse = (ErrorResponse) res; assertEquals(NacosException.BAD_GATEWAY, errorResponse.getErrorCode()); } @Override public void onError(Throwable throwable) { fail(throwable.getMessage()); } @Override public void onCompleted() { System.out.println("complete"); } }; streamStub.request(null, streamObserver); ApplicationUtils.setStarted(false); }
public VplsConfig getVplsWithName(String name) { return vplss().stream() .filter(vpls -> vpls.name().equals(name)) .findFirst() .orElse(null); }
@Test public void getVplsWithName() { assertNotNull("Configuration for VPLS not found", vplsAppConfig.getVplsWithName(VPLS1)); assertNull("Unexpected configuration for VPLS found", vplsAppConfig.getVplsWithName(VPLS2)); }
void handleLostAll() { log.debug("Closing lost active tasks as zombies."); closeRunningTasksDirty(); removeLostActiveTasksFromStateUpdaterAndPendingTasksToInit(); if (processingMode == EXACTLY_ONCE_V2) { activeTaskCreator.reInitializeThreadProducer(); } }
@Test public void shouldReInitializeThreadProducerOnHandleLostAllIfEosV2Enabled() { final TaskManager taskManager = setUpTaskManager(ProcessingMode.EXACTLY_ONCE_V2, false); taskManager.handleLostAll(); verify(activeTaskCreator).reInitializeThreadProducer(); }
@Override public boolean equals(Object o) { if (o == this) { return true; } else if (!(o instanceof ExpiryPolicy)) { return false; } var policy = (ExpiryPolicy) o; return Objects.equals(creation, policy.getExpiryForCreation()) && Objects.equals(update, policy.getExpiryForUpdate()) && Objects.equals(access, policy.getExpiryForAccess()); }
@Test public void equals() { assertThat(eternal.equals(eternal)).isTrue(); }
@Override public final ChannelPipeline replace(ChannelHandler oldHandler, String newName, ChannelHandler newHandler) { replace(getContextOrDie(oldHandler), newName, newHandler); return this; }
@Test @Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) public void testReplaceAndForwardOutbound() throws Exception { final BufferedTestHandler handler1 = new BufferedTestHandler(); final BufferedTestHandler handler2 = new BufferedTestHandler(); setUp(handler1); self.eventLoop().submit(new Runnable() { @Override public void run() { ChannelPipeline p = self.pipeline(); handler1.outboundBuffer.add(8); assertEquals(8, handler1.outboundBuffer.peek()); assertTrue(handler2.outboundBuffer.isEmpty()); p.replace(handler1, "handler2", handler2); assertEquals(8, handler2.outboundBuffer.peek()); } }).sync(); }
@Override public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options) { AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future = ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet()); Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue()))); ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext); invokeDriver(handler, future, options.timeoutMs); return new ListOffsetsResult(future.all()); }
@Test public void testListOffsetsRetriableErrors() throws Exception { Node node0 = new Node(0, "localhost", 8120); Node node1 = new Node(1, "localhost", 8121); List<Node> nodes = asList(node0, node1); List<PartitionInfo> pInfos = new ArrayList<>(); pInfos.add(new PartitionInfo("foo", 0, node0, new Node[]{node0, node1}, new Node[]{node0, node1})); pInfos.add(new PartitionInfo("foo", 1, node0, new Node[]{node0, node1}, new Node[]{node0, node1})); pInfos.add(new PartitionInfo("bar", 0, node1, new Node[]{node1, node0}, new Node[]{node1, node0})); final Cluster cluster = new Cluster( "mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node0); final TopicPartition tp0 = new TopicPartition("foo", 0); final TopicPartition tp1 = new TopicPartition("foo", 1); final TopicPartition tp2 = new TopicPartition("bar", 0); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); // listoffsets response from broker 0 ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.LEADER_NOT_AVAILABLE, -1L, 123L, 321); ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 987L, 789); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) .setTopics(asList(t0, t1)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); // listoffsets response from broker 1 ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, -1L, 456L, 654); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) .setTopics(singletonList(t2)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); // metadata refresh because of LEADER_NOT_AVAILABLE env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); // listoffsets response from broker 0 t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); Map<TopicPartition, OffsetSpec> partitions = new HashMap<>(); partitions.put(tp0, OffsetSpec.latest()); partitions.put(tp1, OffsetSpec.latest()); partitions.put(tp2, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get(); assertFalse(offsets.isEmpty()); assertEquals(345L, offsets.get(tp0).offset()); assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue()); assertEquals(-1L, offsets.get(tp0).timestamp()); assertEquals(987L, offsets.get(tp1).offset()); assertEquals(789, offsets.get(tp1).leaderEpoch().get().intValue()); assertEquals(-1L, offsets.get(tp1).timestamp()); assertEquals(456L, offsets.get(tp2).offset()); assertEquals(654, offsets.get(tp2).leaderEpoch().get().intValue()); assertEquals(-1L, offsets.get(tp2).timestamp()); } }
@Override public Serializer<T> serializer() { final Serializer<T> serializer = delegate.serializer(); return (topic, data) -> serializer.serialize(this.topic, data); }
@Test public void shouldUseDelegateSerializerWithStaticTopic() { // When: final byte[] serialized = staticSerde.serializer().serialize(SOURCE_TOPIC, SOME_OBJECT); // Then: verify(delegateS).serialize(STATIC_TOPIC, SOME_OBJECT); assertThat(serialized, is(SOME_BYTES)); verifyNoMoreInteractions(callback); }
public static HttpClient newConnection() { return new HttpClientConnect(new HttpConnectionProvider(ConnectionProvider.newConnection())); }
@Test public void testSharedNameResolver_NotSharedClientNoConnectionPool() throws InterruptedException { doTestSharedNameResolver(HttpClient.newConnection(), false); }
@Override public int getMaxParallelism() { return this.maxParallelism; }
@Test void setAutoMax() { DefaultVertexParallelismInfo info = new DefaultVertexParallelismInfo( 1, ExecutionConfig.PARALLELISM_AUTO_MAX, ALWAYS_VALID); assertThat(info.getMaxParallelism()) .isEqualTo(KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM); }
@Override public void apply(IntentOperationContext<FlowObjectiveIntent> intentOperationContext) { Objects.requireNonNull(intentOperationContext); Optional<IntentData> toUninstall = intentOperationContext.toUninstall(); Optional<IntentData> toInstall = intentOperationContext.toInstall(); List<FlowObjectiveIntent> uninstallIntents = intentOperationContext.intentsToUninstall(); List<FlowObjectiveIntent> installIntents = intentOperationContext.intentsToInstall(); if (!toInstall.isPresent() && !toUninstall.isPresent()) { intentInstallCoordinator.intentInstallSuccess(intentOperationContext); return; } if (toUninstall.isPresent()) { IntentData intentData = toUninstall.get(); trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources()); uninstallIntents.forEach(installable -> trackerService.removeTrackedResources(intentData.intent().key(), installable.resources())); } if (toInstall.isPresent()) { IntentData intentData = toInstall.get(); trackerService.addTrackedResources(intentData.key(), intentData.intent().resources()); installIntents.forEach(installable -> trackerService.addTrackedResources(intentData.key(), installable.resources())); } FlowObjectiveIntentInstallationContext intentInstallationContext = new FlowObjectiveIntentInstallationContext(intentOperationContext); uninstallIntents.stream() .map(intent -> buildObjectiveContexts(intent, REMOVE)) .flatMap(Collection::stream) .forEach(context -> { context.intentInstallationContext(intentInstallationContext); intentInstallationContext.addContext(context); intentInstallationContext.addPendingContext(context); }); installIntents.stream() .map(intent -> buildObjectiveContexts(intent, ADD)) .flatMap(Collection::stream) .forEach(context -> { context.intentInstallationContext(intentInstallationContext); intentInstallationContext.addContext(context); intentInstallationContext.addNextPendingContext(context); }); intentInstallationContext.apply(); }
@Test public void testGroupExistError() { // group exists, retry by using add to exist intentInstallCoordinator = new TestIntentInstallCoordinator(); installer.intentInstallCoordinator = intentInstallCoordinator; errors = ImmutableList.of(GROUPEXISTS); installer.flowObjectiveService = new TestFailedFlowObjectiveService(errors); context = createInstallContext(); installer.apply(context); successContext = intentInstallCoordinator.successContext; assertEquals(successContext, context); }
@VisibleForTesting static String getActualColumnName(String rawTableName, String columnName, @Nullable Map<String, String> columnNameMap, boolean ignoreCase) { if ("*".equals(columnName)) { return columnName; } String columnNameToCheck = trimTableName(rawTableName, columnName, ignoreCase); if (ignoreCase) { columnNameToCheck = columnNameToCheck.toLowerCase(); } if (columnNameMap != null) { String actualColumnName = columnNameMap.get(columnNameToCheck); if (actualColumnName != null) { return actualColumnName; } } if (columnName.charAt(0) == '$') { return columnName; } throw new BadQueryRequestException("Unknown columnName '" + columnName + "' found in the query"); }
@Test public void testGetActualColumnNameCaseSensitive() { Map<String, String> columnNameMap = new HashMap<>(); columnNameMap.put("student_name", "student_name"); String actualColumnName = BaseSingleStageBrokerRequestHandler.getActualColumnName("mytable", "mytable.student_name", columnNameMap, false); Assert.assertEquals(actualColumnName, "student_name"); Assert.assertEquals( BaseSingleStageBrokerRequestHandler.getActualColumnName("db1.mytable", "db1.mytable.student_name", columnNameMap, false), "student_name"); Assert.assertEquals( BaseSingleStageBrokerRequestHandler.getActualColumnName("db1.mytable", "mytable.student_name", columnNameMap, false), "student_name"); boolean exceptionThrown = false; try { BaseSingleStageBrokerRequestHandler.getActualColumnName("mytable", "mytable2.student_name", columnNameMap, false); Assert.fail("should throw exception if column is not known"); } catch (BadQueryRequestException ex) { exceptionThrown = true; } Assert.assertTrue(exceptionThrown, "should throw exception if column is not known"); exceptionThrown = false; try { BaseSingleStageBrokerRequestHandler.getActualColumnName("mytable", "MYTABLE.student_name", columnNameMap, false); Assert.fail("should throw exception if case sensitive and table name different"); } catch (BadQueryRequestException ex) { exceptionThrown = true; } Assert.assertTrue(exceptionThrown, "should throw exception if column is not known"); columnNameMap.put("mytable_student_name", "mytable_student_name"); String wrongColumnName2 = BaseSingleStageBrokerRequestHandler.getActualColumnName("mytable", "mytable_student_name", columnNameMap, false); Assert.assertEquals(wrongColumnName2, "mytable_student_name"); columnNameMap.put("mytable", "mytable"); String wrongColumnName3 = BaseSingleStageBrokerRequestHandler.getActualColumnName("mytable", "mytable", columnNameMap, false); Assert.assertEquals(wrongColumnName3, "mytable"); }
public static List<Transformation<?>> optimize(List<Transformation<?>> transformations) { final Map<Transformation<?>, Set<Transformation<?>>> outputMap = buildOutputMap(transformations); final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>(); final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet(); final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations); while (!toTransformQueue.isEmpty()) { final Transformation<?> transformation = toTransformQueue.poll(); if (!alreadyTransformed.contains(transformation)) { alreadyTransformed.add(transformation); final ChainInfo chainInfo = chainWithInputIfPossible(transformation, outputMap); chainedTransformations.add(chainInfo.newTransformation); chainedTransformations.removeAll(chainInfo.oldTransformations); alreadyTransformed.addAll(chainInfo.oldTransformations); // Add the chained transformation and its inputs to the to-optimize list toTransformQueue.add(chainInfo.newTransformation); toTransformQueue.addAll(chainInfo.newTransformation.getInputs()); } } return new ArrayList<>(chainedTransformations); }
@Test void testMultipleChainedOperators() { ExternalPythonKeyedProcessOperator<?> keyedProcessOperator1 = createKeyedProcessOperator( "f1", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING()); ExternalPythonProcessOperator<?, ?> processOperator1 = createProcessOperator( "f2", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING()); ExternalPythonProcessOperator<?, ?> processOperator2 = createProcessOperator( "f3", new RowTypeInfo(Types.INT(), Types.INT()), Types.LONG()); ExternalPythonKeyedProcessOperator<?> keyedProcessOperator2 = createKeyedProcessOperator( "f4", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING()); ExternalPythonProcessOperator<?, ?> processOperator3 = createProcessOperator( "f5", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING()); Transformation<?> sourceTransformation = mock(SourceTransformation.class); OneInputTransformation<?, ?> keyedProcessTransformation1 = new OneInputTransformation( sourceTransformation, "keyedProcess", keyedProcessOperator1, keyedProcessOperator1.getProducedType(), 2); Transformation<?> processTransformation1 = new OneInputTransformation( keyedProcessTransformation1, "process", processOperator1, processOperator1.getProducedType(), 2); Transformation<?> processTransformation2 = new OneInputTransformation( processTransformation1, "process", processOperator2, processOperator2.getProducedType(), 2); OneInputTransformation<?, ?> keyedProcessTransformation2 = new OneInputTransformation( processTransformation2, "keyedProcess", keyedProcessOperator2, keyedProcessOperator2.getProducedType(), 2); Transformation<?> processTransformation3 = new OneInputTransformation( keyedProcessTransformation2, "process", processOperator3, processOperator3.getProducedType(), 2); List<Transformation<?>> transformations = new ArrayList<>(); transformations.add(sourceTransformation); transformations.add(keyedProcessTransformation1); transformations.add(processTransformation1); transformations.add(processTransformation2); transformations.add(keyedProcessTransformation2); transformations.add(processTransformation3); List<Transformation<?>> optimized = PythonOperatorChainingOptimizer.optimize(transformations); assertThat(optimized).hasSize(3); OneInputTransformation<?, ?> chainedTransformation1 = (OneInputTransformation<?, ?>) optimized.get(1); assertThat(sourceTransformation.getOutputType()) .isEqualTo(chainedTransformation1.getInputType()); assertThat(processOperator2.getProducedType()) .isEqualTo(chainedTransformation1.getOutputType()); OneInputTransformation<?, ?> chainedTransformation2 = (OneInputTransformation<?, ?>) optimized.get(2); assertThat(processOperator2.getProducedType()) .isEqualTo(chainedTransformation2.getInputType()); assertThat(processOperator3.getProducedType()) .isEqualTo(chainedTransformation2.getOutputType()); OneInputStreamOperator<?, ?> chainedOperator1 = chainedTransformation1.getOperator(); assertThat(chainedOperator1).isInstanceOf(ExternalPythonKeyedProcessOperator.class); validateChainedPythonFunctions( ((ExternalPythonKeyedProcessOperator<?>) chainedOperator1).getPythonFunctionInfo(), "f3", "f2", "f1"); OneInputStreamOperator<?, ?> chainedOperator2 = chainedTransformation2.getOperator(); assertThat(chainedOperator2).isInstanceOf(ExternalPythonKeyedProcessOperator.class); validateChainedPythonFunctions( ((ExternalPythonKeyedProcessOperator<?>) chainedOperator2).getPythonFunctionInfo(), "f5", "f4"); }
@VisibleForTesting static SwitchGenerationCase checkSwitchGenerationCase(Type type, List<RowExpression> values) { if (values.size() > 32) { // 32 is chosen because // * SET_CONTAINS performs worst when smaller than but close to power of 2 // * Benchmark shows performance of SET_CONTAINS is better at 50, but similar at 25. return SwitchGenerationCase.SET_CONTAINS; } if (!(type instanceof IntegerType || type instanceof BigintType || type instanceof DateType)) { return SwitchGenerationCase.HASH_SWITCH; } for (RowExpression expression : values) { // For non-constant expressions, they will be added to the default case in the generated switch code. They do not affect any of // the cases other than the default one. Therefore, it's okay to skip them when choosing between DIRECT_SWITCH and HASH_SWITCH. // Same argument applies for nulls. if (!(expression instanceof ConstantExpression)) { continue; } Object constant = ((ConstantExpression) expression).getValue(); if (constant == null) { continue; } long longConstant = ((Number) constant).longValue(); if (longConstant < Integer.MIN_VALUE || longConstant > Integer.MAX_VALUE) { return SwitchGenerationCase.HASH_SWITCH; } } return SwitchGenerationCase.DIRECT_SWITCH; }
@Test public void testDate() { List<RowExpression> values = new ArrayList<>(); values.add(constant(1L, DATE)); values.add(constant(2L, DATE)); values.add(constant(3L, DATE)); assertEquals(checkSwitchGenerationCase(DATE, values), DIRECT_SWITCH); for (long i = 4; i <= 32; ++i) { values.add(constant(i, DATE)); } assertEquals(checkSwitchGenerationCase(DATE, values), DIRECT_SWITCH); values.add(constant(33L, DATE)); assertEquals(checkSwitchGenerationCase(DATE, values), SET_CONTAINS); }
@Override public Set<GrokPattern> bulkLoad(Collection<String> patternIds) { return patternIds.stream() .map(store::get) .filter(Objects::nonNull) .collect(Collectors.toSet()); }
@Test public void bulkLoad() throws Exception { GrokPattern pattern1 = service.save(GrokPattern.create("NAME1", ".*")); GrokPattern pattern2 = service.save(GrokPattern.create("NAME2", ".*")); GrokPattern pattern3 = service.save(GrokPattern.create("NAME3", ".*")); assertThat(service.bulkLoad(ImmutableSet.of(pattern1.id(), pattern3.id()))).containsExactlyInAnyOrder(pattern1, pattern3); }
public static L4ModificationInstruction modTcpDst(TpPort port) { checkNotNull(port, "Dst TCP port cannot be null"); return new ModTransportPortInstruction(L4SubType.TCP_DST, port); }
@Test public void testModTcpDstMethod() { final Instruction instruction = Instructions.modTcpDst(tpPort1); final L4ModificationInstruction.ModTransportPortInstruction modTransportPortInstruction = checkAndConvert(instruction, Instruction.Type.L4MODIFICATION, L4ModificationInstruction.ModTransportPortInstruction.class); assertThat(modTransportPortInstruction.port(), is(equalTo(tpPort1))); assertThat(modTransportPortInstruction.subtype(), is(equalTo(L4ModificationInstruction.L4SubType.TCP_DST))); }
public static Details create(String template, Object... args) { return Details.builder() .status(MaestroRuntimeException.Code.INTERNAL_ERROR) .message(String.format(template, args)) .build(); }
@Test public void testCreateWithStackTrace() { Exception exception = new Exception(new Exception(new Exception("test"))); Details details = Details.create(exception, false, "test-msg"); assertEquals(MaestroRuntimeException.Code.INTERNAL_ERROR, details.getStatus()); assertEquals("test-msg", details.getMessage()); assertEquals(6, details.getErrors().size()); assertFalse(details.isRetryable()); exception.setStackTrace(new StackTraceElement[0]); details = Details.create(exception, false, "test-msg"); assertEquals(MaestroRuntimeException.Code.INTERNAL_ERROR, details.getStatus()); assertEquals("test-msg", details.getMessage()); assertEquals(3, details.getErrors().size()); assertFalse(details.isRetryable()); }
public static boolean substringMatch(CharSequence str, int index, CharSequence substring) { if (index + substring.length() > str.length()) { return false; } for (int i = 0; i < substring.length(); i++) { if (str.charAt(index + i) != substring.charAt(i)) { return false; } } return true; }
@Test public void testSubstringMatchWithPositive() { assertFalse(StringUtil.substringMatch("", 4770, "")); }
@Override protected ExecuteContext doAfter(ExecuteContext context) { final Object object = context.getObject(); if (object instanceof LoadBalancerCacheManager) { GraceContext.INSTANCE.getGraceShutDownManager().setLoadBalancerCacheManager(object); } return context; }
@Test public void testCacheManager() { final SpringCacheManagerInterceptor springCacheManagerInterceptor = new SpringCacheManagerInterceptor(); final LoadBalancerCacheManager cacheManager = Mockito.mock(LoadBalancerCacheManager.class); final ExecuteContext executeContext = ExecuteContext .forMemberMethod(cacheManager, null, null, Collections.emptyMap(), Collections.emptyMap()); springCacheManagerInterceptor.doAfter(executeContext); assertNotNull(GraceContext.INSTANCE.getGraceShutDownManager().getLoadBalancerCacheManager()); }
@Transactional @ApolloAuditLog(type = OpType.CREATE, name = "App.create") public App createAppAndAddRolePermission( App app, Set<String> admins ) { App createdApp = this.createAppInLocal(app); publisher.publishEvent(new AppCreationEvent(createdApp)); if (!CollectionUtils.isEmpty(admins)) { rolePermissionService .assignRoleToUsers(RoleUtils.buildAppMasterRoleName(createdApp.getAppId()), admins, userInfoHolder.getUser().getUserId()); } return createdApp; }
@Test void createAppAndAddRolePermissionButOwnerNotExists() { Mockito.when(userService.findByUserId(Mockito.any())) .thenReturn(null); assertThrows( BadRequestException.class, () -> appService.createAppAndAddRolePermission(new App(), Collections.emptySet()) ); }
public void decode(ByteBuf buffer) { boolean last; int statusCode; while (true) { switch(state) { case READ_COMMON_HEADER: if (buffer.readableBytes() < SPDY_HEADER_SIZE) { return; } int frameOffset = buffer.readerIndex(); int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET; int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET; buffer.skipBytes(SPDY_HEADER_SIZE); boolean control = (buffer.getByte(frameOffset) & 0x80) != 0; int version; int type; if (control) { // Decode control frame common header version = getUnsignedShort(buffer, frameOffset) & 0x7FFF; type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET); streamId = 0; // Default to session Stream-ID } else { // Decode data frame common header version = spdyVersion; // Default to expected version type = SPDY_DATA_FRAME; streamId = getUnsignedInt(buffer, frameOffset); } flags = buffer.getByte(flagsOffset); length = getUnsignedMedium(buffer, lengthOffset); // Check version first then validity if (version != spdyVersion) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SPDY Version"); } else if (!isValidFrameHeader(streamId, type, flags, length)) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid Frame Error"); } else { state = getNextState(type, length); } break; case READ_DATA_FRAME: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0)); break; } // Generate data frames that do not exceed maxChunkSize int dataLength = Math.min(maxChunkSize, length); // Wait until entire frame is readable if (buffer.readableBytes() < dataLength) { return; } ByteBuf data = buffer.alloc().buffer(dataLength); data.writeBytes(buffer, dataLength); length -= dataLength; if (length == 0) { state = State.READ_COMMON_HEADER; } last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN); delegate.readDataFrame(streamId, last, data); break; case READ_SYN_STREAM_FRAME: if (buffer.readableBytes() < 10) { return; } int offset = buffer.readerIndex(); streamId = getUnsignedInt(buffer, offset); int associatedToStreamId = getUnsignedInt(buffer, offset + 4); byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07); last = hasFlag(flags, SPDY_FLAG_FIN); boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL); buffer.skipBytes(10); length -= 10; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_STREAM Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); } break; case READ_SYN_REPLY_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_REPLY Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynReplyFrame(streamId, last); } break; case READ_RST_STREAM_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (streamId == 0 || statusCode == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid RST_STREAM Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readRstStreamFrame(streamId, statusCode); } break; case READ_SETTINGS_FRAME: if (buffer.readableBytes() < 4) { return; } boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR); numSettings = getUnsignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); length -= 4; // Validate frame length against number of entries. Each ID/Value entry is 8 bytes. if ((length & 0x07) != 0 || length >> 3 != numSettings) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SETTINGS Frame"); } else { state = State.READ_SETTING; delegate.readSettingsFrame(clear); } break; case READ_SETTING: if (numSettings == 0) { state = State.READ_COMMON_HEADER; delegate.readSettingsEnd(); break; } if (buffer.readableBytes() < 8) { return; } byte settingsFlags = buffer.getByte(buffer.readerIndex()); int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1); int value = getSignedInt(buffer, buffer.readerIndex() + 4); boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE); boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED); buffer.skipBytes(8); --numSettings; delegate.readSetting(id, value, persistValue, persisted); break; case READ_PING_FRAME: if (buffer.readableBytes() < 4) { return; } int pingId = getSignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); state = State.READ_COMMON_HEADER; delegate.readPingFrame(pingId); break; case READ_GOAWAY_FRAME: if (buffer.readableBytes() < 8) { return; } int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); state = State.READ_COMMON_HEADER; delegate.readGoAwayFrame(lastGoodStreamId, statusCode); break; case READ_HEADERS_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid HEADERS Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readHeadersFrame(streamId, last); } break; case READ_WINDOW_UPDATE_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (deltaWindowSize == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid WINDOW_UPDATE Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readWindowUpdateFrame(streamId, deltaWindowSize); } break; case READ_HEADER_BLOCK: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readHeaderBlockEnd(); break; } if (!buffer.isReadable()) { return; } int compressedBytes = Math.min(buffer.readableBytes(), length); ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes); headerBlock.writeBytes(buffer, compressedBytes); length -= compressedBytes; delegate.readHeaderBlock(headerBlock); break; case DISCARD_FRAME: int numBytes = Math.min(buffer.readableBytes(), length); buffer.skipBytes(numBytes); length -= numBytes; if (length == 0) { state = State.READ_COMMON_HEADER; break; } return; case FRAME_ERROR: buffer.skipBytes(buffer.readableBytes()); return; default: throw new Error("Shouldn't reach here."); } } }
@Test
public void testReservedSpdySynReplyFrameBits() throws Exception {
    short type = 2;
    byte flags = 0;
    int length = 4;
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;

    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(streamId | 0x80000000); // should ignore reserved bit

    decoder.decode(buf);
    verify(delegate).readSynReplyFrame(streamId, false);
    verify(delegate).readHeaderBlockEnd();

    assertFalse(buf.isReadable());
    buf.release();
}
public static String findBsn(List<Container> categorieList) {
    return findValue(categorieList, CATEGORIE_IDENTIFICATIENUMMERS, ELEMENT_BURGERSERVICENUMMER);
}
@Test
public void testFindBsn() {
    assertThat(CategorieUtil.findBsn(createFullCategories()), is("burgerservicenummer"));
}
public boolean isTransferSupported() {
    requireNotStale();

    return Optional.ofNullable(getPrimaryDevice().getCapabilities())
        .map(Device.DeviceCapabilities::transfer)
        .orElse(false);
}
@Test
void testIsTransferSupported() {
    final Device transferCapablePrimaryDevice = mock(Device.class);
    final Device nonTransferCapablePrimaryDevice = mock(Device.class);
    final Device transferCapableLinkedDevice = mock(Device.class);

    final DeviceCapabilities transferCapabilities = mock(DeviceCapabilities.class);
    final DeviceCapabilities nonTransferCapabilities = mock(DeviceCapabilities.class);

    when(transferCapablePrimaryDevice.getId()).thenReturn(Device.PRIMARY_ID);
    when(transferCapablePrimaryDevice.isPrimary()).thenReturn(true);
    when(transferCapablePrimaryDevice.getCapabilities()).thenReturn(transferCapabilities);

    when(nonTransferCapablePrimaryDevice.getId()).thenReturn(Device.PRIMARY_ID);
    when(nonTransferCapablePrimaryDevice.isPrimary()).thenReturn(true);
    when(nonTransferCapablePrimaryDevice.getCapabilities()).thenReturn(nonTransferCapabilities);

    when(transferCapableLinkedDevice.getId()).thenReturn((byte) 2);
    when(transferCapableLinkedDevice.isPrimary()).thenReturn(false);
    when(transferCapableLinkedDevice.getCapabilities()).thenReturn(transferCapabilities);

    when(transferCapabilities.transfer()).thenReturn(true);
    when(nonTransferCapabilities.transfer()).thenReturn(false);

    {
        final Account transferablePrimaryAccount = AccountsHelper.generateTestAccount("+14152222222",
                UUID.randomUUID(), UUID.randomUUID(), List.of(transferCapablePrimaryDevice), "1234".getBytes());

        assertTrue(transferablePrimaryAccount.isTransferSupported());
    }

    {
        final Account nonTransferablePrimaryAccount = AccountsHelper.generateTestAccount("+14152222222",
                UUID.randomUUID(), UUID.randomUUID(), List.of(nonTransferCapablePrimaryDevice), "1234".getBytes());

        assertFalse(nonTransferablePrimaryAccount.isTransferSupported());
    }

    {
        final Account transferableLinkedAccount = AccountsHelper.generateTestAccount("+14152222222",
                UUID.randomUUID(), UUID.randomUUID(),
                List.of(nonTransferCapablePrimaryDevice, transferCapableLinkedDevice), "1234".getBytes());

        assertFalse(transferableLinkedAccount.isTransferSupported());
    }
}
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source,
                                   Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables,
                                   Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles,
                                   FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    FEEL_1_1Parser parser = new FEEL_1_1Parser(tokens);

    ParserHelper parserHelper = new ParserHelper(eventsManager);
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler(new FEELErrorHandler());
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener(new FEELParserErrorListener(eventsManager));

    // pre-loads the parser with symbols
    defineVariables(inputVariableTypes, inputVariables, parser);

    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
@Test
void atLiteralDate() {
    String inputExpression = "@\"2016-07-29\"";
    BaseNode bool = parse(inputExpression);

    assertThat(bool).isInstanceOf(AtLiteralNode.class);
    assertThat(bool.getResultType()).isEqualTo(BuiltInType.DATE);
    assertLocation(inputExpression, bool);
}
public static Optional<Expression> convert(org.apache.flink.table.expressions.Expression flinkExpression) {
    if (!(flinkExpression instanceof CallExpression)) {
        return Optional.empty();
    }

    CallExpression call = (CallExpression) flinkExpression;
    Operation op = FILTERS.get(call.getFunctionDefinition());
    if (op != null) {
        switch (op) {
            case IS_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::isNull);

            case NOT_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::notNull);

            case LT:
                return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);

            case LT_EQ:
                return convertFieldAndLiteral(
                    Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);

            case GT:
                return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);

            case GT_EQ:
                return convertFieldAndLiteral(
                    Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);

            case EQ:
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.isNaN(ref);
                        } else {
                            return Expressions.equal(ref, lit);
                        }
                    },
                    call);

            case NOT_EQ:
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.notNaN(ref);
                        } else {
                            return Expressions.notEqual(ref, lit);
                        }
                    },
                    call);

            case NOT:
                return onlyChildAs(call, CallExpression.class)
                    .flatMap(FlinkFilters::convert)
                    .map(Expressions::not);

            case AND:
                return convertLogicExpression(Expressions::and, call);

            case OR:
                return convertLogicExpression(Expressions::or, call);

            case STARTS_WITH:
                return convertLike(call);
        }
    }

    return Optional.empty();
}
@Test
public void testGreaterThanEquals() {
    UnboundPredicate<Integer> expected =
        org.apache.iceberg.expressions.Expressions.greaterThanOrEqual("field1", 1);

    Optional<org.apache.iceberg.expressions.Expression> actual =
        FlinkFilters.convert(resolve(Expressions.$("field1").isGreaterOrEqual(Expressions.lit(1))));
    assertThat(actual).isPresent();
    assertPredicatesMatch(expected, actual.get());

    Optional<org.apache.iceberg.expressions.Expression> actual1 =
        FlinkFilters.convert(resolve(Expressions.lit(1).isLessOrEqual(Expressions.$("field1"))));
    assertThat(actual1).isPresent();
    assertPredicatesMatch(expected, actual1.get());
}
public ProtocolBuilder corethreads(Integer corethreads) {
    this.corethreads = corethreads;
    return getThis();
}
@Test
void corethreads() {
    ProtocolBuilder builder = new ProtocolBuilder();
    builder.corethreads(10);
    Assertions.assertEquals(10, builder.build().getCorethreads());
}
@Override
public DescribeTopicsResult describeTopics(final TopicCollection topics, DescribeTopicsOptions options) {
    if (topics instanceof TopicIdCollection)
        return DescribeTopicsResult.ofTopicIds(
            handleDescribeTopicsByIds(((TopicIdCollection) topics).topicIds(), options));
    else if (topics instanceof TopicNameCollection)
        return DescribeTopicsResult.ofTopicNames(
            handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi(((TopicNameCollection) topics).topicNames(), options));
    else
        throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics.");
}
@Test
public void testDescribeTopicPartitionsApiWithoutAuthorizedOps() throws ExecutionException, InterruptedException {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        String topicName0 = "test-0";
        Uuid topicId = Uuid.randomUuid();
        int authorisedOperations = Utils.to32BitField(Utils.mkSet(AclOperation.DESCRIBE.code(), AclOperation.ALTER.code()));
        env.kafkaClient().prepareResponse(
            prepareDescribeClusterResponse(0, env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 2, authorisedOperations)
        );

        DescribeTopicPartitionsResponseData responseData = new DescribeTopicPartitionsResponseData();
        responseData.topics().add(new DescribeTopicPartitionsResponseTopic()
            .setErrorCode((short) 0)
            .setTopicId(topicId)
            .setName(topicName0)
            .setIsInternal(false)
            .setTopicAuthorizedOperations(authorisedOperations));
        env.kafkaClient().prepareResponse(new DescribeTopicPartitionsResponse(responseData));

        DescribeTopicsResult result = env.adminClient().describeTopics(
            singletonList(topicName0),
            new DescribeTopicsOptions().includeAuthorizedOperations(false)
        );
        Map<String, TopicDescription> topicDescriptions = result.allTopicNames().get();
        TopicDescription topicDescription = topicDescriptions.get(topicName0);
        assertNull(topicDescription.authorizedOperations());
    }
}
public Optional<Projection> createProjection(final ProjectionSegment projectionSegment) {
    if (projectionSegment instanceof ShorthandProjectionSegment) {
        return Optional.of(createProjection((ShorthandProjectionSegment) projectionSegment));
    }
    if (projectionSegment instanceof ColumnProjectionSegment) {
        return Optional.of(createProjection((ColumnProjectionSegment) projectionSegment));
    }
    if (projectionSegment instanceof ExpressionProjectionSegment) {
        return Optional.of(createProjection((ExpressionProjectionSegment) projectionSegment));
    }
    if (projectionSegment instanceof AggregationDistinctProjectionSegment) {
        return Optional.of(createProjection((AggregationDistinctProjectionSegment) projectionSegment));
    }
    if (projectionSegment instanceof AggregationProjectionSegment) {
        return Optional.of(createProjection((AggregationProjectionSegment) projectionSegment));
    }
    if (projectionSegment instanceof SubqueryProjectionSegment) {
        return Optional.of(createProjection((SubqueryProjectionSegment) projectionSegment));
    }
    if (projectionSegment instanceof ParameterMarkerExpressionSegment) {
        return Optional.of(createProjection((ParameterMarkerExpressionSegment) projectionSegment));
    }
    return Optional.empty();
}
@Test
void assertCreateProjectionWhenProjectionSegmentInstanceOfAggregationDistinctProjectionSegmentAndAggregationTypeIsAvg() {
    AggregationDistinctProjectionSegment aggregationDistinctProjectionSegment =
            new AggregationDistinctProjectionSegment(0, 10, AggregationType.AVG, "(1)", "distinctExpression");
    Optional<Projection> actual = new ProjectionEngine(databaseType).createProjection(aggregationDistinctProjectionSegment);
    assertTrue(actual.isPresent());
    assertThat(actual.get(), instanceOf(AggregationDistinctProjection.class));
}
public void writeIntLenenc(final long value) {
    if (value < 0xfb) {
        byteBuf.writeByte((int) value);
        return;
    }
    if (value < Math.pow(2D, 16D)) {
        byteBuf.writeByte(0xfc);
        byteBuf.writeShortLE((int) value);
        return;
    }
    if (value < Math.pow(2D, 24D)) {
        byteBuf.writeByte(0xfd);
        byteBuf.writeMediumLE((int) value);
        return;
    }
    byteBuf.writeByte(0xfe);
    byteBuf.writeLongLE(value);
}
@Test
void assertWriteIntLenencWithTwoBytes() {
    new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).writeIntLenenc(Double.valueOf(Math.pow(2D, 16D)).longValue() - 1L);
    verify(byteBuf).writeByte(0xfc);
    verify(byteBuf).writeShortLE(Double.valueOf(Math.pow(2D, 16D)).intValue() - 1);
}
public static GeneratedResources getGeneratedResourcesObject(String generatedResourcesString) throws JsonProcessingException { return objectMapper.readValue(generatedResourcesString, GeneratedResources.class); }
@Test
void getGeneratedResourcesObjectFromFile() throws Exception {
    String fileName = "IndexFile.test_json";
    URL resource = Thread.currentThread().getContextClassLoader().getResource(fileName);
    assert resource != null;
    IndexFile indexFile = new IndexFile(new File(resource.toURI()));
    GeneratedResources retrieved = JSONUtils.getGeneratedResourcesObject(indexFile);
    assertThat(retrieved).isNotNull();

    String fullClassName = "full.class.Name";
    GeneratedResource expected1 = new GeneratedClassResource(fullClassName);
    LocalUri modelLocalUriId = new ReflectiveAppRoot("test")
            .get(ComponentFoo.class)
            .get("this", "is", "fri")
            .asLocalUri();
    ModelLocalUriId localUriId = new ModelLocalUriId(modelLocalUriId);
    GeneratedResource expected2 = new GeneratedExecutableResource(localUriId, Collections.singletonList(fullClassName));
    assertThat(retrieved).contains(expected1);
    assertThat(retrieved).contains(expected2);
}
@Override
protected Object getTargetObject(boolean key) {
    Object targetObject;
    if (key) {
        // keyData is never null
        if (keyData.isPortable() || keyData.isJson() || keyData.isCompact()) {
            targetObject = keyData;
        } else {
            targetObject = getKey();
        }
    } else {
        if (valueObject == null) {
            targetObject = getTargetObjectFromData();
        } else {
            if (valueObject instanceof PortableGenericRecord || valueObject instanceof CompactGenericRecord) {
                // These two classes should be able to be handled by respective Getters
                // see PortableGetter and CompactGetter
                // We get into this branch when in memory format is Object and
                // - the cluster does not have PortableFactory configuration for Portable
                // - the cluster does not have the related classes for Compact
                targetObject = getValue();
            } else if (valueObject instanceof Portable || serializationService.isCompactSerializable(valueObject)) {
                targetObject = getValueData();
            } else {
                // Note that targetObject can be PortableGenericRecord
                // and it will be handled with PortableGetter for query.
                // We get PortableGenericRecord here when in-memory format is OBJECT and
                // the cluster does not have PortableFactory configuration for the object's factory ID
                targetObject = getValue();
            }
        }
    }
    return targetObject;
}
@Test
public void testGetTargetObject_givenValueIsDataAndPortable_whenKeyFlagIsFalse_thenReturnValueData() {
    Data key = serializationService.toData("indexedKey");
    Data value = serializationService.toData(new PortableEmployee(30, "peter"));
    QueryableEntry entry = createEntry(key, value, newExtractor());

    Object targetObject = entry.getTargetObject(false);

    assertEquals(value, targetObject);
}
@ApiOperation(value = "Get Edge Events (getEdgeEvents)", notes = "Returns a page of edge events for the requested edge. " + PAGE_DATA_PARAMETERS) @PreAuthorize("hasAuthority('TENANT_ADMIN')") @GetMapping(value = "/edge/{edgeId}/events") public PageData<EdgeEvent> getEdgeEvents( @Parameter(description = EDGE_ID_PARAM_DESCRIPTION, required = true) @PathVariable(EDGE_ID) String strEdgeId, @Parameter(description = PAGE_SIZE_DESCRIPTION, required = true) @RequestParam int pageSize, @Parameter(description = PAGE_NUMBER_DESCRIPTION, required = true) @RequestParam int page, @Parameter(description = "The case insensitive 'substring' filter based on the edge event type name.") @RequestParam(required = false) String textSearch, @Parameter(description = SORT_PROPERTY_DESCRIPTION, schema = @Schema(allowableValues = {"createdTime", "name", "type", "label", "customerTitle"})) @RequestParam(required = false) String sortProperty, @Parameter(description = SORT_ORDER_DESCRIPTION, schema = @Schema(allowableValues = {"ASC", "DESC"})) @RequestParam(required = false) String sortOrder, @Parameter(description = "Timestamp. Edge events with creation time before it won't be queried") @RequestParam(required = false) Long startTime, @Parameter(description = "Timestamp. Edge events with creation time after it won't be queried") @RequestParam(required = false) Long endTime) throws ThingsboardException { checkParameter(EDGE_ID, strEdgeId); TenantId tenantId = getCurrentUser().getTenantId(); EdgeId edgeId = new EdgeId(toUUID(strEdgeId)); checkEdgeId(edgeId, Operation.READ); TimePageLink pageLink = createTimePageLink(pageSize, page, textSearch, sortProperty, sortOrder, startTime, endTime); return checkNotNull(edgeEventService.findEdgeEvents(tenantId, edgeId, 0L, null, pageLink)); }
@Test
public void testGetEdgeEvents() throws Exception {
    Edge edge = constructEdge("TestEdge", "default");
    edge = doPost("/api/edge", edge, Edge.class);
    final EdgeId edgeId = edge.getId();

    awaitForEdgeTemplateRootRuleChainToAssignToEdge(edgeId);

    // simulate edge activation
    ObjectNode attributes = JacksonUtil.newObjectNode();
    attributes.put("active", true);
    doPost("/api/plugins/telemetry/EDGE/" + edge.getId() + "/attributes/" + AttributeScope.SERVER_SCOPE, attributes);

    Device device = constructDevice("TestDevice", "default");
    Device savedDevice = doPost("/api/device", device, Device.class);
    doPost("/api/edge/" + edgeId + "/device/" + savedDevice.getId(), Device.class);

    Asset asset = constructAsset("TestAsset", "default");
    Asset savedAsset = doPost("/api/asset", asset, Asset.class);
    doPost("/api/edge/" + edgeId + "/asset/" + savedAsset.getId(), Asset.class);

    EntityRelation relation = new EntityRelation(savedAsset.getId(), savedDevice.getId(), EntityRelation.CONTAINS_TYPE);
    awaitForNumberOfEdgeEvents(edgeId, 2);
    doPost("/api/relation", relation);
    awaitForNumberOfEdgeEvents(edgeId, 3);

    List<EdgeEvent> edgeEvents = findEdgeEvents(edgeId);
    Assert.assertTrue(popEdgeEvent(edgeEvents, EdgeEventType.DEVICE));
    Assert.assertTrue(popEdgeEvent(edgeEvents, EdgeEventType.ASSET));
    Assert.assertTrue(popEdgeEvent(edgeEvents, EdgeEventType.RELATION));
    Assert.assertTrue(edgeEvents.isEmpty());
}
@Override
public ServiceRecord resolve(String path) throws PathNotFoundException, NoRecordException,
        InvalidRecordException, IOException {
    // Read the entire file into byte array, should be small metadata
    Long size = fs.getFileStatus(formatDataPath(path)).getLen();
    byte[] bytes = new byte[size.intValue()];
    FSDataInputStream instream = fs.open(formatDataPath(path));
    int bytesRead = instream.read(bytes);
    instream.close();
    if (bytesRead < size) {
        throw new InvalidRecordException(path, "Expected " + size + " bytes, but read " + bytesRead);
    }

    // Unmarshal, check, and return
    ServiceRecord record = serviceRecordMarshal.fromBytes(path, bytes);
    RegistryTypeUtils.validateServiceRecord(path, record);
    return record;
}
@Test
public void testResolve() throws IOException {
    ServiceRecord record = createRecord("0");
    registry.bind("test/registryTestNode", record, 1);
    Assert.assertTrue(fs.exists(new Path("test/registryTestNode/_record")));

    System.out.println("Read record that exists");
    ServiceRecord readRecord = registry.resolve("test/registryTestNode");
    Assert.assertNotNull(readRecord);
    Assert.assertTrue(record.equals(readRecord));

    System.out.println("Try to read record that does not exist");
    try {
        readRecord = registry.resolve("test/nonExistentNode");
        Assert.fail("Should throw an error, record does not exist");
    } catch (IOException e) {
    }
}
static public FileSize valueOf(String fileSizeStr) {
    Matcher matcher = FILE_SIZE_PATTERN.matcher(fileSizeStr);

    long coefficient;
    if (matcher.matches()) {
        String lenStr = matcher.group(DOUBLE_GROUP);
        String unitStr = matcher.group(UNIT_GROUP);

        long lenValue = Long.valueOf(lenStr);
        if (unitStr.equalsIgnoreCase("")) {
            coefficient = 1;
        } else if (unitStr.equalsIgnoreCase("kb")) {
            coefficient = KB_COEFFICIENT;
        } else if (unitStr.equalsIgnoreCase("mb")) {
            coefficient = MB_COEFFICIENT;
        } else if (unitStr.equalsIgnoreCase("gb")) {
            coefficient = GB_COEFFICIENT;
        } else {
            throw new IllegalStateException("Unexpected " + unitStr);
        }
        return new FileSize(lenValue * coefficient);
    } else {
        throw new IllegalArgumentException("String value [" + fileSizeStr + "] is not in the expected format.");
    }
}
@Test
public void testValueOf() {
    {
        FileSize fs = FileSize.valueOf("8");
        assertEquals(8, fs.getSize());
    }
    {
        FileSize fs = FileSize.valueOf("8 kbs");
        assertEquals(8 * KB_CO, fs.getSize());
    }
    {
        FileSize fs = FileSize.valueOf("8 kb");
        assertEquals(8 * KB_CO, fs.getSize());
    }
    {
        FileSize fs = FileSize.valueOf("12 mb");
        assertEquals(12 * MB_CO, fs.getSize());
    }
    {
        FileSize fs = FileSize.valueOf("5 GBs");
        assertEquals(5 * GB_CO, fs.getSize());
    }
}
@RequestMapping("/push/state") public ObjectNode pushState(@RequestParam(required = false) boolean detail, @RequestParam(required = false) boolean reset) { ObjectNode result = JacksonUtils.createEmptyJsonNode(); int failedPushCount = MetricsMonitor.getFailedPushMonitor().get(); int totalPushCount = MetricsMonitor.getTotalPushMonitor().get(); result.put("succeed", totalPushCount - failedPushCount); result.put("total", totalPushCount); if (totalPushCount > 0) { result.put("ratio", ((float) totalPushCount - failedPushCount) / totalPushCount); } else { result.put("ratio", 0); } if (detail) { ObjectNode detailNode = JacksonUtils.createEmptyJsonNode(); detailNode.put("avgPushCost", MetricsMonitor.getAvgPushCostMonitor().get()); detailNode.put("maxPushCost", MetricsMonitor.getMaxPushCostMonitor().get()); result.replace("detail", detailNode); } if (reset) { MetricsMonitor.resetPush(); } result.put("reset", reset); return result; }
@Test
void testPushState() {
    MetricsMonitor.resetPush();
    ObjectNode objectNode = operatorController.pushState(true, true);
    assertTrue(objectNode.toString().contains("succeed\":0"));
}
public <T> void unregister(MeshRuleListener subscriber) { meshRuleDispatcher.unregister(subscriber); }
@Test
void unregister() {
    MeshAppRuleListener meshAppRuleListener = new MeshAppRuleListener("demo-route");
    StandardMeshRuleRouter standardMeshRuleRouter1 = Mockito.spy(new StandardMeshRuleRouter(URL.valueOf("")));
    StandardMeshRuleRouter standardMeshRuleRouter2 = Mockito.spy(new StandardMeshRuleRouter(URL.valueOf("")));

    meshAppRuleListener.register(standardMeshRuleRouter1);
    Assertions.assertEquals(1, meshAppRuleListener.getMeshRuleDispatcher()
            .getListenerMap().get(MeshRuleConstants.STANDARD_ROUTER_KEY).size());

    meshAppRuleListener.receiveConfigInfo(rule1 + "---\n" + rule2);

    meshAppRuleListener.register(standardMeshRuleRouter2);
    Assertions.assertEquals(2, meshAppRuleListener.getMeshRuleDispatcher()
            .getListenerMap().get(MeshRuleConstants.STANDARD_ROUTER_KEY).size());

    meshAppRuleListener.unregister(standardMeshRuleRouter1);
    Assertions.assertEquals(1, meshAppRuleListener.getMeshRuleDispatcher()
            .getListenerMap().get(MeshRuleConstants.STANDARD_ROUTER_KEY).size());

    meshAppRuleListener.unregister(standardMeshRuleRouter2);
    Assertions.assertEquals(0, meshAppRuleListener.getMeshRuleDispatcher().getListenerMap().size());
}
void release() {
    synchronized (allBuffers) {
        while (!allBuffers.isEmpty()) {
            allBuffers.poll().f0.recycleBuffer();
        }
    }
}
@Test
void testRelease() {
    SubpartitionDiskCacheManager subpartitionDiskCacheManager = new SubpartitionDiskCacheManager();
    Buffer buffer = BufferBuilderTestUtils.buildSomeBuffer();
    subpartitionDiskCacheManager.append(buffer);

    List<Tuple2<Buffer, Integer>> bufferAndIndexes = subpartitionDiskCacheManager.removeAllBuffers();
    assertThat(bufferAndIndexes).hasSize(1);
    assertThat(bufferAndIndexes.get(0).f0).isEqualTo(buffer);
    assertThatNoException().isThrownBy(subpartitionDiskCacheManager::release);
}
public ConfigTransformerResult transform(Map<String, String> configs) {
    Map<String, Map<String, Set<String>>> keysByProvider = new HashMap<>();
    Map<String, Map<String, Map<String, String>>> lookupsByProvider = new HashMap<>();

    // Collect the variables from the given configs that need transformation
    for (Map.Entry<String, String> config : configs.entrySet()) {
        if (config.getValue() != null) {
            List<ConfigVariable> configVars = getVars(config.getValue(), DEFAULT_PATTERN);
            for (ConfigVariable configVar : configVars) {
                Map<String, Set<String>> keysByPath = keysByProvider.computeIfAbsent(configVar.providerName, k -> new HashMap<>());
                Set<String> keys = keysByPath.computeIfAbsent(configVar.path, k -> new HashSet<>());
                keys.add(configVar.variable);
            }
        }
    }

    // Retrieve requested variables from the ConfigProviders
    Map<String, Long> ttls = new HashMap<>();
    for (Map.Entry<String, Map<String, Set<String>>> entry : keysByProvider.entrySet()) {
        String providerName = entry.getKey();
        ConfigProvider provider = configProviders.get(providerName);
        Map<String, Set<String>> keysByPath = entry.getValue();

        if (provider != null && keysByPath != null) {
            for (Map.Entry<String, Set<String>> pathWithKeys : keysByPath.entrySet()) {
                String path = pathWithKeys.getKey();
                Set<String> keys = new HashSet<>(pathWithKeys.getValue());
                ConfigData configData = provider.get(path, keys);
                Map<String, String> data = configData.data();
                Long ttl = configData.ttl();
                if (ttl != null && ttl >= 0) {
                    ttls.put(path, ttl);
                }
                Map<String, Map<String, String>> keyValuesByPath =
                        lookupsByProvider.computeIfAbsent(providerName, k -> new HashMap<>());
                keyValuesByPath.put(path, data);
            }
        }
    }

    // Perform the transformations by performing variable replacements
    Map<String, String> data = new HashMap<>(configs);
    for (Map.Entry<String, String> config : configs.entrySet()) {
        data.put(config.getKey(), replace(lookupsByProvider, config.getValue(), DEFAULT_PATTERN));
    }
    return new ConfigTransformerResult(data, ttls);
}
@Test
public void testReplaceVariableWithTTL() {
    ConfigTransformerResult result = configTransformer.transform(Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithTTL}"));
    Map<String, String> data = result.data();
    Map<String, Long> ttls = result.ttls();
    assertEquals(TEST_RESULT_WITH_TTL, data.get(MY_KEY));
    assertEquals(1L, ttls.get(TEST_PATH).longValue());
}
public static String getFullElapsedTime(final long delta) {
    if (delta < Duration.ofSeconds(1).toMillis()) {
        return String.format("%d %s", delta, delta == 1 ? LocaleUtils.getLocalizedString("global.millisecond") : LocaleUtils.getLocalizedString("global.milliseconds"));
    } else if (delta < Duration.ofMinutes(1).toMillis()) {
        final long millis = delta % Duration.ofSeconds(1).toMillis();
        final long seconds = delta / Duration.ofSeconds(1).toMillis();
        final String secondsString = String.format("%d %s", seconds, seconds == 1 ? LocaleUtils.getLocalizedString("global.second") : LocaleUtils.getLocalizedString("global.seconds"));
        if (millis > 0) {
            return secondsString + ", " + getFullElapsedTime(millis);
        } else {
            return secondsString;
        }
    } else if (delta < Duration.ofHours(1).toMillis()) {
        final long millis = delta % Duration.ofMinutes(1).toMillis();
        final long minutes = delta / Duration.ofMinutes(1).toMillis();
        final String minutesString = String.format("%d %s", minutes, minutes == 1 ? LocaleUtils.getLocalizedString("global.minute") : LocaleUtils.getLocalizedString("global.minutes"));
        if (millis > 0) {
            return minutesString + ", " + getFullElapsedTime(millis);
        } else {
            return minutesString;
        }
    } else if (delta < Duration.ofDays(1).toMillis()) {
        final long millis = delta % Duration.ofHours(1).toMillis();
        final long hours = delta / Duration.ofHours(1).toMillis();
        final String hoursString = String.format("%d %s", hours, hours == 1 ? LocaleUtils.getLocalizedString("global.hour") : LocaleUtils.getLocalizedString("global.hours"));
        if (millis > 0) {
            return hoursString + ", " + getFullElapsedTime(millis);
        } else {
            return hoursString;
        }
    } else {
        final long millis = delta % Duration.ofDays(1).toMillis();
        final long days = delta / Duration.ofDays(1).toMillis();
        final String daysString = String.format("%d %s", days, days == 1 ? LocaleUtils.getLocalizedString("global.day") : LocaleUtils.getLocalizedString("global.days"));
        if (millis > 0) {
            return daysString + ", " + getFullElapsedTime(millis);
        } else {
            return daysString;
        }
    }
}
@Test
public void testElapsedTimeInDays() throws Exception {
    assertThat(StringUtils.getFullElapsedTime(Duration.ofDays(1)), is("1 day"));
    assertThat(StringUtils.getFullElapsedTime(Duration.ofDays(1).plus(Duration.ofHours(1)).plus(Duration.ofMinutes(1)).plus(Duration.ofSeconds(1)).plus(Duration.ofMillis(1))),
            is("1 day, 1 hour, 1 minute, 1 second, 1 ms"));
    assertThat(StringUtils.getFullElapsedTime(Duration.ofDays(10).plus(Duration.ofHours(10))), is("10 days, 10 hours"));
}
@SneakyThrows({InterruptedException.class, ExecutionException.class})
@Override
public List<String> getChildrenKeys(final String key) {
    String prefix = key + PATH_SEPARATOR;
    ByteSequence prefixByteSequence = ByteSequence.from(prefix, StandardCharsets.UTF_8);
    GetOption getOption = GetOption.newBuilder()
            .isPrefix(true)
            .withSortField(GetOption.SortTarget.KEY)
            .withSortOrder(GetOption.SortOrder.ASCEND)
            .build();
    List<KeyValue> keyValues = client.getKVClient().get(prefixByteSequence, getOption).get().getKvs();
    return keyValues.stream()
            .map(each -> getSubNodeKeyName(prefix, each.getKey().toString(StandardCharsets.UTF_8)))
            .distinct()
            .collect(Collectors.toList());
}
@Test
void assertGetChildrenKeysWhenThrowInterruptedException() throws ExecutionException, InterruptedException {
    doThrow(InterruptedException.class).when(getFuture).get();
    try {
        repository.getChildrenKeys("/key/key1");
        // CHECKSTYLE:OFF
    } catch (final Exception ex) {
        // CHECKSTYLE:ON
        assertThat(ex, instanceOf(InterruptedException.class));
    }
}
public static Properties getProperties(File file) throws AnalysisException {
    try (BufferedReader utf8Reader = Files.newBufferedReader(file.toPath(), StandardCharsets.UTF_8)) {
        return getProperties(utf8Reader);
    } catch (IOException | IllegalArgumentException e) {
        throw new AnalysisException("Error parsing PyPA core-metadata file", e);
    }
}
@Test
public void getProperties_should_support_folding_in_headerValue() throws IOException {
    String payload = "Metadata-Version: 2\r\n"
            + " .2\r\n"
            + "Description: My value\r\n"
            + " contains a \r\n"
            + " : colon\r\n";
    Properties props = PyPACoreMetadataParser.getProperties(new BufferedReader(new StringReader(payload)));
    Assert.assertEquals("2.2", props.getProperty("Metadata-Version"));
    Assert.assertEquals("My value contains a : colon", props.getProperty("Description"));
}
public Analysis analyze(Statement statement) { return analyze(statement, false); }
@Test
public void testQuantifiedComparisonExpression() {
    analyze("SELECT * FROM t1 WHERE t1.a <= ALL (VALUES 10, 20)");
    assertFails(MULTIPLE_FIELDS_FROM_SUBQUERY, "SELECT * FROM t1 WHERE t1.a = ANY (SELECT 1, 2)");
    assertFails(TYPE_MISMATCH, "SELECT * FROM t1 WHERE t1.a = SOME (VALUES ('abc'))");

    // map is not orderable
    assertFails(TYPE_MISMATCH, "SELECT map(ARRAY[1], ARRAY['hello']) < ALL (VALUES map(ARRAY[1], ARRAY['hello']))");
    // but map is comparable
    analyze("SELECT map(ARRAY[1], ARRAY['hello']) = ALL (VALUES map(ARRAY[1], ARRAY['hello']))");

    // HLL is neither orderable nor comparable
    assertFails(TYPE_MISMATCH, "SELECT cast(NULL AS HyperLogLog) < ALL (VALUES cast(NULL AS HyperLogLog))");
    assertFails(TYPE_MISMATCH, "SELECT cast(NULL AS HyperLogLog) = ANY (VALUES cast(NULL AS HyperLogLog))");

    // qdigest is neither orderable nor comparable
    assertFails(TYPE_MISMATCH, "SELECT cast(NULL AS qdigest(double)) < ALL (VALUES cast(NULL AS qdigest(double)))");
    assertFails(TYPE_MISMATCH, "SELECT cast(NULL AS qdigest(double)) = ANY (VALUES cast(NULL AS qdigest(double)))");
}
ImmutableList<PayloadDefinition> validatePayloads(List<PayloadDefinition> payloads) {
    for (PayloadDefinition p : payloads) {
        checkArgument(p.hasName(), "Parsed payload does not have a name.");
        checkArgument(
            p.getInterpretationEnvironment()
                != PayloadGeneratorConfig.InterpretationEnvironment.INTERPRETATION_ENVIRONMENT_UNSPECIFIED,
            "Parsed payload does not have an interpretation_environment.");
        checkArgument(
            p.getExecutionEnvironment()
                != PayloadGeneratorConfig.ExecutionEnvironment.EXECUTION_ENVIRONMENT_UNSPECIFIED,
            "Parsed payload does not have an execution_environment.");
        checkArgument(
            !p.getVulnerabilityTypeList().isEmpty(),
            "Parsed payload has no entries for vulnerability_type.");
        checkArgument(p.hasPayloadString(), "Parsed payload does not have a payload_string.");

        if (p.getUsesCallbackServer().getValue()) {
            checkArgument(
                p.getPayloadString().getValue().contains("$TSUNAMI_PAYLOAD_TOKEN_URL"),
                "Parsed payload uses callback server but $TSUNAMI_PAYLOAD_TOKEN_URL not found in"
                    + " payload_string.");
        } else {
            checkArgument(
                p.getValidationType() != PayloadValidationType.VALIDATION_TYPE_UNSPECIFIED,
                "Parsed payload has no validation_type and does not use the callback server.");

            if (p.getValidationType() == PayloadValidationType.VALIDATION_REGEX) {
                checkArgument(
                    p.hasValidationRegex(),
                    "Parsed payload has no validation_regex but uses PayloadValidationType.REGEX");
            }
        }
    }

    return ImmutableList.copyOf(payloads);
}
@Test
public void validatePayloads_withCallbackPayloadWithoutUrlToken_throwsException() throws IOException {
    PayloadDefinition p = goodCallbackDefinition.setPayloadString(StringValue.of("my payload")).build();

    Throwable thrown = assertThrows(
        IllegalArgumentException.class, () -> module.validatePayloads(ImmutableList.of(p)));

    assertThat(thrown).hasMessageThat().contains("$TSUNAMI_PAYLOAD_TOKEN_URL");
}
@Override
public double quantile(double p) {
    if (p < 0.0 || p > 1.0) {
        throw new IllegalArgumentException("Invalid p: " + p);
    }
    return 2 * Gamma.inverseRegularizedIncompleteGamma(0.5 * nu, p);
}
@Test
public void testQuantile() {
    System.out.println("quantile");
    ChiSquareDistribution instance = new ChiSquareDistribution(20);
    instance.rand();
    assertEquals(0.0, instance.quantile(0), 1E-7);
    assertEquals(12.44261, instance.quantile(0.1), 1E-5);
    assertEquals(14.57844, instance.quantile(0.2), 1E-5);
    assertEquals(16.26586, instance.quantile(0.3), 1E-5);
    assertEquals(19.33743, instance.quantile(0.5), 1E-5);
    assertEquals(28.41198, instance.quantile(0.9), 1E-5);
}
@Override
public OptimizeTableStatement getSqlStatement() {
    return (OptimizeTableStatement) super.getSqlStatement();
}
@Test
void assertNewInstance() {
    MySQLOptimizeTableStatement sqlStatement = new MySQLOptimizeTableStatement();
    sqlStatement.getTables().add(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("tbl_1"))));
    OptimizeTableStatementContext actual = new OptimizeTableStatementContext(sqlStatement, DefaultDatabase.LOGIC_NAME);
    assertThat(actual, instanceOf(CommonSQLStatementContext.class));
    assertThat(actual.getSqlStatement(), is(sqlStatement));
    assertThat(actual.getTablesContext().getSimpleTables().stream()
            .map(each -> each.getTableName().getIdentifier().getValue()).collect(Collectors.toList()),
            is(Collections.singletonList("tbl_1")));
}
@Override
protected Future<KafkaMirrorMakerStatus> createOrUpdate(Reconciliation reconciliation, KafkaMirrorMaker assemblyResource) {
    String namespace = reconciliation.namespace();

    KafkaMirrorMakerCluster mirror;
    KafkaMirrorMakerStatus kafkaMirrorMakerStatus = new KafkaMirrorMakerStatus();
    try {
        mirror = KafkaMirrorMakerCluster.fromCrd(reconciliation, assemblyResource, versions, sharedEnvironmentProvider);
    } catch (Exception e) {
        LOGGER.warnCr(reconciliation, e);
        StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, e);
        return Future.failedFuture(new ReconciliationException(kafkaMirrorMakerStatus, e));
    }

    Map<String, String> annotations = new HashMap<>(1);

    KafkaClientAuthentication authConsumer = assemblyResource.getSpec().getConsumer().getAuthentication();
    List<CertSecretSource> trustedCertificatesConsumer = assemblyResource.getSpec().getConsumer().getTls() == null
            ? Collections.emptyList() : assemblyResource.getSpec().getConsumer().getTls().getTrustedCertificates();
    KafkaClientAuthentication authProducer = assemblyResource.getSpec().getProducer().getAuthentication();
    List<CertSecretSource> trustedCertificatesProducer = assemblyResource.getSpec().getProducer().getTls() == null
            ? Collections.emptyList() : assemblyResource.getSpec().getProducer().getTls().getTrustedCertificates();

    Promise<KafkaMirrorMakerStatus> createOrUpdatePromise = Promise.promise();

    boolean mirrorHasZeroReplicas = mirror.getReplicas() == 0;

    LOGGER.debugCr(reconciliation, "Updating Kafka Mirror Maker cluster");
    mirrorMakerServiceAccount(reconciliation, namespace, mirror)
            .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, mirror.getComponentName(), mirror.getReplicas(), operationTimeoutMs))
            .compose(i -> MetricsAndLoggingUtils.metricsAndLogging(reconciliation, configMapOperations, mirror.logging(), mirror.metrics()))
            .compose(metricsAndLoggingCm -> {
                ConfigMap logAndMetricsConfigMap = mirror.generateMetricsAndLogConfigMap(metricsAndLoggingCm);
                annotations.put(Annotations.ANNO_STRIMZI_LOGGING_HASH, Util.hashStub(logAndMetricsConfigMap.getData().get(mirror.logging().configMapKey())));
                return configMapOperations.reconcile(reconciliation, namespace, KafkaMirrorMakerResources.metricsAndLogConfigMapName(reconciliation.name()), logAndMetricsConfigMap);
            })
            .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, mirror.getComponentName(), mirror.generatePodDisruptionBudget()))
            .compose(i -> Future.join(VertxUtil.authTlsHash(secretOperations, namespace, authConsumer, trustedCertificatesConsumer),
                    VertxUtil.authTlsHash(secretOperations, namespace, authProducer, trustedCertificatesProducer)))
            .compose(hashFut -> {
                if (hashFut != null) {
                    annotations.put(Annotations.ANNO_STRIMZI_AUTH_HASH, Integer.toString((int) hashFut.resultAt(0) + (int) hashFut.resultAt(1)));
                }
                return Future.succeededFuture();
            })
            .compose(i -> deploymentOperations.reconcile(reconciliation, namespace, mirror.getComponentName(), mirror.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets)))
            .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, mirror.getComponentName(), mirror.getReplicas(), operationTimeoutMs))
            .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, mirror.getComponentName(), 1_000, operationTimeoutMs))
            .compose(i -> mirrorHasZeroReplicas
                    ? Future.succeededFuture()
                    : deploymentOperations.readiness(reconciliation, namespace, mirror.getComponentName(), 1_000, operationTimeoutMs))
            .onComplete(reconciliationResult -> {
                StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, reconciliationResult.cause());

                // Add warning about Mirror Maker 1 being deprecated and removed soon
                LOGGER.warnCr(reconciliation, "Mirror Maker 1 is deprecated and will be removed in Apache Kafka 4.0.0. Please migrate to Mirror Maker 2.");
                StatusUtils.addConditionsToStatus(kafkaMirrorMakerStatus, Set.of(StatusUtils.buildWarningCondition("MirrorMaker1Deprecation", "Mirror Maker 1 is deprecated and will be removed in Apache Kafka 4.0.0. Please migrate to Mirror Maker 2.")));

                kafkaMirrorMakerStatus.setReplicas(mirror.getReplicas());
                kafkaMirrorMakerStatus.setLabelSelector(mirror.getSelectorLabels().toSelectorString());

                if (reconciliationResult.succeeded()) {
                    createOrUpdatePromise.complete(kafkaMirrorMakerStatus);
                } else {
                    createOrUpdatePromise.fail(new ReconciliationException(kafkaMirrorMakerStatus, reconciliationResult.cause()));
                }
            });

    return createOrUpdatePromise.future();
}
@Test
public void testUpdateCluster(VertxTestContext context) {
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
    CrdOperator<KubernetesClient, KafkaMirrorMaker, KafkaMirrorMakerList> mockMirrorOps = supplier.mirrorMakerOperator;
    DeploymentOperator mockDcOps = supplier.deploymentOperations;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;

    String kmmName = "foo";
    String kmmNamespace = "test";

    KafkaMirrorMakerConsumerSpec consumer = new KafkaMirrorMakerConsumerSpecBuilder()
            .withBootstrapServers(consumerBootstrapServers)
            .withGroupId(groupId)
            .withNumStreams(numStreams)
            .build();
    KafkaMirrorMakerProducerSpec producer = new KafkaMirrorMakerProducerSpecBuilder()
            .withBootstrapServers(producerBootstrapServers)
            .build();
    KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, producer, consumer, include);
    KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS, SHARED_ENV_PROVIDER);
    kmm.getSpec().setImage("some/different:image"); // Change the image to generate some diff

    when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm);
    when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm));
    when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture());
    when(mockDcOps.get(kmmNamespace, mirror.getComponentName())).thenReturn(mirror.generateDeployment(new HashMap<>(), true, null, null));
    when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getComponentName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> dcNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
    when(mockDcOps.reconcile(any(), eq(kmmNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Integer> dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
    when(mockDcOps.scaleUp(any(), eq(kmmNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Integer> dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
    when(mockDcOps.scaleDown(any(), eq(kmmNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<PodDisruptionBudget> pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class);
    when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture());
    when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));

    // Mock CM get
    when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm);
    ConfigMap metricsCm = new ConfigMapBuilder()
            .withNewMetadata()
                .withName(KafkaMirrorMakerResources.metricsAndLogConfigMapName(kmmName))
                .withNamespace(kmmNamespace)
            .endMetadata()
            .withData(Collections.singletonMap(MetricsModel.CONFIG_MAP_KEY, METRICS_CONFIG))
            .build();
    when(mockCmOps.get(kmmNamespace, KafkaMirrorMakerResources.metricsAndLogConfigMapName(kmmName))).thenReturn(metricsCm);

    // Mock CM patch
    Set<String> metricsCms = TestUtils.set();
    doAnswer(invocation -> {
        metricsCms.add(invocation.getArgument(1));
        return Future.succeededFuture();
    }).when(mockCmOps).reconcile(any(), eq(kmmNamespace), anyString(), any());

    KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx,
            new PlatformFeaturesAvailability(true, kubernetesVersion),
            new MockCertManager(), new PasswordGenerator(10, "a", "a"),
            supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS));

    Checkpoint async = context.checkpoint();
    ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, kmmNamespace, kmmName), kmm)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            KafkaMirrorMakerCluster compareTo = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS, SHARED_ENV_PROVIDER);

            // Verify Deployment
            List<Deployment> capturedDc = dcCaptor.getAllValues();
            assertThat(capturedDc, hasSize(1));
            Deployment dc = capturedDc.get(0);
            assertThat(dc.getMetadata().getName(), is(compareTo.getComponentName()));

            Map<String, String> annotations = new HashMap<>();
            annotations.put(Annotations.ANNO_STRIMZI_LOGGING_HASH, "e697cf66");
            annotations.put(Annotations.ANNO_STRIMZI_AUTH_HASH, "0");
            assertThat("Deployments are not equal", dc, is(compareTo.generateDeployment(annotations, true, null, null)));

            // Verify PodDisruptionBudget
            List<PodDisruptionBudget> capturedPdb = pdbCaptor.getAllValues();
            assertThat(capturedPdb, hasSize(1));
            PodDisruptionBudget pdb = capturedPdb.get(0);
            assertThat(pdb.getMetadata().getName(), is(compareTo.getComponentName()));
            assertThat("PodDisruptionBudgets are not equal", pdb, is(compareTo.generatePodDisruptionBudget()));

            // Verify scaleDown / scaleUp were not called
            assertThat(dcScaleDownNameCaptor.getAllValues(), hasSize(1));
            assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1));

            // No metrics config => no CMs created
            verify(mockCmOps, never()).createOrUpdate(any(), any());

            async.flag();
        })));
}
public static HttpServletResponse createRumResponseWrapper(HttpServletRequest httpRequest,
        HttpServletResponse httpResponse, String requestName) {
    if (HtmlInjectorServletResponseWrapper.acceptsRequest(httpRequest)) {
        final HtmlToInject htmlToInject = new RumInjector(httpRequest, requestName);
        return new HtmlInjectorServletResponseWrapper(httpRequest, httpResponse, htmlToInject);
    }
    return httpResponse;
}
@Test
public void testCreateRumResponseWrapper() throws IOException {
    final String requestName = "test GET";

    final HttpServletRequest httpRequest = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse = createNiceMock(HttpServletResponse.class);
    expect(httpRequest.getHeader("accept")).andReturn(null);
    final HttpServletResponse result = createRumResponseWrapper(httpRequest, httpResponse, requestName);
    assertFalse("createRumResponseWrapper", result instanceof HtmlInjectorServletResponseWrapper);

    final HttpServletRequest httpRequest2 = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse2 = createNiceMock(HttpServletResponse.class);
    expect(httpRequest2.getHeader("accept")).andReturn("text/xml");
    final HttpServletResponse result2 = createRumResponseWrapper(httpRequest2, httpResponse2, requestName);
    assertFalse("createRumResponseWrapper", result2 instanceof HtmlInjectorServletResponseWrapper);

    final HttpServletRequest httpRequest3 = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse3 = createNiceMock(HttpServletResponse.class);
    expect(httpRequest3.getHeader("accept")).andReturn("text/html");
    expect(httpRequest3.getAttribute("javamelody.injectorWrapped")).andReturn(Boolean.TRUE);
    final HttpServletResponse result3 = createRumResponseWrapper(httpRequest3, httpResponse3, requestName);
    assertFalse("createRumResponseWrapper", result3 instanceof HtmlInjectorServletResponseWrapper);

    final HttpServletRequest httpRequest4 = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse4 = createNiceMock(HttpServletResponse.class);
    expect(httpRequest4.getHeader("accept")).andReturn("text/html").anyTimes();
    expect(httpResponse4.getContentType()).andReturn("text/xml").anyTimes();
    final HttpServletResponse result4 = createRumResponseWrapper(httpRequest4, httpResponse4, requestName);
    assertTrue("createRumResponseWrapper", result4 instanceof HtmlInjectorServletResponseWrapper);
    assertFalse("createRumResponseWrapper", result4.getOutputStream() instanceof HtmlInjectorResponseStream);

    final HttpServletRequest httpRequest5 = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse5 = createNiceMock(HttpServletResponse.class);
    expect(httpRequest5.getHeader("accept")).andReturn("text/html").anyTimes();
    expect(httpResponse5.getContentType()).andReturn(null).anyTimes();
    final HttpServletResponse result5 = createRumResponseWrapper(httpRequest5, httpResponse5, requestName);
    assertTrue("createRumResponseWrapper", result5 instanceof HtmlInjectorServletResponseWrapper);
    assertTrue("createRumResponseWrapper", result5.getOutputStream() instanceof HtmlInjectorResponseStream);

    final HttpServletRequest httpRequest6 = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse6 = createNiceMock(HttpServletResponse.class);
    expect(httpRequest6.getHeader("accept")).andReturn("text/html").anyTimes();
    expect(httpResponse6.getContentType()).andReturn("text/html").anyTimes();
    final HttpServletResponse result6 = createRumResponseWrapper(httpRequest6, httpResponse6, requestName);
    final ServletOutputStream outputStream = result6.getOutputStream();
    outputStream.write(' ');
    outputStream.write("<!-- begin test -->".getBytes(StandardCharsets.UTF_8));
    final String htmlContent = "<html><body>test</body></html>";
    outputStream.write(htmlContent.getBytes(StandardCharsets.UTF_8));
    result6.setContentType("text/html");
    outputStream.write("<!-- end test -->".getBytes(StandardCharsets.UTF_8));
    assertTrue("createRumResponseWrapper", result6 instanceof HtmlInjectorServletResponseWrapper);
    assertTrue("createRumResponseWrapper", result6.getOutputStream() instanceof HtmlInjectorResponseStream);

    final HttpServletRequest httpRequest7 = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse7 = createNiceMock(HttpServletResponse.class);
    expect(httpRequest7.getHeader("accept")).andReturn("text/html").anyTimes();
    expect(httpResponse7.getContentType()).andReturn("text/html").anyTimes();
    final HttpServletResponse result7 = createRumResponseWrapper(httpRequest7, httpResponse7, "//test/test GET");
    result7.getOutputStream().write(htmlContent.getBytes(StandardCharsets.UTF_8));
    result7.setContentType("text/html");
    assertTrue("createRumResponseWrapper", result7 instanceof HtmlInjectorServletResponseWrapper);
    assertTrue("createRumResponseWrapper", result7.getOutputStream() instanceof HtmlInjectorResponseStream);

    final HttpServletRequest httpRequest8 = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse httpResponse8 = createNiceMock(HttpServletResponse.class);
    expect(httpRequest8.getHeader("accept")).andReturn("text/html").anyTimes();
    expect(httpResponse8.getContentType()).andReturn("text/html").anyTimes();
    final HttpServletResponse result8 = createRumResponseWrapper(httpRequest8, httpResponse8, requestName);
    result8.setContentType("text/xml");
    ((HtmlInjectorResponseStream) result8.getOutputStream()).cancelInjection();
    result8.getOutputStream()
            .write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><web-app></<web-app>"
                    .getBytes(StandardCharsets.UTF_8));
    assertTrue("createRumResponseWrapper", result8 instanceof HtmlInjectorServletResponseWrapper);
    assertTrue("createRumResponseWrapper", result8.getOutputStream() instanceof HtmlInjectorResponseStream);
}
public static SchemaAndValue parseString(String value) {
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    if (value.isEmpty()) {
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    ValueParser parser = new ValueParser(new Parser(value));
    return parser.parse(false);
}
@Test
public void shouldParseStringListWithExtraDelimitersAndReturnString() {
    String str = "[1, 2, 3,,,]";
    SchemaAndValue result = Values.parseString(str);
    assertEquals(Type.STRING, result.schema().type());
    assertEquals(str, result.value());
}
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
    GuardedByExpression expr = BINDER.visit(exp, context);
    checkGuardedBy(expr != null, String.valueOf(exp));
    checkGuardedBy(expr.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
    return expr;
}
@Test
public void otherClass() {
    assertThat(
            bind(
                "Test",
                "Other.lock",
                forSourceLines(
                    "threadsafety/Test.java",
                    "package threadsafety;",
                    "class Other {",
                    " static final Object lock = new Object();",
                    "}",
                    "class Test {",
                    "}")))
        .isEqualTo("(SELECT (TYPE_LITERAL threadsafety.Other) lock)");
}
public ProcResult fetchResultByFilter(HashMap<String, Expr> filter, ArrayList<OrderByPair> orderByPairs,
                                      LimitElement limitElement) throws AnalysisException {
    Preconditions.checkNotNull(db);
    Preconditions.checkNotNull(schemaChangeHandler);

    List<List<Comparable>> schemaChangeJobInfos = getOptimizeJobInfos();

    // where
    List<List<Comparable>> jobInfos;
    if (filter == null || filter.size() == 0) {
        jobInfos = schemaChangeJobInfos;
    } else {
        jobInfos = Lists.newArrayList();
        for (List<Comparable> infoStr : schemaChangeJobInfos) {
            if (infoStr.size() != TITLE_NAMES.size()) {
                LOG.warn("SchemaChangeJobInfos.size() " + schemaChangeJobInfos.size()
                        + " not equal TITLE_NAMES.size() " + TITLE_NAMES.size());
                continue;
            }
            boolean isNeed = true;
            for (int i = 0; i < infoStr.size(); i++) {
                isNeed = filterResult(TITLE_NAMES.get(i), infoStr.get(i), filter);
                if (!isNeed) {
                    break;
                }
            }
            if (isNeed) {
                jobInfos.add(infoStr);
            }
        }
    }

    // order by
    if (orderByPairs != null) {
        ListComparator<List<Comparable>> comparator = null;
        OrderByPair[] orderByPairArr = new OrderByPair[orderByPairs.size()];
        comparator = new ListComparator<List<Comparable>>(orderByPairs.toArray(orderByPairArr));
        Collections.sort(jobInfos, comparator);
    }

    // limit
    if (limitElement != null && limitElement.hasLimit()) {
        int beginIndex = (int) limitElement.getOffset();
        int endIndex = (int) (beginIndex + limitElement.getLimit());
        if (endIndex > jobInfos.size()) {
            endIndex = jobInfos.size();
        }
        jobInfos = jobInfos.subList(beginIndex, endIndex);
    }

    BaseProcResult result = new BaseProcResult();
    result.setNames(TITLE_NAMES);
    for (List<Comparable> jobInfo : jobInfos) {
        // size each row by its column count (jobInfo), not the number of rows (jobInfos)
        List<String> oneResult = new ArrayList<String>(jobInfo.size());
        for (Comparable column : jobInfo) {
            oneResult.add(column.toString());
        }
        result.addRow(oneResult);
    }
    return result;
}
@Test
public void testFetchResultByFilterNull() throws AnalysisException {
    BaseProcResult result = (BaseProcResult) optimizeProcDir.fetchResultByFilter(null, null, null);
    List<List<String>> rows = result.getRows();

    List<String> list1 = rows.get(0);
    Assert.assertEquals(list1.size(), OptimizeProcDir.TITLE_NAMES.size());
    // JobId
    Assert.assertEquals("1", list1.get(0));
    // TableName
    Assert.assertEquals("tb1", list1.get(1));
    // CreateTime
    Assert.assertEquals("2020-01-01", list1.get(2));
    // FinishTime
    Assert.assertEquals("2020-01-01", list1.get(3));
    // Operation
    Assert.assertEquals("ALTER", list1.get(4));
    // TransactionId
    Assert.assertEquals("0", list1.get(5));
    // State
    Assert.assertEquals("FINISHED", list1.get(6));
    // Msg
    Assert.assertEquals("", list1.get(7));
    // Progress
    Assert.assertEquals("100", list1.get(8));
    // Timeout
    Assert.assertEquals("10000", list1.get(9));

    List<String> list2 = rows.get(1);
    Assert.assertEquals(list2.size(), OptimizeProcDir.TITLE_NAMES.size());
    // JobId
    Assert.assertEquals("1", list2.get(0));
    // TableName
    Assert.assertEquals("tb1", list2.get(1));
    // CreateTime
    Assert.assertEquals("2020-01-01", list2.get(2));
    // FinishTime
    Assert.assertEquals("2020-01-01", list2.get(3));
    // Operation
    Assert.assertEquals("ALTER", list2.get(4));
    // TransactionId
    Assert.assertEquals("0", list2.get(5));
    // State
    Assert.assertEquals("FINISHED", list2.get(6));
    // Msg
    Assert.assertEquals("", list2.get(7));
    // Progress
    Assert.assertEquals("100", list2.get(8));
    // Timeout
    Assert.assertEquals("10000", list2.get(9));
}
@Override public void handleRequest(RestRequest request, RequestContext requestContext, final Callback<RestResponse> callback) { if (HttpMethod.POST != HttpMethod.valueOf(request.getMethod())) { _log.error("POST is expected, but " + request.getMethod() + " received"); callback.onError(RestException.forError(HttpStatus.S_405_METHOD_NOT_ALLOWED.getCode(), "Invalid method")); return; } // Disable server-side latency instrumentation for multiplexed requests requestContext.putLocalAttr(TimingContextUtil.TIMINGS_DISABLED_KEY_NAME, true); IndividualRequestMap individualRequests; try { individualRequests = extractIndividualRequests(request); if (_multiplexerSingletonFilter != null) { individualRequests = _multiplexerSingletonFilter.filterRequests(individualRequests); } } catch (RestException e) { _log.error("Invalid multiplexed request", e); callback.onError(e); return; } catch (Exception e) { _log.error("Invalid multiplexed request", e); callback.onError(RestException.forError(HttpStatus.S_400_BAD_REQUEST.getCode(), e)); return; } // prepare the map of individual responses to be collected final IndividualResponseMap individualResponses = new IndividualResponseMap(individualRequests.size()); final Map<String, HttpCookie> responseCookies = new HashMap<>(); // all tasks are Void and side effect based, that will be useful when we add streaming Task<?> requestProcessingTask = createParallelRequestsTask(request, requestContext, individualRequests, individualResponses, responseCookies); Task<Void> responseAggregationTask = Task.action("send aggregated response", () -> { RestResponse aggregatedResponse = aggregateResponses(individualResponses, responseCookies); callback.onSuccess(aggregatedResponse); } ); _engine.run(requestProcessingTask.andThen(responseAggregationTask), MUX_PLAN_CLASS); }
@Test(dataProvider = "multiplexerConfigurations") public void testHandleSingleRequest(MultiplexerRunMode multiplexerRunMode) throws Exception { SynchronousRequestHandler mockHandler = createMockHandler(); MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(mockHandler, multiplexerRunMode); RequestContext requestContext = new RequestContext(); RestRequest request = fakeMuxRestRequest(ImmutableMap.of("0", fakeIndRequest(FOO_URL))); // set expectations RestRequest individualRestRequest = fakeIndRestRequest(FOO_URL); RestResponse individualRestResponse = fakeIndRestResponse(FOO_ENTITY); expect(mockHandler.handleRequestSync(individualRestRequest, requestContext)).andReturn(individualRestResponse); // switch into replay mode replay(mockHandler); FutureCallback<RestResponse> callback = new FutureCallback<>(); multiplexer.handleRequest(request, requestContext, callback); RestResponse muxRestResponse = callback.get(); RestResponse expectedMuxRestResponse = fakeMuxRestResponse(ImmutableMap.of(0, fakeIndResponse(FOO_JSON_BODY))); assertEquals(muxRestResponse, expectedMuxRestResponse); verify(mockHandler); }
public abstract SubClusterId getApplicationHomeSubCluster(ApplicationId appId) throws Exception;
@Test
public void testGetHomeSubClusterForApp() throws YarnException {
    for (int i = 0; i < numApps; i++) {
        ApplicationId appId = ApplicationId.newInstance(clusterTs, i);
        SubClusterId expectedSC = stateStoreTestUtil.queryApplicationHomeSC(appId);
        SubClusterId cachedSC = facade.getApplicationHomeSubCluster(appId);
        assertEquals(expectedSC, cachedSC);
    }
}
public static double getUTCTimestampWithMilliseconds() { return getUTCTimestampWithMilliseconds(System.currentTimeMillis()); }
@Test public void testGetUTCTimestampWithMilliseconds() { assertTrue(Tools.getUTCTimestampWithMilliseconds() > 0.0d); assertTrue(Tools.getUTCTimestampWithMilliseconds(Instant.now().toEpochMilli()) > 0.0d); }
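A sketch pinning down the conversion itself; that the millisecond overload returns epochMillis / 1000.0 is an assumption, not confirmed by the snippet above:
@Test
public void testGetUTCTimestampWithMillisecondsConversion() {
    // assumption: the overload converts epoch milliseconds to fractional seconds
    assertEquals(1.5d, Tools.getUTCTimestampWithMilliseconds(1500L), 0.000001d);
}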
public static String getAddress(ECKeyPair ecKeyPair) { return getAddress(ecKeyPair.getPublicKey()); }
@Test
public void testGetAddressZeroPaddedAddress() {
    String publicKey =
            "0xa1b31be4d58a7ddd24b135db0da56a90fb5382077ae26b250e1dc9cd6232ce22"
                    + "70f4c995428bc76aa78e522316e95d7834d725efc9ca754d043233af6ca90113";
    assertEquals("01c52b08330e05d731e38c856c1043288f7d9744", Keys.getAddress(publicKey));
}
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { if (stream == null) { throw new NullPointerException("null stream"); } Throwable t; boolean alive = false; ForkClient client = acquireClient(); try { ContentHandler tee = (handler instanceof AbstractRecursiveParserWrapperHandler) ? handler : new TeeContentHandler(handler, new MetadataContentHandler(metadata)); t = client.call("parse", stream, tee, metadata, context); alive = true; } catch (TikaException te) { // Problem occurred on our side alive = true; throw te; } catch (IOException e) { // Problem occurred on the other side throw new TikaException("Failed to communicate with a forked parser process." + " The process has most likely crashed due to some error" + " like running out of memory. A new process will be" + " started for the next parsing request.", e); } finally { releaseClient(client, alive); } if (t instanceof IOException) { throw (IOException) t; } else if (t instanceof SAXException) { throw (SAXException) t; } else if (t instanceof TikaException) { throw (TikaException) t; } else if (t != null) { throw new TikaException("Unexpected error in forked server process", t); } }
@Test public void testForkParserDoesntPreventShutdown() throws Exception { ExecutorService service = Executors.newFixedThreadPool(1); CountDownLatch cdl = new CountDownLatch(1); service.submit(() -> { try (ForkParser parser = new ForkParser(ForkParserTest.class.getClassLoader(), new ForkTestParser.ForkTestParserWaiting())) { Metadata metadata = new Metadata(); ContentHandler output = new BodyContentHandler(); InputStream stream = new ByteArrayInputStream(new byte[0]); ParseContext context = new ParseContext(); cdl.countDown(); parser.parse(stream, output, metadata, context); // Don't care about output not planning to get this far } catch (IOException | SAXException | TikaException e) { throw new RuntimeException(e); } }); // Wait to make sure submitted runnable is actually running boolean await = cdl.await(1, TimeUnit.SECONDS); if (!await) { // This should never happen but be thorough fail("Future never ran so cannot test cancellation"); } // Parse is being called try and shutdown Instant requestShutdown = Instant.now(); service.shutdownNow(); service.awaitTermination(15, TimeUnit.SECONDS); long secondsSinceShutdown = ChronoUnit.SECONDS.between(requestShutdown, Instant.now()); assertTrue(secondsSinceShutdown < 5, "Should have shutdown the service in less than 5 seconds"); }
public void addFirst(T event) throws RejectedExecutionException { lock.lock(); try { if (closed) throw new RejectedExecutionException("Can't accept an event because the accumulator is closed."); K key = event.key(); Deque<T> queue = queues.get(key); if (queue == null) { queue = new LinkedList<>(); queues.put(key, queue); if (!inflightKeys.contains(key)) { addAvailableKey(key); } } queue.addFirst(event); size++; } finally { lock.unlock(); } }
@Test public void testAddFirst() { EventAccumulator<Integer, MockEvent> accumulator = new EventAccumulator<>(); List<MockEvent> events = Arrays.asList( new MockEvent(1, 0), new MockEvent(1, 1), new MockEvent(1, 2) ); events.forEach(accumulator::addFirst); assertEquals(3, accumulator.size()); List<MockEvent> polledEvents = new ArrayList<>(3); for (int i = 0; i < events.size(); i++) { MockEvent event = accumulator.poll(); assertNotNull(event); polledEvents.add(event); assertEquals(events.size() - 1 - i, accumulator.size()); accumulator.done(event); } Collections.reverse(events); assertEquals(events, polledEvents); accumulator.close(); }
static Map<Integer, List<Integer>> parseReplicaAssignment(String replicaAssignmentList) { String[] partitionList = replicaAssignmentList.split(","); Map<Integer, List<Integer>> ret = new LinkedHashMap<>(); for (int i = 0; i < partitionList.length; i++) { List<Integer> brokerList = Arrays.stream(partitionList[i].split(":")) .map(String::trim) .mapToInt(Integer::parseInt) .boxed() .collect(Collectors.toList()); Collection<Integer> duplicateBrokers = ToolsUtils.duplicates(brokerList); if (!duplicateBrokers.isEmpty()) { throw new AdminCommandFailedException("Partition replica lists may not contain duplicate entries: " + duplicateBrokers.stream() .map(Object::toString) .collect(Collectors.joining(",")) ); } ret.put(i, brokerList); if (ret.get(i).size() != ret.get(0).size()) { throw new AdminOperationException("Partition " + i + " has different replication factor: " + brokerList); } } return ret; }
@Test public void testParseAssignmentPartitionsOfDifferentSize() { assertThrows(AdminOperationException.class, () -> TopicCommand.parseReplicaAssignment("5:4:3,2:1")); }
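A happy-path companion for the same parser; the assignment string and broker ids are illustrative:
@Test
public void testParseAssignmentWellFormed() {
    // "5:4,3:2" should yield partition 0 -> [5, 4] and partition 1 -> [3, 2]
    Map<Integer, List<Integer>> assignment = TopicCommand.parseReplicaAssignment("5:4,3:2");
    assertEquals(Arrays.asList(5, 4), assignment.get(0));
    assertEquals(Arrays.asList(3, 2), assignment.get(1));
}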
@PUT @Path("{id}/add_router_interface") @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public Response addRouterInterface(@PathParam("id") String id, InputStream input) throws IOException { log.trace(String.format(MESSAGE_ROUTER_IFACE, "UPDATE " + id)); String inputStr = IOUtils.toString(input, REST_UTF8); if (!haService.isActive() && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) { return syncPut(haService, ROUTERS, "add_router_interface", id, inputStr); } final NeutronRouterInterface osRouterIface = (NeutronRouterInterface) jsonToModelEntity(inputStr, NeutronRouterInterface.class); adminService.addRouterInterface(osRouterIface); return status(Response.Status.OK).build(); }
@Test public void testAddRouterInterfaceWithAdditionOperation() { expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes(); replay(mockOpenstackHaService); mockOpenstackRouterAdminService.addRouterInterface(anyObject()); replay(mockOpenstackRouterAdminService); final WebTarget wt = target(); InputStream jsonStream = OpenstackRouterWebResourceTest.class .getResourceAsStream("openstack-router-interface.json"); Response response = wt.path(PATH + "/f49a1319-423a-4ee6-ba54-1d95a4f6cc68/add_router_interface") .request(MediaType.APPLICATION_JSON_TYPE) .put(Entity.json(jsonStream)); final int status = response.getStatus(); assertThat(status, is(200)); verify(mockOpenstackRouterAdminService); }
public boolean matches(final String keyWord) { return p.matcher(keyWord).matches(); }
@Test public void matches() { Assertions.assertTrue(keyWordMatch.matches("name")); Assertions.assertTrue(keyWordMatch.matches("test")); Assertions.assertFalse(keyWordMatch.matches("dsaer")); }
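The keyWordMatch fixture is not shown; a minimal sketch of how such a matcher could be backed by java.util.regex.Pattern, with the alternation pattern purely illustrative:
@Test
public void matchesWithPlainPattern() {
    // hypothetical construction; the real KeyWordMatch constructor is not shown in this snippet
    Pattern p = Pattern.compile("name|test");
    Assertions.assertTrue(p.matcher("name").matches());
    Assertions.assertFalse(p.matcher("dsaer").matches());
}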
public static ExpressionEvaluator compileExpression( String expression, List<String> argumentNames, List<Class<?>> argumentClasses, Class<?> returnClass) { ExpressionEvaluator expressionEvaluator = new ExpressionEvaluator(); expressionEvaluator.setParameters( argumentNames.toArray(new String[0]), argumentClasses.toArray(new Class[0])); expressionEvaluator.setExpressionType(returnClass); try { expressionEvaluator.cook(expression); return expressionEvaluator; } catch (CompileException e) { throw new InvalidProgramException( "Expression cannot be compiled. This is a bug. Please file an issue.\nExpression: " + expression, e); } }
@Test public void testJaninoStringCompare() throws InvocationTargetException { String expression = "String.valueOf(\"metadata_table\").equals(__table_name__)"; List<String> columnNames = Arrays.asList("__table_name__"); List<Class<?>> paramTypes = Arrays.asList(String.class); List<Object> params = Arrays.asList("metadata_table"); ExpressionEvaluator expressionEvaluator = JaninoCompiler.compileExpression( expression, columnNames, paramTypes, Boolean.class); Object evaluate = expressionEvaluator.evaluate(params.toArray()); Assert.assertEquals(true, evaluate); }
public static BundleDistribution bundleProcessingThreadDistribution( String shortId, MetricName name) { return new BundleProcessingThreadDistribution(shortId, name); }
@Test public void testAccurateBundleDistributionReportsValueFirstTimeWithoutMutations() throws Exception { Map<String, ByteString> report = new HashMap<>(); BundleDistribution bundleDistribution = Metrics.bundleProcessingThreadDistribution(TEST_ID, TEST_NAME); bundleDistribution.updateIntermediateMonitoringData(report); assertEquals( report, Collections.singletonMap( TEST_ID, MonitoringInfoEncodings.encodeInt64Distribution(DistributionData.EMPTY))); report.clear(); // Test that a reported value isn't reported again on final update bundleDistribution.updateFinalMonitoringData(report); assertEquals(report, Collections.emptyMap()); // Test that the value is not reported after reset if no mutations after being // reported the first time. bundleDistribution.reset(); bundleDistribution.updateFinalMonitoringData(report); assertEquals(report, Collections.emptyMap()); }
@Override public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap, String serviceInterface) { if (!shouldHandle(invokers)) { return invokers; } List<Object> targetInvokers; if (routerConfig.isUseRequestRouter()) { targetInvokers = getTargetInvokersByRequest(targetService, invokers, invocation); } else { targetInvokers = getTargetInvokersByRules(invokers, invocation, queryMap, targetService, serviceInterface); } return super.handle(targetService, targetInvokers, invocation, queryMap, serviceInterface); }
@Test public void testGetTargetInvokersByFlowRules() { // initialize the routing rule RuleInitializationUtils.initFlowMatchRule(); List<Object> invokers = new ArrayList<>(); ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0"); invokers.add(invoker1); ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1"); invokers.add(invoker2); Invocation invocation = new ApacheInvocation(); invocation.setAttachment("bar", "bar1"); Map<String, String> queryMap = new HashMap<>(); queryMap.put("side", "consumer"); queryMap.put("group", "fooGroup"); queryMap.put("version", "0.0.1"); queryMap.put("interface", "io.sermant.foo.FooTest"); DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo"); List<Object> targetInvokers = (List<Object>) flowRouteHandler.handle( DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest") , invokers, invocation, queryMap, "io.sermant.foo.FooTest"); Assert.assertEquals(1, targetInvokers.size()); Assert.assertEquals(invoker2, targetInvokers.get(0)); ConfigCache.getLabel(RouterConstant.DUBBO_CACHE_NAME).resetRouteRule(Collections.emptyMap()); }
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
    try {
        if(new DeepboxFindFeature(session, fileid).find(folder)) {
            throw new ConflictException(folder.getAbsolute());
        }
        final Folder upload = new Folder();
        upload.setName(folder.getName());
        upload.setI18n(Collections.emptyMap());
        final List<Folder> body = Collections.singletonList(upload);
        final String deepBoxNodeId = fileid.getDeepBoxNodeId(folder.getParent());
        final String boxNodeId = fileid.getBoxNodeId(folder.getParent());
        final List<FolderAdded> created;
        if(new DeepboxPathContainerService(session).isDocuments(folder.getParent())) {
            created = new PathRestControllerApi(session.getClient()).addFolders1(
                    body, deepBoxNodeId, boxNodeId
            );
        }
        else {
            final String parentNodeId = fileid.getFileId(folder.getParent());
            created = new PathRestControllerApi(session.getClient()).addFolders(
                    body, deepBoxNodeId, boxNodeId, parentNodeId
            );
        }
        final FolderAdded f = created.stream().findFirst().orElse(null);
        if(null == f) {
            // guard against an empty response; previously this fell through to a NullPointerException on f.getNode()
            throw new NotfoundException(folder.getAbsolute());
        }
        fileid.cache(folder, f.getNode().getNodeId());
        return folder.withAttributes(new DeepboxAttributesFinderFeature(session, fileid).toAttributes(f.getNode()));
    }
    catch(ApiException e) {
        throw new DeepboxExceptionMappingService(fileid).map("Cannot create folder {0}", e, folder);
    }
}
@Test public void testBookkeeping() throws Exception { final DeepboxIdProvider nodeid = new DeepboxIdProvider(session); final DeepboxDirectoryFeature directory = new DeepboxDirectoryFeature(session, nodeid); final Path parent = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents/Bookkeeping", EnumSet.of(Path.Type.directory)); final Path folder = new Path(parent, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)); directory.mkdir(folder, new TransferStatus()); assertEquals(0, new DeepboxListService(session, nodeid).list(folder, new DisabledListProgressListener()).size()); new DeepboxDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new DeepboxFindFeature(session, nodeid).find(folder)); }
public void clearOptionsData() { getControls(); if ( optionsParameterTree != null ) { optionsParameterTree.getRootChildren().removeAll(); } }
@Test public void testClearOptionsData() throws Exception { }
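The test above is empty in the source; a mock-based sketch of what it could verify, assuming optionsParameterTree is an injectable XulTree (both the field access and the XUL wiring are assumptions here):
@Test
public void testClearOptionsDataSketch() throws Exception {
    // hypothetical wiring: a mocked XulTree whose root children should be cleared
    XulTree optionsParameterTree = mock(XulTree.class);
    XulTreeChildren rootChildren = mock(XulTreeChildren.class);
    when(optionsParameterTree.getRootChildren()).thenReturn(rootChildren);
    dataHandler.optionsParameterTree = optionsParameterTree; // assumed accessible for the sketch
    dataHandler.clearOptionsData();
    verify(rootChildren).removeAll();
}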
@Override public int getWorkerMaxCount() { return workerThreadCount; }
@Test public void getWorkerMaxCount_returns_1_when_there_is_no_WorkerCountProvider() { assertThat(new CeConfigurationImpl(EMPTY_CONFIGURATION).getWorkerMaxCount()).isOne(); }
public Collection<String> getAllClientsSubscribeService(Service service) {
    // getOrDefault avoids the check-then-act race of containsKey followed by get
    return subscriberIndexes.getOrDefault(service, new ConcurrentHashSet<>());
}
@Test void testGetAllClientsSubscribeService() { Collection<String> allClientsSubscribeService = clientServiceIndexesManager.getAllClientsSubscribeService(service); assertNotNull(allClientsSubscribeService); assertEquals(1, allClientsSubscribeService.size()); }
@Override public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) { String highwayValue = way.getTag("highway"); if (skipEmergency && "service".equals(highwayValue) && "emergency_access".equals(way.getTag("service"))) return; int firstIndex = way.getFirstIndex(restrictionKeys); String firstValue = firstIndex < 0 ? "" : way.getTag(restrictionKeys.get(firstIndex), ""); if (restrictedValues.contains(firstValue) && !hasTemporalRestriction(way, firstIndex, restrictionKeys)) return; if (way.hasTag("gh:barrier_edge") && way.hasTag("node_tags")) { List<Map<String, Object>> nodeTags = way.getTag("node_tags", null); Map<String, Object> firstNodeTags = nodeTags.get(0); // a barrier edge has the restriction in both nodes and the tags are the same -> get(0) firstValue = getFirstPriorityNodeTag(firstNodeTags, restrictionKeys); String barrierValue = firstNodeTags.containsKey("barrier") ? (String) firstNodeTags.get("barrier") : ""; if (restrictedValues.contains(firstValue) || barriers.contains(barrierValue) || "yes".equals(firstNodeTags.get("locked")) && !INTENDED.contains(firstValue)) return; } if (FerrySpeedCalculator.isFerry(way)) { boolean isCar = restrictionKeys.contains("motorcar"); if (INTENDED.contains(firstValue) // implied default is allowed only if foot and bicycle is not specified: || isCar && firstValue.isEmpty() && !way.hasTag("foot") && !way.hasTag("bicycle") // if hgv is allowed then smaller trucks and cars are allowed too even if not specified || isCar && way.hasTag("hgv", "yes")) { accessEnc.setBool(false, edgeId, edgeIntAccess, true); accessEnc.setBool(true, edgeId, edgeIntAccess, true); } } else { boolean isRoundabout = roundaboutEnc.getBool(false, edgeId, edgeIntAccess); boolean ignoreOneway = "no".equals(way.getFirstValue(ignoreOnewayKeys)); boolean isBwd = isBackwardOneway(way); if (!ignoreOneway && (isBwd || isRoundabout || isForwardOneway(way))) { accessEnc.setBool(isBwd, edgeId, edgeIntAccess, true); } else { accessEnc.setBool(false, edgeId, edgeIntAccess, true); accessEnc.setBool(true, edgeId, edgeIntAccess, true); } } }
@Test public void testAccess() { ReaderWay way = new ReaderWay(1); way.setTag("highway", "primary"); EdgeIntAccess edgeIntAccess = ArrayEdgeIntAccess.createFromBytes(em.getBytesForFlags()); int edgeId = 0; parser.handleWayTags(edgeId, edgeIntAccess, way, null); assertTrue(busAccessEnc.getBool(false, edgeId, edgeIntAccess)); assertTrue(busAccessEnc.getBool(true, edgeId, edgeIntAccess)); }
@Override public PluginRuntime getPluginRuntime() { return new PluginRuntime(getId()) .addInfo("executeTimeOut", executeTimeOut + "ms"); }
@Test public void testGetRuntime() { Assert.assertNotNull(plugin.getPluginRuntime()); }
@Override public int read() throws IOException { if (mPosition == mLength) { // at end of file return -1; } updateStreamIfNeeded(); int res = mUfsInStream.get().read(); if (res == -1) { return -1; } mPosition++; Metrics.BYTES_READ_FROM_UFS.inc(1); return res; }
@Test public void readOutOfBound() throws IOException, AlluxioException { AlluxioURI ufsPath = getUfsPath(); createFile(ufsPath, CHUNK_SIZE); try (FileInStream inStream = getStream(ufsPath)) { byte[] res = new byte[CHUNK_SIZE * 2]; assertEquals(CHUNK_SIZE, inStream.read(res)); assertTrue(BufferUtils.matchIncreasingByteArray(0, CHUNK_SIZE, res)); assertEquals(-1, inStream.read(res)); } }
@Nullable public static Field findPropertyField(Class<?> clazz, String fieldName) { Field field; try { field = clazz.getField(fieldName); } catch (NoSuchFieldException e) { return null; } if (!Modifier.isPublic(field.getModifiers()) || Modifier.isStatic(field.getModifiers())) { return null; } return field; }
@Test public void when_findPropertyField_private_then_returnsNull() { assertNull(findPropertyField(JavaFields.class, "privateField")); }
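A positive-path companion, assuming the JavaFields fixture also declares a public, non-static field named publicField (the field name is an assumption):
@Test
public void when_findPropertyField_public_then_returnsField() {
    // any public, non-static field satisfies the method's modifier checks
    Field field = findPropertyField(JavaFields.class, "publicField");
    assertNotNull(field);
    assertEquals("publicField", field.getName());
}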
public List<Document> export(final String collectionName, final List<String> exportedFieldNames, final int limit, final Bson dbFilter, final List<Sort> sorts, final Subject subject) { final MongoCollection<Document> collection = mongoConnection.getMongoDatabase().getCollection(collectionName); final FindIterable<Document> resultsWithoutLimit = collection.find(Objects.requireNonNullElse(dbFilter, Filters.empty())) .projection(Projections.fields(Projections.include(exportedFieldNames))) .sort(toMongoDbSort(sorts)); final var userCanReadAllEntities = permissionsUtils.hasAllPermission(subject) || permissionsUtils.hasReadPermissionForWholeCollection(subject, collectionName); final var checkPermission = permissionsUtils.createPermissionCheck(subject, collectionName); final var documents = userCanReadAllEntities ? getFromMongo(resultsWithoutLimit, limit) : getWithInMemoryPermissionCheck(resultsWithoutLimit, limit, checkPermission); return documents.collect(Collectors.toList()); }
@Test void testExportWorksCorrectlyWithSelectivePermissions() { insertTestData(); simulateUserThatCanSeeOnlyOneDoc("0000000000000000000000c7"); final List<Document> exportedDocuments = toTest.export(TEST_COLLECTION_NAME, List.of("name"), 10, Filters.empty(), List.of(), subject); assertThat(exportedDocuments) .isNotNull() .hasSize(1) .containsExactlyInAnyOrder( new Document(Map.of("_id", "0000000000000000000000c7", "name", "Judith")) ); }
@Override
public Long createPost(PostSaveReqVO createReqVO) {
    // validate correctness
    validatePostForCreateOrUpdate(null, createReqVO.getName(), createReqVO.getCode());
    // insert the post
    PostDO post = BeanUtils.toBean(createReqVO, PostDO.class);
    postMapper.insert(post);
    return post.getId();
}
@Test
public void testValidatePost_nameDuplicateForCreate() {
    // mock data
    PostDO postDO = randomPostDO();
    postMapper.insert(postDO); // @Sql: insert an existing record first
    // prepare parameters
    PostSaveReqVO reqVO = randomPojo(PostSaveReqVO.class, // simulate a duplicate name
            o -> o.setName(postDO.getName()));
    assertServiceException(() -> postService.createPost(reqVO), POST_NAME_DUPLICATE);
}
public int calculateBufferSize(long totalBufferSizeInBytes, int totalBuffers) {
    checkArgument(totalBufferSizeInBytes >= 0, "Size of buffer should be non negative");
    checkArgument(totalBuffers > 0, "Number of buffers should be positive");

    // The result is always capped by the max buffer size, while the instant value is potentially
    // unbounded, which can cause a jump from the min to the max value when the instant value is
    // significantly larger than the possible maximum. The fix is to cap the instant buffer size
    // at twice the current buffer size so that growth and shrink speeds match: for example, if
    // the instant value is 0 and the current value is 16000, the current value can decrease by
    // at most 1600 (with alpha = 0.1). Capping the instant value at 2 * currentValue = 32000
    // means even a large instant value (say 100000) can increase the current value by the same 1600.
    // Example of change speed:
    // growing = 32768, 29647, 26823, 24268, 21956, 19864
    // shrinking = 19864, 21755, 23826, 26095, 28580, 31301, 32768
    long desirableBufferSize =
            Math.min(totalBufferSizeInBytes / totalBuffers, 2L * lastBufferSize);

    lastBufferSize += alpha * (desirableBufferSize - lastBufferSize);
    return lastBufferSize = Math.max(minBufferSize, Math.min(lastBufferSize, maxBufferSize));
}
@Test void testNegativeTotalSize() { BufferSizeEMA calculator = new BufferSizeEMA(100, 200, 2); assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> calculator.calculateBufferSize(-1, 1)); }
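A behavioral sketch of the capped EMA described in the comment above, assuming the constructor seen in the test is (minBufferSize, maxBufferSize, numberOfSamples); the concrete values are illustrative:
@Test
void testBufferSizeStaysWithinBoundsAndGrowsMonotonically() {
    BufferSizeEMA calculator = new BufferSizeEMA(100, 200, 2);
    int previous = 0;
    for (int i = 0; i < 10; i++) {
        // a persistently huge instant value is still capped at twice the current size
        int size = calculator.calculateBufferSize(10_000, 1);
        assertThat(size).isBetween(100, 200);
        assertThat(size).isGreaterThanOrEqualTo(previous);
        previous = size;
    }
}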
public abstract void execute(Map<String, List<String>> parameters, String body, PrintWriter output) throws Exception;
@SuppressWarnings("deprecation") @Test void throwsExceptionWhenCallingExecuteWithoutThePostBody() throws Exception { assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> task.execute(Collections.emptyMap(), new PrintWriter(new OutputStreamWriter(System.out, UTF_8)))); }
public static Collection<String> parseTableExpressionWithoutSchema(final ShardingSphereDatabase database, final List<String> tableNames) { ShardingSphereSchema schema = database.getSchema(database.getName()); Set<String> allTableNames = null == schema ? Collections.emptySet() : new HashSet<>(schema.getAllTableNames()); return tableNames.stream().anyMatch("*"::equals) ? allTableNames : new HashSet<>(tableNames); }
@Test void assertParseTableExpressionWithoutSchema() { Map<String, ShardingSphereSchema> schemas = Collections.singletonMap("sharding_db", mockedPublicSchema()); ShardingSphereDatabase database = new ShardingSphereDatabase("sharding_db", TypedSPILoader.getService(DatabaseType.class, "FIXTURE"), null, null, schemas); List<String> schemaTables = Collections.singletonList("*"); Collection<String> actualWildcardTable = CDCSchemaTableUtils.parseTableExpressionWithoutSchema(database, schemaTables); Set<String> expectedWildcardTable = new HashSet<>(Arrays.asList("t_order", "t_order2")); assertThat(actualWildcardTable, is(expectedWildcardTable)); schemaTables = Collections.singletonList("t_order"); Collection<String> actualSingleTable = CDCSchemaTableUtils.parseTableExpressionWithoutSchema(database, schemaTables); Set<String> expectedSingleTable = new HashSet<>(Collections.singletonList("t_order")); assertThat(actualSingleTable, is(expectedSingleTable)); }
@Nullable public synchronized Beacon track(@NonNull Beacon beacon) { Beacon trackedBeacon = null; if (beacon.isMultiFrameBeacon() || beacon.getServiceUuid() != -1) { trackedBeacon = trackGattBeacon(beacon); } else { trackedBeacon = beacon; } return trackedBeacon; }
@Test public void gattBeaconFieldsGetUpdated() { Beacon beacon = getGattBeacon(); Beacon extraDataBeacon = getGattBeaconExtraData(); Beacon repeatBeacon = getGattBeacon(); repeatBeacon.setRssi(-100); ExtraDataBeaconTracker tracker = new ExtraDataBeaconTracker(); tracker.track(beacon); tracker.track(extraDataBeacon); Beacon trackedBeacon = tracker.track(repeatBeacon); assertEquals("rssi should NOT be updated", -100, trackedBeacon.getRssi()); assertEquals("extra data fields should be updated", extraDataBeacon.getDataFields(), trackedBeacon.getExtraDataFields()); }
public static TransactionWitness redeemP2WSH(Script witnessScript, TransactionSignature... signatures) { List<byte[]> pushes = new ArrayList<>(signatures.length + 2); pushes.add(new byte[] {}); for (TransactionSignature signature : signatures) pushes.add(signature.encodeToBitcoin()); pushes.add(witnessScript.program()); return TransactionWitness.of(pushes); }
@Test public void testRedeemP2WSH() throws SignatureDecodeException { ECKey.ECDSASignature ecdsaSignature1 = TransactionSignature.decodeFromDER(ByteUtils.parseHex("3045022100c3d84f7bf41c7eda3b23bbbccebde842a451c1a0aca39df706a3ff2fe78b1e0a02206e2e3c23559798b02302ad6fa5ddbbe87af5cc7d3b9f86b88588253770ab9f79")); TransactionSignature signature1 = new TransactionSignature(ecdsaSignature1, Transaction.SigHash.ALL, false); ECKey.ECDSASignature ecdsaSignature2 = TransactionSignature.decodeFromDER(ByteUtils.parseHex("3045022100fcfe4a58f2878047ef7c5889fc52a3816ad2dd218807daa3c3eafd4841ffac4d022073454df7e212742f0fee20416b418a2c1340a33eebed5583d19a61088b112832")); TransactionSignature signature2 = new TransactionSignature(ecdsaSignature2, Transaction.SigHash.ALL, false); Script witnessScript = Script.parse(ByteUtils.parseHex("522102bb65b325a986c5b15bd75e0d81cf149219597617a70995efedec6309b4600fa02103c54f073f5db9f68915019801435058c9232cb72c6528a2ca15af48eb74ca8b9a52ae")); TransactionWitness witness = TransactionWitness.redeemP2WSH(witnessScript, signature1, signature2); assertEquals(4, witness.getPushCount()); assertArrayEquals(new byte[]{}, witness.getPush(0)); assertArrayEquals(signature1.encodeToBitcoin(), witness.getPush(1)); assertArrayEquals(signature2.encodeToBitcoin(), witness.getPush(2)); assertArrayEquals(witnessScript.program(), witness.getPush(3)); }
public Concept lowestCommonAncestor(String v, String w) { Concept vnode = getConcept(v); Concept wnode = getConcept(w); return lowestCommonAncestor(vnode, wnode); }
@Test public void testLowestCommonAncestor() { System.out.println("lowestCommonAncestor"); Concept result = taxonomy.lowestCommonAncestor("A", "B"); assertEquals(a, result); result = taxonomy.lowestCommonAncestor("E", "B"); assertEquals(taxonomy.getRoot(), result); }
public Blade watchEnvChange(boolean watchEnvChange) { this.environment.set(BladeConst.ENV_KEY_APP_WATCH_ENV, watchEnvChange); return this; }
@Test public void testWatchEnvChange() { Environment environment = Blade.create().watchEnvChange(false).environment(); assertEquals(Boolean.FALSE, environment.getBooleanOrNull(ENV_KEY_APP_WATCH_ENV)); }
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset( RequestContext context, OffsetCommitRequestData request ) throws ApiException { Group group = validateOffsetCommit(context, request); // In the old consumer group protocol, the offset commits maintain the session if // the group is in Stable or PreparingRebalance state. if (group.type() == Group.GroupType.CLASSIC) { ClassicGroup classicGroup = (ClassicGroup) group; if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) { groupMetadataManager.rescheduleClassicGroupMemberHeartbeat( classicGroup, classicGroup.member(request.memberId()) ); } } final OffsetCommitResponseData response = new OffsetCommitResponseData(); final List<CoordinatorRecord> records = new ArrayList<>(); final long currentTimeMs = time.milliseconds(); final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs); request.topics().forEach(topic -> { final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name()); response.topics().add(topicResponse); topic.partitions().forEach(partition -> { if (isMetadataInvalid(partition.committedMetadata())) { topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code())); } else { log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.", request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(), request.memberId(), partition.committedLeaderEpoch()); topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.NONE.code())); final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest( partition, currentTimeMs, expireTimestampMs ); records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord( request.groupId(), topic.name(), partition.partitionIndex(), offsetAndMetadata, metadataImage.features().metadataVersion() )); } }); }); if (!records.isEmpty()) { metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size()); } return new CoordinatorResult<>(records, response); }
@Test public void testConsumerGroupOffsetDeleteWithPendingTransactionalOffsets() { OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build(); ConsumerGroup group = context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup( "foo", true ); context.commitOffset(10L, "foo", "bar", 0, 100L, 0, context.time.milliseconds()); assertFalse(group.isSubscribedToTopic("bar")); context.testOffsetDeleteWith("foo", "bar", 0, Errors.NONE); assertFalse(context.hasOffset("foo", "bar", 0)); }
static String strip(final String line) { return new Parser(line).parse(); }
@Test public void shouldCorrectHandleEscapedDoubleQuotes() { // Given: final String line = "\"this isn''t a comment -- the first quote isn''t closed\" -- comment"; final String line2 = "\"\"\"this isn''t a comment -- the first quote isn''t closed\" -- comment"; // Then: assertThat(CommentStripper.strip(line), is("\"this isn''t a comment -- the first quote isn''t closed\"")); assertThat(CommentStripper.strip(line2), is("\"\"\"this isn''t a comment -- the first quote isn''t closed\"")); }
private CompletionStage<RestResponse> putInCache(NettyRestResponse.Builder responseBuilder, AdvancedCache<Object, Object> cache, Object key, byte[] data, Long ttl, Long idleTime) { Configuration config = SecurityActions.getCacheConfiguration(cache); final Metadata metadata = CacheOperationsHelper.createMetadata(config, ttl, idleTime); responseBuilder.header("etag", calcETAG(data)); CompletionStage<Object> stage; // Indexing is still blocking - can be removed when https://issues.redhat.com/browse/ISPN-11731 is complete if (config.indexing().enabled()) { stage = CompletableFuture.supplyAsync(() -> cache.putAsync(key, data, metadata), invocationHelper.getExecutor()) .thenCompose(Function.identity()); } else { stage = cache.putAsync(key, data, metadata); } return stage.thenApply(o -> responseBuilder.build()); }
@Test public void testIntKeysTextToXMLValues() { Integer key = 12345; String keyContentType = "application/x-java-object;type=java.lang.Integer"; String value = "<foo>bar</foo>"; putInCache("default", key, keyContentType, value, TEXT_PLAIN_TYPE); RestResponse response = get("default", key, keyContentType, APPLICATION_XML_TYPE); ResponseAssertion.assertThat(response).hasReturnedText(value); }
public double[] decodeFloat8Array(final byte[] parameterBytes, final boolean isBinary) { ShardingSpherePreconditions.checkState(!isBinary, () -> new UnsupportedSQLOperationException("binary mode")); String parameterValue = new String(parameterBytes, StandardCharsets.UTF_8); Collection<String> parameterElements = decodeText(parameterValue); double[] result = new double[parameterElements.size()]; int index = 0; for (String each : parameterElements) { result[index++] = Double.parseDouble(each); } return result; }
@Test void assertParseFloat8ArrayNormalTextMode() { double[] actual = DECODER.decodeFloat8Array(FLOAT_ARRAY_STR.getBytes(), false); assertThat(actual.length, is(2)); assertThat(Double.compare(actual[0], 11.1D), is(0)); assertThat(Double.compare(actual[1], 12.1D), is(0)); }
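The FLOAT_ARRAY_STR fixture is not shown; given the text parsing above, it is presumably a PostgreSQL array literal such as "{11.1,12.1}". A companion sketch for the binary-mode guard:
@Test
void assertParseFloat8ArrayBinaryModeUnsupported() {
    // binary mode is explicitly rejected by the precondition in decodeFloat8Array
    assertThrows(UnsupportedSQLOperationException.class, () -> DECODER.decodeFloat8Array(FLOAT_ARRAY_STR.getBytes(), true));
}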
@Override
public void emit(OutboundPacket packet) {
    DeviceId devId = packet.sendThrough();
    String scheme = devId.toString().split(":")[0];

    if (!scheme.equals(this.id().scheme())) {
        throw new IllegalArgumentException(
                "Don't know how to handle Device with scheme " + scheme);
    }

    Dpid dpid = Dpid.dpid(devId.uri());
    OpenFlowSwitch sw = controller.getSwitch(dpid);
    if (sw == null) {
        log.warn("Device {} isn't available?", devId);
        return;
    }

    OFPort inPort;
    if (packet.inPort() != null) {
        inPort = portDesc(packet.inPort()).getPortNo();
    } else {
        inPort = OFPort.CONTROLLER;
    }

    for (Instruction inst : packet.treatment().allInstructions()) {
        if (inst.type().equals(Instruction.Type.OUTPUT)) {
            OFPortDesc p = portDesc(((OutputInstruction) inst).port());
            OFPacketOut po = packetOut(sw, packet.data().array(), p.getPortNo(), inPort);
            sw.sendMsg(po);
        }
    }
}
@Test(expected = IllegalArgumentException.class) public void wrongScheme() { sw.setRole(RoleState.MASTER); OutboundPacket schemeFailPkt = outPacket(DID_WRONG, TR, null); provider.emit(schemeFailPkt); assertEquals("message sent incorrectly", 0, sw.sent.size()); }