focal_method: string, length 13 – 60.9k
test_case: string, length 25 – 109k
public Opt<T> peek(Consumer<T> action) throws NullPointerException { Objects.requireNonNull(action); if (isEmpty()) { return Opt.empty(); } action.accept(value); return this; }
@Test public void peekTest() { User user = new User(); // equivalent to chained ifPresent calls Opt.ofNullable("hutool").peek(user::setUsername).peek(user::setNickname); assertEquals("hutool", user.getNickname()); assertEquals("hutool", user.getUsername()); // note: reassigning the wrapped element inside the passed-in lambda does not affect the original element String name = Opt.ofNullable("hutool").peek(username -> username = "123").peek(username -> username = "456").get(); assertEquals("hutool", name); }
@Override public void storeChanged(Store store) { var contentStore = (ContentStore) store; content = contentStore.getContent(); render(); }
@Test void testStoreChanged() { final var store = mock(ContentStore.class); when(store.getContent()).thenReturn(Content.PRODUCTS); final var view = new ContentView(); view.storeChanged(store); verify(store, times(1)).getContent(); verifyNoMoreInteractions(store); }
@Override public RFuture<Boolean> tryLockAsync(long threadId) { RFuture<Long> longRFuture = tryAcquireAsync(-1, null, threadId); CompletionStage<Boolean> f = longRFuture.thenApply(res -> res == null); return new CompletableFutureWrapper<>(f); }
@Test public void testTryLockAsync() throws InterruptedException { RLock lock = redisson.getSpinLock("lock"); lock.lock(); AtomicBoolean lockAsyncSucceed = new AtomicBoolean(); Thread thread = new Thread(() -> { RFuture<Void> booleanRFuture = lock.lockAsync(); booleanRFuture.whenComplete((res, e) -> { if (e != null) { Assertions.fail("Lock acquire failed for some reason"); } lockAsyncSucceed.set(true); }); }); thread.start(); Thread.sleep(1_000); assertThat(lockAsyncSucceed.get()).isFalse(); lock.unlock(); Thread.sleep(200); assertThat(lockAsyncSucceed.get()).isTrue(); lock.forceUnlock(); }
@Override public CompletionStage<Set<String>> getAllUsers() { LOGGER.debugOp("Searching for Users with any ACL rules"); Set<String> users = new HashSet<>(); Set<String> ignored = new HashSet<>(IGNORED_USERS.size()); Enumeration<String> keys = cache.keys(); while (keys.hasMoreElements()) { String username = KafkaUserModel.decodeUsername(keys.nextElement()); if (IGNORED_USERS.contains(username)) { if (!ignored.contains(username)) { // This info message is logged only once per reconciliation even if there are multiple rules LOGGER.infoOp("Existing ACLs for user '{}' will be ignored.", username); ignored.add(username); } } else { if (LOGGER.isDebugEnabled()) { LOGGER.debugOp("Adding user {} to Set of users with ACLs", username); } users.add(username); } } return CompletableFuture.completedFuture(users); }
@Test public void testGetAllUsers() throws ExecutionException, InterruptedException { Admin mockAdminClient = mock(AdminClient.class); ResourcePattern res1 = new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL); ResourcePattern res2 = new ResourcePattern(ResourceType.GROUP, "my-group", PatternType.LITERAL); KafkaPrincipal foo = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "CN=foo"); AclBinding fooAclBinding = new AclBinding(res1, new AccessControlEntry(foo.toString(), "*", org.apache.kafka.common.acl.AclOperation.READ, AclPermissionType.ALLOW)); KafkaPrincipal bar = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "CN=bar"); AclBinding barAclBinding = new AclBinding(res1, new AccessControlEntry(bar.toString(), "*", org.apache.kafka.common.acl.AclOperation.READ, AclPermissionType.ALLOW)); KafkaPrincipal baz = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "baz"); AclBinding bazAclBinding = new AclBinding(res2, new AccessControlEntry(baz.toString(), "*", org.apache.kafka.common.acl.AclOperation.READ, AclPermissionType.ALLOW)); KafkaPrincipal all = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "*"); AclBinding allAclBinding = new AclBinding(res1, new AccessControlEntry(all.toString(), "*", org.apache.kafka.common.acl.AclOperation.READ, AclPermissionType.ALLOW)); KafkaPrincipal anonymous = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "ANONYMOUS"); AclBinding anonymousAclBinding = new AclBinding(res2, new AccessControlEntry(anonymous.toString(), "*", org.apache.kafka.common.acl.AclOperation.READ, AclPermissionType.ALLOW)); Collection<AclBinding> aclBindings = asList(fooAclBinding, barAclBinding, bazAclBinding, allAclBinding, anonymousAclBinding); assertDoesNotThrow(() -> mockDescribeAcls(mockAdminClient, AclBindingFilter.ANY, aclBindings)); SimpleAclOperator aclOp = new SimpleAclOperator(mockAdminClient, ResourceUtils.createUserOperatorConfig(), EXECUTOR); aclOp.start(); try { Set<String> users = aclOp.getAllUsers().toCompletableFuture().get(); assertThat(users, is(new HashSet<>(asList("foo", "bar", "baz")))); } finally { aclOp.stop(); } }
@Override @Nonnull public <T extends DataConnection> T getAndRetainDataConnection(String name, Class<T> clazz) { DataConnectionEntry dataConnection = dataConnections.computeIfPresent(name, (k, v) -> { if (!clazz.isInstance(v.instance)) { throw new HazelcastException("Data connection '" + name + "' must be an instance of " + clazz); } v.instance.retain(); return v; }); if (dataConnection == null) { throw new HazelcastException("Data connection '" + name + "' not found"); } //noinspection unchecked return (T) dataConnection.instance; }
@Test public void should_throw_when_data_connection_does_not_exist() { assertThatThrownBy(() -> dataConnectionService.getAndRetainDataConnection("does-not-exist", JdbcDataConnection.class)) .isInstanceOf(HazelcastException.class) .hasMessageContaining("Data connection 'does-not-exist' not found"); }
@Bean public TimerRegistry timerRegistry( TimerConfigurationProperties timerConfigurationProperties, EventConsumerRegistry<TimerEvent> timerEventConsumerRegistry, RegistryEventConsumer<Timer> timerRegistryEventConsumer, @Qualifier("compositeTimerCustomizer") CompositeCustomizer<TimerConfigCustomizer> compositeTimerCustomizer, @Autowired(required = false) MeterRegistry registry ) { TimerRegistry timerRegistry = createTimerRegistry(timerConfigurationProperties, timerRegistryEventConsumer, compositeTimerCustomizer, registry); registerEventConsumer(timerRegistry, timerEventConsumerRegistry, timerConfigurationProperties); initTimerRegistry(timerRegistry, timerConfigurationProperties, compositeTimerCustomizer); return timerRegistry; }
@Test public void shouldConfigureInstancesUsingCustomDefaultConfig() { InstanceProperties defaultProperties = new InstanceProperties() .setMetricNames("resilience4j.timer.default") .setOnFailureTagResolver(FixedOnFailureTagResolver.class); InstanceProperties instanceProperties1 = new InstanceProperties() .setMetricNames("resilience4j.timer.operations1"); InstanceProperties instanceProperties2 = new InstanceProperties() .setOnFailureTagResolver(QualifiedClassNameOnFailureTagResolver.class); TimerConfigurationProperties configurationProperties = new TimerConfigurationProperties(); configurationProperties.getConfigs().put("default", defaultProperties); configurationProperties.getInstances().put("backend1", instanceProperties1); configurationProperties.getInstances().put("backend2", instanceProperties2); TimerConfiguration configuration = new TimerConfiguration(); TimerRegistry registry = configuration.timerRegistry( configurationProperties, new DefaultEventConsumerRegistry<>(), new CompositeRegistryEventConsumer<>(emptyList()), new CompositeCustomizer<>(emptyList()), new SimpleMeterRegistry() ); assertThat(registry.getAllTimers().count()).isEqualTo(2); Timer timer1 = registry.timer("backend1"); assertThat(timer1).isNotNull(); assertThat(timer1.getTimerConfig().getMetricNames()).isEqualTo("resilience4j.timer.operations1"); assertThat(timer1.getTimerConfig().getOnFailureTagResolver()).isInstanceOf(FixedOnFailureTagResolver.class); Timer timer2 = registry.timer("backend2"); assertThat(timer2).isNotNull(); assertThat(timer2.getTimerConfig().getMetricNames()).isEqualTo("resilience4j.timer.default"); assertThat(timer2.getTimerConfig().getOnFailureTagResolver()).isInstanceOf(QualifiedClassNameOnFailureTagResolver.class); Timer timer3 = registry.timer("backend3"); assertThat(timer3).isNotNull(); assertThat(timer3.getTimerConfig().getMetricNames()).isEqualTo("resilience4j.timer.default"); assertThat(timer3.getTimerConfig().getOnFailureTagResolver()).isInstanceOf(FixedOnFailureTagResolver.class); }
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. 
SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. // This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. 
int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && 
!options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
@Test public void testSettingAnyFnApiExperimentEnablesUnifiedWorker() throws Exception { for (String experiment : ImmutableList.of( "beam_fn_api", "use_runner_v2", "use_unified_worker", "use_portable_job_submission")) { DataflowPipelineOptions options = buildPipelineOptions(); ExperimentalOptions.addExperiment(options, experiment); Pipeline p = Pipeline.create(options); p.apply(Create.of("A")); p.run(); assertFalse(options.isEnableStreamingEngine()); assertThat( options.getExperiments(), containsInAnyOrder( "beam_fn_api", "use_runner_v2", "use_unified_worker", "use_portable_job_submission")); } for (String experiment : ImmutableList.of( "beam_fn_api", "use_runner_v2", "use_unified_worker", "use_portable_job_submission")) { DataflowPipelineOptions options = buildPipelineOptions(); options.setStreaming(true); ExperimentalOptions.addExperiment(options, experiment); Pipeline p = Pipeline.create(options); p.apply(Create.of("A")); p.run(); assertTrue(options.isEnableStreamingEngine()); assertThat( options.getExperiments(), containsInAnyOrder( "beam_fn_api", "use_runner_v2", "use_unified_worker", "use_portable_job_submission", "enable_windmill_service", "enable_streaming_engine")); } }
public IssueQuery create(SearchRequest request) { try (DbSession dbSession = dbClient.openSession(false)) { final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone()); Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules()); Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet()); Collection<String> issueKeys = collectIssueKeys(dbSession, request); if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) { ruleUuids.add("non-existing-uuid"); } IssueQuery.Builder builder = IssueQuery.builder() .issueKeys(issueKeys) .severities(request.getSeverities()) .cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories()) .impactSoftwareQualities(request.getImpactSoftwareQualities()) .impactSeverities(request.getImpactSeverities()) .statuses(request.getStatuses()) .resolutions(request.getResolutions()) .issueStatuses(request.getIssueStatuses()) .resolved(request.getResolved()) .prioritizedRule(request.getPrioritizedRule()) .rules(ruleDtos) .ruleUuids(ruleUuids) .assigneeUuids(request.getAssigneeUuids()) .authors(request.getAuthors()) .scopes(request.getScopes()) .languages(request.getLanguages()) .tags(request.getTags()) .types(request.getTypes()) .pciDss32(request.getPciDss32()) .pciDss40(request.getPciDss40()) .owaspAsvs40(request.getOwaspAsvs40()) .owaspAsvsLevel(request.getOwaspAsvsLevel()) .owaspTop10(request.getOwaspTop10()) .owaspTop10For2021(request.getOwaspTop10For2021()) .stigAsdR5V3(request.getStigAsdV5R3()) .casa(request.getCasa()) .sansTop25(request.getSansTop25()) .cwe(request.getCwe()) .sonarsourceSecurity(request.getSonarsourceSecurity()) .assigned(request.getAssigned()) .createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone)) .createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone)) .facetMode(request.getFacetMode()) .timeZone(timeZone) .codeVariants(request.getCodeVariants()); List<ComponentDto> allComponents = new ArrayList<>(); boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents); addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request); setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone); String sort = request.getSort(); if (!isNullOrEmpty(sort)) { builder.sort(sort); builder.asc(request.getAsc()); } return builder.build(); } }
@Test public void query_without_any_parameter() { SearchRequest request = new SearchRequest(); IssueQuery query = underTest.create(request); assertThat(query.componentUuids()).isEmpty(); assertThat(query.projectUuids()).isEmpty(); assertThat(query.directories()).isEmpty(); assertThat(query.files()).isEmpty(); assertThat(query.viewUuids()).isEmpty(); assertThat(query.branchUuid()).isNull(); }
@Override public ResultSet executeQuery(String sql) throws SQLException { validateState(); try { if (!DriverUtils.queryContainsLimitStatement(sql)) { sql += " " + LIMIT_STATEMENT + " " + _maxRows; } String enabledSql = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions()); ResultSetGroup resultSetGroup = _session.execute(enabledSql); if (resultSetGroup.getResultSetCount() == 0) { _resultSet = PinotResultSet.empty(); return _resultSet; } _resultSet = new PinotResultSet(resultSetGroup.getResultSet(0)); return _resultSet; } catch (PinotClientException e) { throw new SQLException(String.format("Failed to execute query : %s", sql), e); } }
@Test public void testExecuteQuery() throws Exception { PinotConnection connection = new PinotConnection("dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport); Statement statement = new PinotStatement(connection); ResultSet resultSet = statement.executeQuery(BASIC_TEST_QUERY); Assert.assertNotNull(resultSet); Assert.assertEquals(statement.getConnection(), connection); }
@Override public ResultSet getFunctions(final String catalog, final String schemaPattern, final String functionNamePattern) throws SQLException { return createDatabaseMetaDataResultSet(getDatabaseMetaData().getFunctions(getActualCatalog(catalog), getActualSchema(schemaPattern), functionNamePattern)); }
@Test void assertGetFunctions() throws SQLException { when(databaseMetaData.getFunctions("test", null, null)).thenReturn(resultSet); assertThat(shardingSphereDatabaseMetaData.getFunctions("test", null, null), instanceOf(DatabaseMetaDataResultSet.class)); }
@Override public boolean isTrusted(X509Certificate[] chain, String authType) throws CertificateException { if (chain.length == 0 || this.trustedFingerprints.isEmpty()) { return false; } final MessageDigest sha256Digest = sha256(); // traverse up the chain until we find one whose fingerprint matches for (int i = 0; i < chain.length; i++) { final X509Certificate currentCandidate = chain[i]; final byte[] derEncoding = currentCandidate.getEncoded(); Fingerprint candidateFingerprint = new Fingerprint(sha256Digest.digest(derEncoding)); if (this.trustedFingerprints.contains(candidateFingerprint)) { final Date currentDate = dateSupplier.get(); currentCandidate.checkValidity(currentDate); // zip back down the chain and make sure everything is valid for(; i > 0; i--) { final X509Certificate signer = chain[i]; final X509Certificate signed = chain[i-1]; verifyAndValidate(signed, signer, currentDate); } return true; } } return false; }
@Test public void testIsTrustedWhenNoMatchingCertificateIsOnTheChain() throws CertificateException { final CATrustedFingerprintTrustStrategy trustStrategy = new CATrustedFingerprintTrustStrategy("abad1deaabad1deaabad1deaabad1deaabad1deaabad1deaabad1deaabad1dea", ()-> DATE_CERTS_VALID); final X509Certificate[] chain = {CERTIFICATE_SERVER, CERTIFICATE_INTERMEDIATE, CERTIFICATE_ROOT}; assertFalse(trustStrategy.isTrusted(chain, "noop")); }
@ManagedOperation(description = "Subscribe for dynamic routing with a predicate expression") public String subscribeWithPredicateExpression( String subscribeChannel, String subscriptionId, String destinationUri, int priority, String predicate, String expressionLanguage, boolean update) { return filterService.addFilterForChannel(subscriptionId, priority, obtainPredicateFromExpression(camelContext, predicate, expressionLanguage), destinationUri, subscribeChannel, update); }
@Test void subscribeWithPredicateExpression() { service.subscribeWithPredicateExpression(subscribeChannel, subscriptionId, destinationUri, priority, predicateExpression, expressionLanguage, false); Mockito.verify(filterService, Mockito.times(1)) .addFilterForChannel( eq(subscriptionId), eq(priority), any(Predicate.class), eq(destinationUri), eq(subscribeChannel), eq(false)); }
public static ErrorResponse fromJson(int code, String json) { return JsonUtil.parse(json, node -> OAuthErrorResponseParser.fromJson(code, node)); }
@Test public void testOAuthErrorResponseFromJsonWithNulls() { String error = OAuth2Properties.INVALID_CLIENT_ERROR; String json = String.format("{\"error\":\"%s\"}", error); ErrorResponse expected = ErrorResponse.builder().responseCode(400).withType(error).build(); assertEquals(expected, OAuthErrorResponseParser.fromJson(400, json)); // test with explicitly set nulls json = String.format("{\"error\":\"%s\",\"error_description\":null,\"error_uri\":null}", error); assertEquals(expected, OAuthErrorResponseParser.fromJson(400, json)); }
public CompletableFuture<QueryAssignmentResponse> queryAssignment(ProxyContext ctx, QueryAssignmentRequest request) { CompletableFuture<QueryAssignmentResponse> future = new CompletableFuture<>(); try { validateTopicAndConsumerGroup(request.getTopic(), request.getGroup()); List<org.apache.rocketmq.proxy.common.Address> addressList = this.convertToAddressList(request.getEndpoints()); ProxyTopicRouteData proxyTopicRouteData = this.messagingProcessor.getTopicRouteDataForProxy( ctx, addressList, request.getTopic().getName()); boolean fifo = false; SubscriptionGroupConfig config = this.messagingProcessor.getSubscriptionGroupConfig(ctx, request.getGroup().getName()); if (config != null && config.isConsumeMessageOrderly()) { fifo = true; } List<Assignment> assignments = new ArrayList<>(); Map<String, Map<Long, Broker>> brokerMap = buildBrokerMap(proxyTopicRouteData.getBrokerDatas()); for (QueueData queueData : proxyTopicRouteData.getQueueDatas()) { if (PermName.isReadable(queueData.getPerm()) && queueData.getReadQueueNums() > 0) { Map<Long, Broker> brokerIdMap = brokerMap.get(queueData.getBrokerName()); if (brokerIdMap != null) { Broker broker = brokerIdMap.get(MixAll.MASTER_ID); Permission permission = this.convertToPermission(queueData.getPerm()); if (fifo) { for (int i = 0; i < queueData.getReadQueueNums(); i++) { MessageQueue defaultMessageQueue = MessageQueue.newBuilder() .setTopic(request.getTopic()) .setId(i) .setPermission(permission) .setBroker(broker) .build(); assignments.add(Assignment.newBuilder() .setMessageQueue(defaultMessageQueue) .build()); } } else { MessageQueue defaultMessageQueue = MessageQueue.newBuilder() .setTopic(request.getTopic()) .setId(-1) .setPermission(permission) .setBroker(broker) .build(); assignments.add(Assignment.newBuilder() .setMessageQueue(defaultMessageQueue) .build()); } } } } QueryAssignmentResponse response; if (assignments.isEmpty()) { response = QueryAssignmentResponse.newBuilder() .setStatus(ResponseBuilder.getInstance().buildStatus(Code.FORBIDDEN, "no readable queue")) .build(); } else { response = QueryAssignmentResponse.newBuilder() .addAllAssignments(assignments) .setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name())) .build(); } future.complete(response); } catch (Throwable t) { future.completeExceptionally(t); } return future; }
@Test public void testQueryAssignment() throws Throwable { when(this.messagingProcessor.getTopicRouteDataForProxy(any(), any(), anyString())) .thenReturn(createProxyTopicRouteData(2, 2, 6)); QueryAssignmentResponse response = this.routeActivity.queryAssignment( createContext(), QueryAssignmentRequest.newBuilder() .setEndpoints(grpcEndpoints) .setTopic(GRPC_TOPIC) .setGroup(GRPC_GROUP) .build() ).get(); assertEquals(Code.OK, response.getStatus().getCode()); assertEquals(1, response.getAssignmentsCount()); assertEquals(grpcEndpoints, response.getAssignments(0).getMessageQueue().getBroker().getEndpoints()); }
public BigDecimal calculateProductGramsForRequiredFiller(Filler filler, BigDecimal fillerGrams) { if (filler == null || fillerGrams == null || fillerGrams.doubleValue() <= 0) { return BigDecimal.valueOf(0); } if (filler.equals(Filler.PROTEIN)) { return calculateProductGramsForRequiredProteins(fillerGrams).setScale(0, RoundingMode.HALF_DOWN); } else if (filler.equals(Filler.CARBOHYDRATE)) { return calculateProductGramsForRequiredCarbohydrates(fillerGrams).setScale(0, RoundingMode.HALF_DOWN); } else if (filler.equals(Filler.FAT)) { return calculateProductGramsForRequiredFats(fillerGrams).setScale(0, RoundingMode.HALF_DOWN); } return BigDecimal.valueOf(0); }
@Test void calculateProductGramsForRequiredFiller_zeroValue() { BigDecimal result = product.calculateProductGramsForRequiredFiller(Filler.CARBOHYDRATE, BigDecimal.valueOf(0)); assertEquals(BigDecimal.valueOf(0), result); }
public static Permission getPermission(String name, String serviceName, String... actions) { PermissionFactory permissionFactory = PERMISSION_FACTORY_MAP.get(serviceName); if (permissionFactory == null) { throw new IllegalArgumentException("No permissions found for service: " + serviceName); } return permissionFactory.create(name, actions); }
@Test public void getPermission_Set() { Permission permission = ActionConstants.getPermission("foo", SetService.SERVICE_NAME); assertNotNull(permission); assertTrue(permission instanceof SetPermission); }
public Duration computeReadTimeout(HttpRequestMessage request, int attemptNum) { IClientConfig clientConfig = getRequestClientConfig(request); Long originTimeout = getOriginReadTimeout(); Long requestTimeout = getRequestReadTimeout(clientConfig); long computedTimeout; if (originTimeout == null && requestTimeout == null) { computedTimeout = MAX_OUTBOUND_READ_TIMEOUT_MS.get(); } else if (originTimeout == null || requestTimeout == null) { computedTimeout = originTimeout == null ? requestTimeout : originTimeout; } else { // return the stricter (i.e. lower) of the two timeouts computedTimeout = Math.min(originTimeout, requestTimeout); } // enforce max timeout upperbound return Duration.ofMillis(Math.min(computedTimeout, MAX_OUTBOUND_READ_TIMEOUT_MS.get())); }
@Test void computeReadTimeout_originOnly() { originConfig.set(CommonClientConfigKey.ReadTimeout, 1000); Duration timeout = originTimeoutManager.computeReadTimeout(request, 1); assertEquals(1000, timeout.toMillis()); }
public static String fix(final String raw) { if ( raw == null || "".equals( raw.trim() )) { return raw; } MacroProcessor macroProcessor = new MacroProcessor(); macroProcessor.setMacros( macros ); return macroProcessor.parse( raw ); }
@Test public void testAssertLogical() { final String raw = "some code; insertLogical(new String(\"foo\"));\n More();"; final String result = "some code; drools.insertLogical(new String(\"foo\"));\n More();"; assertEqualsIgnoreWhitespace( result, KnowledgeHelperFixerTest.fixer.fix( raw ) ); }
protected String decrypt(String encryptedStr) throws Exception { String[] split = encryptedStr.split(":"); checkTrue(split.length == 3, "Wrong format of the encrypted variable (" + encryptedStr + ")"); byte[] salt = Base64.getDecoder().decode(split[0].getBytes(StandardCharsets.UTF_8)); checkTrue(salt.length == saltLengthBytes, "Salt length doesn't match."); int iterations = Integer.parseInt(split[1]); byte[] encryptedVal = Base64.getDecoder().decode(split[2].getBytes(StandardCharsets.UTF_8)); return new String(transform(Cipher.DECRYPT_MODE, encryptedVal, salt, iterations), StandardCharsets.UTF_8); }
@Test(expected = IllegalArgumentException.class) public void testDecryptionFailWithNullPassword() throws Exception { assumeDefaultAlgorithmsSupported(); AbstractPbeReplacer replacer = createAndInitReplacer(null, new Properties()); replacer.decrypt("aSalt1xx:1:test"); }
@Override public void onResignation( int groupMetadataPartitionIndex, OptionalInt groupMetadataPartitionLeaderEpoch ) { throwIfNotActive(); runtime.scheduleUnloadOperation( new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataPartitionIndex), groupMetadataPartitionLeaderEpoch ); }
@Test public void testOnResignation() { CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime(); GroupCoordinatorService service = new GroupCoordinatorService( new LogContext(), createConfig(), runtime, new GroupCoordinatorMetrics(), createConfigManager() ); assertThrows(CoordinatorNotAvailableException.class, () -> service.onResignation(5, OptionalInt.of(10))); service.startup(() -> 1); service.onResignation(5, OptionalInt.of(10)); verify(runtime, times(1)).scheduleUnloadOperation( new TopicPartition("__consumer_offsets", 5), OptionalInt.of(10) ); }
@Override public boolean isSingleton() { return true; }
@Test public final void isSingletonShouldAlwaysReturnTrue() { final InfinispanRemoteCacheManagerFactoryBean objectUnderTest = new InfinispanRemoteCacheManagerFactoryBean(); assertTrue( "isSingleton() should always return true since each AbstractRemoteCacheManagerFactory will always produce " + "the same RemoteCacheManager instance. However, it returned false.", objectUnderTest.isSingleton()); }
@VisibleForTesting void removeDisableUsers(Set<Long> assigneeUserIds) { if (CollUtil.isEmpty(assigneeUserIds)) { return; } Map<Long, AdminUserRespDTO> userMap = adminUserApi.getUserMap(assigneeUserIds); assigneeUserIds.removeIf(id -> { AdminUserRespDTO user = userMap.get(id); return user == null || !CommonStatusEnum.ENABLE.getStatus().equals(user.getStatus()); }); }
@Test public void testRemoveDisableUsers() { // prepare parameters: 1L can be found; 2L is disabled; 3L cannot be found Set<Long> assigneeUserIds = asSet(1L, 2L, 3L); // mock the method AdminUserRespDTO user1 = randomPojo(AdminUserRespDTO.class, o -> o.setId(1L) .setStatus(CommonStatusEnum.ENABLE.getStatus())); AdminUserRespDTO user2 = randomPojo(AdminUserRespDTO.class, o -> o.setId(2L) .setStatus(CommonStatusEnum.DISABLE.getStatus())); Map<Long, AdminUserRespDTO> userMap = MapUtil.builder(user1.getId(), user1) .put(user2.getId(), user2).build(); when(adminUserApi.getUserMap(eq(assigneeUserIds))).thenReturn(userMap); // invoke taskCandidateInvoker.removeDisableUsers(assigneeUserIds); // assert assertEquals(asSet(1L), assigneeUserIds); }
@Override protected int command() { if (!validateConfigFilePresent()) { return 1; } final MigrationConfig config; try { config = MigrationConfig.load(getConfigFile()); } catch (KsqlException | MigrationException e) { LOGGER.error(e.getMessage()); return 1; } return command( config, MigrationsUtil::getKsqlClient, getMigrationsDir(getConfigFile(), config), Clock.systemDefaultZone() ); }
@Test public void shouldApplySecondMigration() throws Exception { // Given: command = PARSER.parse("-n"); createMigrationFile(1, NAME, migrationsDir, COMMAND); createMigrationFile(3, NAME, migrationsDir, COMMAND); givenCurrentMigrationVersion("1"); givenAppliedMigration(1, NAME, MigrationState.MIGRATED); // When: final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir, Clock.fixed( Instant.ofEpochMilli(1000), ZoneId.systemDefault())); // Then: assertThat(result, is(0)); final InOrder inOrder = inOrder(ksqlClient); verifyMigratedVersion(inOrder, 3, "1", MigrationState.MIGRATED); inOrder.verify(ksqlClient).close(); inOrder.verifyNoMoreInteractions(); }
@Override public Optional<RedirectionAction> getRedirectionAction(final CallContext ctx) { val webContext = ctx.webContext(); var computeLoginUrl = configuration.computeFinalLoginUrl(webContext); val computedCallbackUrl = client.computeFinalCallbackUrl(webContext); val renew = configuration.isRenew() || webContext.getRequestAttribute(RedirectionActionBuilder.ATTRIBUTE_FORCE_AUTHN).isPresent(); val gateway = configuration.isGateway() || webContext.getRequestAttribute(RedirectionActionBuilder.ATTRIBUTE_PASSIVE).isPresent(); val redirectionUrl = constructRedirectUrl(computeLoginUrl, getServiceParameter(), computedCallbackUrl, renew, gateway, configuration.getMethod()); LOGGER.debug("redirectionUrl: {}", redirectionUrl); return Optional.of(HttpActionHelper.buildRedirectUrlAction(webContext, redirectionUrl)); }
@Test public void testRedirectForSAMLProtocol() { val config = new CasConfiguration(); config.setProtocol(CasProtocol.SAML); val builder = newBuilder(config); val action = builder.getRedirectionAction(new CallContext(MockWebContext.create(), new MockSessionStore())).get(); assertTrue(action instanceof FoundAction); assertEquals(LOGIN_URL + "?TARGET=http%3A%2F%2Fwww.pac4j.org%2Ftest.html%3Fclient_name%3DCasClient", ((FoundAction) action).getLocation()); }
static void setConstructor(final MiningModelCompilationDTO compilationDTO, final ClassOrInterfaceDeclaration modelTemplate) { KiePMMLModelFactoryUtils.init(compilationDTO, modelTemplate); final ConstructorDeclaration constructorDeclaration = modelTemplate.getDefaultConstructor().orElseThrow(() -> new KiePMMLInternalException(String.format(MISSING_DEFAULT_CONSTRUCTOR, modelTemplate.getName()))); final BlockStmt body = constructorDeclaration.getBody(); ClassOrInterfaceType kiePMMLSegmentationClass = parseClassOrInterfaceType(compilationDTO.getSegmentationCanonicalClassName()); ObjectCreationExpr objectCreationExpr = new ObjectCreationExpr(); objectCreationExpr.setType(kiePMMLSegmentationClass); CommonCodegenUtils.setAssignExpressionValue(body, "segmentation", objectCreationExpr); }
@Test void setConstructor() { PMML_MODEL pmmlModel = PMML_MODEL.byName(MINING_MODEL.getClass().getSimpleName()); final ClassOrInterfaceDeclaration modelTemplate = MODEL_TEMPLATE.clone(); MINING_FUNCTION miningFunction = MINING_FUNCTION.byName(MINING_MODEL.getMiningFunction().value()); final CommonCompilationDTO<MiningModel> source = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, MINING_MODEL, new PMMLCompilationContextMock(), "FILENAME"); final MiningModelCompilationDTO compilationDTO = MiningModelCompilationDTO.fromCompilationDTO(source); KiePMMLMiningModelFactory.setConstructor(compilationDTO, modelTemplate); Map<Integer, Expression> superInvocationExpressionsMap = new HashMap<>(); superInvocationExpressionsMap.put(0, new NameExpr(String.format("\"%s\"", "FILENAME"))); superInvocationExpressionsMap.put(1, new NameExpr(String.format("\"%s\"", MINING_MODEL.getModelName()))); Map<String, Expression> assignExpressionMap = new HashMap<>(); assignExpressionMap.put("targetField", new StringLiteralExpr(targetFieldName)); assignExpressionMap.put("miningFunction", new NameExpr(miningFunction.getClass().getName() + "." + miningFunction.name())); assignExpressionMap.put("pmmlMODEL", new NameExpr(pmmlModel.getClass().getName() + "." + pmmlModel.name())); ClassOrInterfaceType kiePMMLSegmentationClass = parseClassOrInterfaceType(compilationDTO.getSegmentationCanonicalClassName()); ObjectCreationExpr objectCreationExpr = new ObjectCreationExpr(); objectCreationExpr.setType(kiePMMLSegmentationClass); assignExpressionMap.put("segmentation", objectCreationExpr); ConstructorDeclaration constructorDeclaration = modelTemplate.getDefaultConstructor().get(); assertThat(commonEvaluateConstructor(constructorDeclaration, getSanitizedClassName(MINING_MODEL.getModelName()), superInvocationExpressionsMap, assignExpressionMap)).isTrue(); }
@Override public String generateSqlType(Dialect dialect) { return switch (dialect.getId()) { case PostgreSql.ID, H2.ID -> "INTEGER"; case MsSql.ID -> "INT"; case Oracle.ID -> "NUMBER(38,0)"; default -> throw new IllegalArgumentException("Unsupported dialect id " + dialect.getId()); }; }
@Test public void generateSqlType_for_Oracle() { assertThat(underTest.generateSqlType(new Oracle())).isEqualTo("NUMBER(38,0)"); }
@SuppressWarnings("unchecked") public static int compare(Comparable lhs, Comparable rhs) { Class lhsClass = lhs.getClass(); Class rhsClass = rhs.getClass(); assert lhsClass != rhsClass; assert lhs instanceof Number; assert rhs instanceof Number; Number lhsNumber = (Number) lhs; Number rhsNumber = (Number) rhs; if (isDoubleRepresentable(lhsClass)) { if (isDoubleRepresentable(rhsClass)) { return Double.compare(lhsNumber.doubleValue(), rhsNumber.doubleValue()); } else if (isLongRepresentable(rhsClass)) { return -Integer.signum(compareLongWithDouble(rhsNumber.longValue(), lhsNumber.doubleValue())); } } else if (isLongRepresentable(lhsClass)) { if (isDoubleRepresentable(rhsClass)) { return compareLongWithDouble(lhsNumber.longValue(), rhsNumber.doubleValue()); } else if (isLongRepresentable(rhsClass)) { return Long.compare(lhsNumber.longValue(), rhsNumber.longValue()); } } return lhs.compareTo(rhs); }
@SuppressWarnings("ConstantConditions") @Test(expected = Throwable.class) public void testNullLhsInCompareThrows() { compare(null, 1); }
@Override public int hashCode() { return new HashCodeBuilder(5, 71) .append(versionParts) .toHashCode(); }
@Test public void testHashCode() { DependencyVersion instance = new DependencyVersion("3.2.1"); int expResult = 80756; int result = instance.hashCode(); assertEquals(expResult, result); }
public OpenAPI read(Class<?> cls) { return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>()); }
@Test(description = "Optional handling") public void testTicket3624() { Reader reader = new Reader(new OpenAPI()); OpenAPI openAPI = reader.read(Service.class); String yaml = "openapi: 3.0.1\n" + "paths:\n" + " /example/model:\n" + " get:\n" + " tags:\n" + " - ExampleService\n" + " summary: ' Retrieve models for display to the user'\n" + " operationId: getModels\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/Response'\n" + " /example/model/by/ids:\n" + " get:\n" + " tags:\n" + " - ExampleService\n" + " summary: ' Retrieve models by their ids'\n" + " operationId: getModelsById\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/ByIdResponse'\n" + " /example/containerized/model:\n" + " get:\n" + " tags:\n" + " - ExampleService\n" + " summary: ' Retrieve review insights for a specific product'\n" + " operationId: getContainerizedModels\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/ContainerizedResponse'\n" + "components:\n" + " schemas:\n" + " Model:\n" + " type: object\n" + " properties:\n" + " text:\n" + " type: string\n" + " title:\n" + " type: string\n" + " active:\n" + " type: boolean\n" + " schemaParent:\n" + " $ref: '#/components/schemas/Model'\n" + " optionalString:\n" + " type: string\n" + " parent:\n" + " $ref: '#/components/schemas/Model'\n" + " id:\n" + " type: integer\n" + " format: int32\n" + " Response:\n" + " type: object\n" + " properties:\n" + " count:\n" + " type: integer\n" + " format: int32\n" + " models:\n" + " type: array\n" + " items:\n" + " $ref: '#/components/schemas/Model'\n" + " ByIdResponse:\n" + " type: object\n" + " properties:\n" + " modelsById:\n" + " type: object\n" + " additionalProperties:\n" + " $ref: '#/components/schemas/Model'\n" + " ContainerizedResponse:\n" + " type: object\n" + " properties:\n" + " totalCount:\n" + " type: integer\n" + " format: int32\n" + " containerizedModels:\n" + " type: array\n" + " items:\n" + " $ref: '#/components/schemas/ModelContainer'\n" + " ModelContainer:\n" + " type: object\n" + " properties:\n" + " text:\n" + " type: string\n" + " model:\n" + " $ref: '#/components/schemas/Model'\n" + " id:\n" + " type: integer\n" + " format: int32"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); }
public boolean evaluateIfActiveVersion(UpdateCenter updateCenter) { Version installedVersion = Version.create(sonarQubeVersion.get().toString()); if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getLtaVersion().getVersion()) == 0) { return true; } SortedSet<Release> allReleases = updateCenter.getSonar().getAllReleases(); if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getPastLtaVersion().getVersion()) == 0) { Release initialLtaRelease = findInitialVersionOfMajorRelease(allReleases, updateCenter.getSonar().getLtaVersion().getVersion()); Date initialLtaReleaseDate = initialLtaRelease.getDate(); if (initialLtaReleaseDate == null) { throw new IllegalStateException("Initial Major release date is missing in releases"); } // date of the latest major release should be within 6 months Calendar c = Calendar.getInstance(); c.setTime(new Date(system2.now())); c.add(Calendar.MONTH, -6); return initialLtaReleaseDate.after(c.getTime()); } else { return compareWithoutPatchVersion(installedVersion, findPreviousReleaseIgnoringPatch(allReleases).getVersion()) >= 0; } }
@Test void evaluateIfActiveVersion_whenNoPreviousReleasesFound_shouldThrowIllegalStateException() { when(sonarQubeVersion.get()).thenReturn(parse("10.4.1")); TreeSet<Release> releases = new TreeSet<>(); releases.add(new Release(sonar, Version.create("10.4.1"))); when(sonar.getAllReleases()).thenReturn(releases); assertThatThrownBy(() -> underTest.evaluateIfActiveVersion(updateCenter)) .isInstanceOf(IllegalStateException.class) .hasMessageContaining("Unable to find previous release in releases"); }
@Override public Optional<CompletableFuture<TaskManagerLocation>> getTaskManagerLocation( ExecutionVertexID executionVertexId) { ExecutionVertex ev = getExecutionVertex(executionVertexId); if (ev.getExecutionState() != ExecutionState.CREATED) { return Optional.of(ev.getCurrentTaskManagerLocationFuture()); } else { return Optional.empty(); } }
@Test void testGetEmptyTaskManagerLocationIfVertexNotScheduled() throws Exception { final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1); final ExecutionGraph eg = ExecutionGraphTestUtils.createExecutionGraph( EXECUTOR_EXTENSION.getExecutor(), jobVertex); final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever = new ExecutionGraphToInputsLocationsRetrieverAdapter(eg); ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0); Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocation = inputsLocationsRetriever.getTaskManagerLocation(executionVertexId); assertThat(taskManagerLocation).isNotPresent(); }
public static List<TraceContext> decoderFromTraceDataString(String traceData) { List<TraceContext> resList = new ArrayList<>(); if (traceData == null || traceData.length() <= 0) { return resList; } String[] contextList = traceData.split(String.valueOf(TraceConstants.FIELD_SPLITOR)); for (String context : contextList) { String[] line = context.split(String.valueOf(TraceConstants.CONTENT_SPLITOR)); if (line[0].equals(TraceType.Pub.name())) { TraceContext pubContext = new TraceContext(); pubContext.setTraceType(TraceType.Pub); pubContext.setTimeStamp(Long.parseLong(line[1])); pubContext.setRegionId(line[2]); pubContext.setGroupName(line[3]); TraceBean bean = new TraceBean(); bean.setTopic(line[4]); bean.setMsgId(line[5]); bean.setTags(line[6]); bean.setKeys(line[7]); bean.setStoreHost(line[8]); bean.setBodyLength(Integer.parseInt(line[9])); pubContext.setCostTime(Integer.parseInt(line[10])); bean.setMsgType(MessageType.values()[Integer.parseInt(line[11])]); if (line.length == 13) { pubContext.setSuccess(Boolean.parseBoolean(line[12])); } else if (line.length == 14) { bean.setOffsetMsgId(line[12]); pubContext.setSuccess(Boolean.parseBoolean(line[13])); } // compatible with the old version if (line.length >= 15) { bean.setOffsetMsgId(line[12]); pubContext.setSuccess(Boolean.parseBoolean(line[13])); bean.setClientHost(line[14]); } pubContext.setTraceBeans(new ArrayList<>(1)); pubContext.getTraceBeans().add(bean); resList.add(pubContext); } else if (line[0].equals(TraceType.SubBefore.name())) { TraceContext subBeforeContext = new TraceContext(); subBeforeContext.setTraceType(TraceType.SubBefore); subBeforeContext.setTimeStamp(Long.parseLong(line[1])); subBeforeContext.setRegionId(line[2]); subBeforeContext.setGroupName(line[3]); subBeforeContext.setRequestId(line[4]); TraceBean bean = new TraceBean(); bean.setMsgId(line[5]); bean.setRetryTimes(Integer.parseInt(line[6])); bean.setKeys(line[7]); subBeforeContext.setTraceBeans(new ArrayList<>(1)); subBeforeContext.getTraceBeans().add(bean); resList.add(subBeforeContext); } else if (line[0].equals(TraceType.SubAfter.name())) { TraceContext subAfterContext = new TraceContext(); subAfterContext.setTraceType(TraceType.SubAfter); subAfterContext.setRequestId(line[1]); TraceBean bean = new TraceBean(); bean.setMsgId(line[2]); bean.setKeys(line[5]); subAfterContext.setTraceBeans(new ArrayList<>(1)); subAfterContext.getTraceBeans().add(bean); subAfterContext.setCostTime(Integer.parseInt(line[3])); subAfterContext.setSuccess(Boolean.parseBoolean(line[4])); if (line.length >= 7) { // add the context type subAfterContext.setContextCode(Integer.parseInt(line[6])); } // compatible with the old version if (line.length >= 9) { subAfterContext.setTimeStamp(Long.parseLong(line[7])); subAfterContext.setGroupName(line[8]); } resList.add(subAfterContext); } else if (line[0].equals(TraceType.EndTransaction.name())) { TraceContext endTransactionContext = new TraceContext(); endTransactionContext.setTraceType(TraceType.EndTransaction); endTransactionContext.setTimeStamp(Long.parseLong(line[1])); endTransactionContext.setRegionId(line[2]); endTransactionContext.setGroupName(line[3]); TraceBean bean = new TraceBean(); bean.setTopic(line[4]); bean.setMsgId(line[5]); bean.setTags(line[6]); bean.setKeys(line[7]); bean.setStoreHost(line[8]); bean.setMsgType(MessageType.values()[Integer.parseInt(line[9])]); bean.setTransactionId(line[10]); bean.setTransactionState(LocalTransactionState.valueOf(line[11])); bean.setFromTransactionCheck(Boolean.parseBoolean(line[12])); 
endTransactionContext.setTraceBeans(new ArrayList<>(1)); endTransactionContext.getTraceBeans().add(bean); resList.add(endTransactionContext); } } return resList; }
@Test public void testDecoderFromTraceDataString() { List<TraceContext> contexts = TraceDataEncoder.decoderFromTraceDataString(traceData); Assert.assertEquals(contexts.size(), 1); Assert.assertEquals(contexts.get(0).getTraceType(), TraceType.Pub); }
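A minimal encoding sketch for the Pub branch of the decoder above, assuming nothing beyond what the decoder itself reads: the field order follows the indices in the method, and TraceConstants.CONTENT_SPLITOR / FIELD_SPLITOR are used symbolically rather than assuming their concrete values.

String content = String.join(String.valueOf(TraceConstants.CONTENT_SPLITOR),
        TraceType.Pub.name(),              // line[0]  trace type
        "1616796970889",                   // line[1]  timestamp
        "DefaultRegion",                   // line[2]  region id
        "PID_example",                     // line[3]  producer group
        "TopicTest",                       // line[4]  topic
        "AC1415116D1418B4AAC26F77380000",  // line[5]  msg id
        "TagA",                            // line[6]  tags
        "OrderId1",                        // line[7]  keys
        "127.0.0.1:10911",                 // line[8]  store host
        "26",                              // line[9]  body length
        "245",                             // line[10] cost time
        "0",                               // line[11] MessageType ordinal
        "true");                           // line[12] success flag
List<TraceContext> decoded = TraceDataEncoder.decoderFromTraceDataString(
        content + TraceConstants.FIELD_SPLITOR);
// decoded.get(0) is a Pub TraceContext whose TraceBean carries the topic, msgId, tags and keys above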
public static List<TargetInfo> parseOptTarget(CommandLine cmd, AlluxioConfiguration conf) throws IOException { String[] targets; if (cmd.hasOption(TARGET_OPTION_NAME)) { String argTarget = cmd.getOptionValue(TARGET_OPTION_NAME); if (StringUtils.isBlank(argTarget)) { throw new IOException("Option " + TARGET_OPTION_NAME + " can not be blank."); } else if (argTarget.contains(TARGET_SEPARATOR)) { targets = argTarget.split(TARGET_SEPARATOR); } else { targets = new String[]{argTarget}; } } else { // By default we set on all targets (master/workers/job_master/job_workers) targets = new String[]{ROLE_MASTER, ROLE_JOB_MASTER, ROLE_WORKERS, ROLE_JOB_WORKERS}; } return getTargetInfos(targets, conf); }
@Test public void parseZooKeeperHAJobMasterTarget() throws Exception { mConf.set(PropertyKey.ZOOKEEPER_ENABLED, true); mConf.set(PropertyKey.ZOOKEEPER_ADDRESS, "masters-1:2181"); CommandLine mockCommandLine = mock(CommandLine.class); String[] mockArgs = new String[]{"--target", "job_master"}; when(mockCommandLine.getArgs()).thenReturn(mockArgs); when(mockCommandLine.hasOption(LogLevel.TARGET_OPTION_NAME)).thenReturn(true); when(mockCommandLine.getOptionValue(LogLevel.TARGET_OPTION_NAME)).thenReturn(mockArgs[1]); try (MockedStatic<JobMasterClient.Factory> mockFactory = mockStatic(JobMasterClient.Factory.class)) { JobMasterClient mockJobClient = mock(JobMasterClient.class); when(mockJobClient.getRemoteSockAddress()).thenReturn(new InetSocketAddress("masters-2", mConf.getInt(PropertyKey.JOB_MASTER_RPC_PORT))); when(mockJobClient.getRemoteHostName()).thenReturn("masters-2"); mockFactory.when(() -> JobMasterClient.Factory.create(any())).thenReturn(mockJobClient); List<LogLevel.TargetInfo> targets = LogLevel.parseOptTarget(mockCommandLine, mConf); assertEquals(1, targets.size()); assertEquals(new LogLevel.TargetInfo("masters-2", JOB_MASTER_WEB_PORT, "job_master"), targets.get(0)); } }
public static String getGroupKey(ThreadPoolParameter parameter) { return StringUtil.createBuilder() .append(parameter.getTpId()) .append(Constants.GROUP_KEY_DELIMITER) .append(parameter.getItemId()) .append(Constants.GROUP_KEY_DELIMITER) .append(parameter.getTenantId()) .toString(); }
@Test public void assertGetGroupKeys() { String testText = "message-consume+dynamic-threadpool-example+prescription"; String groupKey = ContentUtil.getGroupKey("message-consume", "dynamic-threadpool-example", "prescription"); Assert.isTrue(testText.equals(groupKey)); }
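A minimal usage sketch for the parameter-based overload above; ThreadPoolParameterInfo is assumed here as a concrete ThreadPoolParameter with matching setters, and the '+' delimiter is the one exercised by the test.

ThreadPoolParameterInfo parameter = new ThreadPoolParameterInfo(); // assumed ThreadPoolParameter implementation
parameter.setTpId("message-consume");
parameter.setItemId("dynamic-threadpool-example");
parameter.setTenantId("prescription");
// builds "<tpId>+<itemId>+<tenantId>", i.e. "message-consume+dynamic-threadpool-example+prescription"
String groupKey = ContentUtil.getGroupKey(parameter);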
public static String getExactlyExpression(final String value) { return Strings.isNullOrEmpty(value) ? value : CharMatcher.anyOf(" ").removeFrom(value); }
@Test void assertGetExactlyExpression() { assertThat(SQLUtils.getExactlyExpression("((a + b*c))"), is("((a+b*c))")); }
@Override protected void write(final MySQLPacketPayload payload) { payload.writeInt4(timestamp); payload.writeInt1(eventType); payload.writeInt4(serverId); payload.writeInt4(eventSize); payload.writeInt4(logPos); payload.writeInt2(flags); }
@Test void assertWrite() { new MySQLBinlogEventHeader(1234567890, MySQLBinlogEventType.UNKNOWN_EVENT.getValue(), 123456, 19, 4, MySQLBinlogEventFlag.LOG_EVENT_BINLOG_IN_USE_F.getValue(), 4).write(payload); verify(payload).writeInt4(1234567890); verify(payload).writeInt1(MySQLBinlogEventType.UNKNOWN_EVENT.getValue()); verify(payload).writeInt4(123456); verify(payload).writeInt4(19); verify(payload).writeInt4(4); verify(payload).writeInt2(MySQLBinlogEventFlag.LOG_EVENT_BINLOG_IN_USE_F.getValue()); }
public void convert(FSConfigToCSConfigConverterParams params) throws Exception { validateParams(params); this.clusterResource = getClusterResource(params); this.convertPlacementRules = params.isConvertPlacementRules(); this.outputDirectory = params.getOutputDirectory(); this.rulesToFile = params.isPlacementRulesToFile(); this.usePercentages = params.isUsePercentages(); this.preemptionMode = params.getPreemptionMode(); prepareOutputFiles(params.isConsole()); loadConversionRules(params.getConversionRulesConfig()); Configuration inputYarnSiteConfig = getInputYarnSiteConfig(params); handleFairSchedulerConfig(params, inputYarnSiteConfig); convert(inputYarnSiteConfig); }
@Test public void testConversionWithInvalidPlacementRules() throws Exception { config = new Configuration(false); config.set(FairSchedulerConfiguration.ALLOCATION_FILE, FS_INVALID_PLACEMENT_RULES_XML); config.setBoolean(FairSchedulerConfiguration.MIGRATION_MODE, true); expectedException.expect(ServiceStateException.class); converter.convert(config); }
@Override public Mono<Authentication> convert(ServerWebExchange exchange) { return super.convert(exchange) // validate the password .<Authentication>flatMap(token -> { var credentials = (String) token.getCredentials(); byte[] credentialsBytes; try { credentialsBytes = Base64.getDecoder().decode(credentials); } catch (IllegalArgumentException e) { // the credentials are not in valid Base64 scheme return Mono.error(new BadCredentialsException("Invalid Base64 scheme.")); } return cryptoService.decrypt(credentialsBytes) .onErrorMap(InvalidEncryptedMessageException.class, error -> new BadCredentialsException("Invalid credential.", error)) .map(decryptedCredentials -> new UsernamePasswordAuthenticationToken( token.getPrincipal(), new String(decryptedCredentials, UTF_8))); }) .transformDeferred(createIpBasedRateLimiter(exchange)) .onErrorMap(RequestNotPermitted.class, RateLimitExceededException::new); }
@Test void applyUsernameAndPasswordThenCreatesTokenSuccess() { var username = "username"; var password = "password"; var decryptedPassword = "decrypted password"; formData.add("username", username); formData.add("password", Base64.getEncoder().encodeToString(password.getBytes())); when(cryptoService.decrypt(password.getBytes())) .thenReturn(Mono.just(decryptedPassword.getBytes())); StepVerifier.create(converter.convert(exchange)) .expectNext(new UsernamePasswordAuthenticationToken(username, decryptedPassword)) .verifyComplete(); verify(cryptoService).decrypt(password.getBytes()); }
public static List<PKafkaOffsetProxyResult> getBatchOffsets(List<PKafkaOffsetProxyRequest> requests) throws UserException { return PROXY_API.getBatchOffsets(requests); }
@Test public void testGetInfoInterruptedException() throws UserException, RpcException { Backend backend = new Backend(1L, "127.0.0.1", 9050); backend.setBeRpcPort(8060); backend.setAlive(true); new Expectations() { { service.getBackendOrComputeNode(anyLong); result = backend; client.getInfo((TNetworkAddress) any, (PProxyRequest) any); result = new InterruptedException("interrupted"); } }; KafkaUtil.ProxyAPI api = new KafkaUtil.ProxyAPI(); LoadException e = Assert.assertThrows(LoadException.class, () -> api.getBatchOffsets(null)); Assert.assertTrue(e.getMessage().contains("Got interrupted exception")); }
@Override public BuiltInPreparedQuery prepareQuery(AnalyzerOptions analyzerOptions, String query, Map<String, String> preparedStatements, WarningCollector warningCollector) { Statement wrappedStatement = sqlParser.createStatement(query, createParsingOptions(analyzerOptions)); if (warningCollector.hasWarnings() && analyzerOptions.getWarningHandlingLevel() == AS_ERROR) { throw new PrestoException(WARNING_AS_ERROR, format("Warning handling level set to AS_ERROR. Warnings: %n %s", warningCollector.getWarnings().stream() .map(PrestoWarning::getMessage) .collect(joining(System.lineSeparator())))); } return prepareQuery(analyzerOptions, wrappedStatement, preparedStatements); }
@Test public void testTooFewParameters() { try { Map<String, String> preparedStatements = ImmutableMap.of("my_query", "SELECT ? FROM foo where col1 = ?"); QUERY_PREPARER.prepareQuery(testAnalyzerOptions, "EXECUTE my_query USING 1", preparedStatements, WarningCollector.NOOP); fail("expected exception"); } catch (SemanticException e) { assertEquals(e.getCode(), INVALID_PARAMETER_USAGE); } }
@Override public PushTelemetryResponse getErrorResponse(int throttleTimeMs, Throwable e) { return errorResponse(throttleTimeMs, Errors.forException(e)); }
@Test public void testGetErrorResponse() { PushTelemetryRequest req = new PushTelemetryRequest(new PushTelemetryRequestData(), (short) 0); PushTelemetryResponse response = req.getErrorResponse(0, Errors.CLUSTER_AUTHORIZATION_FAILED.exception()); assertEquals(Collections.singletonMap(Errors.CLUSTER_AUTHORIZATION_FAILED, 1), response.errorCounts()); }
@Nonnull @Override public List<DataConnectionResource> listResources() { try { try (Connection connection = getConnection()) { DatabaseMetaData databaseMetaData = connection.getMetaData(); ResourceReader reader = new ResourceReader(); switch (resolveDialect(databaseMetaData)) { case H2: reader.withCatalog(connection.getCatalog()) .exclude( (catalog, schema, table) -> H2_SYSTEM_SCHEMA_LIST.contains(schema) ); break; case POSTGRESQL: reader.withCatalog(connection.getCatalog()); break; case MYSQL: reader.exclude( (catalog, schema, table) -> catalog != null && MYSQL_SYSTEM_CATALOG_LIST.contains(catalog.toUpperCase(ROOT)) ); break; case MICROSOFT_SQL_SERVER: reader .withCatalog(connection.getCatalog()) .exclude( (catalog, schema, table) -> MSSQL_SYSTEM_SCHEMA_LIST.contains(schema) || MSSQL_SYSTEM_TABLE_LIST.contains(table) ); break; default: // Nothing to do } return reader.listResources(connection); } } catch (Exception exception) { throw new HazelcastException("Could not read resources for DataConnection " + getName(), exception); } }
@Test public void list_resources_should_return_view() throws Exception { jdbcDataConnection = new JdbcDataConnection(SHARED_DATA_CONNECTION_CONFIG); executeJdbc(JDBC_URL_SHARED, "CREATE TABLE MY_TABLE (ID INT, NAME VARCHAR)"); executeJdbc(JDBC_URL_SHARED, "CREATE VIEW MY_TABLE_VIEW AS SELECT * FROM MY_TABLE"); List<DataConnectionResource> dataConnectionResources = jdbcDataConnection.listResources(); assertThat(dataConnectionResources).contains( new DataConnectionResource("TABLE", DB_NAME_SHARED, "PUBLIC", "MY_TABLE_VIEW") ); }
public Integer doCall() throws Exception { // Operator id must be set if (ObjectHelper.isEmpty(operatorId)) { printer().println("Operator id must be set"); return -1; } List<String> integrationSources = Stream.concat(Arrays.stream(Optional.ofNullable(filePaths).orElseGet(() -> new String[] {})), Arrays.stream(Optional.ofNullable(sources).orElseGet(() -> new String[] {}))).toList(); Integration integration = new Integration(); integration.setSpec(new IntegrationSpec()); integration.getMetadata() .setName(getIntegrationName(integrationSources)); if (dependencies != null && dependencies.length > 0) { List<String> deps = new ArrayList<>(); for (String dependency : dependencies) { String normalized = normalizeDependency(dependency); validateDependency(normalized, printer()); deps.add(normalized); } integration.getSpec().setDependencies(deps); } if (kit != null) { IntegrationKit integrationKit = new IntegrationKit(); integrationKit.setName(kit); integration.getSpec().setIntegrationKit(integrationKit); } if (traitProfile != null) { TraitProfile p = TraitProfile.valueOf(traitProfile.toUpperCase(Locale.US)); integration.getSpec().setProfile(p.name().toLowerCase(Locale.US)); } if (repositories != null && repositories.length > 0) { integration.getSpec().setRepositories(List.of(repositories)); } if (annotations != null && annotations.length > 0) { integration.getMetadata().setAnnotations(Arrays.stream(annotations) .filter(it -> it.contains("=")) .map(it -> it.split("=")) .filter(it -> it.length == 2) .collect(Collectors.toMap(it -> it[0].trim(), it -> it[1].trim()))); } if (integration.getMetadata().getAnnotations() == null) { integration.getMetadata().setAnnotations(new HashMap<>()); } // --operator-id={id} is a syntax sugar for '--annotation camel.apache.org/operator.id={id}' integration.getMetadata().getAnnotations().put(CamelKCommand.OPERATOR_ID_LABEL, operatorId); // --integration-profile={id} is a syntax sugar for '--annotation camel.apache.org/integration-profile.id={id}' if (integrationProfile != null) { if (integrationProfile.contains("/")) { String[] namespacedName = integrationProfile.split("/", 2); integration.getMetadata().getAnnotations().put(CamelKCommand.INTEGRATION_PROFILE_NAMESPACE_ANNOTATION, namespacedName[0]); integration.getMetadata().getAnnotations().put(CamelKCommand.INTEGRATION_PROFILE_ANNOTATION, namespacedName[1]); } else { integration.getMetadata().getAnnotations().put(CamelKCommand.INTEGRATION_PROFILE_ANNOTATION, integrationProfile); } } if (labels != null && labels.length > 0) { integration.getMetadata().setLabels(Arrays.stream(labels) .filter(it -> it.contains("=")) .map(it -> it.split("=")) .filter(it -> it.length == 2) .collect(Collectors.toMap(it -> it[0].trim(), it -> it[1].trim()))); } Traits traitsSpec = TraitHelper.parseTraits(traits); if (image != null) { TraitHelper.configureContainerImage(traitsSpec, image, null, null, null, null); } else { List<Source> resolvedSources = SourceHelper.resolveSources(integrationSources, compression); List<Flows> flows = new ArrayList<>(); List<Sources> sources = new ArrayList<>(); for (Source source : resolvedSources) { if (useFlows && source.isYaml() && !source.compressed()) { JsonNode json = KubernetesHelper.json().convertValue( KubernetesHelper.yaml().load(source.content()), JsonNode.class); if (json.isArray()) { for (JsonNode item : json) { Flows flowSpec = new Flows(); flowSpec.setAdditionalProperties(KubernetesHelper.json().readerFor(Map.class).readValue(item)); flows.add(flowSpec); } } else { Flows flowSpec = new Flows(); 
flowSpec.setAdditionalProperties(KubernetesHelper.json().readerFor(Map.class).readValue(json)); flows.add(flowSpec); } } else { Sources sourceSpec = new Sources(); sourceSpec.setName(source.name()); sourceSpec.setLanguage(source.language()); sourceSpec.setContent(source.content()); sourceSpec.setCompression(source.compressed()); sources.add(sourceSpec); } } if (!flows.isEmpty()) { integration.getSpec().setFlows(flows); } if (!sources.isEmpty()) { integration.getSpec().setSources(sources); } } if (podTemplate != null) { Source templateSource = SourceHelper.resolveSource(podTemplate); if (!templateSource.isYaml()) { throw new RuntimeCamelException( ("Unsupported pod template %s - " + "please use proper YAML source").formatted(templateSource.extension())); } Spec podSpec = KubernetesHelper.yaml().loadAs(templateSource.content(), Spec.class); Template template = new Template(); template.setSpec(podSpec); integration.getSpec().setTemplate(template); } convertOptionsToTraits(traitsSpec); integration.getSpec().setTraits(traitsSpec); if (serviceAccount != null) { integration.getSpec().setServiceAccountName(serviceAccount); } if (output != null) { switch (output) { case "k8s" -> { List<Source> sources = SourceHelper.resolveSources(integrationSources); TraitContext context = new TraitContext(integration.getMetadata().getName(), "1.0-SNAPSHOT", printer(), sources); TraitHelper.configureContainerImage(traitsSpec, image, "quay.io", null, integration.getMetadata().getName(), "1.0-SNAPSHOT"); new TraitCatalog().apply(traitsSpec, context, traitProfile); printer().println( context.buildItems().stream().map(KubernetesHelper::dumpYaml).collect(Collectors.joining("---"))); } case "yaml" -> printer().println(KubernetesHelper.dumpYaml(integration)); case "json" -> printer().println( JSonHelper.prettyPrint(KubernetesHelper.json().writer().writeValueAsString(integration), 2)); default -> { printer().printf("Unsupported output format '%s' (supported: yaml, json)%n", output); return -1; } } return 0; } final AtomicBoolean updated = new AtomicBoolean(false); client(Integration.class).resource(integration).createOr(it -> { updated.set(true); return it.update(); }); if (updated.get()) { printer().printf("Integration %s updated%n", integration.getMetadata().getName()); } else { printer().printf("Integration %s created%n", integration.getMetadata().getName()); } if (wait || logs) { client(Integration.class).withName(integration.getMetadata().getName()) .waitUntilCondition(it -> "Running".equals(it.getStatus().getPhase()), 10, TimeUnit.MINUTES); } if (logs) { IntegrationLogs logsCommand = new IntegrationLogs(getMain()); logsCommand.withClient(client()); logsCommand.withName(integration.getMetadata().getName()); logsCommand.doCall(); } return 0; }
@Test public void shouldUpdateIntegration() throws Exception { Integration integration = createIntegration("route"); kubernetesClient.resources(Integration.class).resource(integration).create(); IntegrationRun command = createCommand(); command.filePaths = new String[] { "classpath:route.yaml" }; command.doCall(); Assertions.assertEquals("Integration route updated", printer.getOutput()); Integration created = kubernetesClient.resources(Integration.class).withName("route").get(); Assertions.assertEquals("camel-k", created.getMetadata().getAnnotations().get(CamelKCommand.OPERATOR_ID_LABEL)); }
public static SimpleTransform threshold(double min, double max) { return new SimpleTransform(Operation.threshold,min,max); }
@Test public void testThresholdBelow() { double min = 0.0; double max = Double.POSITIVE_INFINITY; TransformationMap t = new TransformationMap(Collections.singletonList(SimpleTransform.threshold(min, max)),new HashMap<>()); testThresholding(t,min,max); }
public void tryMergeIssuesFromSourceBranchOfPullRequest(Component component, Collection<DefaultIssue> newIssues, Input<DefaultIssue> rawInput) { if (sourceBranchInputFactory.hasSourceBranchAnalysis()) { Input<DefaultIssue> sourceBranchInput = sourceBranchInputFactory.createForSourceBranch(component); DefaultTrackingInput rawPrTrackingInput = new DefaultTrackingInput(newIssues, rawInput.getLineHashSequence(), rawInput.getBlockHashSequence()); Tracking<DefaultIssue, DefaultIssue> prTracking = tracker.trackNonClosed(rawPrTrackingInput, sourceBranchInput); for (Map.Entry<DefaultIssue, DefaultIssue> pair : prTracking.getMatchedRaws().entrySet()) { issueLifecycle.copyExistingIssueFromSourceBranchToPullRequest(pair.getKey(), pair.getValue()); } } }
@Test public void tryMergeIssuesFromSourceBranchOfPullRequest_merges_issue_state_from_source_branch_into_pull_request() { DefaultIssue sourceBranchIssue = createIssue("issue2", rule.getKey(), Issue.STATUS_CONFIRMED, new Date()); Input<DefaultIssue> sourceBranchInput = new DefaultTrackingInput(singletonList(sourceBranchIssue), mock(LineHashSequence.class), mock(BlockHashSequence.class)); when(sourceBranchInputFactory.hasSourceBranchAnalysis()).thenReturn(true); when(sourceBranchInputFactory.createForSourceBranch(any())).thenReturn(sourceBranchInput); when(tracker.trackNonClosed(any(), any())).thenReturn(prTracking); when(prTracking.getMatchedRaws()).thenReturn(singletonMap(rawIssue, sourceBranchIssue)); underTest.tryMergeIssuesFromSourceBranchOfPullRequest(FILE_1, rawIssuesInput.getIssues(), rawIssuesInput); verify(issueLifecycle).copyExistingIssueFromSourceBranchToPullRequest(rawIssue, sourceBranchIssue); }
@Override public void register(String qpKey, Status status) { checkNotNull(qpKey, "qpKey can't be null"); checkNotNull(status, "status can't be null"); checkState(statuses.put(qpKey, status) == null, "Quality Profile '%s' is already registered", qpKey); }
@Test @UseDataProvider("qualityProfileStatuses") public void register_fails_with_ISE_if_qp_is_already_registered(QProfileStatusRepository.Status status) { underTest.register("key", status); assertThatThrownBy(() -> underTest.register("key", status)) .isInstanceOf(IllegalStateException.class) .hasMessage("Quality Profile 'key' is already registered"); }
@Override public void storeAll(long firstItemSequence, T[] items) { long startNanos = Timer.nanos(); try { delegate.storeAll(firstItemSequence, items); } finally { storeAllProbe.recordValue(Timer.nanosElapsed(startNanos)); } }
@Test public void storeAll() { String[] items = new String[]{"1", "2"}; ringbufferStore.storeAll(100, items); verify(delegate).storeAll(100, items); assertProbeCalledOnce("storeAll"); }
public static LookupResult multi(final CharSequence singleValue, final Map<Object, Object> multiValue) { return withoutTTL().single(singleValue).multiValue(multiValue).build(); }
@Test public void serializeMultiString() { final LookupResult lookupResult = LookupResult.multi("Foobar", MULTI_VALUE); final JsonNode node = objectMapper.convertValue(lookupResult, JsonNode.class); assertThat(node.isNull()).isFalse(); assertThat(node.path("single_value").asText()).isEqualTo("Foobar"); assertThat(node.path("multi_value").path("int").asInt()).isEqualTo(42); assertThat(node.path("multi_value").path("bool").asBoolean()).isEqualTo(true); assertThat(node.path("multi_value").path("string").asText()).isEqualTo("Foobar"); assertThat(node.path("ttl").asLong()).isEqualTo(Long.MAX_VALUE); }
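A small usage sketch of the factory above; because the builder chain starts from withoutTTL(), the serialized ttl falls back to Long.MAX_VALUE, which is exactly what the test asserts (Map.of assumes Java 9+).

Map<Object, Object> multi = Map.of("int", 42, "bool", true, "string", "Foobar");
LookupResult result = LookupResult.multi("Foobar", multi);
// the single value is "Foobar", the multi value is the map above, and the TTL defaults to Long.MAX_VALUE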
@Override public int configInfoCount() { ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO); String sql = configInfoMapper.count(null); Integer result = databaseOperate.queryOne(sql, Integer.class); if (result == null) { throw new IllegalArgumentException("configInfoCount error"); } return result; }
@Test void testConfigInfoCount() { //mock total count when(databaseOperate.queryOne(anyString(), eq(Integer.class))).thenReturn(new Integer(9)); int count = embeddedConfigInfoPersistService.configInfoCount(); assertEquals(9, count); when(databaseOperate.queryOne(anyString(), eq(Integer.class))).thenReturn(null); try { embeddedConfigInfoPersistService.configInfoCount(); assertTrue(false); } catch (Exception e) { assertTrue(e instanceof IllegalArgumentException); } }
@Override public boolean isAbsentSince(AlluxioURI path, long absentSince) { MountInfo mountInfo = getMountInfo(path); if (mountInfo == null) { return false; } AlluxioURI mountBaseUri = mountInfo.getAlluxioUri(); while (path != null && !path.equals(mountBaseUri)) { Pair<Long, Long> cacheResult = mCache.getIfPresent(path.getPath()); if (cacheResult != null && cacheResult.getFirst() != null && cacheResult.getSecond() != null && cacheResult.getFirst() >= absentSince && cacheResult.getSecond() == mountInfo.getMountId()) { return true; } path = path.getParent(); } // Reached the root, without finding anything in the cache. return false; }
@Test public void isAbsent() throws Exception { AlluxioURI absentPath = new AlluxioURI("/mnt/absent"); // Existence of absentPath is not known yet assertFalse(mUfsAbsentPathCache.isAbsentSince(absentPath, UfsAbsentPathCache.ALWAYS)); process(absentPath); // absentPath is known to be absent assertTrue(mUfsAbsentPathCache.isAbsentSince(absentPath, UfsAbsentPathCache.ALWAYS)); // child of absentPath is also known to be absent assertTrue(mUfsAbsentPathCache.isAbsentSince(absentPath.join("a"), UfsAbsentPathCache.ALWAYS)); mTemp.newFolder("folder"); AlluxioURI newFolder = new AlluxioURI("/mnt/folder"); // Existence of newFolder is not known yet assertFalse(mUfsAbsentPathCache.isAbsentSince(newFolder, UfsAbsentPathCache.ALWAYS)); process(newFolder); // newFolder is known to exist assertFalse(mUfsAbsentPathCache.isAbsentSince(newFolder, UfsAbsentPathCache.ALWAYS)); // Existence of child of newFolder is not known assertFalse(mUfsAbsentPathCache.isAbsentSince(newFolder.join("a"), UfsAbsentPathCache.ALWAYS)); }
@CanIgnoreReturnValue public final Ordered containsAtLeast( @Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) { return containsAtLeastEntriesIn(accumulateMap("containsAtLeast", k0, v0, rest)); }
@Test public void containsAtLeastWrongValue_sameToStringForKeys() { expectFailureWhenTestingThat(ImmutableMap.of(1L, "jan", 1, "feb")) .containsAtLeast(1, "jan", 1L, "feb"); assertFailureKeys( "keys with wrong values", "for key", "expected value", "but got value", "for key", "expected value", "but got value", "---", "expected to contain at least", "but was"); assertFailureValueIndexed("for key", 0, "1 (java.lang.Integer)"); assertFailureValueIndexed("expected value", 0, "jan"); assertFailureValueIndexed("but got value", 0, "feb"); assertFailureValueIndexed("for key", 1, "1 (java.lang.Long)"); assertFailureValueIndexed("expected value", 1, "feb"); assertFailureValueIndexed("but got value", 1, "jan"); }
public static FixedWindows of(Duration size) { return new FixedWindows(size, Duration.ZERO); }
@Test public void testDefaultWindowMappingFn() { PartitioningWindowFn<?, ?> windowFn = FixedWindows.of(Duration.standardMinutes(20L)); WindowMappingFn<?> mapping = windowFn.getDefaultWindowMappingFn(); assertThat( mapping.getSideInputWindow( new BoundedWindow() { @Override public Instant maxTimestamp() { return new Instant(100L); } }), equalTo( new IntervalWindow( new Instant(0L), new Instant(0L).plus(Duration.standardMinutes(20L))))); assertThat(mapping.maximumLookback(), equalTo(Duration.ZERO)); }
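A short arithmetic sketch of how a zero-offset, 20-minute fixed window bounds the timestamp used in the test; the flooring formula is an assumption about fixed-window assignment and is not taken from the snippet itself.

long sizeMillis = Duration.standardMinutes(20L).getMillis();         // 1,200,000 ms
long timestamp = 100L;                                                // the instant used in the test
long windowStart = timestamp - Math.floorMod(timestamp, sizeMillis);  // 0
long windowEnd = windowStart + sizeMillis;                            // 1,200,000
// i.e. IntervalWindow(new Instant(0), new Instant(0).plus(Duration.standardMinutes(20L)))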
@Override public MaskRuleConfiguration buildToBeAlteredRuleConfiguration(final AlterMaskRuleStatement sqlStatement) { return MaskRuleStatementConverter.convert(sqlStatement.getRules()); }
@Test void assertUpdate() { MaskRuleConfiguration currentRuleConfig = createCurrentRuleConfiguration(); MaskColumnSegment columnSegment = new MaskColumnSegment("order_id", new AlgorithmSegment("MD5", new Properties())); MaskRuleSegment ruleSegment = new MaskRuleSegment("t_order", Collections.singleton(columnSegment)); AlterMaskRuleStatement sqlStatement = new AlterMaskRuleStatement(Collections.singleton(ruleSegment)); MaskRule rule = mock(MaskRule.class); when(rule.getConfiguration()).thenReturn(currentRuleConfig); executor.setRule(rule); MaskRuleConfiguration toBeAlteredRuleConfig = executor.buildToBeAlteredRuleConfiguration(sqlStatement); assertThat(toBeAlteredRuleConfig.getTables().size(), is(1)); assertThat(toBeAlteredRuleConfig.getTables().iterator().next().getName(), is("t_order")); assertThat(toBeAlteredRuleConfig.getTables().iterator().next().getColumns().iterator().next().getLogicColumn(), is("order_id")); assertThat(toBeAlteredRuleConfig.getMaskAlgorithms().size(), is(1)); assertTrue(toBeAlteredRuleConfig.getMaskAlgorithms().containsKey("t_order_order_id_md5")); }
public int read(ByteBuffer buffer, long offset, int size) throws IOException { Validate.checkNotNull(buffer, "buffer"); Validate.checkWithinRange(offset, "offset", 0, this.remoteObject.size()); Validate.checkPositiveInteger(size, "size"); if (this.closed) { return -1; } int reqSize = (int) Math.min(size, this.remoteObject.size() - offset); return readOneBlockWithRetries(buffer, offset, reqSize); }
@Test public void testArgChecks() throws Exception { // Should not throw. S3ARemoteObjectReader reader = new S3ARemoteObjectReader(remoteObject); // Verify it throws correctly. intercept( IllegalArgumentException.class, "'remoteObject' must not be null", () -> new S3ARemoteObjectReader(null)); intercept( IllegalArgumentException.class, "'buffer' must not be null", () -> reader.read(null, 10, 2)); ByteBuffer buffer = ByteBuffer.allocate(BUFFER_SIZE); intercept( IllegalArgumentException.class, "'offset' (-1) must be within the range [0, 9]", () -> reader.read(buffer, -1, 2)); intercept( IllegalArgumentException.class, "'offset' (11) must be within the range [0, 9]", () -> reader.read(buffer, 11, 2)); intercept( IllegalArgumentException.class, "'size' must be a positive integer", () -> reader.read(buffer, 1, 0)); intercept( IllegalArgumentException.class, "'size' must be a positive integer", () -> reader.read(buffer, 1, -1)); }
@Override protected Response filter(Request request, RequestMeta meta, Class handlerClazz) { Method method; try { method = getHandleMethod(handlerClazz); } catch (NacosException e) { return null; } if (method.isAnnotationPresent(TpsControl.class) && TpsControlConfig.isTpsControlEnabled()) { try { TpsControl tpsControl = method.getAnnotation(TpsControl.class); String pointName = tpsControl.pointName(); TpsCheckRequest tpsCheckRequest = null; String parseName = StringUtils.isBlank(tpsControl.name()) ? pointName : tpsControl.name(); RemoteTpsCheckRequestParser parser = RemoteTpsCheckRequestParserRegistry.getParser(parseName); if (parser != null) { tpsCheckRequest = parser.parse(request, meta); } if (tpsCheckRequest == null) { tpsCheckRequest = new TpsCheckRequest(); } if (StringUtils.isBlank(tpsCheckRequest.getPointName())) { tpsCheckRequest.setPointName(pointName); } initTpsControlManager(); TpsCheckResponse check = tpsControlManager.check(tpsCheckRequest); if (!check.isSuccess()) { Response response; try { response = super.getDefaultResponseInstance(handlerClazz); response.setErrorInfo(NacosException.OVER_THRESHOLD, "Tps Flow restricted:" + check.getMessage()); return response; } catch (Exception e) { com.alibaba.nacos.plugin.control.Loggers.TPS.warn("Tps check fail , request: {},exception:{}", request.getClass().getSimpleName(), e); return null; } } } catch (Throwable throwable) { com.alibaba.nacos.plugin.control.Loggers.TPS.warn("Tps check exception , request: {},exception:{}", request.getClass().getSimpleName(), throwable); } } return null; }
@Test void testTpsCheckException() { HealthCheckRequest healthCheckRequest = new HealthCheckRequest(); RequestMeta requestMeta = new RequestMeta(); Mockito.when(tpsControlManager.check(any(TpsCheckRequest.class))).thenThrow(new NacosRuntimeException(12345)); Response filterResponse = tpsControlRequestFilter.filter(healthCheckRequest, requestMeta, HealthCheckRequestHandler.class); assertNull(filterResponse); }
@Override public String getResourceOutputNodeType() { return null; }
@Test public void testGetResourceOutputNodeType() throws Exception { assertNull( analyzer.getResourceOutputNodeType() ); }
@Override public List<Integer> applyTransforms(List<Integer> originalGlyphIds) { List<Integer> intermediateGlyphsFromGsub = originalGlyphIds; for (String feature : FEATURES_IN_ORDER) { if (!gsubData.isFeatureSupported(feature)) { LOG.debug("the feature {} was not found", feature); continue; } LOG.debug("applying the feature {}", feature); ScriptFeature scriptFeature = gsubData.getFeature(feature); intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature, intermediateGlyphsFromGsub); } return Collections.unmodifiableList(repositionGlyphs(intermediateGlyphsFromGsub)); }
@Test void testApplyTransforms_khanda_ta() { // given List<Integer> glyphsAfterGsub = Arrays.asList(98, 78, 101, 113); // when List<Integer> result = gsubWorkerForBengali.applyTransforms(getGlyphIds("হঠাৎ")); // then assertEquals(glyphsAfterGsub, result); }
public void add(T value) { UUID uuid = value.getUUID(); if (_map.containsKey(uuid)) { throw new AssertionError(String.format("Existing value found with UUID: %s", uuid)); } _map.put(uuid, value); }
@Test public void addValue() { // try adding a new value UUIDMap<Value> map = new UUIDMap<>(); Value value = addNewValue(map); // try re-adding the value assertThrows(AssertionError.class, () -> map.add(value)); // try adding a clone of the value assertThrows(AssertionError.class, () -> map.add(Cloner.clone(value))); }
long residentMemorySizeEstimate() { return (priorValue == null ? 0 : priorValue.length) + (oldValue == null || priorValue == oldValue ? 0 : oldValue.length) + (newValue == null ? 0 : newValue.length) + recordContext.residentMemorySizeEstimate(); }
@Test public void shouldAccountForDeduplicationInSizeEstimate() { final ProcessorRecordContext context = new ProcessorRecordContext(0L, 0L, 0, "topic", new RecordHeaders()); assertEquals(25L, new BufferValue(null, null, null, context).residentMemorySizeEstimate()); assertEquals(26L, new BufferValue(new byte[] {(byte) 0}, null, null, context).residentMemorySizeEstimate()); assertEquals(26L, new BufferValue(null, new byte[] {(byte) 0}, null, context).residentMemorySizeEstimate()); assertEquals(26L, new BufferValue(new byte[] {(byte) 0}, new byte[] {(byte) 0}, null, context).residentMemorySizeEstimate()); assertEquals(27L, new BufferValue(new byte[] {(byte) 0}, new byte[] {(byte) 1}, null, context).residentMemorySizeEstimate()); // new value should get counted, but doesn't get deduplicated assertEquals(28L, new BufferValue(new byte[] {(byte) 0}, new byte[] {(byte) 1}, new byte[] {(byte) 0}, context).residentMemorySizeEstimate()); }
@Override public String requestMessageForPluginSettingsValidation(PluginSettingsConfiguration configuration) { Map<String, Map<String, Object>> configuredValues = new LinkedHashMap<>(); configuredValues.put("plugin-settings", jsonResultMessageHandler.configurationToMap(configuration)); return GSON.toJson(configuredValues); }
@Test public void shouldBuildRequestBodyForCheckSCMConfigurationValidRequest() throws Exception { PluginSettingsConfiguration configuration = new PluginSettingsConfiguration(); configuration.add(new PluginSettingsProperty("key-one", "value-one")); configuration.add(new PluginSettingsProperty("key-two", "value-two")); String requestMessage = messageHandler.requestMessageForPluginSettingsValidation(configuration); assertThat(requestMessage, is("{\"plugin-settings\":{\"key-one\":{\"value\":\"value-one\"},\"key-two\":{\"value\":\"value-two\"}}}")); }
@Override public Response write(WriteRequest request) throws Exception { CompletableFuture<Response> future = writeAsync(request); // wait up to 10 seconds for the request to complete return future.get(10_000L, TimeUnit.MILLISECONDS); }
@Test void testWrite() throws Exception { raftProtocol.write(writeRequest); verify(serverMock).commit(any(String.class), eq(writeRequest), any(CompletableFuture.class)); }
public Method getMainMethod() { return mainMethod; }
@Test public void testGetMainMethod() { MainMethodFinder mainMethodFinder = new MainMethodFinder(); mainMethodFinder.getMainMethodOfClass(MainMethodFinderTest.class); assertNotNull(mainMethodFinder.mainMethod); }
@Nullable public static Method findPropertySetter( @Nonnull Class<?> clazz, @Nonnull String propertyName, @Nonnull Class<?> propertyType ) { String setterName = "set" + toUpperCase(propertyName.charAt(0)) + propertyName.substring(1); Method method; try { method = clazz.getMethod(setterName, propertyType); } catch (NoSuchMethodException e) { return null; } if (!Modifier.isPublic(method.getModifiers())) { return null; } if (Modifier.isStatic(method.getModifiers())) { return null; } Class<?> returnType = method.getReturnType(); if (returnType != void.class && returnType != Void.class && returnType != clazz) { return null; } return method; }
@Test public void when_findPropertySetter_protected_then_returnsNull() { assertNull(findPropertySetter(JavaProperties.class, "protectedField", int.class)); }
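A compact sketch of the rules encoded above, using a hypothetical Person class (the utility class name ReflectionUtils is also assumed): a setter qualifies only if it is public, non-static, takes exactly the given type, and returns void, Void, or the declaring class (fluent style).

public class Person {
    public void setAge(int age) { }                      // matches: public, void return
    public Person setName(String name) { return this; }  // matches: fluent return type
    public static void setFlag(boolean flag) { }         // rejected: static
    public long setCount(long count) { return count; }   // rejected: foreign return type
}

Method ageSetter   = ReflectionUtils.findPropertySetter(Person.class, "age", int.class);      // non-null
Method nameSetter  = ReflectionUtils.findPropertySetter(Person.class, "name", String.class);  // non-null
Method flagSetter  = ReflectionUtils.findPropertySetter(Person.class, "flag", boolean.class); // null
Method countSetter = ReflectionUtils.findPropertySetter(Person.class, "count", long.class);   // null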
@Override public Collection<DatabasePacket> execute() throws SQLException { switch (packet.getType()) { case 'S': return describePreparedStatement(); case 'P': return Collections.singleton(portalContext.get(packet.getName()).describe()); default: throw new UnsupportedSQLOperationException("Unsupported describe type: " + packet.getType()); } }
@Test void assertDescribePreparedStatementInsertWithColumns() throws SQLException { when(packet.getType()).thenReturn('S'); final String statementId = "S_2"; when(packet.getName()).thenReturn(statementId); String sql = "insert into t_order (id, k, c, pad) values (1, ?, ?, ?), (?, 2, ?, '')"; SQLStatement sqlStatement = SQL_PARSER_ENGINE.parse(sql, false); List<PostgreSQLColumnType> parameterTypes = new ArrayList<>(sqlStatement.getParameterCount()); for (int i = 0; i < sqlStatement.getParameterCount(); i++) { parameterTypes.add(PostgreSQLColumnType.UNSPECIFIED); } SQLStatementContext sqlStatementContext = mock(InsertStatementContext.class); when(sqlStatementContext.getSqlStatement()).thenReturn(sqlStatement); ContextManager contextManager = mockContextManager(); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); List<Integer> parameterIndexes = IntStream.range(0, sqlStatement.getParameterCount()).boxed().collect(Collectors.toList()); connectionSession.getServerPreparedStatementRegistry().addPreparedStatement(statementId, new PostgreSQLServerPreparedStatement(sql, sqlStatementContext, new HintValueContext(), parameterTypes, parameterIndexes)); Collection<DatabasePacket> actualPackets = executor.execute(); assertThat(actualPackets.size(), is(2)); Iterator<DatabasePacket> actualPacketsIterator = actualPackets.iterator(); PostgreSQLParameterDescriptionPacket actualParameterDescription = (PostgreSQLParameterDescriptionPacket) actualPacketsIterator.next(); PostgreSQLPacketPayload mockPayload = mock(PostgreSQLPacketPayload.class); actualParameterDescription.write(mockPayload); verify(mockPayload).writeInt2(5); verify(mockPayload, times(2)).writeInt4(23); verify(mockPayload, times(3)).writeInt4(18); assertThat(actualPacketsIterator.next(), is(PostgreSQLNoDataPacket.getInstance())); }
@Override public boolean equals(@Nullable Object object) { if (object instanceof MetricsMap) { MetricsMap<?, ?> metricsMap = (MetricsMap<?, ?>) object; return Objects.equals(metrics, metricsMap.metrics); } return false; }
@Test public void testEquals() { MetricsMap<String, AtomicLong> metricsMap = new MetricsMap<>(unusedKey -> new AtomicLong()); MetricsMap<String, AtomicLong> equal = new MetricsMap<>(unusedKey -> new AtomicLong()); Assert.assertEquals(metricsMap, equal); Assert.assertEquals(metricsMap.hashCode(), equal.hashCode()); }
public void indexDocument(int docId, Predicate predicate) { if (documentIdCounter == Integer.MAX_VALUE) { throw new IllegalStateException("Index is full, max number of documents is: " + Integer.MAX_VALUE); } else if (seenIds.contains(docId)) { throw new IllegalArgumentException("Document id is already in use: " + docId); } else if (isNeverMatchingDocument(predicate)) { return; } seenIds.add(docId); predicate = optimizer.optimizePredicate(predicate); int internalId = documentIdCounter++; if (isAlwaysMatchingDocument(predicate)) { indexZeroConstraintDocument(internalId); } else { indexDocument(internalId, PredicateTreeAnnotator.createPredicateTreeAnnotations(predicate)); } }
@Test void requireThatIndexingMultiDocumentsWithSameIdThrowsException() { assertThrows(IllegalArgumentException.class, () -> { PredicateIndexBuilder builder = new PredicateIndexBuilder(2); builder.indexDocument(1, Predicate.fromString("a in ['b']")); builder.indexDocument(1, Predicate.fromString("c in ['d']")); }); }
@Override public Object apply(Object input) { return PropertyOrFieldSupport.EXTRACTION.getValueOf(propertyOrFieldName, input); }
@Test void should_throw_error_when_no_property_nor_public_field_match_given_name() { // GIVEN ByNameSingleExtractor underTest = new ByNameSingleExtractor("unknown"); // WHEN Throwable thrown = catchThrowable(() -> underTest.apply(YODA)); // THEN then(thrown).isInstanceOf(IntrospectionError.class); }
public void bookRoom(int roomNumber) throws Exception { var room = hotelDao.getById(roomNumber); if (room.isEmpty()) { throw new Exception("Room number: " + roomNumber + " does not exist"); } else { if (room.get().isBooked()) { throw new Exception("Room already booked!"); } else { var updateRoomBooking = room.get(); updateRoomBooking.setBooked(true); hotelDao.update(updateRoomBooking); } } }
@Test void bookingRoomWithInvalidIdShouldRaiseException() { assertThrows(Exception.class, () -> hotel.bookRoom(getNonExistingRoomId())); }
public static List<FieldSchema> convert(Schema schema) { return schema.columns().stream() .map(col -> new FieldSchema(col.name(), convertToTypeString(col.type()), col.doc())) .collect(Collectors.toList()); }
@Test public void testConversionWithoutLastComment() { Schema expected = new Schema( optional(1, "customer_id", Types.LongType.get(), "customer comment"), optional(2, "first_name", Types.StringType.get(), null) ); Schema schema = HiveSchemaUtil.convert( Arrays.asList("customer_id", "first_name"), Arrays.asList(TypeInfoUtils.getTypeInfoFromTypeString(serdeConstants.BIGINT_TYPE_NAME), TypeInfoUtils.getTypeInfoFromTypeString(serdeConstants.STRING_TYPE_NAME)), Collections.singletonList("customer comment")); assertThat(schema.asStruct()).isEqualTo(expected.asStruct()); }
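A minimal sketch of the Schema-to-FieldSchema direction shown above, reusing the column layout from the test; the rendered Hive type strings ("bigint", "string") are assumptions about what convertToTypeString produces.

Schema schema = new Schema(
        optional(1, "customer_id", Types.LongType.get(), "customer comment"),
        optional(2, "first_name", Types.StringType.get(), null));
List<FieldSchema> hiveColumns = HiveSchemaUtil.convert(schema);
// hiveColumns.get(0): name "customer_id", type "bigint", comment "customer comment"
// hiveColumns.get(1): name "first_name",  type "string", comment null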
Queue<String> prepareRollingOrder(List<String> podNamesToConsider, List<Pod> pods) { Deque<String> rollingOrder = new ArrayDeque<>(); for (String podName : podNamesToConsider) { Pod matchingPod = pods.stream().filter(pod -> podName.equals(pod.getMetadata().getName())).findFirst().orElse(null); if (matchingPod == null || !Readiness.isPodReady(matchingPod)) { // Non-existing or unready pods are handled first // This helps to avoid rolling all pods into some situation where they would be all failing rollingOrder.addFirst(podName); } else { // Ready pods are rolled only at the end rollingOrder.addLast(podName); } } return rollingOrder; }
@Test public void testRollingOrderWithMissingPod() { List<Pod> pods = List.of( renamePod(READY_POD, "my-connect-connect-0"), renamePod(READY_POD, "my-connect-connect-2") ); KafkaConnectRoller roller = new KafkaConnectRoller(RECONCILIATION, CLUSTER, 1_000L, null); Queue<String> rollingOrder = roller.prepareRollingOrder(POD_NAMES, pods); assertThat(rollingOrder.size(), is(3)); assertThat(rollingOrder.poll(), is("my-connect-connect-1")); assertThat(rollingOrder.poll(), is("my-connect-connect-0")); assertThat(rollingOrder.poll(), is("my-connect-connect-2")); }
public static int MCRF4XX(@NonNull final byte[] data, final int offset, final int length) { return CRC(0x1021, 0xFFFF, data, offset, length, true, true, 0x0000); }
@Test // See: http://ww1.microchip.com/downloads/en/AppNotes/00752a.pdf public void MCRF4XX_8552F189() { final byte[] data = new byte[] {(byte) 0x58, (byte) 0x25, (byte) 0x1F, (byte) 0x98}; assertEquals(0x07F1, CRC16.MCRF4XX(data, 0, 4)); }
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { Target target = getTarget(request); if (target == Target.Other) { chain.doFilter(request, response); return; } HttpServletRequest httpRequest = (HttpServletRequest) request; if (isRateLimited(httpRequest, target)) { incrementStats(target); if (serverConfig.isRateLimiterEnabled()) { ((HttpServletResponse) response).setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE); return; } } chain.doFilter(request, response); }
@Test public void testStandardClientsThrottlingEnforceable() throws Exception { ConfigurationManager.getConfigInstance().setProperty("eureka.rateLimiter.throttleStandardClients", true); // Custom clients will go up to the window limit whenRequest(FULL_FETCH, EurekaClientIdentity.DEFAULT_CLIENT_NAME); filter.doFilter(request, response, filterChain); filter.doFilter(request, response, filterChain); verify(filterChain, times(2)).doFilter(request, response); // Now we hit the limit long rateLimiterCounter = EurekaMonitors.RATE_LIMITED.getCount(); filter.doFilter(request, response, filterChain); assertEquals("Expected rate limiter counter increase", rateLimiterCounter + 1, EurekaMonitors.RATE_LIMITED.getCount()); verify(response, times(1)).setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE); }
public void validateFilterExpression(final Expression exp) { final SqlType type = getExpressionReturnType(exp); if (!SqlTypes.BOOLEAN.equals(type)) { throw new KsqlStatementException( "Type error in " + filterType.name() + " expression: " + "Should evaluate to boolean but is" + " (" + type.toString(FormatOptions.none()) + ") instead.", "Type error in " + filterType.name() + " expression: " + "Should evaluate to boolean but is " + exp.toString() + " (" + type.toString(FormatOptions.none()) + ") instead.", exp.toString() ); } }
@Test public void shouldThrowOnBadTypeComparison() { // Given: final Expression left = new UnqualifiedColumnReferenceExp(COLUMN1); final Expression right = new IntegerLiteral(10); final Expression comparision = new ComparisonExpression(Type.EQUAL, left, right); when(schema.findValueColumn(any())) .thenReturn(Optional.of(Column.of(COLUMN1, STRING, VALUE, 10))); // When: assertThrows("Error in WHERE expression: " + "Cannot compare col1 (STRING) to 10 (INTEGER) with EQUAL.", KsqlException.class, () -> validator.validateFilterExpression(comparision)); }
public QueryConfiguration applyOverrides(QueryConfigurationOverrides overrides) { Map<String, String> sessionProperties; if (overrides.getSessionPropertiesOverrideStrategy() == OVERRIDE) { sessionProperties = new HashMap<>(overrides.getSessionPropertiesOverride()); } else { sessionProperties = new HashMap<>(this.sessionProperties); if (overrides.getSessionPropertiesOverrideStrategy() == SUBSTITUTE) { sessionProperties.putAll(overrides.getSessionPropertiesOverride()); } } overrides.getSessionPropertiesToRemove().forEach(sessionProperties::remove); return new QueryConfiguration( overrides.getCatalogOverride().orElse(catalog), overrides.getSchemaOverride().orElse(schema), Optional.ofNullable(overrides.getUsernameOverride().orElse(username.orElse(null))), Optional.ofNullable(overrides.getPasswordOverride().orElse(password.orElse(null))), Optional.of(sessionProperties), isReusableTable, Optional.of(partitions)); }
@Test public void testSessionPropertyOverride() { overrides.setSessionPropertiesOverrideStrategy(OVERRIDE); assertEquals(CONFIGURATION_1.applyOverrides(overrides), CONFIGURATION_FULL_OVERRIDE); QueryConfiguration overridden = new QueryConfiguration( CATALOG_OVERRIDE, SCHEMA_OVERRIDE, Optional.of(USERNAME_OVERRIDE), Optional.of(PASSWORD_OVERRIDE), Optional.of(SESSION_PROPERTIES_OVERRIDE), false, Optional.empty()); assertEquals(CONFIGURATION_2.applyOverrides(overrides), overridden); }
public static InetAddress fixScopeIdAndGetInetAddress(final InetAddress inetAddress) throws SocketException { if (!(inetAddress instanceof Inet6Address inet6Address)) { return inetAddress; } if (!inetAddress.isLinkLocalAddress() && !inetAddress.isSiteLocalAddress()) { return inetAddress; } if (inet6Address.getScopeId() > 0 || inet6Address.getScopedInterface() != null) { return inetAddress; } final Inet6Address resultInetAddress = findRealInet6Address(inet6Address); return resultInetAddress == null ? inetAddress : resultInetAddress; }
@Test public void testFixScopeIdAndGetInetAddress_whenNotLinkLocalAddress() throws SocketException, UnknownHostException { InetAddress inetAddress = InetAddress.getByName("2001:db8:85a3:0:0:8a2e:370:7334"); InetAddress actual = AddressUtil.fixScopeIdAndGetInetAddress(inetAddress); assertEquals(inetAddress, actual); }
public static ConnectedComponents findComponentsForStartEdges(Graph graph, EdgeTransitionFilter edgeTransitionFilter, IntContainer edges) { return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, true).findComponentsForStartEdges(edges); }
@Test public void withStartEdges_simple() { // 0 - 1 4 - 5 - 6 - 7 // | | // 3 - 2 8 - 9 g.edge(0, 1).setDistance(10).set(speedEnc, 10, 10); g.edge(1, 2).setDistance(10).set(speedEnc, 10, 10); g.edge(2, 3).setDistance(10).set(speedEnc, 10, 10); g.edge(3, 0).setDistance(10).set(speedEnc, 10, 10); g.edge(4, 5).setDistance(10).set(speedEnc, 10, 10); g.edge(5, 6).setDistance(10).set(speedEnc, 10, 10); g.edge(6, 7).setDistance(10).set(speedEnc, 10, 10); g.edge(8, 9).setDistance(10).set(speedEnc, 10, 10); // just the left island ConnectedComponents components = EdgeBasedTarjanSCC.findComponentsForStartEdges(g, (prev, edge) -> true, IntArrayList.from(0)); assertEquals(8, components.getEdgeKeys()); assertEquals(1, components.getComponents().size()); // all islands components = EdgeBasedTarjanSCC.findComponentsForStartEdges(g, (prev, edge) -> true, IntArrayList.from(0, 4, 7)); assertEquals(16, components.getEdgeKeys()); assertEquals(3, components.getComponents().size()); // here we initialize as for all islands but the filter still prevents some edges to be found components = EdgeBasedTarjanSCC.findComponentsForStartEdges(g, (prev, edge) -> edge.getEdge() > 3 && edge.getEdge() < 7, IntArrayList.from(0, 4, 7)); assertEquals(6, components.getEdgeKeys()); assertEquals(1, components.getComponents().size()); }
@Override public void run() { try { // make sure we call afterRun() even on crashes // and operate countdown latches, else we may hang the parallel runner if (steps == null) { beforeRun(); } if (skipped) { return; } int count = steps.size(); int index = 0; while ((index = nextStepIndex()) < count) { currentStep = steps.get(index); execute(currentStep); if (currentStepResult != null) { // can be null if debug step-back or hook skip result.addStepResult(currentStepResult); } } } catch (Exception e) { if (currentStepResult != null) { result.addStepResult(currentStepResult); } logError("scenario [run] failed\n" + StringUtils.throwableToString(e)); currentStepResult = result.addFakeStepResult("scenario [run] failed", e); } finally { if (!skipped) { afterRun(); if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) { featureRuntime.suite.abort(); } } if (caller.isNone()) { logAppender.close(); // reclaim memory } } }
@Test void testCallFromJs() { run( "def res = karate.call('called1.feature')" ); matchVar("res", "{ a: 1, foo: { hello: 'world' } }"); }
public static boolean isNotEmpty(CharSequence str) { return !isEmpty(str); }
@Test public void assertIsNotEmpty() { String string = "string"; Assert.assertTrue(StringUtil.isNotEmpty(string)); }
@Override public <K, V> Map<K, V> toMap(DataTable dataTable, Type keyType, Type valueType) { requireNonNull(dataTable, "dataTable may not be null"); requireNonNull(keyType, "keyType may not be null"); requireNonNull(valueType, "valueType may not be null"); if (dataTable.isEmpty()) { return emptyMap(); } DataTable keyColumn = dataTable.columns(0, 1); DataTable valueColumns = dataTable.columns(1); String firstHeaderCell = keyColumn.cell(0, 0); boolean firstHeaderCellIsBlank = firstHeaderCell == null || firstHeaderCell.isEmpty(); List<K> keys = convertEntryKeys(keyType, keyColumn, valueType, firstHeaderCellIsBlank); if (valueColumns.isEmpty()) { return createMap(keyType, keys, valueType, nCopies(keys.size(), null)); } boolean keysImplyTableRowTransformer = keys.size() == dataTable.height() - 1; List<V> values = convertEntryValues(valueColumns, keyType, valueType, keysImplyTableRowTransformer); if (keys.size() != values.size()) { throw keyValueMismatchException(firstHeaderCellIsBlank, keys.size(), keyType, values.size(), valueType); } return createMap(keyType, keys, valueType, values); }
@Test void to_map_of_unknown_type_to_object__throws_exception__register_table_cell_transformer() { DataTable table = parse("", "| | lat | lon |", "| KMSY | 29.993333 | -90.258056 |", "| KSFO | 37.618889 | -122.375 |", "| KSEA | 47.448889 | -122.309444 |", "| KJFK | 40.639722 | -73.778889 |"); registry.defineDataTableType(new DataTableType(Coordinate.class, COORDINATE_TABLE_ENTRY_TRANSFORMER)); CucumberDataTableException exception = assertThrows( CucumberDataTableException.class, () -> converter.toMap(table, AirPortCode.class, Coordinate.class)); assertThat(exception.getMessage(), is("" + "Can't convert DataTable to Map<io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$AirPortCode, io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Coordinate>.\n" + "Please review these problems:\n" + "\n" + " - There was no table cell transformer registered for io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$AirPortCode.\n" + " Please consider registering a table cell transformer.\n" + "\n" + " - There was no default table cell transformer registered to transform io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$AirPortCode.\n" + " Please consider registering a default table cell transformer.\n" + "\n" + "Note: Usually solving one is enough")); }
public Rule<ProjectNode> projectNodeRule() { return new PullUpExpressionInLambdaProjectNodeRule(); }
@Test public void testLikeExpression() { tester().assertThat(new PullUpExpressionInLambdaRules(getFunctionManager()).projectNodeRule()) .setSystemProperty(PULL_EXPRESSION_FROM_LAMBDA_ENABLED, "true") .on(p -> { p.variable("expr", new ArrayType(BOOLEAN)); p.variable("col", VARCHAR); p.variable("arr1", new ArrayType(VARCHAR)); return p.project( Assignments.builder().put(p.variable("expr", new ArrayType(BOOLEAN)), p.rowExpression("transform(arr1, x-> x like concat(col, 'a'))")).build(), p.values(p.variable("arr1", new ArrayType(VARCHAR)), p.variable("col", VARCHAR))); }) .matches( project( ImmutableMap.of("expr", expression("transform(arr1, x -> x like concat_1)")), project( ImmutableMap.of("concat_1", expression("concat(col, 'a')")), values("arr1", "col")))); }
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { try { final AttributedList<Path> children = new AttributedList<Path>(); final IRODSFileSystemAO fs = session.getClient(); final IRODSFile f = fs.getIRODSFileFactory().instanceIRODSFile(directory.getAbsolute()); if(!f.exists()) { throw new NotfoundException(directory.getAbsolute()); } for(File file : fs.getListInDirWithFileFilter(f, TrueFileFilter.TRUE)) { final String normalized = PathNormalizer.normalize(file.getAbsolutePath(), true); if(StringUtils.equals(normalized, directory.getAbsolute())) { continue; } final PathAttributes attributes = new PathAttributes(); final ObjStat stats = fs.getObjStat(file.getAbsolutePath()); attributes.setModificationDate(stats.getModifiedAt().getTime()); attributes.setCreationDate(stats.getCreatedAt().getTime()); attributes.setSize(stats.getObjSize()); attributes.setChecksum(Checksum.parse(Hex.encodeHexString(Base64.decodeBase64(stats.getChecksum())))); attributes.setOwner(stats.getOwnerName()); attributes.setGroup(stats.getOwnerZone()); children.add(new Path(directory, PathNormalizer.name(normalized), file.isDirectory() ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file), attributes)); listener.chunk(directory, children); } return children; } catch(JargonException e) { throw new IRODSExceptionMappingService().map("Listing directory {0} failed", e, directory); } }
@Test @Ignore public void testList() throws Exception { final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol()))); final Profile profile = new ProfilePlistReader(factory).read( this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile")); final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials( PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret") )); final IRODSSession session = new IRODSSession(host); assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback())); assertTrue(session.isConnected()); assertNotNull(session.getClient()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final AttributedList<Path> list = new IRODSListService(session).list(new IRODSHomeFinderService(session).find(), new DisabledListProgressListener()); assertFalse(list.isEmpty()); for(Path p : list) { assertEquals(new IRODSHomeFinderService(session).find(), p.getParent()); assertNotEquals(-1L, p.attributes().getModificationDate()); assertNotEquals(-1L, p.attributes().getSize()); } session.close(); }
public static String toJson(MetadataUpdate metadataUpdate) { return toJson(metadataUpdate, false); }
@Test public void testRemoveSnapshotsToJson() { String action = MetadataUpdateParser.REMOVE_SNAPSHOTS; long snapshotId = 2L; String expected = String.format("{\"action\":\"%s\",\"snapshot-ids\":[2]}", action); MetadataUpdate update = new MetadataUpdate.RemoveSnapshot(snapshotId); String actual = MetadataUpdateParser.toJson(update); assertThat(actual) .as("Remove snapshots should serialize to the correct JSON value") .isEqualTo(expected); }
protected void scheduleRefresh() { FrameworkExecutorRepository repository = frameworkModel.getBeanFactory().getBean(FrameworkExecutorRepository.class); refreshFuture = repository .getSharedScheduledExecutor() .scheduleAtFixedRate( this::generateCert, certConfig.getRefreshInterval(), certConfig.getRefreshInterval(), TimeUnit.MILLISECONDS); }
@Test void testRefresh() { FrameworkModel frameworkModel = new FrameworkModel(); AtomicInteger count = new AtomicInteger(0); DubboCertManager certManager = new DubboCertManager(frameworkModel) { @Override protected CertPair generateCert() { count.incrementAndGet(); return null; } }; certManager.certConfig = new CertConfig(null, null, null, null, 10); certManager.scheduleRefresh(); Assertions.assertNotNull(certManager.refreshFuture); await().until(() -> count.get() > 1); certManager.refreshFuture.cancel(false); frameworkModel.destroy(); }
@ConstantFunction(name = "multiply", argTypes = {DOUBLE, DOUBLE}, returnType = DOUBLE) public static ConstantOperator multiplyDouble(ConstantOperator first, ConstantOperator second) { return ConstantOperator.createDouble(first.getDouble() * second.getDouble()); }
@Test
public void multiplyDouble() {
    assertEquals(10000.0, ScalarOperatorFunctions.multiplyDouble(O_DOUBLE_100, O_DOUBLE_100).getDouble(), 1);
}

@SuppressWarnings("unchecked")
static Object extractFromRecordValue(Object recordValue, String fieldName) {
    List<String> fields = Splitter.on('.').splitToList(fieldName);
    if (recordValue instanceof Struct) {
        return valueFromStruct((Struct) recordValue, fields);
    } else if (recordValue instanceof Map) {
        return valueFromMap((Map<String, ?>) recordValue, fields);
    } else {
        throw new UnsupportedOperationException(
                "Cannot extract value from type: " + recordValue.getClass().getName());
    }
}

@Test
public void testExtractFromRecordValueStruct() {
    Schema valSchema = SchemaBuilder.struct().field("key", Schema.INT64_SCHEMA).build();
    Struct val = new Struct(valSchema).put("key", 123L);
    Object result = RecordUtils.extractFromRecordValue(val, "key");
    assertThat(result).isEqualTo(123L);
}

@Override
public void dump(OutputStream output) {
    try (PrintWriter out = new PrintWriter(new OutputStreamWriter(output, UTF_8))) {
        for (long value : values) {
            out.printf("%d%n", value);
        }
    }
}

@Test
public void dumpsToAStream() throws Exception {
    final ByteArrayOutputStream output = new ByteArrayOutputStream();
    snapshot.dump(output);
    assertThat(output.toString())
            .isEqualTo(String.format("1%n2%n3%n4%n5%n"));
}

public Optional<Long> getCommandSequenceNumber() {
    return commandSequenceNumber;
}

@Test
public void shouldHandleNullCommandNumber() {
    assertThat(
            new KsqlRequest("sql", SOME_PROPS, Collections.emptyMap(), null).getCommandSequenceNumber(),
            is(Optional.empty()));
}

@Override
public void put(K key, V value) {
    checkState(!destroyed, destroyedMessage);
    checkNotNull(key, ERROR_NULL_KEY);
    checkNotNull(value, ERROR_NULL_VALUE);

    MapValue<V> newValue = new MapValue<>(value, timestampProvider.apply(key, value));
    if (putInternal(key, newValue)) {
        notifyPeers(new UpdateEntry<>(key, newValue), peerUpdateFunction.apply(key, value));
        notifyListeners(new EventuallyConsistentMapEvent<>(mapName, PUT, key, value));
    }
}

@Test
public void testPut() throws Exception {
    // Set up expectations of external events to be sent to listeners during
    // the test. These don't use timestamps so we can set them all up at once.
    EventuallyConsistentMapListener<String, String> listener = getListener();
    listener.event(new EventuallyConsistentMapEvent<>(
            MAP_NAME, EventuallyConsistentMapEvent.Type.PUT, KEY1, VALUE1));
    listener.event(new EventuallyConsistentMapEvent<>(
            MAP_NAME, EventuallyConsistentMapEvent.Type.PUT, KEY1, VALUE2));
    replay(listener);

    ecMap.addListener(listener);

    // Set up expected internal message to be broadcast to peers on first put
    expectSpecificMulticastMessage(generatePutMessage(KEY1, VALUE1, clockService
            .peekAtNextTimestamp()), UPDATE_MESSAGE_SUBJECT, clusterCommunicator);

    // Put first value
    assertNull(ecMap.get(KEY1));
    ecMap.put(KEY1, VALUE1);
    assertEquals(VALUE1, ecMap.get(KEY1));
    verify(clusterCommunicator);

    // Set up expected internal message to be broadcast to peers on second put
    expectSpecificMulticastMessage(generatePutMessage(
            KEY1, VALUE2, clockService.peekAtNextTimestamp()), UPDATE_MESSAGE_SUBJECT, clusterCommunicator);

    // Update same key to a new value
    ecMap.put(KEY1, VALUE2);
    assertEquals(VALUE2, ecMap.get(KEY1));
    verify(clusterCommunicator);

    // Do a put with an older timestamp than the value already there.
    // The map data should not be changed and no notifications should be sent.
    reset(clusterCommunicator);
    replay(clusterCommunicator);
    clockService.turnBackTime();
    ecMap.put(KEY1, VALUE1);
    // Value should not have changed.
    assertEquals(VALUE2, ecMap.get(KEY1));
    verify(clusterCommunicator);

    // Check that our listener received the correct events during the test
    verify(listener);
}

public JobStatus getJobStatus(JobID oldJobID) throws IOException {
    org.apache.hadoop.mapreduce.v2.api.records.JobId jobId = TypeConverter.toYarn(oldJobID);
    GetJobReportRequest request = recordFactory.newRecordInstance(GetJobReportRequest.class);
    request.setJobId(jobId);
    JobReport report = ((GetJobReportResponse) invoke("getJobReport",
            GetJobReportRequest.class, request)).getJobReport();
    JobStatus jobStatus = null;
    if (report != null) {
        if (StringUtils.isEmpty(report.getJobFile())) {
            String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
            report.setJobFile(jobFile);
        }
        String historyTrackingUrl = report.getTrackingUrl();
        String url = StringUtils.isNotEmpty(historyTrackingUrl) ? historyTrackingUrl : trackingUrl;
        jobStatus = TypeConverter.fromYarn(report, url);
    }
    return jobStatus;
}

@Test
public void testRemoteExceptionFromHistoryServer() throws Exception {
    MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
    when(historyServerProxy.getJobReport(getJobReportRequest())).thenThrow(
            new IOException("Job ID doesnot Exist"));
    ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
    when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
            .thenReturn(null);
    ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
            historyServerProxy, rm);
    try {
        clientServiceDelegate.getJobStatus(oldJobId);
        Assert.fail("Invoke should throw exception after retries.");
    } catch (IOException e) {
        Assert.assertTrue(e.getMessage().contains("Job ID doesnot Exist"));
    }
}

@Override
public void update(V newValue) {
    throw MODIFICATION_ATTEMPT_ERROR;
}

@Test
void testUpdate() throws IOException {
    long value = valueState.value();
    assertThat(value).isEqualTo(42L);

    assertThatThrownBy(() -> valueState.update(54L))
            .isInstanceOf(UnsupportedOperationException.class);
}

public static String toStringAddress(SocketAddress address) {
    if (address == null) {
        return StringUtils.EMPTY;
    }
    return toStringAddress((InetSocketAddress) address);
}

@Test
public void testToStringAddress1() {
    assertThat(NetUtil.toStringAddress((SocketAddress) ipv4))
            .isEqualTo(ipv4.getAddress().getHostAddress() + ":" + ipv4.getPort());
    assertThat(NetUtil.toStringAddress((SocketAddress) ipv6))
            .isEqualTo(ipv6.getAddress().getHostAddress() + ":" + ipv6.getPort());
}

@ApiOperation(value = "Save Or update User (saveUser)",
        notes = "Create or update the User. When creating user, platform generates User Id as " + UUID_WIKI_LINK +
                "The newly created User Id will be present in the response. " +
                "Specify existing User Id to update the User. " +
                "Referencing non-existing User Id will cause 'Not Found' error." +
                "\n\nUser email is unique for entire platform setup." +
                "Remove 'id', 'tenantId' and optionally 'customerId' from the request body example (below) to create new User entity." +
                "\n\nAvailable for users with 'SYS_ADMIN', 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.")
@PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/user", method = RequestMethod.POST)
@ResponseBody
public User saveUser(
        @Parameter(description = "A JSON value representing the User.", required = true)
        @RequestBody User user,
        @Parameter(description = "Send activation email (or use activation link)",
                schema = @Schema(defaultValue = "true"))
        @RequestParam(required = false, defaultValue = "true") boolean sendActivationMail,
        HttpServletRequest request) throws ThingsboardException {
    if (!Authority.SYS_ADMIN.equals(getCurrentUser().getAuthority())) {
        user.setTenantId(getCurrentUser().getTenantId());
    }
    checkEntity(user.getId(), user, Resource.USER);
    return tbUserService.save(getTenantId(), getCurrentUser().getCustomerId(), user, sendActivationMail, request, getCurrentUser());
}

@Test
public void testSaveUser() throws Exception {
    loginSysAdmin();

    User user = createTenantAdminUser();
    String email = user.getEmail();

    Mockito.reset(tbClusterService, auditLogService);

    User savedUser = doPost("/api/user", user, User.class);
    Assert.assertNotNull(savedUser);
    Assert.assertNotNull(savedUser.getId());
    Assert.assertTrue(savedUser.getCreatedTime() > 0);
    Assert.assertEquals(email, savedUser.getEmail());

    User foundUser = doGet("/api/user/" + savedUser.getId().getId().toString(), User.class);
    Assert.assertEquals(foundUser, savedUser);

    testNotifyManyEntityManyTimeMsgToEdgeServiceEntityEqAny(foundUser, foundUser,
            SYSTEM_TENANT, customerNUULId, null, SYS_ADMIN_EMAIL,
            ActionType.ADDED, 1, 1, 1);
    Mockito.reset(tbClusterService, auditLogService);

    resetTokens();

    doGet("/api/noauth/activate?activateToken={activateToken}", this.currentActivateToken)
            .andExpect(status().isSeeOther())
            .andExpect(header().string(HttpHeaders.LOCATION, "/login/createPassword?activateToken=" + this.currentActivateToken));

    JsonNode activateRequest = JacksonUtil.newObjectNode()
            .put("activateToken", this.currentActivateToken)
            .put("password", "testPassword");

    JsonNode tokenInfo = readResponse(doPost("/api/noauth/activate", activateRequest).andExpect(status().isOk()), JsonNode.class);
    validateAndSetJwtToken(tokenInfo, email);

    doGet("/api/auth/user")
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.authority", is(Authority.TENANT_ADMIN.name())))
            .andExpect(jsonPath("$.email", is(email)));

    resetTokens();

    login(email, "testPassword");

    doGet("/api/auth/user")
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.authority", is(Authority.TENANT_ADMIN.name())))
            .andExpect(jsonPath("$.email", is(email)));

    loginSysAdmin();

    foundUser = doGet("/api/user/" + savedUser.getId().getId().toString(), User.class);

    Mockito.reset(tbClusterService, auditLogService);

    doDelete("/api/user/" + savedUser.getId().getId().toString())
            .andExpect(status().isOk());

    testNotifyEntityAllOneTimeLogEntityActionEntityEqClass(foundUser, foundUser.getId(), foundUser.getId(),
            SYSTEM_TENANT, customerNUULId, null, SYS_ADMIN_EMAIL,
            ActionType.DELETED, ActionType.DELETED, SYSTEM_TENANT.getId().toString());
}

@Override
public void apply(final Path file, final Local local, final TransferStatus status,
                  final ProgressListener listener) throws BackgroundException {
    // Rename existing file before putting new file in place
    if(status.isExists()) {
        Path rename;
        do {
            final String proposal = MessageFormat.format(PreferencesFactory.get().getProperty("queue.upload.file.rename.format"),
                    FilenameUtils.getBaseName(file.getName()),
                    UserDateFormatterFactory.get().getMediumFormat(System.currentTimeMillis(), false).replace(Path.DELIMITER, '-').replace(':', '-'),
                    StringUtils.isNotBlank(file.getExtension()) ? String.format(".%s", file.getExtension()) : StringUtils.EMPTY);
            rename = new Path(file.getParent(), proposal, file.getType());
        }
        while(find.find(rename));
        if(log.isInfoEnabled()) {
            log.info(String.format("Rename existing file %s to %s", file, rename));
        }
        move.move(file, rename, new TransferStatus().exists(false), new Delete.DisabledCallback(), new DisabledConnectionCallback());
        if(log.isDebugEnabled()) {
            log.debug(String.format("Clear exist flag for file %s", file));
        }
        status.exists(false).getDisplayname().exists(false);
    }
    super.apply(file, local, status, listener);
}

@Test
public void testPrepare() throws Exception {
    final AtomicBoolean c = new AtomicBoolean();
    final RenameExistingFilter f = new RenameExistingFilter(new DisabledUploadSymlinkResolver(),
            new NullSession(new Host(new TestProtocol())) {
                @Override
                @SuppressWarnings("unchecked")
                public <T> T _getFeature(final Class<T> type) {
                    if(type == Move.class) {
                        return (T) new Move() {
                            @Override
                            public Path move(final Path file, final Path renamed, final TransferStatus status,
                                             final Delete.Callback callback, final ConnectionCallback connectionCallback) {
                                assertNotSame(file.getName(), renamed.getName());
                                c.set(true);
                                return renamed;
                            }

                            @Override
                            public boolean isRecursive(final Path source, final Path target) {
                                return true;
                            }
                        };
                    }
                    return super._getFeature(type);
                }

                @Override
                public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
                    final AttributedList<Path> l = new AttributedList<>();
                    l.add(new Path("t", EnumSet.of(Path.Type.file)));
                    return l;
                }
            });
    final Path p = new Path("t", EnumSet.of(Path.Type.file)) {
        @Override
        public Path getParent() {
            return new Path("p", EnumSet.of(Path.Type.directory));
        }
    };
    f.prepare(p, new NullLocal(System.getProperty("java.io.tmpdir"), "t"), new TransferStatus().exists(true), new DisabledProgressListener());
    assertFalse(c.get());
    f.apply(p, new NullLocal(System.getProperty("java.io.tmpdir"), "t"), new TransferStatus().exists(true), new DisabledProgressListener());
    assertTrue(c.get());
}

@Override
public long get(long key1, int key2) {
    return super.get0(key1, key2);
}

@Test
public void testClear() {
    final long key1 = randomKey();
    final int key2 = randomKey();
    insert(key1, key2);
    hsa.clear();
    assertEquals(NULL_ADDRESS, hsa.get(key1, key2));
    assertEquals(0, hsa.size());
}

@Operation(summary = "Get single connection")
@GetMapping(value = "{id}", produces = "application/json")
@ResponseBody
public Connection getById(@PathVariable("id") Long id) {
    return connectionService.getConnectionById(id);
}

@Test
public void getConnectionById() {
    when(connectionServiceMock.getConnectionById(anyLong())).thenReturn(getNewConnection());
    Connection result = controllerMock.getById(anyLong());
    verify(connectionServiceMock, times(1)).getConnectionById(anyLong());
    assertEquals("connection", result.getName());
}

@Override
public void deleteTag(Long id) {
    // Check that the tag exists
    validateTagExists(id);
    // Check whether any members still use the tag
    validateTagHasUser(id);
    // Delete the tag
    memberTagMapper.deleteById(id);
}

@Test
public void testDeleteTag_notExists() {
    // Prepare parameters
    Long id = randomLongId();

    // Call and assert the expected exception
    assertServiceException(() -> tagService.deleteTag(id), TAG_NOT_EXISTS);
}

public void succeededGetQueueInfoRetrieved(long duration) {
    totalSucceededGetQueueInfoRetrieved.add(duration);
    getQueueInfoLatency.add(duration);
}

@Test
public void testSucceededGetQueueInfoRetrieved() {
    long totalGoodBefore = metrics.getNumSucceededGetQueueInfoRetrieved();
    goodSubCluster.getQueueInfoRetrieved(150);
    Assert.assertEquals(totalGoodBefore + 1,
            metrics.getNumSucceededGetQueueInfoRetrieved());
    Assert.assertEquals(150,
            metrics.getLatencySucceededGetQueueInfoRetrieved(), ASSERT_DOUBLE_DELTA);
    goodSubCluster.getQueueInfoRetrieved(300);
    Assert.assertEquals(totalGoodBefore + 2,
            metrics.getNumSucceededGetQueueInfoRetrieved());
    Assert.assertEquals(225,
            metrics.getLatencySucceededGetQueueInfoRetrieved(), ASSERT_DOUBLE_DELTA);
}

@Override
public Optional<Endpoint> getRestEndpoint(String clusterId) {
    Optional<KubernetesService> restService =
            getService(ExternalServiceDecorator.getExternalServiceName(clusterId));
    if (!restService.isPresent()) {
        return Optional.empty();
    }
    final Service service = restService.get().getInternalResource();
    final KubernetesConfigOptions.ServiceExposedType serviceExposedType =
            ServiceType.classify(service);

    return serviceExposedType
            .serviceType()
            .getRestEndpoint(service, internalClient, nodePortAddressType);
}

@Test
void testServiceLoadBalancerEmptyHostAndIP() {
    mockExpectedServiceFromServerSide(buildExternalServiceWithLoadBalancer("", ""));

    final Optional<Endpoint> resultEndpoint = flinkKubeClient.getRestEndpoint(CLUSTER_ID);
    assertThat(resultEndpoint).isNotPresent();
}

@Override
public void onTaskFinished(TaskAttachment attachment) {
    if (attachment instanceof BrokerPendingTaskAttachment) {
        onPendingTaskFinished((BrokerPendingTaskAttachment) attachment);
    } else if (attachment instanceof BrokerLoadingTaskAttachment) {
        onLoadingTaskFinished((BrokerLoadingTaskAttachment) attachment);
    }
}

@Test
public void testLoadingTaskOnFinishedWithUnfinishedTask(@Injectable BrokerLoadingTaskAttachment attachment,
                                                        @Injectable LoadTask loadTask1,
                                                        @Injectable LoadTask loadTask2) {
    BrokerLoadJob brokerLoadJob = new BrokerLoadJob();
    Deencapsulation.setField(brokerLoadJob, "state", JobState.LOADING);
    Map<Long, LoadTask> idToTasks = Maps.newHashMap();
    idToTasks.put(1L, loadTask1);
    idToTasks.put(2L, loadTask2);
    Deencapsulation.setField(brokerLoadJob, "idToTasks", idToTasks);
    new Expectations() {
        {
            attachment.getCounter(BrokerLoadJob.DPP_NORMAL_ALL);
            minTimes = 0;
            result = 10;
            attachment.getCounter(BrokerLoadJob.DPP_ABNORMAL_ALL);
            minTimes = 0;
            result = 1;
            attachment.getTaskId();
            minTimes = 0;
            result = 1L;
        }
    };

    brokerLoadJob.onTaskFinished(attachment);
    Set<Long> finishedTaskIds = Deencapsulation.getField(brokerLoadJob, "finishedTaskIds");
    Assert.assertEquals(1, finishedTaskIds.size());
    EtlStatus loadingStatus = Deencapsulation.getField(brokerLoadJob, "loadingStatus");
    Assert.assertEquals("10", loadingStatus.getCounters().get(BrokerLoadJob.DPP_NORMAL_ALL));
    Assert.assertEquals("1", loadingStatus.getCounters().get(BrokerLoadJob.DPP_ABNORMAL_ALL));
    int progress = Deencapsulation.getField(brokerLoadJob, "progress");
    Assert.assertEquals(50, progress);
}

@VisibleForTesting
public boolean getSizeBasedWeight() {
    return sizeBasedWeight;
}

@Test
public void testSizeBasedWeightNotAffectAppActivation() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();

    // Define top-level queues
    String defaultPath = CapacitySchedulerConfiguration.ROOT + ".default";
    QueuePath queuePath = new QueuePath(defaultPath);
    csConf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
    csConf.setOrderingPolicy(queuePath, CapacitySchedulerConfiguration.FAIR_APP_ORDERING_POLICY);
    csConf.setOrderingPolicyParameter(queuePath, FairOrderingPolicy.ENABLE_SIZE_BASED_WEIGHT, "true");
    csConf.setMaximumApplicationMasterResourcePerQueuePercent(queuePath, 0.1f);

    // inject node label manager
    MockRM rm = new MockRM(csConf);
    rm.start();

    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

    // Get LeafQueue
    LeafQueue lq = (LeafQueue) cs.getQueue("default");
    OrderingPolicy<FiCaSchedulerApp> policy = lq.getOrderingPolicy();
    Assert.assertTrue(policy instanceof FairOrderingPolicy);
    Assert.assertTrue(((FairOrderingPolicy<FiCaSchedulerApp>) policy).getSizeBasedWeight());

    rm.registerNode("h1:1234", 10 * GB);

    // Submit 4 apps
    MockRMAppSubmissionData data3 =
            MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
                    .withAppName("app")
                    .withUser("user")
                    .withAcls(null)
                    .withQueue("default")
                    .withUnmanagedAM(false)
                    .build();
    MockRMAppSubmitter.submit(rm, data3);
    MockRMAppSubmissionData data2 =
            MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
                    .withAppName("app")
                    .withUser("user")
                    .withAcls(null)
                    .withQueue("default")
                    .withUnmanagedAM(false)
                    .build();
    MockRMAppSubmitter.submit(rm, data2);
    MockRMAppSubmissionData data1 =
            MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
                    .withAppName("app")
                    .withUser("user")
                    .withAcls(null)
                    .withQueue("default")
                    .withUnmanagedAM(false)
                    .build();
    MockRMAppSubmitter.submit(rm, data1);
    MockRMAppSubmissionData data =
            MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
                    .withAppName("app")
                    .withUser("user")
                    .withAcls(null)
                    .withQueue("default")
                    .withUnmanagedAM(false)
                    .build();
    MockRMAppSubmitter.submit(rm, data);

    Assert.assertEquals(1, lq.getNumActiveApplications());
    Assert.assertEquals(3, lq.getNumPendingApplications());

    // Try allocate once, #active-apps and #pending-apps should be still correct
    cs.handle(new NodeUpdateSchedulerEvent(
            rm.getRMContext().getRMNodes().get(NodeId.newInstance("h1", 1234))));
    Assert.assertEquals(1, lq.getNumActiveApplications());
    Assert.assertEquals(3, lq.getNumPendingApplications());
}