focal_method (string, length 13–60.9k)
test_case (string, length 25–109k)
public final BarcodeParameters getParams() { return params; }
@Test final void testGetParams() throws IOException { try (BarcodeDataFormat instance = new BarcodeDataFormat()) { BarcodeParameters result = instance.getParams(); assertNotNull(result); } }
@VisibleForTesting Set<PartitionFieldNode> getRoots() { return roots; }
@Test public void testProjectionCompaction() throws MetaException { PersistenceManager mockPm = Mockito.mock(PersistenceManager.class); List<String> projectionFields = new ArrayList<>(2); projectionFields.add("sd.location"); projectionFields.add("sd.parameters"); projectionFields.add("createTime"); projectionFields.add("sd"); PartitionProjectionEvaluator projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Set<PartitionFieldNode> roots = projectionEvaluator.getRoots(); Assert.assertFalse("sd.location should not be contained since it is already included in sd", roots.contains(new PartitionFieldNode("sd.location"))); Assert.assertFalse("sd.parameters should not be contained since it is already included in sd", roots.contains(new PartitionFieldNode("sd.parameters"))); }
public abstract int status(HttpServletResponse response);
@Test void servlet25_status_cached_laterThrows() { HttpServletResponseImpl response = new HttpServletResponseImpl(); servlet25.status(response); response.shouldThrow = true; assertThat(servlet25.status(response)) .isEqualTo(0); }
@Override public List<String> listDbNames() { return ImmutableList.<String>builder() .addAll(this.normal.listDbNames()) .addAll(this.informationSchema.listDbNames()) .build(); }
@Test void testListDbNames(@Mocked ConnectorMetadata connectorMetadata) { new Expectations() { { connectorMetadata.listDbNames(); result = ImmutableList.of("test_db1", "test_db2"); times = 1; } }; CatalogConnectorMetadata catalogConnectorMetadata = new CatalogConnectorMetadata( connectorMetadata, informationSchemaMetadata, metaMetadata ); List<String> dbNames = catalogConnectorMetadata.listDbNames(); List<String> expected = ImmutableList.of("test_db1", "test_db2", InfoSchemaDb.DATABASE_NAME); assertEquals(expected, dbNames); }
String replaceMatchesWithSpace(String name) { return replaceMatchWith(name, " "); }
@Test void replaceMatchWithSpace() { assertThat(argumentPattern.replaceMatchesWithSpace("4"), is(equalTo(" "))); }
@Override public void warn(final Host bookmark, final String title, final String reason, final String defaultButton, final String cancelButton, final String preference) throws ConnectionCanceledException { console.printf("%n%s", reason); if(!prompt.prompt(String.format("%s (y) or %s (n): ", defaultButton, cancelButton))) { // Switch protocol throw new LoginCanceledException(); } }
@Test(expected = LoginCanceledException.class) public void testWarn() throws Exception { new TerminalLoginCallback(new TerminalPromptReader() { @Override public boolean prompt(final String message) { return false; } }).warn(new Host(new TestProtocol()), "", "", "", "", ""); }
public static <T> T nullIsIllegal(T item, String message) { if (item == null) { log.error(message); throw new IllegalArgumentException(message); } return item; }
@Test public void testNullIsIllegal() { String input = "Foo"; String output = Tools.nullIsIllegal(input, "Not found!"); assertEquals(input, output); assertSame(input, output); }
@Override public Iterator<Map.Entry<String, Object>> getIterator() { return variables.getIterator(); }
@Test public void testGetIterator() { assertThat(iteratorToMap(unmodifiables.getIterator()), CoreMatchers.is(iteratorToMap(vars.getIterator()))); }
public int getAppTimeoutsFailedRetrieved() { return numGetAppTimeoutsFailedRetrieved.value(); }
@Test public void testGetAppTimeoutsRetrievedFailed() { long totalBadBefore = metrics.getAppTimeoutsFailedRetrieved(); badSubCluster.getAppTimeoutsFailed(); Assert.assertEquals(totalBadBefore + 1, metrics.getAppTimeoutsFailedRetrieved()); }
@Override public void updateProject(GoViewProjectUpdateReqVO updateReqVO) { // Validate that the project exists validateProjectExists(updateReqVO.getId()); // Perform the update GoViewProjectDO updateObj = GoViewProjectConvert.INSTANCE.convert(updateReqVO); goViewProjectMapper.updateById(updateObj); }
@Test public void testUpdateProject_success() { // Mock data GoViewProjectDO dbGoViewProject = randomPojo(GoViewProjectDO.class); goViewProjectMapper.insert(dbGoViewProject); // @Sql: first insert an existing record // Prepare parameters GoViewProjectUpdateReqVO reqVO = randomPojo(GoViewProjectUpdateReqVO.class, o -> { o.setId(dbGoViewProject.getId()); // Set the ID to update o.setStatus(randomCommonStatus()); }); // Invoke goViewProjectService.updateProject(reqVO); // Verify the update is correct GoViewProjectDO goViewProject = goViewProjectMapper.selectById(reqVO.getId()); // Fetch the latest record assertPojoEquals(reqVO, goViewProject); }
public String getPrivateKeyAsHex() { return ByteUtils.formatHex(getPrivKeyBytes()); }
@Test public void testGetPrivateKeyAsHex() { ECKey key = ECKey.fromPrivate(BigInteger.TEN).decompress(); // An example private key. assertEquals("000000000000000000000000000000000000000000000000000000000000000a", key.getPrivateKeyAsHex()); }
public void setScheduledTaskQueueCapacity(int scheduledTaskQueueCapacity) { this.scheduledTaskQueueCapacity = checkPositive(scheduledTaskQueueCapacity, "scheduledTaskQueueCapacity"); }
@Test public void test_setScheduledTaskQueueCapacity_whenZero() { ReactorBuilder builder = newBuilder(); assertThrows(IllegalArgumentException.class, () -> builder.setScheduledTaskQueueCapacity(0)); }
@Override public void execute(final ConnectionSession connectionSession) { VariableAssignSegment variableAssignSegment = setStatement.getVariableAssigns().iterator().next(); String variableName = variableAssignSegment.getVariable().getVariable().toLowerCase(); String assignValue = variableAssignSegment.getAssignValue(); new CharsetSetExecutor(databaseType, connectionSession).set(variableName, assignValue); new SessionVariableRecordExecutor(databaseType, connectionSession).recordVariable(variableName, assignValue); }
@Test void assertExecute() { VariableAssignSegment variableAssignSegment = new VariableAssignSegment(); VariableSegment variable = new VariableSegment(0, 0, "key"); variableAssignSegment.setVariable(variable); variableAssignSegment.setAssignValue("value"); PostgreSQLSetStatement setStatement = new PostgreSQLSetStatement(); setStatement.getVariableAssigns().add(variableAssignSegment); PostgreSQLSetVariableAdminExecutor executor = new PostgreSQLSetVariableAdminExecutor(setStatement); ConnectionSession connectionSession = mock(ConnectionSession.class); RequiredSessionVariableRecorder requiredSessionVariableRecorder = mock(RequiredSessionVariableRecorder.class); when(connectionSession.getRequiredSessionVariableRecorder()).thenReturn(requiredSessionVariableRecorder); try (MockedStatic<DatabaseTypedSPILoader> databaseTypedSPILoader = mockStatic(DatabaseTypedSPILoader.class)) { ReplayedSessionVariableProvider replayedSessionVariableProvider = mock(ReplayedSessionVariableProvider.class); when(replayedSessionVariableProvider.isNeedToReplay("key")).thenReturn(true); databaseTypedSPILoader.when(() -> DatabaseTypedSPILoader.findService(ReplayedSessionVariableProvider.class, databaseType)).thenReturn(Optional.of(replayedSessionVariableProvider)); executor.execute(connectionSession); verify(requiredSessionVariableRecorder).setVariable("key", "value"); } }
public static void setFillInOutsideScopeExceptionStacktraces(boolean fillInStacktrace) { if (lockdown) { throw new IllegalStateException("Plugins can't be changed anymore"); } fillInOutsideScopeExceptionStacktraces = fillInStacktrace; }
@Test public void trueStacktraceFill_shouldHaveStacktrace() { AutoDisposePlugins.setFillInOutsideScopeExceptionStacktraces(true); OutsideScopeException started = new OutsideScopeException("Lifecycle not started"); assertThat(started.getStackTrace()).isNotEmpty(); }
@Override @Nonnull public <T> List<Future<T>> invokeAll(@Nonnull Collection<? extends Callable<T>> tasks) { throwRejectedExecutionExceptionIfShutdown(); ArrayList<Future<T>> result = new ArrayList<>(); for (Callable<T> task : tasks) { try { result.add(new CompletedFuture<>(task.call(), null)); } catch (Exception e) { result.add(new CompletedFuture<>(null, e)); } } return result; }
@Test void testRejectedInvokeAnyWithEmptyListAndTimeout() { testRejectedExecutionException( testInstance -> testInstance.invokeAll(Collections.emptyList(), 1L, TimeUnit.DAYS)); }
public static List<Integer> getIntegerList(String property, JsonNode node) { Preconditions.checkArgument(node.has(property), "Cannot parse missing list: %s", property); return ImmutableList.<Integer>builder() .addAll(new JsonIntegerArrayIterator(property, node)) .build(); }
@Test public void getIntegerList() throws JsonProcessingException { assertThatThrownBy(() -> JsonUtil.getIntegerList("items", JsonUtil.mapper().readTree("{}"))) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse missing list: items"); assertThatThrownBy( () -> JsonUtil.getIntegerList("items", JsonUtil.mapper().readTree("{\"items\": null}"))) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse JSON array from non-array value: items: null"); assertThatThrownBy( () -> JsonUtil.getIntegerList( "items", JsonUtil.mapper().readTree("{\"items\": [13, \"23\"]}"))) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse integer from non-int value in items: \"23\""); List<Integer> items = Arrays.asList(23, 45); assertThat( JsonUtil.getIntegerList("items", JsonUtil.mapper().readTree("{\"items\": [23, 45]}"))) .isEqualTo(items); String json = JsonUtil.generate( gen -> { gen.writeStartObject(); JsonUtil.writeIntegerArray("items", items, gen); gen.writeEndObject(); }, false); assertThat(JsonUtil.getIntegerList("items", JsonUtil.mapper().readTree(json))).isEqualTo(items); }
public static void boundsCheck(int capacity, int index, int length) { if (capacity < 0 || index < 0 || length < 0 || (index > (capacity - length))) { throw new IndexOutOfBoundsException(String.format("index=%d, length=%d, capacity=%d", index, length, capacity)); } }
@Test(expected = IndexOutOfBoundsException.class) public void boundsCheck_whenIndexSmallerThanZero() { ArrayUtils.boundsCheck(100, -1, 110); }
@Override public void close() throws IOException { if (closed) { return; } super.close(); closed = true; stream.close(); }
@Test public void testMultipleClose() throws IOException { GCSOutputStream stream = new GCSOutputStream(storage, randomBlobId(), properties, MetricsContext.nullMetrics()); stream.close(); stream.close(); }
public void notifyObservers() { synchronized (this) { /* We don't want the Observer doing callbacks into * arbitrary code while holding its own Monitor. * The code where we extract each Observable from * the Vector and store the state of the Observer * needs synchronization, but notifying observers * does not (should not). The worst result of any * potential race-condition here is that: * 1) a newly-added Observer will miss a * notification in progress * 2) a recently unregistered Observer will be * wrongly notified when it doesn't care */ if (!changed) { return; } clearChanged(); } for (Observer observer : obs) { observer.update(this); } }
@Test void testNotifyObservers() { observable.addObserver(observer); reset(observer); observable.notifyObservers(); assertFalse(observable.hasChanged()); verify(observer, never()).update(observable); observable.setChanged(); assertTrue(observable.hasChanged()); observable.notifyObservers(); verify(observer).update(observable); assertFalse(observable.hasChanged()); }
@Override public void deleteGlobalProperty(String key, DbSession session) { // do nothing }
@Test public void deleteGlobalProperty() { underTest.deleteGlobalProperty(null, dbSession); assertNoInteraction(); }
@SuppressWarnings("squid:S1181") // Yes we really do want to catch Throwable @Override public V apply(U input) { int retryAttempts = 0; while (true) { try { return baseFunction.apply(input); } catch (Throwable t) { if (!exceptionClass.isAssignableFrom(t.getClass()) || retryAttempts == maxRetries) { Throwables.throwIfUnchecked(t); throw new RetriesExceededException(t); } Tools.randomDelay(maxDelayBetweenRetries); retryAttempts++; } } }
@Test(expected = RetryableException.class) public void testNoRetries() { new RetryingFunction<>(this::succeedAfterOneFailure, RetryableException.class, 0, 10).apply(null); }
@Override public boolean supports(Job job) { JobDetails jobDetails = job.getJobDetails(); return jobDetails.hasStaticFieldName(); }
@Test void doesNotSupportJobIfJobIsNotAStaticMethodCall() { Job job = anEnqueuedJob() .withJobDetails(defaultJobDetails()) .build(); assertThat(backgroundStaticFieldJobWithoutIocRunner.supports(job)).isFalse(); }
public RatingValue increment(Rating rating) { if (value.compareTo(rating) > 0) { value = rating; } this.set = true; return this; }
@Test public void multiple_calls_to_increment_increments_by_the_value_of_the_arg() { RatingValue target = new RatingValue() .increment(new RatingValue().increment(B)) .increment(new RatingValue().increment(D)); verifySetValue(target, D); }
@Override public Optional<String> getLocalHadoopConfigurationDirectory() { final String hadoopConfDirEnv = System.getenv(Constants.ENV_HADOOP_CONF_DIR); if (StringUtils.isNotBlank(hadoopConfDirEnv)) { return Optional.of(hadoopConfDirEnv); } final String hadoopHomeEnv = System.getenv(Constants.ENV_HADOOP_HOME); if (StringUtils.isNotBlank(hadoopHomeEnv)) { // Hadoop 2.2+ final File hadoop2ConfDir = new File(hadoopHomeEnv, "/etc/hadoop"); if (hadoop2ConfDir.exists()) { return Optional.of(hadoop2ConfDir.getAbsolutePath()); } // Hadoop 1.x final File hadoop1ConfDir = new File(hadoopHomeEnv, "/conf"); if (hadoop1ConfDir.exists()) { return Optional.of(hadoop1ConfDir.getAbsolutePath()); } } return Optional.empty(); }
@Test void testGetLocalHadoopConfigurationDirectoryFromHadoop2HomeEnv(@TempDir Path temporaryFolder) throws Exception { runTestWithEmptyEnv( () -> { final String hadoopHome = temporaryFolder.toAbsolutePath().toString(); Files.createDirectories(temporaryFolder.resolve(Paths.get("etc", "hadoop"))); setEnv(Constants.ENV_HADOOP_HOME, hadoopHome); final Optional<String> optional = testingKubernetesParameters.getLocalHadoopConfigurationDirectory(); assertThat(optional).isPresent(); assertThat(optional.get()).isEqualTo(hadoopHome + "/etc/hadoop"); }); }
public CompletableFuture<Acknowledge> triggerSavepoint( AsynchronousJobOperationKey operationKey, String targetDirectory, SavepointFormatType formatType, TriggerSavepointMode savepointMode, Time timeout) { return registerOperationIdempotently( operationKey, () -> triggerSavepointFunction.apply( operationKey.getJobId(), targetDirectory, formatType, savepointMode, timeout)); }
@Test public void retryingCompletedOperationDoesNotMarkCacheEntryAsAccessed() throws ExecutionException, InterruptedException { handler.triggerSavepoint( operationKey, targetDirectory, SavepointFormatType.CANONICAL, TriggerSavepointMode.SAVEPOINT, TIMEOUT) .get(); savepointLocationFuture.complete(""); handler.triggerSavepoint( operationKey, targetDirectory, SavepointFormatType.CANONICAL, TriggerSavepointMode.SAVEPOINT, TIMEOUT) .get(); // should not complete because we wait for the result to be accessed assertThat( savepointTriggerCache.closeAsync(), FlinkMatchers.willNotComplete(Duration.ofMillis(10))); }
public static <K> KStreamHolder<K> build( final KStreamHolder<K> left, final KTableHolder<K> right, final StreamTableJoin<K> join, final RuntimeBuildContext buildContext, final JoinedFactory joinedFactory ) { final Formats leftFormats = join.getInternalFormats(); final QueryContext queryContext = join.getProperties().getQueryContext(); final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext); final LogicalSchema leftSchema = left.getSchema(); final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from( leftSchema, leftFormats.getKeyFeatures(), leftFormats.getValueFeatures() ); final Serde<GenericRow> leftSerde = buildContext.buildValueSerde( leftFormats.getValueFormat(), leftPhysicalSchema, stacker.push(SERDE_CTX).getQueryContext() ); final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde( leftFormats.getKeyFormat(), leftPhysicalSchema, queryContext ); final Joined<K, GenericRow, GenericRow> joined = joinedFactory.create( keySerde, leftSerde, null, StreamsUtil.buildOpName(queryContext) ); final LogicalSchema rightSchema = right.getSchema(); final JoinParams joinParams = JoinParamsFactory .create(join.getKeyColName(), leftSchema, rightSchema); final KStream<K, GenericRow> result; switch (join.getJoinType()) { case LEFT: result = left.getStream().leftJoin(right.getTable(), joinParams.getJoiner(), joined); break; case INNER: result = left.getStream().join(right.getTable(), joinParams.getJoiner(), joined); break; default: throw new IllegalStateException("invalid join type"); } return left.withStream(result, joinParams.getSchema()); }
@Test public void shouldBuildJoinedCorrectly() { // Given: givenInnerJoin(L_KEY); // When: join.build(planBuilder, planInfo); // Then: verify(joinedFactory).create(keySerde, leftSerde, null, "jo-in"); }
public static FileInputStream getFileInputStream(String fileName) throws IOException { File sourceFile = getFile(fileName); return new FileInputStream(sourceFile); }
@Test public void getFileInputStreamNotExisting() { assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> FileUtils.getFileInputStream(NOT_EXISTING_FILE)); }
@Override public boolean isWarProject() { String packaging = project.getPackaging(); return "war".equals(packaging) || "gwt-app".equals(packaging); }
@Test public void testIsWarProject_warPackagingIsWar() { when(mockMavenProject.getPackaging()).thenReturn("war"); assertThat(mavenProjectProperties.isWarProject()).isTrue(); }
@Override public boolean betterThan(Num criterionValue1, Num criterionValue2) { // because it represents a loss, VaR is non-positive return criterionValue1.isGreaterThan(criterionValue2); }
@Test public void betterThan() { AnalysisCriterion criterion = getCriterion(); assertTrue(criterion.betterThan(numOf(-0.1), numOf(-0.2))); assertFalse(criterion.betterThan(numOf(-0.1), numOf(0.0))); }
public static ParamType getVarArgsSchemaFromType(final Type type) { return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE); }
@Test public void shouldGetObjectSchemaForObjectClassVariadic() { assertThat( UdfUtil.getVarArgsSchemaFromType(Object.class), equalTo(ParamTypes.ANY) ); }
public Set<Long> calculateUsers(DelegateExecution execution, int level) { Assert.isTrue(level > 0, "level must be greater than 0"); // Get the process initiator ProcessInstance processInstance = processInstanceService.getProcessInstance(execution.getProcessInstanceId()); Long startUserId = NumberUtils.parseLong(processInstance.getStartUserId()); // Get the department at the corresponding level DeptRespDTO dept = null; for (int i = 0; i < level; i++) { // Get the department for this level if (dept == null) { dept = getStartUserDept(startUserId); if (dept == null) { // The initiator's department cannot be found, so this rule cannot be applied return emptySet(); } } else { DeptRespDTO parentDept = deptApi.getDept(dept.getParentId()); if (parentDept == null) { // No parent department found, so stop searching; e.g. higher-ranked users sit in shallower department hierarchies break; } dept = parentDept; } } return dept.getLeaderUserId() != null ? asSet(dept.getLeaderUserId()) : emptySet(); }
@Test public void testCalculateUsers_noParentDept() { // Prepare parameters DelegateExecution execution = mockDelegateExecution(1L); // Mock method (startUser) AdminUserRespDTO startUser = randomPojo(AdminUserRespDTO.class, o -> o.setDeptId(10L)); when(adminUserApi.getUser(eq(1L))).thenReturn(startUser); DeptRespDTO startUserDept = randomPojo(DeptRespDTO.class, o -> o.setId(10L).setParentId(100L) .setLeaderUserId(20L)); // Mock method (getDept) when(deptApi.getDept(eq(10L))).thenReturn(startUserDept); when(deptApi.getDept(eq(100L))).thenReturn(null); // Invoke Set<Long> result = expression.calculateUsers(execution, 2); // Assert assertEquals(asSet(20L), result); }
@Override public void replay( long offset, long producerId, short producerEpoch, CoordinatorRecord record ) throws RuntimeException { ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); switch (key.version()) { case 0: case 1: offsetMetadataManager.replay( offset, producerId, (OffsetCommitKey) key.message(), (OffsetCommitValue) Utils.messageOrNull(value) ); break; case 2: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) Utils.messageOrNull(value) ); break; case 3: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) Utils.messageOrNull(value) ); break; case 4: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 5: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 6: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 7: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 8: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; case 9: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 10: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 11: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) Utils.messageOrNull(value) ); break; case 12: groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 13: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 14: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; default: throw new IllegalStateException("Received an unknown record type " + key.version() + " in " + record); } }
@Test public void testReplayShareGroupMemberMetadata() { GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class); OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class); CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); GroupCoordinatorShard coordinator = new GroupCoordinatorShard( new LogContext(), groupMetadataManager, offsetMetadataManager, Time.SYSTEM, new MockCoordinatorTimer<>(Time.SYSTEM), mock(GroupCoordinatorConfig.class), coordinatorMetrics, metricsShard ); ShareGroupMemberMetadataKey key = new ShareGroupMemberMetadataKey(); ShareGroupMemberMetadataValue value = new ShareGroupMemberMetadataValue(); coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( new ApiMessageAndVersion(key, (short) 10), new ApiMessageAndVersion(value, (short) 0) )); verify(groupMetadataManager, times(1)).replay(key, value); }
@Override public boolean add(PipelineConfig pipelineConfig) { verifyUniqueName(pipelineConfig); PipelineConfigs part = this.getFirstEditablePartOrNull(); if (part == null) throw bomb("No editable configuration sources"); return part.add(pipelineConfig); }
@Test public void shouldBombWhenAddPipelineAndNoEditablePartExists() { PipelineConfig pipe1 = PipelineConfigMother.pipelineConfig("pipeline1"); BasicPipelineConfigs part1 = new BasicPipelineConfigs(pipe1); MergePipelineConfigs group = new MergePipelineConfigs( part1, new BasicPipelineConfigs()); PipelineConfig pipeline2 = PipelineConfigMother.pipelineConfig("pipeline2"); try { group.add(pipeline2); } catch (Exception ex) { assertThat(ex.getMessage(), is("No editable configuration sources")); return; } fail("exception not thrown"); }
Number evaluateOutlierValue(final Number input) { switch (outlierTreatmentMethod) { case AS_IS: KiePMMLLinearNorm[] limitLinearNorms; if (input.doubleValue() < firstLinearNorm.getOrig()) { limitLinearNorms = linearNorms.subList(0, 2).toArray(new KiePMMLLinearNorm[0]); } else { limitLinearNorms = linearNorms.subList(linearNorms.size() - 2, linearNorms.size()).toArray(new KiePMMLLinearNorm[0]); } return evaluate(input, limitLinearNorms); case AS_MISSING_VALUES: return mapMissingTo; case AS_EXTREME_VALUES: return input.doubleValue() < firstLinearNorm.getOrig() ? firstLinearNorm.getNorm() : lastLinearNorm.getNorm(); default: throw new KiePMMLException("Unknown outlierTreatmentMethod " + outlierTreatmentMethod); } }
@Test void evaluateOutlierValueAsIs() { KiePMMLNormContinuous kiePMMLNormContinuous = getKiePMMLNormContinuous(null, OUTLIER_TREATMENT_METHOD.AS_IS, null); Number input = 23; Number retrieved = kiePMMLNormContinuous.evaluateOutlierValue(input); Number expected = kiePMMLNormContinuous.linearNorms.get(0).getNorm() + ((input.doubleValue() - kiePMMLNormContinuous.linearNorms.get(0).getOrig()) / (kiePMMLNormContinuous.linearNorms.get(1).getOrig() - kiePMMLNormContinuous.linearNorms.get(0).getOrig())) * (kiePMMLNormContinuous.linearNorms.get(1).getNorm() - kiePMMLNormContinuous.linearNorms.get(0).getNorm()); assertThat(retrieved).isEqualTo(expected); input = 41; retrieved = kiePMMLNormContinuous.evaluateOutlierValue(input); expected = kiePMMLNormContinuous.linearNorms.get(2).getNorm() + ((input.doubleValue() - kiePMMLNormContinuous.linearNorms.get(2).getOrig()) / (kiePMMLNormContinuous.linearNorms.get(3).getOrig() - kiePMMLNormContinuous.linearNorms.get(2).getOrig())) * (kiePMMLNormContinuous.linearNorms.get(3).getNorm() - kiePMMLNormContinuous.linearNorms.get(2).getNorm()); assertThat(retrieved).isEqualTo(expected); }
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
@Test public void testFetchWithTopicId() { buildFetcher(); TopicIdPartition tp = new TopicIdPartition(topicId, new TopicPartition(topicName, 0)); assignFromUser(singleton(tp.topicPartition())); subscriptions.seek(tp.topicPartition(), 0); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); // Fetch should use latest version client.prepareResponse( fetchRequestMatcher(ApiKeys.FETCH.latestVersion(), tp, 0, Optional.of(validLeaderEpoch)), fullFetchResponse(tp, records, Errors.NONE, 100L, 0) ); consumerClient.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp.topicPartition())); List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp.topicPartition()); assertEquals(3, records.size()); assertEquals(4L, subscriptions.position(tp.topicPartition()).offset); // this is the next fetching position long offset = 1; for (ConsumerRecord<byte[], byte[]> record : records) { assertEquals(offset, record.offset()); offset += 1; } }
public HttpHeaders preflightResponseHeaders() { if (preflightHeaders.isEmpty()) { return EmptyHttpHeaders.INSTANCE; } final HttpHeaders preflightHeaders = new DefaultHttpHeaders(); for (Entry<CharSequence, Callable<?>> entry : this.preflightHeaders.entrySet()) { final Object value = getValue(entry.getValue()); if (value instanceof Iterable) { preflightHeaders.add(entry.getKey(), (Iterable<?>) value); } else { preflightHeaders.add(entry.getKey(), value); } } return preflightHeaders; }
@Test public void preflightResponseHeadersSingleValue() { final CorsConfig cors = forAnyOrigin().preflightResponseHeader("SingleValue", "value").build(); assertThat(cors.preflightResponseHeaders().get(of("SingleValue")), equalTo("value")); }
@Override public AnalyticsPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) { Capabilities capabilities = capabilities(descriptor.id()); PluggableInstanceSettings pluginSettingsAndView = getPluginSettingsAndView(descriptor, extension); Image image = image(descriptor.id()); return new AnalyticsPluginInfo(descriptor, image, capabilities, pluginSettingsAndView); }
@Test public void shouldBuildPluginInfoWithImage() { GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build(); Image icon = new Image("content_type", "data", "hash"); when(extension.getIcon(descriptor.id())).thenReturn(icon); AnalyticsPluginInfo pluginInfo = new AnalyticsPluginInfoBuilder(extension).pluginInfoFor(descriptor); assertThat(pluginInfo.getImage(), is(icon)); }
public ImmutableList<Replacement> format( SnippetKind kind, String source, List<Range<Integer>> ranges, int initialIndent, boolean includeComments) throws FormatterException { RangeSet<Integer> rangeSet = TreeRangeSet.create(); for (Range<Integer> range : ranges) { rangeSet.add(range); } if (includeComments) { if (kind != SnippetKind.COMPILATION_UNIT) { throw new IllegalArgumentException( "comment formatting is only supported for compilation units"); } return formatter.getFormatReplacements(source, ranges); } SnippetWrapper wrapper = snippetWrapper(kind, source, initialIndent); ranges = offsetRanges(ranges, wrapper.offset); String replacement = formatter.formatSource(wrapper.contents.toString(), ranges); replacement = replacement.substring( wrapper.offset, replacement.length() - (wrapper.contents.length() - wrapper.offset - source.length())); return toReplacements(source, replacement).stream() .filter(r -> rangeSet.encloses(r.getReplaceRange())) .collect(toImmutableList()); }
@Test public void classMember() throws FormatterException { String input = "void f() {\n}"; List<Replacement> replacements = new SnippetFormatter() .format( SnippetKind.CLASS_BODY_DECLARATIONS, input, ImmutableList.of(Range.closedOpen(0, input.length())), 4, false); assertThat(replacements).containsExactly(Replacement.create(10, 11, "")); }
@Override public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) { if (client.getId() != null) { // if it's not null, it's already been saved, this is an error throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId()); } if (client.getRegisteredRedirectUri() != null) { for (String uri : client.getRegisteredRedirectUri()) { if (blacklistedSiteService.isBlacklisted(uri)) { throw new IllegalArgumentException("Client URI is blacklisted: " + uri); } } } // assign a random clientid if it's empty // NOTE: don't assign a random client secret without asking, since public clients have no secret if (Strings.isNullOrEmpty(client.getClientId())) { client = generateClientId(client); } // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa ensureRefreshTokenConsistency(client); // make sure we don't have both a JWKS and a JWKS URI ensureKeyConsistency(client); // check consistency when using HEART mode checkHeartMode(client); // timestamp this to right now client.setCreatedAt(new Date()); // check the sector URI checkSectorIdentifierUri(client); ensureNoReservedScopes(client); ClientDetailsEntity c = clientRepository.saveClient(client); statsService.resetCache(); return c; }
@Test(expected = IllegalArgumentException.class) public void heartMode_implicit_authMethod() { Mockito.when(config.isHeartMode()).thenReturn(true); ClientDetailsEntity client = new ClientDetailsEntity(); Set<String> grantTypes = new LinkedHashSet<>(); grantTypes.add("implicit"); client.setGrantTypes(grantTypes); client.setTokenEndpointAuthMethod(AuthMethod.PRIVATE_KEY); client.setRedirectUris(Sets.newHashSet("https://foo.bar/")); client.setJwksUri("https://foo.bar/jwks"); service.saveNewClient(client); }
@Override public void revert(final Path file) throws BackgroundException { try { new NodesApi(session.getClient()).restoreNodes( new RestoreDeletedNodesRequest() .resolutionStrategy(RestoreDeletedNodesRequest.ResolutionStrategyEnum.OVERWRITE) .keepShareLinks(new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep")) .addDeletedNodeIdsItem(Long.parseLong(nodeid.getVersionId(file))) .parentId(Long.parseLong(nodeid.getVersionId(file.getParent()))), StringUtils.EMPTY); } catch(ApiException e) { throw new SDSExceptionMappingService(nodeid).map("Failure to write attributes of {0}", e, file); } }
@Test public void testRevert() throws Exception { final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path test = new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); final PathAttributes initialAttributes = new PathAttributes(test.attributes()); final String initialVersion = test.attributes().getVersionId(); final SDSVersioningFeature feature = new SDSVersioningFeature(session, nodeid); { final byte[] content = RandomUtils.nextBytes(32769); final TransferStatus status = new TransferStatus(); status.setLength(content.length); status.setExists(true); final SDSDirectS3MultipartWriteFeature writer = new SDSDirectS3MultipartWriteFeature(session, nodeid); final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); assertNotNull(test.attributes().getVersionId()); assertEquals(initialAttributes.getFileId(), test.attributes().getFileId()); assertNotEquals(initialVersion, test.attributes().getVersionId()); final AttributedList<Path> versions = feature.list(test, new DisabledListProgressListener()); assertEquals(1, versions.size()); assertEquals(new Path(test).withAttributes(initialAttributes), versions.get(0)); assertTrue(new SDSFindFeature(session, nodeid).find(versions.get(0))); assertEquals(initialVersion, new SDSAttributesFinderFeature(session, nodeid).find(versions.get(0)).getVersionId()); } { final byte[] content = RandomUtils.nextBytes(2378); final TransferStatus status = new TransferStatus(); status.setLength(content.length); status.setExists(true); final SDSDirectS3MultipartWriteFeature writer = new SDSDirectS3MultipartWriteFeature(session, nodeid); final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); assertNotNull(test.attributes().getVersionId()); assertNotEquals(initialVersion, test.attributes().getVersionId()); final AttributedList<Path> versions = feature.list(test, new DisabledListProgressListener()); assertEquals(2, versions.size()); assertEquals(32769, versions.get(0).attributes().getSize()); assertEquals(0, versions.get(1).attributes().getSize()); assertEquals(initialVersion, new SDSAttributesFinderFeature(session, nodeid).find(versions.get(1)).getVersionId()); assertTrue(new SDSFindFeature(session, nodeid).find(versions.get(0))); assertTrue(new SDSFindFeature(session, nodeid).find(versions.get(1))); } feature.revert(new Path(test).withAttributes(initialAttributes)); final Path reverted = new SDSListService(session, nodeid).list(room, new DisabledListProgressListener()).find(new DefaultPathPredicate(new Path(test).withAttributes(initialAttributes))); assertEquals(initialVersion, reverted.attributes().getVersionId()); // Restored file is no longer in list of deleted items assertEquals(2, feature.list(reverted, new DisabledListProgressListener()).size()); // Permanently delete trashed version new SDSDeleteFeature(session, nodeid).delete(feature.list(test, new DisabledListProgressListener()).toList(), new DisabledPasswordCallback(), new Delete.DisabledCallback()); new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static boolean isKafkaInvokeBySermant(StackTraceElement[] stackTrace) { return isInvokeBySermant(KAFKA_CONSUMER_CLASS_NAME, KAFKA_CONSUMER_CONTROLLER_CLASS_NAME, stackTrace); }
@Test public void testNotInvokeBySermantWithoutKafkaInvocation() { StackTraceElement[] stackTrace = new StackTraceElement[3]; stackTrace[0] = new StackTraceElement("testClass0", "testMethod0", "testFileName0", 0); stackTrace[1] = new StackTraceElement("testClass1", "testMethod1", "testFileName1", 1); stackTrace[2] = new StackTraceElement("testClass2", "subscribe", "testFileName2", 2); Assert.assertFalse(InvokeUtils.isKafkaInvokeBySermant(stackTrace)); }
public void start(long period, TimeUnit unit) { start(period, period, unit); }
@Test public void shouldStartWithSpecifiedInitialDelay() throws Exception { reporterWithCustomMockExecutor.start(350, 100, TimeUnit.MILLISECONDS); verify(mockExecutor).scheduleWithFixedDelay( any(Runnable.class), eq(350L), eq(100L), eq(TimeUnit.MILLISECONDS) ); }
public FEELFnResult<TemporalAmount> invoke(@ParameterName( "from" ) String val) { if ( val == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } try { // try to parse as days/hours/minute/seconds return FEELFnResult.ofResult( Duration.parse( val ) ); } catch( DateTimeParseException e ) { // if it failed, try to parse as years/months try { return FEELFnResult.ofResult(ComparablePeriod.parse(val).normalized()); } catch( DateTimeParseException e2 ) { // failed to parse, so return null according to the spec return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "date-parsing exception", new RuntimeException(new Throwable() { public final List<Throwable> causes = Arrays.asList( new Throwable[]{e, e2} ); } ))); } } }
@Test void invokeParamTemporalDuration() { FunctionTestUtil.assertResult( durationFunction.invoke(Duration.parse("P2DT3H28M15S")), Duration.of(2, ChronoUnit.DAYS).plusHours(3).plusMinutes(28).plusSeconds(15)); }
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat( String groupId, String memberId, int memberEpoch, String instanceId, String rackId, int rebalanceTimeoutMs, String clientId, String clientHost, List<String> subscribedTopicNames, String assignorName, List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions ) throws ApiException { final long currentTimeMs = time.milliseconds(); final List<CoordinatorRecord> records = new ArrayList<>(); // Get or create the consumer group. boolean createIfNotExists = memberEpoch == 0; final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records); throwIfConsumerGroupIsFull(group, memberId); // Get or create the member. if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString(); final ConsumerGroupMember member; if (instanceId == null) { member = getOrMaybeSubscribeDynamicConsumerGroupMember( group, memberId, memberEpoch, ownedTopicPartitions, createIfNotExists, false ); } else { member = getOrMaybeSubscribeStaticConsumerGroupMember( group, memberId, memberEpoch, instanceId, ownedTopicPartitions, createIfNotExists, false, records ); } // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition. ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member) .maybeUpdateInstanceId(Optional.ofNullable(instanceId)) .maybeUpdateRackId(Optional.ofNullable(rackId)) .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs)) .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName)) .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames)) .setClientId(clientId) .setClientHost(clientHost) .setClassicMemberMetadata(null) .build(); boolean bumpGroupEpoch = hasMemberSubscriptionChanged( groupId, member, updatedMember, records ); int groupEpoch = group.groupEpoch(); Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata(); Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames(); SubscriptionType subscriptionType = group.subscriptionType(); if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) { // The subscription metadata is updated in two cases: // 1) The member has updated its subscriptions; // 2) The refresh deadline has been reached. subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember); subscriptionMetadata = group.computeSubscriptionMetadata( subscribedTopicNamesMap, metadataImage.topics(), metadataImage.cluster() ); int numMembers = group.numMembers(); if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) { numMembers++; } subscriptionType = ModernGroup.subscriptionType( subscribedTopicNamesMap, numMembers ); if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { log.info("[GroupId {}] Computed new subscription metadata: {}.", groupId, subscriptionMetadata); bumpGroupEpoch = true; records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata)); } if (bumpGroupEpoch) { groupEpoch += 1; records.add(newConsumerGroupEpochRecord(groupId, groupEpoch)); log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); } group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch); } // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between // the existing and the new target assignment is persisted to the partition. final int targetAssignmentEpoch; final Assignment targetAssignment; if (groupEpoch > group.assignmentEpoch()) { targetAssignment = updateTargetAssignment( group, groupEpoch, member, updatedMember, subscriptionMetadata, subscriptionType, records ); targetAssignmentEpoch = groupEpoch; } else { targetAssignmentEpoch = group.assignmentEpoch(); targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId()); } // 3. Reconcile the member's assignment with the target assignment if the member is not // fully reconciled yet. updatedMember = maybeReconcile( groupId, updatedMember, group::currentPartitionEpoch, targetAssignmentEpoch, targetAssignment, ownedTopicPartitions, records ); scheduleConsumerGroupSessionTimeout(groupId, memberId); // Prepare the response. ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData() .setMemberId(updatedMember.memberId()) .setMemberEpoch(updatedMember.memberEpoch()) .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId)); // The assignment is only provided in the following cases: // 1. The member sent a full request. It does so when joining or rejoining the group with zero // as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields // (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request // as those must be set in a full request. // 2. The member's assignment has been updated. boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null); if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) { response.setAssignment(createConsumerGroupResponseAssignment(updatedMember)); } return new CoordinatorResult<>(records, response); }
@Test public void testPartitionAssignorExceptionOnRegularHeartbeat() { String groupId = "fooup"; // Use a static member id as it makes the test easier. String memberId1 = Uuid.randomUuid().toString(); Uuid fooTopicId = Uuid.randomUuid(); String fooTopicName = "foo"; Uuid barTopicId = Uuid.randomUuid(); String barTopicName = "bar"; ConsumerGroupPartitionAssignor assignor = mock(ConsumerGroupPartitionAssignor.class); when(assignor.name()).thenReturn("range"); when(assignor.assign(any(), any())).thenThrow(new PartitionAssignorException("Assignment failed.")); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .withConsumerGroupAssignors(Collections.singletonList(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) .addRacks() .build()) .build(); // Member 1 joins the consumer group. The request fails because the // target assignment computation failed. assertThrows(UnknownServerException.class, () -> context.consumerGroupHeartbeat( new ConsumerGroupHeartbeatRequestData() .setGroupId(groupId) .setMemberId(memberId1) .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .setServerAssignor("range") .setTopicPartitions(Collections.emptyList()))); }
public static Duration parseDuration(String text) { checkNotNull(text); final String trimmed = text.trim(); checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string"); final int len = trimmed.length(); int pos = 0; char current; while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') { pos++; } final String number = trimmed.substring(0, pos); final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US); if (number.isEmpty()) { throw new NumberFormatException("text does not start with a number"); } final BigInteger value; try { value = new BigInteger(number); // this throws a NumberFormatException } catch (NumberFormatException e) { throw new IllegalArgumentException( "The value '" + number + "' cannot be represented as an integer number.", e); } final ChronoUnit unit; if (unitLabel.isEmpty()) { unit = ChronoUnit.MILLIS; } else { unit = LABEL_TO_UNIT_MAP.get(unitLabel); } if (unit == null) { throw new IllegalArgumentException( "Time interval unit label '" + unitLabel + "' does not match any of the recognized units: " + TimeUnit.getAllUnits()); } try { return convertBigIntToDuration(value, unit); } catch (ArithmeticException e) { throw new IllegalArgumentException( "The value '" + number + "' cannot be represented as java.time.Duration (numeric overflow).", e); } }
@Test void testParseDurationHours() { assertThat(TimeUtils.parseDuration("987654h").toHours()).isEqualTo(987654); assertThat(TimeUtils.parseDuration("987654hour").toHours()).isEqualTo(987654); assertThat(TimeUtils.parseDuration("987654hours").toHours()).isEqualTo(987654); assertThat(TimeUtils.parseDuration("987654 h").toHours()).isEqualTo(987654); }
@Override public int getColumnType(final int columnIndex) throws SQLException { return resultSetMetaData.getColumnType(columnIndex); }
@Test void assertGetColumnType() throws SQLException { assertThat(queryResultMetaData.getColumnType(1), is(Types.INTEGER)); }
@Override public void validate(final Analysis analysis) { try { RULES.forEach(rule -> rule.check(analysis)); } catch (final KsqlException e) { throw new KsqlException(e.getMessage() + PULL_QUERY_SYNTAX_HELP, e); } QueryValidatorUtil.validateNoUserColumnsWithSameNameAsPseudoColumns(analysis); }
@Test public void shouldThrowWhenSelectClauseContainsDisallowedColumns() { try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) { //Given: givenSelectClauseWithDisallowedColumnNames(columnExtractor); // When: final Exception e = assertThrows( KsqlException.class, () -> validator.validate(analysis) ); // Then: assertThat(e.getMessage(), containsString("Pull queries don't support the following columns in SELECT clauses: `ROWPARTITION`, `ROWOFFSET`")); } }
public static NestedField toIcebergNestedField( PrestoIcebergNestedField nestedField, Map<String, Integer> columnNameToIdMapping) { return NestedField.of( nestedField.getId(), nestedField.isOptional(), nestedField.getName(), toIcebergType(nestedField.getPrestoType(), nestedField.getName(), columnNameToIdMapping), nestedField.getDoc().orElse(null)); }
@Test(dataProvider = "allTypes") public void testToIcebergNestedField(int id, String name) { // Create a test TypeManager TypeManager typeManager = createTestFunctionAndTypeManager(); // Create a mock Presto Nested Field PrestoIcebergNestedField prestoNestedField = prestoIcebergNestedField(id, name, typeManager); Types.NestedField expectedNestedField = nestedField(id, name); // Convert Presto Nested Field to Iceberg NestedField Types.NestedField nestedField = toIcebergNestedField(prestoNestedField, columnNameToIdMapping(name)); // Check that the result is not null assertNotNull(nestedField); assertEquals(nestedField, expectedNestedField); }
@Override public PolicerId allocatePolicerId() { // Init step DriverHandler handler = handler(); // First step is to get MeterService MeterService meterService = handler.get(MeterService.class); // There was a problem, return none if (meterService == null) { log.warn("MeterService is null"); return PolicerId.NONE; } // Let's get the device id DeviceId deviceId = handler.data().deviceId(); // Double check correspondence between schemas if (!deviceId.uri().getScheme().equals(OF_SCHEME)) { log.warn("The device {} does not seem to be managed by OpenFlow", deviceId); return PolicerId.NONE; } // Get a new meter id MeterId meterId = meterService.allocateMeterId(deviceId); // There was a problem if (meterId == null) { log.warn("MeterService does not provide valid ids"); return PolicerId.NONE; } // Create a policer id from the meter id return getPolicerIdFromMeterId(meterId); }
@Test public void testWrongDevice() { // Get device handler DriverHandler driverHandler = driverService.createHandler(fooDid); // Get policer config behavior PolicerConfigurable policerConfigurable = driverHandler.behaviour(PolicerConfigurable.class); // Get policer id PolicerId policerId = policerConfigurable.allocatePolicerId(); // Assert that is none assertThat(policerId, is(PolicerId.NONE)); }
@CheckForNull @Override public Set<Path> branchChangedFiles(String targetBranchName, Path rootBaseDir) { return Optional.ofNullable((branchChangedFilesWithFileMovementDetection(targetBranchName, rootBaseDir))) .map(GitScmProvider::extractAbsoluteFilePaths) .orElse(null); }
@Test public void branchChangedFiles_when_git_work_tree_is_above_project_basedir() throws IOException, GitAPIException { git.branchCreate().setName("b1").call(); git.checkout().setName("b1").call(); Path projectDir = worktree.resolve("project"); Files.createDirectory(projectDir); createAndCommitFile("project/file-b1"); assertThat(newScmProvider().branchChangedFiles("master", projectDir)) .containsOnly(projectDir.resolve("file-b1")); }
@Override public Column convert(BasicTypeDefine typeDefine) { PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder() .name(typeDefine.getName()) .sourceType(typeDefine.getColumnType()) .nullable(typeDefine.isNullable()) .defaultValue(typeDefine.getDefaultValue()) .comment(typeDefine.getComment()); String pgDataType = typeDefine.getDataType().toLowerCase(); switch (pgDataType) { case PG_BOOLEAN: builder.dataType(BasicType.BOOLEAN_TYPE); break; case PG_BOOLEAN_ARRAY: builder.dataType(ArrayType.BOOLEAN_ARRAY_TYPE); break; case PG_SMALLSERIAL: case PG_SMALLINT: builder.dataType(BasicType.SHORT_TYPE); break; case PG_SMALLINT_ARRAY: builder.dataType(ArrayType.SHORT_ARRAY_TYPE); break; case PG_INTEGER: case PG_SERIAL: builder.dataType(BasicType.INT_TYPE); break; case PG_INTEGER_ARRAY: builder.dataType(ArrayType.INT_ARRAY_TYPE); break; case PG_BIGINT: case PG_BIGSERIAL: builder.dataType(BasicType.LONG_TYPE); break; case PG_BIGINT_ARRAY: builder.dataType(ArrayType.LONG_ARRAY_TYPE); break; case PG_REAL: builder.dataType(BasicType.FLOAT_TYPE); break; case PG_REAL_ARRAY: builder.dataType(ArrayType.FLOAT_ARRAY_TYPE); break; case PG_DOUBLE_PRECISION: builder.dataType(BasicType.DOUBLE_TYPE); break; case PG_DOUBLE_PRECISION_ARRAY: builder.dataType(ArrayType.DOUBLE_ARRAY_TYPE); break; case PG_NUMERIC: DecimalType decimalType; if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) { decimalType = new DecimalType( typeDefine.getPrecision().intValue(), typeDefine.getScale()); } else { decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE); } builder.dataType(decimalType); break; case PG_MONEY: // -92233720368547758.08 to +92233720368547758.07, With the sign bit it's 20, we use // 30 precision to save it DecimalType moneyDecimalType; moneyDecimalType = new DecimalType(30, 2); builder.dataType(moneyDecimalType); builder.columnLength(30L); builder.scale(2); break; case PG_CHAR: case PG_CHARACTER: builder.dataType(BasicType.STRING_TYPE); if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L)); builder.sourceType(pgDataType); } else { builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength())); builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength())); } break; case PG_VARCHAR: case PG_CHARACTER_VARYING: builder.dataType(BasicType.STRING_TYPE); if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.sourceType(pgDataType); } else { builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength())); builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength())); } break; case PG_TEXT: builder.dataType(BasicType.STRING_TYPE); break; case PG_UUID: builder.dataType(BasicType.STRING_TYPE); builder.sourceType(pgDataType); builder.columnLength(128L); break; case PG_JSON: case PG_JSONB: case PG_XML: case PG_GEOMETRY: case PG_GEOGRAPHY: builder.dataType(BasicType.STRING_TYPE); break; case PG_CHAR_ARRAY: case PG_VARCHAR_ARRAY: case PG_TEXT_ARRAY: builder.dataType(ArrayType.STRING_ARRAY_TYPE); break; case PG_BYTEA: builder.dataType(PrimitiveByteArrayType.INSTANCE); break; case PG_DATE: builder.dataType(LocalTimeType.LOCAL_DATE_TYPE); break; case PG_TIME: case PG_TIME_TZ: builder.dataType(LocalTimeType.LOCAL_TIME_TYPE); if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIME_SCALE) { builder.scale(MAX_TIME_SCALE); log.warn( "The scale of time type is larger than {}, it will be truncated to {}", MAX_TIME_SCALE, MAX_TIME_SCALE); } else { builder.scale(typeDefine.getScale()); } break; case PG_TIMESTAMP: case PG_TIMESTAMP_TZ: builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIMESTAMP_SCALE) { builder.scale(MAX_TIMESTAMP_SCALE); log.warn( "The scale of timestamp type is larger than {}, it will be truncated to {}", MAX_TIMESTAMP_SCALE, MAX_TIMESTAMP_SCALE); } else { builder.scale(typeDefine.getScale()); } break; default: throw CommonError.convertToSeaTunnelTypeError( identifier(), typeDefine.getDataType(), typeDefine.getName()); } return builder.build(); }
@Test public void testConvertDouble() { BasicTypeDefine<Object> typeDefine = BasicTypeDefine.builder() .name("test") .columnType("float8") .dataType("float8") .build(); Column column = PostgresTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(BasicType.DOUBLE_TYPE, column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase()); }
@Override public Reader getCharacterStream(final int columnIndex) throws SQLException { // TODO To be supported: encrypt, mask, and so on return mergeResultSet.getCharacterStream(columnIndex); }
@Test void assertGetCharacterStreamWithColumnIndex() throws SQLException { Reader reader = mock(Reader.class); when(mergeResultSet.getCharacterStream(1)).thenReturn(reader); assertThat(shardingSphereResultSet.getCharacterStream(1), is(reader)); }
public static String getPathOf( RepositoryElementMetaInterface object ) { if ( object != null && !object.isDeleted() ) { RepositoryDirectoryInterface directory = object.getRepositoryDirectory(); if ( directory != null ) { String path = directory.getPath(); if ( path != null ) { if ( !path.endsWith( "/" ) ) { path += "/"; } path += object.getName(); return path; } } } return null; }
@Test public void nullObject() { assertNull( getPathOf( null ) ); }
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final CreateFileUploadResponse uploadResponse = upload.start(file, status);
    final String uploadUrl = uploadResponse.getUploadUrl();
    if(StringUtils.isBlank(uploadUrl)) {
        throw new InteroperabilityException("Missing upload URL in server response");
    }
    final String uploadToken = uploadResponse.getToken();
    if(StringUtils.isBlank(uploadToken)) {
        throw new InteroperabilityException("Missing upload token in server response");
    }
    final MultipartUploadTokenOutputStream proxy = new MultipartUploadTokenOutputStream(session, nodeid, file, status, uploadUrl);
    return new HttpResponseOutputStream<Node>(new MemorySegementingOutputStream(proxy,
            new HostPreferences(session.getHost()).getInteger("sds.upload.multipart.chunksize")),
            new SDSAttributesAdapter(session), status) {
        private final AtomicBoolean close = new AtomicBoolean();
        private final AtomicReference<Node> node = new AtomicReference<>();

        @Override
        public Node getStatus() {
            return node.get();
        }

        @Override
        public void close() throws IOException {
            try {
                if(close.get()) {
                    log.warn(String.format("Skip double close of stream %s", this));
                    return;
                }
                super.close();
                node.set(upload.complete(file, uploadToken, status));
            }
            catch(BackgroundException e) {
                throw new IOException(e);
            }
            finally {
                close.set(true);
            }
        }

        @Override
        protected void handleIOException(final IOException e) throws IOException {
            // Cancel upload on error reply
            try {
                upload.cancel(file, uploadToken);
            }
            catch(BackgroundException f) {
                log.warn(String.format("Failure %s cancelling upload for file %s with upload token %s after failure %s", f, file, uploadToken, e));
            }
            throw e;
        }
    };
}
@Test(expected = TransferStatusCanceledException.class) public void testWriteCancel() throws Exception { final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final byte[] content = RandomUtils.nextBytes(32769); final Path test = new Path(room, String.format("{%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)); final BytecountStreamListener count = new BytecountStreamListener(); final TransferStatus status = new TransferStatus() { @Override public void validate() throws ConnectionCanceledException { if(count.getSent() >= 32768) { throw new TransferStatusCanceledException(); } super.validate(); } }; status.setLength(content.length); final SDSMultipartWriteFeature writer = new SDSMultipartWriteFeature(session, nodeid); final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).withListener(count).transfer(new ByteArrayInputStream(content), out); assertFalse(new DefaultFindFeature(session).find(test)); out.getStatus(); }
@Override public long getPeriodMillis() { return STATIC; }
@Test public void testGetPeriodMillis() { assertEquals(DiagnosticsPlugin.STATIC, plugin.getPeriodMillis()); }
@Override @Nullable public byte[] readByteArray(@Nonnull String fieldName) throws IOException { return readIncompatibleField(fieldName, BYTE_ARRAY, super::readByteArray); }
@Test(expected = IncompatibleClassChangeError.class) public void testReadByteArray_IncompatibleClass() throws Exception { reader.readByteArray("byte"); }
public abstract void checkForCrash(String url);
@Test public void testCheckForCrash() { final CrashReporter reporter = CrashReporter.create(); assertNotNull(reporter); reporter.checkForCrash("https://crash.cyberduck.io/report"); }
@Override public CompletableFuture<Void> deleteSubscriptionGroup(String address, DeleteSubscriptionGroupRequestHeader requestHeader, long timeoutMillis) { CompletableFuture<Void> future = new CompletableFuture<>(); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_SUBSCRIPTIONGROUP, requestHeader); remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> { if (response.getCode() == ResponseCode.SUCCESS) { future.complete(null); } else { log.warn("deleteSubscriptionGroup getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader); future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark())); } }); return future; }
@Test public void assertDeleteSubscriptionGroupWithSuccess() throws Exception { setResponseSuccess(null); DeleteSubscriptionGroupRequestHeader requestHeader = mock(DeleteSubscriptionGroupRequestHeader.class); CompletableFuture<Void> actual = mqClientAdminImpl.deleteSubscriptionGroup(defaultBrokerAddr, requestHeader, defaultTimeout); assertNull(actual.get()); }
public String convert(ILoggingEvent event) { StringBuilder sb = new StringBuilder(); int pri = facility + LevelToSyslogSeverity.convert(event); sb.append("<"); sb.append(pri); sb.append(">"); sb.append(computeTimeStampString(event.getTimeStamp())); sb.append(' '); sb.append(localHostName); sb.append(' '); return sb.toString(); }
@Test
public void hostnameShouldNotIncludeDomain() throws Exception {
    // RFC 3164, section 4.1.2:
    // The Domain Name MUST NOT be included in the HOSTNAME field.
    String host = HOSTNAME;
    final int firstPeriod = host.indexOf(".");
    if (firstPeriod != -1) {
        host = host.substring(0, firstPeriod);
    }
    LoggingEvent le = createLoggingEvent();
    calendar.set(2012, Calendar.OCTOBER, 11, 22, 14, 15);
    le.setTimeStamp(calendar.getTimeInMillis());
    assertEquals("<191>Oct 11 22:14:15 " + host + " ", converter.convert(le));
}
@Override public DataSerializableFactory createFactory() { return typeId -> switch (typeId) { case MAP_REPLICATION_UPDATE -> new WanMapAddOrUpdateEvent(); case MAP_REPLICATION_REMOVE -> new WanMapRemoveEvent(); case WAN_MAP_ENTRY_VIEW -> new WanMapEntryView<>(); case WAN_CACHE_ENTRY_VIEW -> new WanCacheEntryView<>(); case WAN_EVENT_CONTAINER_REPLICATION_OPERATION -> new WanEventContainerReplicationOperation(); default -> throw new IllegalArgumentException("Unknown type-id: " + typeId); }; }
@Test public void testExistingTypes() { WanDataSerializerHook hook = new WanDataSerializerHook(); IdentifiedDataSerializable mapUpdate = hook.createFactory() .create(WanDataSerializerHook.MAP_REPLICATION_UPDATE); assertTrue(mapUpdate instanceof WanMapAddOrUpdateEvent); IdentifiedDataSerializable mapRemove = hook.createFactory() .create(WanDataSerializerHook.MAP_REPLICATION_REMOVE); assertTrue(mapRemove instanceof WanMapRemoveEvent); }
public List<String> toPrefix(String in) {
    List<String> tokens = buildTokens(alignINClause(in));
    List<String> output = new ArrayList<>();
    List<String> stack = new ArrayList<>();
    for (String token : tokens) {
        if (isOperand(token)) {
            if (token.equals(")")) {
                while (openParanthesesFound(stack)) {
                    output.add(stack.remove(stack.size() - 1));
                }
                if (!stack.isEmpty()) {
                    // temporarily fix for issue #189
                    stack.remove(stack.size() - 1);
                }
            } else {
                while (openParanthesesFound(stack) && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) {
                    output.add(stack.remove(stack.size() - 1));
                }
                stack.add(token);
            }
        } else {
            output.add(token);
        }
    }
    while (!stack.isEmpty()) {
        output.add(stack.remove(stack.size() - 1));
    }
    return output;
}
@Test public void testBetweenAnd() { String query = "a and b between 10 and 15"; List<String> list = parser.toPrefix(query); assertEquals(Arrays.asList("a", "b", "10", "15", "between", "and"), list); }
public void startAsync() { try { udfLoader.load(); ProcessingLogServerUtils.maybeCreateProcessingLogTopic( serviceContext.getTopicClient(), processingLogConfig, ksqlConfig); if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) { log.warn("processing log auto-create is enabled, but this is not supported " + "for headless mode."); } rocksDBConfigSetterHandler.accept(ksqlConfig); processesQueryFile(readQueriesFile(queriesFile)); showWelcomeMessage(); final Properties properties = new Properties(); ksqlConfig.originals().forEach((key, value) -> { if (nonNull(value)) { properties.put(key, value.toString()); } }); versionChecker.start(KsqlModuleType.SERVER, properties); } catch (final Exception e) { log.error("Failed to start KSQL Server with query file: " + queriesFile, e); throw e; } }
@Test
public void shouldRunSetStatements() {
    // Given:
    final PreparedStatement<SetProperty> setProp = PreparedStatement.of("SET PROP",
        new SetProperty(Optional.empty(), ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"));
    final PreparedStatement<CreateStream> cs = PreparedStatement.of("CS",
        new CreateStream(SOME_NAME, SOME_ELEMENTS, false, false, JSON_PROPS, false));
    givenQueryFileParsesTo(setProp, cs);

    // When:
    standaloneExecutor.startAsync();

    // Then:
    verify(ksqlEngine).execute(
        serviceContext,
        ConfiguredStatement.of(cs, SessionConfig
            .of(ksqlConfig, ImmutableMap.of(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"))
        ));
}
public static Duration parse(final String text) { try { final String[] parts = text.split("\\s"); if (parts.length != 2) { throw new IllegalArgumentException("Expected 2 tokens, got: " + parts.length); } final long size = parseNumeric(parts[0]); return buildDuration(size, parts[1]); } catch (final Exception e) { throw new IllegalArgumentException("Invalid duration: '" + text + "'. " + e.getMessage(), e); } }
@Test
public void shouldThrowOnTooManyTokens() {
    // When:
    final Exception e = assertThrows(
        IllegalArgumentException.class,
        () -> parse("10 Seconds Long")
    );

    // Then:
    assertThat(e.getMessage(), containsString("Expected 2 tokens, got: 3"));
}
public String getExpression() { String column = identifier.getValue(); if (null != nestedObjectAttributes && !nestedObjectAttributes.isEmpty()) { column = String.join(".", column, nestedObjectAttributes.stream().map(IdentifierValue::getValue).collect(Collectors.joining("."))); } return null == owner ? column : String.join(".", owner.getIdentifier().getValue(), column); }
@Test void assertGetExpressionWithoutOwner() { assertThat(new ColumnSegment(0, 0, new IdentifierValue("`col`")).getExpression(), is("col")); }
static void checkValidCollectionName(String databaseName, String collectionName) { String fullCollectionName = databaseName + "." + collectionName; if (collectionName.length() < MIN_COLLECTION_NAME_LENGTH) { throw new IllegalArgumentException("Collection name cannot be empty."); } if (fullCollectionName.length() > MAX_COLLECTION_NAME_LENGTH) { throw new IllegalArgumentException( "Collection name " + fullCollectionName + " cannot be longer than " + MAX_COLLECTION_NAME_LENGTH + " characters, including the database name and dot."); } if (ILLEGAL_COLLECTION_CHARS.matcher(collectionName).find()) { throw new IllegalArgumentException( "Collection name " + collectionName + " is not a valid name. Only letters, numbers, hyphens, underscores and exclamation points are allowed."); } if (collectionName.charAt(0) != '_' && !Character.isLetter(collectionName.charAt(0))) { throw new IllegalArgumentException( "Collection name " + collectionName + " must start with a letter or an underscore."); } String illegalKeyword = "system."; if (collectionName.startsWith(illegalKeyword)) { throw new IllegalArgumentException( "Collection name " + collectionName + " cannot start with the prefix \"" + illegalKeyword + "\"."); } }
@Test public void testCheckValidCollectionNameThrowsErrorWhenNameContainsDollarSign() { assertThrows( IllegalArgumentException.class, () -> checkValidCollectionName("test-database", "test$collection")); }
@Override public ParsedLine parse(final String line, final int cursor, final ParseContext context) { final ParsedLine parsed = delegate.parse(line, cursor, context); if (context != ParseContext.ACCEPT_LINE) { return parsed; } if (UnclosedQuoteChecker.isUnclosedQuote(line)) { throw new EOFError(-1, -1, "Missing end quote", "end quote char"); } final String bare = CommentStripper.strip(parsed.line()); if (bare.isEmpty()) { return parsed; } if (cliCmdPredicate.test(bare)) { return parsed; } if (!bare.endsWith(TERMINATION_CHAR)) { throw new EOFError(-1, -1, "Missing termination char", "termination char"); } return parsed; }
@Test
public void shouldCallDelegateWithCorrectParams() {
    // Given:
    EasyMock.expect(parsedLine.line()).andReturn(TERMINATED_LINE).anyTimes();
    EasyMock.expect(delegate.parse("some-string", 55, ParseContext.ACCEPT_LINE))
        .andReturn(parsedLine);
    EasyMock.replay(delegate, parsedLine);

    // When:
    parser.parse("some-string", 55, ParseContext.ACCEPT_LINE);

    // Then:
    EasyMock.verify(delegate);
}
public BrokerFileSystem getFileSystem(String path, Map<String, String> properties) {
    WildcardURI pathUri = new WildcardURI(path);
    String scheme = pathUri.getUri().getScheme();
    if (Strings.isNullOrEmpty(scheme)) {
        throw new BrokerException(TBrokerOperationStatusCode.INVALID_INPUT_FILE_PATH,
                "invalid path. scheme is null");
    }
    BrokerFileSystem brokerFileSystem = null;
    if (scheme.equals(HDFS_SCHEME) || scheme.equals(VIEWFS_SCHEME)) {
        brokerFileSystem = getDistributedFileSystem(scheme, path, properties);
    } else if (scheme.equals(S3A_SCHEME)) {
        brokerFileSystem = getS3AFileSystem(path, properties);
    } else if (scheme.equals(OSS_SCHEME)) {
        brokerFileSystem = getOSSFileSystem(path, properties);
    } else if (scheme.equals(COS_SCHEME)) {
        brokerFileSystem = getCOSFileSystem(path, properties);
    } else if (scheme.equals(KS3_SCHEME)) {
        brokerFileSystem = getKS3FileSystem(path, properties);
    } else if (scheme.equals(OBS_SCHEME)) {
        brokerFileSystem = getOBSFileSystem(path, properties);
    } else if (scheme.equals(TOS_SCHEME)) {
        brokerFileSystem = getTOSFileSystem(path, properties);
    } else {
        // If all matches above fail, we read the settings from the hdfs-site.xml and core-site.xml of FE
        // and try to create a universal file system. We can do this because the hadoop/s3
        // SDK is compatible with nearly all file/object storage systems.
        brokerFileSystem = getUniversalFileSystem(path, properties);
    }
    return brokerFileSystem;
}
@Test
public void testGetFileSystemWithoutPassword() throws IOException {
    Map<String, String> properties = new HashMap<String, String>();
    properties.put("username", "user");
    // properties.put("password", "changeit");
    boolean haveException = false;
    try {
        BrokerFileSystem fs = fileSystemManager.getFileSystem(testHdfsHost + "/data/abc/logs", properties);
    } catch (BrokerException e) {
        haveException = true;
    }
    assertEquals(true, haveException);
}
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    if(directory.isRoot()) {
        final AttributedList<Path> list = new AttributedList<>();
        for(RootFolder root : session.roots()) {
            switch(root.getRootFolderType()) {
                case 0: // My Files
                case 1: // Common
                    list.add(new Path(directory, PathNormalizer.name(root.getName()),
                            EnumSet.of(Path.Type.directory, Path.Type.volume), attributes.toAttributes(root)));
                    break;
            }
            listener.chunk(directory, list);
        }
        return list;
    }
    else {
        try {
            final AttributedList<Path> children = new AttributedList<>();
            int pageIndex = 0;
            int fileCount = 0;
            FileContents files;
            do {
                files = new FilesApi(this.session.getClient()).filesGetById(
                        URIEncoder.encode(fileid.getFileId(directory)),
                        pageIndex,
                        chunksize,
                        "Name asc",
                        0, // All
                        true,
                        false,
                        false
                );
                for(File f : files.getFiles()) {
                    final PathAttributes attrs = attributes.toAttributes(f);
                    final EnumSet<Path.Type> type = (f.getFlags() & 1) == 1
                            ? EnumSet.of(Path.Type.directory)
                            : EnumSet.of(Path.Type.file);
                    children.add(new Path(directory, f.getName(), type, attrs));
                }
                pageIndex++;
                fileCount += files.getFiles().size();
                listener.chunk(directory, children);
            }
            while(fileCount < files.getTotalRowCount());
            return children;
        }
        catch(ApiException e) {
            throw new StoregateExceptionMappingService(fileid).map("Listing directory {0} failed", e, directory);
        }
    }
}
@Test
public void testListWithHiddenFile() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new Path("/My files", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path folder = new StoregateDirectoryFeature(session, nodeid).mkdir(new Path(room,
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path file = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final TransferStatus status = new TransferStatus();
    status.setHidden(true);
    new StoregateTouchFeature(session, nodeid).touch(file, status);
    final AttributedList<Path> list = new StoregateListService(session, nodeid).list(folder, new IndexedListProgressListener() {
        @Override
        public void message(final String message) {
            //
        }

        @Override
        public void visit(final AttributedList<Path> list, final int index, final Path file) {
            if(file.attributes().isHidden()) {
                list.remove(index);
            }
        }
    });
    assertNotSame(AttributedList.emptyList(), list);
    assertTrue(list.isEmpty());
    new StoregateDeleteFeature(session, nodeid).delete(Arrays.asList(file, folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
public static SchemaAndValue parseString(String value) { if (value == null) { return NULL_SCHEMA_AND_VALUE; } if (value.isEmpty()) { return new SchemaAndValue(Schema.STRING_SCHEMA, value); } ValueParser parser = new ValueParser(new Parser(value)); return parser.parse(false); }
@Test public void shouldNotParseUnquotedEmbeddedMapKeysAsStrings() { SchemaAndValue schemaAndValue = Values.parseString("{foo: 3}"); assertEquals(Type.STRING, schemaAndValue.schema().type()); assertEquals("{foo: 3}", schemaAndValue.value()); }
public static String generateCode(final SqlType sqlType) { return SqlTypeWalker.visit(sqlType, new TypeVisitor()); }
@Test
public void shouldGenerateWorkingCodeForAllSqlBaseTypes() {
    for (final SqlBaseType baseType : SqlBaseType.values()) {
        // When:
        final String code = SqlTypeCodeGen.generateCode(TypeInstances.typeInstanceFor(baseType));

        // Then:
        final Object result = CodeGenTestUtil.cookAndEval(code, SqlType.class);
        assertThat(result, is(instanceOf(SqlType.class)));
        assertThat(((SqlType) result).baseType(), is(baseType));
    }
}
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final DownloadBuilder builder = new DbxUserFilesRequests(session.getClient(file)) .downloadBuilder(containerService.getKey(file)).withRev(file.attributes().getVersionId()); if(status.isAppend()) { final HttpRange range = HttpRange.withStatus(status); builder.range(range.getStart()); } final DbxDownloader<FileMetadata> downloader = builder.start(); return downloader.getInputStream(); } catch(DbxException e) { throw new DropboxExceptionMappingService().map("Download {0} failed", e, file); } }
@Test
public void testReadRevision() throws Exception {
    final byte[] content = RandomUtils.nextBytes(1645);
    final TransferStatus status = new TransferStatus().withLength(content.length);
    final Path directory = new DropboxDirectoryFeature(session).mkdir(
            new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)),
            new TransferStatus());
    final Path test = new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final DropboxWriteFeature writer = new DropboxWriteFeature(session);
    final HttpResponseOutputStream<Metadata> out = writer.write(test, status, new DisabledConnectionCallback());
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    test.withAttributes(status.getResponse());
    assertNotNull(test.attributes().getVersionId());
    // Only latest version
    assertTrue(new DropboxVersioningFeature(session).list(test, new DisabledListProgressListener()).isEmpty());
    assertArrayEquals(content, IOUtils.readFully(new DropboxReadFeature(session).read(test, new TransferStatus(),
            new DisabledConnectionCallback()), content.length));
    new DropboxDeleteFeature(session).delete(Arrays.asList(test, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
public boolean isBeforeOrAt(KinesisRecord other) { if (shardIteratorType == AT_TIMESTAMP) { return timestamp.compareTo(other.getApproximateArrivalTimestamp()) <= 0; } int result = extendedSequenceNumber().compareTo(other.getExtendedSequenceNumber()); if (result == 0) { return shardIteratorType == AT_SEQUENCE_NUMBER; } return result < 0; }
@Test public void testComparisonWithExtendedSequenceNumber() { assertThat( new ShardCheckpoint("", "", new StartingPoint(LATEST)) .isBeforeOrAt(recordWith(new ExtendedSequenceNumber("100", 0L)))) .isTrue(); assertThat( new ShardCheckpoint("", "", new StartingPoint(TRIM_HORIZON)) .isBeforeOrAt(recordWith(new ExtendedSequenceNumber("100", 0L)))) .isTrue(); assertThat( checkpoint(AFTER_SEQUENCE_NUMBER, "10", 1L) .isBeforeOrAt(recordWith(new ExtendedSequenceNumber("100", 0L)))) .isTrue(); assertThat( checkpoint(AT_SEQUENCE_NUMBER, "100", 0L) .isBeforeOrAt(recordWith(new ExtendedSequenceNumber("100", 0L)))) .isTrue(); assertThat( checkpoint(AFTER_SEQUENCE_NUMBER, "100", 0L) .isBeforeOrAt(recordWith(new ExtendedSequenceNumber("100", 0L)))) .isFalse(); assertThat( checkpoint(AT_SEQUENCE_NUMBER, "100", 1L) .isBeforeOrAt(recordWith(new ExtendedSequenceNumber("100", 0L)))) .isFalse(); assertThat( checkpoint(AFTER_SEQUENCE_NUMBER, "100", 0L) .isBeforeOrAt(recordWith(new ExtendedSequenceNumber("99", 1L)))) .isFalse(); }
public GlobalJobParameters getGlobalJobParameters() { return configuration .getOptional(PipelineOptions.GLOBAL_JOB_PARAMETERS) .map(MapBasedJobParameters::new) .orElse(new MapBasedJobParameters(Collections.emptyMap())); }
@Test void testGlobalParametersNotNull() { final ExecutionConfig config = new ExecutionConfig(); assertThat(config.getGlobalJobParameters()).isNotNull(); }
@Override public void createIndex(String indexName, IndexOptions options, FieldIndex... fields) { commandExecutor.get(createIndexAsync(indexName, options, fields)); }
@Test public void testFieldTag() { IndexOptions indexOptions = IndexOptions.defaults() .on(IndexType.JSON) .prefix(Arrays.asList("items")); FieldIndex[] fields = new FieldIndex[]{ FieldIndex.tag("$.name") .caseSensitive() .withSuffixTrie() .noIndex() .separator("a") .sortMode(SortMode.NORMALIZED) .as("name") }; RSearch s = redisson.getSearch(); s.createIndex("itemIndex", indexOptions, fields); }
@Override public String rpcType() { return RpcTypeEnum.WEB_SOCKET.getName(); }
@Test public void testRpcType() { assertEquals(RpcTypeEnum.WEB_SOCKET.getName(), shenyuClientRegisterWebSocketService.rpcType()); }
@Override public void transfer(V v) throws InterruptedException { RFuture<Void> future = service.invoke(v); commandExecutor.getInterrupted(future); }
@Test public void testTransfer() throws InterruptedException, ExecutionException { RTransferQueue<Integer> queue1 = redisson.getTransferQueue("queue"); AtomicBoolean takeExecuted = new AtomicBoolean(); Future<?> f = Executors.newSingleThreadExecutor().submit(() -> { RTransferQueue<Integer> queue = redisson.getTransferQueue("queue"); try { long time = System.currentTimeMillis(); queue.transfer(3); assertThat(takeExecuted.get()).isTrue(); assertThat(System.currentTimeMillis() - time).isGreaterThan(2850); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } }); Thread.sleep(3000); assertThat(queue1.size()).isEqualTo(1); assertThat(queue1.peek()).isEqualTo(3); assertThat(queue1.take()).isEqualTo(3); takeExecuted.set(true); f.get(); assertThat(queue1.size()).isZero(); assertThat(queue1.peek()).isNull(); }
@Override public void notifyCheckpointAborted(long checkpointId) throws Exception { super.notifyCheckpointAborted(checkpointId); sourceReader.notifyCheckpointAborted(checkpointId); }
@Test void testNotifyCheckpointAborted() throws Exception { StateInitializationContext stateContext = context.createStateContext(); operator.initializeState(stateContext); operator.open(); operator.snapshotState(new StateSnapshotContextSynchronousImpl(100L, 100L)); operator.notifyCheckpointAborted(100L); assertThat(mockSourceReader.getAbortedCheckpoints().get(0)).isEqualTo(100L); }
public static Function getFunctionOfRound(FunctionCallExpr node, Function fn, List<Type> argumentTypes) { return getFunctionOfRound(node.getParams(), fn, argumentTypes); }
@Test public void testGetFnOfTruncateForDecimalAndSlotRef() { List<Expr> params = Lists.newArrayList(); params.add(new DecimalLiteral(new BigDecimal(new BigInteger("1845076"), 2))); TableName tableName = new TableName("db", "table"); SlotRef slotRef = new SlotRef(tableName, "v1"); params.add(slotRef); FunctionCallExpr node = new FunctionCallExpr(FunctionSet.TRUNCATE, params); List<Type> paramTypes = Lists.newArrayList(); paramTypes.add(ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL32, 7, 2)); paramTypes.add(Type.TINYINT); Function function = Expr.getBuiltinFunction(FunctionSet.TRUNCATE, paramTypes.toArray(new Type[0]), Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); Assert.assertNotNull(function); Function newFn = DecimalV3FunctionAnalyzer.getFunctionOfRound(node, function, paramTypes); Type returnType = newFn.getReturnType(); Assert.assertTrue(returnType.isDecimalV3()); Assert.assertEquals(Integer.valueOf(38), returnType.getPrecision()); }
@Override
public String execute(SampleResult previousResult, Sampler currentSampler) throws InvalidVariableException {
    String originalString = values[0].execute();
    String mode = null; // default
    if (values.length > 1) {
        mode = values[1].execute();
    }
    if (StringUtils.isEmpty(mode)) {
        mode = ChangeCaseMode.UPPER.getName(); // default
    }
    String targetString = changeCase(originalString, mode);
    addVariableValue(targetString, values, 2);
    return targetString;
}
@Test public void testChangeCaseWrongModeIgnore() throws Exception { String returnValue = execute("ab-CD eF", "Wrong"); assertEquals("ab-CD eF", returnValue); }
@Override public String getSessionId() { return sessionID; }
@Test public void testDeleteConfigRequestWithRunningDatastoreIdDuration() { log.info("Starting delete-config async"); assertNotNull("Incorrect sessionId", session1.getSessionId()); try { assertFalse("NETCONF delete-config command failed", session1.deleteConfig(RUNNING)); } catch (NetconfException e) { e.printStackTrace(); fail("NETCONF delete-config test failed: " + e.getMessage()); } log.info("Finishing delete-config async"); }
public static double mul(float v1, float v2) { return mul(Float.toString(v1), Float.toString(v2)).doubleValue(); }
@Test public void mulTest(){ final BigDecimal mul = NumberUtil.mul(new BigDecimal("10"), null); assertEquals(BigDecimal.ZERO, mul); }
@Override public List<DictTypeDO> getDictTypeList() { return dictTypeMapper.selectList(); }
@Test
public void testGetDictTypeList() {
    // prepare parameters
    DictTypeDO dictTypeDO01 = randomDictTypeDO();
    dictTypeMapper.insert(dictTypeDO01);
    DictTypeDO dictTypeDO02 = randomDictTypeDO();
    dictTypeMapper.insert(dictTypeDO02);
    // mock methods
    // invoke
    List<DictTypeDO> dictTypeDOList = dictTypeService.getDictTypeList();
    // assert
    assertEquals(2, dictTypeDOList.size());
    assertPojoEquals(dictTypeDO01, dictTypeDOList.get(0));
    assertPojoEquals(dictTypeDO02, dictTypeDOList.get(1));
}
Optional<String> placementGroupEc2() { return getOptionalMetadata(ec2MetadataEndpoint.concat("/placement/group-name/"), "placement group"); }
@Test
public void failToFetchPlacementGroupEc2() {
    // given
    stubFor(get(urlEqualTo(GROUP_NAME_URL))
        .willReturn(aResponse().withStatus(HttpURLConnection.HTTP_INTERNAL_ERROR).withBody("Service Unavailable")));

    // when
    Optional<String> placementGroupResult = awsMetadataApi.placementGroupEc2();

    // then
    assertEquals(Optional.empty(), placementGroupResult);
    verify(moreThan(RETRY_COUNT), getRequestedFor(urlEqualTo(GROUP_NAME_URL)));
}
@Override
public List<ParagraphInfo> getParagraphList(String user, String noteId) throws IOException, TException, ServiceException {
    // Check READER permission
    Set<String> userAndRoles = new HashSet<>();
    userAndRoles.add(user);
    boolean isAllowed = authorizationService.isReader(noteId, userAndRoles);
    Set<String> allowed = authorizationService.getReaders(noteId);
    if (!isAllowed) {
        String errorMsg = "Insufficient privileges to READER note. "
            + "Allowed users or roles: " + allowed;
        throw new ServiceException(errorMsg);
    }
    return getNotebook().processNote(noteId, note -> {
        if (null == note) {
            throw new IOException("Not found this note : " + noteId);
        }
        // Convert Paragraph to ParagraphInfo
        List<ParagraphInfo> paragraphInfos = new ArrayList<>();
        List<Paragraph> paragraphs = note.getParagraphs();
        for (Paragraph paragraph : paragraphs) {
            ParagraphInfo paraInfo = new ParagraphInfo();
            paraInfo.setNoteId(noteId);
            paraInfo.setParagraphId(paragraph.getId());
            paraInfo.setParagraphTitle(paragraph.getTitle());
            paraInfo.setParagraphText(paragraph.getText());
            paragraphInfos.add(paraInfo);
        }
        return paragraphInfos;
    });
}
@Test
void testGetParagraphList() throws IOException {
    String noteId = null;
    try {
        noteId = notebook.createNote("note1", anonymous);
        notebook.processNote(noteId, note -> {
            Paragraph p1 = note.addNewParagraph(anonymous);
            p1.setText("%md start remote interpreter process");
            p1.setAuthenticationInfo(anonymous);
            notebook.saveNote(note, anonymous);
            return null;
        });
        String user1Id = "user1", user2Id = "user2";

        // test user1 can get anonymous's note
        List<ParagraphInfo> paragraphList0 = null;
        try {
            paragraphList0 = notebookServer.getParagraphList(user1Id, noteId);
        } catch (ServiceException e) {
            e.printStackTrace();
        } catch (TException e) {
            e.printStackTrace();
        }
        assertNotNull(paragraphList0, user1Id + " can get anonymous's note");

        // test user1 cannot get user2's note
        authorizationService.setOwners(noteId, new HashSet<>(Arrays.asList(user2Id)));
        authorizationService.setReaders(noteId, new HashSet<>(Arrays.asList(user2Id)));
        authorizationService.setRunners(noteId, new HashSet<>(Arrays.asList(user2Id)));
        authorizationService.setWriters(noteId, new HashSet<>(Arrays.asList(user2Id)));
        List<ParagraphInfo> paragraphList1 = null;
        try {
            paragraphList1 = notebookServer.getParagraphList(user1Id, noteId);
        } catch (ServiceException e) {
            e.printStackTrace();
        } catch (TException e) {
            e.printStackTrace();
        }
        assertNull(paragraphList1, user1Id + " cannot get " + user2Id + "'s note");

        // test user1 can get user2's shared note
        authorizationService.setOwners(noteId, new HashSet<>(Arrays.asList(user2Id)));
        authorizationService.setReaders(noteId, new HashSet<>(Arrays.asList(user1Id, user2Id)));
        authorizationService.setRunners(noteId, new HashSet<>(Arrays.asList(user2Id)));
        authorizationService.setWriters(noteId, new HashSet<>(Arrays.asList(user2Id)));
        List<ParagraphInfo> paragraphList2 = null;
        try {
            paragraphList2 = notebookServer.getParagraphList(user1Id, noteId);
        } catch (ServiceException e) {
            e.printStackTrace();
        } catch (TException e) {
            e.printStackTrace();
        }
        assertNotNull(paragraphList2, user1Id + " can get " + user2Id + "'s shared note");
    } finally {
        if (null != noteId) {
            notebook.removeNote(noteId, anonymous);
        }
    }
}
public static void setTransferEncodingChunked(HttpMessage m, boolean chunked) { if (chunked) { m.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); m.headers().remove(HttpHeaderNames.CONTENT_LENGTH); } else { List<String> encodings = m.headers().getAll(HttpHeaderNames.TRANSFER_ENCODING); if (encodings.isEmpty()) { return; } List<CharSequence> values = new ArrayList<CharSequence>(encodings); Iterator<CharSequence> valuesIt = values.iterator(); while (valuesIt.hasNext()) { CharSequence value = valuesIt.next(); if (HttpHeaderValues.CHUNKED.contentEqualsIgnoreCase(value)) { valuesIt.remove(); } } if (values.isEmpty()) { m.headers().remove(HttpHeaderNames.TRANSFER_ENCODING); } else { m.headers().set(HttpHeaderNames.TRANSFER_ENCODING, values); } } }
@Test public void testRemoveTransferEncodingIgnoreCase() { HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); message.headers().set(HttpHeaderNames.TRANSFER_ENCODING, "Chunked"); assertFalse(message.headers().isEmpty()); HttpUtil.setTransferEncodingChunked(message, false); assertTrue(message.headers().isEmpty()); }
public void addMetricValue(String name, RuntimeUnit unit, long value) { metrics.computeIfAbsent(name, k -> new RuntimeMetric(name, unit)).addValue(value); }
@Test public void testAddMetricValue() { RuntimeStats stats = new RuntimeStats(); stats.addMetricValue(TEST_METRIC_NAME_1, NONE, 2); stats.addMetricValue(TEST_METRIC_NAME_1, NONE, 3); stats.addMetricValue(TEST_METRIC_NAME_1, NONE, 5); stats.addMetricValue(TEST_METRIC_NAME_NANO_1, NANO, 7); assertRuntimeMetricEquals( stats.getMetric(TEST_METRIC_NAME_1), new RuntimeMetric(TEST_METRIC_NAME_1, NONE, 10, 3, 5, 2)); assertRuntimeMetricEquals( stats.getMetric(TEST_METRIC_NAME_NANO_1), new RuntimeMetric(TEST_METRIC_NAME_NANO_1, NANO, 7, 1, 7, 7)); stats.reset(); assertEquals(stats.getMetrics().size(), 0); }
@SuppressWarnings("unchecked") @Override public void configure(final Map<String, ?> configs, final boolean isKey) { final String windowedInnerClassSerdeConfig = (String) configs.get(StreamsConfig.WINDOWED_INNER_CLASS_SERDE); Serde<T> windowInnerClassSerde = null; if (windowedInnerClassSerdeConfig != null) { try { windowInnerClassSerde = Utils.newInstance(windowedInnerClassSerdeConfig, Serde.class); } catch (final ClassNotFoundException e) { throw new ConfigException(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, windowedInnerClassSerdeConfig, "Serde class " + windowedInnerClassSerdeConfig + " could not be found."); } } if (inner != null && windowedInnerClassSerdeConfig != null) { if (!inner.getClass().getName().equals(windowInnerClassSerde.deserializer().getClass().getName())) { throw new IllegalArgumentException("Inner class deserializer set using constructor " + "(" + inner.getClass().getName() + ")" + " is different from the one set in windowed.inner.class.serde config " + "(" + windowInnerClassSerde.deserializer().getClass().getName() + ")."); } } else if (inner == null && windowedInnerClassSerdeConfig == null) { throw new IllegalArgumentException("Inner class deserializer should be set either via constructor " + "or via the windowed.inner.class.serde config"); } else if (inner == null) inner = windowInnerClassSerde.deserializer(); }
@Test public void shouldThrowErrorIfWindowInnerClassDeserialiserIsNotSet() { final SessionWindowedDeserializer<?> deserializer = new SessionWindowedDeserializer<>(); assertThrows(IllegalArgumentException.class, () -> deserializer.configure(props, false)); }
public static String localIP() { if (!StringUtils.isEmpty(localIp)) { return localIp; } if (System.getProperties().containsKey(CLIENT_LOCAL_IP_PROPERTY)) { return localIp = System.getProperty(CLIENT_LOCAL_IP_PROPERTY, getAddress()); } localIp = getAddress(); return localIp; }
@Test void testLocalIpWithPreferHostname() throws Exception { InetAddress inetAddress = invokeGetInetAddress(); String hostname = inetAddress.getHostName(); System.setProperty("com.alibaba.nacos.client.local.preferHostname", "true"); assertEquals(hostname, NetUtils.localIP()); }
@Override public Timed start() { return new DefaultTimed(this, timeUnit); }
@Test
public void multipleStops() {
    Timer timer = new DefaultTimer(TimeUnit.NANOSECONDS);
    Timer.Timed timed = timer.start();
    timed.stop();
    // we didn't start the timer again
    assertThatThrownBy(timed::stop)
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("stop() called multiple times");
}
File getHomeDir() { return homeDir; }
@Test public void detectHomeDir_returns_existing_dir() { assertThat(new AppSettingsLoaderImpl(system, new String[0], serviceLoaderWrapper).getHomeDir()).exists().isDirectory(); }
public boolean isAllowedNamespace(String tenant, String namespace, String fromTenant, String fromNamespace) { return true; }
@Test void isAllowedNamespace() { assertTrue(flowService.isAllowedNamespace("tenant", "namespace", "fromTenant", "fromNamespace")); }
public static ImmutableList<String> splitToLowercaseTerms(String identifierName) {
    if (ONLY_UNDERSCORES.matcher(identifierName).matches()) {
        // Degenerate case of names which contain only underscore
        return ImmutableList.of(identifierName);
    }
    return TERM_SPLITTER
        .splitToStream(identifierName)
        .map(String::toLowerCase)
        .collect(toImmutableList());
}
@Test public void splitToLowercaseTerms_separatesTrailingDigits_withoutDelimiter() { String identifierName = "term123"; ImmutableList<String> terms = NamingConventions.splitToLowercaseTerms(identifierName); assertThat(terms).containsExactly("term", "123"); }
static List<ObjectCreationExpr> getMiningFieldsObjectCreations(final List<MiningField> miningFields) { return miningFields.stream() .map(miningField -> { ObjectCreationExpr toReturn = new ObjectCreationExpr(); toReturn.setType(MiningField.class.getCanonicalName()); Expression name = miningField.getName() != null ? new StringLiteralExpr(miningField.getName()) : new NullLiteralExpr(); FIELD_USAGE_TYPE fieldUsageType = miningField.getUsageType(); Expression usageType = fieldUsageType != null ? new NameExpr(fieldUsageType.getClass().getName() + "." + fieldUsageType.name()) : new NullLiteralExpr(); OP_TYPE oPT = miningField.getOpType(); Expression opType = oPT != null ? new NameExpr(oPT.getClass().getName() + "." + oPT.name()) : new NullLiteralExpr(); DATA_TYPE dtT = miningField.getDataType(); Expression dataType = dtT != null ? new NameExpr(dtT.getClass().getName() + "." + dtT.name()) : new NullLiteralExpr(); MISSING_VALUE_TREATMENT_METHOD mVTM = miningField.getMissingValueTreatmentMethod(); Expression missingValueTreatmentMethod = mVTM != null ? new NameExpr(mVTM.getClass().getName() + "." + mVTM.name()) : new NullLiteralExpr(); INVALID_VALUE_TREATMENT_METHOD iVTM = miningField.getInvalidValueTreatmentMethod(); Expression invalidValueTreatmentMethod = iVTM != null ? new NameExpr(iVTM.getClass().getName() + "." + iVTM.name()) : new NullLiteralExpr(); Expression missingValueReplacement = miningField.getMissingValueReplacement() != null ? new StringLiteralExpr(miningField.getMissingValueReplacement()) : new NullLiteralExpr(); Expression invalidValueReplacement = miningField.getInvalidValueReplacement() != null ? new StringLiteralExpr(miningField.getInvalidValueReplacement()) : new NullLiteralExpr(); Expression allowedValues = miningField.getAllowedValues() != null ? createArraysAsListFromList(miningField.getAllowedValues()).getExpression() : new NullLiteralExpr(); Expression intervals = miningField.getIntervals() != null ? createIntervalsExpression(miningField.getIntervals()) : new NullLiteralExpr(); toReturn.setArguments(NodeList.nodeList(name, usageType, opType, dataType, missingValueTreatmentMethod, invalidValueTreatmentMethod, missingValueReplacement, invalidValueReplacement, allowedValues, intervals)); return toReturn; }) .collect(Collectors.toList()); }
@Test void getMiningFieldsObjectCreations() { List<MiningField> miningFields = IntStream.range(0, 3) .mapToObj(i -> ModelUtils.convertToKieMiningField(getRandomMiningField(), getRandomDataField())) .collect(Collectors.toList()); List<ObjectCreationExpr> retrieved = org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.getMiningFieldsObjectCreations(miningFields); commonVerifyMiningFieldsObjectCreation(retrieved, miningFields); }
public T initialBufferSize(int value) { if (value <= 0) { throw new IllegalArgumentException("initialBufferSize must be strictly positive"); } this.initialBufferSize = value; return get(); }
@Test void initialBufferSize() { checkDefaultInitialBufferSize(conf); conf.initialBufferSize(123); assertThat(conf.initialBufferSize()).as("initial buffer size").isEqualTo(123); checkDefaultMaxInitialLineLength(conf); checkDefaultMaxHeaderSize(conf); checkDefaultMaxChunkSize(conf); checkDefaultValidateHeaders(conf); checkDefaultAllowDuplicateContentLengths(conf); }
@Override public boolean onOptionsItemSelected(MenuItem item) { switch (item.getItemId()) { case R.id.about_menu_option: Navigation.findNavController(requireView()) .navigate(MainFragmentDirections.actionMainFragmentToAboutAnySoftKeyboardFragment()); return true; case R.id.tweaks_menu_option: Navigation.findNavController(requireView()) .navigate(MainFragmentDirections.actionMainFragmentToMainTweaksFragment()); return true; case R.id.backup_prefs: mDialogController.showDialog(R.id.backup_prefs); return true; case R.id.restore_prefs: mDialogController.showDialog(R.id.restore_prefs); return true; default: return super.onOptionsItemSelected(item); } }
@Test public void testTweaksMenuCommand() throws Exception { final MainFragment fragment = startFragment(); final FragmentActivity activity = fragment.getActivity(); Menu menu = Shadows.shadowOf(activity).getOptionsMenu(); Assert.assertNotNull(menu); final MenuItem item = menu.findItem(R.id.tweaks_menu_option); Assert.assertNotNull(item); Assert.assertTrue(item.isVisible()); fragment.onOptionsItemSelected(item); TestRxSchedulers.foregroundFlushAllJobs(); Fragment aboutFragment = getCurrentFragment(); Assert.assertNotNull(aboutFragment); Assert.assertTrue(aboutFragment instanceof MainTweaksFragment); }
public static ConfigDiskService getInstance() { if (configDiskService == null) { synchronized (ConfigDiskServiceFactory.class) { if (configDiskService == null) { String type = System.getProperty("config_disk_type", TYPE_RAW_DISK); if (type.equalsIgnoreCase(TYPE_ROCKSDB)) { configDiskService = new ConfigRocksDbDiskService(); } else { configDiskService = new ConfigRawDiskService(); } } return configDiskService; } } return configDiskService; }
@Test void getRawDiskInstance() { System.setProperty("config_disk_type", "rawdisk"); ConfigDiskService instance = ConfigDiskServiceFactory.getInstance(); assertTrue(instance instanceof ConfigRawDiskService); }
public synchronized boolean isServing() { return mWebServer != null && mWebServer.getServer().isRunning(); }
@Test public void alwaysOnTest() { Configuration.set(PropertyKey.STANDBY_MASTER_WEB_ENABLED, true); WebServerService webService = WebServerService.Factory.create(mWebAddress, mMasterProcess); Assert.assertTrue(webService instanceof AlwaysOnWebServerService); Assert.assertTrue(waitForFree()); Assert.assertFalse(webService.isServing()); webService.start(); Assert.assertTrue(webService.isServing()); Assert.assertTrue(isBound()); for (int i = 0; i < 5; i++) { webService.promote(); Assert.assertTrue(webService.isServing()); Assert.assertTrue(isBound()); webService.demote(); Assert.assertTrue(webService.isServing()); Assert.assertTrue(isBound()); } webService.stop(); Assert.assertTrue(waitForFree()); Assert.assertFalse(webService.isServing()); }
public String authorizationId() { return authorizationId; }
@Test public void testAuthorizationId() throws Exception { String message = "n,a=myuser,\u0001auth=Bearer 345\u0001\u0001"; OAuthBearerClientInitialResponse response = new OAuthBearerClientInitialResponse(message.getBytes(StandardCharsets.UTF_8)); assertEquals("345", response.tokenValue()); assertEquals("myuser", response.authorizationId()); }
@Override public AverageAccumulator clone() { AverageAccumulator average = new AverageAccumulator(); average.count = this.count; average.sum = this.sum; return average; }
@Test void testClone() { AverageAccumulator average = new AverageAccumulator(); average.add(1); AverageAccumulator averageNew = average.clone(); assertThat(averageNew.getLocalValue()).isCloseTo(1, within(0.0)); }
@Override public State getState() { if (terminalState != null) { return terminalState; } return delegate.getState(); }
@Test public void givenNotTerminated_reportsState() { PipelineResult delegate = mock(PipelineResult.class); when(delegate.getState()).thenReturn(PipelineResult.State.RUNNING); PrismPipelineResult underTest = new PrismPipelineResult(delegate, exec::stop); assertThat(underTest.getState()).isEqualTo(PipelineResult.State.RUNNING); exec.stop(); }