Columns: focal_method (string, length 13 – 60.9k) · test_case (string, length 25 – 109k)
@Override
public RowData nextRecord(RowData reuse) {
    // return the next row
    row.setRowId(this.nextRow++);
    return row;
}
@Test
void testReachEnd() throws Exception {
    FileInputSplit[] splits = createSplits(testFileFlat, 1);
    try (OrcColumnarRowSplitReader reader = createReader(
            new int[] {0, 1}, testSchemaFlat, new HashMap<>(), splits[0])) {
        while (!reader.reachedEnd()) {
            reader.nextRecord(null);
        }
        assertThat(reader.reachedEnd()).isTrue();
    }
}
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) {
    ProjectMeasuresQuery query = new ProjectMeasuresQuery();
    Optional.ofNullable(projectUuids).ifPresent(query::setProjectUuids);
    criteria.forEach(criterion -> processCriterion(criterion, query));
    return query;
}
@Test
public void fail_to_create_query_having_q_with_other_operator_than_equals() {
    assertThatThrownBy(() -> {
        newProjectMeasuresQuery(singletonList(Criterion.builder().setKey("query").setOperator(LT).setValue("java").build()), emptySet());
    })
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Query should only be used with equals operator");
}
@Override
public Set<String> getMetricKeys(QualityGate gate) {
    Set<String> metricKeys = new HashSet<>();
    metricKeys.add(CoreMetrics.NEW_LINES_KEY);
    for (Condition condition : gate.getConditions()) {
        metricKeys.add(condition.getMetricKey());
    }
    return metricKeys;
}
@Test
public void getMetricKeys_includes_metrics_from_qgate() {
    Set<String> metricKeys = ImmutableSet.of("foo", "bar", "baz");
    Set<Condition> conditions = metricKeys.stream().map(key -> {
        Condition condition = mock(Condition.class);
        when(condition.getMetricKey()).thenReturn(key);
        return condition;
    }).collect(Collectors.toSet());
    QualityGate gate = mock(QualityGate.class);
    when(gate.getConditions()).thenReturn(conditions);

    assertThat(underTest.getMetricKeys(gate)).containsAll(metricKeys);
}
@NonNull
public static List<VideoStream> getSortedStreamVideosList(
        @NonNull final Context context,
        @Nullable final List<VideoStream> videoStreams,
        @Nullable final List<VideoStream> videoOnlyStreams,
        final boolean ascendingOrder,
        final boolean preferVideoOnlyStreams) {
    final SharedPreferences preferences = PreferenceManager.getDefaultSharedPreferences(context);

    final boolean showHigherResolutions = preferences.getBoolean(
            context.getString(R.string.show_higher_resolutions_key), false);
    final MediaFormat defaultFormat = getDefaultFormat(context,
            R.string.default_video_format_key, R.string.default_video_format_value);

    return getSortedStreamVideosList(defaultFormat, showHigherResolutions, videoStreams,
            videoOnlyStreams, ascendingOrder, preferVideoOnlyStreams);
}
@Test
public void getSortedStreamVideosExceptHighResolutionsTest() {
    ////////////////////////////////////
    // Don't show Higher resolutions  //
    ////////////////////////////////////
    final List<VideoStream> result = ListHelper.getSortedStreamVideosList(MediaFormat.MPEG_4,
            false, VIDEO_STREAMS_TEST_LIST, VIDEO_ONLY_STREAMS_TEST_LIST, false, false);
    final List<String> expected = List.of(
            "1080p60", "1080p", "720p60", "720p", "480p", "360p", "240p", "144p");
    assertEquals(expected.size(), result.size());
    for (int i = 0; i < result.size(); i++) {
        assertEquals(expected.get(i), result.get(i).getResolution());
    }
}
public static org.apache.avro.Schema toAvroSchema(
        Schema beamSchema, @Nullable String name, @Nullable String namespace) {
    final String schemaName = Strings.isNullOrEmpty(name) ? "topLevelRecord" : name;
    final String schemaNamespace = namespace == null ? "" : namespace;
    String childNamespace = !"".equals(schemaNamespace) ? schemaNamespace + "." + schemaName : schemaName;
    List<org.apache.avro.Schema.Field> fields = Lists.newArrayList();
    for (Field field : beamSchema.getFields()) {
        org.apache.avro.Schema.Field recordField = toAvroField(field, childNamespace);
        fields.add(recordField);
    }
    return org.apache.avro.Schema.createRecord(schemaName, null, schemaNamespace, false, fields);
}
@Test
public void testJdbcLogicalVarCharRowDataToAvroSchema() {
    String expectedAvroSchemaJson =
        "{ "
            + " \"name\": \"topLevelRecord\", "
            + " \"type\": \"record\", "
            + " \"fields\": [{ "
            + " \"name\": \"my_varchar_field\", "
            + " \"type\": {\"type\": \"string\", \"logicalType\": \"varchar\", \"maxLength\": 10}"
            + " }, "
            + " { "
            + " \"name\": \"my_longvarchar_field\", "
            + " \"type\": {\"type\": \"string\", \"logicalType\": \"varchar\", \"maxLength\": 50}"
            + " }, "
            + " { "
            + " \"name\": \"my_nvarchar_field\", "
            + " \"type\": {\"type\": \"string\", \"logicalType\": \"varchar\", \"maxLength\": 10}"
            + " }, "
            + " { "
            + " \"name\": \"my_longnvarchar_field\", "
            + " \"type\": {\"type\": \"string\", \"logicalType\": \"varchar\", \"maxLength\": 50}"
            + " }, "
            + " { "
            + " \"name\": \"fixed_length_char_field\", "
            + " \"type\": {\"type\": \"string\", \"logicalType\": \"char\", \"maxLength\": 25}"
            + " } "
            + " ] "
            + "}";

    Schema beamSchema =
        Schema.builder()
            .addField(Field.of("my_varchar_field",
                FieldType.logicalType(JdbcType.StringType.varchar(10))))
            .addField(Field.of("my_longvarchar_field",
                FieldType.logicalType(JdbcType.StringType.longvarchar(50))))
            .addField(Field.of("my_nvarchar_field",
                FieldType.logicalType(JdbcType.StringType.nvarchar(10))))
            .addField(Field.of("my_longnvarchar_field",
                FieldType.logicalType(JdbcType.StringType.longnvarchar(50))))
            .addField(Field.of("fixed_length_char_field",
                FieldType.logicalType(JdbcType.StringType.fixedLengthChar(25))))
            .build();

    assertEquals(
        new org.apache.avro.Schema.Parser().parse(expectedAvroSchemaJson),
        AvroUtils.toAvroSchema(beamSchema));
}
public void writeTo(T object, DataWriter writer) throws IOException {
    writeTo(object, 0, writer);
}
@Test(expected = IOException.class)
public void testNotExportedBeanFailing() throws IOException {
    ExportConfig config = new ExportConfig().withFlavor(Flavor.JSON)
        .withExportInterceptor(new ExportInterceptor2()).withSkipIfFail(true);
    StringWriter writer = new StringWriter();
    ExportableBean b = new ExportableBean();
    builder.get(ExportableBean.class).writeTo(b, Flavor.JSON.createDataWriter(b, writer, config));
}
public static String executeDockerCommand(DockerCommand dockerCommand,
    String containerId, Map<String, String> env,
    PrivilegedOperationExecutor privilegedOperationExecutor,
    boolean disableFailureLogging, Context nmContext)
    throws ContainerExecutionException {
    PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(
        dockerCommand, containerId, env, nmContext);
    if (disableFailureLogging) {
        dockerOp.disableFailureLogging();
    }
    LOG.debug("Running docker command: {}", dockerCommand);
    try {
        String result = privilegedOperationExecutor
            .executePrivilegedOperation(null, dockerOp, null, env, true, false);
        if (result != null && !result.isEmpty()) {
            result = result.trim();
        }
        return result;
    } catch (PrivilegedOperationException e) {
        throw new ContainerExecutionException("Docker operation failed",
            e.getExitCode(), e.getOutput(), e.getErrorOutput());
    }
}
@Test
public void testExecuteDockerCommand() throws Exception {
    DockerStopCommand dockerStopCommand = new DockerStopCommand(MOCK_CONTAINER_ID);
    DockerCommandExecutor.executeDockerCommand(dockerStopCommand, cId.toString(), env,
        mockExecutor, false, nmContext);
    List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
        .capturePrivilegedOperations(mockExecutor, 1, true);
    assertEquals(1, ops.size());
    assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
        ops.get(0).getOperationType().name());
}
protected boolean isIntegralNumber(String field, FieldPresence presence, long... minMax) {
    return isIntegralNumber(object, field, presence, minMax);
}
@Test
public void isIntegralNumber() {
    assertTrue("is not proper number", cfg.isIntegralNumber(LONG, MANDATORY));
    assertTrue("is not proper number", cfg.isIntegralNumber(LONG, MANDATORY, 0));
    assertTrue("is not proper number", cfg.isIntegralNumber(LONG, MANDATORY, 0, 10));
    assertTrue("is not proper number", cfg.isIntegralNumber(LONG, MANDATORY, 5, 6));
    assertTrue("is not in range", expectInvalidField(() -> cfg.isIntegralNumber(LONG, MANDATORY, 6, 10)));
    assertTrue("is not in range", cfg.isIntegralNumber(LONG, MANDATORY, 4, 5));

    assertTrue("is not proper number", cfg.isIntegralNumber(LONG, OPTIONAL, 0, 10));
    assertTrue("is not proper number", cfg.isIntegralNumber(LONG, OPTIONAL));
    assertTrue("is not proper number", cfg.isIntegralNumber("none", OPTIONAL));
    assertTrue("did not detect missing field", expectInvalidField(() -> cfg.isIntegralNumber("none", MANDATORY)));
    assertTrue("is not proper number", expectInvalidField(() -> cfg.isIntegralNumber(TEXT, MANDATORY)));
    assertTrue("is not in range", expectInvalidField(() -> cfg.isIntegralNumber(DOUBLE, MANDATORY, 0, 10)));
}
public List<Cookie> decodeAll(String header) {
    List<Cookie> cookies = new ArrayList<Cookie>();
    decode(cookies, header);
    return Collections.unmodifiableList(cookies);
}
@Test
public void testDecodingAllMultipleCookies() {
    String c1 = "myCookie=myValue;";
    String c2 = "myCookie=myValue2;";
    String c3 = "myCookie=myValue3;";

    List<Cookie> cookies = ServerCookieDecoder.STRICT.decodeAll(c1 + c2 + c3);
    assertEquals(3, cookies.size());
    Iterator<Cookie> it = cookies.iterator();

    Cookie cookie = it.next();
    assertNotNull(cookie);
    assertEquals("myValue", cookie.value());

    cookie = it.next();
    assertNotNull(cookie);
    assertEquals("myValue2", cookie.value());

    cookie = it.next();
    assertNotNull(cookie);
    assertEquals("myValue3", cookie.value());
}
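decodeAll above wraps its result in Collections.unmodifiableList, which the test does not check; a minimal companion sketch, assuming JUnit 4.13+ for assertThrows and Netty's DefaultCookie class:

@Test
public void testDecodeAllReturnsUnmodifiableList() {
    List<Cookie> cookies = ServerCookieDecoder.STRICT.decodeAll("myCookie=myValue;");
    assertEquals(1, cookies.size());
    // mutation must fail because the list is wrapped in Collections.unmodifiableList
    assertThrows(UnsupportedOperationException.class,
        () -> cookies.add(new DefaultCookie("other", "value")));
}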
public String createNote(String notePath, AuthenticationInfo subject) throws IOException {
    return createNote(notePath,
        interpreterSettingManager.getDefaultInterpreterSetting().getName(), subject);
}
@Test
void testScheduleDisabledWithName() throws InterruptedException, IOException {
    zConf.setProperty(ConfVars.ZEPPELIN_NOTEBOOK_CRON_FOLDERS.getVarName(), "/System");
    final int timeout = 30;
    final String everySecondCron = "* * * * * ?";
    // each run starts a new JVM and the job takes about ~5 seconds
    final CountDownLatch jobsToExecuteCount = new CountDownLatch(5);
    final String noteId = notebook.createNote("note1", anonymous);

    executeNewParagraphByCron(noteId, everySecondCron);
    afterStatusChangedListener = new StatusChangedListener() {
        @Override
        public void onStatusChanged(Job<?> job, Status before, Status after) {
            if (after == Status.FINISHED) {
                jobsToExecuteCount.countDown();
            }
        }
    };

    // This job should not run because its path does not match "ZEPPELIN_NOTEBOOK_CRON_FOLDERS"
    assertFalse(jobsToExecuteCount.await(timeout, TimeUnit.SECONDS));

    terminateScheduledNote(noteId);
    afterStatusChangedListener = null;

    final String noteNameSystemId = notebook.createNote("/System/test1", anonymous);
    final CountDownLatch jobsToExecuteCountNameSystem = new CountDownLatch(5);

    executeNewParagraphByCron(noteNameSystemId, everySecondCron);
    afterStatusChangedListener = new StatusChangedListener() {
        @Override
        public void onStatusChanged(Job<?> job, Status before, Status after) {
            if (after == Status.FINISHED) {
                jobsToExecuteCountNameSystem.countDown();
            }
        }
    };

    // This job should run because its path contains "System/"
    assertTrue(jobsToExecuteCountNameSystem.await(timeout, TimeUnit.SECONDS));

    terminateScheduledNote(noteNameSystemId);
    afterStatusChangedListener = null;
}
@VisibleForTesting
public void validateDictDataValueUnique(Long id, String dictType, String value) {
    DictDataDO dictData = dictDataMapper.selectByDictTypeAndValue(dictType, value);
    if (dictData == null) {
        return;
    }
    // if id is null, there is no need to check whether the match is the same dict data record
    if (id == null) {
        throw exception(DICT_DATA_VALUE_DUPLICATE);
    }
    if (!dictData.getId().equals(id)) {
        throw exception(DICT_DATA_VALUE_DUPLICATE);
    }
}
@Test
public void testValidateDictDataValueUnique_success() {
    // invoke, expecting success
    dictDataService.validateDictDataValueUnique(randomLongId(), randomString(), randomString());
}
@Override
public Connection connectToServer(ServerInfo serverInfo) {
    // the newest connection id
    String connectionId = "";
    try {
        if (grpcExecutor == null) {
            this.grpcExecutor = createGrpcExecutor(serverInfo.getServerIp());
        }
        int port = serverInfo.getServerPort() + rpcPortOffset();
        ManagedChannel managedChannel = createNewManagedChannel(serverInfo.getServerIp(), port);
        RequestGrpc.RequestFutureStub newChannelStubTemp = createNewChannelStub(managedChannel);

        Response response = serverCheck(serverInfo.getServerIp(), port, newChannelStubTemp);
        if (!(response instanceof ServerCheckResponse)) {
            shuntDownChannel(managedChannel);
            return null;
        }
        // submit ability table as soon as possible
        // ability table will be null if server doesn't support ability table
        ServerCheckResponse serverCheckResponse = (ServerCheckResponse) response;
        connectionId = serverCheckResponse.getConnectionId();

        BiRequestStreamGrpc.BiRequestStreamStub biRequestStreamStub = BiRequestStreamGrpc.newStub(
                newChannelStubTemp.getChannel());
        GrpcConnection grpcConn = new GrpcConnection(serverInfo, grpcExecutor);
        grpcConn.setConnectionId(connectionId);
        // if not supported, it will be false
        if (serverCheckResponse.isSupportAbilityNegotiation()) {
            // mark
            this.recAbilityContext.reset(grpcConn);
            // promise null if no abilities receive
            grpcConn.setAbilityTable(null);
        }

        // create stream request and bind connection event to this connection.
        StreamObserver<Payload> payloadStreamObserver = bindRequestStream(biRequestStreamStub, grpcConn);

        // stream observer to send response to server
        grpcConn.setPayloadStreamObserver(payloadStreamObserver);
        grpcConn.setGrpcFutureServiceStub(newChannelStubTemp);
        grpcConn.setChannel(managedChannel);

        // send a setup request.
        ConnectionSetupRequest conSetupRequest = new ConnectionSetupRequest();
        conSetupRequest.setClientVersion(VersionUtils.getFullClientVersion());
        conSetupRequest.setLabels(super.getLabels());
        // set ability table
        conSetupRequest.setAbilityTable(
                NacosAbilityManagerHolder.getInstance().getCurrentNodeAbilities(abilityMode()));
        conSetupRequest.setTenant(super.getTenant());
        grpcConn.sendRequest(conSetupRequest);

        // wait for response
        if (recAbilityContext.isNeedToSync()) {
            // try to wait for notify response
            recAbilityContext.await(this.clientConfig.capabilityNegotiationTimeout(), TimeUnit.MILLISECONDS);
            // if no server abilities receiving, then reconnect
            if (!recAbilityContext.check(grpcConn)) {
                return null;
            }
        } else {
            // leave for adapting old version server
            // registration is considered successful by default after 100ms
            // wait to register connection setup
            Thread.sleep(100L);
        }
        return grpcConn;
    } catch (Exception e) {
        LOGGER.error("[{}]Fail to connect to server!,error={}", GrpcClient.this.getName(), e);
        // remove and notify
        recAbilityContext.release(null);
    }
    return null;
}
@Test
void testConnectToServerFailed() {
    assertNull(grpcClient.connectToServer(serverInfo));
}
@SuppressWarnings("unchecked") public static void addNamedOutput(Job job, String namedOutput, Class<? extends OutputFormat> outputFormatClass, Schema keySchema) { addNamedOutput(job, namedOutput, outputFormatClass, keySchema, null); }
@Test
void avroSpecificOutput() throws Exception {
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job, new Path(getClass()
        .getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
    job.setInputFormatClass(TextInputFormat.class);

    job.setMapperClass(LineCountMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    AvroMultipleOutputs.addNamedOutput(job, "myavro3", AvroKeyOutputFormat.class, TextStats.SCHEMA$, null);

    job.setReducerClass(SpecificStatsReducer.class);
    AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);

    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    Path outputPath = new Path(DIR.getPath() + "/testAvroSpecificOutput");
    outputPath.getFileSystem(job.getConfiguration()).delete(outputPath, true);
    FileOutputFormat.setOutputPath(job, outputPath);

    assertTrue(job.waitForCompletion(true));
    FileSystem fileSystem = FileSystem.get(job.getConfiguration());
    FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/myavro3-*"));
    assertEquals(1, outputFiles.length);

    Map<String, Integer> counts = new HashMap<>();
    try (DataFileReader<TextStats> reader = new DataFileReader<>(
        new FsInput(outputFiles[0].getPath(), job.getConfiguration()),
        new SpecificDatumReader<>())) {
        for (TextStats record : reader) {
            counts.put(record.getName().toString(), record.getCount());
        }
    }
    assertEquals(3, counts.get("apple").intValue());
    assertEquals(2, counts.get("banana").intValue());
    assertEquals(1, counts.get("carrot").intValue());
}
public static BytesInput fromUnsignedVarLong(long longValue) {
    return new UnsignedVarLongBytesInput(longValue);
}
@Test
public void testFromUnsignedVarLong() throws IOException {
    long value = RANDOM.nextInt(Integer.MAX_VALUE);
    ByteArrayOutputStream baos = new ByteArrayOutputStream(4);
    BytesUtils.writeUnsignedVarLong(value, baos);
    byte[] data = baos.toByteArray();

    Supplier<BytesInput> factory = () -> BytesInput.fromUnsignedVarLong(value);
    validate(data, factory);
}
public MapConfig setCacheDeserializedValues(CacheDeserializedValues cacheDeserializedValues) {
    this.cacheDeserializedValues = cacheDeserializedValues;
    return this;
}
@Test
@Ignore(value = "this MapStoreConfig does not override equals/hashcode -> this cannot pass right now")
public void givenSetCacheDeserializedValuesIsINDEX_ONLY_whenComparedWithOtherConfigWhereCacheIsINDEX_ONLY_thenReturnTrue() {
    // given
    MapConfig mapConfig = new MapConfig();
    mapConfig.setCacheDeserializedValues(CacheDeserializedValues.INDEX_ONLY);

    // when
    MapConfig otherMapConfig = new MapConfig();
    otherMapConfig.setCacheDeserializedValues(CacheDeserializedValues.INDEX_ONLY);

    // then
    assertEquals(mapConfig, otherMapConfig);
}
@Override
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
    if (metric == null) {
        throw new NullPointerException("metric == null");
    }
    return metric;
}
@Test
public void registeringACounterTriggersNoNotification() {
    assertThat(registry.register("thing", counter)).isEqualTo(counter);
    verify(listener, never()).onCounterAdded("thing", counter);
}
@Override
public Name getLocation(final Path file) throws BackgroundException {
    if (StringUtils.isNotBlank(session.getHost().getRegion())) {
        return new S3Region(session.getHost().getRegion());
    }
    final Path bucket = containerService.getContainer(file);
    return this.getLocation(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName());
}
@Test
public void testGetLocation() throws Exception {
    final RegionEndpointCache cache = session.getClient().getRegionEndpointCache();
    final S3LocationFeature feature = new S3LocationFeature(session, cache);
    assertEquals(new S3LocationFeature.S3Region("eu-west-1"), feature.getLocation(
        new Path("test-eu-west-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory))
    ));
    assertEquals("eu-west-1", cache.getRegionForBucketName("test-eu-west-1-cyberduck"));
    assertEquals(new S3LocationFeature.S3Region("eu-central-1"), feature.getLocation(
        new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory))
    ));
    assertEquals("eu-central-1", cache.getRegionForBucketName("test-eu-central-1-cyberduck"));
    assertEquals(new S3LocationFeature.S3Region("us-east-1"), feature.getLocation(
        new Path("test-us-east-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory))
    ));
    assertEquals("us-east-1", cache.getRegionForBucketName("test-us-east-1-cyberduck"));
    assertEquals(new S3LocationFeature.S3Region("us-east-1"), feature.getLocation(
        new Path("/", EnumSet.of(Path.Type.volume, Path.Type.directory))
    ));
    assertEquals("us-east-1", cache.getRegionForBucketName(""));
}
static final String addFunctionParameter(ParameterDescriptor descriptor, RuleBuilderStep step) {
    final String parameterName = descriptor.name(); // parameter name needed by function
    final Map<String, Object> parameters = step.parameters();
    if (Objects.isNull(parameters)) {
        return null;
    }
    final Object value = parameters.get(parameterName); // parameter value set by rule definition
    String syntax = " " + parameterName + " : ";
    if (value == null) {
        return null;
    } else if (value instanceof String valueString) {
        if (StringUtils.isEmpty(valueString)) {
            return null;
        } else if (valueString.startsWith("$")) { // value set as variable
            syntax += valueString.substring(1);
        } else {
            syntax += "\"" + StringEscapeUtils.escapeJava(valueString) + "\""; // value set as string
        }
    } else {
        syntax += value;
    }
    return syntax;
}
@Test
public void addFunctionParameterSyntaxOk_WhenNumericParameterValueIsSet() {
    String parameterName = "foo";
    var parameterValue = 42;
    RuleBuilderStep step = mock(RuleBuilderStep.class);
    Map<String, Object> params = Map.of(parameterName, parameterValue);
    when(step.parameters()).thenReturn(params);
    ParameterDescriptor descriptor = mock(ParameterDescriptor.class);
    when(descriptor.name()).thenReturn(parameterName);

    assertThat(ParserUtil.addFunctionParameter(descriptor, step))
        .isEqualTo(" foo : 42");
}
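The numeric case above never reaches the string branches of addFunctionParameter; a minimal sketch of the variable-reference branch (a value with a leading "$" is emitted unquoted), assuming the same Mockito/AssertJ setup as the test above — names here are illustrative:

@Test
public void addFunctionParameterSyntaxOk_WhenVariableParameterValueIsSet() {
    RuleBuilderStep step = mock(RuleBuilderStep.class);
    // the leading "$" marks the value as a variable reference, so it is emitted without quotes
    when(step.parameters()).thenReturn(Map.of("foo", "$myVariable"));
    ParameterDescriptor descriptor = mock(ParameterDescriptor.class);
    when(descriptor.name()).thenReturn("foo");

    assertThat(ParserUtil.addFunctionParameter(descriptor, step))
        .isEqualTo(" foo : myVariable");
}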
public void replayTruncateTable(TruncateTableInfo info) {
    Database db = getDb(info.getDbId());
    Locker locker = new Locker();
    locker.lockDatabase(db, LockType.WRITE);
    try {
        OlapTable olapTable = (OlapTable) db.getTable(info.getTblId());
        truncateTableInternal(olapTable, info.getPartitions(), info.isEntireTable(), true);

        if (!GlobalStateMgr.isCheckpointThread()) {
            // add tablet to inverted index
            TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex();
            for (Partition partition : info.getPartitions()) {
                long partitionId = partition.getId();
                TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
                        partitionId).getStorageMedium();
                for (PhysicalPartition physicalPartition : partition.getSubPartitions()) {
                    for (MaterializedIndex mIndex : physicalPartition.getMaterializedIndices(
                            MaterializedIndex.IndexExtState.ALL)) {
                        long indexId = mIndex.getId();
                        int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
                        TabletMeta tabletMeta = new TabletMeta(db.getId(), olapTable.getId(),
                                physicalPartition.getId(), indexId, schemaHash, medium,
                                olapTable.isCloudNativeTableOrMaterializedView());
                        for (Tablet tablet : mIndex.getTablets()) {
                            long tabletId = tablet.getId();
                            invertedIndex.addTablet(tabletId, tabletMeta);
                            if (olapTable.isOlapTable()) {
                                for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) {
                                    invertedIndex.addReplica(tabletId, replica);
                                }
                            }
                        }
                    }
                }
            }
        }
    } finally {
        locker.unLockDatabase(db, LockType.WRITE);
    }
}
@Test
public void testReplayTruncateTable() throws DdlException {
    Database db = connectContext.getGlobalStateMgr().getDb("test");
    OlapTable table = (OlapTable) db.getTable("t1");
    Partition p = table.getPartitions().stream().findFirst().get();
    TruncateTableInfo info = new TruncateTableInfo(db.getId(), table.getId(), Lists.newArrayList(p), false);

    LocalMetastore localMetastore = connectContext.getGlobalStateMgr().getLocalMetastore();
    localMetastore.replayTruncateTable(info);
}
@Override
public int get(TemporalField field) {
    return offsetTime.get(field);
}
@Test
void get() {
    Arrays.stream(ChronoField.values())
        .filter(offsetTime::isSupported)
        // NANO_OF_DAY and MICRO_OF_DAY are unsupported by OffsetTime.get()
        .filter(field -> field != ChronoField.NANO_OF_DAY && field != ChronoField.MICRO_OF_DAY)
        .forEach(field -> assertEquals(offsetTime.get(field), zoneTime.get(field)));
}
public double p(int[] o, int[] s) {
    return Math.exp(logp(o, s));
}
@Test
public void testJointP() {
    System.out.println("joint p");
    int[] o = {0, 0, 1, 1, 0, 1, 1, 0};
    int[] s = {0, 0, 1, 1, 1, 1, 1, 0};
    HMM hmm = new HMM(pi, Matrix.of(a), Matrix.of(b));
    double expResult = 7.33836e-05;
    double result = hmm.p(o, s);
    assertEquals(expResult, result, 1E-10);
}
@SuppressWarnings({"CastCanBeRemovedNarrowingVariableType", "unchecked"}) public E relaxedPoll() { final E[] buffer = consumerBuffer; final long index = consumerIndex; final long mask = consumerMask; final long offset = modifiedCalcElementOffset(index, mask); Object e = lvElement(buffer, offset);// LoadLoad if (e == null) { return null; } if (e == JUMP) { final E[] nextBuffer = getNextBuffer(buffer, mask); return newBufferPoll(nextBuffer, index); } soElement(buffer, offset, null); soConsumerIndex(this, index + 2); return (E) e; }
@Test(dataProvider = "empty") public void relaxedPoll_whenEmpty(MpscGrowableArrayQueue<Integer> queue) { assertThat(queue.relaxedPoll()).isNull(); }
public static <T> GoConfigClassLoader<T> classParser(Element e, Class<T> aClass, ConfigCache configCache,
                                                     GoCipher goCipher,
                                                     final ConfigElementImplementationRegistry registry,
                                                     ConfigReferenceElements configReferenceElements) {
    return new GoConfigClassLoader<>(e, aClass, configCache, goCipher, registry, configReferenceElements);
}
@Test
public void shouldErrorOutWhenAttributeAwareConfigTagClassHasConfigAttributeWithSameName() {
    final Element element = new Element("example");
    element.setAttribute("type", "example-type");
    when(configCache.getFieldCache()).thenReturn(new ClassAttributeCache.FieldCache());

    final GoConfigClassLoader<AttributeAwareConfigTagHasConfigAttributeWithSameName> loader =
        GoConfigClassLoader.classParser(element, AttributeAwareConfigTagHasConfigAttributeWithSameName.class,
            configCache, goCipher, registry, referenceElements);

    assertThatThrownBy(loader::parse)
        .isInstanceOf(RuntimeException.class)
        .hasMessageContaining("Attribute `type` is not allowed in com.thoughtworks.go.config.parser.AttributeAwareConfigTagHasConfigAttributeWithSameName. You cannot use @ConfigAttribute annotation with attribute name `type` when @AttributeAwareConfigTag is configured with same name.");
}
public static FuryBuilder builder() {
    return new FuryBuilder();
}
@Test
public void testSerializePackageLevelBeanJIT() {
    Fury fury = Fury.builder()
        .withLanguage(Language.JAVA)
        .withCodegen(true)
        .requireClassRegistration(false)
        .build();
    PackageLevelBean o = new PackageLevelBean();
    o.f1 = 10;
    o.f2 = 1;
    serDeCheckSerializer(fury, o, "PackageLevelBean");
}
private RestLiResponseAttachments(final RestLiResponseAttachments.Builder builder) {
    _multiPartMimeWriterBuilder = builder._multiPartMimeWriterBuilder;
}
@Test
public void testRestLiResponseAttachments() {
    // In this test we simply add a few attachments and verify the size of the resulting MultiPartMIMEWriter.
    // More detailed tests can be found in TestAttachmentUtils.
    final RestLiResponseAttachments emptyAttachments = new RestLiResponseAttachments.Builder().build();
    Assert.assertEquals(emptyAttachments.getMultiPartMimeWriterBuilder().getCurrentSize(), 0);

    // For multiple data attachments
    final RestLiTestAttachmentDataSource dataSourceA =
        new RestLiTestAttachmentDataSource("A", ByteString.copyString("partA", Charset.defaultCharset()));
    final RestLiTestAttachmentDataSource dataSourceB =
        new RestLiTestAttachmentDataSource("B", ByteString.copyString("partB", Charset.defaultCharset()));
    final RestLiTestAttachmentDataSource dataSourceC =
        new RestLiTestAttachmentDataSource("C", ByteString.copyString("partC", Charset.defaultCharset()));

    final RestLiResponseAttachments.Builder multipleAttachmentsBuilder = new RestLiResponseAttachments.Builder();
    multipleAttachmentsBuilder.appendSingleAttachment(dataSourceA);

    final RestLiTestAttachmentDataSourceIterator dataSourceIterator = new RestLiTestAttachmentDataSourceIterator(
        Arrays.asList(dataSourceB, dataSourceC), new IllegalArgumentException());
    multipleAttachmentsBuilder.appendMultipleAttachments(dataSourceIterator);

    RestLiResponseAttachments attachments = multipleAttachmentsBuilder.build();
    Assert.assertEquals(attachments.getMultiPartMimeWriterBuilder().getCurrentSize(), 2);
}
@VisibleForTesting
static int checkJar(Path file) throws Exception {
    final URI uri = file.toUri();

    int numSevereIssues = 0;
    try (final FileSystem fileSystem = FileSystems.newFileSystem(
            new URI("jar:file", uri.getHost(), uri.getPath(), uri.getFragment()),
            Collections.emptyMap())) {
        if (isTestJarAndEmpty(file, fileSystem.getPath("/"))) {
            return 0;
        }
        if (!noticeFileExistsAndIsValid(fileSystem.getPath("META-INF", "NOTICE"), file)) {
            numSevereIssues++;
        }
        if (!licenseFileExistsAndIsValid(fileSystem.getPath("META-INF", "LICENSE"), file)) {
            numSevereIssues++;
        }

        numSevereIssues += getNumLicenseFilesOutsideMetaInfDirectory(file, fileSystem.getPath("/"));
        numSevereIssues += getFilesWithIncompatibleLicenses(file, fileSystem.getPath("/"));
    }
    return numSevereIssues;
}
@Test
void testForbiddenLGPLongTextDetected(@TempDir Path tempDir) throws Exception {
    assertThat(
            JarFileChecker.checkJar(
                createJar(
                    tempDir,
                    Entry.fileEntry(VALID_NOTICE_CONTENTS, VALID_NOTICE_PATH),
                    Entry.fileEntry(VALID_LICENSE_CONTENTS, VALID_LICENSE_PATH),
                    Entry.fileEntry(
                        "some GNU Lesser General public License text",
                        Collections.singletonList("some_file.txt")))))
        .isEqualTo(1);
}
public V setValue(V value, long ttlMillis) {
    access();
    return setValueInternal(value, ttlMillis);
}
@Test
public void testSetValue() {
    assertEquals(0, replicatedRecord.getHits());
    assertEquals("value", replicatedRecord.getValueInternal());

    replicatedRecord.setValue("newValue", 0);
    assertEquals(1, replicatedRecord.getHits());
    assertEquals("newValue", replicatedRecord.getValueInternal());
}
@Operation(summary = "get challenge", tags = { SwaggerConfig.ACTIVATE_WEBSITE, SwaggerConfig.REQUEST_ACCOUNT_AND_APP, SwaggerConfig.ACTIVATE_SMS, SwaggerConfig.ACTIVATE_LETTER, SwaggerConfig.ACTIVATE_RDA, SwaggerConfig.ACTIVATE_WITH_APP, SwaggerConfig.RS_ACTIVATE_WITH_APP}, operationId = "challenge_response", parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")}) @PostMapping(value = "challenge_response", produces = "application/json") @ResponseBody public AppResponse challengeResponse(@Valid @RequestBody ChallengeResponseRequest request) throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException { return service.processAction(ActivationFlowFactory.TYPE, Action.CONFIRM_CHALLENGE, request); }
@Test
void validateIfCorrectProcessesAreCalledChallengeResponse()
    throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException,
    SharedServiceClientException {
    ChallengeResponseRequest request = new ChallengeResponseRequest();
    request.setAppPublicKey("not-null");

    activationController.challengeResponse(request);

    verify(flowService, times(1)).processAction(anyString(), any(Action.class), any(ChallengeResponseRequest.class));
}
public static <T> Stream<T> stream(Enumeration<T> e) {
    return StreamSupport.stream(
        Spliterators.spliteratorUnknownSize(
            new Iterator<T>() {
                public T next() {
                    return e.nextElement();
                }

                public boolean hasNext() {
                    return e.hasMoreElements();
                }
            },
            Spliterator.ORDERED),
        false);
}
@Test
public void test_count_stream_from_enumeration() {
    Enumeration<Integer> someEnumeration = Collections.enumeration(Arrays.asList(1, 2, 3));
    assertThat(EnumerationUtil.stream(someEnumeration).count()).isEqualTo(3);
}
private CompletionStage<RestResponse> pushStateStatus(RestRequest request) {
    return statusOperation(request, PUSH_STATE_STATUS);
}
@Test
public void testPushAllCaches() {
    RestClient restClientLon = clientPerSite.get(LON);
    RestClient restClientSfo = clientPerSite.get(SFO);

    RestCacheClient cache1Lon = restClientLon.cache(CACHE_1);
    RestCacheClient cache2Lon = restClientLon.cache(CACHE_2);
    RestCacheClient cache1Sfo = restClientSfo.cache(CACHE_1);
    RestCacheClient cache2Sfo = restClientSfo.cache(CACHE_2);

    // Take SFO offline for all caches
    assertSuccessful(restClientLon.container().takeOffline(SFO));
    Json backupStatuses = jsonResponseBody(restClientLon.container().backupStatuses());
    assertEquals("offline", backupStatuses.at(SFO).at("status").asString());

    // Write to the caches
    int entries = 10;
    IntStream.range(0, entries).forEach(i -> {
        String key = String.valueOf(i);
        String value = "value";
        assertNoContent(cache1Lon.put(key, value));
        assertNoContent(cache2Lon.put(key, value));
    });

    // Backups should be empty
    assertEquals(0, getCacheSize(cache1Sfo));
    assertEquals(0, getCacheSize(cache2Sfo));

    // Start state push
    assertSuccessful(restClientLon.container().pushSiteState(SFO));

    // Backups go online immediately
    assertEquals(ONLINE, getBackupStatus(LON, SFO));

    // State push should eventually finish
    eventuallyEquals("OK", () -> pushStateStatus(cache1Lon, SFO));
    eventuallyEquals("OK", () -> pushStateStatus(cache2Lon, SFO));

    // ... and with state
    assertEquals(entries, getCacheSize(cache1Sfo));
    assertEquals(entries, getCacheSize(cache2Sfo));
}
@Override
public void callBeforeLog() {
    if ( parent != null ) {
        parent.callBeforeLog();
    }
}
@Test
public void testJobCallBeforeLog() {
    Trans trans = new Trans();
    LoggingObjectInterface parent = mock( LoggingObjectInterface.class );
    setInternalState( trans, "parent", parent );

    trans.callBeforeLog();
    verify( parent, times( 1 ) ).callBeforeLog();
}
@GET @Path("{id}/stats") @Timed @ApiOperation(value = "Get index set statistics") @ApiResponses(value = { @ApiResponse(code = 403, message = "Unauthorized"), @ApiResponse(code = 404, message = "Index set not found"), }) public IndexSetStats indexSetStatistics(@ApiParam(name = "id", required = true) @PathParam("id") String id) { checkPermission(RestPermissions.INDEXSETS_READ, id); return indexSetRegistry.get(id) .map(indexSetStatsCreator::getForIndexSet) .orElseThrow(() -> new NotFoundException("Couldn't load index set with ID <" + id + ">")); }
@Test
public void indexSetStatisticsDenied() {
    notPermitted();

    expectedException.expect(ForbiddenException.class);
    expectedException.expectMessage("Not authorized to access resource id <id>");

    try {
        indexSetsResource.indexSetStatistics("id");
    } finally {
        verifyNoMoreInteractions(indexSetRegistry);
    }
}
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}
@Test(expected = IllegalStateException.class)
public void testDelayedStartNotReady() throws Exception {
    JmxCollector jc = new JmxCollector("---\nstartDelaySeconds: 1").register(prometheusRegistry);
    assertNull(getSampleValue("boolean_Test_True", new String[] {}, new String[] {}));
    fail();
}
public void deleteDatabaseNameListenerAssisted(final String databaseName) {
    repository.delete(ListenerAssistedNodePath.getDatabaseNameNodePath(databaseName));
}
@Test
void assertDeleteDatabaseNameListenerAssisted() {
    new ListenerAssistedPersistService(repository).deleteDatabaseNameListenerAssisted("foo_db");
    verify(repository).delete("/listener_assisted/foo_db");
}
public boolean isLaunchIntentsActivity(Activity activity) {
    final Intent helperIntent = activity.getPackageManager().getLaunchIntentForPackage(activity.getPackageName());
    final String activityName = activity.getComponentName().getClassName();
    final String launchIntentActivityName = helperIntent.getComponent().getClassName();
    return activityName.equals(launchIntentActivityName);
}
@Test
public void isLaunchIntentsActivity_activityIsMainLauncherActivity_returnTrue() throws Exception {
    Activity activity = getActivityMock(APP_MAIN_ACTIVITY_NAME);
    final AppLaunchHelper uut = getUUT();

    boolean result = uut.isLaunchIntentsActivity(activity);

    assertTrue(result);
}
public CronPattern(String pattern) {
    this.pattern = pattern;
    this.matchers = PatternParser.parse(pattern);
}
@Test
public void cronPatternTest() {
    CronPattern pattern;

    // matches 12:11
    pattern = CronPattern.of("39 11 12 * * *");
    assertMatch(pattern, "12:11:39");

    // matches every 5 minutes, i.e. minute in [0,5,10,15,20,25,30,35,40,45,50,55]
    pattern = CronPattern.of("39 */5 * * * *");
    assertMatch(pattern, "12:00:39");
    assertMatch(pattern, "12:05:39");
    assertMatch(pattern, "12:10:39");
    assertMatch(pattern, "12:15:39");
    assertMatch(pattern, "12:20:39");
    assertMatch(pattern, "12:25:39");
    assertMatch(pattern, "12:30:39");
    assertMatch(pattern, "12:35:39");
    assertMatch(pattern, "12:40:39");
    assertMatch(pattern, "12:45:39");
    assertMatch(pattern, "12:50:39");
    assertMatch(pattern, "12:55:39");

    // 2:01, 3:01, 4:01
    pattern = CronPattern.of("39 1 2-4 * * *");
    assertMatch(pattern, "02:01:39");
    assertMatch(pattern, "03:01:39");
    assertMatch(pattern, "04:01:39");

    // 2:01, 3:01, 4:01
    pattern = CronPattern.of("39 1 2,3,4 * * *");
    assertMatch(pattern, "02:01:39");
    assertMatch(pattern, "03:01:39");
    assertMatch(pattern, "04:01:39");

    // 08-07, 08-06
    pattern = CronPattern.of("39 0 0 6,7 8 *");
    assertMatch(pattern, "2016-08-07 00:00:39");
    assertMatch(pattern, "2016-08-06 00:00:39");

    // month aliases are case-insensitive
    pattern = CronPattern.of("39 0 0 6,7 Aug *");
    assertMatch(pattern, "2016-08-06 00:00:39");
    assertMatch(pattern, "2016-08-07 00:00:39");

    pattern = CronPattern.of("39 0 0 7 aug *");
    assertMatch(pattern, "2016-08-07 00:00:39");
}
@Udf
public String extractQuery(
    @UdfParameter(description = "a valid URL to extract a query from") final String input) {
    return UrlParser.extract(input, URI::getQuery);
}
@Test
public void shouldReturnNullIfNoQuery() {
    assertThat(extractUdf.extractQuery("https://current/ksql/docs/syntax-reference.html#scalar-functions"),
        nullValue());
}
public static Builder newBuilder() {
    return new Builder();
}
@Test
public void argumentValidation_withSampleUpdateFrequency_lteqSamplePeriod() {
    RpcQosOptions.newBuilder()
        .withSamplePeriod(Duration.millis(5))
        .withSamplePeriodBucketSize(Duration.millis(5))
        .validateRelatedFields();

    try {
        RpcQosOptions.newBuilder()
            .withSamplePeriod(Duration.millis(5))
            .withSamplePeriodBucketSize(Duration.millis(6))
            .validateRelatedFields();
        fail("expected validation failure for samplePeriodBucketSize > samplePeriod");
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("samplePeriodBucketSize <= samplePeriod"));
    }
}
public static String buildGlueExpression(Map<Column, Domain> partitionPredicates) {
    List<String> perColumnExpressions = new ArrayList<>();
    int expressionLength = 0;

    for (Map.Entry<Column, Domain> partitionPredicate : partitionPredicates.entrySet()) {
        String columnName = partitionPredicate.getKey().getName();
        if (JSQL_PARSER_RESERVED_KEYWORDS.contains(columnName.toUpperCase(ENGLISH))) {
            // The column name is a reserved keyword in the grammar of the SQL parser used internally by Glue API
            continue;
        }
        Domain domain = partitionPredicate.getValue();
        if (domain != null && !domain.isAll()) {
            Optional<String> columnExpression = buildGlueExpressionForSingleDomain(columnName, domain);
            if (columnExpression.isPresent()) {
                int newExpressionLength = expressionLength + columnExpression.get().length();
                if (expressionLength > 0) {
                    newExpressionLength += CONJUNCT_SEPARATOR.length();
                }
                if (newExpressionLength > GLUE_EXPRESSION_CHAR_LIMIT) {
                    continue;
                }
                perColumnExpressions.add((columnExpression.get()));
                expressionLength = newExpressionLength;
            }
        }
    }

    return Joiner.on(CONJUNCT_SEPARATOR).join(perColumnExpressions);
}
@Test
public void testBuildGlueExpressionTupleDomainEqualAndRangeLong() {
    Map<Column, Domain> predicates = new PartitionFilterBuilder(HIVE_TYPE_TRANSLATOR)
        .addBigintValues("col1", 3L)
        .addRanges("col1", Range.greaterThan(BIGINT, 100L))
        .addRanges("col1", Range.lessThan(BIGINT, 0L))
        .build();
    String expression = buildGlueExpression(predicates);

    assertEquals(expression, "((col1 < 0) OR (col1 > 100) OR (col1 = 3))");
}
public static int checkNotNegative(int value, String paramName) {
    if (value < 0) {
        throw new IllegalArgumentException(paramName + " is " + value + " but must be >= 0");
    }
    return value;
}
@Test
public void test_checkNotNegative_whenPositive() {
    checkNotNegative(1, "foo");
}
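The happy-path test above never exercises the throwing branch of checkNotNegative; a minimal negative-case sketch, with the expected message derived directly from the method body above (the test name is illustrative):

@Test
public void test_checkNotNegative_whenNegative() {
    try {
        checkNotNegative(-1, "foo");
        fail("expected IllegalArgumentException for a negative value");
    } catch (IllegalArgumentException expected) {
        // the message format comes straight from checkNotNegative
        assertEquals("foo is -1 but must be >= 0", expected.getMessage());
    }
}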
public String decode(byte[] val) {
    return codecs[0].decode(val, 0, val.length);
}
@Test
public void testDecodeChinesePersonNameUTF8() {
    assertEquals(CHINESE_PERSON_NAME_UTF8, utf8().decode(CHINESE_PERSON_NAME_UTF8_BYTES));
}
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener,
        new HostPreferences(session.getHost()).getInteger("box.listing.chunksize"));
}
@Test(expected = NotfoundException.class)
public void testNotFound() throws Exception {
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    new BoxListService(session, fileid).list(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(directory)),
        new DisabledListProgressListener());
}
public abstract int release(byte[] array);
@Test
public void testAllocateRecycle() throws Exception {
    final int countThreshold = 4;
    final int countLimit = 8;
    final long countResetTimePeriodMs = 200L;
    final ByteArrayManager.Impl bam = new ByteArrayManager.Impl(
        new ByteArrayManager.Conf(countThreshold, countLimit, countResetTimePeriodMs));

    final CounterMap counters = bam.getCounters();
    final ManagerMap managers = bam.getManagers();

    final int[] uncommonArrays = {0, 1, 2, 4, 8, 16, 32, 64};
    final int arrayLength = 1024;

    final Allocator allocator = new Allocator(bam);
    final Recycler recycler = new Recycler(bam);
    try {
        { // allocate within threshold
            for (int i = 0; i < countThreshold; i++) {
                allocator.submit(arrayLength);
            }
            waitForAll(allocator.futures);

            Assert.assertEquals(countThreshold, counters.get(arrayLength, false).getCount());
            Assert.assertNull(managers.get(arrayLength, false));
            for (int n : uncommonArrays) {
                Assert.assertNull(counters.get(n, false));
                Assert.assertNull(managers.get(n, false));
            }
        }

        { // recycle half of the arrays
            for (int i = 0; i < countThreshold / 2; i++) {
                recycler.submit(removeLast(allocator.futures).get());
            }
            for (Future<Integer> f : recycler.furtures) {
                Assert.assertEquals(-1, f.get().intValue());
            }
            recycler.furtures.clear();
        }

        { // allocate one more
            allocator.submit(arrayLength).get();
            Assert.assertEquals(countThreshold + 1, counters.get(arrayLength, false).getCount());
            Assert.assertNotNull(managers.get(arrayLength, false));
        }

        { // recycle the remaining arrays
            final int n = allocator.recycleAll(recycler);
            recycler.verify(n);
        }

        { // allocate until the maximum.
            for (int i = 0; i < countLimit; i++) {
                allocator.submit(arrayLength);
            }
            waitForAll(allocator.futures);

            // allocate one more should be blocked
            final AllocatorThread t = new AllocatorThread(arrayLength, bam);
            t.start();

            // check if the thread is waiting, timed wait or runnable.
            for (int i = 0; i < 5; i++) {
                Thread.sleep(100);
                final Thread.State threadState = t.getState();
                if (threadState != Thread.State.RUNNABLE
                    && threadState != Thread.State.WAITING
                    && threadState != Thread.State.TIMED_WAITING) {
                    Assert.fail("threadState = " + threadState);
                }
            }

            // recycle an array
            recycler.submit(removeLast(allocator.futures).get());
            Assert.assertEquals(1, removeLast(recycler.furtures).get().intValue());

            // check if the thread is unblocked
            Thread.sleep(100);
            Assert.assertEquals(Thread.State.TERMINATED, t.getState());

            // recycle the remaining, the recycle should be full.
            Assert.assertEquals(countLimit - 1, allocator.recycleAll(recycler));
            recycler.submit(t.array);
            recycler.verify(countLimit);

            // recycle one more; it should not increase the free queue size
            Assert.assertEquals(countLimit, bam.release(new byte[arrayLength]));
        }
    } finally {
        allocator.pool.shutdown();
        recycler.pool.shutdown();
    }
}
public String build() {
    return build(null, Maps.<String, Object>newHashMap());
}
@Test
public void testScheme() {
    UriSpec spec = new UriSpec("{scheme}://foo.com");

    ServiceInstanceBuilder<Void> builder = new ServiceInstanceBuilder<Void>();
    builder.id("x");
    builder.name("foo");
    builder.port(5);

    ServiceInstance<Void> instance = builder.build();
    assertEquals(spec.build(instance), "http://foo.com");

    builder.sslPort(5);
    instance = builder.build();
    assertEquals(spec.build(instance), "https://foo.com");
}
public ImmutableList<T> append(T element) {
    ArgumentUtil.notNull(element, "element");
    return new ImmutableList<>(element, this);
}
@Test
public void testAppend() {
    ImmutableList<String> empty = ImmutableList.empty();
    ImmutableList<String> a = empty.append("a");
    ImmutableList<String> ab = a.append("b");
    ImmutableList<String> abc = ab.append("c");

    assertEquals(a.size(), 1);
    assertEquals(ab.size(), 2);
    assertEquals(abc.size(), 3);

    assertEquals(a.toArray(), new Object[] { "a" });
    assertEquals(ab.toArray(), new Object[] { "a", "b" });
    assertEquals(abc.toArray(), new Object[] { "a", "b", "c" });
}
@Override
public InputStream open(String path) throws IOException {
    return new URL(path).openStream();
}
@Test
void readsFileContents() throws Exception {
    try (InputStream input = provider.open(getClass().getResource("/example.txt").toString())) {
        assertThat(new String(input.readAllBytes(), StandardCharsets.UTF_8).trim())
            .isEqualTo("whee");
    }
}
SuspensionLimit getConcurrentSuspensionLimit(ClusterApi clusterApi) {
    // Possible service clusters on a node as of 2024-06-09:
    //
    //       CLUSTER ID           SERVICE TYPE                  HEALTH       ASSOCIATION
    //    1  CCN-controllers      container-clustercontrollers  Slobrok      1, 3, or 6 in content cluster
    //    2  CCN                  distributor                   Slobrok      content cluster
    //    3  CCN                  storagenode                   Slobrok      content cluster
    //    4  CCN                  searchnode                    Slobrok      content cluster
    //    5  JCCN                 container                     Slobrok      jdisc container cluster
    //    6  admin                slobrok                       not checked  1-3 in jdisc container cluster
    //    7  metrics              metricsproxy-container        Slobrok      application
    //    8  admin                logd                          not checked  application
    //    9  admin                config-sentinel               not checked  application
    //   10  admin                configproxy                   not checked  application
    //   11  admin                logforwarder                  not checked  application
    //   12  controller           controller                    state/v1     controllers
    //   13  zone-config-servers  configserver                  state/v1     config servers
    //   14  controller-host      hostadmin                     state/v1     controller hosts
    //   15  configserver-host    hostadmin                     state/v1     config server hosts
    //   16  tenant-host          hostadmin                     state/v1     tenant hosts
    //   17  proxy-host           hostadmin                     state/v1     proxy hosts
    //
    // CCN refers to the content cluster's name, as specified in services.xml.
    // JCCN refers to the jdisc container cluster's name, as specified in services.xml.
    //
    // For instance a content node will have 2-4 and 7-11 and possibly 1, while a combined
    // cluster node may have all 1-11.
    //
    // The services on a node can be categorized into these main types, ref association column above:
    //   A  content
    //   B  container
    //   C  tenant host
    //   D  config server
    //   E  config server host
    //   F  controller
    //   G  controller host
    //   H  proxy (same as B)
    //   I  proxy host

    Optional<SuspensionLimit> override = clusterApi.clusterPolicyOverride().getSuspensionLimit();
    if (override.isPresent()) {
        return override.get();
    }

    if (clusterApi.serviceType().equals(ServiceType.CLUSTER_CONTROLLER)) {
        return SuspensionLimit.fromAllowedDown(1);
    }

    if (Set.of(ServiceType.STORAGE, ServiceType.SEARCH, ServiceType.DISTRIBUTOR)
            .contains(clusterApi.serviceType())) {
        // Delegate to the cluster controller
        return SuspensionLimit.fromAllowedDownRatio(1);
    }

    if (clusterApi.serviceType().equals(ServiceType.CONTAINER)) {
        return SuspensionLimit.fromAllowedDownRatio(0.1);
    }

    if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) {
        if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) {
            return SuspensionLimit.fromAllowedDown(1);
        }
        return SuspensionLimit.fromAllowedDownRatio(1);
    } else if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) {
        return SuspensionLimit.fromAllowedDownRatio(1);
    }

    if (Set.of(ServiceType.CONFIG_SERVER, ServiceType.CONTROLLER).contains(clusterApi.serviceType())) {
        return SuspensionLimit.fromAllowedDown(1);
    }

    if (clusterApi.serviceType().equals(ServiceType.HOST_ADMIN)) {
        if (Set.of(ClusterId.CONFIG_SERVER_HOST, ClusterId.CONTROLLER_HOST).contains(clusterApi.clusterId())) {
            return SuspensionLimit.fromAllowedDown(1);
        }
        return zone.system().isCd()
            ? SuspensionLimit.fromAllowedDownRatio(0.5)
            : SuspensionLimit.fromAllowedDownRatio(0.2);
    }

    // The above should cover all cases, but if not we'll return a reasonable default:
    return SuspensionLimit.fromAllowedDownRatio(0.1);
}
@Test
public void testSlobrokSuspensionLimit() {
    when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
    when(clusterApi.serviceType()).thenReturn(ServiceType.SLOBROK);
    assertEquals(SuspensionLimit.fromAllowedDown(1),
        policy.getConcurrentSuspensionLimit(clusterApi));
}
public static <T> CommonPager<T> result(final PageParameter pageParameter, final Supplier<Integer> countSupplier,
                                        final Supplier<List<T>> listSupplier) {
    Integer count = countSupplier.get();
    if (Objects.nonNull(count) && count > 0) {
        return new CommonPager<>(new PageParameter(pageParameter.getCurrentPage(),
            pageParameter.getPageSize(), count), listSupplier.get());
    }
    return new CommonPager<>(new PageParameter(pageParameter.getCurrentPage(),
        pageParameter.getPageSize(), 0), Collections.emptyList());
}
@Test
public void testEmptyResult() {
    final PageParameter pageParameter = new PageParameter(1, 10, 0);
    final CommonPager<String> result = PageResultUtils.result(pageParameter, () -> 0, ArrayList::new);
    assertEquals(result.getDataList().size(), 0);
}
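For contrast, a minimal sketch of the populated branch of result(), reusing only the accessors already exercised above (a positive count makes the pager call the list supplier):

@Test
public void testNonEmptyResult() {
    final PageParameter pageParameter = new PageParameter(1, 10, 0);
    // count > 0, so the list supplier is invoked instead of returning an empty pager
    final CommonPager<String> result = PageResultUtils.result(pageParameter,
        () -> 2, () -> Arrays.asList("a", "b"));
    assertEquals(result.getDataList().size(), 2);
}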
public static File applyBaseDirIfRelative(File baseDir, File actualFileToUse) {
    if (actualFileToUse == null) {
        return baseDir;
    }
    if (actualFileToUse.isAbsolute()) {
        return actualFileToUse;
    }
    if (StringUtils.isBlank(baseDir.getPath())) {
        return actualFileToUse;
    }
    return new File(baseDir, actualFileToUse.getPath());
}
@Test
void shouldUseSpecifiedFolderIfBaseDirIsEmpty() {
    assertThat(FileUtil.applyBaseDirIfRelative(new File(""), new File("zx"))).isEqualTo(new File("zx"));
}
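The null and absolute-path branches of applyBaseDirIfRelative follow directly from the method body; two minimal companion checks in the same style (test names are illustrative):

@Test
void shouldReturnBaseDirIfActualFileIsNull() {
    // a null file falls back to the base directory
    assertThat(FileUtil.applyBaseDirIfRelative(new File("base"), null)).isEqualTo(new File("base"));
}

@Test
void shouldIgnoreBaseDirIfSpecifiedFileIsAbsolute() {
    // an absolute path is returned unchanged
    File absolute = new File("zx").getAbsoluteFile();
    assertThat(FileUtil.applyBaseDirIfRelative(new File("base"), absolute)).isEqualTo(absolute);
}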
@Override
public double[] smooth(double[] input) {
    if (input.length < weights.length) {
        return input;
    }
    double[] smoothed = new double[input.length];
    int halfWindowFloored = weights.length / 2; // we want to exclude the center point

    fillSmoothedLeftSide(smoothed, input, halfWindowFloored);
    fillSmoothedRightSide(smoothed, input, halfWindowFloored);

    for (int i = halfWindowFloored; i < input.length - halfWindowFloored; i++) {
        for (int windowIndex = 0; windowIndex < smoothCoeff.length; windowIndex++) {
            smoothed[i] += smoothCoeff[windowIndex] * input[i + windowIndex + offsetFromWindowCenter];
        }
    }
    return smoothed;
}
@Test
public void Smooth_FromFakeTrackWithOutlier_RemoveBumps() {
    SavitzkyGolayFilter test = new SavitzkyGolayFilter(1.0);
    double[] input = new double[]{
        10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0,
        13.0, // <-- outlier
        10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0
    };

    double[] actual = test.smooth(input);

    assertThat(actual.length, equalTo(input.length));
    for (int i = 0; i < input.length; i++) {
        assertThat(actual[i], closeTo(10.0, 1.0));
    }
}
public static String pickBestEncoding(String acceptHeader, Set<String> customMimeTypesSupported) {
    return pickBestEncoding(acceptHeader, null, customMimeTypesSupported);
}
@Test(dataProvider = "successfulMatch") public void testPickBestEncodingWithValidMimeTypes(String header, String result) { Assert.assertEquals(RestUtils.pickBestEncoding(header, Collections.emptySet()), result); }
@Override
public ExecuteContext onThrow(ExecuteContext context) {
    ThreadLocalUtils.removeRequestTag();
    ThreadLocalUtils.removeRequestData();
    return context;
}
@Test
public void testOnThrow() {
    ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
    interceptor.onThrow(context);
    Assert.assertNull(ThreadLocalUtils.getRequestData());
}
public static MongoSinkConfig load(String yamlFile) throws IOException {
    final ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
    final MongoSinkConfig cfg = mapper.readValue(new File(yamlFile), MongoSinkConfig.class);
    return cfg;
}
@Test
public void testLoadMapConfigUrlFromSecret() throws IOException {
    final Map<String, Object> commonConfigMap = TestHelper.createCommonConfigMap();
    commonConfigMap.put("batchSize", TestHelper.BATCH_SIZE);
    commonConfigMap.put("batchTimeMs", TestHelper.BATCH_TIME);
    commonConfigMap.remove("mongoUri");

    SinkContext sinkContext = Mockito.mock(SinkContext.class);
    Mockito.when(sinkContext.getSecret("mongoUri"))
        .thenReturn(TestHelper.URI);

    final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap, sinkContext);

    assertEquals(cfg.getMongoUri(), TestHelper.URI);
    assertEquals(cfg.getDatabase(), TestHelper.DB);
    assertEquals(cfg.getCollection(), TestHelper.COLL);
    assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE);
    assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME);
}
public DefaultIssue setLine(@Nullable Integer l) {
    Preconditions.checkArgument(l == null || l > 0, "Line must be null or greater than zero (got %s)", l);
    this.line = l;
    return this;
}
@Test
void setLine_whenLineIsZero_shouldThrowException() {
    assertThatThrownBy(() -> issue.setLine(0))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Line must be null or greater than zero (got 0)");
}
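setLine is also a fluent setter that accepts null and positive values; a minimal companion check relying only on the return-this contract visible in the method above (test name is illustrative):

@Test
void setLine_whenLineIsNullOrPositive_shouldReturnSameInstance() {
    // both precondition branches that do not throw; the setter returns this
    assertThat(issue.setLine(null)).isSameAs(issue);
    assertThat(issue.setLine(42)).isSameAs(issue);
}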
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
@Test
public void testEthSign() throws Exception {
    web3j.ethSign(
            "0x8a3106a3e50576d4b6794a0e74d3bb5f8c9acaab",
            "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
        .send();

    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_sign\","
            + "\"params\":[\"0x8a3106a3e50576d4b6794a0e74d3bb5f8c9acaab\","
            + "\"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"],"
            + "\"id\":1}");
}
@GetMapping(value = "/simple/nodes") @Secured(resource = Commons.NACOS_CORE_CONTEXT + "/cluster", action = ActionTypes.READ, signType = SignType.CONSOLE) public RestResult<Collection<String>> listSimpleNodes() { return RestResultUtils.success(memberManager.getMemberAddressInfos()); }
@Test
void testListSimpleNodes() {
    Mockito.when(serverMemberManager.getMemberAddressInfos()).thenReturn(Collections.singleton("1.1.1.1"));
    RestResult<Collection<String>> result = nacosClusterController.listSimpleNodes();
    assertEquals(1, result.getData().size());
}
@Override
public void showPreviewForKey(
    Keyboard.Key key, Drawable icon, View parentView, PreviewPopupTheme previewPopupTheme) {
    KeyPreview popup = getPopupForKey(key, parentView, previewPopupTheme);
    Point previewPosition = mPositionCalculator.calculatePositionForPreview(
        key, previewPopupTheme, getLocationInWindow(parentView));
    popup.showPreviewForKey(key, icon, previewPosition);
}
@Test
public void testSetupPopupLayoutForKeyDrawable() {
    final Drawable drawable = getApplicationContext().getDrawable(R.drawable.ic_accept);
    KeyPreviewsManager underTest =
        new KeyPreviewsManager(getApplicationContext(), mPositionCalculator, 3);

    underTest.showPreviewForKey(mTestKeys[0], drawable, mKeyboardView, mTheme);

    final PopupWindow window = getLatestCreatedPopupWindow();

    final TextView textView = window.getContentView().findViewById(R.id.key_preview_text);
    Assert.assertEquals(View.GONE, textView.getVisibility());

    final ImageView imageView = window.getContentView().findViewById(R.id.key_preview_icon);
    Assert.assertEquals(View.VISIBLE, imageView.getVisibility());
    Assert.assertSame(drawable, imageView.getDrawable());
}
void readEntries(ReadHandle lh, long firstEntry, long lastEntry, boolean shouldCacheEntry,
                 final AsyncCallbacks.ReadEntriesCallback callback, Object ctx) {
    final PendingReadKey key = new PendingReadKey(firstEntry, lastEntry);
    Map<PendingReadKey, PendingRead> pendingReadsForLedger =
        cachedPendingReads.computeIfAbsent(lh.getId(), (l) -> new ConcurrentHashMap<>());

    boolean listenerAdded = false;
    while (!listenerAdded) {
        AtomicBoolean createdByThisThread = new AtomicBoolean();
        FindPendingReadOutcome findBestCandidateOutcome =
            findPendingRead(key, pendingReadsForLedger, createdByThisThread);
        PendingRead pendingRead = findBestCandidateOutcome.pendingRead;
        if (findBestCandidateOutcome.needsAdditionalReads()) {
            AsyncCallbacks.ReadEntriesCallback wrappedCallback = new AsyncCallbacks.ReadEntriesCallback() {
                @Override
                public void readEntriesComplete(List<Entry> entries, Object ctx) {
                    PendingReadKey missingOnLeft = findBestCandidateOutcome.missingOnLeft;
                    PendingReadKey missingOnRight = findBestCandidateOutcome.missingOnRight;
                    if (missingOnRight != null && missingOnLeft != null) {
                        AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
                            new AsyncCallbacks.ReadEntriesCallback() {
                                @Override
                                public void readEntriesComplete(List<Entry> entriesFromLeft, Object dummyCtx1) {
                                    AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
                                        new AsyncCallbacks.ReadEntriesCallback() {
                                            @Override
                                            public void readEntriesComplete(List<Entry> entriesFromRight,
                                                                            Object dummyCtx2) {
                                                List<Entry> finalResult = new ArrayList<>(
                                                    entriesFromLeft.size() + entries.size()
                                                        + entriesFromRight.size());
                                                finalResult.addAll(entriesFromLeft);
                                                finalResult.addAll(entries);
                                                finalResult.addAll(entriesFromRight);
                                                callback.readEntriesComplete(finalResult, ctx);
                                            }

                                            @Override
                                            public void readEntriesFailed(ManagedLedgerException exception,
                                                                          Object dummyCtx3) {
                                                entries.forEach(Entry::release);
                                                entriesFromLeft.forEach(Entry::release);
                                                callback.readEntriesFailed(exception, ctx);
                                            }
                                        };
                                    rangeEntryCache.asyncReadEntry0(lh, missingOnRight.startEntry,
                                        missingOnRight.endEntry, shouldCacheEntry, readFromRightCallback, null);
                                }

                                @Override
                                public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx4) {
                                    entries.forEach(Entry::release);
                                    callback.readEntriesFailed(exception, ctx);
                                }
                            };
                        rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry,
                            missingOnLeft.endEntry, shouldCacheEntry, readFromLeftCallback, null);
                    } else if (missingOnLeft != null) {
                        AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
                            new AsyncCallbacks.ReadEntriesCallback() {
                                @Override
                                public void readEntriesComplete(List<Entry> entriesFromLeft, Object dummyCtx5) {
                                    List<Entry> finalResult =
                                        new ArrayList<>(entriesFromLeft.size() + entries.size());
                                    finalResult.addAll(entriesFromLeft);
                                    finalResult.addAll(entries);
                                    callback.readEntriesComplete(finalResult, ctx);
                                }

                                @Override
                                public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx6) {
                                    entries.forEach(Entry::release);
                                    callback.readEntriesFailed(exception, ctx);
                                }
                            };
                        rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry,
                            missingOnLeft.endEntry, shouldCacheEntry, readFromLeftCallback, null);
                    } else if (missingOnRight != null) {
                        AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
                            new AsyncCallbacks.ReadEntriesCallback() {
                                @Override
                                public void readEntriesComplete(List<Entry> entriesFromRight, Object dummyCtx7) {
                                    List<Entry> finalResult =
                                        new ArrayList<>(entriesFromRight.size() + entries.size());
                                    finalResult.addAll(entries);
                                    finalResult.addAll(entriesFromRight);
                                    callback.readEntriesComplete(finalResult, ctx);
                                }

                                @Override
                                public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx8) {
                                    entries.forEach(Entry::release);
                                    callback.readEntriesFailed(exception, ctx);
                                }
                            };
                        rangeEntryCache.asyncReadEntry0(lh, missingOnRight.startEntry,
                            missingOnRight.endEntry, shouldCacheEntry, readFromRightCallback, null);
                    }
                }

                @Override
                public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
                    callback.readEntriesFailed(exception, ctx);
                }
            };
            listenerAdded = pendingRead.addListener(wrappedCallback, ctx, key.startEntry, key.endEntry);
        } else {
            listenerAdded = pendingRead.addListener(callback, ctx, key.startEntry, key.endEntry);
        }

        if (createdByThisThread.get()) {
            CompletableFuture<List<EntryImpl>> readResult =
                rangeEntryCache.readFromStorage(lh, firstEntry, lastEntry, shouldCacheEntry);
            pendingRead.attach(readResult);
        }
    }
}
@Test public void simpleConcurrentReadMissingLeft() throws Exception { long firstEntry = 100; long endEntry = 199; long firstEntrySecondRead = firstEntry - 10; long endEntrySecondRead = endEntry; boolean shouldCacheEntry = false; PreparedReadFromStorage read1 = prepareReadFromStorage(lh, rangeEntryCache, firstEntry, endEntry, shouldCacheEntry); PreparedReadFromStorage readForLeft = prepareReadFromStorage(lh, rangeEntryCache, firstEntrySecondRead, firstEntry - 1, shouldCacheEntry); PendingReadsManager pendingReadsManager = new PendingReadsManager(rangeEntryCache); CapturingReadEntriesCallback callback = new CapturingReadEntriesCallback(); pendingReadsManager.readEntries(lh, firstEntry, endEntry, shouldCacheEntry, callback, CTX); CapturingReadEntriesCallback callback2 = new CapturingReadEntriesCallback(); pendingReadsManager.readEntries(lh, firstEntrySecondRead, endEntrySecondRead, shouldCacheEntry, callback2, CTX2); // complete the read from BK read1.storageReadCompleted(); // the first read can move forward callback.get(); readForLeft.storageReadCompleted(); callback2.get(); assertSame(callback.getCtx(), CTX); assertSame(callback2.getCtx(), CTX2); verifyRange(callback.entries, firstEntry, endEntry); verifyRange(callback2.entries, firstEntrySecondRead, endEntrySecondRead); }
@Operation(summary = "Gets the status of ongoing database migrations, if any", description = "Return the detailed status of ongoing database migrations" + " including starting date. If no migration is ongoing or needed it is still possible to call this endpoint and receive appropriate information.") @GetMapping public DatabaseMigrationsResponse getStatus() { Optional<Long> currentVersion = databaseVersion.getVersion(); checkState(currentVersion.isPresent(), NO_CONNECTION_TO_DB); DatabaseVersion.Status status = databaseVersion.getStatus(); if (status == DatabaseVersion.Status.UP_TO_DATE || status == DatabaseVersion.Status.REQUIRES_DOWNGRADE) { return new DatabaseMigrationsResponse(databaseMigrationState); } else if (!database.getDialect().supportsMigration()) { return new DatabaseMigrationsResponse(DatabaseMigrationState.Status.STATUS_NOT_SUPPORTED); } else { return switch (databaseMigrationState.getStatus()) { case RUNNING, FAILED, SUCCEEDED -> new DatabaseMigrationsResponse(databaseMigrationState); case NONE -> new DatabaseMigrationsResponse(DatabaseMigrationState.Status.MIGRATION_REQUIRED); default -> throw new IllegalArgumentException(UNSUPPORTED_DATABASE_MIGRATION_STATUS); }; } }
@Test void getStatus_migrationNotNeeded_returnUpToDateStatus() throws Exception { when(databaseVersion.getStatus()).thenReturn(DatabaseVersion.Status.UP_TO_DATE); when(migrationState.getStatus()).thenReturn(NONE); mockMvc.perform(get(DATABASE_MIGRATIONS_ENDPOINT)).andExpectAll(status().isOk(), content().json("{\"status\":\"NO_MIGRATION\",\"message\":\"Database is up-to-date, no migration needed.\"}")); }
public static <T extends Writable> WritableCoder<T> of(Class<T> clazz) { return new WritableCoder<>(clazz); }
@Test public void testNullWritableEncoding() throws Exception { NullWritable value = NullWritable.get(); WritableCoder<NullWritable> coder = WritableCoder.of(NullWritable.class); CoderProperties.coderDecodeEncodeEqual(coder, value); }
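Illustrative usage sketch, not from the corpus: round-tripping a Hadoop Text value through the coder; Beam's CoderUtils helper is assumed on the classpath.
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.hadoop.io.Text;
WritableCoder<Text> coder = WritableCoder.of(Text.class);
byte[] bytes = CoderUtils.encodeToByteArray(coder, new Text("hello")); // serializes via Writable.write
Text decoded = CoderUtils.decodeFromByteArray(coder, bytes); // deserializes via Writable.readFields
// decoded.toString() equals "hello"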
@Override public InputStream open(String path) throws IOException { try (InputStream in = delegate.open(path)) { final String config = new String(in.readAllBytes(), StandardCharsets.UTF_8); final String substituted = substitutor.replace(config); return new ByteArrayInputStream(substituted.getBytes(StandardCharsets.UTF_8)); } }
@Test void shouldSubstituteCorrectly() throws IOException { StringLookup dummyLookup = (x) -> "baz"; DummySourceProvider dummyProvider = new DummySourceProvider(); SubstitutingSourceProvider provider = new SubstitutingSourceProvider(dummyProvider, new StringSubstitutor(dummyLookup)); assertThat(provider.open("foo: ${bar}")).hasSameContentAs(new ByteArrayInputStream("foo: baz".getBytes(StandardCharsets.UTF_8))); // ensure that opened streams are closed assertThatIOException() .isThrownBy(() -> dummyProvider.lastStream.read()) .withMessage("Stream closed"); }
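A minimal wiring sketch, assuming Apache Commons Text and Dropwizard's standard file-based provider; everything else is as in the snippets above.
import io.dropwizard.configuration.FileConfigurationSourceProvider;
import org.apache.commons.text.StringSubstitutor;
// Replace ${VAR} tokens in config files with environment-variable values before parsing.
SubstitutingSourceProvider provider = new SubstitutingSourceProvider(new FileConfigurationSourceProvider(), new StringSubstitutor(System.getenv()));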
@Override public void acknowledgeMessage(List<Position> positions, AckType ackType, Map<String, Long> properties) { cursor.updateLastActive(); Position previousMarkDeletePosition = cursor.getMarkDeletedPosition(); if (ackType == AckType.Cumulative) { if (positions.size() != 1) { log.warn("[{}][{}] Invalid cumulative ack received with multiple message ids.", topicName, subName); return; } Position position = positions.get(0); if (log.isDebugEnabled()) { log.debug("[{}][{}] Cumulative ack on {}", topicName, subName, position); } cursor.asyncMarkDelete(position, mergeCursorProperties(properties), markDeleteCallback, previousMarkDeletePosition); } else { if (log.isDebugEnabled()) { log.debug("[{}][{}] Individual acks on {}", topicName, subName, positions); } cursor.asyncDelete(positions, deleteCallback, previousMarkDeletePosition); if (topic.getBrokerService().getPulsar().getConfig().isTransactionCoordinatorEnabled()) { positions.forEach(position -> { if (((ManagedCursorImpl) cursor).isMessageDeleted(position)) { pendingAckHandle.clearIndividualPosition(position); } }); } if (dispatcher != null) { dispatcher.getRedeliveryTracker().removeBatch(positions); } } if (!cursor.getMarkDeletedPosition().equals(previousMarkDeletePosition)) { this.updateLastMarkDeleteAdvancedTimestamp(); // Mark delete position advance ReplicatedSubscriptionSnapshotCache snapshotCache = this.replicatedSubscriptionSnapshotCache; if (snapshotCache != null) { ReplicatedSubscriptionsSnapshot snapshot = snapshotCache .advancedMarkDeletePosition(cursor.getMarkDeletedPosition()); if (snapshot != null) { topic.getReplicatedSubscriptionController() .ifPresent(c -> c.localSubscriptionUpdated(subName, snapshot)); } } } if (topic.getManagedLedger().isTerminated() && cursor.getNumberOfEntriesInBacklog(false) == 0) { // Notify all consumer that the end of topic was reached if (dispatcher != null) { checkAndApplyReachedEndOfTopicOrTopicMigration(topic, dispatcher.getConsumers()); } } }
@Test public void testAcknowledgeUpdateCursorLastActive() throws Exception { doAnswer((invocationOnMock) -> { ((AsyncCallbacks.DeleteCallback) invocationOnMock.getArguments()[1]) .deleteComplete(invocationOnMock.getArguments()[2]); return null; }).when(cursorMock).asyncDelete(any(List.class), any(AsyncCallbacks.DeleteCallback.class), any()); doCallRealMethod().when(cursorMock).updateLastActive(); doCallRealMethod().when(cursorMock).getLastActive(); List<Position> positionList = new ArrayList<>(); positionList.add(PositionFactory.create(1, 1)); long beforeAcknowledgeTimestamp = System.currentTimeMillis(); Thread.sleep(1); persistentSubscription.acknowledgeMessage(positionList, AckType.Individual, Collections.emptyMap()); // `acknowledgeMessage` should update cursor last active assertTrue(persistentSubscription.cursor.getLastActive() > beforeAcknowledgeTimestamp); }
@Override public WebhookPayload create(ProjectAnalysis analysis) { Writer string = new StringWriter(); try (JsonWriter writer = JsonWriter.of(string)) { writer.beginObject(); writeServer(writer); writeTask(writer, analysis.getCeTask()); writeAnalysis(writer, analysis, system2); writeProject(analysis, writer, analysis.getProject()); analysis.getBranch().ifPresent(b -> writeBranch(writer, analysis.getProject(), b)); analysis.getQualityGate().ifPresent(qualityGate -> writeQualityGate(writer, qualityGate)); writeAnalysisProperties(writer, analysis.getProperties()); writer.endObject().close(); return new WebhookPayload(analysis.getProject().getKey(), string.toString()); } }
@Test public void create_payload_for_no_analysis_date() { CeTask ceTask = new CeTask("#1", CeTask.Status.FAILED); ProjectAnalysis analysis = newAnalysis(ceTask, null, null, null, emptyMap()); WebhookPayload payload = underTest.create(analysis); assertThat(payload.getProjectKey()).isEqualTo(PROJECT_KEY); assertJson(payload.getJson()) .isSimilarTo("{" + " \"serverUrl\": \"http://foo\"," + " \"taskId\": \"#1\"," + " \"status\": \"FAILED\"," + " \"changedAt\": \"1970-01-01T01:25:00+0100\"," + " \"project\": {" + " \"key\": \"P1\"," + " \"name\": \"Project One\"" + " }," + " \"properties\": {" + " }" + "}"); }
public static byte[] encode(Predicate predicate) { Objects.requireNonNull(predicate, "predicate"); Slime slime = new Slime(); encode(predicate, slime.setObject()); return com.yahoo.slime.BinaryFormat.encode(slime); }
@Test void requireThatUnknownNodeThrows() { try { BinaryFormat.encode(SimplePredicates.newString("foo")); fail("expected UnsupportedOperationException for an unknown predicate node type"); } catch (UnsupportedOperationException expected) { // expected: the encoder only handles known node types } }
@Override public void resolveDiscardMsg(MessageExt msgExt) { log.error("MsgExt:{} has been checked too many times, so discard it by moving it to system topic TRANS_CHECK_MAXTIME_TOPIC", msgExt); try { MessageExtBrokerInner brokerInner = toMessageExtBrokerInner(msgExt); PutMessageResult putMessageResult = this.getBrokerController().getMessageStore().putMessage(brokerInner); if (putMessageResult != null && putMessageResult.getPutMessageStatus() == PutMessageStatus.PUT_OK) { log.info("Put checked-too-many-time half message to TRANS_CHECK_MAXTIME_TOPIC OK. Restored in queueOffset={}, " + "commitLogOffset={}, real topic={}", msgExt.getQueueOffset(), msgExt.getCommitLogOffset(), msgExt.getUserProperty(MessageConst.PROPERTY_REAL_TOPIC)); // discarded, then the num of half-messages minus 1 this.getBrokerController().getTransactionalMessageService().getTransactionMetrics().addAndGet(msgExt.getUserProperty(MessageConst.PROPERTY_REAL_TOPIC), -1); } else { log.error("Put checked-too-many-time half message to TRANS_CHECK_MAXTIME_TOPIC failed, real topic={}, msgId={}", msgExt.getTopic(), msgExt.getMsgId()); } } catch (Exception e) { log.warn("Put checked-too-many-time message to TRANS_CHECK_MAXTIME_TOPIC error.", e); } }
@Test public void sendCheckMessage() { listener.resolveDiscardMsg(createMessageExt()); }
public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); }
@Test public void testAvailableWindows() { MetricSampleAggregator<String, IntegerEntity> aggregator = new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW, 0, _metricDef); assertTrue(aggregator.availableWindows().isEmpty()); CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 0, WINDOW_MS, _metricDef); assertTrue(aggregator.availableWindows().isEmpty()); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 1, WINDOW_MS, _metricDef); List<Long> availableWindows = aggregator.availableWindows(); assertEquals(NUM_WINDOWS - 2, availableWindows.size()); for (int i = 0; i < NUM_WINDOWS - 2; i++) { assertEquals((i + 1) * WINDOW_MS, availableWindows.get(i).longValue()); } }
@Override public NSImage fileIcon(final Local file, final Integer size) { NSImage icon = null; if(file.exists()) { icon = this.load(file.getAbsolute(), size); if(null == icon) { return this.cache(file.getName(), this.convert(file.getName(), workspace.iconForFile(file.getAbsolute()), size), size); } } if(null == icon) { return this.iconNamed("notfound.tiff", size); } return icon; }
@Test public void testIconForPathFolder() throws Exception { final Path f = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)); final NSImageIconCache cache = new NSImageIconCache(); NSImage icon = cache.fileIcon(f, 16); assertNotNull(icon); assertTrue(icon.isValid()); assertFalse(icon.isTemplate()); }
@Override public InputStream getInputStream(final int columnIndex, final String type) throws SQLException { throw new SQLFeatureNotSupportedException("getInputStream"); }
@Test void assertGetInputStream() { LocalDataMergedResult actual = new LocalDataMergedResult(Collections.singletonList(new LocalDataQueryResultRow("value"))); assertThrows(SQLFeatureNotSupportedException.class, () -> actual.getInputStream(1, "Ascii")); }
public static void checkTypeMatch(SelTypes lhs, SelTypes rhs) { if (lhs != rhs) { throw new IllegalArgumentException( "Type mismatch, lhs type: " + lhs + ", rhs object type: " + rhs); } }
@Test public void testTypeMatch() { SelTypeUtil.checkTypeMatch(SelTypes.NULL, SelTypes.NULL); }
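Hypothetical negative case, assuming SelTypes also defines STRING and LONG constants: a mismatch surfaces both type names in the exception message.
try {
    SelTypeUtil.checkTypeMatch(SelTypes.STRING, SelTypes.LONG);
} catch (IllegalArgumentException expected) {
    // message: "Type mismatch, lhs type: STRING, rhs object type: LONG"
}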
public static String buildUrl(boolean isHttps, String serverAddr, String... subPaths) { StringBuilder sb = new StringBuilder(); if (isHttps) { sb.append(HTTPS_PREFIX); } else { sb.append(HTTP_PREFIX); } sb.append(serverAddr); String pre = null; for (String subPath : subPaths) { if (StringUtils.isBlank(subPath)) { continue; } Matcher matcher = CONTEXT_PATH_MATCH.matcher(subPath); if (matcher.find()) { throw new IllegalArgumentException("Illegal url path expression : " + subPath); } if (pre == null || !pre.endsWith("/")) { if (subPath.startsWith("/")) { sb.append(subPath); } else { sb.append('/').append(subPath); } } else { if (subPath.startsWith("/")) { sb.append(subPath.replaceFirst("\\/", "")); } else { sb.append(subPath); } } pre = subPath; } return sb.toString(); }
@Test void testBuildHttpUrl2() { assertThrows(IllegalArgumentException.class, () -> { String targetUrl = HttpUtils.buildUrl(false, "127.0.0.1:8080", "//v1/api/test"); assertNotEquals(exceptUrl, targetUrl); }); }
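Tracing the joining logic above, a happy-path sketch derived from the shown code (not from the source tests):
String url = HttpUtils.buildUrl(false, "127.0.0.1:8080", "v1", "/api", "test");
// -> "http://127.0.0.1:8080/v1/api/test": segments are joined with exactly one '/' regardless of leading slashes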
@Override public Map<String, Metric> getMetrics() { final Map<String, Metric> gauges = new HashMap<>(); gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() + mxBean.getNonHeapMemoryUsage().getInit()); gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() + mxBean.getNonHeapMemoryUsage().getUsed()); gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ? -1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax()); gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() + mxBean.getNonHeapMemoryUsage().getCommitted()); gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit()); gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed()); gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax()); gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted()); gauges.put("heap.usage", new RatioGauge() { @Override protected Ratio getRatio() { final MemoryUsage usage = mxBean.getHeapMemoryUsage(); return Ratio.of(usage.getUsed(), usage.getMax()); } }); gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit()); gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed()); gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax()); gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted()); gauges.put("non-heap.usage", new RatioGauge() { @Override protected Ratio getRatio() { final MemoryUsage usage = mxBean.getNonHeapMemoryUsage(); return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax()); } }); for (final MemoryPoolMXBean pool : memoryPools) { final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-")); gauges.put(name(poolName, "usage"), new RatioGauge() { @Override protected Ratio getRatio() { MemoryUsage usage = pool.getUsage(); return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax()); } }); gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax()); gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed()); gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted()); // Only register GC usage metrics if the memory pool supports usage statistics. if (pool.getCollectionUsage() != null) { gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () -> pool.getCollectionUsage().getUsed()); } gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit()); } return Collections.unmodifiableMap(gauges); }
@Test public void hasAGaugeForTotalCommitted() { final Gauge gauge = (Gauge) gauges.getMetrics().get("total.committed"); assertThat(gauge.getValue()) .isEqualTo(11L); }
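A minimal registration sketch using Dropwizard Metrics' MetricRegistry:
import com.codahale.metrics.MetricRegistry;
MetricRegistry registry = new MetricRegistry();
registry.registerAll(new MemoryUsageGaugeSet()); // exposes "heap.used", "total.max", "pools.<name>.usage", ...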
@Override public ParDoFn create( PipelineOptions options, CloudObject cloudUserFn, @Nullable List<SideInputInfo> sideInputInfos, TupleTag<?> mainOutputTag, Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices, DataflowExecutionContext<?> executionContext, DataflowOperationContext operationContext) throws Exception { DoFnInstanceManager instanceManager = fnCache.get( operationContext.nameContext().systemName(), () -> DoFnInstanceManagers.cloningPool(doFnExtractor.getDoFnInfo(cloudUserFn), options)); DoFnInfo<?, ?> doFnInfo = instanceManager.peek(); DataflowExecutionContext.DataflowStepContext stepContext = executionContext.getStepContext(operationContext); Iterable<PCollectionView<?>> sideInputViews = doFnInfo.getSideInputViews(); SideInputReader sideInputReader = executionContext.getSideInputReader(sideInputInfos, sideInputViews, operationContext); if (doFnInfo.getDoFn() instanceof BatchStatefulParDoOverrides.BatchStatefulDoFn) { // HACK: BatchStatefulDoFn is a class from DataflowRunner's overrides // that just instructs the worker to execute it differently. This will // be replaced by metadata in the Runner API payload BatchStatefulParDoOverrides.BatchStatefulDoFn fn = (BatchStatefulParDoOverrides.BatchStatefulDoFn) doFnInfo.getDoFn(); DoFn underlyingFn = fn.getUnderlyingDoFn(); return new BatchModeUngroupingParDoFn( (BatchModeExecutionContext.StepContext) stepContext, new SimpleParDoFn( options, DoFnInstanceManagers.singleInstance(doFnInfo.withFn(underlyingFn)), sideInputReader, doFnInfo.getMainOutput(), outputTupleTagsToReceiverIndices, stepContext, operationContext, doFnInfo.getDoFnSchemaInformation(), doFnInfo.getSideInputMapping(), runnerFactory)); } else if (doFnInfo.getDoFn() instanceof StreamingPCollectionViewWriterFn) { // HACK: StreamingPCollectionViewWriterFn is a class from // DataflowPipelineTranslator. Using the class as an indicator is a migration path // to simply having an indicator string. checkArgument( stepContext instanceof StreamingModeExecutionContext.StreamingModeStepContext, "stepContext must be a StreamingModeStepContext to use StreamingPCollectionViewWriterFn"); DataflowRunner.StreamingPCollectionViewWriterFn<Object> writerFn = (StreamingPCollectionViewWriterFn<Object>) doFnInfo.getDoFn(); return new StreamingPCollectionViewWriterParDoFn( (StreamingModeExecutionContext.StreamingModeStepContext) stepContext, writerFn.getView().getTagInternal(), writerFn.getDataCoder(), (Coder<BoundedWindow>) doFnInfo.getWindowingStrategy().getWindowFn().windowCoder()); } else { return new SimpleParDoFn( options, instanceManager, sideInputReader, doFnInfo.getMainOutput(), outputTupleTagsToReceiverIndices, stepContext, operationContext, doFnInfo.getDoFnSchemaInformation(), doFnInfo.getSideInputMapping(), runnerFactory); } }
@Test public void testFactorySimultaneousUse() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); CounterSet counters = new CounterSet(); TestDoFn initialFn = new TestDoFn(Collections.<TupleTag<String>>emptyList()); CloudObject cloudObject = getCloudObject(initialFn); ParDoFn parDoFn = factory.create( options, cloudObject, null, MAIN_OUTPUT, ImmutableMap.<TupleTag<?>, Integer>of(MAIN_OUTPUT, 0), BatchModeExecutionContext.forTesting(options, "testStage"), TestOperationContext.create(counters)); // The fn should not be reused while the first ParDoFn is not finished ParDoFn secondParDoFn = factory.create( options, cloudObject, null, MAIN_OUTPUT, ImmutableMap.<TupleTag<?>, Integer>of(MAIN_OUTPUT, 0), BatchModeExecutionContext.forTesting(options, "testStage"), TestOperationContext.create(counters)); Receiver rcvr = new OutputReceiver(); parDoFn.startBundle(rcvr); parDoFn.processElement(WindowedValue.valueInGlobalWindow("foo")); // Must be after the first call to process element for reallyStartBundle to have been called TestDoFn firstDoFn = (TestDoFn) ((SimpleParDoFn) parDoFn).getDoFnInfo().getDoFn(); secondParDoFn.startBundle(rcvr); secondParDoFn.processElement(WindowedValue.valueInGlobalWindow("spam")); // Must be after the first call to process element for reallyStartBundle to have been called TestDoFn secondDoFn = (TestDoFn) ((SimpleParDoFn) secondParDoFn).getDoFnInfo().getDoFn(); parDoFn.finishBundle(); secondParDoFn.finishBundle(); assertThat(firstDoFn, not(theInstance(secondDoFn))); assertThat(firstDoFn.state, equalTo(TestDoFn.State.FINISHED)); assertThat(secondDoFn.state, equalTo(TestDoFn.State.FINISHED)); }
public Chooser(K uniqueKey) { this(uniqueKey, new ArrayList<>()); }
@Test void testChooser() { //Test the correctness of Chooser, the weight of the final selected instance must be greater than 0 List<Instance> hosts = getInstanceList(); Instance target = getRandomInstance(hosts); assertTrue(hosts.contains(target) && target.getWeight() > 0); }
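The selection algorithm itself is not shown; a generic, self-contained sketch of weighted random choice (all names hypothetical, cumulative-weight technique assumed):
static <T> T weightedPick(java.util.List<T> items, double[] weights, java.util.Random rnd) {
    double[] cum = new double[weights.length];
    double sum = 0;
    for (int i = 0; i < weights.length; i++) { sum += weights[i]; cum[i] = sum; } // prefix sums
    double r = rnd.nextDouble() * sum; // uniform draw over the total weight
    int idx = java.util.Arrays.binarySearch(cum, r);
    return items.get(idx >= 0 ? idx : -idx - 1); // insertion point when r falls between sums
}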
@Override protected String buildUndoSQL() { TableRecords afterImage = sqlUndoLog.getAfterImage(); List<Row> afterImageRows = afterImage.getRows(); if (CollectionUtils.isEmpty(afterImageRows)) { throw new ShouldNeverHappenException("Invalid UNDO LOG"); } return generateDeleteSql(afterImageRows, afterImage); }
@Test public void buildUndoSQL() { String sql = executor.buildUndoSQL().toUpperCase(); Assertions.assertNotNull(sql); Assertions.assertTrue(sql.contains("DELETE")); Assertions.assertTrue(sql.contains("TABLE_NAME")); Assertions.assertTrue(sql.contains("ID")); }
public Connection getConnectionById(Long id) { Optional<Connection> con = connectionRepository.findById(id); if (con.isEmpty()) { throw new NotFoundException("Could not find connection with id: " + id); } return con.get(); }
@Test void connectionNotFound() { Optional<Connection> connectionOptional = Optional.empty(); when(connectionRepositoryMock.findById(anyLong())).thenReturn(connectionOptional); assertThrows(NotFoundException.class, () -> connectionServiceMock.getConnectionById(anyLong())); }
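Equivalent, slightly more idiomatic form using Optional.orElseThrow — a sketch of a possible refactor, not the project's code:
public Connection getConnectionById(Long id) {
    return connectionRepository.findById(id)
            .orElseThrow(() -> new NotFoundException("Could not find connection with id: " + id));
}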
@Override public int getMajorJavaVersion() { JavaVersion version = JavaVersion.current(); JavaPluginExtension javaPluginExtension = project.getExtensions().findByType(JavaPluginExtension.class); if (javaPluginExtension != null) { version = javaPluginExtension.getTargetCompatibility(); } return Integer.valueOf(version.getMajorVersion()); }
@Test public void testGetMajorJavaVersion_jvm8() { Assume.assumeThat(JavaVersion.current(), CoreMatchers.is(JavaVersion.VERSION_1_8)); assertThat(gradleProjectProperties.getMajorJavaVersion()).isEqualTo(8); }
@Override public TYPE getType() { return Delta.TYPE.CHANGE; }
@Test void testGetType() { // given Chunk<String> chunk = new Chunk<>(1, EMPTY_LIST); Delta<String> delta = new ChangeDelta<>(chunk, chunk); // when Delta.TYPE type = delta.getType(); // then assertThat(type).isEqualTo(Delta.TYPE.CHANGE); }
private boolean ldapLogin(String username, String password) throws AuthenticationException { return ldapTemplate.authenticate("", "(" + filterPrefix + "=" + username + ")", password); }
@Test void testldapLogin() { try { Boolean result = (Boolean) ldapLogin.invoke(ldapAuthenticationProvider, adminUserName, defaultPassWord); assertTrue(result); } catch (IllegalAccessException e) { fail(); } catch (InvocationTargetException e) { fail(); } try { Boolean result = (Boolean) ldapLogin.invoke(ldapAuthenticationProvider, adminUserName, "123"); assertFalse(result); } catch (IllegalAccessException e) { fail(); } catch (InvocationTargetException e) { fail(); } }
@Override public MapSettings setProperty(String key, String value) { return (MapSettings) super.setProperty(key, value); }
@Test public void setStringArrayWithNullValues() { Settings settings = new MapSettings(definitions); settings.setProperty("multi_values", new String[]{"A,B", null, "C,D"}); String[] array = settings.getStringArray("multi_values"); assertThat(array).isEqualTo(new String[]{"A,B", "", "C,D"}); }
@ShellMethod(key = "compactions showarchived", value = "Shows compaction details for specified time window") public String compactionsShowArchived( @ShellOption(value = {"--includeExtraMetadata"}, help = "Include extra metadata", defaultValue = "false") final boolean includeExtraMetadata, @ShellOption(value = {"--startTs"}, defaultValue = ShellOption.NULL, help = "start time for compactions, default: now - 10 days") String startTs, @ShellOption(value = {"--endTs"}, defaultValue = ShellOption.NULL, help = "end time for compactions, default: now - 1 day") String endTs, @ShellOption(value = {"--limit"}, help = "Limit compactions", defaultValue = "-1") final Integer limit, @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField, @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending, @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly) { if (StringUtils.isNullOrEmpty(startTs)) { startTs = getTimeDaysAgo(10); } if (StringUtils.isNullOrEmpty(endTs)) { endTs = getTimeDaysAgo(1); } HoodieTableMetaClient client = checkAndGetMetaClient(); HoodieArchivedTimeline archivedTimeline = client.getArchivedTimeline(); archivedTimeline.loadCompactionDetailsInMemory(startTs, endTs); try { return printAllCompactions(archivedTimeline, compactionPlanReader(this::readCompactionPlanForArchivedTimeline, archivedTimeline), includeExtraMetadata, sortByField, descending, limit, headerOnly); } finally { archivedTimeline.clearInstantDetailsFromMemory(startTs, endTs); } }
@Test public void testCompactionsShowArchived() throws IOException { generateCompactionInstances(); generateArchive(); Object result = shell.evaluate(() -> "compactions showarchived --startTs 001 --endTs 005"); // generate result Map<String, Integer> fileMap = new HashMap<>(); fileMap.put("001", 1); fileMap.put("003", 2); fileMap.put("005", 3); List<Comparable[]> rows = Arrays.asList("005", "003", "001").stream().map(i -> new Comparable[] {i, HoodieInstant.State.COMPLETED, fileMap.get(i)}).collect(Collectors.toList()); Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>(); TableHeader header = new TableHeader().addTableHeaderField("Compaction Instant Time").addTableHeaderField("State") .addTableHeaderField("Total FileIds to be Compacted"); String expected = HoodiePrintHelper.print(header, fieldNameToConverterMap, "", false, -1, false, rows); expected = removeNonWordAndStripSpace(expected); String got = removeNonWordAndStripSpace(result.toString()); assertEquals(expected, got); }
public boolean shouldRestartTask(TaskStatus status) { return includeTasks && (!onlyFailed || status.state() == AbstractStatus.State.FAILED); }
@Test public void restartOnlyFailedTasks() { RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, true, true); assertTrue(restartRequest.shouldRestartTask(createTaskStatus(AbstractStatus.State.FAILED))); assertFalse(restartRequest.shouldRestartTask(createTaskStatus(AbstractStatus.State.RUNNING))); assertFalse(restartRequest.shouldRestartTask(createTaskStatus(AbstractStatus.State.PAUSED))); }
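The predicate reduces to a small truth table; one extra hypothetical check (reusing the test's createTaskStatus helper, and assuming the constructor order is connectorName, onlyFailed, includeTasks) covers the includeTasks=false case:
// includeTasks=false                  -> never restart tasks
// includeTasks=true, onlyFailed=false -> restart tasks in any state
// includeTasks=true, onlyFailed=true  -> restart only FAILED tasks
assertFalse(new RestartRequest(CONNECTOR_NAME, true, false).shouldRestartTask(createTaskStatus(AbstractStatus.State.FAILED)));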
public static TableSchema toSchema(RowType rowType) { TableSchema.Builder builder = TableSchema.builder(); for (RowType.RowField field : rowType.getFields()) { builder.field(field.getName(), TypeConversions.fromLogicalToDataType(field.getType())); } return builder.build(); }
@Test public void testConvertFlinkSchemaWithPrimaryKeys() { Schema icebergSchema = new Schema( Lists.newArrayList( Types.NestedField.required(1, "int", Types.IntegerType.get()), Types.NestedField.required(2, "string", Types.StringType.get())), Sets.newHashSet(1, 2)); TableSchema tableSchema = FlinkSchemaUtil.toSchema(icebergSchema); assertThat(tableSchema.getPrimaryKey()) .isPresent() .get() .satisfies(k -> assertThat(k.getColumns()).containsExactly("int", "string")); }
public static void mergeMap(boolean decrypt, Map<String, Object> config) { merge(decrypt, config); }
@Test public void testMap_valueCastToBoolean() { Map<String, Object> testMap = new HashMap<>(); testMap.put("key", "${TEST.boolean: true}"); CentralizedManagement.mergeMap(true, testMap); Assert.assertTrue(testMap.get("key") instanceof Boolean); }
@Nullable static String channelKind(@Nullable Destination destination) { if (destination == null) return null; return isQueue(destination) ? "queue" : "topic"; }
@Test void channelKind_queueAndTopic_null() { assertThat(MessageParser.channelKind(null)).isNull(); }
@CheckForNull public String clientSecret() { return config.get(CONSUMER_SECRET).orElse(null); }
@Test public void return_client_secret() { settings.setProperty("sonar.auth.bitbucket.clientSecret.secured", "secret"); assertThat(underTest.clientSecret()).isEqualTo("secret"); }
@Override public double mean() { return mu; }
@Test public void testMean() { System.out.println("mean"); LogisticDistribution instance = new LogisticDistribution(2.0, 1.0); instance.rand(); assertEquals(2.0, instance.mean(), 1E-7); }
@Override public PayloadSerializer getSerializer(Schema schema, Map<String, Object> tableParams) { Class<? extends Message> protoClass = getClass(tableParams); inferAndVerifySchema(protoClass, schema); SimpleFunction<byte[], Row> toRowFn = ProtoMessageSchema.getProtoBytesToRowFn(protoClass); return PayloadSerializer.of( ProtoMessageSchema.getRowToProtoBytesFn(protoClass), bytes -> { Row rawRow = toRowFn.apply(bytes); return castRow(rawRow, rawRow.getSchema(), schema); }); }
@Test public void serialize() throws Exception { byte[] bytes = provider .getSerializer( SHUFFLED_SCHEMA, ImmutableMap.of("protoClass", PayloadMessages.TestMessage.class.getName())) .serialize(ROW); PayloadMessages.TestMessage result = PayloadMessages.TestMessage.parseFrom(bytes); assertEquals(MESSAGE, result); }
public boolean isTerminated() { if (getInternalResource().getStatus() != null) { final boolean podFailed = PodPhase.Failed.name().equals(getInternalResource().getStatus().getPhase()); final boolean containersFailed = getInternalResource().getStatus().getContainerStatuses().stream() .anyMatch( e -> e.getState() != null && e.getState().getTerminated() != null); return containersFailed || podFailed; } return false; }
@Test void testIsTerminatedShouldReturnTrueWhenPodFailed() { final Pod pod = new PodBuilder().build(); pod.setStatus( new PodStatusBuilder() .withPhase(KubernetesPod.PodPhase.Failed.name()) .withMessage("Pod Node didn't have enough resource") .withReason("OutOfMemory") .build()); assertThat(new KubernetesPod(pod).isTerminated()).isTrue(); }
@Override public Collection<SQLToken> generateSQLTokens(final SelectStatementContext sqlStatementContext) { Collection<SQLToken> result = new LinkedHashSet<>(); ShardingSphereSchema schema = sqlStatementContext.getTablesContext().getSchemaName().map(schemas::get).orElseGet(() -> defaultSchema); for (OrderByItem each : getGroupByItems(sqlStatementContext)) { if (each.getSegment() instanceof ColumnOrderByItemSegment) { ColumnSegment columnSegment = ((ColumnOrderByItemSegment) each.getSegment()).getColumn(); Map<String, String> columnTableNames = sqlStatementContext.getTablesContext().findTableNames(Collections.singleton(columnSegment), schema); generateSQLToken(columnSegment, columnTableNames).ifPresent(result::add); } } return result; }
@Test void assertGenerateSQLTokens() { assertThat(generator.generateSQLTokens(buildSelectStatementContext()).size(), is(1)); }
public RemotingDesc parserRemotingServiceInfo(Object bean, String beanName, RemotingParser remotingParser) { if (remotingServiceMap.containsKey(bean)) { return remotingServiceMap.get(bean); } RemotingDesc remotingBeanDesc = remotingParser.getServiceDesc(bean, beanName); if (remotingBeanDesc == null) { return null; } remotingServiceMap.put(bean, remotingBeanDesc); if (remotingParser.isReference(bean, beanName)) { //reference bean, TCC proxy remotingBeanDesc.setReference(true); } return remotingBeanDesc; }
@Test public void testParserRemotingServiceInfoFail() { SimpleBean simpleBean = new SimpleBean(); assertNull(remotingParser.parserRemotingServiceInfo(simpleBean, simpleBean.getClass().getName(), new SimpleRemotingParser())); }
public Optional<String> getNameByActiveVersion(final String path) { Matcher matcher = activeVersionPathPattern.matcher(path); return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty(); }
@Test void assertGetNameByActiveVersion() { Optional<String> actual = converter.getNameByActiveVersion("/metadata/foo_db/rules/foo/tables/foo_table/active_version"); assertTrue(actual.isPresent()); assertThat(actual.get(), is("foo_table")); }
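The regex itself is not shown; a hedged sketch of the kind of pattern activeVersionPathPattern plausibly is, with the pattern string purely hypothetical:
import java.util.regex.Matcher;
import java.util.regex.Pattern;
Pattern p = Pattern.compile("/tables/(\\w+)/active_version$"); // hypothetical pattern
Matcher m = p.matcher("/metadata/foo_db/rules/foo/tables/foo_table/active_version");
String name = m.find() ? m.group(1) : null; // "foo_table"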
public static HttpRequest toNettyRequest(RestRequest request) throws Exception { HttpMethod nettyMethod = HttpMethod.valueOf(request.getMethod()); URL url = new URL(request.getURI().toString()); String path = url.getFile(); // RFC 2616, section 5.1.2: // Note that the absolute path cannot be empty; if none is present in the original URI, // it MUST be given as "/" (the server root). if (path.isEmpty()) { path = "/"; } ByteBuf content = Unpooled.wrappedBuffer(request.getEntity().asByteBuffer()); HttpRequest nettyRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, nettyMethod, path, content); nettyRequest.headers().set(HttpConstants.CONTENT_LENGTH, request.getEntity().length()); setHttpHeadersAndCookies(request, url, nettyRequest); return nettyRequest; }
@Test public void testRestToNettyRequest() throws Exception { RestRequestBuilder restRequestBuilder = new RestRequestBuilder(new URI(ANY_URI)); restRequestBuilder.setMethod("POST"); restRequestBuilder.setEntity(ByteString.copyString(ANY_ENTITY, Charset.defaultCharset())); restRequestBuilder.setHeader("Content-Length", Integer.toString(restRequestBuilder.getEntity().length())); restRequestBuilder.setHeader("Content-Type", "application/json"); restRequestBuilder.setCookies(Collections.singletonList(ANY_COOKIE)); RestRequest restRequest = restRequestBuilder.build(); HttpRequest nettyRequest = NettyRequestAdapter.toNettyRequest(restRequest); Assert.assertEquals(nettyRequest.uri(), "/foo/bar?q=baz"); Assert.assertEquals(nettyRequest.method(), HttpMethod.POST); Assert.assertEquals(nettyRequest.protocolVersion(), HttpVersion.HTTP_1_1); Assert.assertEquals(nettyRequest.headers().get("Content-Length"), Integer.toString(restRequestBuilder.getEntity().length())); Assert.assertEquals(nettyRequest.headers().get("Content-Type"), "application/json"); Assert.assertEquals(nettyRequest.headers().get("Cookie"), ANY_COOKIE); }
public MetadataBlockType getType() { return _errCodeToExceptionMap.isEmpty() ? MetadataBlockType.EOS : MetadataBlockType.ERROR; }
@Test public void v2EosWithoutStatsIsReadInV1AsEosWithoutStats() throws IOException { ByteBuffer stats = ByteBuffer.wrap(new byte[]{0, 0, 0, 0}); MetadataBlock metadataBlock = new MetadataBlock(Lists.newArrayList(stats)); byte[] bytes = metadataBlock.toBytes(); // This is how V1 blocks were deserialized ByteBuffer buff = ByteBuffer.wrap(bytes); DataBlockUtils.readVersionType(buff); // consume the version information before decoding V1MetadataBlock v1MetadataBlock = new V1MetadataBlock(buff); assertEquals(v1MetadataBlock.getType(), MetadataBlock.MetadataBlockType.EOS, "Expected EOS type"); assertEquals(v1MetadataBlock.getStats(), Collections.emptyMap(), "Expected no stats by stage"); assertEquals(v1MetadataBlock.getExceptions(), metadataBlock.getExceptions(), "Expected no exceptions"); }
@Override public void process(Exchange exchange) throws Exception { JsonElement json = getBodyAsJsonElement(exchange); String operation = exchange.getIn().getHeader(CouchDbConstants.HEADER_METHOD, String.class); if (ObjectHelper.isEmpty(operation)) { Response<DocumentResult> save = saveJsonElement(json); if (save == null) { throw new CouchDbException("Could not save document [unknown reason]", exchange); } if (LOG.isTraceEnabled()) { LOG.trace("Document saved [_id={}, _rev={}]", save.getResult().getId(), save.getResult().getRev()); } exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, save.getResult().getRev()); exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, save.getResult().getId()); } else { if (operation.equalsIgnoreCase(CouchDbOperations.DELETE.toString())) { Response<DocumentResult> delete = deleteJsonElement(json); if (delete == null) { throw new CouchDbException("Could not delete document [unknown reason]", exchange); } if (LOG.isTraceEnabled()) { LOG.trace("Document deleted [_id={}, _rev={}]", delete.getResult().getId(), delete.getResult().getRev()); } exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, delete.getResult().getRev()); exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, delete.getResult().getId()); } if (operation.equalsIgnoreCase(CouchDbOperations.GET.toString())) { String docId = exchange.getIn().getHeader(CouchDbConstants.HEADER_DOC_ID, String.class); if (docId == null) { throw new CouchDbException("Could not get document, document id is missing", exchange); } Object response = getElement(docId); if (LOG.isTraceEnabled()) { LOG.trace("Document retrieved [_id={}]", docId); } exchange.getIn().setBody(response); } } }
@Test void testBodyMandatory() throws Exception { when(msg.getMandatoryBody()).thenThrow(InvalidPayloadException.class); assertThrows(InvalidPayloadException.class, () -> { producer.process(exchange); }); }
@Override public TopicAssignment place( PlacementSpec placement, ClusterDescriber cluster ) throws InvalidReplicationFactorException { RackList rackList = new RackList(random, cluster.usableBrokers()); throwInvalidReplicationFactorIfNonPositive(placement.numReplicas()); throwInvalidReplicationFactorIfZero(rackList.numUnfencedBrokers()); throwInvalidReplicationFactorIfTooFewBrokers(placement.numReplicas(), rackList.numTotalBrokers()); List<List<Integer>> placements = new ArrayList<>(placement.numPartitions()); for (int partition = 0; partition < placement.numPartitions(); partition++) { placements.add(rackList.place(placement.numReplicas())); } return new TopicAssignment( placements.stream().map(replicas -> new PartitionAssignment(replicas, cluster)).collect(Collectors.toList()) ); }
@Test public void testRackListNotEnoughBrokers() { MockRandom random = new MockRandom(); RackList rackList = new RackList(random, Arrays.asList( new UsableBroker(11, Optional.of("1"), false), new UsableBroker(10, Optional.of("1"), false)).iterator()); assertEquals("The target replication factor of 3 cannot be reached because only " + "2 broker(s) are registered.", assertThrows(InvalidReplicationFactorException.class, () -> rackList.place(3)).getMessage()); }
public static <K, V> MutableMultimap<K, V> groupBy( Iterable<V> iterable, Function<? super V, ? extends K> function) { return FJIterate.groupBy(iterable, function, FJIterate.DEFAULT_MIN_FORK_SIZE, FJIterate.FORK_JOIN_POOL); }
@Test public void groupBy() { FastList<String> source = FastList.newListWith("Ted", "Sally", "Mary", "Bob", "Sara"); Multimap<Character, String> result1 = FJIterate.groupBy(source, StringFunctions.firstLetter(), 1); Multimap<Character, String> result2 = FJIterate.groupBy(Collections.synchronizedList(source), StringFunctions.firstLetter(), 1); Multimap<Character, String> result3 = FJIterate.groupBy(Collections.synchronizedCollection(source), StringFunctions.firstLetter(), 1); Multimap<Character, String> result4 = FJIterate.groupBy(LazyIterate.adapt(source), StringFunctions.firstLetter(), 1); Multimap<Character, String> result5 = FJIterate.groupBy(new ArrayList<>(source), StringFunctions.firstLetter(), 1); Multimap<Character, String> result6 = FJIterate.groupBy(source.toSet(), StringFunctions.firstLetter(), 1); Multimap<Character, String> result7 = FJIterate.groupBy(source.toMap(Functions.getStringPassThru(), Functions.getStringPassThru()), StringFunctions.firstLetter(), 1); Multimap<Character, String> result8 = FJIterate.groupBy(source.toBag(), StringFunctions.firstLetter(), 1); Multimap<Character, String> result9 = FJIterate.groupBy(source.toImmutable(), StringFunctions.firstLetter(), 1); MutableMultimap<Character, String> expected = HashBagMultimap.newMultimap(); expected.put('T', "Ted"); expected.put('S', "Sally"); expected.put('M', "Mary"); expected.put('B', "Bob"); expected.put('S', "Sara"); assertEquals(expected, HashBagMultimap.newMultimap(result1)); assertEquals(expected, HashBagMultimap.newMultimap(result2)); assertEquals(expected, HashBagMultimap.newMultimap(result3)); assertEquals(expected, HashBagMultimap.newMultimap(result4)); assertEquals(expected, HashBagMultimap.newMultimap(result5)); assertEquals(expected, HashBagMultimap.newMultimap(result6)); assertEquals(expected, HashBagMultimap.newMultimap(result7)); assertEquals(expected, HashBagMultimap.newMultimap(result8)); assertEquals(expected, HashBagMultimap.newMultimap(result9)); assertThrows(IllegalArgumentException.class, () -> FJIterate.groupBy(null, null, 1)); }
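Minimal direct usage of the delegating overload above, with the same Eclipse Collections helpers the test uses:
MutableMultimap<Character, String> byFirstLetter = FJIterate.groupBy(FastList.newListWith("Ted", "Sally"), StringFunctions.firstLetter());
// byFirstLetter.get('T') contains "Ted"; byFirstLetter.get('S') contains "Sally"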
void processJob() { boolean success = false; UUID jobId = JobMetadata.getJobId(); monitor.debug(() -> format("Begin processing jobId: %s", jobId), EventCode.WORKER_JOB_STARTED); try { markJobStarted(jobId); hooks.jobStarted(jobId); PortabilityJob job = store.findJob(jobId); JobAuthorization jobAuthorization = job.jobAuthorization(); monitor.debug( () -> format( "Starting copy job, id: %s, source: %s, destination: %s", jobId, job.exportService(), job.importService())); String scheme = jobAuthorization.encryptionScheme(); AuthDataDecryptService decryptService = getAuthDecryptService(scheme); if (decryptService == null) { monitor.severe( () -> format( "No auth decrypter found for scheme %s while processing job: %s", scheme, jobId)); return; } String encrypted = jobAuthorization.encryptedAuthData(); byte[] encodedPrivateKey = JobMetadata.getPrivateKey(); AuthDataPair pair = decryptService.decrypt(encrypted, encodedPrivateKey); AuthData exportAuthData = objectMapper.readValue(pair.getExportAuthData(), AuthData.class); AuthData importAuthData = objectMapper.readValue(pair.getImportAuthData(), AuthData.class); String exportInfoStr = job.exportInformation(); Optional<ExportInformation> exportInfo = Optional.empty(); if (!Strings.isNullOrEmpty(exportInfoStr)) { exportInfo = Optional.ofNullable(objectMapper.readValue(exportInfoStr, ExportInformation.class)); } // Copy the data dtpInternalMetricRecorder.startedJob( JobMetadata.getDataType(), JobMetadata.getExportService(), JobMetadata.getImportService()); JobMetadata.getStopWatch().start(); copier.copy(exportAuthData, importAuthData, jobId, exportInfo); success = true; } catch (CopyExceptionWithFailureReason e) { String failureReason = e.getFailureReason(); if (failureReason.contains(FailureReasons.DESTINATION_FULL.toString())) { monitor.info(() -> "The remaining storage in the user's account is not enough to perform this operation.", e); } else if (failureReason.contains(FailureReasons.INVALID_TOKEN.toString()) || failureReason.contains(FailureReasons.SESSION_INVALIDATED.toString()) || failureReason.contains(FailureReasons.UNCONFIRMED_USER.toString()) || failureReason.contains(FailureReasons.USER_CHECKPOINTED.toString())) { monitor.info(() -> "Got token error", e); } else { monitor.severe( () -> format( "Error with failure code '%s' while processing jobId: %s", failureReason, jobId), e, EventCode.WORKER_JOB_ERRORED); } addFailureReasonToJob(jobId, failureReason); } catch (IOException | CopyException | RuntimeException e) { monitor.severe(() -> "Error processing jobId: " + jobId, e, EventCode.WORKER_JOB_ERRORED); } finally { // The errors returned by copier.getErrors are those logged by the idempotentImportExecutor // and are distinct from the exceptions thrown by copier.copy final Collection<ErrorDetail> loggedErrors = copier.getErrors(jobId); final int numErrors = loggedErrors.size(); // success is set to true above if copy returned without throwing success &= loggedErrors.isEmpty(); monitor.debug( () -> format("Finished processing jobId: %s with %d error(s).", jobId, numErrors), EventCode.WORKER_JOB_FINISHED); addErrorsAndMarkJobFinished(jobId, success, loggedErrors); hooks.jobFinished(jobId, success); dtpInternalMetricRecorder.finishedJob( JobMetadata.getDataType(), JobMetadata.getExportService(), JobMetadata.getImportService(), success, JobMetadata.getStopWatch().elapsed()); monitor.flushLogs(); JobMetadata.reset(); } }
@Test public void processJobGetsErrorsEvenWhenCopyThrows() throws CopyException, IOException { Mockito.doThrow(new CopyException("error", new Exception())).when(copier) .copy(exportAuthData, importAuthData, jobId, Optional.of(exportInfo)); processor.processJob(); Mockito.verify(copier).getErrors(jobId); }
@Override public DescriptiveUrl toUrl(final Host bookmark) { try { // Run password flow final TokenResponse response; try { final Host target = new Host(new DAVSSLProtocol(), "oauth.freenet.de"); final X509TrustManager trust = new KeychainX509TrustManager(new DisabledCertificateTrustCallback(), new DefaultTrustManagerHostnameCallback(target), CertificateStoreFactory.get()); final X509KeyManager key = new KeychainX509KeyManager(new DisabledCertificateIdentityCallback(), target, CertificateStoreFactory.get()); final CloseableHttpClient client = new HttpConnectionPoolBuilder( target, new ThreadLocalHostnameDelegatingTrustManager(trust, target.getHostname()), key, ProxyFactory.get() ).build(ProxyFactory.get(), new DisabledTranscriptListener(), new DisabledLoginCallback()) .setUserAgent(new FreenetUserAgentProvider().get()) .build(); final String username = bookmark.getCredentials().getUsername(); final String password; if(StringUtils.isBlank(bookmark.getCredentials().getPassword())) { password = keychain.findLoginPassword(bookmark); } else { password = bookmark.getCredentials().getPassword(); } if(null == password) { log.warn(String.format("No password found for %s", bookmark)); return DescriptiveUrl.EMPTY; } response = new PasswordTokenRequest(new ApacheHttpTransport(client), new GsonFactory(), new GenericUrl("https://oauth.freenet.de/oauth/token"), username, password) .setClientAuthentication(new BasicAuthentication("desktop_client", "6LIGIHuOSkznLomu5xw0EPPBJOXb2jLp")) .setRequestInitializer(new UserAgentHttpRequestInitializer(new FreenetUserAgentProvider())) .set("world", new HostPreferences(bookmark).getProperty("world")) .set("webLogin", Boolean.TRUE) .execute(); final FreenetTemporaryLoginResponse login = this.getLoginSession(client, response.getAccessToken()); return new DescriptiveUrl(URI.create(login.urls.login), DescriptiveUrl.Type.authenticated); } catch(IOException e) { throw new HttpExceptionMappingService().map(e); } } catch(BackgroundException e) { log.warn(String.format("Failure %s retrieving authenticated URL for %s", e, bookmark)); return DescriptiveUrl.EMPTY; } }
@Test public void testToUrl() { final FreenetAuthenticatedUrlProvider provider = new FreenetAuthenticatedUrlProvider(new DefaultHostPasswordStore() { @Override public String getPassword(final String serviceName, final String accountName) throws LocalAccessDeniedException { return PROPERTIES.get("freenet.password"); } @Override public void addPassword(final String serviceName, final String accountName, final String password) throws LocalAccessDeniedException { } @Override public String getPassword(final Scheme scheme, final int port, final String hostname, final String user) throws LocalAccessDeniedException { return PROPERTIES.get("freenet.password"); } @Override public void addPassword(final Scheme scheme, final int port, final String hostname, final String user, final String password) throws LocalAccessDeniedException { } @Override public void deletePassword(final String serviceName, final String user) throws LocalAccessDeniedException { } @Override public void deletePassword(final Scheme scheme, final int port, final String hostname, final String user) throws LocalAccessDeniedException { } }); final DescriptiveUrl url = provider.toUrl(session.getHost()); assertNotEquals(DescriptiveUrl.EMPTY, url); assertEquals(DescriptiveUrl.Type.authenticated, url.getType()); }
protected abstract String getGroupId(T request);
@Test public void testManyNodes() { Node node1 = Mockito.mock(Node.class); Mockito.when(node1.getGroupId()).thenReturn("test"); Mockito.when(node1.getNodeId()).thenReturn(new NodeId("test", new PeerId("localhost", 8081))); NodeOptions opts = new NodeOptions(); Mockito.when(node1.getOptions()).thenReturn(opts); NodeManager.getInstance().addAddress(new Endpoint("localhost", 8081)); NodeManager.getInstance().add(node1); Node node2 = Mockito.mock(Node.class); Mockito.when(node2.getGroupId()).thenReturn("test"); Mockito.when(node2.getNodeId()).thenReturn(new NodeId("test", new PeerId("localhost", 8082))); Mockito.when(node2.getOptions()).thenReturn(opts); NodeManager.getInstance().addAddress(new Endpoint("localhost", 8082)); NodeManager.getInstance().add(node2); this.processor = new MockCliRequestProcessor(null, "test"); this.processor.handleRequest(asyncContext, TestUtils.createPingRequest()); ErrorResponse resp = (ErrorResponse) asyncContext.getResponseObject(); assertNotNull(resp); assertEquals(RaftError.EINVAL.getNumber(), resp.getErrorCode()); assertEquals("Peer must be specified since there're 2 nodes in group test", resp.getErrorMsg()); }
@Override public PTransformOverrideFactory.PTransformReplacement< PCollection<? extends InputT>, PCollection<OutputT>> getReplacementTransform( AppliedPTransform< PCollection<? extends InputT>, PCollection<OutputT>, SingleOutput<InputT, OutputT>> transform) { return PTransformOverrideFactory.PTransformReplacement.of( PTransformReplacements.getSingletonMainInput(transform), new ParDoSingle<>( transform.getTransform(), Iterables.getOnlyElement(transform.getOutputs().keySet()), PTransformReplacements.getSingletonMainOutput(transform).getCoder())); }
@Test public void getReplacementTransformGetSideInputs() { PCollectionView<Long> sideLong = pipeline .apply("LongSideInputVals", Create.of(-1L, -2L, -4L)) .apply("SideLongView", Sum.longsGlobally().asSingletonView()); PCollectionView<List<String>> sideStrings = pipeline .apply("StringSideInputVals", Create.of("foo", "bar", "baz")) .apply("SideStringsView", View.asList()); ParDo.SingleOutput<Integer, Long> originalTransform = ParDo.of(new ToLongFn()).withSideInputs(sideLong, sideStrings); PCollection<? extends Integer> input = pipeline.apply(Create.of(1, 2, 3)); AppliedPTransform< PCollection<? extends Integer>, PCollection<Long>, ParDo.SingleOutput<Integer, Long>> application = AppliedPTransform.of( "original", PValues.expandInput(input), PValues.expandOutput(input.apply(originalTransform)), originalTransform, ResourceHints.create(), pipeline); PTransformReplacement<PCollection<? extends Integer>, PCollection<Long>> replacementTransform = factory.getReplacementTransform(application); ParDoSingle<Integer, Long> parDoSingle = (ParDoSingle<Integer, Long>) replacementTransform.getTransform(); assertThat(parDoSingle.getSideInputs().values(), containsInAnyOrder(sideStrings, sideLong)); }