Columns: focal_method (string), test_case (string) — each row pairs a focal method with a corresponding unit test.
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); if (data.size() < 2) { onInvalidDataReceived(device, data); return; } // Read the Op Code final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0); // Estimate the expected operand size based on the Op Code int expectedOperandSize; switch (opCode) { case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> // UINT8 expectedOperandSize = 1; case OP_CODE_CALIBRATION_VALUE_RESPONSE -> // Calibration Value expectedOperandSize = 10; case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE, OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE, OP_CODE_HYPO_ALERT_LEVEL_RESPONSE, OP_CODE_HYPER_ALERT_LEVEL_RESPONSE, OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE, OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE -> // SFLOAT expectedOperandSize = 2; case OP_CODE_RESPONSE_CODE -> // Request Op Code (UINT8), Response Code Value (UINT8) expectedOperandSize = 2; default -> { onInvalidDataReceived(device, data); return; } } // Verify packet length if (data.size() != 1 + expectedOperandSize && data.size() != 1 + expectedOperandSize + 2) { onInvalidDataReceived(device, data); return; } // Verify CRC if present final boolean crcPresent = data.size() == 1 + expectedOperandSize + 2; // opCode + expected operand + CRC if (crcPresent) { final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 1 + expectedOperandSize); final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 1 + expectedOperandSize); if (expectedCrc != actualCrc) { onCGMSpecificOpsResponseReceivedWithCrcError(device, data); return; } } switch (opCode) { case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> { final int interval = data.getIntValue(Data.FORMAT_UINT8, 1); onContinuousGlucoseCommunicationIntervalReceived(device, interval, crcPresent); return; } case OP_CODE_CALIBRATION_VALUE_RESPONSE -> { final float glucoseConcentrationOfCalibration = data.getFloatValue(Data.FORMAT_SFLOAT, 1); final int calibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 3); final int calibrationTypeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 5); @SuppressLint("WrongConstant") final int calibrationType = calibrationTypeAndSampleLocation & 0x0F; final int calibrationSampleLocation = calibrationTypeAndSampleLocation >> 4; final int nextCalibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 6); final int calibrationDataRecordNumber = data.getIntValue(Data.FORMAT_UINT16_LE, 8); final int calibrationStatus = data.getIntValue(Data.FORMAT_UINT8, 10); onContinuousGlucoseCalibrationValueReceived(device, glucoseConcentrationOfCalibration, calibrationTime, nextCalibrationTime, calibrationType, calibrationSampleLocation, calibrationDataRecordNumber, new CGMCalibrationStatus(calibrationStatus), crcPresent); return; } case OP_CODE_RESPONSE_CODE -> { final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); // ignore final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 2); if (responseCode == CGM_RESPONSE_SUCCESS) { onCGMSpecificOpsOperationCompleted(device, requestCode, crcPresent); } else { onCGMSpecificOpsOperationError(device, requestCode, responseCode, crcPresent); } return; } } // Read SFLOAT value final float value = data.getFloatValue(Data.FORMAT_SFLOAT, 1); switch (opCode) { case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE -> onContinuousGlucosePatientHighAlertReceived(device, value, crcPresent); case OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE -> onContinuousGlucosePatientLowAlertReceived(device, value, crcPresent); case OP_CODE_HYPO_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseHypoAlertReceived(device, value, crcPresent); case OP_CODE_HYPER_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseHyperAlertReceived(device, value, crcPresent); case OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseRateOfDecreaseAlertReceived(device, value, crcPresent); case OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseRateOfIncreaseAlertReceived(device, value, crcPresent); } }
@Test public void onCGMSpecificOpsOperationCompleted_withCrc() { final Data data = new Data(new byte[] { 28, 2, 1, (byte) 0x3C, (byte) 0x3B}); callback.onDataReceived(null, data); assertTrue(success); assertTrue(secured); assertEquals(2, requestCode); }
@Override public ApiResult<AllBrokersStrategy.BrokerKey, Collection<TransactionListing>> handleResponse( Node broker, Set<AllBrokersStrategy.BrokerKey> keys, AbstractResponse abstractResponse ) { int brokerId = broker.id(); AllBrokersStrategy.BrokerKey key = requireSingleton(keys, brokerId); ListTransactionsResponse response = (ListTransactionsResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) { log.debug("The `ListTransactions` request sent to broker {} failed because the " + "coordinator is still loading state. Will try again after backing off", brokerId); return ApiResult.empty(); } else if (error == Errors.COORDINATOR_NOT_AVAILABLE) { log.debug("The `ListTransactions` request sent to broker {} failed because the " + "coordinator is shutting down", brokerId); return ApiResult.failed(key, new CoordinatorNotAvailableException("ListTransactions " + "request sent to broker " + brokerId + " failed because the coordinator is shutting down")); } else if (error != Errors.NONE) { log.error("The `ListTransactions` request sent to broker {} failed because of an " + "unexpected error {}", brokerId, error); return ApiResult.failed(key, error.exception("ListTransactions request " + "sent to broker " + brokerId + " failed with an unexpected exception")); } else { List<TransactionListing> listings = response.data().transactionStates().stream() .map(transactionState -> new TransactionListing( transactionState.transactionalId(), transactionState.producerId(), TransactionState.parse(transactionState.transactionState()))) .collect(Collectors.toList()); return ApiResult.completed(key, listings); } }
@Test public void testHandleSuccessfulResponse() { int brokerId = 1; BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId)); ListTransactionsOptions options = new ListTransactionsOptions(); ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext); ListTransactionsResponse response = sampleListTransactionsResponse1(); ApiResult<BrokerKey, Collection<TransactionListing>> result = handler.handleResponse( node, singleton(brokerKey), response); assertEquals(singleton(brokerKey), result.completedKeys.keySet()); assertExpectedTransactions(response.data().transactionStates(), result.completedKeys.get(brokerKey)); }
@Override public void subscribe(URL url, NotifyListener listener) { if (url == null) { throw new IllegalArgumentException("subscribe url == null"); } if (listener == null) { throw new IllegalArgumentException("subscribe listener == null"); } if (logger.isInfoEnabled()) { logger.info("Subscribe: " + url); } Set<NotifyListener> listeners = subscribed.computeIfAbsent(url, n -> new ConcurrentHashSet<>()); listeners.add(listener); }
@Test void testSubscribeIfUrlNull() { Assertions.assertThrows(IllegalArgumentException.class, () -> { final AtomicReference<Boolean> notified = new AtomicReference<Boolean>(false); NotifyListener listener = urls -> notified.set(Boolean.TRUE); URL url = new ServiceConfigURL("dubbo", "192.168.0.1", 2200); abstractRegistry.subscribe(null, listener); Assertions.fail("subscribe url == null"); }); }
@Override public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc, boolean addFieldName, boolean addCr ) { String retval = ""; String fieldname = v.getName(); int length = v.getLength(); int precision = v.getPrecision(); if ( addFieldName ) { retval += fieldname + " "; } int type = v.getType(); switch ( type ) { case ValueMetaInterface.TYPE_DATE: retval += "DATE"; break; case ValueMetaInterface.TYPE_BOOLEAN: retval += "CHAR(1)"; break; case ValueMetaInterface.TYPE_NUMBER: case ValueMetaInterface.TYPE_INTEGER: case ValueMetaInterface.TYPE_BIGNUMBER: if ( fieldname.equalsIgnoreCase( tk ) || // Technical key fieldname.equalsIgnoreCase( pk ) // Primary key ) { retval += "INTEGER"; } else { if ( length > 0 ) { if ( precision > 0 || length > 18 ) { retval += "DECIMAL(" + length + ", " + precision + ")"; } else { retval += "INTEGER"; } } else { retval += "DOUBLE PRECISION"; } } break; case ValueMetaInterface.TYPE_STRING: if ( length >= MAX_VARCHAR_LENGTH || length <= 0 ) { retval += "VARCHAR(" + MAX_VARCHAR_LENGTH + ")"; } else { retval += "VARCHAR(" + length + ")"; } break; default: retval += " UNKNOWN"; break; } if ( addCr ) { retval += Const.CR; } return retval; }
@Test public void testGetFieldDefinition() { assertEquals( "FOO DATE", nativeMeta.getFieldDefinition( new ValueMetaDate( "FOO" ), "", "", false, true, false ) ); assertEquals( "DATE", nativeMeta.getFieldDefinition( new ValueMetaDate( "FOO" ), "", "", false, false, false ) ); // Note - Rocket U2 does *not* support timestamps ... assertEquals( "CHAR(1)", nativeMeta.getFieldDefinition( new ValueMetaBoolean( "FOO" ), "", "", false, false, false ) ); assertEquals( "INTEGER", nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 10, 0 ), "FOO", "", false, false, false ) ); assertEquals( "INTEGER", nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 10, 0 ), "", "FOO", false, false, false ) ); // Numeric Types assertEquals( "DECIMAL(5, 5)", nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 5, 5 ), "", "", false, false, false ) ); assertEquals( "DECIMAL(19, 0)", nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 19, 0 ), "", "", false, false, false ) ); assertEquals( "INTEGER", nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 18, 0 ), "", "", false, false, false ) ); assertEquals( "DOUBLE PRECISION", nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", -7, -3 ), "", "", false, false, false ) ); assertEquals( "VARCHAR(15)", nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 15, 0 ), "", "", false, false, false ) ); assertEquals( "VARCHAR(65535)", nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 65537, 0 ), "", "", false, false, false ) ); assertEquals( " UNKNOWN", nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, false ) ); assertEquals( " UNKNOWN" + System.getProperty( "line.separator" ), nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, true ) ); }
@Transactional @ApolloAuditLog(type = OpType.CREATE, name = "App.create") public App createAppAndAddRolePermission( App app, Set<String> admins ) { App createdApp = this.createAppInLocal(app); publisher.publishEvent(new AppCreationEvent(createdApp)); if (!CollectionUtils.isEmpty(admins)) { rolePermissionService .assignRoleToUsers(RoleUtils.buildAppMasterRoleName(createdApp.getAppId()), admins, userInfoHolder.getUser().getUserId()); } return createdApp; }
@Test void createAppAndAddRolePermission() { final String userId = "user100"; final String appId = "appId100"; { UserInfo userInfo = new UserInfo(); userInfo.setUserId(userId); userInfo.setEmail("xxx@xxx.com"); Mockito.when(userService.findByUserId(Mockito.eq(userId))) .thenReturn(userInfo); } final App app = new App(); app.setAppId(appId); app.setOwnerName(userId); Set<String> admins = new HashSet<>(Arrays.asList("user1", "user2")); final App createdApp = new App(); createdApp.setAppId(appId); createdApp.setOwnerName(userId); { Mockito.when(appRepository.save(Mockito.eq(app))) .thenReturn(createdApp); } appService.createAppAndAddRolePermission(app, admins); Mockito.verify(appRepository, Mockito.times(1)) .findByAppId(Mockito.eq(appId)); Mockito.verify(userService, Mockito.times(1)) .findByUserId(Mockito.eq(userId)); Mockito.verify(userInfoHolder, Mockito.times(2)) .getUser(); Mockito.verify(appRepository, Mockito.times(1)) .save(Mockito.eq(app)); Mockito.verify(appNamespaceService, Mockito.times(1)) .createDefaultAppNamespace(Mockito.eq(appId)); Mockito.verify(roleInitializationService, Mockito.times(1)) .initAppRoles(Mockito.eq(createdApp)); Mockito.verify(rolePermissionService, Mockito.times(1)) .assignRoleToUsers(Mockito.any(), Mockito.eq(admins), Mockito.eq(OPERATOR_USER_ID)); }
public void finish() throws IOException { if (finished) { return; } flush(); // Finish the stream with the terminatorValue. VarInt.encode(terminatorValue, os); if (!BUFFER_POOL.offer(buffer)) { // The pool is full, we can't store the buffer. We just drop the buffer. } finished = true; }
@Test public void testBuffersAreTakenAndReturned() throws Exception { BUFFER_POOL.clear(); BUFFER_POOL.offer(ByteBuffer.allocate(256)); ByteArrayOutputStream baos = new ByteArrayOutputStream(); BufferedElementCountingOutputStream os = createAndWriteValues(toBytes("abcdefghij"), baos); assertEquals(0, BUFFER_POOL.size()); os.finish(); assertEquals(1, BUFFER_POOL.size()); }
public static KiePMMLClusteringModel getKiePMMLClusteringModel(final ClusteringCompilationDTO compilationDTO) { logger.trace("getKiePMMLClusteringModel {}", compilationDTO); try { ClusteringModel clusteringModel = compilationDTO.getModel(); final KiePMMLClusteringModel.ModelClass modelClass = modelClassFrom(clusteringModel.getModelClass()); final List<KiePMMLCluster> clusters = getKiePMMLClusters(clusteringModel.getClusters()); final List<KiePMMLClusteringField> clusteringFields = getKiePMMLClusteringFields(clusteringModel.getClusteringFields()); final KiePMMLComparisonMeasure comparisonMeasure = getKiePMMLComparisonMeasure(clusteringModel.getComparisonMeasure()); final KiePMMLMissingValueWeights missingValueWeights = getKiePMMLMissingValueWeights(clusteringModel.getMissingValueWeights()); return KiePMMLClusteringModel.builder(compilationDTO.getFileName(), compilationDTO.getModelName(), compilationDTO.getMINING_FUNCTION()) .withModelClass(modelClass) .withClusters(clusters) .withClusteringFields(clusteringFields) .withComparisonMeasure(comparisonMeasure) .withMissingValueWeights(missingValueWeights) .withTargetField(compilationDTO.getTargetFieldName()) .withMiningFields(compilationDTO.getKieMiningFields()) .withOutputFields(compilationDTO.getKieOutputFields()) .withKiePMMLMiningFields(compilationDTO.getKiePMMLMiningFields()) .withKiePMMLOutputFields(compilationDTO.getKiePMMLOutputFields()) .withKiePMMLTargets(compilationDTO.getKiePMMLTargetFields()) .withKiePMMLTransformationDictionary(compilationDTO.getKiePMMLTransformationDictionary()) .withKiePMMLLocalTransformations(compilationDTO.getKiePMMLLocalTransformations()) .build(); } catch (Exception e) { throw new KiePMMLException(e); } }
@Test void getKiePMMLClusteringModel() { final CommonCompilationDTO<ClusteringModel> compilationDTO = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, clusteringModel, new PMMLCompilationContextMock(), "fileName"); KiePMMLClusteringModel retrieved = KiePMMLClusteringModelFactory.getKiePMMLClusteringModel(ClusteringCompilationDTO.fromCompilationDTO(compilationDTO)); assertThat(retrieved).isNotNull(); assertThat(retrieved.getName()).isEqualTo(clusteringModel.getModelName()); assertThat(retrieved.getModelClass().getName()).isEqualTo(clusteringModel.getModelClass().value()); List<KiePMMLCluster> retrievedClusters = retrieved.getClusters(); assertThat(retrievedClusters).hasSameSizeAs(clusteringModel.getClusters()); IntStream.range(0, clusteringModel.getClusters().size()).forEach(i -> commonEvaluateKiePMMLCluster(retrievedClusters.get(i), clusteringModel.getClusters().get(i))); List<KiePMMLClusteringField> retrievedClusteringFields = retrieved.getClusteringFields(); assertThat(retrievedClusteringFields).hasSameSizeAs(clusteringModel.getClusteringFields()); IntStream.range(0, clusteringModel.getClusteringFields().size()).forEach(i -> commonEvaluateKiePMMLClusteringField(retrievedClusteringFields.get(i), clusteringModel.getClusteringFields().get(i))); commonEvaluateKiePMMLComparisonMeasure(retrieved.getComparisonMeasure(), clusteringModel.getComparisonMeasure()); commonEvaluateKiePMMLMissingValueWeights(retrieved.getMissingValueWeights(), clusteringModel.getMissingValueWeights()); }
@Override public void deleteArticle(Long id) { // Validate that the article exists validateArticleExists(id); // Delete it articleMapper.deleteById(id); }
@Test public void testDeleteArticle_success() { // mock data ArticleDO dbArticle = randomPojo(ArticleDO.class); articleMapper.insert(dbArticle); // @Sql: insert an existing record first // prepare parameters Long id = dbArticle.getId(); // invoke articleService.deleteArticle(id); // verify the record no longer exists assertNull(articleMapper.selectById(id)); }
public static Map<UUID, PartitionIdSet> createPartitionMap( NodeEngine nodeEngine, @Nullable MemberVersion localMemberVersion, boolean failOnUnassignedPartition ) { Collection<Partition> parts = nodeEngine.getHazelcastInstance().getPartitionService().getPartitions(); int partCnt = parts.size(); Map<UUID, PartitionIdSet> partMap = new LinkedHashMap<>(); for (Partition part : parts) { Member owner = part.getOwner(); if (owner == null) { if (failOnUnassignedPartition) { throw QueryException.error( SqlErrorCode.PARTITION_DISTRIBUTION, "Partition is not assigned to any member: " + part.getPartitionId() ); } else { continue; } } if (localMemberVersion != null) { if (!localMemberVersion.equals(owner.getVersion())) { UUID localMemberId = nodeEngine.getLocalMember().getUuid(); throw QueryException.error("Cannot execute SQL query when members have different versions " + "(make sure that all members have the same version) {localMemberId=" + localMemberId + ", localMemberVersion=" + localMemberVersion + ", remoteMemberId=" + owner.getUuid() + ", remoteMemberVersion=" + owner.getVersion() + "}"); } } partMap.computeIfAbsent(owner.getUuid(), (key) -> new PartitionIdSet(partCnt)).add(part.getPartitionId()); } return partMap; }
@Test public void testUnassignedPartition_ignore() { HazelcastInstance member = factory.newHazelcastInstance(); member.getCluster().changeClusterState(ClusterState.FROZEN); Map<UUID, PartitionIdSet> map = QueryUtils.createPartitionMap(Accessors.getNodeEngineImpl(member), null, false); assertTrue(map.isEmpty()); }
@SuppressWarnings("deprecation") @Override public ByteBuf asReadOnly() { if (isReadOnly()) { return this; } return Unpooled.unmodifiableBuffer(this); }
@Test public void testReadyOnlyNioBufferWithPositionLength() { assertReadyOnlyNioBufferWithPositionLength(buffer.asReadOnly()); }
@Override public int compareTo(DateTimeStamp dateTimeStamp) { return comparator.compare(this,dateTimeStamp); }
@Test void testCompareNullValue() { DateTimeStamp smaller = new DateTimeStamp("2018-04-04T09:10:00.586-0100"); assertEquals(-1, smaller.compareTo(null)); }
@Override public PollResult poll(long currentTimeMs) { return pollInternal( prepareFetchRequests(), this::handleFetchSuccess, this::handleFetchFailure ); }
@Test public void testFetchTopicIdUpgradeDowngrade() { buildFetcher(); TopicIdPartition fooWithoutId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 0)); // Assign foo without a topic id. subscriptions.assignFromUser(singleton(fooWithoutId.topicPartition())); client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithoutId), tp -> validLeaderEpoch)); subscriptions.seek(fooWithoutId.topicPartition(), 0); // Fetch should use version 12. assertEquals(1, sendFetches()); client.prepareResponse( fetchRequestMatcher((short) 12, singletonMap(fooWithoutId, new PartitionData( fooWithoutId.topicId(), 0, FetchRequest.INVALID_LOG_START_OFFSET, fetchSize, Optional.of(validLeaderEpoch)) ), emptyList() ), fullFetchResponse(1, fooWithoutId, records, Errors.NONE, 100L, 0) ); networkClientDelegate.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); fetchRecords(); // Upgrade. TopicIdPartition fooWithId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); subscriptions.assignFromUser(singleton(fooWithId.topicPartition())); client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithId), tp -> validLeaderEpoch)); subscriptions.seek(fooWithId.topicPartition(), 0); // Fetch should use latest version. assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); // foo with old topic id should be removed from the session. client.prepareResponse( fetchRequestMatcher(ApiKeys.FETCH.latestVersion(), singletonMap(fooWithId, new PartitionData( fooWithId.topicId(), 0, FetchRequest.INVALID_LOG_START_OFFSET, fetchSize, Optional.of(validLeaderEpoch)) ), emptyList() ), fullFetchResponse(1, fooWithId, records, Errors.NONE, 100L, 0) ); networkClientDelegate.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); fetchRecords(); // Downgrade. subscriptions.assignFromUser(singleton(fooWithoutId.topicPartition())); client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithoutId), tp -> validLeaderEpoch)); subscriptions.seek(fooWithoutId.topicPartition(), 0); // Fetch should use version 12. assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); // foo with old topic id should be removed from the session. client.prepareResponse( fetchRequestMatcher((short) 12, singletonMap(fooWithoutId, new PartitionData( fooWithoutId.topicId(), 0, FetchRequest.INVALID_LOG_START_OFFSET, fetchSize, Optional.of(validLeaderEpoch)) ), emptyList() ), fullFetchResponse(1, fooWithoutId, records, Errors.NONE, 100L, 0) ); networkClientDelegate.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); fetchRecords(); }
public static String get(String urlString, Charset customCharset) { return HttpRequest.get(urlString).charset(customCharset).execute().body(); }
@Test @Disabled public void acplayTest(){ final String body = HttpRequest.get("https://api.acplay.net/api/v2/bangumi/9541") .execute().body(); Console.log(body); }
@Nonnull public static <T> Sink<T> list(@Nonnull String listName) { return fromProcessor("listSink(" + listName + ')', writeListP(listName)); }
@Test public void list_byName() { // Given populateList(srcList); // When Sink<Object> sink = Sinks.list(sinkName); // Then p.readFrom(Sources.list(srcList)).writeTo(sink); execute(); assertEquals(itemCount, sinkList.size()); }
public synchronized void setMemoryPool(MemoryPool newMemoryPool) { // This method first acquires the monitor of this instance. // After that in this method if we acquire the monitors of the // user/revocable memory contexts in the queryMemoryContext instance // (say, by calling queryMemoryContext.getUserMemory()) it's possible // to have a deadlock. Because, the driver threads running the operators // will allocate memory concurrently through the child memory context -> ... -> // root memory context -> this.updateUserMemory() calls, and will acquire // the monitors of the user/revocable memory contexts in the queryMemoryContext instance // first, and then the monitor of this, which may cause deadlocks. // That's why instead of calling methods on queryMemoryContext to get the // user/revocable memory reservations, we call the MemoryPool to get the same // information. requireNonNull(newMemoryPool, "newMemoryPool is null"); if (memoryPool == newMemoryPool) { // Don't unblock our tasks and thrash the pools, if this is a no-op return; } ListenableFuture<?> future = memoryPool.moveQuery(queryId, newMemoryPool); memoryPool = newMemoryPool; if (resourceOverCommit) { // Reset the memory limits based on the new pool assignment setResourceOvercommit(); } future.addListener(() -> { // Unblock all the tasks, if they were waiting for memory, since we're in a new pool. taskContexts.values().forEach(TaskContext::moreMemoryAvailable); }, directExecutor()); }
@Test(dataProvider = "testSetMemoryPoolOptions") public void testSetMemoryPool(boolean useReservedPool) { QueryId secondQuery = new QueryId("second"); MemoryPool reservedPool = new MemoryPool(RESERVED_POOL, new DataSize(10, BYTE)); long secondQueryMemory = reservedPool.getMaxBytes() - 1; if (useReservedPool) { assertTrue(reservedPool.reserve(secondQuery, "test", secondQueryMemory).isDone()); } try (LocalQueryRunner localQueryRunner = new LocalQueryRunner(TEST_SESSION)) { QueryContext queryContext = new QueryContext( new QueryId("query"), new DataSize(10, BYTE), new DataSize(20, BYTE), new DataSize(10, BYTE), new DataSize(1, GIGABYTE), new MemoryPool(GENERAL_POOL, new DataSize(10, BYTE)), new TestingGcMonitor(), localQueryRunner.getExecutor(), localQueryRunner.getScheduler(), new DataSize(0, BYTE), new SpillSpaceTracker(new DataSize(0, BYTE)), listJsonCodec(TaskMemoryReservationSummary.class)); // Use memory queryContext.getQueryMemoryContext().initializeLocalMemoryContexts("test"); LocalMemoryContext userMemoryContext = queryContext.getQueryMemoryContext().localUserMemoryContext(); LocalMemoryContext revocableMemoryContext = queryContext.getQueryMemoryContext().localRevocableMemoryContext(); assertTrue(userMemoryContext.setBytes(3).isDone()); assertTrue(revocableMemoryContext.setBytes(5).isDone()); queryContext.setMemoryPool(reservedPool); if (useReservedPool) { reservedPool.free(secondQuery, "test", secondQueryMemory); } // Free memory userMemoryContext.close(); revocableMemoryContext.close(); } }
public static Path getConfigHome() { return getConfigHome(System.getProperties(), System.getenv()); }
@Test public void testGetConfigHome_windows() { Properties fakeProperties = new Properties(); fakeProperties.setProperty("user.home", "nonexistent"); fakeProperties.setProperty("os.name", "os is WiNdOwS"); Map<String, String> fakeEnvironment = ImmutableMap.of("LOCALAPPDATA", fakeConfigHome); Assert.assertEquals( Paths.get(fakeConfigHome).resolve("Google").resolve("Jib").resolve("Config"), XdgDirectories.getConfigHome(fakeProperties, fakeEnvironment)); }
private boolean processBackgroundEvents() { AtomicReference<KafkaException> firstError = new AtomicReference<>(); LinkedList<BackgroundEvent> events = new LinkedList<>(); backgroundEventQueue.drainTo(events); for (BackgroundEvent event : events) { try { if (event instanceof CompletableEvent) backgroundEventReaper.add((CompletableEvent<?>) event); backgroundEventProcessor.process(event); } catch (Throwable t) { KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); if (!firstError.compareAndSet(null, e)) log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); } } backgroundEventReaper.reap(time.milliseconds()); if (firstError.get() != null) throw firstError.get(); return !events.isEmpty(); }
@Test public void testProcessBackgroundEventsWithInitialDelay() throws Exception { consumer = newConsumer(); Time time = new MockTime(); Timer timer = time.timer(1000); CompletableFuture<?> future = mock(CompletableFuture.class); CountDownLatch latch = new CountDownLatch(3); // Mock our call to Future.get(timeout) so that it mimics a delay of 200 milliseconds. Keep in mind that // the incremental timeout inside processBackgroundEvents is 100 milliseconds for each pass. Our first two passes // will exceed the incremental timeout, but the third will return. doAnswer(invocation -> { latch.countDown(); if (latch.getCount() > 0) { long timeout = invocation.getArgument(0, Long.class); timer.sleep(timeout); throw new java.util.concurrent.TimeoutException("Intentional timeout"); } future.complete(null); return null; }).when(future).get(any(Long.class), any(TimeUnit.class)); consumer.processBackgroundEvents(future, timer); // 800 is the 1000 ms timeout (above) minus the 200 ms delay for the two incremental timeouts/retries. assertEquals(800, timer.remainingMs()); }
public static String checkNotNullEmpty(String value, String name) throws IllegalArgumentException { if (isBlank(value)) { throw new IllegalArgumentException(name + " is null or empty"); } return value; }
@Test public void testCheckNotNullEmptyInputSpaceThrowsException() { thrown.expect(IllegalArgumentException.class); EagleEyeCoreUtils.checkNotNullEmpty(" ", "bar"); // Method is not expected to return due to exception thrown }
public Optional<Violation> validate(IndexSetConfig newConfig) { // Don't validate prefix conflicts in case of an update if (Strings.isNullOrEmpty(newConfig.id())) { final Violation prefixViolation = validatePrefix(newConfig); if (prefixViolation != null) { return Optional.of(prefixViolation); } } final Violation fieldMappingViolation = validateMappingChangesAreLegal(newConfig); if (fieldMappingViolation != null) { return Optional.of(fieldMappingViolation); } Violation refreshIntervalViolation = validateSimpleIndexSetConfig(newConfig); if (refreshIntervalViolation != null){ return Optional.of(refreshIntervalViolation); } return Optional.empty(); }
@Test public void validateMaxRetentionPeriod() { when(indexSetRegistry.iterator()).thenReturn(Collections.emptyIterator()); // no max retention period configured assertThat(validator.validate(testIndexSetConfig())).isNotPresent(); // max retention period >= effective retention period when(elasticsearchConfiguration.getMaxIndexRetentionPeriod()).thenReturn(Period.days(10)); assertThat(validator.validate(testIndexSetConfig())).isNotPresent(); // max retention period < effective retention period when(elasticsearchConfiguration.getMaxIndexRetentionPeriod()).thenReturn(Period.days(9)); assertThat(validator.validate(testIndexSetConfig())).hasValueSatisfying(v -> assertThat(v.message()).contains("effective index retention period of P1W3D") ); // rotation strategy is not time-based final IndexSetConfig modifiedConfig = testIndexSetConfig().toBuilder() .rotationStrategyConfig(MessageCountRotationStrategyConfig.create(Integer.MAX_VALUE)) .rotationStrategyClass(MessageCountRotationStrategy.class.getCanonicalName()) .build(); assertThat(validator.validate(modifiedConfig)).isNotPresent(); }
public <T extends Enum<T>> T getEnumProperty(String key, Class<T> enumClass, T defaultValue) { return getEnumProperty(key, enumClass, defaultValue, false); }
@Test public void testEnumProperty() { TypedProperties p = createProperties(); assertEquals(COLOR.BLUE, p.getEnumProperty("enum_cast", COLOR.class, COLOR.BLUE)); assertEquals(COLOR.RED, p.getEnumProperty("enum", COLOR.class, COLOR.BLUE)); assertEquals(COLOR.RED, p.getEnumProperty("enum_put_str", COLOR.class, COLOR.BLUE)); assertEquals(COLOR.BLUE, p.getEnumProperty("enum_invalid", COLOR.class, COLOR.BLUE)); assertEquals(COLOR.BLUE, p.getEnumProperty("enum_null", COLOR.class, COLOR.BLUE)); assertEquals(COLOR.BLUE, p.getEnumProperty("enum_other", COLOR.class, COLOR.BLUE)); }
public static Exception toException(int code, String msg) throws Exception { if (code == Response.Status.NOT_FOUND.getStatusCode()) { throw new NotFoundException(msg); } else if (code == Response.Status.NOT_IMPLEMENTED.getStatusCode()) { throw new ClassNotFoundException(msg); } else if (code == Response.Status.BAD_REQUEST.getStatusCode()) { throw new InvalidRequestException(msg); } else if (code == Response.Status.CONFLICT.getStatusCode()) { throw new RequestConflictException(msg); } else { throw new RuntimeException(msg); } }
@Test public void testToExceptionRuntimeException() { assertThrows(RuntimeException.class, () -> RestExceptionMapper.toException(-1, "Unknown status code")); }
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) { return decoder.decodeFunctionResult(rawInput, outputParameters); }
@Test public void testDynamicStructOfDynamicStructWithAdditionalParametersReturn() throws ClassNotFoundException { // Return data from 'testInputAndOutput' function of this contract // https://sepolia.etherscan.io/address/0x009C10396226ECFE3E39b3f1AEFa072E37578e30#readContract // struct MyStruct { // uint256 value1; // string value2; // string value3; // MyStruct2 value4; // } // // struct MyStruct2 { // string value1; // string value2; // } // function testInputAndOutput(MyStruct memory struc) external pure // returns(string memory valueBefore, MyStruct memory, string memory valueAfter) { // // return ("valuebefore", mystruc, "valueafter"); // // } String returnedData = "0x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000b76616c75656265666f72650000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000001320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000013300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004313233340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063078313233340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a76616c7565616674657200000000000000000000000000000000000000000000"; List<TypeReference<?>> myStruct2Types = new ArrayList<>(); List<TypeReference<?>> myStructTypes = new ArrayList<>(); List<TypeReference<?>> myParameters = new ArrayList<>(); myStruct2Types.add(TypeReference.makeTypeReference("string")); myStruct2Types.add(TypeReference.makeTypeReference("string")); myStructTypes.add(TypeReference.makeTypeReference("uint256")); myStructTypes.add(TypeReference.makeTypeReference("string")); myStructTypes.add(TypeReference.makeTypeReference("string")); myStructTypes.add(new TypeReference<DynamicStruct>(false, myStruct2Types) {}); myParameters.add(TypeReference.makeTypeReference("string")); myParameters.add(new TypeReference<DynamicStruct>(false, myStructTypes) {}); myParameters.add(TypeReference.makeTypeReference("string")); List<Type> decodedData = FunctionReturnDecoder.decode(returnedData, Utils.convert(myParameters)); assertEquals(decodedData.get(0).getValue(), "valuebefore"); List<Type> structData = ((DynamicStruct) decodedData.get(1)).getValue(); assertEquals(structData.get(0).getValue(), BigInteger.valueOf(1)); assertEquals(structData.get(1).getValue(), "2"); assertEquals(structData.get(2).getValue(), "3"); List<Type> innerStructData = ((DynamicStruct) structData.get(3)).getValue(); assertEquals(innerStructData.get(0).getValue(), "1234"); assertEquals(innerStructData.get(1).getValue(), "0x1234"); assertEquals(decodedData.get(2).getValue(), "valueafter"); }
@Override public YamlShardingAuditStrategyConfiguration swapToYamlConfiguration(final ShardingAuditStrategyConfiguration data) { YamlShardingAuditStrategyConfiguration result = new YamlShardingAuditStrategyConfiguration(); result.setAuditorNames(new LinkedList<>(data.getAuditorNames())); result.setAllowHintDisable(data.isAllowHintDisable()); return result; }
@Test void assertSwapToYamlConfiguration() { ShardingAuditStrategyConfiguration data = new ShardingAuditStrategyConfiguration(Collections.singletonList("audit_algorithm"), false); YamlShardingAuditStrategyConfigurationSwapper swapper = new YamlShardingAuditStrategyConfigurationSwapper(); YamlShardingAuditStrategyConfiguration actual = swapper.swapToYamlConfiguration(data); assertThat(actual.getAuditorNames(), is(Collections.singletonList("audit_algorithm"))); assertFalse(actual.isAllowHintDisable()); }
@SuppressWarnings("unchecked") public <T> T convert(DocString docString, Type targetType) { if (DocString.class.equals(targetType)) { return (T) docString; } List<DocStringType> docStringTypes = docStringTypeRegistry.lookup(docString.getContentType(), targetType); if (docStringTypes.isEmpty()) { if (docString.getContentType() == null) { throw new CucumberDocStringException(format( "It appears you did not register docstring type for %s", targetType.getTypeName())); } throw new CucumberDocStringException(format( "It appears you did not register docstring type for '%s' or %s", docString.getContentType(), targetType.getTypeName())); } if (docStringTypes.size() > 1) { List<String> suggestedContentTypes = suggestedContentTypes(docStringTypes); if (docString.getContentType() == null) { throw new CucumberDocStringException(format( "Multiple converters found for type %s, add one of the following content types to your docstring %s", targetType.getTypeName(), suggestedContentTypes)); } throw new CucumberDocStringException(format( "Multiple converters found for type %s, and the content type '%s' did not match any of the registered types %s. Change the content type of the docstring or register a docstring type for '%s'", targetType.getTypeName(), docString.getContentType(), suggestedContentTypes, docString.getContentType())); } return (T) docStringTypes.get(0).transform(docString.getContent()); }
@Test void anonymous_to_string_uses_default() { DocString docString = DocString.create("hello world"); assertThat(converter.convert(docString, String.class), is("hello world")); }
public static Optional<PMMLModel> getPMMLModel(String fileName, String modelName, PMMLRuntimeContext pmmlContext) { logger.trace("getPMMLModel {} {}", fileName, modelName); String fileNameToUse = !fileName.endsWith(PMML_SUFFIX) ? fileName + PMML_SUFFIX : fileName; return getPMMLModels(pmmlContext) .stream() .filter(model -> Objects.equals(fileNameToUse, model.getFileName()) && Objects.equals(modelName, model.getName())) .findFirst(); }
@Test void getPMMLModelFromMemoryClassLoader() { PMMLRuntimeContext pmmlContext = getPMMLContext(FILE_NAME, MODEL_NAME, memoryCompilerClassLoader); Optional<PMMLModel> retrieved = PMMLRuntimeHelper.getPMMLModel(FILE_NAME, MODEL_NAME, pmmlContext); assertThat(retrieved).isNotNull().isPresent(); retrieved = PMMLRuntimeHelper.getPMMLModel("FileName", "NoTestMod", pmmlContext); assertThat(retrieved).isNotNull().isNotPresent(); }
List<Token> tokenize() throws ScanException { List<Token> tokenList = new ArrayList<Token>(); StringBuffer buf = new StringBuffer(); while (pointer < patternLength) { char c = pattern.charAt(pointer); pointer++; switch (state) { case LITERAL_STATE: handleLiteralState(c, tokenList, buf); break; case FORMAT_MODIFIER_STATE: handleFormatModifierState(c, tokenList, buf); break; case OPTION_STATE: processOption(c, tokenList, buf); break; case KEYWORD_STATE: handleKeywordState(c, tokenList, buf); break; case RIGHT_PARENTHESIS_STATE: handleRightParenthesisState(c, tokenList, buf); break; default: } } // EOS switch (state) { case LITERAL_STATE: addValuedToken(Token.LITERAL, buf, tokenList); break; case KEYWORD_STATE: tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString())); break; case RIGHT_PARENTHESIS_STATE: tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN); break; case FORMAT_MODIFIER_STATE: case OPTION_STATE: throw new ScanException("Unexpected end of pattern string"); } return tokenList; }
@Test public void compositedKeywordFollowedByOptions() throws ScanException { { List<Token> tl = new TokenStream("%d(A){o}", new AlmostAsIsEscapeUtil()).tokenize(); List<Token> witness = new ArrayList<Token>(); witness.add(Token.PERCENT_TOKEN); witness.add(new Token(Token.COMPOSITE_KEYWORD, "d")); witness.add(new Token(Token.LITERAL, "A")); witness.add(Token.RIGHT_PARENTHESIS_TOKEN); List<String> ol = new ArrayList<String>(); ol.add("o"); witness.add(new Token(Token.OPTION, ol)); assertEquals(witness, tl); } }
@Override public ClusterClientProvider<String> deployApplicationCluster( final ClusterSpecification clusterSpecification, final ApplicationConfiguration applicationConfiguration) throws ClusterDeploymentException { if (client.getService(ExternalServiceDecorator.getExternalServiceName(clusterId)) .isPresent()) { throw new ClusterDeploymentException( "The Flink cluster " + clusterId + " already exists."); } checkNotNull(clusterSpecification); checkNotNull(applicationConfiguration); final KubernetesDeploymentTarget deploymentTarget = KubernetesDeploymentTarget.fromConfig(flinkConfig); if (KubernetesDeploymentTarget.APPLICATION != deploymentTarget) { throw new ClusterDeploymentException( "Couldn't deploy Kubernetes Application Cluster." + " Expected deployment.target=" + KubernetesDeploymentTarget.APPLICATION.getName() + " but actual one was \"" + deploymentTarget + "\""); } applicationConfiguration.applyToConfiguration(flinkConfig); // No need to do pipelineJars validation if it is a PyFlink job. if (!(PackagedProgramUtils.isPython(applicationConfiguration.getApplicationClassName()) || PackagedProgramUtils.isPython(applicationConfiguration.getProgramArguments()))) { final List<URI> pipelineJars = KubernetesUtils.checkJarFileForApplicationMode(flinkConfig); Preconditions.checkArgument(pipelineJars.size() == 1, "Should only have one jar"); } try { artifactUploader.uploadAll(flinkConfig); } catch (Exception ex) { throw new ClusterDeploymentException(ex); } final ClusterClientProvider<String> clusterClientProvider = deployClusterInternal( KubernetesApplicationClusterEntrypoint.class.getName(), clusterSpecification, false); try (ClusterClient<String> clusterClient = clusterClientProvider.getClusterClient()) { LOG.info( "Create flink application cluster {} successfully, JobManager Web Interface: {}", clusterId, clusterClient.getWebInterfaceURL()); } return clusterClientProvider; }
@Test void testDeployApplicationClusterWithClusterAlreadyExists() { flinkConfig.set( PipelineOptions.JARS, Collections.singletonList("local:///path/of/user.jar")); flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.APPLICATION.getName()); mockExpectedServiceFromServerSide(loadBalancerSvc); assertThatThrownBy( () -> descriptor.deployApplicationCluster(clusterSpecification, appConfig)) .satisfies( cause -> assertThat(cause) .isInstanceOf(ClusterDeploymentException.class) .hasMessageContaining( "The Flink cluster " + CLUSTER_ID + " already exists.")); }
public Optional<DoFn.ProcessContinuation> run( PartitionRecord partitionRecord, ChangeStreamRecord record, RestrictionTracker<StreamProgress, StreamProgress> tracker, DoFn.OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator, BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator) { if (record instanceof Heartbeat) { Heartbeat heartbeat = (Heartbeat) record; final Instant watermark = toJodaTime(heartbeat.getEstimatedLowWatermark()); // These will be filtered so the key doesn't really matter but the most logical thing to // key a heartbeat by is the partition it corresponds to. ByteString heartbeatKey = Range.ByteStringRange.serializeToByteString(partitionRecord.getPartition()); KV<ByteString, ChangeStreamRecord> outputRecord = KV.of(heartbeatKey, heartbeat); throughputEstimator.update(Instant.now(), outputRecord); StreamProgress streamProgress = new StreamProgress( heartbeat.getChangeStreamContinuationToken(), watermark, throughputEstimator.get(), Instant.now(), true); watermarkEstimator.setWatermark(watermark); // If the tracker fails to claim the streamProgress, it most likely means the runner initiated // a checkpoint. See {@link // org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker} // for more information regarding runner initiated checkpoints. if (!tracker.tryClaim(streamProgress)) { return Optional.of(DoFn.ProcessContinuation.stop()); } metrics.incHeartbeatCount(); // We output heartbeats so that they are factored into throughput and can be used to // autoscale. These will be filtered in a downstream step and never returned to users. This is // to prevent autoscaler from scaling down when we have large tables with no throughput but // we need enough workers to keep up with heartbeats. // We are outputting elements with timestamp of 0 to prevent reliance on event time. This // limits the ability to window on commit time of any data changes. It is still possible to // window on processing time. receiver.outputWithTimestamp(outputRecord, Instant.EPOCH); } else if (record instanceof CloseStream) { CloseStream closeStream = (CloseStream) record; StreamProgress streamProgress = new StreamProgress(closeStream); // If the tracker fails to claim the streamProgress, it most likely means the runner initiated // a checkpoint. See {@link // org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker} // for more information regarding runner initiated checkpoints. if (!tracker.tryClaim(streamProgress)) { return Optional.of(DoFn.ProcessContinuation.stop()); } metrics.incClosestreamCount(); return Optional.of(DoFn.ProcessContinuation.resume()); } else if (record instanceof ChangeStreamMutation) { ChangeStreamMutation changeStreamMutation = (ChangeStreamMutation) record; final Instant watermark = toJodaTime(changeStreamMutation.getEstimatedLowWatermark()); watermarkEstimator.setWatermark(watermark); // Build a new StreamProgress with the continuation token to be claimed. ChangeStreamContinuationToken changeStreamContinuationToken = ChangeStreamContinuationToken.create( Range.ByteStringRange.create( partitionRecord.getPartition().getStart(), partitionRecord.getPartition().getEnd()), changeStreamMutation.getToken()); KV<ByteString, ChangeStreamRecord> outputRecord = KV.of(changeStreamMutation.getRowKey(), changeStreamMutation); throughputEstimator.update(Instant.now(), outputRecord); StreamProgress streamProgress = new StreamProgress( changeStreamContinuationToken, watermark, throughputEstimator.get(), Instant.now(), false); // If the tracker fails to claim the streamProgress, it most likely means the runner initiated // a checkpoint. See ReadChangeStreamPartitionProgressTracker for more information regarding // runner initiated checkpoints. if (!tracker.tryClaim(streamProgress)) { return Optional.of(DoFn.ProcessContinuation.stop()); } if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.GARBAGE_COLLECTION) { metrics.incChangeStreamMutationGcCounter(); } else if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.USER) { metrics.incChangeStreamMutationUserCounter(); } Instant delay = toJodaTime(changeStreamMutation.getCommitTimestamp()); metrics.updateProcessingDelayFromCommitTimestamp( Instant.now().getMillis() - delay.getMillis()); // We are outputting elements with timestamp of 0 to prevent reliance on event time. This // limits the ability to window on commit time of any data changes. It is still possible to // window on processing time. receiver.outputWithTimestamp(outputRecord, Instant.EPOCH); } else { LOG.warn( "RCSP {}: Invalid response type", formatByteStringRange(partitionRecord.getPartition())); } return Optional.empty(); }
@Test public void testCloseStreamResume() { ChangeStreamContinuationToken changeStreamContinuationToken = ChangeStreamContinuationToken.create(ByteStringRange.create("a", "b"), "1234"); CloseStream mockCloseStream = Mockito.mock(CloseStream.class); Status statusProto = Status.newBuilder().setCode(11).build(); Mockito.when(mockCloseStream.getStatus()) .thenReturn(com.google.cloud.bigtable.common.Status.fromProto(statusProto)); Mockito.when(mockCloseStream.getChangeStreamContinuationTokens()) .thenReturn(Collections.singletonList(changeStreamContinuationToken)); final Optional<DoFn.ProcessContinuation> result = action.run( partitionRecord, mockCloseStream, tracker, receiver, watermarkEstimator, throughputEstimator); assertTrue(result.isPresent()); assertEquals(DoFn.ProcessContinuation.resume(), result.get()); verify(metrics).incClosestreamCount(); StreamProgress streamProgress = new StreamProgress(mockCloseStream); verify(tracker).tryClaim(eq(streamProgress)); }
public static <T> T readJsonSR( @Nonnull final byte[] jsonWithMagic, final ObjectMapper mapper, final Class<? extends T> clazz ) throws IOException { if (!hasMagicByte(jsonWithMagic)) { // don't log contents of jsonWithMagic to avoid leaking data into the logs throw new KsqlException( "Got unexpected JSON serialization format that did not start with the magic byte. If " + "this stream was not serialized using the JsonSchemaConverter, then make sure " + "the stream is declared with JSON format (not JSON_SR)."); } return mapper.readValue( jsonWithMagic, SIZE_OF_SR_PREFIX, jsonWithMagic.length - SIZE_OF_SR_PREFIX, clazz ); }
@Test public void shouldSetCorrectOffsetWithMagicByte() throws IOException { // Given: byte[] json = new byte[]{/* magic */ 0x00, /* id */ 0x00, 0x00, 0x00, 0x01, /* data */ 0x01}; // When: JsonSerdeUtils.readJsonSR(json, mapper, Object.class); // Then: Mockito.verify(mapper, Mockito.times(1)).readValue(json, 5, 1, Object.class); }
public SingleValueHeaders getHeaders() { return headers; }
@Test void deserialize_singleValuedHeaders() throws IOException { AwsProxyRequest req = new AwsProxyRequestBuilder().fromJsonString(getSingleValueRequestJson()).build(); assertThat(req.getHeaders().get("accept"), is("*")); }
public static Resource subtract(Resource lhs, Resource rhs) { return subtractFrom(clone(lhs), rhs); }
@Test void testSubtract() { assertEquals(createResource(1, 0), subtract(createResource(2, 1), createResource(1, 1))); assertEquals(createResource(0, 1), subtract(createResource(1, 2), createResource(1, 1))); assertEquals(createResource(2, 2, 0), subtract(createResource(3, 3, 0), createResource(1, 1, 0))); assertEquals(createResource(1, 1, 2), subtract(createResource(2, 2, 3), createResource(1, 1, 1))); }
protected abstract SchemaTransform from(ConfigT configuration);
@Test public void testFrom() { SchemaTransformProvider provider = new FakeTypedSchemaIOProvider(); SchemaTransformProvider minimalProvider = new FakeMinimalTypedProvider(); Row inputConfig = Row.withSchema(provider.configurationSchema()) .withFieldValue("string_field", "field1") .withFieldValue("integer_field", Integer.valueOf(13)) .build(); Configuration outputConfig = ((FakeSchemaTransform) provider.from(inputConfig)).config; Configuration minimalOutputConfig = ((FakeSchemaTransform) minimalProvider.from(inputConfig)).config; for (Configuration config : Arrays.asList(outputConfig, minimalOutputConfig)) { assertEquals("field1", config.getStringField()); assertEquals(13, config.getIntegerField().intValue()); } assertEquals("Description of fake provider", provider.description()); }
@Override public Configuration toConfiguration(CommandLine commandLine) throws FlinkException { final Configuration resultingConfiguration = super.toConfiguration(commandLine); if (commandLine.hasOption(addressOption.getOpt())) { String addressWithPort = commandLine.getOptionValue(addressOption.getOpt()); InetSocketAddress jobManagerAddress = NetUtils.parseHostPortAddress(addressWithPort); setJobManagerAddressInConfig(resultingConfiguration, jobManagerAddress); URL url = NetUtils.getCorrectHostnamePort(addressWithPort); resultingConfiguration.set(RestOptions.PATH, url.getPath()); resultingConfiguration.set(SecurityOptions.SSL_REST_ENABLED, isHttpsProtocol(url)); } resultingConfiguration.set(DeploymentOptions.TARGET, RemoteExecutor.NAME); DynamicPropertiesUtil.encodeDynamicProperties(commandLine, resultingConfiguration); return resultingConfiguration; }
@Test void testDynamicPropertyMaterialization() throws Exception { final String[] args = { "-D" + PipelineOptions.AUTO_WATERMARK_INTERVAL.key() + "=42", "-D" + PipelineOptions.AUTO_GENERATE_UIDS.key() + "=true" }; final AbstractCustomCommandLine defaultCLI = new DefaultCLI(); final CommandLine commandLine = defaultCLI.parseCommandLineOptions(args, false); Configuration configuration = defaultCLI.toConfiguration(commandLine); assertThat(configuration.get(PipelineOptions.AUTO_WATERMARK_INTERVAL)) .isEqualTo(Duration.ofMillis(42L)); assertThat(configuration.get(PipelineOptions.AUTO_GENERATE_UIDS)).isTrue(); }
@Udf public String elt( @UdfParameter(description = "the nth element to extract") final int n, @UdfParameter(description = "the strings of which to extract the nth") final String... args ) { if (args == null) { return null; } if (n < 1 || n > args.length) { return null; } return args[n - 1]; }
@Test public void shouldReturnNullIfNIsLessThanOne() { // When: final String el = elt.elt(0, "a", "b"); // Then: assertThat(el, is(nullValue())); }
@Override public boolean betterThan(Num criterionValue1, Num criterionValue2) { return criterionValue1.isGreaterThan(criterionValue2); }
@Test public void betterThan() { AnalysisCriterion criterion = getCriterion(); assertTrue(criterion.betterThan(numOf(2.0), numOf(1.5))); assertFalse(criterion.betterThan(numOf(1.5), numOf(2.0))); }
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { return this.list(directory, listener, String.valueOf(Path.DELIMITER)); }
@Test public void testListPlaceholderPlusCharacter() throws Exception { final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("us-east-1"); final Path directory = new GoogleStorageDirectoryFeature(session).mkdir( new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path placeholder = new GoogleStorageDirectoryFeature(session).mkdir( new Path(directory, String.format("test+%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertTrue(new GoogleStorageObjectListService(session).list(directory, new DisabledListProgressListener()).contains(placeholder)); assertTrue(new GoogleStorageObjectListService(session).list(placeholder, new DisabledListProgressListener()).isEmpty()); new GoogleStorageDeleteFeature(session).delete(Arrays.asList(placeholder, directory), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static StsConfig getInstance() { return Singleton.INSTANCE; }
@Test void testGetInstance() { StsConfig instance1 = StsConfig.getInstance(); StsConfig instance2 = StsConfig.getInstance(); assertEquals(instance1, instance2); }
static BlockStmt getParameterFieldVariableDeclaration(final String variableName, final ParameterField parameterField) { final MethodDeclaration methodDeclaration = PARAMETER_FIELD_TEMPLATE.getMethodsByName(GEKIEPMMLPARAMETERFIELD).get(0).clone(); final BlockStmt toReturn = methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration))); final VariableDeclarator variableDeclarator = getVariableDeclarator(toReturn, PARAMETER_FIELD) .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, PARAMETER_FIELD, toReturn))); variableDeclarator.setName(variableName); final MethodCallExpr initializer = variableDeclarator.getInitializer() .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, PARAMETER_FIELD, toReturn))) .asMethodCallExpr(); final MethodCallExpr builder = getChainedMethodCallExprFrom("builder", initializer); builder.setArgument(0, new StringLiteralExpr(parameterField.getName())); final Expression dataTypeExpression = getExpressionForDataType(parameterField.getDataType()); final Expression opTypeExpression = getExpressionForOpType(parameterField.getOpType()); getChainedMethodCallExprFrom("withDataType", initializer).setArgument(0, dataTypeExpression); getChainedMethodCallExprFrom("withOpType", initializer).setArgument(0, opTypeExpression); getChainedMethodCallExprFrom("withDisplayName", initializer).setArgument(0, getExpressionForObject(parameterField.getDisplayName())); return toReturn; }
@Test void getParameterFieldVariableDeclaration() throws IOException { String variableName = "variableName"; ParameterField parameterField = new ParameterField(variableName); parameterField.setDataType(DataType.DOUBLE); parameterField.setOpType(OpType.CONTINUOUS); parameterField.setDisplayName("displayName"); String dataType = getDATA_TYPEString(parameterField.getDataType()); String opType = getOP_TYPEString(parameterField.getOpType()); BlockStmt retrieved = KiePMMLParameterFieldFactory.getParameterFieldVariableDeclaration(variableName, parameterField); String text = getFileContent(TEST_01_SOURCE); Statement expected = JavaParserUtils.parseBlock(String.format(text, variableName, dataType, opType, parameterField.getDisplayName())); assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue(); List<Class<?>> imports = Arrays.asList(KiePMMLParameterField.class, Collections.class); commonValidateCompilationWithImports(retrieved, imports); }
@Operation(summary = "list", description = "list all latest configurations") @GetMapping("/latest") public ResponseEntity<List<ServiceConfigVO>> latest(@PathVariable Long clusterId) { return ResponseEntity.success(configService.latest(clusterId)); }
@Test void latestReturnsEmptyForInvalidClusterId() { Long clusterId = 999L; when(configService.latest(clusterId)).thenReturn(List.of()); ResponseEntity<List<ServiceConfigVO>> response = configController.latest(clusterId); assertTrue(response.isSuccess()); assertTrue(response.getData().isEmpty()); }
@Override public void checkOutputSpecs(JobContext context) throws IOException { Configuration conf = context.getConfiguration(); if (getCommitDirectory(conf) == null) { throw new IllegalStateException("Commit directory not configured"); } Path workingPath = getWorkingDirectory(conf); if (workingPath == null) { throw new IllegalStateException("Working directory not configured"); } // get delegation token for outDir's file system TokenCache.obtainTokensForNamenodes(context.getCredentials(), new Path[] {workingPath}, conf); }
@Test public void testCheckOutputSpecs() { try { OutputFormat outputFormat = new CopyOutputFormat(); Job job = Job.getInstance(new Configuration()); JobID jobID = new JobID("200707121733", 1); try { JobContext context = new JobContextImpl(job.getConfiguration(), jobID); outputFormat.checkOutputSpecs(context); Assert.fail("No checking for invalid work/commit path"); } catch (IllegalStateException ignore) { } CopyOutputFormat.setWorkingDirectory(job, new Path("/tmp/work")); try { JobContext context = new JobContextImpl(job.getConfiguration(), jobID); outputFormat.checkOutputSpecs(context); Assert.fail("No checking for invalid commit path"); } catch (IllegalStateException ignore) { } job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, ""); CopyOutputFormat.setCommitDirectory(job, new Path("/tmp/commit")); try { JobContext context = new JobContextImpl(job.getConfiguration(), jobID); outputFormat.checkOutputSpecs(context); Assert.fail("No checking for invalid work path"); } catch (IllegalStateException ignore) { } CopyOutputFormat.setWorkingDirectory(job, new Path("/tmp/work")); CopyOutputFormat.setCommitDirectory(job, new Path("/tmp/commit")); try { JobContext context = new JobContextImpl(job.getConfiguration(), jobID); outputFormat.checkOutputSpecs(context); } catch (IllegalStateException ignore) { Assert.fail("Output spec check failed."); } } catch (IOException e) { LOG.error("Exception encountered while testing checkoutput specs", e); Assert.fail("Checkoutput Spec failure"); } catch (InterruptedException e) { LOG.error("Exception encountered while testing checkoutput specs", e); Assert.fail("Checkoutput Spec failure"); } }
public String borrow() { return String.format("Borrower %s wants to get some money.", name); }
@Test void borrowTest() { var borrowerRole = new BorrowerRole(); borrowerRole.setName("test"); assertEquals("Borrower test wants to get some money.", borrowerRole.borrow()); }
public static NodesInfo deleteDuplicateNodesInfo(ArrayList<NodeInfo> nodes) { NodesInfo nodesInfo = new NodesInfo(); Map<String, NodeInfo> nodesMap = new LinkedHashMap<>(); for (NodeInfo node : nodes) { String nodeId = node.getNodeId(); // If the node already exists, it could be an old instance if (nodesMap.containsKey(nodeId)) { // Check if the node is an old instance if (nodesMap.get(nodeId).getLastHealthUpdate() < node .getLastHealthUpdate()) { nodesMap.put(node.getNodeId(), node); } } else { nodesMap.put(node.getNodeId(), node); } } nodesInfo.addAll(new ArrayList<>(nodesMap.values())); return nodesInfo; }
@Test public void testDeleteDuplicateNodes() { NodesInfo nodes = new NodesInfo(); NodeInfo node1 = new NodeInfo(); node1.setId(NODE1); node1.setLastHealthUpdate(0); nodes.add(node1); NodeInfo node2 = new NodeInfo(); node2.setId(NODE1); node2.setLastHealthUpdate(1); nodes.add(node2); NodeInfo node3 = new NodeInfo(); node3.setId(NODE1); node3.setLastHealthUpdate(2); nodes.add(node3); NodesInfo result = RouterWebServiceUtil.deleteDuplicateNodesInfo(nodes.getNodes()); Assert.assertNotNull(result); Assert.assertEquals(1, result.getNodes().size()); NodeInfo node = result.getNodes().get(0); Assert.assertEquals(NODE1, node.getNodeId()); Assert.assertEquals(2, node.getLastHealthUpdate()); }
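The keep-newest dedupe loop above maps naturally onto Map.merge; a minimal sketch with a hypothetical Node record (illustrative names, not the YARN types):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class DedupeSketch {
    record Node(String id, long lastHealthUpdate) {}

    // Keeps the instance with the newest health update per id, preserving
    // first-seen order exactly like the LinkedHashMap in deleteDuplicateNodesInfo.
    static List<Node> latestPerId(List<Node> nodes) {
        Map<String, Node> byId = new LinkedHashMap<>();
        for (Node n : nodes) {
            byId.merge(n.id(), n, (existing, incoming) ->
                    existing.lastHealthUpdate() >= incoming.lastHealthUpdate() ? existing : incoming);
        }
        return new ArrayList<>(byId.values());
    }
}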
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); if (data.size() < 1) { onInvalidDataReceived(device, data); return; } int offset = 0; while (offset < data.size()) { // Packet size final int size = data.getIntValue(Data.FORMAT_UINT8, offset); if (size < 6 || offset + size > data.size()) { onInvalidDataReceived(device, data); return; } // Flags final int flags = data.getIntValue(Data.FORMAT_UINT8, offset + 1); final boolean cgmTrendInformationPresent = (flags & 0x01) != 0; final boolean cgmQualityInformationPresent = (flags & 0x02) != 0; final boolean sensorWarningOctetPresent = (flags & 0x20) != 0; final boolean sensorCalTempOctetPresent = (flags & 0x40) != 0; final boolean sensorStatusOctetPresent = (flags & 0x80) != 0; final int dataSize = 6 + (cgmTrendInformationPresent ? 2 : 0) + (cgmQualityInformationPresent ? 2 : 0) + (sensorWarningOctetPresent ? 1 : 0) + (sensorCalTempOctetPresent ? 1 : 0) + (sensorStatusOctetPresent ? 1 : 0); if (size != dataSize && size != dataSize + 2) { onInvalidDataReceived(device, data); return; } final boolean crcPresent = size == dataSize + 2; if (crcPresent) { final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, offset + dataSize); final int actualCrc = CRC16.MCRF4XX(data.getValue(), offset, dataSize); if (expectedCrc != actualCrc) { onContinuousGlucoseMeasurementReceivedWithCrcError(device, data); return; } } offset += 2; // Glucose concentration final float glucoseConcentration = data.getFloatValue(Data.FORMAT_SFLOAT, offset); offset += 2; // Time offset (in minutes since Session Start) final int timeOffset = data.getIntValue(Data.FORMAT_UINT16_LE, offset); offset += 2; // Sensor Status Annunciation int warningStatus = 0; int calibrationTempStatus = 0; int sensorStatus = 0; CGMStatus status = null; if (sensorWarningOctetPresent) { warningStatus = data.getIntValue(Data.FORMAT_UINT8, offset++); } if (sensorCalTempOctetPresent) { calibrationTempStatus = data.getIntValue(Data.FORMAT_UINT8, offset++); } if (sensorStatusOctetPresent) { sensorStatus = data.getIntValue(Data.FORMAT_UINT8, offset++); } if (sensorWarningOctetPresent || sensorCalTempOctetPresent || sensorStatusOctetPresent) { status = new CGMStatus(warningStatus, calibrationTempStatus, sensorStatus); } // CGM Trend Information Float trend = null; if (cgmTrendInformationPresent) { trend = data.getFloatValue(Data.FORMAT_SFLOAT, offset); offset += 2; } // CGM Quality Information Float quality = null; if (cgmQualityInformationPresent) { quality = data.getFloatValue(Data.FORMAT_SFLOAT, offset); offset += 2; } // E2E-CRC if (crcPresent) { offset += 2; } onContinuousGlucoseMeasurementReceived(device, glucoseConcentration, trend, quality, status, timeOffset, crcPresent); } }
@Test public void onContinuousGlucoseMeasurementReceived_crcError() { final DataReceivedCallback callback = new ContinuousGlucoseMeasurementDataCallback() { @Override public void onContinuousGlucoseMeasurementReceived(@NonNull final BluetoothDevice device, final float glucoseConcentration, @Nullable final Float cgmTrend, @Nullable final Float cgmQuality, final CGMStatus status, final int timeOffset, final boolean secured) { assertEquals("Measurement reported despite wrong CRC", 1, 2); } @Override public void onContinuousGlucoseMeasurementReceivedWithCrcError(@NonNull final BluetoothDevice device, @NonNull final Data data) { final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 6); assertEquals("CRC error", 0x6F59, actualCrc); } @Override public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { assertEquals("Correct data reported as invalid", 1, 2); } }; final MutableData data = new MutableData(new byte[8]); // Size assertTrue(data.setValue(8, Data.FORMAT_UINT8, 0)); // Flags assertTrue(data.setByte(0b00000000, 1)); // Glucose Concentration assertTrue(data.setValue(12, 1, Data.FORMAT_SFLOAT, 2)); // Time offset assertTrue(data.setValue(6, Data.FORMAT_UINT16_LE, 4)); // E2E CRC assertTrue(data.setValue(0x6F58, Data.FORMAT_UINT16_LE, 6)); callback.onDataReceived(null, data); }
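For contrast with the deliberately wrong CRC above, a minimal sketch of appending a valid E2E-CRC, assuming the same CRC16.MCRF4XX(byte[], offset, length) helper used by the callbacks; note the packet's leading Size byte must already count the two CRC bytes, since that is how the parser detects that a CRC is present:

// Appends a valid little-endian E2E-CRC over the whole packet body (sketch).
static byte[] withCrc(byte[] packetWithoutCrc) {
    byte[] out = java.util.Arrays.copyOf(packetWithoutCrc, packetWithoutCrc.length + 2);
    int crc = CRC16.MCRF4XX(packetWithoutCrc, 0, packetWithoutCrc.length); // helper shown above
    out[out.length - 2] = (byte) (crc & 0xFF);
    out[out.length - 1] = (byte) ((crc >> 8) & 0xFF);
    return out;
}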
protected boolean databaseForBothDbInterfacesIsTheSame( DatabaseInterface primary, DatabaseInterface secondary ) { if ( primary == null || secondary == null ) { throw new IllegalArgumentException( "DatabaseInterface shouldn't be null!" ); } if ( primary.getPluginId() == null || secondary.getPluginId() == null ) { return false; } if ( primary.getPluginId().equals( secondary.getPluginId() ) ) { return true; } return primary.getClass().isAssignableFrom( secondary.getClass() ); }
@Test public void databases_WithDifferentDbConnTypes_AreDifferent_IfNoneOfThemIsSubsetOfAnother() { DatabaseInterface mssqlServerDatabaseMeta = new MSSQLServerDatabaseMeta(); mssqlServerDatabaseMeta.setPluginId( "MSSQL" ); DatabaseInterface oracleDatabaseMeta = new OracleDatabaseMeta(); oracleDatabaseMeta.setPluginId( "ORACLE" ); assertFalse( databaseMeta.databaseForBothDbInterfacesIsTheSame( mssqlServerDatabaseMeta, oracleDatabaseMeta ) ); }
public static byte[] deriveMac(byte[] seed, byte[] nonce) { final MessageDigest md = DigestUtils.digest("SHA-256"); md.update(seed); if (nonce != null) md.update(nonce); md.update(new byte[] {0, 0, 0, 2}); return Arrays.copyOfRange(md.digest(), 0, 32); }
@Test public void shouldDeriveMacKey() { assertEquals( "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", ByteArrayUtils.prettyHex(AESSecureMessaging.deriveMac(Hex.decode("CA"), null)) ); }
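The derivation above is SHA-256 over seed ‖ optional nonce ‖ a 4-byte counter; a pure-JDK sketch of the same steps (the copyOfRange in the original is effectively a no-op, since a SHA-256 digest is already 32 bytes):

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

static byte[] deriveMacJdk(byte[] seed, byte[] nonce) throws NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    md.update(seed);
    if (nonce != null) md.update(nonce);
    md.update(new byte[] {0, 0, 0, 2}); // 4-byte counter; the value 2 presumably selects the MAC key
    return md.digest();                 // already 32 bytes for SHA-256
}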
@Override protected int command() { if (!validateConfigFilePresent()) { return 1; } final MigrationConfig config; try { config = MigrationConfig.load(getConfigFile()); } catch (KsqlException | MigrationException e) { LOGGER.error(e.getMessage()); return 1; } return command( config, MigrationsUtil::getKsqlClient, getMigrationsDir(getConfigFile(), config) ); }
@Test public void shouldValidateNoMigrations() throws Exception { // Given: final List<String> versions = ImmutableList.of(); final List<String> checksums = givenExistingMigrationFiles(versions); givenAppliedMigrations(versions, checksums); // When: final int result = command.command(config, cfg -> ksqlClient, migrationsDir); // Then: assertThat(result, is(0)); verifyClientCallsForVersions(versions); }
public static boolean containsLocalIp(List<InetSocketAddress> clusterAddresses, AlluxioConfiguration conf) { String localAddressIp = getLocalIpAddress((int) conf.getMs(PropertyKey .NETWORK_HOST_RESOLUTION_TIMEOUT_MS)); for (InetSocketAddress addr : clusterAddresses) { String clusterNodeIp; try { clusterNodeIp = InetAddress.getByName(addr.getHostName()).getHostAddress(); if (clusterNodeIp.equals(localAddressIp)) { return true; } } catch (UnknownHostException e) { LOG.error("Get raft cluster node ip by hostname({}) failed", addr.getHostName(), e); } } return false; }
@Test public void testIsNotLocalAddress() { List<InetSocketAddress> clusterAddresses = new ArrayList<>(); InetSocketAddress raftNodeAddress1 = new InetSocketAddress("host1", 10); InetSocketAddress raftNodeAddress2 = new InetSocketAddress("host2", 20); InetSocketAddress raftNodeAddress3 = new InetSocketAddress("host3", 30); clusterAddresses.add(raftNodeAddress1); clusterAddresses.add(raftNodeAddress2); clusterAddresses.add(raftNodeAddress3); assertFalse(NetworkAddressUtils.containsLocalIp(clusterAddresses, mConfiguration)); }
@Override @PublicAPI(usage = ACCESS) public boolean isMetaAnnotatedWith(Class<? extends Annotation> annotationType) { return isMetaAnnotatedWith(annotationType.getName()); }
@Test public void isMetaAnnotatedWith_type_on_resolved_target() { JavaClasses classes = importClassesWithContext(Origin.class, Target.class, QueriedAnnotation.class); JavaCall<?> call = simulateCall().from(classes.get(Origin.class), "call").to(classes.get(Target.class).getMethod("called")); assertThat(call.getTarget().isMetaAnnotatedWith(QueriedAnnotation.class)) .as("target is meta-annotated with @" + QueriedAnnotation.class.getSimpleName()) .isTrue(); assertThat(call.getTarget().isMetaAnnotatedWith(Retention.class)) .as("target is meta-annotated with @" + Retention.class.getSimpleName()) .isTrue(); assertThat(call.getTarget().isMetaAnnotatedWith(Deprecated.class)) .as("target is meta-annotated with @" + Deprecated.class.getSimpleName()) .isFalse(); }
public static String format(String source, Object... parameters) { String current = source; for (Object parameter : parameters) { if (!current.contains("{}")) { return current; } // quoteReplacement prevents '$' or '\' in the parameter from being interpreted as regex replacement syntax current = current.replaceFirst("\\{\\}", java.util.regex.Matcher.quoteReplacement(String.valueOf(parameter))); } return current; }
@Test public void testFormatNull() { String fmt = "Some string {} 2 {}"; assertEquals("Some string 1 2 null", format(fmt, 1, null)); }
public String getShardIterator( final String streamName, final String shardId, final ShardIteratorType shardIteratorType, final String startingSequenceNumber, final Instant timestamp) throws TransientKinesisException { final Date date = timestamp != null ? timestamp.toDate() : null; return wrapExceptions( () -> kinesis .getShardIterator( new GetShardIteratorRequest() .withStreamName(streamName) .withShardId(shardId) .withShardIteratorType(shardIteratorType) .withStartingSequenceNumber(startingSequenceNumber) .withTimestamp(date)) .getShardIterator()); }
@Test public void shouldReturnIteratorStartingWithSequenceNumber() throws Exception { when(kinesis.getShardIterator( new GetShardIteratorRequest() .withStreamName(STREAM) .withShardId(SHARD_1) .withShardIteratorType(ShardIteratorType.AT_SEQUENCE_NUMBER) .withStartingSequenceNumber(SEQUENCE_NUMBER))) .thenReturn(new GetShardIteratorResult().withShardIterator(SHARD_ITERATOR)); String stream = underTest.getShardIterator( STREAM, SHARD_1, ShardIteratorType.AT_SEQUENCE_NUMBER, SEQUENCE_NUMBER, null); assertThat(stream).isEqualTo(SHARD_ITERATOR); }
public static Ip4Address makeMaskPrefix(int prefixLength) { byte[] mask = IpAddress.makeMaskPrefixArray(VERSION, prefixLength); return new Ip4Address(mask); }
@Test(expected = IllegalArgumentException.class) public void testInvalidMakeNegativeMaskPrefixIPv4() { Ip4Address.makeMaskPrefix(-1); }
@Override public MessageQueueView getCurrentMessageQueueView(ProxyContext ctx, String topicName) throws Exception { return getAllMessageQueueView(ctx, topicName); }
@Test public void testGetCurrentMessageQueueView() throws Throwable { ProxyContext ctx = ProxyContext.create(); MQClientException exception = catchThrowableOfType(() -> this.topicRouteService.getCurrentMessageQueueView(ctx, ERR_TOPIC), MQClientException.class); assertTrue(TopicRouteHelper.isTopicNotExistError(exception)); assertEquals(1, this.topicRouteService.topicCache.asMap().size()); assertNotNull(this.topicRouteService.getCurrentMessageQueueView(ctx, TOPIC)); assertEquals(2, this.topicRouteService.topicCache.asMap().size()); }
void runOnce() { if (transactionManager != null) { try { transactionManager.maybeResolveSequences(); RuntimeException lastError = transactionManager.lastError(); // do not continue sending if the transaction manager is in a failed state if (transactionManager.hasFatalError()) { if (lastError != null) maybeAbortBatches(lastError); client.poll(retryBackoffMs, time.milliseconds()); return; } if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) { return; } // Check whether we need a new producerId. If so, we will enqueue an InitProducerId // request which will be sent below transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); if (maybeSendAndPollTransactionalRequest()) { return; } } catch (AuthenticationException e) { // This is already logged as error, but propagated here to perform any clean ups. log.trace("Authentication exception while processing transactional request", e); transactionManager.authenticationFailed(e); } } long currentTimeMs = time.milliseconds(); long pollTimeout = sendProducerData(currentTimeMs); client.poll(pollTimeout, currentTimeMs); }
@Test public void testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = createTransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); assertEquals(0, transactionManager.sequenceNumber(tp0)); // Send first ProduceRequest Future<RecordMetadata> request1 = appendToAccumulator(tp0, 0L, "key", "value"); Node node = metadata.fetch().nodes().get(0); time.sleep(10000L); client.disconnect(node.idString()); client.backoff(node, 10); sender.runOnce(); assertFutureFailure(request1, TimeoutException.class); assertFalse(transactionManager.hasUnresolvedSequence(tp0)); }
@Override public StreamDataDecoderResult decode(StreamMessage message) { assert message.getValue() != null; try { _reuse.clear(); GenericRow row = _valueDecoder.decode(message.getValue(), 0, message.getLength(), _reuse); if (row != null) { if (message.getKey() != null) { row.putValue(KEY, new String(message.getKey(), StandardCharsets.UTF_8)); } StreamMessageMetadata metadata = message.getMetadata(); if (metadata != null) { if (metadata.getHeaders() != null) { metadata.getHeaders().getFieldToValueMap() .forEach((key, value) -> row.putValue(HEADER_KEY_PREFIX + key, value)); } if (metadata.getRecordMetadata() != null) { metadata.getRecordMetadata().forEach((key, value) -> row.putValue(METADATA_KEY_PREFIX + key, value)); } } return new StreamDataDecoderResult(row, null); } else { return new StreamDataDecoderResult(null, new RuntimeException("Encountered unknown exception when decoding a Stream message")); } } catch (Exception e) { LOGGER.error("Failed to decode StreamMessage", e); return new StreamDataDecoderResult(null, e); } }
@Test public void testDecodeValueOnly() throws Exception { TestDecoder messageDecoder = new TestDecoder(); messageDecoder.init(Collections.emptyMap(), ImmutableSet.of(NAME_FIELD), ""); String value = "Alice"; BytesStreamMessage message = new BytesStreamMessage(value.getBytes(StandardCharsets.UTF_8)); StreamDataDecoderResult result = new StreamDataDecoderImpl(messageDecoder).decode(message); Assert.assertNotNull(result); Assert.assertNull(result.getException()); Assert.assertNotNull(result.getResult()); GenericRow row = result.getResult(); Assert.assertEquals(row.getFieldToValueMap().size(), 1); Assert.assertEquals(String.valueOf(row.getValue(NAME_FIELD)), value); }
@Override public SlotAssignmentResult ensure(long key1, int key2) { return super.ensure0(key1, key2); }
@Test public void testPut() { final long key1 = randomKey(); final int key2 = randomKey(); SlotAssignmentResult slot = insert(key1, key2); final long valueAddress = slot.address(); assertTrue(slot.isNew()); slot = hsa.ensure(key1, key2); assertFalse(slot.isNew()); assertEquals(valueAddress, slot.address()); }
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset( RequestContext context, OffsetCommitRequestData request ) throws ApiException { Group group = validateOffsetCommit(context, request); // In the old consumer group protocol, the offset commits maintain the session if // the group is in Stable or PreparingRebalance state. if (group.type() == Group.GroupType.CLASSIC) { ClassicGroup classicGroup = (ClassicGroup) group; if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) { groupMetadataManager.rescheduleClassicGroupMemberHeartbeat( classicGroup, classicGroup.member(request.memberId()) ); } } final OffsetCommitResponseData response = new OffsetCommitResponseData(); final List<CoordinatorRecord> records = new ArrayList<>(); final long currentTimeMs = time.milliseconds(); final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs); request.topics().forEach(topic -> { final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name()); response.topics().add(topicResponse); topic.partitions().forEach(partition -> { if (isMetadataInvalid(partition.committedMetadata())) { topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code())); } else { log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.", request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(), request.memberId(), partition.committedLeaderEpoch()); topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.NONE.code())); final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest( partition, currentTimeMs, expireTimestampMs ); records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord( request.groupId(), topic.name(), partition.partitionIndex(), offsetAndMetadata, metadataImage.features().metadataVersion() )); } }); }); if (!records.isEmpty()) { metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size()); } return new CoordinatorResult<>(records, response); }
@Test public void testConsumerGroupOffsetDelete() { OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build(); ConsumerGroup group = context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup( "foo", true ); context.commitOffset("foo", "bar", 0, 100L, 0); assertFalse(group.isSubscribedToTopic("bar")); context.testOffsetDeleteWith("foo", "bar", 0, Errors.NONE); }
public HollowOrdinalIterator findKeysWithPrefix(String prefix) { TST current; HollowOrdinalIterator it; do { current = prefixIndexVolatile; it = current.findKeysWithPrefix(prefix); } while (current != this.prefixIndexVolatile); return it; }
@Test public void testMovieActorMapReference() throws Exception { Map<Integer, Actor> idActorMap = new HashMap<>(); idActorMap.put(1, new Actor("Keanu Reeves")); idActorMap.put(2, new Actor("Laurence Fishburne")); idActorMap.put(3, new Actor("Carrie-Anne Moss")); MovieActorMapReference movieActorMapReference = new MovieActorMapReference(1, 1999, "The Matrix", idActorMap); objectMapper.add(movieActorMapReference); StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine); HollowPrefixIndex prefixIndex = new HollowPrefixIndex(readStateEngine, "MovieActorMapReference", "idActorNameMap.value"); Set<Integer> ordinals = toSet(prefixIndex.findKeysWithPrefix("carr")); Assert.assertEquals(1, ordinals.size()); ordinals = toSet(prefixIndex.findKeysWithPrefix("aaa")); Assert.assertEquals(0, ordinals.size()); }
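findKeysWithPrefix relies on an optimistic read-retry against a volatile reference; a generic sketch of the idiom (Index and Result are hypothetical types):

class OptimisticReader {
    interface Index { Result find(String prefix); }
    interface Result {}

    private volatile Index current;

    OptimisticReader(Index initial) { this.current = initial; }

    Result query(String prefix) {
        Index snapshot;
        Result result;
        do {
            snapshot = current;             // volatile read of the live index
            result = snapshot.find(prefix); // may race with a concurrent swap
        } while (snapshot != current);      // retry if the index was replaced mid-query
        return result;
    }
}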
public Collection<NacosTraceSubscriber> getAllTraceSubscribers() { return new HashSet<>(traceSubscribers.values()); }
@Test void testGetAllTraceSubscribers() { assertFalse(NacosTracePluginManager.getInstance().getAllTraceSubscribers().isEmpty()); assertContainsTestPlugin(); }
@Nullable static String lastStringHeader(Headers headers, String key) { Header header = headers.lastHeader(key); if (header == null || header.value() == null) return null; return new String(header.value(), UTF_8); }
@Test void lastStringHeader() { record.headers().add("b3", new byte[] {'1'}); assertThat(KafkaHeaders.lastStringHeader(record.headers(), "b3")) .isEqualTo("1"); }
protected HashMap<String, Double> computeModularity(Graph graph, CommunityStructure theStructure, int[] comStructure, double currentResolution, boolean randomized, boolean weighted) { isCanceled = false; Progress.start(progress); Random rand = new Random(); double totalWeight = theStructure.graphWeightSum; double[] nodeDegrees = theStructure.weights.clone(); HashMap<String, Double> results = new HashMap<>(); if (isCanceled) { return results; } boolean someChange = true; while (someChange) { someChange = false; boolean localChange = true; while (localChange) { localChange = false; int start = 0; if (randomized) { start = rand.nextInt(theStructure.N); // bounded nextInt avoids the negative index Math.abs(Integer.MIN_VALUE) would produce } int step = 0; for (int i = start; step < theStructure.N; i = (i + 1) % theStructure.N) { step++; Community bestCommunity = updateBestCommunity(theStructure, i, currentResolution); if ((theStructure.nodeCommunities[i] != bestCommunity) && (bestCommunity != null)) { theStructure.moveNodeTo(i, bestCommunity); localChange = true; } if (isCanceled) { return results; } } someChange = localChange || someChange; if (isCanceled) { return results; } } if (someChange) { theStructure.zoomOut(); } } fillComStructure(graph, theStructure, comStructure); double[] degreeCount = fillDegreeCount(graph, theStructure, comStructure, nodeDegrees, weighted); double computedModularity = finalQ(comStructure, degreeCount, graph, theStructure, totalWeight, 1., weighted); double computedModularityResolution = finalQ(comStructure, degreeCount, graph, theStructure, totalWeight, currentResolution, weighted); results.put("modularity", computedModularity); results.put("modularityResolution", computedModularityResolution); return results; }
@Test public void testGraphWithoutLinksModularity() { GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(5); UndirectedGraph graph = graphModel.getUndirectedGraph(); Modularity mod = new Modularity(); Modularity.CommunityStructure theStructure = mod.new CommunityStructure(graph); int[] comStructure = new int[graph.getNodeCount()]; HashMap<String, Double> modularityValues = mod.computeModularity(graph, theStructure, comStructure, 1., true, false); double modValue = modularityValues.get("modularity"); assertEquals(Double.NaN, modValue, 0.0); }
public String getCopyStrategy() { return copyStrategy; }
@Test public void testCopyStrategy() { final DistCpOptions.Builder builder = new DistCpOptions.Builder( new Path("hdfs://localhost:8020/source/first"), new Path("hdfs://localhost:8020/target/")); Assert.assertEquals(DistCpConstants.UNIFORMSIZE, builder.build().getCopyStrategy()); builder.withCopyStrategy("dynamic"); Assert.assertEquals("dynamic", builder.build().getCopyStrategy()); }
public T send() throws IOException { return web3jService.send(this, responseType); }
@Test public void testEthEstimateGas() throws Exception { web3j.ethEstimateGas( Transaction.createEthCallTransaction( "0xa70e8dd61c5d32be8058bb8eb970870f07233155", "0x52b93c80364dc2dd4444c146d73b9836bbbb2b3f", "0x0")) .send(); verifyResult( "{\"jsonrpc\":\"2.0\",\"method\":\"eth_estimateGas\"," + "\"params\":[{\"from\":\"0xa70e8dd61c5d32be8058bb8eb970870f07233155\"," + "\"to\":\"0x52b93c80364dc2dd4444c146d73b9836bbbb2b3f\",\"data\":\"0x0\"}]," + "\"id\":1}"); }
public static MetricName name(Class<?> klass, String... names) { return name(klass.getName(), names); }
@Test public void elidesNullValuesFromNamesWhenNullAndNotNullPassedIn() throws Exception { assertThat(name("one", null, "three")) .isEqualTo(MetricName.build("one.three")); }
public static Matcher<HttpRequest> methodEquals(String method) { if (method == null) throw new NullPointerException("method == null"); if (method.isEmpty()) throw new NullPointerException("method is empty"); return new MethodEquals(method); }
@Test void methodEquals_unmatched_mixedCase() { when(httpRequest.method()).thenReturn("PoSt"); assertThat(methodEquals("POST").matches(httpRequest)).isFalse(); }
public byte[] encode(String val, String delimiters) { return codecs[0].encode(val); }
@Test public void testEncodeHebrewPersonName() { assertArrayEquals(HEBREW_PERSON_NAME_BYTE, iso8859_8().encode(HEBREW_PERSON_NAME, PN_DELIMS)); }
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(PG_BOOLEAN); builder.dataType(PG_BOOLEAN); break; case TINYINT: case SMALLINT: builder.columnType(PG_SMALLINT); builder.dataType(PG_SMALLINT); break; case INT: builder.columnType(PG_INTEGER); builder.dataType(PG_INTEGER); break; case BIGINT: builder.columnType(PG_BIGINT); builder.dataType(PG_BIGINT); break; case FLOAT: builder.columnType(PG_REAL); builder.dataType(PG_REAL); break; case DOUBLE: builder.columnType(PG_DOUBLE_PRECISION); builder.dataType(PG_DOUBLE_PRECISION); break; case DECIMAL: if (column.getSourceType() != null && column.getSourceType().equalsIgnoreCase(PG_MONEY)) { builder.columnType(PG_MONEY); builder.dataType(PG_MONEY); } else { DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", PG_NUMERIC, precision, scale)); builder.dataType(PG_NUMERIC); builder.precision(precision); builder.scale(scale); } break; case BYTES: builder.columnType(PG_BYTEA); builder.dataType(PG_BYTEA); break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(PG_TEXT); builder.dataType(PG_TEXT); } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", PG_VARCHAR, column.getColumnLength())); builder.dataType(PG_VARCHAR); } else { builder.columnType(PG_TEXT); builder.dataType(PG_TEXT); } break; case DATE: builder.columnType(PG_DATE); builder.dataType(PG_DATE); break; case TIME: Integer timeScale = column.getScale(); if (timeScale != null && timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to time({})", column.getName(), column.getScale(), MAX_TIME_SCALE, timeScale); } if (timeScale != null && timeScale > 0) { builder.columnType(String.format("%s(%s)", PG_TIME, timeScale)); } else { builder.columnType(PG_TIME); } builder.dataType(PG_TIME); builder.scale(timeScale); break; case TIMESTAMP: Integer timestampScale = column.getScale(); if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } if (timestampScale != null && timestampScale > 0) { builder.columnType(String.format("%s(%s)", PG_TIMESTAMP, timestampScale)); } else { builder.columnType(PG_TIMESTAMP); } builder.dataType(PG_TIMESTAMP); builder.scale(timestampScale); break; case ARRAY: ArrayType arrayType = (ArrayType) column.getDataType(); SeaTunnelDataType elementType = arrayType.getElementType(); switch (elementType.getSqlType()) { case BOOLEAN: builder.columnType(PG_BOOLEAN_ARRAY); builder.dataType(PG_BOOLEAN_ARRAY); break; case TINYINT: case SMALLINT: builder.columnType(PG_SMALLINT_ARRAY); builder.dataType(PG_SMALLINT_ARRAY); break; case INT: builder.columnType(PG_INTEGER_ARRAY); builder.dataType(PG_INTEGER_ARRAY); break; case BIGINT: builder.columnType(PG_BIGINT_ARRAY); builder.dataType(PG_BIGINT_ARRAY); break; case FLOAT: builder.columnType(PG_REAL_ARRAY); builder.dataType(PG_REAL_ARRAY); break; case DOUBLE: builder.columnType(PG_DOUBLE_PRECISION_ARRAY); builder.dataType(PG_DOUBLE_PRECISION_ARRAY); break; case BYTES: builder.columnType(PG_BYTEA); builder.dataType(PG_BYTEA); break; case STRING: builder.columnType(PG_TEXT_ARRAY); builder.dataType(PG_TEXT_ARRAY); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.POSTGRESQL, elementType.getSqlType().name(), column.getName()); } break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.POSTGRESQL, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
@Test public void testReconvertBoolean() { Column column = PhysicalColumn.builder() .name("test") .dataType(BasicType.BOOLEAN_TYPE) .nullable(true) .defaultValue(true) .comment("test") .build(); BasicTypeDefine typeDefine = PostgresTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(PostgresTypeConverter.PG_BOOLEAN, typeDefine.getColumnType()); Assertions.assertEquals(PostgresTypeConverter.PG_BOOLEAN, typeDefine.getDataType()); Assertions.assertEquals(column.isNullable(), typeDefine.isNullable()); Assertions.assertEquals(column.getDefaultValue(), typeDefine.getDefaultValue()); Assertions.assertEquals(column.getComment(), typeDefine.getComment()); }
public static ScanReport fromJson(String json) { return JsonUtil.parse(json, ScanReportParser::fromJson); }
@Test public void invalidSnapshotId() { assertThatThrownBy( () -> ScanReportParser.fromJson( "{\"table-name\":\"roundTripTableName\",\"snapshot-id\":\"invalid\"}")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse to a long value: snapshot-id: \"invalid\""); }
public boolean supportsPipelineAnalytics() { return hasSupportFor(PIPELINE_TYPE); }
@Test public void shouldSupportPipelineAnalyticsIfPluginListsPipelineMetricsAsCapability() { assertTrue(new Capabilities(List.of(new SupportedAnalytics("pipeline", "id", "title"))).supportsPipelineAnalytics()); assertTrue(new Capabilities(List.of(new SupportedAnalytics("PipeLine", "id", "title"))).supportsPipelineAnalytics()); assertFalse(new Capabilities(Collections.emptyList()).supportsPipelineAnalytics()); }
public static List<ComponentContainers> filterInstances( ServiceContext context, ClientAMProtocol.GetCompInstancesRequestProto filterReq) { Map<String, ComponentContainers> containersByComp = new HashMap<>(); Map<ContainerId, ComponentInstance> instances = context.scheduler.getLiveInstances(); instances.forEach(((containerId, instance) -> { boolean include = true; if (filterReq.getComponentNamesList() != null && !filterReq.getComponentNamesList().isEmpty()) { // filter by component name if (!filterReq.getComponentNamesList().contains( instance.getComponent().getName())) { include = false; } } if (filterReq.getVersion() != null && !filterReq.getVersion().isEmpty()) { // filter by version String instanceServiceVersion = instance.getServiceVersion(); if (instanceServiceVersion == null || !instanceServiceVersion.equals( filterReq.getVersion())) { include = false; } } if (filterReq.getContainerStatesList() != null && !filterReq.getContainerStatesList().isEmpty()) { // filter by state if (!filterReq.getContainerStatesList().contains( instance.getContainerState().toString())) { include = false; } } if (include) { ComponentContainers compContainers = containersByComp.computeIfAbsent(instance.getCompName(), k -> { ComponentContainers result = new ComponentContainers(); result.setContainers(new ArrayList<>()); result.setComponentName(instance.getCompName()); return result; }); compContainers.addContainer(instance.getContainerSpec()); } })); List<ComponentContainers> result = new ArrayList<>(); result.addAll(containersByComp.values()); return result; }
@Test public void testFilterWithComp() throws Exception { GetCompInstancesRequestProto req = GetCompInstancesRequestProto.newBuilder() .addAllComponentNames(Lists.newArrayList("compa")).build(); List<ComponentContainers> compContainers = FilterUtils.filterInstances( new MockRunningServiceContext(rule, TestServiceManager.createBaseDef("service")), req); Assert.assertEquals("num comps", 1, compContainers.size()); Assert.assertEquals("comp name", "compa", compContainers.get(0).getComponentName()); Assert.assertEquals("num containers", 2, compContainers.get(0).getContainers().size()); }
public void execute() { Profiler stepProfiler = Profiler.create(LOGGER).logTimeLast(true); boolean allStepsExecuted = false; try { executeSteps(stepProfiler); allStepsExecuted = true; } finally { if (listener != null) { executeListener(allStepsExecuted); } } }
@Test public void execute_does_not_fail_if_listener_throws_Throwable() { ComputationStepExecutor.Listener listener = mock(ComputationStepExecutor.Listener.class); doThrow(new Error("Faking error thrown by Listener")) .when(listener) .finished(anyBoolean()); new ComputationStepExecutor(mockComputationSteps(computationStep1), taskInterrupter, listener).execute(); }
Bytes toBytes(final KO foreignKey, final K primaryKey) { //The serialization format - note that primaryKeySerialized may be null, such as when a prefixScan //key is being created. //{Integer.BYTES foreignKeyLength}{foreignKeySerialized}{Optional-primaryKeySerialized} final byte[] foreignKeySerializedData = foreignKeySerializer.serialize(foreignKeySerdeTopic, foreignKey); //? bytes final byte[] primaryKeySerializedData = primaryKeySerializer.serialize(primaryKeySerdeTopic, primaryKey); final ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES + foreignKeySerializedData.length + primaryKeySerializedData.length); buf.putInt(foreignKeySerializedData.length); buf.put(foreignKeySerializedData); buf.put(primaryKeySerializedData); return Bytes.wrap(buf.array()); }
@Test public void nullPrimaryKeySerdeTest() { final CombinedKeySchema<String, Integer> cks = new CombinedKeySchema<>( () -> "fkTopic", Serdes.String(), () -> "pkTopic", Serdes.Integer() ); assertThrows(NullPointerException.class, () -> cks.toBytes("foreignKey", null)); }
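Decoding is the mirror image of toBytes; a plain-ByteBuffer sketch of splitting the {Integer.BYTES length}{foreignKey}{primaryKey} layout back into its raw parts (helper name is illustrative):

import java.nio.ByteBuffer;

static byte[][] splitCombinedKey(byte[] combined) {
    ByteBuffer buf = ByteBuffer.wrap(combined);
    byte[] foreignKey = new byte[buf.getInt()];    // 4-byte length prefix comes first
    buf.get(foreignKey);
    byte[] primaryKey = new byte[buf.remaining()]; // everything left is the primary key
    buf.get(primaryKey);
    return new byte[][] {foreignKey, primaryKey};
}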
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); if (data.size() < 10) { onInvalidDataReceived(device, data); return; } int offset = 0; final int flags = data.getIntValue(Data.FORMAT_UINT8, offset++); final boolean timeOffsetPresent = (flags & 0x01) != 0; final boolean glucoseDataPresent = (flags & 0x02) != 0; final boolean unitMolL = (flags & 0x04) != 0; final boolean sensorStatusAnnunciationPresent = (flags & 0x08) != 0; final boolean contextInformationFollows = (flags & 0x10) != 0; if (data.size() < 10 + (timeOffsetPresent ? 2 : 0) + (glucoseDataPresent ? 3 : 0) + (sensorStatusAnnunciationPresent ? 2 : 0)) { onInvalidDataReceived(device, data); return; } // Required fields final int sequenceNumber = data.getIntValue(Data.FORMAT_UINT16_LE, offset); offset += 2; final Calendar baseTime = DateTimeDataCallback.readDateTime(data, 3); offset += 7; if (baseTime == null) { onInvalidDataReceived(device, data); return; } // Optional fields if (timeOffsetPresent) { final int timeOffset = data.getIntValue(Data.FORMAT_SINT16_LE, offset); offset += 2; baseTime.add(Calendar.MINUTE, timeOffset); } Float glucoseConcentration = null; Integer unit = null; Integer type = null; Integer sampleLocation = null; if (glucoseDataPresent) { glucoseConcentration = data.getFloatValue(Data.FORMAT_SFLOAT, offset); final int typeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, offset + 2); offset += 3; type = typeAndSampleLocation & 0x0F; sampleLocation = typeAndSampleLocation >> 4; unit = unitMolL ? UNIT_mol_L : UNIT_kg_L; } GlucoseStatus status = null; if (sensorStatusAnnunciationPresent) { final int value = data.getIntValue(Data.FORMAT_UINT16_LE, offset); // offset += 2; status = new GlucoseStatus(value); } onGlucoseMeasurementReceived(device, sequenceNumber, baseTime /* with offset */, glucoseConcentration, unit, type, sampleLocation, status, contextInformationFollows); }
@Test public void onGlucoseMeasurementReceived() { final Data data = new Data(new byte[] { (byte) 0b011011, // Time Offset, Type and Location Present, unit: kg/L, Status Annunciation Present, Context follows 1, 0, // Seq = 1 (byte) 0xE3, 0x07, // 2019 2, // February 27, // 27th 11, 10, 30, // at 11:10:30 (byte) 0xFB, (byte) 0xFF, // Time offset = -5 minutes 30, 0, // Glucose concentration = 30.0 kg/L 0x12, // Type = 2 (TYPE_CAPILLARY_PLASMA), Location = 1 (SAMPLE_LOCATION_FINGER) 0b101, 0b1 // Status: Low battery, Device Fault, Sensor Temp too low }); callback.onDataReceived(null, data); assertTrue(success); }
@Override protected int getDefaultPort() { return (Integer) PropertyKey.MASTER_RPC_PORT.getDefaultValue(); }
@Test public void defaultPortTest() throws Exception { // this test ensures that the default port for the Alluxio Hadoop file system is the same // whether in Hadoop 1.x or Hadoop 2.x int defaultRpcPort = (int) PropertyKey.MASTER_RPC_PORT.getDefaultValue(); Configuration conf = new Configuration(); conf.set("fs.AbstractFileSystem.alluxio.impl", "alluxio.hadoop.AlluxioFileSystem"); conf.set("fs.alluxio.impl", "alluxio.hadoop.FileSystem"); URI uri = new URI("alluxio:///test"); org.apache.hadoop.fs.AbstractFileSystem system = org.apache.hadoop.fs.AbstractFileSystem .createFileSystem(uri, conf); assertTrue(system instanceof AlluxioFileSystem); assertEquals(defaultRpcPort, system.getUriDefaultPort()); org.apache.hadoop.fs.FileSystem system2 = org.apache.hadoop.fs.FileSystem.get(uri, conf); assertTrue(system2 instanceof FileSystem); // casting is required as org.apache.hadoop.fs.FileSystem#getDefaultPort is protected assertEquals(defaultRpcPort, ((FileSystem) system2).getDefaultPort()); }
@Override public Set<Host> getHostsByIp(IpAddress ip) { checkNotNull(ip, "IP address cannot be null"); return filter(getHostsColl(), host -> host.ipAddresses().contains(ip)); }
@Test(expected = NullPointerException.class) public void testGetHostsByNullIp() { VirtualNetwork vnet = setupVnet(); HostService hostService = manager.get(vnet.id(), HostService.class); hostService.getHostsByIp(null); }
public B addProtocols(List<ProtocolConfig> protocols) { if (this.protocols == null) { this.protocols = new ArrayList<>(); } this.protocols.addAll(protocols); return getThis(); }
@Test void addProtocols() { ProtocolConfig protocol = new ProtocolConfig(); ServiceBuilder builder = new ServiceBuilder(); Assertions.assertNull(builder.build().getProtocols()); builder.addProtocols(Collections.singletonList(protocol)); Assertions.assertNotNull(builder.build().getProtocols()); Assertions.assertEquals(1, builder.build().getProtocols().size()); }
public static <T> List<T> randomEles(final List<T> list, final int count) { final List<T> result = new ArrayList<>(count); final int limit = list.size(); while (result.size() < count) { result.add(randomEle(list, limit)); } return result; }
@Test public void randomElesTest(){ List<Integer> result = RandomUtil.randomEles(CollUtil.newArrayList(1, 2, 3, 4, 5, 6), 2); assertEquals(2, result.size()); }
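Note that randomEles samples with replacement, so the result may contain duplicates; a sketch of a without-replacement variant using Collections.shuffle, for contrast:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

static <T> List<T> randomDistinctEles(List<T> list, int count) {
    List<T> copy = new ArrayList<>(list); // leave the input untouched
    Collections.shuffle(copy);
    return copy.subList(0, Math.min(count, copy.size()));
}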
@ApiOperation(value = "Sync edge (syncEdge)", notes = "Starts synchronization process between edge and cloud. \n" + "All entities that are assigned to particular edge are going to be send to remote edge service." + TENANT_AUTHORITY_PARAGRAPH) @PreAuthorize("hasAuthority('TENANT_ADMIN')") @PostMapping(value = "/edge/sync/{edgeId}") public DeferredResult<ResponseEntity> syncEdge(@Parameter(description = EDGE_ID_PARAM_DESCRIPTION, required = true) @PathVariable("edgeId") String strEdgeId) throws ThingsboardException { checkParameter("edgeId", strEdgeId); final DeferredResult<ResponseEntity> response = new DeferredResult<>(); if (isEdgesEnabled() && edgeRpcServiceOpt.isPresent()) { EdgeId edgeId = new EdgeId(toUUID(strEdgeId)); edgeId = checkNotNull(edgeId); SecurityUser user = getCurrentUser(); TenantId tenantId = user.getTenantId(); ToEdgeSyncRequest request = new ToEdgeSyncRequest(UUID.randomUUID(), tenantId, edgeId); edgeRpcServiceOpt.get().processSyncRequest(request, fromEdgeSyncResponse -> reply(response, fromEdgeSyncResponse)); } else { throw new ThingsboardException("Edges support disabled", ThingsboardErrorCode.GENERAL); } return response; }
@Test public void testSyncEdge() throws Exception { loginSysAdmin(); // get jwt settings from yaml config JwtSettings settings = doGet("/api/admin/jwtSettings", JwtSettings.class); // save jwt settings into db doPost("/api/admin/jwtSettings", settings).andExpect(status().isOk()); loginTenantAdmin(); Edge edge = doPost("/api/edge", constructEdge("Test Sync Edge", "test"), Edge.class); Asset asset = new Asset(); asset.setName("Test Sync Edge Asset 1"); asset.setType("test"); Asset savedAsset = doPost("/api/asset", asset, Asset.class); Device device = new Device(); device.setName("Test Sync Edge Device 1"); device.setType("default"); Device savedDevice = doPost("/api/device", device, Device.class); simulateEdgeActivation(edge); doPost("/api/edge/" + edge.getId().getId().toString() + "/device/" + savedDevice.getId().getId().toString(), Device.class); doPost("/api/edge/" + edge.getId().getId().toString() + "/asset/" + savedAsset.getId().getId().toString(), Asset.class); EdgeImitator edgeImitator = new EdgeImitator(EDGE_HOST, EDGE_PORT, edge.getRoutingKey(), edge.getSecret()); edgeImitator.ignoreType(UserCredentialsUpdateMsg.class); edgeImitator.ignoreType(OAuth2UpdateMsg.class); edgeImitator.expectMessageAmount(27); edgeImitator.connect(); waitForMessages(edgeImitator); verifyFetchersMsgs(edgeImitator, savedDevice); // verify queue msgs Assert.assertTrue(popDeviceProfileMsg(edgeImitator.getDownlinkMsgs(), UpdateMsgType.ENTITY_CREATED_RPC_MESSAGE, "default")); Assert.assertTrue(popDeviceMsg(edgeImitator.getDownlinkMsgs(), UpdateMsgType.ENTITY_CREATED_RPC_MESSAGE, "Test Sync Edge Device 1")); Assert.assertTrue(popDeviceCredentialsMsg(edgeImitator.getDownlinkMsgs(), savedDevice.getId())); Assert.assertTrue(popAssetProfileMsg(edgeImitator.getDownlinkMsgs(), UpdateMsgType.ENTITY_CREATED_RPC_MESSAGE, "test")); Assert.assertTrue(popAssetMsg(edgeImitator.getDownlinkMsgs(), UpdateMsgType.ENTITY_CREATED_RPC_MESSAGE, "Test Sync Edge Asset 1")); Assert.assertTrue(edgeImitator.getDownlinkMsgs().isEmpty()); edgeImitator.expectMessageAmount(22); doPost("/api/edge/sync/" + edge.getId()); waitForMessages(edgeImitator); verifyFetchersMsgs(edgeImitator, savedDevice); Assert.assertTrue(edgeImitator.getDownlinkMsgs().isEmpty()); edgeImitator.allowIgnoredTypes(); try { edgeImitator.disconnect(); } catch (Exception ignored) { } doDelete("/api/device/" + savedDevice.getId().getId().toString()) .andExpect(status().isOk()); doDelete("/api/asset/" + savedAsset.getId().getId().toString()) .andExpect(status().isOk()); doDelete("/api/edge/" + edge.getId().getId().toString()) .andExpect(status().isOk()); }
private ElasticsearchQueryString groupByQueryString(Event event) { ElasticsearchQueryString result = ElasticsearchQueryString.empty(); if (!config.groupBy().isEmpty()) { for (String key : event.getGroupByFields().keySet()) { String value = event.getGroupByFields().get(key); String query = new StringBuilder(key).append(":\"").append(luceneEscape(value)).append("\"").toString(); result = result.concatenate(ElasticsearchQueryString.of(query)); } } return result; }
@Test public void testGroupByQueryString() throws EventProcessorException { Map<String, String> groupByFields = ImmutableMap.of( "group_field_one", "one", "group_field_two", "two" ); sourceMessagesWithAggregation(groupByFields, 1, emptyList()); String expectedQueryString = "(aQueryString) AND ((group_field_one:\"one\") AND (group_field_two:\"two\"))"; verify(moreSearch).scrollQuery(eq(expectedQueryString), any(), any(), any(), any(), eq(1), any()); }
@Override public void mkdir(final Path dir, final FsPermission permission, final boolean createParent) throws IOException, UnresolvedLinkException { myFs.mkdir(fullPath(dir), permission, createParent); }
@Test public void testRenameAcrossFs() throws IOException { fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, true); // the root will get interpreted to the root of the chrooted fs. fc.rename(new Path("/newDir/dirFoo"), new Path("file:///dirFooBar")); Assert.assertTrue(FileContextTestHelper.isDir(fc, new Path("/dirFooBar"))); }
public JmxCollector register() { return register(PrometheusRegistry.defaultRegistry); }
@Test public void testDefaultExportLowercaseOutputName() throws Exception { JmxCollector jc = new JmxCollector("---\nlowercaseOutputName: true").register(prometheusRegistry); assertNotNull( getSampleValue( "java_lang_operatingsystem_processcputime", new String[] {}, new String[] {})); }
public synchronized boolean remove(String key) throws IOException { checkNotClosed(); Entry entry = lruEntries.get(key); if (entry == null || entry.currentEditor != null) { return false; } for (int i = 0; i < valueCount; i++) { File file = entry.getCleanFile(i); if (file.exists() && !file.delete()) { throw new IOException("failed to delete " + file); } size -= entry.lengths[i]; entry.lengths[i] = 0; } redundantOpCount++; journalWriter.append(REMOVE); journalWriter.append(' '); journalWriter.append(key); journalWriter.append('\n'); lruEntries.remove(key); if (journalRebuildRequired()) { executorService.submit(cleanupCallable); } return true; }
@Test public void removeAbsentElement() throws Exception { cache.remove("a"); }
@Override public boolean getBooleanValue() { checkValueType(BOOLEAN); return measure.getBooleanValue(); }
@Test public void fail_with_ISE_when_not_boolean_value() { assertThatThrownBy(() -> { MeasureImpl measure = new MeasureImpl(Measure.newMeasureBuilder().create(1d, 1)); measure.getBooleanValue(); }) .isInstanceOf(IllegalStateException.class) .hasMessage("Value can not be converted to boolean because current value type is a DOUBLE"); }
@Override public String format(final Schema schema) { final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema); return options.contains(Option.AS_COLUMN_LIST) ? stripTopLevelStruct(converted) : converted; }
@Test public void shouldFormatOptionalBigint() { assertThat(DEFAULT.format(Schema.OPTIONAL_INT64_SCHEMA), is("BIGINT")); assertThat(STRICT.format(Schema.OPTIONAL_INT64_SCHEMA), is("BIGINT")); }
public static List<LayoutLocation> fromCompactListString(String compactList) { List<LayoutLocation> locs = new ArrayList<>(); if (!Strings.isNullOrEmpty(compactList)) { String[] items = compactList.split(TILDE); for (String s : items) { locs.add(fromCompactString(s)); } } return locs; }
@Test public void fromCompactList() { List<LayoutLocation> locs = fromCompactListString(COMPACT_LIST); ll = locs.get(0); ll2 = locs.get(1); verifyLL1(ll); verifyLL2(ll2); }
byte[] removeEscapedEnclosures( byte[] field, int nrEnclosuresFound ) { byte[] result = new byte[field.length - nrEnclosuresFound]; int resultIndex = 0; for ( int i = 0; i < field.length; i++ ) { result[resultIndex++] = field[i]; if ( field[i] == enclosure[0] && i + 1 < field.length && field[i + 1] == enclosure[0] ) { // Skip the escaped enclosure after adding the first one i++; } } return result; }
@Test public void testRemoveEscapedEnclosuresWithCharacterInTheMiddleOfThem() { CsvInputData csvInputData = new CsvInputData(); csvInputData.enclosure = "\"".getBytes(); String result = new String( csvInputData.removeEscapedEnclosures( "345\"\"1\"\"abc".getBytes(), 2 ) ); assertEquals( "345\"1\"abc", result ); }
@VisibleForTesting synchronized List<RemoteNode> getLeastLoadedNodes() { long currTime = System.currentTimeMillis(); if ((currTime - lastCacheUpdateTime > cacheRefreshInterval) || (cachedNodes == null)) { cachedNodes = convertToRemoteNodes( this.nodeMonitor.selectLeastLoadedNodes(this.numNodes)); if (cachedNodes.size() > 0) { lastCacheUpdateTime = currTime; } } return cachedNodes; }
@Test(timeout = 60000) public void testContainerPromoteAfterContainerStart() throws Exception { HashMap<NodeId, MockNM> nodes = new HashMap<>(); MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService()); nodes.put(nm1.getNodeId(), nm1); MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService()); nodes.put(nm2.getNodeId(), nm2); nm1.registerNode(); nm2.registerNode(); nm1.nodeHeartbeat(oppContainersStatus, true); nm2.nodeHeartbeat(oppContainersStatus, true); OpportunisticContainerAllocatorAMService amservice = (OpportunisticContainerAllocatorAMService) rm .getApplicationMasterService(); MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm) .withAppName("app") .withUser("user") .withAcls(null) .withQueue("default") .withUnmanagedAM(false) .build(); RMApp app1 = MockRMAppSubmitter.submit(rm, data); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2); ResourceScheduler scheduler = rm.getResourceScheduler(); // All nodes 1 to 2 will be applicable for scheduling. nm1.nodeHeartbeat(oppContainersStatus, true); nm2.nodeHeartbeat(oppContainersStatus, true); GenericTestUtils.waitFor(() -> amservice.getLeastLoadedNodes().size() == 2, 10, 10 * 100); QueueMetrics metrics = ((CapacityScheduler) scheduler).getRootQueue() .getMetrics(); // Verify Metrics verifyMetrics(metrics, 7168, 7, 1024, 1, 1); AllocateResponse allocateResponse = am1.allocate( Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(1 * GB), 2, true, null, ExecutionTypeRequest.newInstance( ExecutionType.OPPORTUNISTIC, true))), null); List<Container> allocatedContainers = allocateResponse .getAllocatedContainers(); Assert.assertEquals(2, allocatedContainers.size()); Container container = allocatedContainers.get(0); MockNM allocNode = nodes.get(container.getNodeId()); // Start Container in NM allocNode.nodeHeartbeat(Arrays.asList( ContainerStatus.newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)), true); rm.drainEvents(); // Verify that container is actually running wrt the RM.. RMContainer rmContainer = ((CapacityScheduler) scheduler) .getApplicationAttempt( container.getId().getApplicationAttemptId()).getRMContainer( container.getId()); Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState()); // Verify Metrics After OPP allocation (Nothing should change) verifyMetrics(metrics, 7168, 7, 1024, 1, 1); am1.sendContainerUpdateRequest( Arrays.asList(UpdateContainerRequest.newInstance(0, container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED))); // Verify Metrics After OPP allocation (Nothing should change again) verifyMetrics(metrics, 7168, 7, 1024, 1, 1); // Send Promotion req again... this should result in update error allocateResponse = am1.sendContainerUpdateRequest( Arrays.asList(UpdateContainerRequest.newInstance(0, container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED))); Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size()); Assert.assertEquals(1, allocateResponse.getUpdateErrors().size()); Assert.assertEquals("UPDATE_OUTSTANDING_ERROR", allocateResponse.getUpdateErrors().get(0).getReason()); Assert.assertEquals(container.getId(), allocateResponse.getUpdateErrors().get(0) .getUpdateContainerRequest().getContainerId()); // Start Container in NM allocNode.nodeHeartbeat(Arrays.asList( ContainerStatus.newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)), true); rm.drainEvents(); allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>()); Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); Container uc = allocateResponse.getUpdatedContainers().get(0).getContainer(); Assert.assertEquals(ExecutionType.GUARANTEED, uc.getExecutionType()); Assert.assertEquals(uc.getId(), container.getId()); Assert.assertEquals(uc.getVersion(), container.getVersion() + 1); // Verify that the Container is still in RUNNING state wrt RM.. rmContainer = ((CapacityScheduler) scheduler) .getApplicationAttempt( uc.getId().getApplicationAttemptId()).getRMContainer(uc.getId()); Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState()); // Verify Metrics After OPP allocation : // Allocated cores+mem should have increased, available should decrease verifyMetrics(metrics, 6144, 6, 2048, 2, 2); }
@Override
public void check(Collection<? extends T> collection, ConditionEvents events) {
    ViolatedAndSatisfiedConditionEvents subEvents = new ViolatedAndSatisfiedConditionEvents();
    for (T item : collection) {
        condition.check(item, subEvents);
    }
    if (!subEvents.getAllowed().isEmpty() || !subEvents.getViolating().isEmpty()) {
        events.add(new OnlyConditionEvent(collection, subEvents));
    }
}
@Test
public void if_there_are_no_input_events_no_ContainsOnlyEvent_is_added() {
    ViolatedAndSatisfiedConditionEvents events = new ViolatedAndSatisfiedConditionEvents();

    containOnlyElementsThat(IS_SERIALIZABLE).check(emptyList(), events);

    assertThat(events.getAllowed()).as("allowed events").isEmpty();
    assertThat(events.getViolating()).as("violated events").isEmpty();
}
@Override
public Optional<ScmInfo> getScmInfo(Component component) {
    requireNonNull(component, "Component cannot be null");

    if (component.getType() != Component.Type.FILE) {
        return Optional.empty();
    }

    return scmInfoCache.computeIfAbsent(component, this::getScmInfoForComponent);
}
@Test
public void generate_scm_info_when_nothing_in_db_and_report_is_has_no_changesets() {
    when(dbLoader.getScmInfo(FILE)).thenReturn(Optional.empty());
    addFileSourceInReport(3);

    ScmInfo scmInfo = underTest.getScmInfo(FILE).get();
    assertThat(scmInfo.getAllChangesets()).hasSize(3);
    for (int i = 1; i <= 3; i++) {
        assertChangeset(scmInfo.getChangesetForLine(i), null, null, analysisDate.getTime());
    }

    verify(dbLoader).getScmInfo(FILE);
    verifyNoMoreInteractions(dbLoader);
    verifyNoInteractions(fileStatuses);
    verifyNoInteractions(diff);
}
@ScalarOperator(CAST)
@SqlType(StandardTypes.SMALLINT)
public static long castToSmallint(@SqlType(StandardTypes.INTEGER) long value) {
    try {
        return Shorts.checkedCast(value);
    } catch (IllegalArgumentException e) {
        throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, "Out of range for smallint: " + value, e);
    }
}
@Test
public void testCastToSmallint() {
    assertFunction("cast(INTEGER'37' as smallint)", SMALLINT, (short) 37);
    assertFunction("cast(INTEGER'17' as smallint)", SMALLINT, (short) 17);
}
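A minimal, self-contained sketch (not from the Presto test suite) of the overflow branch that the test above does not exercise: Guava's Shorts.checkedCast, which castToSmallint wraps, throws IllegalArgumentException for values outside the 16-bit range, and the operator resurfaces that as NUMERIC_VALUE_OUT_OF_RANGE.

// Standalone sketch, assuming only plain Guava on the classpath.
import com.google.common.primitives.Shorts;

public class SmallintRangeSketch {
    public static void main(String[] args) {
        System.out.println(Shorts.checkedCast(37L));   // in range: prints 37
        try {
            Shorts.checkedCast(40_000L);               // > Short.MAX_VALUE (32767)
        } catch (IllegalArgumentException e) {
            // castToSmallint rethrows this as NUMERIC_VALUE_OUT_OF_RANGE
            System.out.println("out of range: " + e.getMessage());
        }
    }
}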
@Override
public KsqlSecurityContext provide(final ApiSecurityContext apiSecurityContext) {
    final Optional<KsqlPrincipal> principal = apiSecurityContext.getPrincipal();
    final Optional<String> authHeader = apiSecurityContext.getAuthHeader();
    final List<Entry<String, String>> requestHeaders = apiSecurityContext.getRequestHeaders();

    // A user context is not necessary if a user context provider is not present or the user
    // principal is missing. If a failed authentication attempt results in a missing principal,
    // then the authentication plugin will have already failed the connection before calling
    // this method. Therefore, if we've reached this method with a missing principal, then this
    // must be a valid connection that does not require authentication.
    // For these cases, we create a default service context that the missing user can use.
    final boolean requiresUserContext =
        securityExtension != null
            && securityExtension.getUserContextProvider().isPresent()
            && principal.isPresent();

    if (!requiresUserContext) {
        return new KsqlSecurityContext(
            principal,
            defaultServiceContextFactory.create(
                ksqlConfig,
                authHeader,
                schemaRegistryClientFactory,
                connectClientFactory,
                sharedClient,
                requestHeaders,
                principal)
        );
    }
    return securityExtension.getUserContextProvider()
        .map(provider -> new KsqlSecurityContext(
            principal,
            userServiceContextFactory.create(
                ksqlConfig,
                authHeader,
                provider.getKafkaClientSupplier(principal.get()),
                provider.getSchemaRegistryClientFactory(principal.get()),
                connectClientFactory,
                sharedClient,
                requestHeaders,
                principal)))
        .get();
}
@Test
public void shouldPassRequestHeadersToUserFactory() {
    // Given:
    when(securityExtension.getUserContextProvider()).thenReturn(Optional.of(userContextProvider));

    // When:
    ksqlSecurityContextProvider.provide(apiSecurityContext);

    // Then:
    verify(userServiceContextFactory)
        .create(any(), any(), any(), any(), any(), any(), eq(incomingRequestHeaders), any());
}
public static String toUnderlineCase(CharSequence str) {
    return toSymbolCase(str, CharUtil.UNDERLINE);
}
@Test
public void toUnderLineCaseTest() {
    Dict.create()
        .set("Table_Test_Of_day", "table_test_of_day")
        .set("_Table_Test_Of_day_", "_table_test_of_day_")
        .set("_Table_Test_Of_DAY_", "_table_test_of_DAY_")
        .set("_TableTestOfDAYToday", "_table_test_of_DAY_today")
        .set("HelloWorld_test", "hello_world_test")
        .set("H2", "H2")
        .set("H#case", "H#case")
        .set("PNLabel", "PN_label")
        .set("wPRunOZTime", "w_P_run_OZ_time")
        // https://github.com/dromara/hutool/issues/2070
        .set("customerNickV2", "customer_nick_v2")
        // https://gitee.com/dromara/hutool/issues/I4X9TT
        .set("DEPT_NAME", "DEPT_NAME")
        .forEach((key, value) -> assertEquals(value, NamingCase.toUnderlineCase(key)));
}
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database,
                                              final QueryContext queryContext, final ShardingConditions shardingConditions,
                                              final ConfigurationProperties props, final ConnectionContext connectionContext) {
    SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
    SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
    if (sqlStatement instanceof TCLStatement) {
        return new ShardingDatabaseBroadcastRoutingEngine();
    }
    if (sqlStatement instanceof DDLStatement) {
        if (sqlStatementContext instanceof CursorAvailable) {
            return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
        }
        return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    if (sqlStatement instanceof DALStatement) {
        return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
    }
    if (sqlStatement instanceof DCLStatement) {
        return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
@Test
void assertNewInstanceForSetResourceGroup() {
    MySQLSetResourceGroupStatement resourceGroupStatement = mock(MySQLSetResourceGroupStatement.class);
    when(sqlStatementContext.getSqlStatement()).thenReturn(resourceGroupStatement);
    QueryContext queryContext = new QueryContext(
        sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
    ShardingRouteEngine actual = ShardingRouteEngineFactory.newInstance(
        shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
    // SET RESOURCE GROUP is a DAL statement, so the factory takes the DAL branch,
    // which broadcasts to every instance.
    assertThat(actual, instanceOf(ShardingInstanceBroadcastRoutingEngine.class));
}
@VisibleForTesting
synchronized List<StorageDirectory> addStorageLocations(DataNode datanode,
    NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
    StartupOption startOpt) throws IOException {
  final int numThreads = getParallelVolumeLoadThreadsNum(
      dataDirs.size(), datanode.getConf());
  final ExecutorService executor = Executors.newFixedThreadPool(numThreads);
  try {
    final List<StorageLocation> successLocations = loadDataStorage(
        datanode, nsInfo, dataDirs, startOpt, executor);
    if (successLocations.isEmpty()) {
      return Lists.newArrayList();
    }
    return loadBlockPoolSliceStorage(
        datanode, nsInfo, successLocations, startOpt, executor);
  } finally {
    executor.shutdown();
  }
}
@Test
public void testMissingVersion() throws IOException, URISyntaxException {
  final int numLocations = 1;
  final int numNamespace = 1;
  List<StorageLocation> locations = createStorageLocations(numLocations);
  StorageLocation firstStorage = locations.get(0);
  Storage.StorageDirectory sd = new Storage.StorageDirectory(firstStorage);
  // The directory is not initialized, so VERSION does not exist.
  // Create a fake directory under current/.
  File currentDir = new File(sd.getCurrentDir(),
      "BP-787466439-172.26.24.43-1462305406642");
  assertTrue("unable to mkdir " + currentDir.getName(), currentDir.mkdirs());

  // Add volumes for multiple namespaces.
  List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
  for (NamespaceInfo ni : namespaceInfos) {
    storage.addStorageLocations(mockDN, ni, locations, START_OPT);
  }

  // It should not format the directory because VERSION is missing.
  assertTrue("Storage directory was formatted", currentDir.exists());
}
@Override
public @NonNull TrustedSectoralIdpStep redirectToSectoralIdp(@NonNull String sectoralIdpIss) {
    var trustedIdpEntityStatement = fedMasterClient.establishIdpTrust(URI.create(sectoralIdpIss));

    // Start PAR with the sectoral IdP.
    // https://datatracker.ietf.org/doc/html/rfc9126
    var parBody = ParBodyBuilder.create()
        .clientId(selfIssuer.toString())
        .codeChallenge(codeChallengeS256)
        .codeChallengeMethod("S256")
        .redirectUri(callbackUri)
        .nonce(nonce)
        .state(state)
        .scopes(scopes)
        .acrValues("gematik-ehealth-loa-high")
        .responseType("code");

    var res = doPushedAuthorizationRequest(parBody, trustedIdpEntityStatement.body());
    var redirectUri = buildAuthorizationUrl(res.requestUri(), trustedIdpEntityStatement.body());

    return new TrustedSectoralIdpStepImpl(
        openIdClient, selfIssuer, redirectUri, callbackUri, trustedIdpEntityStatement, relyingPartyEncKeySupplier);
}
@Test
void redirectToSectoralIdp() {
    var self = URI.create("https://fachdienst.example.com");
    var callbackUri = self.resolve("/callback");
    var fedmasterClient = mock(FederationMasterClient.class);
    var openIdClient = mock(OpenIdClient.class);

    var sut = new SelectSectoralIdpStepImpl(
        self, fedmasterClient, openIdClient, null, callbackUri, null, "test", "test-state", List.of());

    var sectoralIdp = URI.create("https://tk.example.com");
    var entityConfig = sectoralIdpEntityConfiguration(sectoralIdp);
    when(fedmasterClient.establishIdpTrust(sectoralIdp)).thenReturn(entityConfig);

    var parResponse = new ParResponse(sectoralIdp.resolve("/auth").toString(), 0);
    when(openIdClient.requestPushedUri(any(), any())).thenReturn(parResponse);

    // when
    var step = sut.redirectToSectoralIdp(sectoralIdp.toString());

    // then
    assertEquals(
        "https://tk.example.com/auth?request_uri=https%3A%2F%2Ftk.example.com%2Fauth&client_id=https%3A%2F%2Ffachdienst.example.com",
        step.idpRedirectUri().toString());
}
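For orientation, a hedged outline of the RFC 9126 exchange that redirectToSectoralIdp drives; the endpoint paths and request_uri shape follow the RFC and the stubbed values above, and are not verified against this codebase.

// Sketch of RFC 9126 Pushed Authorization Requests, as exercised above:
//
// 1. POST <par endpoint>            (client -> IdP, authenticated back channel)
//      client_id, redirect_uri, code_challenge(+method), state, nonce, scope, ...
//    <- { "request_uri": "urn:ietf:params:oauth:request_uri:...", "expires_in": ... }
//
// 2. GET <auth endpoint>?request_uri=<from step 1>&client_id=<client_id>
//    (front-channel redirect of the user agent; this matches the test's
//     assertion, where the stubbed request_uri and the client_id are the only
//     query parameters in the authorization URL)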
public static ObjectName register(String serviceName, String nameName, Object theMbean) {
    return register(serviceName, nameName, Collections.emptyMap(), theMbean);
}
@Test
public void testRegister() throws Exception {
  ObjectName objectName = null;
  try {
    counter = 23;
    objectName = MBeans.register("UnitTest", "RegisterTest", this);

    MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
    int jmxCounter = (int) platformMBeanServer.getAttribute(objectName, "Counter");
    Assert.assertEquals(counter, jmxCounter);
  } finally {
    if (objectName != null) {
      MBeans.unregister(objectName);
    }
  }
}
public String doLayout(ILoggingEvent event) {
    StringBuilder buf = new StringBuilder();
    startNewTableIfLimitReached(buf);

    boolean odd = true;
    if (((counter++) & 1) == 0) {
        odd = false;
    }

    String level = event.getLevel().toString().toLowerCase();

    buf.append(LINE_SEPARATOR);
    buf.append("<tr class=\"");
    buf.append(level);
    if (odd) {
        buf.append(" odd\">");
    } else {
        buf.append(" even\">");
    }
    buf.append(LINE_SEPARATOR);

    Converter<ILoggingEvent> c = head;
    while (c != null) {
        appendEventToBuffer(buf, c, event);
        c = c.getNext();
    }
    buf.append("</tr>");
    buf.append(LINE_SEPARATOR);

    if (event.getThrowableProxy() != null) {
        throwableRenderer.render(buf, event);
    }
    return buf.toString();
}
@Test
public void layoutWithException() throws Exception {
    layout.setPattern("%level %thread %msg %ex");
    LoggingEvent le = createLoggingEvent();
    le.setThrowableProxy(new ThrowableProxy(new Exception("test Exception")));
    String result = layout.doLayout(le);

    String stringToParse = layout.getFileHeader();
    stringToParse = stringToParse + layout.getPresentationHeader();
    stringToParse += result;
    stringToParse += "</table></body></html>";
    // System.out.println(stringToParse);

    Document doc = parseOutput(stringToParse);
    Element rootElement = doc.getRootElement();
    Element bodyElement = rootElement.element("body");
    Element tableElement = bodyElement.element("table");
    // Expected rows: [0] the header row from the presentation header,
    // [1] the event row, [2] the exception row appended by throwableRenderer.
    List<Element> trElementList = tableElement.elements();
    Element exceptionRowElement = trElementList.get(2);
    Element exceptionElement = exceptionRowElement.element("td");

    assertEquals(3, tableElement.elements().size());
    assertTrue(exceptionElement.getText().contains("java.lang.Exception: test Exception"));
}
public static ByteBuf buffer() {
    return ALLOC.heapBuffer();
}
@SuppressWarnings("deprecation") @Test public void littleEndianWriteOnLittleEndianBufferMustStoreLittleEndianValue() { ByteBuf b = buffer(1024).order(ByteOrder.LITTLE_ENDIAN); b.writeShortLE(0x0102); assertEquals((short) 0x0102, b.getShortLE(0)); assertEquals((short) 0x0102, b.getShort(0)); b.clear(); b.writeMediumLE(0x010203); assertEquals(0x010203, b.getMediumLE(0)); assertEquals(0x010203, b.getMedium(0)); b.clear(); b.writeIntLE(0x01020304); assertEquals(0x01020304, b.getIntLE(0)); assertEquals(0x01020304, b.getInt(0)); b.clear(); b.writeLongLE(0x0102030405060708L); assertEquals(0x0102030405060708L, b.getLongLE(0)); assertEquals(0x0102030405060708L, b.getLong(0)); }
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
  String taskType = MinionConstants.UpsertCompactionTask.TASK_TYPE;
  List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
  for (TableConfig tableConfig : tableConfigs) {
    if (!validate(tableConfig)) {
      LOGGER.warn("Validation failed for table {}. Skipping..", tableConfig.getTableName());
      continue;
    }

    String tableNameWithType = tableConfig.getTableName();
    LOGGER.info("Start generating task configs for table: {}", tableNameWithType);
    if (tableConfig.getTaskConfig() == null) {
      LOGGER.warn("Task config is null for table: {}", tableNameWithType);
      continue;
    }

    Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
    List<SegmentZKMetadata> allSegments = _clusterInfoAccessor.getSegmentsZKMetadata(tableNameWithType);

    // Get completed segments and filter out segments based on the buffer time configuration
    List<SegmentZKMetadata> completedSegments =
        getCompletedSegments(taskConfigs, allSegments, System.currentTimeMillis());
    if (completedSegments.isEmpty()) {
      LOGGER.info("No completed segments were eligible for compaction for table: {}", tableNameWithType);
      continue;
    }

    // Only schedule 1 task of this type per table
    Map<String, TaskState> incompleteTasks =
        TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType, _clusterInfoAccessor);
    if (!incompleteTasks.isEmpty()) {
      LOGGER.warn("Found incomplete tasks: {} for same table: {} and task type: {}. Skipping task generation.",
          incompleteTasks.keySet(), tableNameWithType, taskType);
      continue;
    }

    // Get server-to-segment mappings
    PinotHelixResourceManager pinotHelixResourceManager = _clusterInfoAccessor.getPinotHelixResourceManager();
    Map<String, List<String>> serverToSegments = pinotHelixResourceManager.getServerToSegmentsMap(tableNameWithType);
    BiMap<String, String> serverToEndpoints;
    try {
      serverToEndpoints = pinotHelixResourceManager.getDataInstanceAdminEndpoints(serverToSegments.keySet());
    } catch (InvalidConfigException e) {
      throw new RuntimeException(e);
    }

    ServerSegmentMetadataReader serverSegmentMetadataReader = new ServerSegmentMetadataReader(
        _clusterInfoAccessor.getExecutor(), _clusterInfoAccessor.getConnectionManager());

    // By default, we use 'snapshot' for validDocIdsType. This means that we will use the validDocIds bitmap from
    // the snapshot of the Pinot segment. This requires 'enableSnapshot' from UpsertConfig to be set to true.
    String validDocIdsTypeStr =
        taskConfigs.getOrDefault(UpsertCompactionTask.VALID_DOC_IDS_TYPE, ValidDocIdsType.SNAPSHOT.toString());
    ValidDocIdsType validDocIdsType = ValidDocIdsType.valueOf(validDocIdsTypeStr.toUpperCase());

    // Number of segments to query per server request. If a table has a lot of segments, we might send a huge
    // payload to pinot-server in the request. Batching the requests helps reduce the payload size.
    int numSegmentsBatchPerServerRequest = Integer.parseInt(
        taskConfigs.getOrDefault(UpsertCompactionTask.NUM_SEGMENTS_BATCH_PER_SERVER_REQUEST,
            String.valueOf(DEFAULT_NUM_SEGMENTS_BATCH_PER_SERVER_REQUEST)));

    // Validate that the snapshot is enabled if validDocIdsType is SNAPSHOT
    if (validDocIdsType == ValidDocIdsType.SNAPSHOT) {
      UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
      Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask");
      Preconditions.checkState(upsertConfig.isEnableSnapshot(), String.format(
          "'enableSnapshot' from UpsertConfig must be enabled for UpsertCompactionTask with validDocIdsType = %s",
          validDocIdsType));
    } else if (validDocIdsType == ValidDocIdsType.IN_MEMORY_WITH_DELETE) {
      UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
      Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask");
      Preconditions.checkNotNull(upsertConfig.getDeleteRecordColumn(), String.format(
          "deleteRecordColumn must be provided for UpsertCompactionTask with validDocIdsType = %s", validDocIdsType));
    }

    Map<String, List<ValidDocIdsMetadataInfo>> validDocIdsMetadataList =
        serverSegmentMetadataReader.getSegmentToValidDocIdsMetadataFromServer(tableNameWithType, serverToSegments,
            serverToEndpoints, null, 60_000, validDocIdsType.toString(), numSegmentsBatchPerServerRequest);

    Map<String, SegmentZKMetadata> completedSegmentsMap =
        completedSegments.stream().collect(Collectors.toMap(SegmentZKMetadata::getSegmentName, Function.identity()));

    SegmentSelectionResult segmentSelectionResult =
        processValidDocIdsMetadata(taskConfigs, completedSegmentsMap, validDocIdsMetadataList);

    if (!segmentSelectionResult.getSegmentsForDeletion().isEmpty()) {
      pinotHelixResourceManager.deleteSegments(tableNameWithType, segmentSelectionResult.getSegmentsForDeletion(),
          "0d");
      LOGGER.info("Deleted segments containing only invalid records for table: {}, deleted segments: {}",
          tableNameWithType, segmentSelectionResult.getSegmentsForDeletion());
    }

    int numTasks = 0;
    int maxTasks = getMaxTasks(taskType, tableNameWithType, taskConfigs);
    for (SegmentZKMetadata segment : segmentSelectionResult.getSegmentsForCompaction()) {
      if (numTasks == maxTasks) {
        break;
      }
      if (StringUtils.isBlank(segment.getDownloadUrl())) {
        LOGGER.warn("Skipping segment {} for task {} as download url is empty", segment.getSegmentName(), taskType);
        continue;
      }
      Map<String, String> configs = new HashMap<>(getBaseTaskConfigs(tableConfig, List.of(segment.getSegmentName())));
      configs.put(MinionConstants.DOWNLOAD_URL_KEY, segment.getDownloadUrl());
      configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments");
      configs.put(MinionConstants.ORIGINAL_SEGMENT_CRC_KEY, String.valueOf(segment.getCrc()));
      configs.put(UpsertCompactionTask.VALID_DOC_IDS_TYPE, validDocIdsType.toString());
      pinotTaskConfigs.add(new PinotTaskConfig(UpsertCompactionTask.TASK_TYPE, configs));
      numTasks++;
    }
    LOGGER.info("Finished generating {} task configs for table: {}", numTasks, tableNameWithType);
  }
  return pinotTaskConfigs;
}
@Test
public void testGenerateTasksValidatesTableConfigs() {
  UpsertCompactionTaskGenerator taskGenerator = new UpsertCompactionTaskGenerator();

  TableConfig offlineTableConfig = new TableConfigBuilder(TableType.OFFLINE)
      .setTableName(RAW_TABLE_NAME)
      .setTimeColumnName(TIME_COLUMN_NAME)
      .build();
  List<PinotTaskConfig> pinotTaskConfigs = taskGenerator.generateTasks(Lists.newArrayList(offlineTableConfig));
  assertTrue(pinotTaskConfigs.isEmpty());

  TableConfig realtimeTableConfig = new TableConfigBuilder(TableType.REALTIME)
      .setTableName(RAW_TABLE_NAME)
      .setTimeColumnName(TIME_COLUMN_NAME)
      .build();
  pinotTaskConfigs = taskGenerator.generateTasks(Lists.newArrayList(realtimeTableConfig));
  assertTrue(pinotTaskConfigs.isEmpty());
}
private LayoutLocation(String id, Type locType, double latOrY, double longOrX) {
    this.id = id;
    this.latOrY = latOrY;
    this.longOrX = longOrX;
    this.locType = locType;
}
@Test(expected = IllegalArgumentException.class)
public void badType() {
    layoutLocation(SOME_ID, "foo", ZERO, PI);
}
@Override
public AdjacencyList setWeight(int source, int target, double weight) {
    if (digraph) {
        for (Edge edge : graph[source]) {
            if (edge.v2 == target) {
                edge.weight = weight;
                return this;
            }
        }
    } else {
        for (Edge edge : graph[source]) {
            if ((edge.v1 == source && edge.v2 == target) || (edge.v2 == source && edge.v1 == target)) {
                edge.weight = weight;
                return this;
            }
        }
    }

    // No existing edge matched: fall through and create one.
    addEdge(source, target, weight);
    return this;
}
@Test
public void testSetWeight() {
    System.out.println("setWeight");

    // g4 is directed: updating (1, 4) leaves the reverse edge (4, 1) untouched.
    g4.setWeight(1, 4, 5.7);
    assertEquals(5.7, g4.getWeight(1, 4), 1E-10);
    assertEquals(1.0, g4.getWeight(4, 1), 1E-10);

    // g8 is undirected: a single edge carries the weight in both directions.
    g8.setWeight(1, 4, 5.7);
    assertEquals(5.7, g8.getWeight(1, 4), 1E-10);
    assertEquals(5.7, g8.getWeight(4, 1), 1E-10);
}
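A brief, hedged sketch of the fall-through in setWeight above: when no matching edge exists, the call degrades to addEdge, so setWeight behaves like an upsert. The AdjacencyList(int, boolean) constructor used here is an assumption borrowed from typical usage, not confirmed by this excerpt.

// Sketch only; constructor signature is assumed, not taken from the source.
AdjacencyList g = new AdjacencyList(5, true); // assumed: 5 vertices, directed

g.setWeight(0, 3, 2.5);   // no (0, 3) edge yet -> falls through to addEdge(0, 3, 2.5)
g.setWeight(0, 3, 4.0);   // edge now exists    -> weight updated in place
g.setWeight(3, 0, 1.0);   // directed graph: the reverse direction is a separate edge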