public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (SplitFieldToRowsMeta) smi;
  data = (SplitFieldToRowsData) sdi;

  if ( super.init( smi, sdi ) ) {
    data.rownr = 1L;
    try {
      String delimiter = Const.nullToEmpty( meta.getDelimiter() );
      if ( meta.isDelimiterRegex() ) {
        data.delimiterPattern = Pattern.compile( environmentSubstitute( delimiter ) );
      } else {
        data.delimiterPattern = Pattern.compile( Pattern.quote( environmentSubstitute( delimiter ) ) );
      }
    } catch ( PatternSyntaxException pse ) {
      log.logError( pse.getMessage() );
      throw pse;
    }
    return true;
  }
  return false;
}
@Test
public void interpretsNullDelimiterAsEmpty() throws Exception {
  SplitFieldToRows step =
    StepMockUtil.getStep( SplitFieldToRows.class, SplitFieldToRowsMeta.class, "handlesNullDelimiter" );

  SplitFieldToRowsMeta meta = new SplitFieldToRowsMeta();
  meta.setDelimiter( null );
  meta.setDelimiterRegex( false );

  SplitFieldToRowsData data = new SplitFieldToRowsData();

  step.init( meta, data );
  // empty string should be quoted --> \Q\E
  assertEquals( "\\Q\\E", data.delimiterPattern.pattern() );
}
public static JsonSchemaValidator matchesJsonSchemaInClasspath(String pathToSchemaInClasspath) {
    return matchesJsonSchema(Thread.currentThread().getContextClassLoader().getResource(pathToSchemaInClasspath));
}
@Test
public void validates_schema_in_classpath() {
    // Given
    String greetingJson = "{\n" +
            " \"greeting\": {\n" +
            " \"firstName\": \"John\",\n" +
            " \"lastName\": \"Doe\"\n" +
            " }\n" +
            "}";

    // Then
    MatcherAssert.assertThat(greetingJson, JsonSchemaValidator.matchesJsonSchemaInClasspath("greeting-schema.json"));
}
@Nullable
public static ByteBuf accumulate(
        ByteBuf target, ByteBuf source, int targetAccumulationSize, int accumulatedSize) {
    if (accumulatedSize == 0 && source.readableBytes() >= targetAccumulationSize) {
        return source;
    }

    int copyLength = Math.min(source.readableBytes(), targetAccumulationSize - accumulatedSize);
    if (copyLength > 0) {
        target.writeBytes(source, copyLength);
    }

    if (accumulatedSize + copyLength == targetAccumulationSize) {
        return target;
    }
    return null;
}
@Test
void testAccumulateWithCopy() {
    int sourceLength = 128;
    int firstSourceReaderIndex = 32;
    int secondSourceReaderIndex = 0;
    int expectedAccumulationSize = 128;
    int firstAccumulationSize = sourceLength - firstSourceReaderIndex;
    int secondAccumulationSize = expectedAccumulationSize - firstAccumulationSize;

    ByteBuf firstSource =
            createSourceBuffer(sourceLength, firstSourceReaderIndex, firstAccumulationSize);
    ByteBuf secondSource =
            createSourceBuffer(sourceLength, secondSourceReaderIndex, secondAccumulationSize);

    ByteBuf target = Unpooled.buffer(expectedAccumulationSize);

    // If src does not have enough data, src will be copied into target and null will be
    // returned.
    ByteBuf accumulated =
            ByteBufUtils.accumulate(
                    target, firstSource, expectedAccumulationSize, target.readableBytes());
    assertThat(accumulated).isNull();
    assertThat(firstSource.readerIndex()).isEqualTo(sourceLength);
    assertThat(target.readableBytes()).isEqualTo(firstAccumulationSize);

    // The remaining data will be copied from the second buffer, and the target buffer will be
    // returned after all data is accumulated.
    accumulated =
            ByteBufUtils.accumulate(
                    target, secondSource, expectedAccumulationSize, target.readableBytes());
    assertThat(accumulated).isSameAs(target);
    assertThat(secondSource.readerIndex())
            .isEqualTo(secondSourceReaderIndex + secondAccumulationSize);
    assertThat(target.readableBytes()).isEqualTo(expectedAccumulationSize);

    verifyBufferContent(accumulated, 0, expectedAccumulationSize);
}
@Override
public void seek(long desired) throws IOException {
    final int available = compressingDelegate.available();
    if (available > 0) {
        if (available != compressingDelegate.skip(available)) {
            throw new IOException("Unable to skip buffered data.");
        }
    }
    delegate.seek(desired);
}
@Test
void testSeek() throws IOException {
    final List<String> records = Arrays.asList("first", "second", "third", "fourth", "fifth");
    final Map<String, Long> positions = new HashMap<>();

    byte[] compressedBytes;
    try (final TestingOutputStream outputStream = new TestingOutputStream();
            final CompressibleFSDataOutputStream compressibleOutputStream =
                    new CompressibleFSDataOutputStream(
                            outputStream, new SnappyStreamCompressionDecorator())) {
        for (String record : records) {
            positions.put(record, compressibleOutputStream.getPos());
            compressibleOutputStream.write(record.getBytes(StandardCharsets.UTF_8));
        }
        compressibleOutputStream.flush();
        compressedBytes = outputStream.toByteArray();
    }

    try (final FSDataInputStream inputStream =
                    new InputStreamFSInputWrapper(new ByteArrayInputStream(compressedBytes));
            final FSDataInputStream compressibleInputStream =
                    new CompressibleFSDataInputStream(
                            inputStream, new SnappyStreamCompressionDecorator())) {
        verifyRecord(compressibleInputStream, positions, "first");
        verifyRecord(compressibleInputStream, positions, "third");
        verifyRecord(compressibleInputStream, positions, "fifth");
    }

    // Verify read of partial records. This ensures that we skip any unread data in the
    // underlying buffers.
    try (final FSDataInputStream inputStream =
                    new InputStreamFSInputWrapper(new ByteArrayInputStream(compressedBytes));
            final FSDataInputStream compressibleInputStream =
                    new CompressibleFSDataInputStream(
                            inputStream, new SnappyStreamCompressionDecorator())) {
        verifyRecordPrefix(compressibleInputStream, positions, "first", "fir");
        verifyRecordPrefix(compressibleInputStream, positions, "third", "thi");
        verifyRecord(compressibleInputStream, positions, "fifth");
    }
}
public void addUndetected(String shardId, BigInteger startingHashKey, long currentTimeMs) {
    assert !info.containsKey(shardId);
    info.put(shardId, new TrackingInfo(findOwner(startingHashKey), currentTimeMs));
}
@Test
public void detection() {
    addUndetected(SHARD2, 0);
    addUndetected(SHARD4, 0);
    assertNew(set(SHARD0, SHARD2, SHARD5), SHARD0, 0, SHARD2, 1, SHARD5, 2);
    assertNew(set(SHARD0, SHARD2, SHARD5));
    assertNew(set(SHARD1, SHARD3, SHARD4), SHARD1, 0, SHARD3, 1, SHARD4, 2);
    assertNew(set(SHARD1, SHARD3, SHARD4));
}
@NonNull
public <T extends VFSConnectionDetails> VFSConnectionProvider<T> getExistingProvider(
  @NonNull ConnectionManager manager, @Nullable String key ) throws KettleException {

  VFSConnectionProvider<T> provider = getProvider( manager, key );
  if ( provider == null ) {
    throw new KettleException( String.format( "Undefined connection provider for key '%s'.", key ) );
  }
  return provider;
}
@Test( expected = KettleException.class )
public void testGetExistingProviderOfDetailsReturnsNullForNonExistingProviderInManager() throws KettleException {
  String provider1Key = "missingProvider1";
  VFSConnectionDetails details1 = mock( VFSConnectionDetails.class );
  doReturn( provider1Key ).when( details1 ).getType();

  vfsConnectionManagerHelper.getExistingProvider( connectionManager, details1 );
}
public NamenodeBeanMetrics getNamenodeMetrics() throws IOException {
  if (this.metrics == null) {
    throw new IOException("Namenode metrics is not initialized");
  }
  return this.metrics.getNamenodeMetrics();
}
@Test
public void testRouterMetricsWhenDisabled() throws Exception {
  Router router = new Router();
  router.init(new RouterConfigBuilder(conf).rpc().build());
  router.start();

  intercept(IOException.class, "Namenode metrics is not initialized",
      () -> router.getNamenodeMetrics().getCacheCapacity());

  router.stop();
  router.close();
}
public boolean statsHaveChanged() {
    if (!aggregatedStats.hasUpdatesFromAllDistributors()) {
        return false;
    }
    for (ContentNodeStats contentNodeStats : aggregatedStats.getStats()) {
        int nodeIndex = contentNodeStats.getNodeIndex();
        boolean currValue = mayHaveMergesPendingInGlobalSpace(nodeIndex);
        Boolean prevValue = prevMayHaveMergesPendingInGlobalSpace(nodeIndex);
        if (prevValue != null) {
            if (prevValue != currValue) {
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}
@Test
void stats_have_changed_if_one_node_has_in_sync_to_buckets_pending_transition() {
    Fixture f = Fixture.fromStats(stats().bucketsPending(0).inSync(1));
    f.newAggregatedStats(stats().bucketsPending(0).bucketsPending(1));
    assertTrue(f.statsHaveChanged());
}
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) {
    if ( val == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
    }

    try {
        TemporalAccessor parsed = FEEL_TIME.parse(val);

        if (parsed.query(TemporalQueries.offset()) != null) {
            // it is an offset-zoned time, so I know for certain it is an OffsetTime
            OffsetTime asOffSetTime = parsed.query(OffsetTime::from);
            return FEELFnResult.ofResult(asOffSetTime);
        } else if (parsed.query(TemporalQueries.zone()) == null) {
            // if it does not contain any zone information at all, then I know for certain it is a local time.
            LocalTime asLocalTime = parsed.query(LocalTime::from);
            return FEELFnResult.ofResult(asLocalTime);
        } else if (parsed.query(TemporalQueries.zone()) != null) {
            boolean hasSeconds = timeStringWithSeconds(val);
            LocalTime asLocalTime = parsed.query(LocalTime::from);
            ZoneId zoneId = parsed.query(TemporalQueries.zone());
            ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds);
            return FEELFnResult.ofResult(zoneTime);
        }

        return FEELFnResult.ofResult(parsed);
    } catch (DateTimeException e) {
        return manageDateTimeException(e, val);
    }
}
@Test
void invokeTemporalAccessorParamUnsupportedAccessor() {
    FunctionTestUtil.assertResultError(timeFunction.invoke(DayOfWeek.MONDAY), InvalidParametersEvent.class);
}
@Override
public void downgradeLastEdge() {
  Preconditions.checkState(!endsInInode(),
      "Cannot downgrade last edge when lock list %s ends in an inode", this);
  Preconditions.checkState(!mLocks.isEmpty(),
      "Cannot downgrade last edge when the lock list is empty");
  Preconditions.checkState(endsInWriteLock(),
      "Cannot downgrade last edge when lock list %s is not write locked", this);
  if (!endsInMultipleWriteLocks()) {
    Edge lastEdge = lastEdge();
    RWLockResource newLock = mInodeLockManager.lockEdge(lastEdge, LockMode.READ, mUseTryLock);
    removeLastLock();
    addEdgeLock(lastEdge, LockMode.READ, newLock);
  }
}
@Test
public void downgradeLastEdge() {
  mLockList.lockRootEdge(LockMode.WRITE);
  mLockList.downgradeLastEdge();
  assertEquals(LockMode.READ, mLockList.getLockMode());

  mLockList.lockInode(mRootDir, LockMode.READ);
  mLockList.lockEdge(mRootDir, mDirA.getName(), LockMode.WRITE);
  mLockList.downgradeLastEdge();
  assertEquals(LockMode.READ, mLockList.getLockMode());

  checkOnlyNodesReadLocked(mRootDir);
  checkOnlyNodesWriteLocked();
  checkOnlyIncomingEdgesReadLocked(mRootDir, mDirA);
  checkOnlyIncomingEdgesWriteLocked();
}
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
@Test
public void testListFilePlusCharacter() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path file = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(
            new Path(container, String.format("test+%s", new AlphanumericRandomStringService().random()),
                    EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new S3ObjectListService(session, new S3AccessControlListFeature(session))
            .list(container, new DisabledListProgressListener()).contains(file));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(file),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
public static Read read() {
  return new AutoValue_RabbitMqIO_Read.Builder()
      .setQueueDeclare(false)
      .setExchangeDeclare(false)
      .setMaxReadTime(null)
      .setMaxNumRecords(Long.MAX_VALUE)
      .setUseCorrelationId(false)
      .build();
}
@Test
public void testReadQueue() throws Exception {
  final int maxNumRecords = 10;
  PCollection<RabbitMqMessage> raw =
      p.apply(
          RabbitMqIO.read()
              .withUri("amqp://guest:guest@localhost:" + port)
              .withQueue("READ")
              .withMaxNumRecords(maxNumRecords));
  PCollection<String> output =
      raw.apply(
          MapElements.into(TypeDescriptors.strings())
              .via((RabbitMqMessage message) -> RabbitMqTestUtils.recordToString(message.getBody())));

  List<String> records =
      RabbitMqTestUtils.generateRecords(maxNumRecords).stream()
          .map(RabbitMqTestUtils::recordToString)
          .collect(Collectors.toList());
  PAssert.that(output).containsInAnyOrder(records);

  ConnectionFactory connectionFactory = new ConnectionFactory();
  connectionFactory.setUri("amqp://guest:guest@localhost:" + port);
  Connection connection = null;
  Channel channel = null;
  try {
    connection = connectionFactory.newConnection();
    channel = connection.createChannel();
    channel.queueDeclare("READ", false, false, false, null);
    for (String record : records) {
      channel.basicPublish("", "READ", null, record.getBytes(StandardCharsets.UTF_8));
    }
    p.run();
  } finally {
    if (channel != null) {
      channel.close();
    }
    if (connection != null) {
      connection.close();
    }
  }
}
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
    return positionFilter == PositionFilter.PROFIT ? criterionValue1.isGreaterThan(criterionValue2)
            : criterionValue1.isLessThan(criterionValue2);
}
@Test
public void betterThan() {
    AnalysisCriterion winningPositionsRatio = getCriterion(PositionFilter.PROFIT);
    assertTrue(winningPositionsRatio.betterThan(numOf(12), numOf(8)));
    assertFalse(winningPositionsRatio.betterThan(numOf(8), numOf(12)));

    AnalysisCriterion losingPositionsRatio = getCriterion(PositionFilter.LOSS);
    assertTrue(losingPositionsRatio.betterThan(numOf(8), numOf(12)));
    assertFalse(losingPositionsRatio.betterThan(numOf(12), numOf(8)));
}
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
  return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
@Test
public void testRequiresStableInputProcessElement() throws Exception {
  DoFnSignature sig =
      DoFnSignatures.getSignature(
          new DoFn<String, String>() {
            @ProcessElement
            @RequiresStableInput
            public void process(ProcessContext c) {}
          }.getClass());

  assertThat(sig.processElement().requiresStableInput(), is(true));
}
@Override
public boolean checkIndexExists( Database database, String schemaName, String tableName, String[] idxFields )
  throws KettleDatabaseException {

  String tablename = database.getDatabaseMeta().getQuotedSchemaTableCombination( schemaName, tableName );

  boolean[] exists = new boolean[idxFields.length];
  for ( int i = 0; i < exists.length; i++ ) {
    exists[i] = false;
  }

  try {
    // Get a list of all the indexes for this table
    ResultSet indexList = null;
    try {
      indexList = database.getDatabaseMetaData().getIndexInfo( null, null, tablename, false, true );
      while ( indexList.next() ) {
        String column = indexList.getString( "COLUMN_NAME" );

        int idx = Const.indexOfString( column, idxFields );
        if ( idx >= 0 ) {
          exists[idx] = true;
        }
      }
    } finally {
      if ( indexList != null ) {
        indexList.close();
      }
    }

    // See if all the fields are indexed...
    boolean all = true;
    for ( int i = 0; i < exists.length && all; i++ ) {
      if ( !exists[i] ) {
        all = false;
      }
    }

    return all;
  } catch ( Exception e ) {
    throw new KettleDatabaseException( "Unable to determine if indexes exists on table [" + tablename + "]", e );
  }
}
@Test
public void testCheckIndexExists() throws Exception {
  Database db = Mockito.mock( Database.class );
  ResultSet rs = Mockito.mock( ResultSet.class );
  DatabaseMetaData dmd = Mockito.mock( DatabaseMetaData.class );
  DatabaseMeta dm = Mockito.mock( DatabaseMeta.class );
  Mockito.when( dm.getQuotedSchemaTableCombination( "", "FOO" ) ).thenReturn( "FOO" );
  Mockito.when( rs.next() ).thenAnswer( new Answer<Boolean>() {
    public Boolean answer( InvocationOnMock invocation ) throws Throwable {
      rowCnt++;
      return rowCnt < 3;
    }
  } );
  Mockito.when( db.getDatabaseMetaData() ).thenReturn( dmd );
  Mockito.when( dmd.getIndexInfo( null, null, "FOO", false, true ) ).thenReturn( rs );
  Mockito.when( rs.getString( "COLUMN_NAME" ) ).thenAnswer( new Answer<String>() {
    @Override
    public String answer( InvocationOnMock invocation ) throws Throwable {
      if ( rowCnt == 1 ) {
        return "ROW1COL2";
      } else if ( rowCnt == 2 ) {
        return "ROW2COL2";
      } else {
        return null;
      }
    }
  } );
  Mockito.when( db.getDatabaseMeta() ).thenReturn( dm );
}
public static BuildInfo getBuildInfo() {
    if (Overrides.isEnabled()) {
        // never use cache when override is enabled -> we need to re-parse everything
        Overrides overrides = Overrides.fromProperties();
        return getBuildInfoInternalVersion(overrides);
    }
    return BUILD_INFO_CACHE;
}
@Test
public void testReadValues() {
    BuildInfo buildInfo = BuildInfoProvider.getBuildInfo();
    String version = buildInfo.getVersion();
    String build = buildInfo.getBuild();
    int buildNumber = buildInfo.getBuildNumber();

    assertTrue(buildInfo.toString(), VERSION_PATTERN.matcher(version).matches());
    assertEquals(buildInfo.toString(), buildNumber, Integer.parseInt(build));
    assertFalse(buildInfo.toString(), buildInfo.isEnterprise());
}
@ExceptionHandler(ShenyuException.class)
protected ShenyuAdminResult handleShenyuException(final ShenyuException exception) {
    String message = Objects.isNull(exception.getCause()) ? null : exception.getCause().getMessage();
    if (!StringUtils.hasText(message)) {
        message = exception.getMessage();
    }
    LOG.error(exception.getMessage());
    return ShenyuAdminResult.error(message);
}
@Test
public void testServerExceptionHandlerByShenyuException() {
    ShenyuException shenyuException = new ShenyuException(new Throwable("Test shenyuException message!"));
    ShenyuAdminResult result = exceptionHandlersUnderTest.handleShenyuException(shenyuException);
    Assertions.assertEquals(CommonErrorCode.ERROR, result.getCode().intValue());
    Assertions.assertEquals(shenyuException.getCause().getMessage(), result.getMessage());
}
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    final ModelId modelId = entityDescriptor.id();
    final Configuration configuration = configurationService.find(modelId.id());
    if (isNull(configuration)) {
        LOG.debug("Couldn't find collector configuration {}", entityDescriptor);
        return Optional.empty();
    }
    return Optional.of(exportNativeEntity(configuration, entityDescriptorIds));
}
@Test
@MongoDBFixtures("SidecarCollectorConfigurationFacadeTest.json")
public void exportEntity() {
    final EntityDescriptor descriptor = EntityDescriptor.create("5b17e1a53f3ab8204eea1051", ModelTypes.SIDECAR_COLLECTOR_CONFIGURATION_V1);
    final EntityDescriptor collectorDescriptor = EntityDescriptor.create("5b4c920b4b900a0024af0001", ModelTypes.SIDECAR_COLLECTOR_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor, collectorDescriptor);
    final Entity entity = facade.exportEntity(descriptor, entityDescriptorIds).orElseThrow(AssertionError::new);

    assertThat(entity).isInstanceOf(EntityV1.class);
    assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
    assertThat(entity.type()).isEqualTo(ModelTypes.SIDECAR_COLLECTOR_CONFIGURATION_V1);

    final EntityV1 entityV1 = (EntityV1) entity;
    final SidecarCollectorConfigurationEntity configEntity = objectMapper.convertValue(entityV1.data(), SidecarCollectorConfigurationEntity.class);
    assertThat(configEntity.title()).isEqualTo(ValueReference.of("filebeat config"));
    assertThat(configEntity.collectorId()).isEqualTo(ValueReference.of(entityDescriptorIds.get(collectorDescriptor).orElse(null)));
    assertThat(configEntity.color().asString(Collections.emptyMap())).isEqualTo("#ffffff");
    assertThat(configEntity.template().asString(Collections.emptyMap())).isEqualTo("empty template");
}
@Override
public void run() {
  try {
    // We kill containers until the kernel reports the OOM situation resolved
    // Note: If the kernel has a delay this may kill more than necessary
    while (true) {
      String status = cgroups.getCGroupParam(
          CGroupsHandler.CGroupController.MEMORY,
          "",
          CGROUP_PARAM_MEMORY_OOM_CONTROL);
      if (!status.contains(CGroupsHandler.UNDER_OOM)) {
        break;
      }
      boolean containerKilled = killContainer();
      if (!containerKilled) {
        // This can happen if SIGKILL did not clean up non-PGID containers,
        // containers launched by other users, or if a process was put
        // to the root YARN cgroup.
        throw new YarnRuntimeException(
            "Could not find any containers but CGroups " +
                "reserved for containers ran out of memory. " +
                "I am giving up");
      }
    }
  } catch (ResourceHandlerException ex) {
    LOG.warn("Could not fetch OOM status. " +
        "This is expected at shutdown. Exiting.", ex);
  }
}
@Test
public void testKillBothOpportunisticContainerUponOOM() throws Exception {
  int currentContainerId = 0;
  ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<>();
  Container c1 = createContainer(currentContainerId++, false, 2, true);
  containers.put(c1.getContainerId(), c1);
  Container c2 = createContainer(currentContainerId++, false, 1, true);
  containers.put(c2.getContainerId(), c2);
  Container c3 = createContainer(currentContainerId++, true, 1, true);
  containers.put(c3.getContainerId(), c3);
  ContainerExecutor ex = createContainerExecutor(containers);
  Context context = mock(Context.class);
  when(context.getContainers()).thenReturn(containers);
  when(context.getContainerExecutor()).thenReturn(ex);

  CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
  when(cGroupsHandler.getCGroupParam(
      CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL))
      .thenReturn("under_oom 1")
      .thenReturn("under_oom 1")
      .thenReturn("under_oom 0");
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c1.getContainerId().toString(), CGROUP_PROCS_FILE))
      .thenReturn("1234").thenReturn("");
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
      .thenReturn(getMB(9));
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
      .thenReturn(getMB(9));
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c2.getContainerId().toString(), CGROUP_PROCS_FILE))
      .thenReturn("1235").thenReturn("");
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
      .thenReturn(getMB(11));
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
      .thenReturn(getMB(11));
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c3.getContainerId().toString(), CGROUP_PROCS_FILE))
      .thenReturn("1236").thenReturn("");
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
      .thenReturn(getMB(9));
  when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
      c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
      .thenReturn(getMB(9));
  DefaultOOMHandler handler = new DefaultOOMHandler(context, false) {
    @Override
    protected CGroupsHandler getCGroupsHandler() {
      return cGroupsHandler;
    }
  };
  handler.run();

  verify(ex, times(1)).signalContainer(
      new ContainerSignalContext.Builder()
          .setPid("1235")
          .setContainer(c1)
          .setSignal(ContainerExecutor.Signal.KILL)
          .build()
  );
  verify(ex, times(1)).signalContainer(
      new ContainerSignalContext.Builder()
          .setPid("1234")
          .setContainer(c2)
          .setSignal(ContainerExecutor.Signal.KILL)
          .build()
  );
  verify(ex, times(2)).signalContainer(any());
}
@Override
protected void setKeyboard(@NonNull AnyKeyboard newKeyboard, float verticalCorrection) {
    mExtensionKey = null;
    mExtensionVisible = false;
    mUtilityKey = null;
    super.setKeyboard(newKeyboard, verticalCorrection);
    setProximityCorrectionEnabled(true);
    // looking for the space-bar, so I'll be able to detect swipes starting at it
    mSpaceBarKey = null;
    for (Keyboard.Key aKey : newKeyboard.getKeys()) {
        if (aKey.getPrimaryCode() == KeyCodes.SPACE) {
            mSpaceBarKey = aKey;
            break;
        }
    }

    final Keyboard.Key lastKey = newKeyboard.getKeys().get(newKeyboard.getKeys().size() - 1);
    mWatermarkEdgeX = Keyboard.Key.getEndX(lastKey);
}
@Test
public void testKeyClickDomain() {
    mEnglishKeyboard = AnyApplication.getKeyboardFactory(getApplicationContext())
            .getEnabledAddOn()
            .createKeyboard(Keyboard.KEYBOARD_ROW_MODE_URL);
    mEnglishKeyboard.loadKeyboard(mViewUnderTest.getThemedKeyboardDimens());
    mViewUnderTest.setKeyboard(mEnglishKeyboard, 0);

    AnyKeyboard.AnyKey key = findKey(KeyCodes.DOMAIN);
    Assert.assertNotNull(key);
    Mockito.reset(mMockKeyboardListener);

    ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 30, true, true);
    Mockito.verify(mMockKeyboardListener).onText(same(key), eq(".com"));
    Mockito.verify(mMockKeyboardListener, Mockito.never())
            .onKey(anyInt(), any(), anyInt(), any(), anyBoolean());
    Mockito.reset(mMockKeyboardListener);

    Assert.assertNull(
            Shadows.shadowOf((Application) ApplicationProvider.getApplicationContext())
                    .getLatestPopupWindow());

    ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 1000, true, false);
    Mockito.verify(mMockKeyboardListener, Mockito.never()).onText(any(), any());
    Mockito.verify(mMockKeyboardListener, Mockito.never())
            .onKey(anyInt(), any(), anyInt(), any(), anyBoolean());
    Mockito.reset(mMockKeyboardListener);

    PopupWindow currentlyShownPopup =
            Shadows.shadowOf((Application) ApplicationProvider.getApplicationContext())
                    .getLatestPopupWindow();
    Assert.assertNotNull(currentlyShownPopup);
    Assert.assertTrue(currentlyShownPopup.isShowing());
    AnyKeyboardViewBase miniKeyboard = mViewUnderTest.getMiniKeyboard();
    Assert.assertNotNull(miniKeyboard);
}
public static List<Event> computeEventDiff(final Params params) {
    final List<Event> events = new ArrayList<>();
    emitPerNodeDiffEvents(createBaselineParams(params), events);
    emitWholeClusterDiffEvent(createBaselineParams(params), events);
    emitDerivedBucketSpaceStatesDiffEvents(params, events);
    return events;
}
@Test
void feed_block_engage_edge_emits_cluster_event() {
    final EventFixture fixture = EventFixture.createForNodes(3)
            .clusterStateBefore("distributor:3 storage:3")
            .feedBlockBefore(null)
            .clusterStateAfter("distributor:3 storage:3")
            .feedBlockAfter(ClusterStateBundle.FeedBlock.blockedWithDescription("we're closed"));

    final List<Event> events = fixture.computeEventDiff();
    assertThat(events.size(), equalTo(1));
    assertThat(events, hasItem(
            clusterEventWithDescription("Cluster feed blocked due to resource exhaustion: we're closed")));
}
@Override
public NetworkId networkId() {
    return networkId;
}
@Test
public void testEquality() {
    DefaultVirtualHost host1 =
            new DefaultVirtualHost(NetworkId.networkId(0), HID1, MAC1, VLAN1, LOC1, IPSET1);
    DefaultVirtualHost host2 =
            new DefaultVirtualHost(NetworkId.networkId(0), HID1, MAC1, VLAN1, LOC1, IPSET1);
    DefaultVirtualHost host3 =
            new DefaultVirtualHost(NetworkId.networkId(0), HID2, MAC1, VLAN1, LOC1, IPSET1);
    DefaultVirtualHost host4 =
            new DefaultVirtualHost(NetworkId.networkId(1), HID2, MAC1, VLAN1, LOC1, IPSET1);

    new EqualsTester().addEqualityGroup(host1, host2).addEqualityGroup(host3)
            .addEqualityGroup(host4).testEquals();
}
public Value get( Key key ) throws Exception {
  ActiveCacheResult<Value> result = null;
  Future<ActiveCacheResult<Value>> futureResult = null;
  synchronized ( this ) {
    result = valueMap.get( key );
    boolean shouldReload = false;
    long time = System.currentTimeMillis();
    if ( result == null || result.getTimeLoaded() + timeout < time ) {
      // Expired, we need to wait on reload
      result = null;
      shouldReload = true;
    } else if ( result.getTimeLoaded() + ( timeout / 2.0 ) < time ) {
      // Preemptively reload
      shouldReload = true;
    }
    if ( shouldReload ) {
      futureResult = loadingMap.get( key );
      if ( futureResult == null ) {
        futureResult = executorServiceGetter.getExecutor().submit(
          new ActiveCacheCallable<Key, Value>( this, valueMap, loadingMap, key, loader ) );
        loadingMap.put( key, futureResult );
      }
    }
  }
  if ( result == null ) {
    result = futureResult.get();
  }
  Exception exception = result.getException();
  if ( exception != null ) {
    throw exception;
  }
  return result.getValue();
}
@Test
public void testActiveCacheDoesntCacheExceptions() throws Exception {
  long timeout = 100;
  @SuppressWarnings( "unchecked" )
  ActiveCacheLoader<String, String> mockLoader = mock( ActiveCacheLoader.class );
  ActiveCache<String, String> cache = new ActiveCache<String, String>( mockLoader, timeout );
  String testKey = "TEST-KEY";
  Exception testResult = new Exception( "TEST-RESULT" );
  String testResult2 = "TEST-RESULT-2";
  when( mockLoader.load( testKey ) ).thenThrow( testResult ).thenReturn( testResult2 );
  try {
    cache.get( testKey );
    fail();
  } catch ( Exception e ) {
    assertEquals( testResult, e );
  }
  assertEquals( testResult2, cache.get( testKey ) );
  verify( mockLoader, times( 2 ) ).load( testKey );
}
public static String getGroupedName(final String serviceName, final String groupName) {
    if (StringUtils.isBlank(serviceName)) {
        throw new IllegalArgumentException("Param 'serviceName' is illegal, serviceName is blank");
    }
    if (StringUtils.isBlank(groupName)) {
        throw new IllegalArgumentException("Param 'groupName' is illegal, groupName is blank");
    }
    final String resultGroupedName = groupName + Constants.SERVICE_INFO_SPLITER + serviceName;
    return resultGroupedName.intern();
}
@Test
void testGetGroupedName() {
    assertEquals("group@@serviceName", NamingUtils.getGroupedName("serviceName", "group"));
}
@Override
public Object parse(final String property, final Object value) {
  if (property.equalsIgnoreCase(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT)) {
    validator.validate(property, value);
    return value;
  }

  final ConfigItem configItem = resolver.resolve(property, true)
      .orElseThrow(() -> new PropertyNotFoundException(property));

  final Object parsedValue = configItem.parseValue(value);

  validator.validate(configItem.getPropertyName(), parsedValue);
  return parsedValue;
}
@Test
public void shouldNotCallResolverForRunScriptConstant() {
  // When:
  parser.parse(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT, "100");

  // Then:
  verify(resolver, never()).resolve(anyString(), anyBoolean());
}
public static TriggerStateMachine stateMachineForTrigger(RunnerApi.Trigger trigger) {
  switch (trigger.getTriggerCase()) {
    case AFTER_ALL:
      return AfterAllStateMachine.of(
          stateMachinesForTriggers(trigger.getAfterAll().getSubtriggersList()));
    case AFTER_ANY:
      return AfterFirstStateMachine.of(
          stateMachinesForTriggers(trigger.getAfterAny().getSubtriggersList()));
    case AFTER_END_OF_WINDOW:
      return stateMachineForAfterEndOfWindow(trigger.getAfterEndOfWindow());
    case ELEMENT_COUNT:
      return AfterPaneStateMachine.elementCountAtLeast(
          trigger.getElementCount().getElementCount());
    case AFTER_SYNCHRONIZED_PROCESSING_TIME:
      return AfterSynchronizedProcessingTimeStateMachine.ofFirstElement();
    case DEFAULT:
      return DefaultTriggerStateMachine.of();
    case NEVER:
      return NeverStateMachine.ever();
    case ALWAYS:
      return ReshuffleTriggerStateMachine.create();
    case OR_FINALLY:
      return stateMachineForTrigger(trigger.getOrFinally().getMain())
          .orFinally(stateMachineForTrigger(trigger.getOrFinally().getFinally()));
    case REPEAT:
      return RepeatedlyStateMachine.forever(
          stateMachineForTrigger(trigger.getRepeat().getSubtrigger()));
    case AFTER_EACH:
      return AfterEachStateMachine.inOrder(
          stateMachinesForTriggers(trigger.getAfterEach().getSubtriggersList()));
    case AFTER_PROCESSING_TIME:
      return stateMachineForAfterProcessingTime(trigger.getAfterProcessingTime());
    case TRIGGER_NOT_SET:
      throw new IllegalArgumentException(
          String.format("Required field 'trigger' not set on %s", trigger));
    default:
      throw new IllegalArgumentException(String.format("Unknown trigger type %s", trigger));
  }
}
@Test
public void testDefaultTriggerTranslation() {
  RunnerApi.Trigger trigger =
      RunnerApi.Trigger.newBuilder()
          .setDefault(RunnerApi.Trigger.Default.getDefaultInstance())
          .build();

  assertThat(
      TriggerStateMachines.stateMachineForTrigger(trigger),
      instanceOf(DefaultTriggerStateMachine.class));
}
public String getTableName() {
    return tableName;
}
@Test
public void testSetGetTableName() {
    String tableName = "tableName";
    assertEquals(tableName, tableMeta.getTableName(), "Table name should match the value set");
}
public static List<String> listFileNames(String path) throws IORuntimeException {
    if (path == null) {
        return new ArrayList<>(0);
    }
    int index = path.lastIndexOf(FileUtil.JAR_PATH_EXT);
    if (index < 0) {
        // a plain directory
        final List<String> paths = new ArrayList<>();
        final File[] files = ls(path);
        for (File file : files) {
            if (file.isFile()) {
                paths.add(file.getName());
            }
        }
        return paths;
    }

    // a jar file
    path = getAbsolutePath(path);
    // path inside the jar file
    index = index + FileUtil.JAR_FILE_EXT.length();
    JarFile jarFile = null;
    try {
        jarFile = new JarFile(path.substring(0, index));
        // guard against paths like jar!/cn/hutool/ that would make the file unresolvable
        return ZipUtil.listFileNames(jarFile, StrUtil.removePrefix(path.substring(index + 1), "/"));
    } catch (IOException e) {
        throw new IORuntimeException(StrUtil.format("Can not read file path of [{}]", path), e);
    } finally {
        IoUtil.close(jarFile);
    }
}
@Test
@Disabled
public void listFileNamesTest2() {
    final List<String> names = FileUtil.listFileNames(
            "D:\\m2_repo\\commons-cli\\commons-cli\\1.0\\commons-cli-1.0.jar!org/apache/commons/cli/");
    for (final String string : names) {
        Console.log(string);
    }
}
public static Collection<AndPredicate> getAndPredicates(final ExpressionSegment expression) {
    Collection<AndPredicate> result = new LinkedList<>();
    extractAndPredicates(result, expression);
    return result;
}
@Test
void assertExtractAndPredicatesOrCondition() {
    ColumnSegment columnSegment1 = new ColumnSegment(28, 33, new IdentifierValue("status"));
    ParameterMarkerExpressionSegment parameterMarkerExpressionSegment1 = new ParameterMarkerExpressionSegment(35, 35, 0);
    ExpressionSegment expressionSegment1 = new BinaryOperationExpression(28, 39, columnSegment1, parameterMarkerExpressionSegment1, "=", "status=?");
    ColumnSegment columnSegment2 = new ColumnSegment(40, 45, new IdentifierValue("status"));
    ParameterMarkerExpressionSegment parameterMarkerExpressionSegment2 = new ParameterMarkerExpressionSegment(47, 47, 1);
    ExpressionSegment expressionSegment2 = new BinaryOperationExpression(40, 47, columnSegment2, parameterMarkerExpressionSegment2, "=", "status=?");
    BinaryOperationExpression expression = new BinaryOperationExpression(28, 47, expressionSegment1, expressionSegment2, "OR", "status=? OR status=?");
    Collection<AndPredicate> actual = ExpressionExtractUtils.getAndPredicates(expression);
    assertThat(actual.size(), is(2));
    Iterator<AndPredicate> andPredicateIterator = actual.iterator();
    AndPredicate andPredicate1 = andPredicateIterator.next();
    AndPredicate andPredicate2 = andPredicateIterator.next();
    assertThat(andPredicate1.getPredicates().iterator().next(), is(expressionSegment1));
    assertThat(andPredicate2.getPredicates().iterator().next(), is(expressionSegment2));
}
@Override
public double mean() {
    return 1 / p;
}
@Test
public void testMean() {
    System.out.println("mean");
    ShiftedGeometricDistribution instance = new ShiftedGeometricDistribution(0.3);
    instance.rand();
    assertEquals(3.333333, instance.mean(), 1E-6);
}
@Override
public void onOutOfMemory(OutOfMemoryError oome, HazelcastInstance[] hazelcastInstances) {
    for (HazelcastInstance instance : hazelcastInstances) {
        if (instance instanceof HazelcastClientInstanceImpl impl) {
            ClientHelper.cleanResources(impl);
        }
    }
    try {
        oome.printStackTrace(System.err);
    } catch (Throwable ignored) {
        ignore(ignored);
    }
}
@Test
public void testOnOutOfMemory() {
    outOfMemoryHandler.onOutOfMemory(new OutOfMemoryError(), instances);

    assertTrueEventually(() ->
            assertFalse("The client should be shutdown", client.getLifecycleService().isRunning()));
}
public void update(Map<String, NamespaceBundleStats> bundleStats, int topk) {
    arr.clear();
    try {
        var isLoadBalancerSheddingBundlesWithPoliciesEnabled =
                pulsar.getConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled();
        for (var etr : bundleStats.entrySet()) {
            String bundle = etr.getKey();
            // TODO: do not filter system topic while shedding
            if (NamespaceService.isSystemServiceNamespace(NamespaceBundle.getBundleNamespace(bundle))) {
                continue;
            }
            if (!isLoadBalancerSheddingBundlesWithPoliciesEnabled && hasPolicies(bundle)) {
                continue;
            }
            arr.add(etr);
        }
        var topKBundlesLoadData = loadData.getTopBundlesLoadData();
        topKBundlesLoadData.clear();
        if (arr.isEmpty()) {
            return;
        }
        topk = Math.min(topk, arr.size());
        partitionSort(arr, topk);
        for (int i = topk - 1; i >= 0; i--) {
            var etr = arr.get(i);
            topKBundlesLoadData.add(
                    new TopBundlesLoadData.BundleLoadData(etr.getKey(), (NamespaceBundleStats) etr.getValue()));
        }
    } finally {
        arr.clear();
    }
}
@Test
public void testSystemNamespace() {
    Map<String, NamespaceBundleStats> bundleStats = new HashMap<>();
    var topKBundles = new TopKBundles(pulsar);
    NamespaceBundleStats stats1 = new NamespaceBundleStats();
    stats1.msgRateIn = 500;
    bundleStats.put("pulsar/system/0x00000000_0x0FFFFFFF", stats1);
    NamespaceBundleStats stats2 = new NamespaceBundleStats();
    stats2.msgRateIn = 10000;
    bundleStats.put(bundle1, stats2);
    topKBundles.update(bundleStats, 2);

    assertEquals(topKBundles.getLoadData().getTopBundlesLoadData().size(), 1);
    var top0 = topKBundles.getLoadData().getTopBundlesLoadData().get(0);
    assertEquals(top0.bundleName(), bundle1);
}
@Override
public Optional<ExecuteResult> getSaneQueryResult(final SQLStatement sqlStatement, final SQLException ex) {
    if (ER_PARSE_ERROR == ex.getErrorCode()) {
        return Optional.empty();
    }
    if (sqlStatement instanceof SelectStatement) {
        return createQueryResult((SelectStatement) sqlStatement);
    }
    if (sqlStatement instanceof MySQLShowOtherStatement) {
        return Optional.of(createQueryResult());
    }
    if (sqlStatement instanceof MySQLSetStatement) {
        return Optional.of(new UpdateResult(0, 0L));
    }
    return Optional.empty();
}
@Test
void assertGetSaneQueryResultForOtherStatements() {
    assertThat(new MySQLDialectSaneQueryResultEngine().getSaneQueryResult(new MySQLInsertStatement(), new SQLException("")),
            is(Optional.empty()));
}
public static ThreadFactory create(final String namePrefix, final boolean daemon) {
    return create(namePrefix, daemon, Thread.NORM_PRIORITY);
}
@Test
public void testCreate() {
    ThreadFactory threadFactory = ShenyuThreadFactory.create(NAME_PREFIX, true);
    assertThat(threadFactory, notNullValue());
}
@Override
public long extract(final ConsumerRecord<Object, Object> record, final long previousTimestamp) {
  try {
    return delegate.extract(record, previousTimestamp);
  } catch (final RuntimeException e) {
    return handleFailure(record.key(), record.value(), e);
  }
}
@Test
public void shouldLogExceptionsAndNotFailOnExtractFromRecordWithNullKeyAndValue() {
  // Given:
  when(record.key()).thenReturn(null);
  when(record.value()).thenReturn(null);
  final KsqlException e = new KsqlException("foo");
  final LoggingTimestampExtractor extractor = new LoggingTimestampExtractor(
      (k, v) -> {
        throw e;
      },
      logger,
      false
  );

  // When:
  final long result = extractor.extract(record, PREVIOUS_TS);

  // Then (did not throw):
  verify(logger).error(RecordProcessingError
      .recordProcessingError("Failed to extract timestamp from row", e, () -> "key:null, value:null"));
  assertThat(result, is(-1L));
}
private void initConfig() {
    LOG.info("job config file path: " + jobConfigFilePath);
    Dataset<String> ds = spark.read().textFile(jobConfigFilePath);
    String jsonConfig = ds.first();
    LOG.info("rdd read json config: " + jsonConfig);
    etlJobConfig = EtlJobConfig.configFromJson(jsonConfig);
    LOG.info("etl job config: " + etlJobConfig);
}
@Test
public void testInitConfig(@Mocked SparkSession spark, @Injectable Dataset<String> ds) {
    new Expectations() {
        {
            SparkSession.builder().enableHiveSupport().getOrCreate();
            result = spark;
            spark.read().textFile(anyString);
            result = ds;
            ds.first();
            result = etlJobConfig.configToJson();
        }
    };

    SparkEtlJob job = Deencapsulation.newInstance(SparkEtlJob.class, "hdfs://127.0.0.1:10000/jobconfig.json");
    Deencapsulation.invoke(job, "initSparkEnvironment");
    Deencapsulation.invoke(job, "initConfig");
    EtlJobConfig parsedConfig = Deencapsulation.getField(job, "etlJobConfig");
    Assert.assertTrue(parsedConfig.tables.containsKey(tableId));
    EtlTable table = parsedConfig.tables.get(tableId);
    Assert.assertEquals(2, table.indexes.size());
    Assert.assertEquals(2, table.partitionInfo.partitions.size());
    Assert.assertEquals(false, parsedConfig.properties.strictMode);
    Assert.assertEquals("label0", parsedConfig.label);
}
@VisibleForTesting
Path getStagingDir(FileSystem defaultFileSystem) throws IOException {
    final String configuredStagingDir = flinkConfiguration.get(YarnConfigOptions.STAGING_DIRECTORY);
    if (configuredStagingDir == null) {
        return defaultFileSystem.getHomeDirectory();
    }
    FileSystem stagingDirFs = new Path(configuredStagingDir).getFileSystem(defaultFileSystem.getConf());
    return stagingDirFs.makeQualified(new Path(configuredStagingDir));
}
@Test
void testGetStagingDirWithSpecifyingStagingDir() throws IOException {
    final Configuration flinkConfig = new Configuration();
    flinkConfig.set(YarnConfigOptions.STAGING_DIRECTORY, "file:///tmp/path1");
    try (final YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor(flinkConfig)) {
        YarnConfiguration yarnConfig = new YarnConfiguration();
        yarnConfig.set("fs.defaultFS", "viewfs://hadoop-ns01");
        yarnConfig.set("fs.viewfs.mounttable.hadoop-ns01.link./tmp", "file://tmp");
        FileSystem defaultFileSystem = FileSystem.get(yarnConfig);
        Path stagingDir = yarnClusterDescriptor.getStagingDir(defaultFileSystem);

        assertThat(defaultFileSystem.getScheme()).isEqualTo("viewfs");
        assertThat(stagingDir.getFileSystem(yarnConfig).getScheme()).isEqualTo("file");
    }
}
public DataSinkTask(Environment environment) {
    super(environment);
}
@Test
void testDataSinkTask() {
    FileReader fr = null;
    BufferedReader br = null;
    try {
        int keyCnt = 100;
        int valCnt = 20;

        super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
        super.addInput(new UniformRecordGenerator(keyCnt, valCnt, false), 0);

        DataSinkTask<Record> testTask = new DataSinkTask<>(this.mockEnv);

        File tempTestFile = new File(tempFolder.toFile(), UUID.randomUUID().toString());
        super.registerFileOutputTask(
                MockOutputFormat.class, tempTestFile.toURI().toString(), new Configuration());

        testTask.invoke();

        assertThat(tempTestFile).withFailMessage("Temp output file does not exist").exists();

        fr = new FileReader(tempTestFile);
        br = new BufferedReader(fr);

        HashMap<Integer, HashSet<Integer>> keyValueCountMap = new HashMap<>(keyCnt);

        while (br.ready()) {
            String line = br.readLine();

            Integer key = Integer.parseInt(line.substring(0, line.indexOf("_")));
            Integer val = Integer.parseInt(line.substring(line.indexOf("_") + 1, line.length()));

            if (!keyValueCountMap.containsKey(key)) {
                keyValueCountMap.put(key, new HashSet<Integer>());
            }
            keyValueCountMap.get(key).add(val);
        }

        assertThat(keyValueCountMap)
                .withFailMessage(
                        "Invalid key count in out file. Expected: %d Actual: %d",
                        keyCnt, keyValueCountMap.size())
                .hasSize(keyCnt);

        for (Integer key : keyValueCountMap.keySet()) {
            assertThat(keyValueCountMap.get(key))
                    .withFailMessage(
                            "Invalid value count for key: %d. Expected: %d Actual: %d",
                            key, valCnt, keyValueCountMap.get(key).size())
                    .hasSize(valCnt);
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (br != null) {
            try {
                br.close();
            } catch (Throwable t) {
            }
        }
        if (fr != null) {
            try {
                fr.close();
            } catch (Throwable t) {
            }
        }
    }
}
@Operation(summary = "Download metadata from processed file")
@GetMapping(value = "show_metadata/results/{result_id}", produces = "application/xml")
@ResponseBody
public String getProcessedMetadata(@PathVariable("result_id") Long resultId) {
    return metadataRetrieverService.getProcessedMetadata(resultId);
}
@Test
public void getProcessedMetadata() {
    when(metadataRetrieverServiceMock.getProcessedMetadata(anyLong())).thenReturn("metadata");

    String result = controllerMock.getProcessedMetadata(1L);

    verify(metadataRetrieverServiceMock, times(1)).getProcessedMetadata(anyLong());
    assertNotNull(result);
}
static void moveAuthTag(byte[] messageKey,
                        byte[] cipherText,
                        byte[] messageKeyWithAuthTag,
                        byte[] cipherTextWithoutAuthTag) {
    // Check dimensions of arrays
    if (messageKeyWithAuthTag.length != messageKey.length + 16) {
        throw new IllegalArgumentException("Length of messageKeyWithAuthTag must be length of messageKey + "
                + "length of AuthTag (16)");
    }
    if (cipherTextWithoutAuthTag.length != cipherText.length - 16) {
        throw new IllegalArgumentException("Length of cipherTextWithoutAuthTag must be length of cipherText "
                + "- length of AuthTag (16)");
    }

    // Move auth tag from cipherText to messageKey
    System.arraycopy(messageKey, 0, messageKeyWithAuthTag, 0, 16);
    System.arraycopy(cipherText, 0, cipherTextWithoutAuthTag, 0, cipherTextWithoutAuthTag.length);
    System.arraycopy(cipherText, cipherText.length - 16, messageKeyWithAuthTag, 16, 16);
}
@Test
public void testMoveAuthTag() {
    // Extract authTag for testing purposes
    byte[] authTag = new byte[16];
    System.arraycopy(cipherTextWithAuthTag, 35, authTag, 0, 16);

    byte[] messageKeyWithAuthTag = new byte[16 + 16];
    byte[] cipherTextWithoutAuthTag = new byte[35];
    OmemoMessageBuilder.moveAuthTag(messageKey, cipherTextWithAuthTag, messageKeyWithAuthTag, cipherTextWithoutAuthTag);

    // Check if first n - 16 bytes of cipherText got copied over to cipherTextWithoutAuthTag correctly
    byte[] checkCipherText = new byte[35];
    System.arraycopy(cipherTextWithAuthTag, 0, checkCipherText, 0, 35);
    assertTrue(Arrays.equals(checkCipherText, cipherTextWithoutAuthTag));

    byte[] checkMessageKey = new byte[16];
    System.arraycopy(messageKeyWithAuthTag, 0, checkMessageKey, 0, 16);
    assertTrue(Arrays.equals(checkMessageKey, messageKey));

    byte[] checkAuthTag = new byte[16];
    System.arraycopy(messageKeyWithAuthTag, 16, checkAuthTag, 0, 16);
    assertTrue(Arrays.equals(checkAuthTag, authTag));
}
static Optional<Integer> typePrefixLength(List<String> nameParts) {
  TyParseState state = TyParseState.START;
  Optional<Integer> typeLength = Optional.empty();
  for (int i = 0; i < nameParts.size(); i++) {
    state = state.next(JavaCaseFormat.from(nameParts.get(i)));
    if (state == TyParseState.REJECT) {
      break;
    }
    if (state.isSingleUnit()) {
      typeLength = Optional.of(i);
    }
  }
  return typeLength;
}
@Test
public void typePrefixLength() {
  assertThat(getPrefix("fieldName")).isEmpty();
  assertThat(getPrefix("CONST")).isEmpty();
  assertThat(getPrefix("ClassName")).hasValue(0);
  assertThat(getPrefix("com.ClassName")).hasValue(1);
  assertThat(getPrefix("ClassName.foo")).hasValue(1);
  assertThat(getPrefix("com.ClassName.foo")).hasValue(2);
  assertThat(getPrefix("ClassName.foo.bar")).hasValue(1);
  assertThat(getPrefix("com.ClassName.foo.bar")).hasValue(2);
  assertThat(getPrefix("ClassName.CONST")).hasValue(1);
  assertThat(getPrefix("ClassName.varName")).hasValue(1);
  assertThat(getPrefix("ClassName.Inner.varName")).hasValue(2);
  assertThat(getPrefix("com.R.foo")).hasValue(2);
}
public static void rethrowIfFatalError(Throwable t) {
    if (isJvmFatalError(t)) {
        throw (Error) t;
    }
}
@Test
void testRethrowFatalError() {
    // fatal error is rethrown
    assertThatThrownBy(() -> ExceptionUtils.rethrowIfFatalError(new InternalError()))
            .isInstanceOf(InternalError.class);

    // non-fatal error is not rethrown
    ExceptionUtils.rethrowIfFatalError(new NoClassDefFoundError());
}
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
    List<String> partNames, boolean areAllPartsFound) throws MetaException {
  checkStatisticsList(colStatsWithSourceInfo);

  ColumnStatisticsObj statsObj = null;
  String colType;
  String colName = null;
  // check if all the ColumnStatisticsObjs contain stats and all the ndv are
  // bitvectors
  boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
  NumDistinctValueEstimator ndvEstimator = null;
  boolean areAllNDVEstimatorsMergeable = true;
  for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
    ColumnStatisticsObj cso = csp.getColStatsObj();
    if (statsObj == null) {
      colName = cso.getColName();
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
      LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
          doAllPartitionContainStats);
    }
    DecimalColumnStatsDataInspector columnStatsData = decimalInspectorFromStats(cso);

    // check if we can merge NDV estimators
    if (columnStatsData.getNdvEstimator() == null) {
      areAllNDVEstimatorsMergeable = false;
      break;
    } else {
      NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
      if (ndvEstimator == null) {
        ndvEstimator = estimator;
      } else {
        if (!ndvEstimator.canMerge(estimator)) {
          areAllNDVEstimatorsMergeable = false;
          break;
        }
      }
    }
  }
  if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
    ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
  }
  LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
  ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
  if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
    DecimalColumnStatsDataInspector aggregateData = null;
    long lowerBound = 0;
    long higherBound = 0;
    double densityAvgSum = 0.0;
    DecimalColumnStatsMerger merger = new DecimalColumnStatsMerger();
    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
      ColumnStatisticsObj cso = csp.getColStatsObj();
      DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
      lowerBound = Math.max(lowerBound, newData.getNumDVs());
      higherBound += newData.getNumDVs();
      if (newData.isSetLowValue() && newData.isSetHighValue()) {
        densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue())
            - MetaStoreServerUtils.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
      }
      if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        aggregateData.setLowValue(merger.mergeLowValue(
            merger.getLowValue(aggregateData), merger.getLowValue(newData)));
        aggregateData.setHighValue(merger.mergeHighValue(
            merger.getHighValue(aggregateData), merger.getHighValue(newData)));
        aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
        aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
      }
    }
    if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
      // if all the ColumnStatisticsObjs contain bitvectors, we do not need to
      // use uniform distribution assumption because we can merge bitvectors
      // to get a good estimation.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else {
      long estimation;
      if (useDensityFunctionForNDVEstimation && aggregateData != null
          && aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
        // We have estimation, lowerbound and higherbound. We use estimation
        // if it is between lowerbound and higherbound.
        double densityAvg = densityAvgSum / partNames.size();
        estimation = (long) ((MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue())
            - MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue())) / densityAvg);
        if (estimation < lowerBound) {
          estimation = lowerBound;
        } else if (estimation > higherBound) {
          estimation = higherBound;
        }
      } else {
        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }
      aggregateData.setNumDVs(estimation);
    }
    columnStatisticsData.setDecimalStats(aggregateData);
  } else {
    // TODO: bail out if missing stats are over a certain threshold
    // we need extrapolation
    LOG.debug("start extrapolation for {}", colName);
    Map<String, Integer> indexMap = new HashMap<>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
    // while we scan the css, we also get the densityAvg, lowerbound and
    // higherbound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (!areAllNDVEstimatorsMergeable) {
      // if not every partition uses bitvector for ndv, we just fall back to
      // the traditional extrapolation methods.
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats();
        if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
          densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue())
              - MetaStoreServerUtils.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // we first merge all the adjacent bitvectors that we could merge and
      // derive new partition names and index.
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      DecimalColumnStatsDataInspector aggregateData = null;
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
        // newData.isSetBitVectors() should be true for sure because we
        // already checked it before.
        if (indexMap.get(partName) != curIndex) {
          // There is bitvector, but it is not adjacent to the previous ones.
          if (length > 0) {
            // we have to set ndv
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setDecimalStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue())
                  - MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
            }
            // reset everything
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
            ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          if (MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue())
              < MetaStoreServerUtils.decimalToDouble(newData.getLowValue())) {
            aggregateData.setLowValue(aggregateData.getLowValue());
          } else {
            aggregateData.setLowValue(newData.getLowValue());
          }
          if (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue())
              > MetaStoreServerUtils.decimalToDouble(newData.getHighValue())) {
            aggregateData.setHighValue(aggregateData.getHighValue());
          } else {
            aggregateData.setHighValue(newData.getHighValue());
          }
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (length > 0) {
        // we have to set ndv
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setDecimalStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue())
              - MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
        adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
  LOG.debug(
      "Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
      colName, columnStatisticsData.getDecimalStats().getNumDVs(), partNames.size(),
      colStatsWithSourceInfo.size());
  KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
  if (mergedKllHistogramEstimator != null) {
    columnStatisticsData.getDecimalStats().setHistogram(mergedKllHistogramEstimator.serialize());
  }
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
@Test
public void testAggregateSingleStat() throws MetaException {
  List<String> partitions = Collections.singletonList("part1");

  ColumnStatisticsData data1 = new ColStatsBuilder<>(Decimal.class).numNulls(1).numDVs(2)
      .low(ONE).high(FOUR).hll(1, 4).kll(1, 4).build();
  List<ColStatsObjWithSourceInfo> statsList =
      Collections.singletonList(createStatsWithInfo(data1, TABLE, COL, partitions.get(0)));

  DecimalColumnStatsAggregator aggregator = new DecimalColumnStatsAggregator();
  ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);

  assertEqualStatistics(data1, computedStatsObj.getStatsData());
}
public static <T> T loadData(Map<String, Object> config, T existingData, Class<T> dataCls) { try { String existingConfigJson = MAPPER.writeValueAsString(existingData); Map<String, Object> existingConfig = MAPPER.readValue(existingConfigJson, Map.class); Map<String, Object> newConfig = new HashMap<>(); newConfig.putAll(existingConfig); newConfig.putAll(config); String configJson = MAPPER.writeValueAsString(newConfig); return MAPPER.readValue(configJson, dataCls); } catch (IOException e) { throw new RuntimeException("Failed to load config into existing configuration data", e); } }
@Test public void testLoadClientConfigurationData() { ClientConfigurationData confData = new ClientConfigurationData(); confData.setServiceUrl("pulsar://unknown:6650"); confData.setMaxLookupRequest(600); confData.setMaxLookupRedirects(10); confData.setNumIoThreads(33); Map<String, Object> config = new HashMap<>(); Map<String, String> authParamMap = new HashMap<>(); authParamMap.put("k1", "v1"); authParamMap.put("k2", "v2"); config.put("serviceUrl", "pulsar://localhost:6650"); config.put("maxLookupRequest", 70000); config.put("maxLookupRedirects", 50); config.put("authParams", "testAuthParams"); config.put("authParamMap", authParamMap); config.put("dnsLookupBindAddress", "0.0.0.0"); config.put("dnsLookupBindPort", 0); List<InetSocketAddress> dnsServerAddresses = Arrays.asList(new InetSocketAddress[] { new InetSocketAddress("1.1.1.1", 53), new InetSocketAddress("2.2.2.2",100) }); config.put("dnsServerAddresses", dnsServerAddresses); confData = ConfigurationDataUtils.loadData(config, confData, ClientConfigurationData.class); assertEquals("pulsar://localhost:6650", confData.getServiceUrl()); assertEquals(70000, confData.getMaxLookupRequest()); assertEquals(50, confData.getMaxLookupRedirects()); assertEquals(33, confData.getNumIoThreads()); assertEquals("testAuthParams", confData.getAuthParams()); assertEquals("v1", confData.getAuthParamMap().get("k1")); assertEquals("v2", confData.getAuthParamMap().get("k2")); assertEquals("0.0.0.0", confData.getDnsLookupBindAddress()); assertEquals(0, confData.getDnsLookupBindPort()); assertEquals(dnsServerAddresses, confData.getDnsServerAddresses()); }
public <T extends Notification> int deliverEmails(Collection<T> notifications) { if (handlers.isEmpty()) { return 0; } Class<T> aClass = typeClassOf(notifications); if (aClass == null) { return 0; } checkArgument(aClass != Notification.class, "Type of notification objects must be a subtype of " + Notification.class.getSimpleName()); return handlers.stream() .filter(t -> t.getNotificationClass() == aClass) .map(t -> (NotificationHandler<T>) t) .mapToInt(handler -> handler.deliver(notifications)) .sum(); }
@Test public void deliverEmails_collection_has_no_effect_if_no_handler() { NotificationDispatcher dispatcher = mock(NotificationDispatcher.class); List<Notification> notifications = IntStream.range(0, 10) .mapToObj(i -> mock(Notification.class)) .toList(); NotificationService underTest = new NotificationService(dbClient, new NotificationDispatcher[]{dispatcher}); assertThat(underTest.deliverEmails(notifications)).isZero(); verifyNoInteractions(dispatcher); verifyNoInteractions(dbClient); }
private ResourceMethodIdentifierGenerator() { }
@Test(dataProvider = "testData") public void testResourceMethodIdentifierGenerator(String baseUriTemplate, ResourceMethod method, String methodName, String expected) { final String resourceMethodIdentifier = ResourceMethodIdentifierGenerator.generate(baseUriTemplate, method, methodName); final String keylessRMI = ResourceMethodIdentifierGenerator.stripPathKeys(resourceMethodIdentifier); final String keylessBaseUriTemplate = ResourceMethodIdentifierGenerator.stripPathKeys(baseUriTemplate); Assert.assertEquals(resourceMethodIdentifier, expected, "ResourceMethodIdentifier is incorrect"); Assert.assertFalse(keylessRMI.contains("{}"), "keylessRMI should not contain key pattern: " + keylessRMI); Assert.assertEquals(keylessRMI, resourceMethodIdentifier.replaceAll("/?\\{}", ""), "keylessRMI is incorrect for " + resourceMethodIdentifier); if (baseUriTemplate != null) { Assert.assertEquals(keylessBaseUriTemplate, baseUriTemplate.replaceAll("/?\\{[^}]*}", ""), "Keyless baseUriTemplate is incorrect for " + baseUriTemplate); } else { Assert.assertNull(keylessBaseUriTemplate); } }
public Stream<ColumnName> resolveSelectStar( final Optional<SourceName> sourceName ) { return getSources().stream() .filter(s -> !sourceName.isPresent() || !s.getSourceName().isPresent() || sourceName.equals(s.getSourceName())) .flatMap(s -> s.resolveSelectStar(sourceName)); }
@Test public void shouldResolveAliasedSelectStarByCallingOnlyCorrectParent() { // When: final Stream<ColumnName> result = planNode.resolveSelectStar(Optional.of(SOURCE_2_NAME)); // Then: final List<ColumnName> columns = result.collect(Collectors.toList()); assertThat(columns, contains(COL2, COL3)); verify(source1, never()).resolveSelectStar(any()); verify(source2).resolveSelectStar(Optional.of(SOURCE_2_NAME)); }
@Override public void importFrom(Import theImport, String sourceSystemId) { this.namespace = theImport.getNamespace() == null ? "" : theImport.getNamespace() + ":"; this.importFrom(theImport.getLocation()); }
@Test public void testComplexTypeMixed() throws Exception { URL url = ReflectUtil.getResource("org/flowable/engine/impl/webservice/complexType-mixed.wsdl"); importer.importFrom(url.toString()); }
@Override protected int command() { if (!validateConfigFilePresent()) { return 1; } final MigrationConfig config; try { config = MigrationConfig.load(getConfigFile()); } catch (KsqlException | MigrationException e) { LOGGER.error(e.getMessage()); return 1; } return command(config, MigrationsUtil::getKsqlClient); }
@Test public void shouldFailIfMultipleQueriesWritingToTable() { // Given: when(sourceDescription.writeQueries()).thenReturn(ImmutableList.of(ctasQueryInfo, otherQueryInfo)); // When: final int status = command.command(config, cfg -> client); // Then: assertThat(status, is(1)); verify(client, never()).executeStatement("TERMINATE " + CTAS_QUERY_ID + ";"); verify(client, never()).executeStatement("DROP TABLE " + MIGRATIONS_TABLE + " DELETE TOPIC;"); verify(client, never()).executeStatement("DROP STREAM " + MIGRATIONS_STREAM + " DELETE TOPIC;"); }
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (topologyMetadata.hasNamedTopologies()) { throw new IllegalArgumentException("Cannot invoke the getAllMetadataForStore(storeName) method when" + "using named topologies, please use the overload that accepts" + "a topologyName parameter to identify the correct store"); } if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final Collection<String> sourceTopics = topologyMetadata.sourceTopicsForStore(storeName, null); if (sourceTopics.isEmpty()) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (final StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName) || metadata.standbyStateStoreNames().contains(storeName)) { results.add(metadata); } } return results; }
@Test public void shouldReturnEmptyCollectionOnGetAllInstancesWithStoreWhenStoreDoesntExist() { final Collection<StreamsMetadata> actual = metadataState.getAllMetadataForStore("not-a-store"); assertTrue(actual.isEmpty()); }
@Override public ImportResult importItem( UUID jobId, IdempotentImportExecutor idempotentImportExecutor, TokensAndUrlAuthData authData, MusicContainerResource data) throws Exception { if (data == null) { // Nothing to do return new ImportResult(new AppleContentException("Null MusicContainerResource received on AppleMusicImporter::importItem")); } int playlistsCount = 0; int playlistItemsCount = 0; AppleMusicInterface musicInterface = factory .getOrCreateMusicInterface(jobId, authData, appCredentials, exportingService, monitor); if (!data.getPlaylists().isEmpty()) { playlistsCount = musicInterface.importPlaylists(jobId, idempotentImportExecutor, data.getPlaylists()); } if (!data.getPlaylistItems().isEmpty()) { playlistItemsCount = musicInterface.importMusicPlaylistItems(jobId, idempotentImportExecutor, data.getPlaylistItems()); } final Map<String, Integer> counts = new ImmutableMap.Builder<String, Integer>() .put(AppleMusicConstants.PLAYLISTS_COUNT_DATA_NAME, playlistsCount) .put(AppleMusicConstants.PLAYLIST_ITEMS_COUNT_DATA_NAME, playlistItemsCount) .build(); return ImportResult.OK .copyWithCounts(counts); }
@Test public void importPlaylists() throws Exception { List<MusicPlaylist> musicPlaylists = createTestMusicPlaylists(); setUpImportPlaylistsBatchResponse(musicPlaylists.stream().collect( Collectors.toMap(MusicPlaylist::getId, playlist -> SC_OK))); MusicContainerResource playlistsResource = new MusicContainerResource(musicPlaylists, null, null, null); final ImportResult importResult = appleMusicImporter.importItem(uuid, executor, authData, playlistsResource); verify(appleMusicInterface) .importPlaylistsBatch(uuid.toString(), musicPlaylists); assertThat(importResult.getCounts().isPresent()).isTrue(); // Should be the same as the number of playlists sent in. assertThat(importResult.getCounts().get().get(AppleMusicConstants.PLAYLISTS_COUNT_DATA_NAME) == playlistsResource.getPlaylists().size()).isTrue(); // No playlist items were sent. assertThat(importResult.getCounts().get().get(AppleMusicConstants.PLAYLIST_ITEMS_COUNT_DATA_NAME) == 0).isTrue(); }
@Override public PageResult<ProductSpuDO> getSpuPage(ProductSpuPageReqVO pageReqVO) { return productSpuMapper.selectPage(pageReqVO); }
@Test void getSpuPage_alarmStock_empty() { // Prepare parameters ArrayList<ProductSpuDO> createReqVOs = Lists.newArrayList(randomPojo(ProductSpuDO.class,o->{ o.setCategoryId(generateId()); o.setBrandId(generateId()); o.setDeliveryTemplateId(generateId()); o.setSort(RandomUtil.randomInt(1,100)); // limit the sort range o.setGiveIntegral(generaInt()); // limit to positive integers o.setVirtualSalesCount(generaInt()); // limit to positive integers o.setPrice(generaInt()); // limit to positive integers o.setMarketPrice(generaInt()); // limit to positive integers o.setCostPrice(generaInt()); // limit to positive integers o.setStock(11); // limit to positive integers o.setGiveIntegral(generaInt()); // limit to positive integers o.setSalesCount(generaInt()); // limit to positive integers o.setBrowseCount(generaInt()); // limit to positive integers }), randomPojo(ProductSpuDO.class,o->{ o.setCategoryId(generateId()); o.setBrandId(generateId()); o.setDeliveryTemplateId(generateId()); o.setSort(RandomUtil.randomInt(1,100)); // limit the sort range o.setGiveIntegral(generaInt()); // limit to positive integers o.setVirtualSalesCount(generaInt()); // limit to positive integers o.setPrice(generaInt()); // limit to positive integers o.setMarketPrice(generaInt()); // limit to positive integers o.setCostPrice(generaInt()); // limit to positive integers o.setStock(11); // limit to positive integers o.setGiveIntegral(generaInt()); // limit to positive integers o.setSalesCount(generaInt()); // limit to positive integers o.setBrowseCount(generaInt()); // limit to positive integers })); productSpuMapper.insertBatch(createReqVOs); // Invoke ProductSpuPageReqVO productSpuPageReqVO = new ProductSpuPageReqVO(); productSpuPageReqVO.setTabType(ProductSpuPageReqVO.ALERT_STOCK); PageResult<ProductSpuDO> spuPage = productSpuService.getSpuPage(productSpuPageReqVO); PageResult<Object> result = PageResult.empty(); Assertions.assertIterableEquals(result.getList(), spuPage.getList()); assertEquals(spuPage.getTotal(), result.getTotal()); }
@PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN')") @PutMapping(value = IMAGE_URL, consumes = MediaType.MULTIPART_FORM_DATA_VALUE) public TbResourceInfo updateImage(@Parameter(description = IMAGE_TYPE_PARAM_DESCRIPTION, schema = @Schema(allowableValues = {"tenant", "system"}), required = true) @PathVariable String type, @Parameter(description = IMAGE_KEY_PARAM_DESCRIPTION, required = true) @PathVariable String key, @RequestPart MultipartFile file) throws Exception { TbResourceInfo imageInfo = checkImageInfo(type, key, Operation.WRITE); resourceValidator.validateResourceSize(getTenantId(), imageInfo.getId(), file.getSize()); TbResource image = new TbResource(imageInfo); image.setData(file.getBytes()); image.setFileName(file.getOriginalFilename()); image.updateDescriptor(ImageDescriptor.class, descriptor -> { descriptor.setMediaType(file.getContentType()); return descriptor; }); return tbImageService.save(image, getCurrentUser()); }
@Test public void testUpdateImage() throws Exception { String filename = "my_png_image.png"; TbResourceInfo imageInfo = uploadImage(HttpMethod.POST, "/api/image", filename, "image/png", PNG_IMAGE); checkPngImageDescriptor(imageInfo.getDescriptor(ImageDescriptor.class)); String newFilename = "my_jpeg_image.png"; TbResourceInfo newImageInfo = uploadImage(HttpMethod.PUT, "/api/images/tenant/" + filename, newFilename, "image/jpeg", JPEG_IMAGE); assertThat(newImageInfo.getTitle()).isEqualTo(filename); assertThat(newImageInfo.getResourceKey()).isEqualTo(filename); assertThat(newImageInfo.getFileName()).isEqualTo(newFilename); assertThat(newImageInfo.getPublicResourceKey()).isEqualTo(imageInfo.getPublicResourceKey()); ImageDescriptor imageDescriptor = newImageInfo.getDescriptor(ImageDescriptor.class); checkJpegImageDescriptor(imageDescriptor); assertThat(downloadImage("tenant", filename)).containsExactly(JPEG_IMAGE); assertThat(downloadImagePreview("tenant", filename)).hasSize((int) imageDescriptor.getPreviewDescriptor().getSize()); }
private void removePublisherIndexes(Service service, String clientId) { publisherIndexes.computeIfPresent(service, (s, ids) -> { ids.remove(clientId); NotifyCenter.publishEvent(new ServiceEvent.ServiceChangedEvent(service, true)); return ids.isEmpty() ? null : ids; }); }
@Test void testRemovePublisherIndexes() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { String clientId = "clientId"; Class<ClientServiceIndexesManager> clientServiceIndexesManagerClass = ClientServiceIndexesManager.class; Method removePublisherIndexes = clientServiceIndexesManagerClass.getDeclaredMethod("removePublisherIndexes", Service.class, String.class); removePublisherIndexes.setAccessible(true); removePublisherIndexes.invoke(clientServiceIndexesManager, service, clientId); Collection<String> allClientsSubscribeService = clientServiceIndexesManager.getAllClientsRegisteredService(service); assertNotNull(allClientsSubscribeService); assertEquals(1, allClientsSubscribeService.size()); }
public String getDefaultResourceRequestAppPlacementType() { if (this.rmContext != null && this.rmContext.getYarnConfiguration() != null) { String appPlacementClass = applicationSchedulingEnvs.get( ApplicationSchedulingConfig.ENV_APPLICATION_PLACEMENT_TYPE_CLASS); if (null != appPlacementClass) { return appPlacementClass; } else { Configuration conf = rmContext.getYarnConfiguration(); return conf.get( YarnConfiguration.APPLICATION_PLACEMENT_TYPE_CLASS); } } return null; }
@Test public void testApplicationPlacementTypeNotConfigured() { Configuration conf = new Configuration(); RMContext rmContext = mock(RMContext.class); when(rmContext.getYarnConfiguration()).thenReturn(conf); ApplicationId appIdImpl = ApplicationId.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appIdImpl, 1); Queue queue = mock(Queue.class); HashMap<String, String> applicationSchedulingEnvs = new HashMap<>(); applicationSchedulingEnvs.put("APPLICATION_PLACEMENT_TYPE_CLASS", LocalityAppPlacementAllocator.class.getName()); AppSchedulingInfo info = new AppSchedulingInfo(appAttemptId, "test", queue, mock(ActiveUsersManager.class), 0, new ResourceUsage(), applicationSchedulingEnvs, rmContext, false); // This should be set from applicationSchedulingEnvs Assert.assertEquals(info.getDefaultResourceRequestAppPlacementType(), LocalityAppPlacementAllocator.class.getName()); }
public static String version() { if (null == VERSION.get()) { String detectedVersion; try { detectedVersion = versionFromJar(); // use unknown version in case exact implementation version can't be found from the jar // (this can happen if the DataStream class appears multiple times in the same classpath // such as with shading) detectedVersion = detectedVersion != null ? detectedVersion : FLINK_UNKNOWN_VERSION; } catch (Exception e) { detectedVersion = FLINK_UNKNOWN_VERSION; } VERSION.set(detectedVersion); } return VERSION.get(); }
@Test public void testVersion() { assertThat(FlinkPackage.version()).isEqualTo("1.19.0"); }
@Nullable static String channelKind(@Nullable Destination destination) { if (destination == null) return null; return isQueue(destination) ? "queue" : "topic"; }
@Test void channelKind_queueAndTopic_topicOnNoQueueName() throws JMSException { QueueAndTopic destination = mock(QueueAndTopic.class); when(destination.getTopicName()).thenReturn("topic-foo"); assertThat(MessageParser.channelKind(destination)) .isEqualTo("topic"); }
protected Map<String, Object> headersToMap(Http2Headers trailers, Supplier<Object> convertUpperHeaderSupplier) { if (trailers == null) { return Collections.emptyMap(); } Map<String, Object> attachments = new HashMap<>(trailers.size()); for (Map.Entry<CharSequence, CharSequence> header : trailers) { String key = header.getKey().toString(); if (key.endsWith(TripleConstant.HEADER_BIN_SUFFIX) && key.length() > TripleConstant.HEADER_BIN_SUFFIX.length()) { try { String realKey = key.substring(0, key.length() - TripleConstant.HEADER_BIN_SUFFIX.length()); byte[] value = StreamUtils.decodeASCIIByte(header.getValue()); attachments.put(realKey, value); } catch (Exception e) { LOGGER.error(PROTOCOL_FAILED_PARSE, "", "", "Failed to parse response attachment key=" + key, e); } } else { attachments.put(key, header.getValue().toString()); } } // try converting upper key Object obj = convertUpperHeaderSupplier.get(); if (obj == null) { return attachments; } if (obj instanceof String) { String json = TriRpcStatus.decodeMessage((String) obj); Map<String, String> map = JsonUtils.toJavaObject(json, Map.class); for (Map.Entry<String, String> entry : map.entrySet()) { Object val = attachments.remove(entry.getKey()); if (val != null) { attachments.put(entry.getValue(), val); } } } else { // If convertUpperHeaderSupplier does not return String, just fail... // Internal invocation, use INTERNAL_ERROR instead. LOGGER.error( INTERNAL_ERROR, "wrong internal invocation", "", "Triple convertNoLowerCaseHeader error, obj is not String"); } return attachments; }
@Test void headersToMap() { AbstractH2TransportListener listener = new AbstractH2TransportListener() { @Override public void onHeader(Http2Headers headers, boolean endStream) {} @Override public void onData(ByteBuf data, boolean endStream) {} @Override public void cancelByRemote(long errorCode) {} }; DefaultHttp2Headers headers = new DefaultHttp2Headers(); headers.scheme(HTTPS.name()).path("/foo.bar").method(HttpMethod.POST.asciiName()); headers.set("foo", "bar"); final Map<String, Object> map = listener.headersToMap(headers, () -> null); Assertions.assertEquals(4, map.size()); }
public V remove(K key) { requireNonNull(key); long h = hash(key); return getSection(h).remove(key, null, (int) h); }
@Test public void testRemove() { ConcurrentOpenHashMap<String, String> map = ConcurrentOpenHashMap.<String, String>newBuilder().build(); assertTrue(map.isEmpty()); assertNull(map.put("1", "one")); assertFalse(map.isEmpty()); assertFalse(map.remove("0", "zero")); assertFalse(map.remove("1", "uno")); assertFalse(map.isEmpty()); assertTrue(map.remove("1", "one")); assertTrue(map.isEmpty()); }
public V put(K key, V value) { return resolve(map.put(key, new WeakReference<>(value))); }
@Test public void testPruneNullEntries() { referenceMap.put(1, "1"); assertPruned(0); referenceMap.put(2, null); assertMapSize(2); assertPruned(1); assertMapSize(1); assertMapDoesNotContainKey(2); assertMapEntryEquals(1, "1"); assertLostCount(1); }
public static String join(CharSequence delimiter, CharSequence... elements) { StringBuilder builder = new StringBuilder(); boolean first = true; for (CharSequence element : elements) { if(first) { first = false; } else { builder.append(delimiter); } builder.append(element); } return builder.toString(); }
@Test public void testJoin(){ assertEquals( "Oracle,PostgreSQL,MySQL,SQL Server", StringUtils.join(",", "Oracle", "PostgreSQL", "MySQL", "SQL Server") ); }
@Override public TypeDefinition build( ProcessingEnvironment processingEnv, DeclaredType type, Map<String, TypeDefinition> typeCache) { String typeName = type.toString(); TypeDefinition typeDefinition = new TypeDefinition(typeName); // Generic Type arguments type.getTypeArguments().stream() .map(typeArgument -> TypeDefinitionBuilder.build( processingEnv, typeArgument, typeCache)) // build the TypeDefinition from typeArgument .filter(Objects::nonNull) .map(TypeDefinition::getType) .forEach(typeDefinition.getItems()::add); // Add into the declared TypeDefinition return typeDefinition; }
@Test void testBuild() { buildAndAssertTypeDefinition( processingEnv, stringsField, "java.util.Collection<java.lang.String>", "java.lang.String", builder); buildAndAssertTypeDefinition( processingEnv, colorsField, "java.util.List<org.apache.dubbo.metadata.annotation.processing.model.Color>", "org.apache.dubbo.metadata.annotation.processing.model.Color", builder); buildAndAssertTypeDefinition( processingEnv, primitiveTypeModelsField, "java.util.Queue<org.apache.dubbo.metadata.annotation.processing.model.PrimitiveTypeModel>", "org.apache.dubbo.metadata.annotation.processing.model.PrimitiveTypeModel", builder); buildAndAssertTypeDefinition( processingEnv, modelsField, "java.util.Deque<org.apache.dubbo.metadata.annotation.processing.model.Model>", "org.apache.dubbo.metadata.annotation.processing.model.Model", builder); buildAndAssertTypeDefinition( processingEnv, modelArraysField, "java.util.Set<org.apache.dubbo.metadata.annotation.processing.model.Model[]>", "org.apache.dubbo.metadata.annotation.processing.model.Model[]", builder); }
public static <K, V> KafkaRecordCoder<K, V> of(Coder<K> keyCoder, Coder<V> valueCoder) { return new KafkaRecordCoder<>(keyCoder, valueCoder); }
@Test public void testCoderIsSerializableWithWellKnownCoderType() { CoderProperties.coderSerializable( KafkaRecordCoder.of(GlobalWindow.Coder.INSTANCE, GlobalWindow.Coder.INSTANCE)); }
public double[] getInitialStateProbabilities() { return pi; }
@Test public void testGetInitialStateProbabilities() { System.out.println("getInitialStateProbabilities"); HMM hmm = new HMM(pi, Matrix.of(a), Matrix.of(b)); double[] result = hmm.getInitialStateProbabilities(); for (int i = 0; i < pi.length; i++) { assertEquals(pi[i], result[i], 1E-7); } }
@Override public List<String> readFilesWithRetries(Sleeper sleeper, BackOff backOff) throws IOException, InterruptedException { IOException lastException = null; do { try { // Match inputPath which may contains glob Collection<Metadata> files = Iterables.getOnlyElement(FileSystems.match(Collections.singletonList(filePattern))) .metadata(); LOG.debug("Found {} file(s) by matching the path: {}", files.size(), filePattern); if (files.isEmpty() || !checkTotalNumOfFiles(files)) { continue; } // Read data from file paths return readLines(files); } catch (IOException e) { // Ignore and retry lastException = e; LOG.warn("Error in file reading. Ignore and retry."); } } while (BackOffUtils.next(sleeper, backOff)); // Failed after max retries throw new IOException( String.format("Unable to read file(s) after retrying %d times", MAX_READ_RETRIES), lastException); }
@Test public void testReadEmpty() throws Exception { File emptyFile = tmpFolder.newFile("result-000-of-001"); Files.asCharSink(emptyFile, StandardCharsets.UTF_8).write(""); NumberedShardedFile shardedFile = new NumberedShardedFile(filePattern); assertThat(shardedFile.readFilesWithRetries(), empty()); }
static S3ResourceId fromUri(String uri) { Matcher m = S3_URI.matcher(uri); checkArgument(m.matches(), "Invalid S3 URI: [%s]", uri); String scheme = m.group("SCHEME"); String bucket = m.group("BUCKET"); String key = Strings.nullToEmpty(m.group("KEY")); if (!key.startsWith("/")) { key = "/" + key; } return fromComponents(scheme, bucket, key); }
@Test public void testInvalidPathNoBucket() { assertThrows( "Invalid S3 URI: [s3://]", IllegalArgumentException.class, () -> S3ResourceId.fromUri("s3://")); }
public static boolean acceptsGzip(Headers headers) { String ae = headers.getFirst(HttpHeaderNames.ACCEPT_ENCODING); return ae != null && ae.contains(HttpHeaderValues.GZIP.toString()); }
@Test void acceptsGzip_only() { Headers headers = new Headers(); headers.add("Accept-Encoding", "deflate"); assertFalse(HttpUtils.acceptsGzip(headers)); }
@Override public Optional<GaugeMetricFamilyMetricsCollector> export(final String pluginType) { if (null == ProxyContext.getInstance().getContextManager()) { return Optional.empty(); } GaugeMetricFamilyMetricsCollector result = MetricsCollectorRegistry.get(config, pluginType); result.cleanMetrics(); MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts(); result.addMetric(Collections.singletonList("database_count"), metaDataContexts.getMetaData().getDatabases().size()); result.addMetric(Collections.singletonList("storage_unit_count"), getStorageUnitCount(metaDataContexts)); return Optional.of(result); }
@Test void assertExportWithContextManager() { ContextManager contextManager = mockContextManager(); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); Optional<GaugeMetricFamilyMetricsCollector> collector = new ProxyMetaDataInfoExporter().export("FIXTURE"); assertTrue(collector.isPresent()); assertThat(collector.get().toString(), is("database_count=1, storage_unit_count=1")); }
@Override public void deleteConfig(Long id) { // Validate that the config exists ConfigDO config = validateConfigExists(id); // Built-in configs are not allowed to be deleted if (ConfigTypeEnum.SYSTEM.getType().equals(config.getType())) { throw exception(CONFIG_CAN_NOT_DELETE_SYSTEM_TYPE); } // Delete configMapper.deleteById(id); }
@Test public void testDeleteConfig_success() { // Mock data ConfigDO dbConfig = randomConfigDO(o -> { o.setType(ConfigTypeEnum.CUSTOM.getType()); // only CUSTOM type configs may be deleted }); configMapper.insert(dbConfig); // @Sql: insert an existing record first // Prepare parameters Long id = dbConfig.getId(); // Invoke configService.deleteConfig(id); // Verify the data no longer exists assertNull(configMapper.selectById(id)); }
@Override public boolean supportsGroupByUnrelated() { return false; }
@Test void assertSupportsGroupByUnrelated() { assertFalse(metaData.supportsGroupByUnrelated()); }
public void transferAllStateDataToDirectory( Collection<StateHandleDownloadSpec> downloadRequests, CloseableRegistry closeableRegistry) throws Exception { // We use this closer for fine-grained shutdown of all parallel downloading. CloseableRegistry internalCloser = new CloseableRegistry(); // Make sure we also react to external close signals. closeableRegistry.registerCloseable(internalCloser); try { // We have to wait for all futures to be completed, to make sure in // case of failure that we will clean up all the files FutureUtils.completeAll( createDownloadRunnables(downloadRequests, internalCloser).stream() .map( runnable -> CompletableFuture.runAsync( runnable, transfer.getExecutorService())) .collect(Collectors.toList())) .get(); } catch (Exception e) { downloadRequests.stream() .map(StateHandleDownloadSpec::getDownloadDestination) .map(Path::toFile) .forEach(FileUtils::deleteDirectoryQuietly); // Error reporting Throwable throwable = ExceptionUtils.stripExecutionException(e); throwable = ExceptionUtils.stripException(throwable, RuntimeException.class); if (throwable instanceof IOException) { throw (IOException) throwable; } else { throw new FlinkRuntimeException("Failed to download data for state handles.", e); } } finally { // Unregister and close the internal closer. if (closeableRegistry.unregisterCloseable(internalCloser)) { IOUtils.closeQuietly(internalCloser); } } }
@Test public void testMultiThreadCleanupOnFailure() throws Exception { int numRemoteHandles = 3; int numSubHandles = 6; byte[][][] contents = createContents(numRemoteHandles, numSubHandles); List<StateHandleDownloadSpec> downloadRequests = new ArrayList<>(numRemoteHandles); for (int i = 0; i < numRemoteHandles; ++i) { downloadRequests.add( createDownloadRequestForContent( temporaryFolder.newFolder().toPath(), contents[i], i)); } IncrementalRemoteKeyedStateHandle stateHandle = downloadRequests.get(downloadRequests.size() - 1).getStateHandle(); // Add a state handle that induces an exception stateHandle .getSharedState() .add( HandleAndLocalPath.of( new ThrowingStateHandle(new IOException("Test exception.")), "error-handle")); CloseableRegistry closeableRegistry = new CloseableRegistry(); try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(5)) { rocksDBStateDownloader.transferAllStateDataToDirectory( downloadRequests, closeableRegistry); fail("Exception is expected"); } catch (IOException ignore) { } // Check that all download directories have been deleted for (StateHandleDownloadSpec downloadRequest : downloadRequests) { Assert.assertFalse(downloadRequest.getDownloadDestination().toFile().exists()); } // The passed in closable registry should not be closed by us on failure. Assert.assertFalse(closeableRegistry.isClosed()); }
public static boolean deleteQuietly(@Nullable File file) { if (file == null) { return false; } return deleteQuietly(file.toPath()); }
@Test public void deleteQuietly_deletes_symbolicLink() throws IOException { assumeTrue(SystemUtils.IS_OS_UNIX); Path folder = temporaryFolder.newFolder().toPath(); Path file1 = Files.createFile(folder.resolve("file1.txt")); Path symLink = Files.createSymbolicLink(folder.resolve("link1"), file1); assertThat(file1).isRegularFile(); assertThat(symLink).isSymbolicLink(); FileUtils.deleteQuietly(symLink.toFile()); assertThat(symLink).doesNotExist(); assertThat(file1).isRegularFile(); }
public UiLinkId uiLinkId() { return uiLinkId; }
@Test public void uiLinkId() { blink = new ConcreteLink(UiLinkId.uiLinkId(RA, RB)); print(blink); assertEquals("non-canon AB", EXP_RA_RB, blink.linkId()); assertNull("key not null", blink.key()); assertNull("one not null", blink.one()); assertNull("two not null", blink.two()); }
public List<InsertBucketCumulativeWeightPair> getInsertBuckets(String partitionPath) { return partitionPathToInsertBucketInfos.get(partitionPath); }
@Test public void testUpsertPartitionerWithRecordsPerBucket() throws Exception { final String testPartitionPath = "2016/09/26"; // Inserts + Updates... Check all updates go together & inserts subsplit UpsertPartitioner partitioner = getUpsertPartitioner(0, 250, 100, 1024, testPartitionPath, false); List<InsertBucketCumulativeWeightPair> insertBuckets = partitioner.getInsertBuckets(testPartitionPath); int insertSplitSize = partitioner.config.getCopyOnWriteInsertSplitSize(); int remainedInsertSize = 250 - 2 * insertSplitSize; // will assigned 3 insertBuckets. 100, 100, 50 each assertEquals(3, insertBuckets.size(), "Total of 3 insert buckets"); assertEquals(0.4, insertBuckets.get(0).getLeft().weight, "insert " + insertSplitSize + " records"); assertEquals(0.4, insertBuckets.get(1).getLeft().weight, "insert " + insertSplitSize + " records"); assertEquals(0.2, insertBuckets.get(2).getLeft().weight, "insert " + remainedInsertSize + " records"); }
@Override public byte[] evaluateResponse(byte[] response) throws SaslException, SaslAuthenticationException { if (response.length == 1 && response[0] == OAuthBearerSaslClient.BYTE_CONTROL_A && errorMessage != null) { log.debug("Received %x01 response from client after it received our error"); throw new SaslAuthenticationException(errorMessage); } errorMessage = null; OAuthBearerClientInitialResponse clientResponse; try { clientResponse = new OAuthBearerClientInitialResponse(response); } catch (SaslException e) { log.debug(e.getMessage()); throw e; } return process(clientResponse.tokenValue(), clientResponse.authorizationId(), clientResponse.extensions()); }
@Test public void throwsAuthenticationExceptionOnInvalidExtensions() { OAuthBearerUnsecuredValidatorCallbackHandler invalidHandler = new OAuthBearerUnsecuredValidatorCallbackHandler() { @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { for (Callback callback : callbacks) { if (callback instanceof OAuthBearerValidatorCallback) { OAuthBearerValidatorCallback validationCallback = (OAuthBearerValidatorCallback) callback; validationCallback.token(new OAuthBearerTokenMock()); } else if (callback instanceof OAuthBearerExtensionsValidatorCallback) { OAuthBearerExtensionsValidatorCallback extensionsCallback = (OAuthBearerExtensionsValidatorCallback) callback; extensionsCallback.error("firstKey", "is not valid"); extensionsCallback.error("secondKey", "is not valid either"); } else throw new UnsupportedCallbackException(callback); } } }; saslServer = new OAuthBearerSaslServer(invalidHandler); Map<String, String> customExtensions = new HashMap<>(); customExtensions.put("firstKey", "value"); customExtensions.put("secondKey", "value"); assertThrows(SaslAuthenticationException.class, () -> saslServer.evaluateResponse(clientInitialResponse(null, false, customExtensions))); }
public B delay(Integer delay) { this.delay = delay; return getThis(); }
@Test void delay() { ServiceBuilder builder = new ServiceBuilder(); builder.delay(1000); Assertions.assertEquals(1000, builder.build().getDelay()); }
public static boolean authenticate(String username, String password) { try { String dn = getUid(username); if (dn != null) { /* Found user - test password */ if ( testBind( dn, password ) ) { if(logger.isDebugEnabled()) logger.debug("user '" + username + "' authentication succeeded"); return true; } else { if(logger.isDebugEnabled()) logger.debug("user '" + username + "' authentication failed"); return false; } } else { if(logger.isDebugEnabled()) logger.debug("user '" + username + "' not found"); return false; } } catch (Exception e) { logger.error("Exception:", e); return false; } }
@Ignore @Test public void testAuthentication() throws Exception { String user = "jduke"; String password = "theduke"; Assert.assertEquals(true, LdapUtil.authenticate(user, password)); }
public RecordType getRecordType(String cuid) { if (cuid == null) throw new NullPointerException(); lazyLoadDefaultConfiguration(); RecordType recordType = recordTypes.get(cuid); return recordType != null ? recordType : RecordType.PRIVATE; }
@Test public void testGetRecordType() { RecordFactory f = new RecordFactory(); assertEquals(RecordType.IMAGE, f.getRecordType(UID.SecondaryCaptureImageStorage)); }
public static long noHeapMemoryUsed() { return noHeapMemoryUsage.getUsed(); }
@Test public void noHeapMemoryUsed() { long memoryUsed = MemoryUtil.noHeapMemoryUsed(); Assert.assertNotEquals(0, memoryUsed); }
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) { return decoder.decodeFunctionResult(rawInput, outputParameters); }
@Test public void testDecodeTupleDynamicStructNested() { String rawInput = "0x0000000000000000000000000000000000000000000000000000000000000060" + "0000000000000000000000000000000000000000000000000000000000000001" + "000000000000000000000000000000000000000000000000000000000000000a" + "0000000000000000000000000000000000000000000000000000000000000040" + "0000000000000000000000000000000000000000000000000000000000000080" + "0000000000000000000000000000000000000000000000000000000000000002" + "6964000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000004" + "6e616d6500000000000000000000000000000000000000000000000000000000"; assertEquals( FunctionReturnDecoder.decode( rawInput, AbiV2TestFixture.getFooBarFunction.getOutputParameters()), Arrays.asList( new AbiV2TestFixture.Foo("id", "name"), new AbiV2TestFixture.Bar(BigInteger.ONE, BigInteger.TEN))); }
@Override public void install(SAContextManager contextManager) { mEncryptAPIImpl = new SAEncryptAPIImpl(contextManager); if (!contextManager.getInternalConfigs().saConfigOptions.isDisableSDK()) { setModuleState(true); } }
@Test public void install() { SAHelper.initSensors(mApplication); SAEncryptProtocolImpl encryptProtocol = new SAEncryptProtocolImpl(); encryptProtocol.install(SensorsDataAPI.sharedInstance(mApplication).getSAContextManager()); }
public PackageRevision latestModificationSince(String pluginId, final com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration packageConfiguration, final RepositoryConfiguration repositoryConfiguration, final PackageRevision previouslyKnownRevision) { return pluginRequestHelper.submitRequest(pluginId, REQUEST_LATEST_REVISION_SINCE, new DefaultPluginInteractionCallback<>() { @Override public String requestBody(String resolvedExtensionVersion) { return messageConverter(resolvedExtensionVersion).requestMessageForLatestRevisionSince(packageConfiguration, repositoryConfiguration, previouslyKnownRevision); } @Override public PackageRevision onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) { return messageConverter(resolvedExtensionVersion).responseMessageForLatestRevisionSince(responseBody); } }); }
@Test public void shouldTalkToPluginToGetLatestModificationSinceLastRevision() throws Exception { String expectedRequestBody = "{\"repository-configuration\":{\"key-one\":{\"value\":\"value-one\"},\"key-two\":{\"value\":\"value-two\"}}," + "\"package-configuration\":{\"key-three\":{\"value\":\"value-three\"},\"key-four\":{\"value\":\"value-four\"}}," + "\"previous-revision\":{\"revision\":\"abc.rpm\",\"timestamp\":\"2011-07-13T19:43:37.100Z\",\"data\":{\"dataKeyOne\":\"data-value-one\",\"dataKeyTwo\":\"data-value-two\"}}}"; String expectedResponseBody = "{\"revision\":\"abc.rpm\",\"timestamp\":\"2011-07-14T19:43:37.100Z\",\"user\":\"some-user\",\"revisionComment\":\"comment\"," + "\"trackbackUrl\":\"http:\\\\localhost:9999\",\"data\":{\"dataKeyOne\":\"data-value-one\",\"dataKeyTwo\":\"data-value-two\"}}"; Date timestamp = new SimpleDateFormat(DATE_FORMAT).parse("2011-07-13T19:43:37.100Z"); Map<String, String> data = new LinkedHashMap<>(); data.put("dataKeyOne", "data-value-one"); data.put("dataKeyTwo", "data-value-two"); PackageRevision previouslyKnownRevision = new PackageRevision("abc.rpm", timestamp, "someuser", "comment", null, data); when(pluginManager.isPluginOfType(PACKAGE_MATERIAL_EXTENSION, PLUGIN_ID)).thenReturn(true); when(pluginManager.submitTo(eq(PLUGIN_ID), eq(PACKAGE_MATERIAL_EXTENSION), requestArgumentCaptor.capture())).thenReturn(DefaultGoPluginApiResponse.success(expectedResponseBody)); PackageRevision packageRevision = extension.latestModificationSince(PLUGIN_ID, packageConfiguration, repositoryConfiguration, previouslyKnownRevision); assertRequest(requestArgumentCaptor.getValue(), PACKAGE_MATERIAL_EXTENSION, "1.0", PackageRepositoryExtension.REQUEST_LATEST_REVISION_SINCE, expectedRequestBody); assertPackageRevision(packageRevision, "abc.rpm", "some-user", "2011-07-14T19:43:37.100Z", "comment", "http:\\localhost:9999"); }
@Override public Boolean addIfExists(double longitude, double latitude, V member) { return get(addIfExistsAsync(longitude, latitude, member)); }
@Test public void testAddIfExists() { RGeo<String> geo = redisson.getGeo("test"); assertThat(geo.add(2.51, 3.12, "city1")).isEqualTo(1); assertThat(geo.addIfExists(2.9, 3.9, "city1")).isTrue(); Map<String, GeoPosition> pos = geo.pos("city1"); System.out.println("" + pos.get("city1")); assertThat(pos.get("city1").getLatitude()).isBetween(3.8, 3.9); assertThat(pos.get("city1").getLongitude()).isBetween(2.8, 3.0); assertThat(geo.addIfExists(2.12, 3.5, "city2")).isFalse(); }
@Subscribe public void onChatMessage(ChatMessage event) { if (event.getType() != ChatMessageType.SPAM) { return; } if (event.getMessage().startsWith("You retrieve a bar of")) { if (session == null) { session = new SmeltingSession(); } session.increaseBarsSmelted(); } else if (event.getMessage().endsWith(" to form 8 cannonballs.")) { cannonBallsMade = 8; } else if (event.getMessage().endsWith(" to form 4 cannonballs.")) { cannonBallsMade = 4; } else if (event.getMessage().startsWith("You remove the cannonballs from the mould")) { if (session == null) { session = new SmeltingSession(); } session.increaseCannonBallsSmelted(cannonBallsMade); } }
@Test public void testBars() { ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.SPAM, "", SMELT_BAR, "", 0); smeltingPlugin.onChatMessage(chatMessage); SmeltingSession smeltingSession = smeltingPlugin.getSession(); assertNotNull(smeltingSession); assertEquals(1, smeltingSession.getBarsSmelted()); }
protected boolean evaluation(Object rawValue) { String stringValue = (String) ConverterTypeUtil.convert(String.class, rawValue); Object convertedValue = arrayType.getValue(stringValue); switch (inNotIn) { case IN: return values.contains(convertedValue); case NOT_IN: return !values.contains(convertedValue); default: throw new KiePMMLException("Unknown IN_NOTIN" + inNotIn); } }
@Test void evaluationStringIn() { ARRAY_TYPE arrayType = ARRAY_TYPE.STRING; List<Object> values = getObjects(arrayType, 1); KiePMMLSimpleSetPredicate kiePMMLSimpleSetPredicate = getKiePMMLSimpleSetPredicate(values, arrayType, IN_NOTIN.IN); assertThat(kiePMMLSimpleSetPredicate.evaluation("NOT")).isFalse(); assertThat(kiePMMLSimpleSetPredicate.evaluation(values.get(0))).isTrue(); }
@Override public void afterPropertiesSet() { Collection<DataChangedListener> listenerBeans = applicationContext.getBeansOfType(DataChangedListener.class).values(); this.listeners = Collections.unmodifiableList(new ArrayList<>(listenerBeans)); }
@Test @SuppressWarnings("unchecked") public void afterPropertiesSetTest() { List<DataChangedListener> listeners = (List<DataChangedListener>) ReflectionTestUtils.getField(dataChangedEventDispatcher, "listeners"); assertTrue(listeners.contains(httpLongPollingDataChangedListener)); assertTrue(listeners.contains(nacosDataChangedListener)); assertTrue(listeners.contains(websocketDataChangedListener)); assertTrue(listeners.contains(zookeeperDataChangedListener)); }
public String migrate(String oldJSON, int targetVersion) { LOGGER.debug("Migrating to version {}: {}", targetVersion, oldJSON); Chainr transform = getTransformerFor(targetVersion); Object transformedObject = transform.transform(JsonUtils.jsonToMap(oldJSON), getContextMap(targetVersion)); String transformedJSON = JsonUtils.toJsonString(transformedObject); LOGGER.debug("After migration to version {}: {}", targetVersion, transformedJSON); return transformedJSON; }
@Test void migrateV2ToV3_shouldDoNothingIfFetchExternalArtifactTaskIsConfiguredInV2() { ConfigRepoDocumentMother documentMother = new ConfigRepoDocumentMother(); String oldJSON = documentMother.v2WithFetchExternalArtifactTask(); String newJson = documentMother.v3WithFetchExternalArtifactTask(); String transformedJSON = migrator.migrate(oldJSON, 3); assertThatJson(newJson).isEqualTo(transformedJSON); }
public boolean poll(Timer timer, boolean waitForJoinGroup) { maybeUpdateSubscriptionMetadata(); invokeCompletedOffsetCommitCallbacks(); if (subscriptions.hasAutoAssignedPartitions()) { if (protocol == null) { throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " to empty while trying to subscribe for group protocol to auto assign partitions"); } // Always update the heartbeat last poll time so that the heartbeat thread does not leave the // group proactively due to application inactivity even if (say) the coordinator cannot be found. pollHeartbeat(timer.currentTimeMs()); if (coordinatorUnknownAndUnreadySync(timer)) { return false; } if (rejoinNeededOrPending()) { // due to a race condition between the initial metadata fetch and the initial rebalance, // we need to ensure that the metadata is fresh before joining initially. This ensures // that we have matched the pattern against the cluster's topics at least once before joining. if (subscriptions.hasPatternSubscription()) { // For consumer group that uses pattern-based subscription, after a topic is created, // any consumer that discovers the topic after metadata refresh can trigger rebalance // across the entire consumer group. Multiple rebalances can be triggered after one topic // creation if consumers refresh metadata at vastly different times. We can significantly // reduce the number of rebalances caused by single topic creation by asking consumer to // refresh metadata before re-joining the group as long as the refresh backoff time has // passed. if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) { this.metadata.requestUpdate(true); } if (!client.ensureFreshMetadata(timer)) { return false; } maybeUpdateSubscriptionMetadata(); } // if not wait for join group, we would just use a timer of 0 if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) { // since we may use a different timer in the callee, we'd still need // to update the original timer's current time after the call timer.update(time.milliseconds()); return false; } } } else { // For manually assigned partitions, we do not try to pro-actively lookup coordinator; // instead we only try to refresh metadata when necessary. // If connections to all nodes fail, wakeups triggered while attempting to send fetch // requests result in polls returning immediately, causing a tight loop of polls. Without // the wakeup, poll() with no channels would block for the timeout, delaying re-connection. // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop. if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) { client.awaitMetadataUpdate(timer); } // if there is pending coordinator requests, ensure they have a chance to be transmitted. client.pollNoWakeup(); } maybeAutoCommitOffsetsAsync(timer.currentTimeMs()); return true; }
@Test public void testAutoCommitManualAssignmentCoordinatorUnknown() { try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) { subscriptions.assignFromUser(singleton(t1p)); subscriptions.seek(t1p, 100); // no commit initially since coordinator is unknown consumerClient.poll(time.timer(0)); time.sleep(autoCommitIntervalMs); consumerClient.poll(time.timer(0)); // now find the coordinator client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); // sleep only for the retry backoff time.sleep(retryBackoffMs); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE); coordinator.poll(time.timer(Long.MAX_VALUE)); assertFalse(client.hasPendingResponses()); } }
@Override public void recordStateMachineRestarted(StateMachineInstance machineInstance, ProcessContext context) { if (machineInstance != null) { //save to db Date gmtUpdated = new Date(); int effect = executeUpdate(stateLogStoreSqls.getUpdateStateMachineRunningStatusSql(dbType), machineInstance.isRunning(), new Timestamp(gmtUpdated.getTime()), machineInstance.getId(), new Timestamp(machineInstance.getGmtUpdated().getTime())); if (effect < 1) { throw new EngineExecutionException( "StateMachineInstance [id:" + machineInstance.getId() + "] is recovered by an other execution, restart denied", FrameworkErrorCode.OperationDenied); } machineInstance.setGmtUpdated(gmtUpdated); } }
@Test public void testRecordStateMachineRestarted() { DbAndReportTcStateLogStore dbAndReportTcStateLogStore = new DbAndReportTcStateLogStore(); StateMachineInstanceImpl stateMachineInstance = new StateMachineInstanceImpl(); ProcessContextImpl context = new ProcessContextImpl(); context.setVariable(DomainConstants.VAR_NAME_STATEMACHINE_CONFIG, new DbStateMachineConfig()); Assertions.assertThrows(NullPointerException.class, () -> dbAndReportTcStateLogStore.recordStateMachineRestarted(stateMachineInstance, context)); }
public GsubData getGsubData() { return gsubData; }
@Test void testGetGsubData() throws IOException { // given RandomAccessReadBuffer randomAccessReadBuffer = new RandomAccessReadBuffer( GSUBTableDebugger.class.getResourceAsStream("/ttf/Lohit-Bengali.ttf")); RandomAccessReadDataStream randomAccessReadBufferDataStream = new RandomAccessReadDataStream( randomAccessReadBuffer); randomAccessReadBufferDataStream.seek(DATA_POSITION_FOR_GSUB_TABLE); GlyphSubstitutionTable testClass = new GlyphSubstitutionTable(); // when testClass.read(null, randomAccessReadBufferDataStream); // then GsubData gsubData = testClass.getGsubData(); assertNotNull(gsubData); assertNotEquals(GsubData.NO_DATA_FOUND, gsubData); assertEquals(Language.BENGALI, gsubData.getLanguage()); assertEquals("bng2", gsubData.getActiveScriptName()); assertEquals(new HashSet<>(EXPECTED_FEATURE_NAMES), gsubData.getSupportedFeatures()); String templatePathToFile = "/gsub/lohit_bengali/bng2/%s.txt"; for (String featureName : EXPECTED_FEATURE_NAMES) { System.out.println("******* Testing feature: " + featureName); Map<List<Integer>, List<Integer>> expectedGsubTableRawData = getExpectedGsubTableRawData( String.format(templatePathToFile, featureName)); ScriptFeature scriptFeature = new MapBackedScriptFeature(featureName, expectedGsubTableRawData); assertEquals(scriptFeature, gsubData.getFeature(featureName)); } }
public long getUnknownLen() { return unknown_len; }
@Test public void getUnknownLen() { assertEquals(TestParameters.VP_UNKNOWN_LEN, chmItsfHeader.getUnknownLen()); }
public void clean(final Date now) { List<String> files = this.findFiles(); List<String> expiredFiles = this.filterFiles(files, this.createExpiredFileFilter(now)); for (String f : expiredFiles) { this.delete(new File(f)); } if (this.totalSizeCap != CoreConstants.UNBOUNDED_TOTAL_SIZE_CAP && this.totalSizeCap > 0) { this.capTotalSize(files); } List<String> emptyDirs = this.findEmptyDirs(); for (String dir : emptyDirs) { this.delete(new File(dir)); } }
@Test public void removesOlderFilesThatExceedTotalSizeCap() { setupSizeCapTest(); remover.clean(EXPIRY); for (File f : Arrays.asList(expiredFiles).subList(MAX_HISTORY - NUM_FILES_TO_KEEP, expiredFiles.length)) { verify(fileProvider).deleteFile(f); } }
public static <T> CheckedFunction0<T> recover(CheckedFunction0<T> function, CheckedFunction1<Throwable, T> exceptionHandler) { return () -> { try { return function.apply(); } catch (Throwable throwable) { return exceptionHandler.apply(throwable); } }; }
@Test(expected = RuntimeException.class) public void shouldRethrowException() throws Throwable { CheckedFunction0<String> callable = () -> { throw new IOException("BAM!"); }; CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.recover(callable, (ex) -> { throw new RuntimeException(); }); callableWithRecovery.apply(); }
@Override public String toString() { StringBuilder b = new StringBuilder(); if (StringUtils.isNotBlank(protocol)) { b.append(protocol); b.append("://"); } if (StringUtils.isNotBlank(host)) { b.append(host); } if (!isPortDefault() && port != -1) { b.append(':'); b.append(port); } if (StringUtils.isNotBlank(path)) { // If no scheme/host/port, leave the path as is if (b.length() > 0 && !path.startsWith("/")) { b.append('/'); } b.append(encodePath(path)); } if (queryString != null && !queryString.isEmpty()) { b.append(queryString.toString()); } if (fragment != null) { b.append("#"); b.append(encodePath(fragment)); } return b.toString(); }
@Test public void testRelativeURLs() { s = "./blah"; t = "./blah"; assertEquals(t, new HttpURL(s).toString()); s = "/blah"; t = "/blah"; assertEquals(t, new HttpURL(s).toString()); s = "blah?param=value#frag"; t = "blah?param=value#frag"; assertEquals(t, new HttpURL(s).toString()); }
@Override public void onIssue(Component component, DefaultIssue issue) { if (component.getType() != Component.Type.FILE || component.getUuid().equals(issue.componentUuid())) { return; } Optional<OriginalFile> originalFileOptional = movedFilesRepository.getOriginalFile(component); checkState(originalFileOptional.isPresent(), "Issue %s for component %s has a different component key but no original file exist in MovedFilesRepository", issue, component); OriginalFile originalFile = originalFileOptional.get(); String fileUuid = originalFile.uuid(); checkState(fileUuid.equals(issue.componentUuid()), "Issue %s doesn't belong to file %s registered as original file of current file %s", issue, fileUuid, component); // changes the issue's component uuid, and set issue as changed, to enforce it is persisted to DB issueUpdater.setIssueComponent(issue, component.getUuid(), component.getKey(), new Date(analysisMetadataHolder.getAnalysisDate())); }
@Test public void onIssue_does_not_alter_issue_if_component_is_not_a_file() { DefaultIssue issue = mock(DefaultIssue.class); underTest.onIssue(ReportComponent.builder(Component.Type.DIRECTORY, 1).build(), issue); verifyNoInteractions(issue); }
public static boolean parse(final String str, ResTable_config out) { return parse(str, out, true); }
@Test public void parse_navigation_wheel() { ResTable_config config = new ResTable_config(); ConfigDescription.parse("wheel", config); assertThat(config.navigation).isEqualTo(NAVIGATION_WHEEL); }
public MeterProducer(MetricsEndpoint endpoint) { super(endpoint); }
@Test public void testMeterProducer() { assertThat(producer, is(notNullValue())); assertThat(producer.getEndpoint(), is(equalTo(endpoint))); }
public static NameMapping of(MappedField... fields) { return new NameMapping(MappedFields.of(ImmutableList.copyOf(fields))); }
@Test public void testAllowsDuplicateNamesInSeparateContexts() { new NameMapping( MappedFields.of( MappedField.of(1, "x", MappedFields.of(MappedField.of(3, "x"))), MappedField.of(2, "y", MappedFields.of(MappedField.of(4, "x"))))); }
public synchronized void changeBrokerRole(final Long newMasterBrokerId, final String newMasterAddress, final Integer newMasterEpoch, final Integer syncStateSetEpoch, final Set<Long> syncStateSet) throws Exception { if (newMasterBrokerId != null && newMasterEpoch > this.masterEpoch) { if (newMasterBrokerId.equals(this.brokerControllerId)) { changeToMaster(newMasterEpoch, syncStateSetEpoch, syncStateSet); } else { changeToSlave(newMasterAddress, newMasterEpoch, newMasterBrokerId); } } }
@Test public void changeBrokerRoleTest() { HashSet<Long> syncStateSetA = new HashSet<>(); syncStateSetA.add(BROKER_ID_1); HashSet<Long> syncStateSetB = new HashSet<>(); syncStateSetB.add(BROKER_ID_2); // not equal to localAddress Assertions.assertThatCode(() -> replicasManager.changeBrokerRole(BROKER_ID_2, NEW_MASTER_ADDRESS, NEW_MASTER_EPOCH, OLD_MASTER_EPOCH, syncStateSetB)) .doesNotThrowAnyException(); // equal to localAddress Assertions.assertThatCode(() -> replicasManager.changeBrokerRole(BROKER_ID_1, OLD_MASTER_ADDRESS, NEW_MASTER_EPOCH, OLD_MASTER_EPOCH, syncStateSetA)) .doesNotThrowAnyException(); }
@Override public <T> T serialize(final Serializer<T> dict) { dict.setStringForKey(String.valueOf(type), "Type"); dict.setStringForKey(this.getAbsolute(), "Remote"); if(symlink != null) { dict.setObjectForKey(symlink, "Symbolic Link"); } dict.setObjectForKey(attributes, "Attributes"); return dict.getSerialized(); }
@Test public void testDictionaryDirectory() { Path path = new Path("/path", EnumSet.of(Path.Type.directory)); assertEquals(path, new PathDictionary<>().deserialize(path.serialize(SerializerFactory.get()))); }