focal_method | test_case |
---|---|
public boolean isUnion() {
return this instanceof UnionSchema;
} | @Test
void isUnionOnRecord() {
Schema schema = createDefaultRecord();
assertFalse(schema.isUnion());
} |
public IsJson(Matcher<? super ReadContext> jsonMatcher) {
this.jsonMatcher = jsonMatcher;
} | @Test
public void shouldMatchOnJsonObject() {
assertThat("{ \"hi\" : \"there\" }", isJson());
} |
@Override
public void delete(final Host bookmark) throws BackgroundException {
{
final String account = TripleCryptKeyPair.toServiceName(bookmark, UserKeyPair.Version.RSA2048);
if(log.isDebugEnabled()) {
log.debug(String.format("Delete credentials for %s in keychain %s", account, keychain));
}
keychain.deletePassword(account, TripleCryptKeyPair.toAccountName(bookmark));
}
{
final String account = TripleCryptKeyPair.toServiceName(bookmark, UserKeyPair.Version.RSA4096);
if(log.isDebugEnabled()) {
log.debug(String.format("Delete credentials for %s in keychain %s", account, keychain));
}
keychain.deletePassword(account, TripleCryptKeyPair.toAccountName(bookmark));
}
} | @Test
public void testDelete() throws Exception {
new TripleCryptCleanupFeature().delete(session.getHost());
} |
static Time toTime(final JsonNode object) {
if (object instanceof NumericNode) {
return returnTimeOrThrow(object.asLong());
}
if (object instanceof TextNode) {
try {
return returnTimeOrThrow(Long.parseLong(object.textValue()));
} catch (final NumberFormatException e) {
throw failedStringCoercionException(SqlBaseType.TIME);
}
}
throw invalidConversionException(object, SqlBaseType.TIME);
} | @Test
public void shouldNotConvertNegativeStringToTime() {
try {
JsonSerdeUtils.toTime(JsonNodeFactory.instance.textNode("-5"));
} catch (Exception e) {
assertThat(e.getMessage(), equalTo("Time values must use number of milliseconds greater than 0 and less than 86400000."));
}
} |
public SortedMap<String, HealthCheck.Result> runHealthChecks() {
return runHealthChecks(HealthCheckFilter.ALL);
} | @Test
public void runsRegisteredHealthChecksWithNonMatchingFilter() {
final Map<String, HealthCheck.Result> results = registry.runHealthChecks((name, healthCheck) -> false);
assertThat(results).isEmpty();
} |
@Override
public void convert(File v2SegmentDirectory)
throws Exception {
Preconditions.checkNotNull(v2SegmentDirectory, "Segment directory should not be null");
Preconditions.checkState(v2SegmentDirectory.exists() && v2SegmentDirectory.isDirectory(),
"Segment directory: " + v2SegmentDirectory + " must exist and should be a directory");
LOGGER.info("Converting segment: {} to v3 format", v2SegmentDirectory);
// check existing segment version
SegmentMetadataImpl v2Metadata = new SegmentMetadataImpl(v2SegmentDirectory);
SegmentVersion oldVersion = v2Metadata.getVersion();
Preconditions.checkState(oldVersion != SegmentVersion.v3, "Segment %s is already in v3 format but at wrong path",
v2Metadata.getName());
Preconditions.checkArgument(oldVersion == SegmentVersion.v1 || oldVersion == SegmentVersion.v2,
"Can not convert segment version: %s at path: %s ", oldVersion, v2SegmentDirectory);
deleteStaleConversionDirectories(v2SegmentDirectory);
File v3TempDirectory = v3ConversionTempDirectory(v2SegmentDirectory);
setDirectoryPermissions(v3TempDirectory);
createMetadataFile(v2SegmentDirectory, v3TempDirectory);
copyCreationMetadataIfExists(v2SegmentDirectory, v3TempDirectory);
copyIndexData(v2SegmentDirectory, v2Metadata, v3TempDirectory);
File newLocation = SegmentDirectoryPaths.segmentDirectoryFor(v2SegmentDirectory, SegmentVersion.v3);
LOGGER.info("v3 segment location for segment: {} is {}", v2Metadata.getName(), newLocation);
v3TempDirectory.renameTo(newLocation);
deleteV2Files(v2SegmentDirectory);
} | @Test
public void testConvert()
throws Exception {
SegmentMetadataImpl beforeConversionMeta = new SegmentMetadataImpl(_segmentDirectory);
SegmentV1V2ToV3FormatConverter converter = new SegmentV1V2ToV3FormatConverter();
converter.convert(_segmentDirectory);
File v3Location = SegmentDirectoryPaths.segmentDirectoryFor(_segmentDirectory, SegmentVersion.v3);
Assert.assertTrue(v3Location.exists());
Assert.assertTrue(v3Location.isDirectory());
SegmentMetadataImpl metadata = new SegmentMetadataImpl(v3Location);
Assert.assertEquals(metadata.getVersion(), SegmentVersion.v3);
Assert.assertTrue(new File(v3Location, V1Constants.SEGMENT_CREATION_META).exists());
FileTime afterConversionTime = Files.getLastModifiedTime(v3Location.toPath());
// verify that the segment loads correctly. This is a necessary, sufficient and
// foolproof way to ensure that the segment is correctly translated
IndexSegment indexSegment = ImmutableSegmentLoader.load(_segmentDirectory, _v3IndexLoadingConfig, null, false);
Assert.assertNotNull(indexSegment);
Assert.assertEquals(indexSegment.getSegmentName(), metadata.getName());
Assert.assertEquals(indexSegment.getSegmentMetadata().getVersion(), SegmentVersion.v3);
FileTime afterLoadTime = Files.getLastModifiedTime(v3Location.toPath());
Assert.assertEquals(afterConversionTime, afterLoadTime);
// verify that SegmentMetadataImpl loaded from segmentDirectory correctly sets
// metadata information after conversion. This has impacted us while loading
// segments by triggering download. That's costly. That's also difficult to test
Assert.assertFalse(new File(_segmentDirectory, V1Constants.MetadataKeys.METADATA_FILE_NAME).exists());
SegmentMetadataImpl metaAfterConversion = new SegmentMetadataImpl(_segmentDirectory);
Assert.assertNotNull(metaAfterConversion);
Assert.assertFalse(metaAfterConversion.getCrc().equalsIgnoreCase(String.valueOf(Long.MIN_VALUE)));
Assert.assertEquals(metaAfterConversion.getCrc(), beforeConversionMeta.getCrc());
Assert.assertTrue(metaAfterConversion.getIndexCreationTime() != Long.MIN_VALUE);
Assert.assertEquals(metaAfterConversion.getIndexCreationTime(), beforeConversionMeta.getIndexCreationTime());
} |
@SneakyThrows({InterruptedException.class, ExecutionException.class})
@Override
public void persist(final String key, final String value) {
buildParentPath(key);
client.getKVClient().put(ByteSequence.from(key, StandardCharsets.UTF_8), ByteSequence.from(value, StandardCharsets.UTF_8)).get();
} | @Test
void assertPersist() {
repository.persist("key1", "value1");
verify(kv).put(any(ByteSequence.class), any(ByteSequence.class));
} |
@Override
public ConnectionProperties parse(final String url, final String username, final String catalog) {
JdbcUrl jdbcUrl = new StandardJdbcUrlParser().parse(url);
return new StandardConnectionProperties(jdbcUrl.getHostname(), jdbcUrl.getPort(DEFAULT_PORT),
null == catalog ? jdbcUrl.getDatabase() : catalog, null, jdbcUrl.getQueryProperties(), buildDefaultQueryProperties());
} | @Test
void assertNewConstructorFailure() {
assertThrows(UnrecognizedDatabaseURLException.class, () -> parser.parse("jdbc:mysql:xxxxxxxx", null, null));
} |
@Override
public boolean test(final String resourceName) {
return resourceName.matches(blackList);
} | @SuppressWarnings("ResultOfMethodCallIgnored")
@Test
public void shouldNotBlacklistAnythingIfBlacklistFileIsEmpty() {
final Blacklist blacklist = new Blacklist(this.blacklistFile);
assertFalse(blacklist.test("java.lang.Process"));
assertFalse(blacklist.test("java.util.List"));
assertFalse(blacklist.test("java.lang.ProcessEnvironment"));
assertFalse(blacklist.test("java.lang.Class"));
} |
public static PostgreSQLErrorResponsePacket newInstance(final Exception cause) {
Optional<ServerErrorMessage> serverErrorMessage = findServerErrorMessage(cause);
return serverErrorMessage.map(PostgreSQLErrorPacketFactory::createErrorResponsePacket)
.orElseGet(() -> createErrorResponsePacket(SQLExceptionTransformEngine.toSQLException(cause, DATABASE_TYPE)));
} | @Test
void assertRuntimeException() throws ReflectiveOperationException {
PostgreSQLErrorResponsePacket actual = PostgreSQLErrorPacketFactory.newInstance(new RuntimeException("No reason"));
Map<Character, String> fields = (Map<Character, String>) Plugins.getMemberAccessor().get(PostgreSQLErrorResponsePacket.class.getDeclaredField("fields"), actual);
assertThat(fields.get(PostgreSQLErrorResponsePacket.FIELD_TYPE_MESSAGE), is("Unknown exception." + System.lineSeparator() + "More details: java.lang.RuntimeException: No reason"));
} |
@Override
public int hashCode() {
return underlying().hashCode();
} | @Test
public void testHashCode() {
final TreePSet<Object> mock = mock(TreePSet.class);
assertEquals(mock.hashCode(), new PCollectionsImmutableNavigableSet<>(mock).hashCode());
final TreePSet<Object> someOtherMock = mock(TreePSet.class);
assertNotEquals(mock.hashCode(), new PCollectionsImmutableNavigableSet<>(someOtherMock).hashCode());
} |
static byte[] hmacSha512(HMac hmacSha512, byte[] input) {
hmacSha512.reset();
hmacSha512.update(input, 0, input.length);
byte[] out = new byte[64];
hmacSha512.doFinal(out, 0);
return out;
} | @Test
public void testHmac() {
String[] tv = {
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b" +
"0b0b0b0b",
"4869205468657265",
"87aa7cdea5ef619d4ff0b4241a1d6cb0" +
"2379f4e2ce4ec2787ad0b30545e17cde" +
"daa833b7d6b8a702038b274eaea3f4e4" +
"be9d914eeb61f1702e696c203a126854",
"4a656665",
"7768617420646f2079612077616e7420" +
"666f72206e6f7468696e673f",
"164b7a7bfcf819e2e395fbe73b56e0a3" +
"87bd64222e831fd610270cd7ea250554" +
"9758bf75c05a994a6d034f65f8f0e6fd" +
"caeab1a34d4a6b4b636e070a38bce737",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaa",
"dddddddddddddddddddddddddddddddd" +
"dddddddddddddddddddddddddddddddd" +
"dddddddddddddddddddddddddddddddd" +
"dddd",
"fa73b0089d56a284efb0f0756c890be9" +
"b1b5dbdd8ee81a3655f83e33b2279d39" +
"bf3e848279a722c806b485a47e67c807" +
"b946a337bee8942674278859e13292fb",
"0102030405060708090a0b0c0d0e0f10" +
"111213141516171819",
"cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd" +
"cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd" +
"cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd" +
"cdcd",
"b0ba465637458c6990e5a8c5f61d4af7" +
"e576d97ff94b872de76f8050361ee3db" +
"a91ca5c11aa25eb4d679275cc5788063" +
"a5f19741120c4f2de2adebeb10a298dd",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaa",
"54657374205573696e67204c61726765" +
"72205468616e20426c6f636b2d53697a" +
"65204b6579202d2048617368204b6579" +
"204669727374",
"80b24263c7c1a3ebb71493c1dd7be8b4" +
"9b46d1f41b4aeec1121b013783f8f352" +
"6b56d037e05f2598bd0fd2215d6a1e52" +
"95e64f73f63f0aec8b915a985d786598",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
"aaaaaa",
"54686973206973206120746573742075" +
"73696e672061206c6172676572207468" +
"616e20626c6f636b2d73697a65206b65" +
"7920616e642061206c61726765722074" +
"68616e20626c6f636b2d73697a652064" +
"6174612e20546865206b6579206e6565" +
"647320746f2062652068617368656420" +
"6265666f7265206265696e6720757365" +
"642062792074686520484d414320616c" +
"676f726974686d2e",
"e37b6a775dc87dbaa4dfa9f96e5e3ffd" +
"debd71f8867289865df5a32d20cdc944" +
"b6022cac3c4982b10d5eeb55c3e4de15" +
"134676fb6de0446065c97440fa8c6a58"
};
for (int i = 0; i < tv.length; i += 3) {
assertArrayEquals("Case " + i, getBytes(tv, i + 2), HDUtils.hmacSha512(getBytes(tv, i), getBytes(tv, i + 1)));
}
} |
@Override
public Optional<ExecuteResult> getSaneQueryResult(final SQLStatement sqlStatement, final SQLException ex) {
if (ER_PARSE_ERROR == ex.getErrorCode()) {
return Optional.empty();
}
if (sqlStatement instanceof SelectStatement) {
return createQueryResult((SelectStatement) sqlStatement);
}
if (sqlStatement instanceof MySQLShowOtherStatement) {
return Optional.of(createQueryResult());
}
if (sqlStatement instanceof MySQLSetStatement) {
return Optional.of(new UpdateResult(0, 0L));
}
return Optional.empty();
} | @Test
void assertGetSaneQueryResultForSetStatement() {
Optional<ExecuteResult> actual = new MySQLDialectSaneQueryResultEngine().getSaneQueryResult(new MySQLSetStatement(), new SQLException(""));
assertTrue(actual.isPresent());
assertThat(actual.get(), instanceOf(UpdateResult.class));
} |
public static String getFullUrl(HttpServletRequest request) {
if (request.getQueryString() == null) {
return request.getRequestURI();
}
return request.getRequestURI() + "?" + request.getQueryString();
} | @Test
void formatsBasicURIs() throws Exception {
assertThat(Servlets.getFullUrl(request))
.isEqualTo("/one/two");
} |
@Override
@ManagedOperation(description = "Remove the key from the store")
public boolean remove(String key) {
cache.remove(key);
return true;
} | @Test
void testRemove() {
// add key to remove
assertTrue(repo.add(key01));
assertTrue(repo.add(key02));
assertTrue(cache.containsKey(key01));
assertTrue(cache.containsKey(key02));
// clear repo
repo.clear();
assertFalse(cache.containsKey(key01));
assertFalse(cache.containsKey(key02));
} |
public static IdGenerator decrementingLongs() {
AtomicLong longs = new AtomicLong();
return () -> Long.toString(longs.decrementAndGet());
} | @Test
public void decrementing() {
IdGenerator gen = IdGenerators.decrementingLongs();
assertThat(gen.getId(), equalTo("-1"));
assertThat(gen.getId(), equalTo("-2"));
} |
@Override
public RemotingCommand invokeSync(String addr, final RemotingCommand request, long timeoutMillis)
throws InterruptedException, RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException {
long beginStartTime = System.currentTimeMillis();
final Channel channel = this.getAndCreateChannel(addr);
String channelRemoteAddr = RemotingHelper.parseChannelRemoteAddr(channel);
if (channel != null && channel.isActive()) {
long left = timeoutMillis;
try {
long costTime = System.currentTimeMillis() - beginStartTime;
left -= costTime;
if (left <= 0) {
throw new RemotingTimeoutException("invokeSync call the addr[" + channelRemoteAddr + "] timeout");
}
RemotingCommand response = this.invokeSyncImpl(channel, request, left);
updateChannelLastResponseTime(addr);
return response;
} catch (RemotingSendRequestException e) {
LOGGER.warn("invokeSync: send request exception, so close the channel[{}]", channelRemoteAddr);
this.closeChannel(addr, channel);
throw e;
} catch (RemotingTimeoutException e) {
// avoid closing the successfully obtained channel if the remaining timeout is small, since getting the channel may have taken too long, leaving little timeout for the read
boolean shouldClose = left > MIN_CLOSE_TIMEOUT_MILLIS || left > timeoutMillis / 4;
if (nettyClientConfig.isClientCloseSocketIfTimeout() && shouldClose) {
this.closeChannel(addr, channel);
LOGGER.warn("invokeSync: close socket because of timeout, {}ms, {}", timeoutMillis, channelRemoteAddr);
}
LOGGER.warn("invokeSync: wait response timeout exception, the channel[{}]", channelRemoteAddr);
throw e;
}
} else {
this.closeChannel(addr, channel);
throw new RemotingConnectException(addr);
}
} | @Test
public void testInvokeSync() throws RemotingSendRequestException, RemotingTimeoutException, InterruptedException {
remotingClient.registerRPCHook(rpcHookMock);
Channel channel = new LocalChannel();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.PULL_MESSAGE, null);
RemotingCommand response = RemotingCommand.createResponseCommand(null);
response.setCode(ResponseCode.SUCCESS);
ResponseFuture responseFuture = new ResponseFuture(channel, request.getOpaque(), request, 1000, new InvokeCallback() {
@Override
public void operationComplete(ResponseFuture responseFuture) {
}
}, new SemaphoreReleaseOnlyOnce(new Semaphore(1)));
responseFuture.setResponseCommand(response);
CompletableFuture<ResponseFuture> future = new CompletableFuture<>();
future.complete(responseFuture);
doReturn(future).when(remotingClient).invoke0(any(Channel.class), any(RemotingCommand.class), anyLong());
RemotingCommand actual = remotingClient.invokeSyncImpl(channel, request, 1000);
assertThat(actual).isEqualTo(response);
verify(rpcHookMock).doBeforeRequest(anyString(), eq(request));
verify(rpcHookMock).doAfterResponse(anyString(), eq(request), eq(response));
} |
@Override
public Optional<SimpleAddress> selectAddress(Optional<String> addressSelectionContext)
{
if (addressSelectionContext.isPresent()) {
return addressSelectionContext
.map(HostAndPort::fromString)
.map(SimpleAddress::new);
}
List<HostAndPort> catalogServers = internalNodeManager.getCatalogServers().stream()
.filter(node -> node.getThriftPort().isPresent())
.map(catalogServerNode -> {
HostAddress hostAndPort = catalogServerNode.getHostAndPort();
return HostAndPort.fromParts(hostAndPort.getHostText(), catalogServerNode.getThriftPort().getAsInt());
})
.collect(toImmutableList());
return hostSelector.apply(catalogServers).map(SimpleAddress::new);
} | @Test
public void testAddressSelectionContextPresent()
{
InMemoryNodeManager internalNodeManager = new InMemoryNodeManager();
RandomCatalogServerAddressSelector selector = new RandomCatalogServerAddressSelector(internalNodeManager);
HostAndPort hostAndPort = HostAndPort.fromParts("abc", 123);
Optional<SimpleAddressSelector.SimpleAddress> address = selector.selectAddress(Optional.of(hostAndPort.toString()));
assertTrue(address.isPresent());
assertEquals(address.get().getHostAndPort(), hostAndPort);
} |
@Override
public void onSwipeRight(boolean twoFingers) {} | @Test
public void testOnSwipeRight() {
mUnderTest.onSwipeRight(true);
Mockito.verifyZeroInteractions(mMockParentListener, mMockKeyboardDismissAction);
} |
public static <T> JSONSchema<T> of(SchemaDefinition<T> schemaDefinition) {
SchemaReader<T> reader = schemaDefinition.getSchemaReaderOpt()
.orElseGet(() -> new JacksonJsonReader<>(jsonMapper(), schemaDefinition.getPojo()));
SchemaWriter<T> writer = schemaDefinition.getSchemaWriterOpt()
.orElseGet(() -> new JacksonJsonWriter<>(jsonMapper()));
return new JSONSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.JSON), schemaDefinition.getPojo(),
reader, writer);
} | @Test(expectedExceptions = SchemaSerializationException.class)
public void testAllowNullDecodeWithInvalidContent() {
JSONSchema<Foo> jsonSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());
jsonSchema.decode(new byte[0]);
} |
@Operation(summary = "Gets the status of ongoing database migrations, if any", description = "Return the detailed status of ongoing database migrations" +
" including starting date. If no migration is ongoing or needed it is still possible to call this endpoint and receive appropriate information.")
@GetMapping
public DatabaseMigrationsResponse getStatus() {
Optional<Long> currentVersion = databaseVersion.getVersion();
checkState(currentVersion.isPresent(), NO_CONNECTION_TO_DB);
DatabaseVersion.Status status = databaseVersion.getStatus();
if (status == DatabaseVersion.Status.UP_TO_DATE || status == DatabaseVersion.Status.REQUIRES_DOWNGRADE) {
return new DatabaseMigrationsResponse(databaseMigrationState);
} else if (!database.getDialect().supportsMigration()) {
return new DatabaseMigrationsResponse(DatabaseMigrationState.Status.STATUS_NOT_SUPPORTED);
} else {
return switch (databaseMigrationState.getStatus()) {
case RUNNING, FAILED, SUCCEEDED -> new DatabaseMigrationsResponse(databaseMigrationState);
case NONE -> new DatabaseMigrationsResponse(DatabaseMigrationState.Status.MIGRATION_REQUIRED);
default -> throw new IllegalArgumentException(UNSUPPORTED_DATABASE_MIGRATION_STATUS);
};
}
} | @Test
void getStatus_whenDbRequiresUpgradeButDialectIsNotSupported_returnNotSupported() throws Exception {
when(databaseVersion.getStatus()).thenReturn(DatabaseVersion.Status.FRESH_INSTALL);
when(dialect.supportsMigration()).thenReturn(false);
mockMvc.perform(get(DATABASE_MIGRATIONS_ENDPOINT)).andExpectAll(status().isOk(),
content().json("{\"status\":\"NOT_SUPPORTED\",\"message\":\"Upgrade is not supported on embedded database.\"}"));
} |
@Override
public PageResult<JobDO> getJobPage(JobPageReqVO pageReqVO) {
return jobMapper.selectPage(pageReqVO);
} | @Test
public void testGetJobPage() {
// mock data
JobDO dbJob = randomPojo(JobDO.class, o -> {
o.setName("定时任务测试");
o.setHandlerName("handlerName 单元测试");
o.setStatus(JobStatusEnum.INIT.getStatus());
});
jobMapper.insert(dbJob);
// test: name does not match
jobMapper.insert(cloneIgnoreId(dbJob, o -> o.setName("土豆")));
// test: status does not match
jobMapper.insert(cloneIgnoreId(dbJob, o -> o.setStatus(JobStatusEnum.NORMAL.getStatus())));
// test: handlerName does not match
jobMapper.insert(cloneIgnoreId(dbJob, o -> o.setHandlerName(randomString())));
// prepare parameters
JobPageReqVO reqVo = new JobPageReqVO();
reqVo.setName("定时");
reqVo.setStatus(JobStatusEnum.INIT.getStatus());
reqVo.setHandlerName("单元");
// invoke
PageResult<JobDO> pageResult = jobService.getJobPage(reqVo);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbJob, pageResult.getList().get(0));
} |
public Connection updateAttributes(Long id, ConnectionDTO connectionDTO) {
Connection connection = getConnectionById(id);
return connectionMapper.toUpdatedConnection(connection, connectionDTO);
} | @Test
void updateAttributes() {
Connection connectionOld = new Connection();
connectionOld.setName("old");
connectionOld.setStatus(new Status());
Optional<Connection> connectionOptional = Optional.of(connectionOld);
when(connectionRepositoryMock.findById(anyLong())).thenReturn(connectionOptional);
Connection result = connectionServiceMock.updateAttributes(1L, newConnectionDTO());
assertEquals(result.getName(), newConnection().getName());
assertEquals(result.getEntityId(), newConnection().getEntityId());
assertNotNull(result);
} |
@Override
public long getDelay() {
return config.getLong(DELAY_IN_MILISECONDS_PROPERTY).orElse(10_000L);
} | @Test
public void getDelay_returnNumberFromConfig() {
config.put("sonar.server.monitoring.ce.initial.delay", "100000");
long delay = underTest.getDelay();
assertThat(delay).isEqualTo(100_000L);
} |
public Properties getProperties()
{
return properties;
} | @Test
public void testNonEmptyPassword()
throws SQLException
{
PrestoDriverUri parameters = createDriverUri("presto://localhost:8080?password=secret");
assertEquals(parameters.getProperties().getProperty("password"), "secret");
} |
@Override
public Throwable getException() {
return exception;
} | @Test
void testAppResponseWithEmptyStackTraceException() {
Throwable throwable = buildEmptyStackTraceException();
assumeFalse(throwable == null);
AppResponse appResponse = new AppResponse(throwable);
StackTraceElement[] stackTrace = appResponse.getException().getStackTrace();
Assertions.assertNotNull(stackTrace);
Assertions.assertEquals(0, stackTrace.length);
} |
@Override
public String getSubchannelsInfo() {
return "[]";
} | @Test
public void testGetSubchannelsInfo() {
EmptyPicker picker = new EmptyPicker(mock(Status.class));
assertNotNull(picker.getSubchannelsInfo());
} |
@Override
public String getName() {
return FUNCTION_NAME;
} | @Test
public void testCastTransformFunction() {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("CAST(%s AS string)", INT_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
assertEquals(transformFunction.getName(), CastTransformFunction.FUNCTION_NAME);
String[] expectedStringValues = new String[NUM_ROWS];
String[] scalarStringValues = new String[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedStringValues[i] = Integer.toString(_intSVValues[i]);
scalarStringValues[i] = (String) cast(_intSVValues[i], "string");
}
testTransformFunction(transformFunction, expectedStringValues);
assertEquals(expectedStringValues, scalarStringValues);
expression = RequestContextUtils.getExpression(String.format("CAST(CAST(%s as INT) as FLOAT)", FLOAT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
float[] expectedFloatValues = new float[NUM_ROWS];
float[] scalarFloatValues = new float[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedFloatValues[i] = (int) _floatSVValues[i];
scalarFloatValues[i] = (float) cast(cast(_floatSVValues[i], "int"), "float");
}
testTransformFunction(transformFunction, expectedFloatValues);
assertEquals(expectedFloatValues, scalarFloatValues);
expression = RequestContextUtils.getExpression(String.format("CAST(CAST(%s as BOOLEAN) as STRING)", INT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedStringValues[i] = Boolean.toString(_intSVValues[i] != 0);
scalarStringValues[i] = (String) cast(cast(_intSVValues[i], "boolean"), "string");
}
testTransformFunction(transformFunction, expectedStringValues);
assertEquals(expectedStringValues, scalarStringValues);
expression =
RequestContextUtils.getExpression(String.format("CAST(CAST(%s as TIMESTAMP) as STRING)", LONG_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedStringValues[i] = new Timestamp(_longSVValues[i]).toString();
scalarStringValues[i] = (String) cast(cast(_longSVValues[i], "timestamp"), "string");
}
testTransformFunction(transformFunction, expectedStringValues);
assertEquals(expectedStringValues, scalarStringValues);
expression = RequestContextUtils.getExpression(String.format("CAST(CAST(%s as BOOLEAN) as INT)", INT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
int[] expectedIntValues = new int[NUM_ROWS];
int[] scalarIntValues = new int[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedIntValues[i] = _intSVValues[i] != 0 ? 1 : 0;
scalarIntValues[i] = (int) cast(cast(_intSVValues[i], "boolean"), "int");
}
testTransformFunction(transformFunction, expectedIntValues);
assertEquals(expectedIntValues, scalarIntValues);
expression = RequestContextUtils.getExpression(
String.format("CAST(ADD(CAST(%s AS LONG), %s) AS STRING)", DOUBLE_SV_COLUMN, LONG_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedStringValues[i] = Double.toString((double) (long) _doubleSVValues[i] + (double) _longSVValues[i]);
scalarStringValues[i] =
(String) cast((double) (long) cast(_doubleSVValues[i], "long") + (double) _longSVValues[i], "string");
}
testTransformFunction(transformFunction, expectedStringValues);
assertEquals(expectedStringValues, scalarStringValues);
expression = RequestContextUtils.getExpression(
String.format("caSt(cAst(casT(%s as inT) + %s aS sTring) As DouBle)", FLOAT_SV_COLUMN, INT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
double[] expectedDoubleValues = new double[NUM_ROWS];
double[] scalarDoubleValues = new double[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedDoubleValues[i] = (double) (int) _floatSVValues[i] + (double) _intSVValues[i];
scalarDoubleValues[i] =
(double) cast(cast((double) (int) cast(_floatSVValues[i], "int") + (double) _intSVValues[i], "string"),
"double");
}
testTransformFunction(transformFunction, expectedDoubleValues);
assertEquals(expectedDoubleValues, scalarDoubleValues);
expression = RequestContextUtils.getExpression(
String.format("CAST(CAST(%s AS INT) - CAST(%s AS FLOAT) / CAST(%s AS DOUBLE) AS LONG)", DOUBLE_SV_COLUMN,
LONG_SV_COLUMN, INT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
long[] expectedLongValues = new long[NUM_ROWS];
long[] longScalarValues = new long[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedLongValues[i] =
(long) ((double) (int) _doubleSVValues[i] - (double) (float) _longSVValues[i] / (double) _intSVValues[i]);
longScalarValues[i] = (long) cast((double) (int) cast(_doubleSVValues[i], "int")
- (double) (float) cast(_longSVValues[i], "float") / (double) cast(_intSVValues[i], "double"), "long");
}
testTransformFunction(transformFunction, expectedLongValues);
assertEquals(expectedLongValues, longScalarValues);
expression = RequestContextUtils.getExpression(String.format("CAST(%s AS BIG_DECIMAL)", LONG_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
BigDecimal[] expectedBigDecimalValues = new BigDecimal[NUM_ROWS];
BigDecimal[] bigDecimalScalarValues = new BigDecimal[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedBigDecimalValues[i] = BigDecimal.valueOf(_longSVValues[i]);
bigDecimalScalarValues[i] = (BigDecimal) cast(_longSVValues[i], "BIG_DECIMAL");
}
testTransformFunction(transformFunction, expectedBigDecimalValues);
assertEquals(expectedBigDecimalValues, bigDecimalScalarValues);
expression = RequestContextUtils.getExpression(
String.format("CAST(CAST(%s AS DOUBLE) - CAST(%s AS DOUBLE) / CAST(%s AS DOUBLE) AS BIG_DECIMAL)",
BIG_DECIMAL_SV_COLUMN, LONG_SV_COLUMN, INT_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof CastTransformFunction);
expectedBigDecimalValues = new BigDecimal[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedBigDecimalValues[i] = BigDecimal.valueOf(
_bigDecimalSVValues[i].doubleValue() - (double) _longSVValues[i] / (double) _intSVValues[i]);
double d =
(double) cast(_bigDecimalSVValues[i], "double") - (double) cast(_longSVValues[i], "double") / (double) cast(
_intSVValues[i], "double");
bigDecimalScalarValues[i] = (BigDecimal) cast(d, "BIG_DECIMAL");
}
testTransformFunction(transformFunction, expectedBigDecimalValues);
assertEquals(expectedBigDecimalValues, bigDecimalScalarValues);
} |
public static boolean checkpointsMatch(
Collection<CompletedCheckpoint> first, Collection<CompletedCheckpoint> second) {
if (first.size() != second.size()) {
return false;
}
List<Tuple2<Long, JobID>> firstInterestingFields = new ArrayList<>(first.size());
for (CompletedCheckpoint checkpoint : first) {
firstInterestingFields.add(
new Tuple2<>(checkpoint.getCheckpointID(), checkpoint.getJobId()));
}
List<Tuple2<Long, JobID>> secondInterestingFields = new ArrayList<>(second.size());
for (CompletedCheckpoint checkpoint : second) {
secondInterestingFields.add(
new Tuple2<>(checkpoint.getCheckpointID(), checkpoint.getJobId()));
}
return firstInterestingFields.equals(secondInterestingFields);
} | @Test
void testCompareCheckpointsWithSameCheckpointId() {
JobID jobID1 = new JobID();
JobID jobID2 = new JobID();
CompletedCheckpoint checkpoint1 =
new CompletedCheckpoint(
jobID1,
0,
0,
1,
new HashMap<>(),
Collections.emptyList(),
CheckpointProperties.forCheckpoint(
CheckpointRetentionPolicy.RETAIN_ON_FAILURE),
new TestCompletedCheckpointStorageLocation(),
null);
CompletedCheckpoint checkpoint2 =
new CompletedCheckpoint(
jobID2,
0,
0,
1,
new HashMap<>(),
Collections.emptyList(),
CheckpointProperties.forCheckpoint(
CheckpointRetentionPolicy.RETAIN_ON_FAILURE),
new TestCompletedCheckpointStorageLocation(),
null);
List<CompletedCheckpoint> checkpoints1 = new ArrayList<>();
checkpoints1.add(checkpoint1);
List<CompletedCheckpoint> checkpoints2 = new ArrayList<>();
checkpoints2.add(checkpoint2);
assertThat(CompletedCheckpoint.checkpointsMatch(checkpoints1, checkpoints2)).isFalse();
} |
@Override
protected boolean isSecure(String key) {
AuthorizationPluginInfo pluginInfo = this.metadataStore().getPluginInfo(getPluginId());
if (pluginInfo == null
|| pluginInfo.getAuthConfigSettings() == null
|| pluginInfo.getAuthConfigSettings().getConfiguration(key) == null) {
return false;
}
return pluginInfo.getAuthConfigSettings().getConfiguration(key).isSecure();
} | @Test
public void postConstruct_shouldEncryptSecureConfigurations() throws Exception {
PluggableInstanceSettings profileSettings = new PluggableInstanceSettings(List.of(new PluginConfiguration("password", new Metadata(true, true))));
AuthorizationPluginInfo pluginInfo = new AuthorizationPluginInfo(pluginDescriptor("plugin_id"), profileSettings, null, null, null);
store.setPluginInfo(pluginInfo);
SecurityAuthConfig authConfig = new SecurityAuthConfig("id", "plugin_id", new ConfigurationProperty(new ConfigurationKey("password"), new ConfigurationValue("pass")));
authConfig.encryptSecureConfigurations();
assertThat(authConfig.size(), is(1));
assertTrue(authConfig.first().isSecure());
} |
public static void trim(String[] strs) {
if (null == strs) {
return;
}
String str;
for (int i = 0; i < strs.length; i++) {
str = strs[i];
if (null != str) {
strs[i] = trim(str);
}
}
} | @Test
public void trimTest() {
final String blank = " 哈哈 ";
final String trim = StrUtil.trim(blank);
assertEquals("哈哈", trim);
} |
public static RestSettingBuilder get(final String id) {
return get(eq(checkId(id)));
} | @Test
public void should_get_resource_by_id() throws Exception {
Plain resource1 = new Plain();
resource1.code = 1;
resource1.message = "hello";
Plain resource2 = new Plain();
resource2.code = 2;
resource2.message = "world";
server.resource("targets",
get("1").response(json(resource1)),
get("2").response(json(resource2))
);
running(server, () -> {
Plain response1 = getResource("/targets/1");
assertThat(response1.code, is(1));
assertThat(response1.message, is("hello"));
Plain response2 = getResource("/targets/2");
assertThat(response2.code, is(2));
assertThat(response2.message, is("world"));
});
} |
@VisibleForTesting
void validateParentMenu(Long parentId, Long childId) {
if (parentId == null || ID_ROOT.equals(parentId)) {
return;
}
// a menu cannot be set as its own parent
if (parentId.equals(childId)) {
throw exception(MENU_PARENT_ERROR);
}
MenuDO menu = menuMapper.selectById(parentId);
// parent menu does not exist
if (menu == null) {
throw exception(MENU_PARENT_NOT_EXISTS);
}
// the parent menu must be of directory or menu type
if (!MenuTypeEnum.DIR.getType().equals(menu.getType())
&& !MenuTypeEnum.MENU.getType().equals(menu.getType())) {
throw exception(MENU_PARENT_NOT_DIR_OR_MENU);
}
} | @Test
public void testValidateParentMenu_parentNotExist() {
// invoke, and assert the exception
assertServiceException(() -> menuService.validateParentMenu(randomLongId(), null),
MENU_PARENT_NOT_EXISTS);
} |
public static void zipDirectory(File sourceDirectory, File zipFile) throws IOException {
zipDirectory(sourceDirectory, zipFile, false);
} | @Test
public void testEmptySubdirectoryHasZipEntry() throws Exception {
File zipDir = new File(tmpDir, "zip");
File subDirEmpty = new File(zipDir, "subDirEmpty");
assertTrue(subDirEmpty.mkdirs());
ZipFiles.zipDirectory(tmpDir, zipFile);
assertZipOnlyContains("zip/subDirEmpty/");
} |
public static List<ComponentDto> sortComponents(List<ComponentDto> components, ComponentTreeRequest wsRequest, List<MetricDto> metrics,
Table<String, MetricDto, ComponentTreeData.Measure> measuresByComponentUuidAndMetric) {
List<String> sortParameters = wsRequest.getSort();
if (sortParameters == null || sortParameters.isEmpty()) {
return components;
}
boolean isAscending = wsRequest.getAsc();
Map<String, Ordering<ComponentDto>> orderingsBySortField = ImmutableMap.<String, Ordering<ComponentDto>>builder()
.put(NAME_SORT, componentNameOrdering(isAscending))
.put(QUALIFIER_SORT, componentQualifierOrdering(isAscending))
.put(PATH_SORT, componentPathOrdering(isAscending))
.put(METRIC_SORT, metricValueOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric))
.put(METRIC_PERIOD_SORT, metricPeriodOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric))
.build();
String firstSortParameter = sortParameters.get(0);
Ordering<ComponentDto> primaryOrdering = orderingsBySortField.get(firstSortParameter);
if (sortParameters.size() > 1) {
for (int i = 1; i < sortParameters.size(); i++) {
String secondarySortParameter = sortParameters.get(i);
Ordering<ComponentDto> secondaryOrdering = orderingsBySortField.get(secondarySortParameter);
primaryOrdering = primaryOrdering.compound(secondaryOrdering);
}
}
primaryOrdering = primaryOrdering.compound(componentNameOrdering(true));
return primaryOrdering.immutableSortedCopy(components);
} | @Test
void sort_by_numerical_metric_period_5_key() {
components.add(newComponentWithoutSnapshotId("name-without-measure", "qualifier-without-measure", "path-without-measure"));
ComponentTreeRequest wsRequest = newRequest(singletonList(METRIC_SORT), false, NUM_METRIC_KEY).setMetricPeriodSort(5);
List<ComponentDto> result = sortComponents(wsRequest);
assertThat(result).extracting("path")
.containsExactly("path-9", "path-8", "path-7", "path-6", "path-5", "path-4", "path-3", "path-2", "path-1", "path-without-measure");
} |
static DeduplicationResult ensureSingleProducer(
QueryablePipeline pipeline,
Collection<ExecutableStage> stages,
Collection<PipelineNode.PTransformNode> unfusedTransforms) {
RunnerApi.Components.Builder unzippedComponents = pipeline.getComponents().toBuilder();
Multimap<PipelineNode.PCollectionNode, StageOrTransform> pcollectionProducers =
getProducers(pipeline, stages, unfusedTransforms);
Multimap<StageOrTransform, PipelineNode.PCollectionNode> requiresNewOutput =
HashMultimap.create();
// Create a synthetic PCollection for each of these nodes. The transforms in the runner
// portion of the graph that creates them should be replaced in the result components. The
// ExecutableStage must also be rewritten to have updated outputs and transforms.
for (Map.Entry<PipelineNode.PCollectionNode, Collection<StageOrTransform>> collectionProducer :
pcollectionProducers.asMap().entrySet()) {
if (collectionProducer.getValue().size() > 1) {
for (StageOrTransform producer : collectionProducer.getValue()) {
requiresNewOutput.put(producer, collectionProducer.getKey());
}
}
}
Map<ExecutableStage, ExecutableStage> updatedStages = new LinkedHashMap<>();
Map<String, PipelineNode.PTransformNode> updatedTransforms = new LinkedHashMap<>();
Multimap<String, PipelineNode.PCollectionNode> originalToPartial = HashMultimap.create();
for (Map.Entry<StageOrTransform, Collection<PipelineNode.PCollectionNode>>
deduplicationTargets : requiresNewOutput.asMap().entrySet()) {
if (deduplicationTargets.getKey().getStage() != null) {
StageDeduplication deduplication =
deduplicatePCollections(
deduplicationTargets.getKey().getStage(),
deduplicationTargets.getValue(),
unzippedComponents::containsPcollections);
for (Entry<String, PipelineNode.PCollectionNode> originalToPartialReplacement :
deduplication.getOriginalToPartialPCollections().entrySet()) {
originalToPartial.put(
originalToPartialReplacement.getKey(), originalToPartialReplacement.getValue());
unzippedComponents.putPcollections(
originalToPartialReplacement.getValue().getId(),
originalToPartialReplacement.getValue().getPCollection());
}
updatedStages.put(
deduplicationTargets.getKey().getStage(), deduplication.getUpdatedStage());
} else if (deduplicationTargets.getKey().getTransform() != null) {
PTransformDeduplication deduplication =
deduplicatePCollections(
deduplicationTargets.getKey().getTransform(),
deduplicationTargets.getValue(),
unzippedComponents::containsPcollections);
for (Entry<String, PipelineNode.PCollectionNode> originalToPartialReplacement :
deduplication.getOriginalToPartialPCollections().entrySet()) {
originalToPartial.put(
originalToPartialReplacement.getKey(), originalToPartialReplacement.getValue());
unzippedComponents.putPcollections(
originalToPartialReplacement.getValue().getId(),
originalToPartialReplacement.getValue().getPCollection());
}
updatedTransforms.put(
deduplicationTargets.getKey().getTransform().getId(),
deduplication.getUpdatedTransform());
} else {
throw new IllegalStateException(
String.format(
"%s with no %s or %s",
StageOrTransform.class.getSimpleName(),
ExecutableStage.class.getSimpleName(),
PipelineNode.PTransformNode.class.getSimpleName()));
}
}
Set<PipelineNode.PTransformNode> introducedFlattens = new LinkedHashSet<>();
for (Map.Entry<String, Collection<PipelineNode.PCollectionNode>> partialFlattenTargets :
originalToPartial.asMap().entrySet()) {
String flattenId =
SyntheticComponents.uniqueId("unzipped_flatten", unzippedComponents::containsTransforms);
PTransform flattenPartialPCollections =
createFlattenOfPartials(
flattenId, partialFlattenTargets.getKey(), partialFlattenTargets.getValue());
unzippedComponents.putTransforms(flattenId, flattenPartialPCollections);
introducedFlattens.add(PipelineNode.pTransform(flattenId, flattenPartialPCollections));
}
Components components = unzippedComponents.build();
return DeduplicationResult.of(components, introducedFlattens, updatedStages, updatedTransforms);
} | @Test
public void duplicateOverStages() {
/* When multiple stages and a runner-executed transform produce a PCollection, all should be
* replaced with synthetic flattens.
* original graph:
* --> one -> .out \
* red -> .out | -> shared -> .out -> blue -> .out
* --> two -> .out /
*
* fused graph:
* --> [one -> .out -> shared ->] .out
* red -> .out | (shared.out) -> blue -> .out
* --> [two -> .out -> shared ->] .out
*
* deduplicated graph:
* --> [one -> .out -> shared ->] .out:0 \
* red -> .out | -> shared -> .out -> blue ->.out
* --> [two -> .out -> shared ->] .out:1 /
*/
PCollection redOut = PCollection.newBuilder().setUniqueName("red.out").build();
PTransform red =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putOutputs("out", redOut.getUniqueName())
.build();
PCollection oneOut = PCollection.newBuilder().setUniqueName("one.out").build();
PTransform one =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putInputs("in", redOut.getUniqueName())
.putOutputs("out", oneOut.getUniqueName())
.build();
PCollection twoOut = PCollection.newBuilder().setUniqueName("two.out").build();
PTransform two =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putInputs("in", redOut.getUniqueName())
.putOutputs("out", twoOut.getUniqueName())
.build();
PCollection sharedOut = PCollection.newBuilder().setUniqueName("shared.out").build();
PTransform shared =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putInputs("one", oneOut.getUniqueName())
.putInputs("two", twoOut.getUniqueName())
.putOutputs("shared", sharedOut.getUniqueName())
.build();
PCollection blueOut = PCollection.newBuilder().setUniqueName("blue.out").build();
PTransform blue =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putInputs("in", sharedOut.getUniqueName())
.putOutputs("out", blueOut.getUniqueName())
.build();
RunnerApi.Components components =
Components.newBuilder()
.putTransforms("one", one)
.putPcollections(oneOut.getUniqueName(), oneOut)
.putTransforms("two", two)
.putPcollections(twoOut.getUniqueName(), twoOut)
.putTransforms("shared", shared)
.putPcollections(sharedOut.getUniqueName(), sharedOut)
.putTransforms("red", red)
.putPcollections(redOut.getUniqueName(), redOut)
.putTransforms("blue", blue)
.putPcollections(blueOut.getUniqueName(), blueOut)
.build();
ExecutableStage oneStage =
ImmutableExecutableStage.of(
components,
Environment.getDefaultInstance(),
PipelineNode.pCollection(redOut.getUniqueName(), redOut),
ImmutableList.of(),
ImmutableList.of(),
ImmutableList.of(),
ImmutableList.of(
PipelineNode.pTransform("one", one), PipelineNode.pTransform("shared", shared)),
ImmutableList.of(PipelineNode.pCollection(sharedOut.getUniqueName(), sharedOut)),
DEFAULT_WIRE_CODER_SETTINGS);
ExecutableStage twoStage =
ImmutableExecutableStage.of(
components,
Environment.getDefaultInstance(),
PipelineNode.pCollection(redOut.getUniqueName(), redOut),
ImmutableList.of(),
ImmutableList.of(),
ImmutableList.of(),
ImmutableList.of(
PipelineNode.pTransform("two", two), PipelineNode.pTransform("shared", shared)),
ImmutableList.of(PipelineNode.pCollection(sharedOut.getUniqueName(), sharedOut)),
DEFAULT_WIRE_CODER_SETTINGS);
PTransformNode redTransform = PipelineNode.pTransform("red", red);
PTransformNode blueTransform = PipelineNode.pTransform("blue", blue);
QueryablePipeline pipeline = QueryablePipeline.forPrimitivesIn(components);
OutputDeduplicator.DeduplicationResult result =
OutputDeduplicator.ensureSingleProducer(
pipeline,
ImmutableList.of(oneStage, twoStage),
ImmutableList.of(redTransform, blueTransform));
assertThat(result.getIntroducedTransforms(), hasSize(1));
PTransformNode introduced = getOnlyElement(result.getIntroducedTransforms());
assertThat(introduced.getTransform().getOutputsMap().size(), equalTo(1));
assertThat(
getOnlyElement(introduced.getTransform().getOutputsMap().values()),
equalTo(sharedOut.getUniqueName()));
assertThat(
result.getDeduplicatedComponents().getPcollectionsMap().keySet(),
hasItems(introduced.getTransform().getInputsMap().values().toArray(new String[0])));
assertThat(result.getDeduplicatedStages().keySet(), hasSize(2));
List<String> stageOutputs =
result.getDeduplicatedStages().values().stream()
.flatMap(stage -> stage.getOutputPCollections().stream().map(PCollectionNode::getId))
.collect(Collectors.toList());
assertThat(
stageOutputs,
containsInAnyOrder(introduced.getTransform().getInputsMap().values().toArray()));
assertThat(result.getDeduplicatedTransforms().keySet(), empty());
assertThat(
result.getDeduplicatedComponents().getPcollectionsMap().keySet(),
hasItems(stageOutputs.toArray(new String[0])));
assertThat(
result.getDeduplicatedComponents().getTransformsMap(),
hasEntry(introduced.getId(), introduced.getTransform()));
} |
@Override
public RateLimiter rateLimiter(final String name) {
return rateLimiter(name, getDefaultConfig());
} | @Test
public void rateLimiterNewWithNullName() throws Exception {
exception.expect(NullPointerException.class);
exception.expectMessage(NAME_MUST_NOT_BE_NULL);
RateLimiterRegistry registry = new InMemoryRateLimiterRegistry(config);
registry.rateLimiter(null);
} |
public static <T> T newInstanceOrNull(Class<? extends T> clazz, Object... params) {
Constructor<T> constructor = selectMatchingConstructor(clazz, params);
if (constructor == null) {
return null;
}
try {
return constructor.newInstance(params);
} catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
return null;
}
} | @Test
public void newInstanceOrNull_createInstanceWithNoArguments() {
ClassWithNonArgConstructor instance = InstantiationUtils.newInstanceOrNull(ClassWithNonArgConstructor.class);
assertNotNull(instance);
} |
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, final Callback<RestResponse> callback)
{
if (HttpMethod.POST != HttpMethod.valueOf(request.getMethod()))
{
_log.error("POST is expected, but " + request.getMethod() + " received");
callback.onError(RestException.forError(HttpStatus.S_405_METHOD_NOT_ALLOWED.getCode(), "Invalid method"));
return;
}
// Disable server-side latency instrumentation for multiplexed requests
requestContext.putLocalAttr(TimingContextUtil.TIMINGS_DISABLED_KEY_NAME, true);
IndividualRequestMap individualRequests;
try
{
individualRequests = extractIndividualRequests(request);
if (_multiplexerSingletonFilter != null) {
individualRequests = _multiplexerSingletonFilter.filterRequests(individualRequests);
}
}
catch (RestException e)
{
_log.error("Invalid multiplexed request", e);
callback.onError(e);
return;
}
catch (Exception e)
{
_log.error("Invalid multiplexed request", e);
callback.onError(RestException.forError(HttpStatus.S_400_BAD_REQUEST.getCode(), e));
return;
}
// prepare the map of individual responses to be collected
final IndividualResponseMap individualResponses = new IndividualResponseMap(individualRequests.size());
final Map<String, HttpCookie> responseCookies = new HashMap<>();
// all tasks are Void and side effect based, that will be useful when we add streaming
Task<?> requestProcessingTask = createParallelRequestsTask(request, requestContext, individualRequests, individualResponses, responseCookies);
Task<Void> responseAggregationTask = Task.action("send aggregated response", () ->
{
RestResponse aggregatedResponse = aggregateResponses(individualResponses, responseCookies);
callback.onSuccess(aggregatedResponse);
}
);
_engine.run(requestProcessingTask.andThen(responseAggregationTask), MUX_PLAN_CLASS);
} | @Test(dataProvider = "multiplexerConfigurations")
public void testRequestHeaderWhiteListing(MultiplexerRunMode multiplexerRunMode) throws Exception
{
// Validating request header white listing logic
// Create a mockHandler. Make it return different cookies based on the request
SynchronousRequestHandler mockHandler = new SynchronousRequestHandler() {
@Override
public RestResponse handleRequestSync(RestRequest request, RequestContext requestContext)
{
try
{
return fakeIndRestResponse(jsonBodyToByteString(fakeIndividualBody("foobar")));
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
};
// Prepare request to mux handler
FutureCallback<RestResponse> callback = new FutureCallback<>();
RequestContext requestContext = new RequestContext();
Map<String, IndividualRequest> individualRequests = ImmutableMap.of(
"0", fakeIndRequest("/request1",
ImmutableMap.of("x-I-am-a-good-Header", "headerValue"),
Collections.<String, IndividualRequest>emptyMap()),
"1", fakeIndRequest("/request2",
ImmutableMap.of("X-Malicious-Header", "evilHeader"),
Collections.<String, IndividualRequest>emptyMap()));
Set<String> headerWhiteList = new HashSet<>();
headerWhiteList.add("X-I-AM-A-GOOD-HEADER");
// Create mux handler instance
MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(mockHandler, null, headerWhiteList, individualRequests.size(), multiplexerRunMode);
try
{
multiplexer.handleRequest(fakeMuxRestRequest(individualRequests), requestContext, callback);
}
catch (Exception e)
{
fail("Multiplexer should not throw exception", e);
}
RestResponse muxRestResponse = callback.get();
// Assert multiplexed request should return a 200 status code
assertEquals(muxRestResponse.getStatus(), 200, "Multiplexer should return 200");
MultiplexedResponseContent muxResponseContent = new MultiplexedResponseContent(DataMapConverter.bytesToDataMap(muxRestResponse.getHeaders(), muxRestResponse.getEntity()));
assertEquals(muxResponseContent.getResponses().get("0").getStatus().intValue(), 200, "Request with whitelisted request header should complete successfully");
assertEquals(muxResponseContent.getResponses().get("1").getStatus().intValue(), 400, "Request with non-whitelisted request header should receive a 400 bad request error");
} |
public double getLongitudeSpan() {
return this.maxLongitude - this.minLongitude;
} | @Test
public void getLongitudeSpanTest() {
BoundingBox boundingBox = new BoundingBox(MIN_LATITUDE, MIN_LONGITUDE, MAX_LATITUDE, MAX_LONGITUDE);
Assert.assertEquals(MAX_LONGITUDE - MIN_LONGITUDE, boundingBox.getLongitudeSpan(), 0);
} |
public void invalidateAll(UUID callerUuid) {
for (WaitSetEntry entry : queue) {
if (!entry.isValid()) {
continue;
}
Operation op = entry.getOperation();
if (callerUuid.equals(op.getCallerUuid())) {
entry.setValid(false);
}
}
} | @Test
public void invalidateAll() {
WaitSet waitSet = newWaitSet();
UUID uuid = UUID.randomUUID();
UUID anotherUuid = UUID.randomUUID();
BlockedOperation op1 = newBlockingOperationWithCallerUuid(uuid);
waitSet.park(op1);
BlockedOperation op2 = newBlockingOperationWithCallerUuid(anotherUuid);
waitSet.park(op2);
BlockedOperation op3 = newBlockingOperationWithCallerUuid(uuid);
waitSet.park(op3);
waitSet.invalidateAll(uuid);
assertValid(waitSet, op1, false);
assertValid(waitSet, op2, true);
assertValid(waitSet, op3, false);
} |
public Point lastPoint() {
return points.getLast();
} | @Test
public void testLastPoint() {
Point<?> firstPoint = Point.builder().time(EPOCH.minusSeconds(5)).latLong(0.0, 0.0).build();
Point<?> secondPoint = Point.builder().time(EPOCH).latLong(0.0, 0.0).build();
TrackUnderConstruction tip = new TrackUnderConstruction(firstPoint);
tip.addPoint(secondPoint);
//the "last point" should be the oldest point added
assertEquals(EPOCH, tip.timeOfLatestPoint());
} |
public static String revertValue(Object cleanValue) {
if (cleanValue == null) {
return "null";
}
Class<?> clazz = cleanValue.getClass();
if (clazz.isAssignableFrom(String.class)) {
return String.valueOf(cleanValue);
} else if (clazz.isAssignableFrom(BigDecimal.class)) {
return String.valueOf(cleanValue);
} else if (clazz.isAssignableFrom(BigInteger.class)) {
return String.valueOf(cleanValue);
} else if (clazz.isAssignableFrom(Boolean.class) || clazz.isAssignableFrom(boolean.class)) {
return Boolean.toString((Boolean) cleanValue);
} else if (clazz.isAssignableFrom(Byte.class) || clazz.isAssignableFrom(byte.class)) {
return String.valueOf(cleanValue);
} else if (clazz.isAssignableFrom(Character.class) || clazz.isAssignableFrom(char.class)) {
return String.valueOf(cleanValue);
} else if (clazz.isAssignableFrom(Double.class) || clazz.isAssignableFrom(double.class)) {
return revertDouble((Double) cleanValue);
} else if (clazz.isAssignableFrom(Float.class) || clazz.isAssignableFrom(float.class)) {
return cleanValue + "f";
} else if (clazz.isAssignableFrom(Integer.class) || clazz.isAssignableFrom(int.class)) {
return Integer.toString((Integer) cleanValue);
} else if (clazz.isAssignableFrom(LocalDate.class)) {
LocalDate localDate = (LocalDate) cleanValue;
return String.format("%04d-%02d-%02d", localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth());
} else if (clazz.isAssignableFrom(LocalDateTime.class)) {
return formatLocalDateTime((LocalDateTime) cleanValue);
} else if (clazz.isAssignableFrom(LocalTime.class)) {
return formatLocalTime((LocalTime) cleanValue);
}
else if (clazz.isAssignableFrom(Long.class) || clazz.isAssignableFrom(long.class)) {
return Long.toString((Long) cleanValue);
} else if (clazz.isAssignableFrom(Short.class) || clazz.isAssignableFrom(short.class)) {
return String.valueOf(cleanValue);
} else if (Enum.class.isAssignableFrom(clazz)) {
return String.valueOf(cleanValue);
} else {
return String.valueOf(cleanValue);
}
} | @Test
public void revertValue_manyCases() {
assertThat(revertValue("Test")).isEqualTo("Test");
assertThat(revertValue(BigDecimal.valueOf(10000.83))).isEqualTo("10000.83");
assertThat(revertValue(BigDecimal.valueOf(10000))).isEqualTo("10000");
assertThat(revertValue(BigInteger.valueOf(10000))).isEqualTo("10000");
assertThat(revertValue(Boolean.FALSE)).isEqualTo("false");
assertThat(revertValue(Boolean.TRUE)).isEqualTo("true");
assertThat(revertValue(false)).isEqualTo("false");
assertThat(revertValue(true)).isEqualTo("true");
assertThat(revertValue(1)).isEqualTo("1");
assertThat(revertValue(new Integer(1))).isEqualTo("1");
assertThat(revertValue(1L)).isEqualTo("1");
assertThat(revertValue(new Long(1))).isEqualTo("1");
assertThat(revertValue(1.1d)).isEqualTo("1.1d");
assertThat(revertValue(new Double(1.1))).isEqualTo("1.1d");
assertThat(revertValue(Double.NaN)).isEqualTo("NaN");
assertThat(revertValue(Double.POSITIVE_INFINITY)).isEqualTo("Infinity");
assertThat(revertValue(Double.NEGATIVE_INFINITY)).isEqualTo("-Infinity");
assertThat(revertValue(1.1f)).isEqualTo("1.1f");
assertThat(revertValue(new Float(1.1))).isEqualTo("1.1f");
assertThat(revertValue('a')).isEqualTo("a");
assertThat(revertValue(new Character('a'))).isEqualTo("a");
assertThat(revertValue((short) 1)).isEqualTo("1");
assertThat(revertValue("0".getBytes()[0])).isEqualTo(String.valueOf("0".getBytes()[0]));
assertThat(revertValue(new Byte("0".getBytes()[0]))).isEqualTo(String.valueOf("0".getBytes()[0]));
assertThat(revertValue(null)).isEqualTo("null");
assertThat(revertValue(LocalDate.of(2018, 10, 20))).isEqualTo("2018-10-20");
assertThat(revertValue(LocalDateTime.of(2018, 10, 20, 2, 13))).isEqualTo("2018-10-20T02:13:00");
assertThat(revertValue(LocalDateTime.of(2018, 10, 20, 2, 13, 3))).isEqualTo("2018-10-20T02:13:03");
assertThat(revertValue(LocalDateTime.of(2018, 10, 20, 2, 13, 3, 9999))).isEqualTo("2018-10-20T02:13:03.000009999");
assertThat(revertValue(LocalTime.of(2, 13))).isEqualTo("02:13:00");
assertThat(revertValue(LocalTime.of(2, 13, 3))).isEqualTo("02:13:03");
assertThat(revertValue(LocalTime.of(2, 13, 3, 9999))).isEqualTo("02:13:03.000009999");
assertThat(revertValue(EnumTest.FIRST)).isEqualTo("FIRST");
} |
public static boolean isIPv4(String addr) {
return InetAddressValidator.isIPv4Address(addr);
} | @Test
void testIsIPv4() {
assertTrue(InternetAddressUtil.isIPv4("127.0.0.1"));
assertFalse(InternetAddressUtil.isIPv4("[::1]"));
assertFalse(InternetAddressUtil.isIPv4("asdfasf"));
assertFalse(InternetAddressUtil.isIPv4("ffgertert"));
assertFalse(InternetAddressUtil.isIPv4("127.100.19"));
} |
@Override
@Transactional(rollbackFor = Exception.class)
public void updateFileConfigMaster(Long id) {
        // Verify the config exists
validateFileConfigExists(id);
        // Mark all other configs as non-master
fileConfigMapper.updateBatch(new FileConfigDO().setMaster(false));
        // Set the target config as master
fileConfigMapper.updateById(new FileConfigDO().setId(id).setMaster(true));
        // Clear the cache
clearCache(null, true);
} | @Test
public void testUpdateFileConfigMaster_notExists() {
        // Invoke and assert the expected exception
assertServiceException(() -> fileConfigService.updateFileConfigMaster(randomLongId()), FILE_CONFIG_NOT_EXISTS);
} |
@Override
public <U> StateFuture<Collection<U>> onNext(Function<T, StateFuture<? extends U>> iterating) {
// Public interface implementation, this is on task thread.
// We perform the user code on cache, and create a new request and chain with it.
if (isEmpty()) {
return StateFutureUtils.completedFuture(Collections.emptyList());
}
Collection<StateFuture<? extends U>> resultFutures = new ArrayList<>();
for (T item : cache) {
resultFutures.add(iterating.apply(item));
}
if (hasNext()) {
return StateFutureUtils.combineAll(resultFutures)
.thenCombine(
asyncNextLoad().thenCompose(itr -> itr.onNext(iterating)),
(a, b) -> {
// TODO optimization: Avoid results copy.
Collection<U> result = new ArrayList<>(a.size() + b.size());
result.addAll(a);
result.addAll(b);
return result;
});
} else {
return StateFutureUtils.combineAll(resultFutures);
}
} | @Test
@SuppressWarnings({"unchecked", "rawtypes"})
public void testPartialLoadingWithReturnValue() {
TestIteratorStateExecutor stateExecutor = new TestIteratorStateExecutor(100, 3);
AsyncExecutionController aec =
new AsyncExecutionController(
new SyncMailboxExecutor(), (a, b) -> {}, stateExecutor, 1, 100, 1000, 1);
stateExecutor.bindAec(aec);
RecordContext<String> recordContext = aec.buildContext("1", "key1");
aec.setCurrentContext(recordContext);
AtomicInteger processed = new AtomicInteger();
aec.handleRequest(null, StateRequestType.MAP_ITER, null)
.thenAccept(
(iter) -> {
assertThat(iter).isInstanceOf(StateIterator.class);
((StateIterator<Integer>) iter)
.onNext(
(item) -> {
assertThat(item)
.isEqualTo(processed.getAndIncrement());
return StateFutureUtils.completedFuture(
String.valueOf(item));
})
.thenAccept(
(strings) -> {
assertThat(processed.get()).isEqualTo(100);
int validate = 0;
for (String item : strings) {
assertThat(item)
.isEqualTo(String.valueOf(validate++));
}
});
});
aec.drainInflightRecords(0);
} |
@Override
public boolean syncData(DistroData data, String targetServer) {
if (isNoExistTarget(targetServer)) {
return true;
}
DistroDataRequest request = new DistroDataRequest(data, data.getType());
Member member = memberManager.find(targetServer);
if (checkTargetServerStatusUnhealthy(member)) {
Loggers.DISTRO
.warn("[DISTRO] Cancel distro sync caused by target server {} unhealthy, key: {}", targetServer,
data.getDistroKey());
return false;
}
try {
Response response = clusterRpcClientProxy.sendRequest(member, request);
return checkResponse(response);
} catch (NacosException e) {
Loggers.DISTRO.error("[DISTRO-FAILED] Sync distro data failed! key: {}", data.getDistroKey(), e);
}
return false;
} | @Test
void testSyncDataWithCallbackFailure() throws NacosException {
when(memberManager.hasMember(member.getAddress())).thenReturn(true);
when(memberManager.find(member.getAddress())).thenReturn(member);
member.setState(NodeState.UP);
response.setErrorInfo(ResponseCode.FAIL.getCode(), "TEST");
when(clusterRpcClientProxy.isRunning(member)).thenReturn(true);
transportAgent.syncData(new DistroData(), member.getAddress(), distroCallback);
verify(distroCallback).onFailed(null);
} |
@ApiOperation(value = "Create Or Update Edge (saveEdge)",
notes = "Create or update the Edge. When creating edge, platform generates Edge Id as " + UUID_WIKI_LINK +
"The newly created edge id will be present in the response. " +
"Specify existing Edge id to update the edge. " +
"Referencing non-existing Edge Id will cause 'Not Found' error." +
"\n\nEdge name is unique in the scope of tenant. Use unique identifiers like MAC or IMEI for the edge names and non-unique 'label' field for user-friendly visualization purposes." +
"Remove 'id', 'tenantId' and optionally 'customerId' from the request body example (below) to create new Edge entity. " +
TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@PostMapping(value = "/edge")
public Edge saveEdge(@Parameter(description = "A JSON value representing the edge.", required = true)
@RequestBody Edge edge) throws Exception {
TenantId tenantId = getTenantId();
edge.setTenantId(tenantId);
boolean created = edge.getId() == null;
RuleChain edgeTemplateRootRuleChain = null;
if (created) {
edgeTemplateRootRuleChain = ruleChainService.getEdgeTemplateRootRuleChain(tenantId);
if (edgeTemplateRootRuleChain == null) {
throw new DataValidationException("Root edge rule chain is not available!");
}
}
Operation operation = created ? Operation.CREATE : Operation.WRITE;
accessControlService.checkPermission(getCurrentUser(), Resource.EDGE, operation, edge.getId(), edge);
return tbEdgeService.save(edge, edgeTemplateRootRuleChain, getCurrentUser());
} | @Test
public void testSaveEdge() throws Exception {
Edge edge = constructEdge("My edge", "default");
Mockito.reset(tbClusterService, auditLogService);
Edge savedEdge = doPost("/api/edge", edge, Edge.class);
Assert.assertNotNull(savedEdge);
Assert.assertNotNull(savedEdge.getId());
Assert.assertTrue(savedEdge.getCreatedTime() > 0);
Assert.assertEquals(tenantId, savedEdge.getTenantId());
Assert.assertNotNull(savedEdge.getCustomerId());
Assert.assertEquals(NULL_UUID, savedEdge.getCustomerId().getId());
Assert.assertEquals(edge.getName(), savedEdge.getName());
testNotifyEntityBroadcastEntityStateChangeEventManyTimeMsgToEdgeServiceNever(savedEdge, savedEdge.getId(), savedEdge.getId(),
tenantId, tenantAdminUser.getCustomerId(), tenantAdminUser.getId(), tenantAdminUser.getEmail(),
ActionType.ADDED, 2);
savedEdge.setName("My new edge");
doPost("/api/edge", savedEdge, Edge.class);
Edge foundEdge = doGet("/api/edge/" + savedEdge.getId().getId().toString(), Edge.class);
Assert.assertEquals(foundEdge.getName(), savedEdge.getName());
testNotifyEntityBroadcastEntityStateChangeEventManyTimeMsgToEdgeServiceNever(foundEdge, foundEdge.getId(), foundEdge.getId(),
tenantId, tenantAdminUser.getCustomerId(), tenantAdminUser.getId(), tenantAdminUser.getEmail(),
ActionType.UPDATED, 1);
} |
@Override
public void close() {
loggerFactory.getLoggersWithPrefix(queryId.toString()).forEach(ProcessingLogger::close);
sharedKafkaStreamsRuntime.stop(queryId, true);
scalablePushRegistry.ifPresent(ScalablePushRegistry::close);
listener.onClose(this);
} | @Test
public void shouldCloseProcessingLoggers() {
// Given:
final ProcessingLogger processingLogger1 = mock(ProcessingLogger.class);
final ProcessingLogger processingLogger2 = mock(ProcessingLogger.class);
when(loggerFactory.getLoggersWithPrefix(QUERY_ID.toString())).thenReturn(Arrays.asList(processingLogger1, processingLogger2));
// When:
query.close();
// Then:
verify(processingLogger1).close();
verify(processingLogger2).close();
} |
@Override
public Ring<T> createRing(Map<T, Integer> pointsMap) {
return _ringFactory.createRing(pointsMap);
} | @Test(groups = { "small", "back-end" })
  public void testFactoryWithDistributionBasedAndRegex() {
RingFactory<String> factory = new DelegatingRingFactory<>(configBuilder("distributionBased", "uriRegex"));
Ring<String> ring = factory.createRing(buildPointsMap(10));
assertTrue(ring instanceof MPConsistentHashRing);
} |
@VisibleForTesting
static Instant getCreationTime(String configuredCreationTime, ProjectProperties projectProperties)
throws DateTimeParseException, InvalidCreationTimeException {
try {
switch (configuredCreationTime) {
case "EPOCH":
return Instant.EPOCH;
case "USE_CURRENT_TIMESTAMP":
projectProperties.log(
LogEvent.debug(
"Setting image creation time to current time; your image may not be reproducible."));
return Instant.now();
default:
DateTimeFormatter formatter =
new DateTimeFormatterBuilder()
.append(DateTimeFormatter.ISO_DATE_TIME) // parses isoStrict
// add ability to parse with no ":" in tz
.optionalStart()
.appendOffset("+HHmm", "+0000")
.optionalEnd()
.toFormatter();
return formatter.parse(configuredCreationTime, Instant::from);
}
} catch (DateTimeParseException ex) {
throw new InvalidCreationTimeException(configuredCreationTime, configuredCreationTime, ex);
}
} | @Test
public void testGetCreationTime_isoDateTimeValueRequiresTimeZone() {
// getCreationTime should fail if timezone not specified.
// this is the expected behavior, not specifically designed like this for any reason, feel
// free to change this behavior and update the test
assertThrows(
InvalidCreationTimeException.class,
() ->
PluginConfigurationProcessor.getCreationTime("2011-12-03T01:15:30", projectProperties));
} |
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldRangeQueryWithCorrectParams_upperBound() {
// Given:
when(kafkaStreams.query(any())).thenReturn(getIteratorResult());
// When:
table.get(PARTITION, null, A_KEY2);
// Then:
verify(kafkaStreams).query(queryTypeCaptor.capture());
StateQueryRequest request = queryTypeCaptor.getValue();
assertThat(request.getQuery(), instanceOf(RangeQuery.class));
RangeQuery rangeQuery = (RangeQuery)request.getQuery();
assertThat(rangeQuery.getLowerBound(), is(Optional.empty()));
assertThat(rangeQuery.getUpperBound(), is(Optional.of(A_KEY2)));
} |
public QueryMetadata parse(String queryString) {
if (Strings.isNullOrEmpty(queryString)) {
return QueryMetadata.empty();
}
Map<String, List<SubstringMultilinePosition>> positions = new LinkedHashMap<>();
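    // Collect, per parameter name, every (line, start, end) position at which its placeholder occurs.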
final String[] lines = queryString.split("\n");
for (int line = 0; line < lines.length; line++) {
final String currentLine = lines[line];
final Matcher matcher = PLACEHOLDER_PATTERN.matcher(currentLine);
while (matcher.find()) {
final String name = matcher.group(1);
if (!positions.containsKey(name)) {
positions.put(name, new ArrayList<>());
}
positions.get(name).add(SubstringMultilinePosition.create(line + 1, matcher.start(), matcher.end()));
}
}
final ImmutableSet<QueryParam> params = positions.entrySet().stream()
.map(entry -> QueryParam.create(entry.getKey(), entry.getValue()))
.collect(ImmutableSet.toImmutableSet());
return QueryMetadata.builder()
.usedParameters(params)
.build();
} | @Test
void testParamPositionsMultiline() {
final QueryMetadata metadata = queryStringParser.parse("foo:$bar$ AND\nlorem:$bar$ OR ipsum:$bar$");
assertThat(metadata.usedParameters().size()).isEqualTo(1);
final QueryParam param = metadata.usedParameters().iterator().next();
assertThat(param.name()).isEqualTo("bar");
assertThat(param.positions()).containsExactly(
SubstringMultilinePosition.create(1, 4, 9),
SubstringMultilinePosition.create(2, 6, 11),
SubstringMultilinePosition.create(2, 21, 26)
);
} |
@Override
public Collection<Subscriber> getFuzzySubscribers(String namespaceId, String serviceName) {
Collection<Subscriber> result = new LinkedList<>(
subscriberServiceLocal.getFuzzySubscribers(namespaceId, serviceName));
if (memberManager.getServerList().size() > 1) {
getSubscribersFromRemotes(namespaceId, serviceName, result);
}
return result;
} | @Test
void testGetFuzzySubscribersByStringWithLocal() {
Collection<Subscriber> actual = aggregation.getFuzzySubscribers(namespace, serviceName);
assertEquals(1, actual.size());
assertEquals("local", actual.iterator().next().getAddrStr());
} |
public List<String> getServices() {
try {
return Optional.of(nacosServiceDiscovery.getServices()).map(services -> {
ServiceCache.setServiceIds(services);
return services;
}).get();
} catch (NacosException e) {
LOGGER.log(Level.SEVERE, String.format(Locale.ENGLISH, "getServices failed,"
+ "isFailureToleranceEnabled={%s}", nacosRegisterConfig.isFailureToleranceEnabled()), e);
return nacosRegisterConfig.isFailureToleranceEnabled() ? ServiceCache.getServiceIds()
: Collections.emptyList();
}
} | @Test
public void testGetServices() throws NacosException {
mockNamingService();
Assert.assertNotNull(nacosClient.getServices());
} |
static List<String> locateScripts(ArgsMap argsMap) {
String script = argsMap.get(SCRIPT);
String scriptDir = argsMap.get(SCRIPT_DIR);
List<String> scripts = new ArrayList<>();
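    // Scripts can be supplied as a colon-separated "script" argument, as files in a "scriptdir" directory, or both.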
if (script != null) {
StringTokenizer tokenizer = new StringTokenizer(script, ":");
if (log.isDebugEnabled()) {
log.debug(
((tokenizer.countTokens() == 1) ? "initial script is {}" : "initial scripts are {}"),
script);
}
while (tokenizer.hasMoreTokens()) {
scripts.add(tokenizer.nextToken());
}
}
if (scriptDir != null) {
File dir = new File(scriptDir);
if (dir.isDirectory()) {
if (log.isDebugEnabled()) {
log.debug("found scriptdir: {}", dir.getAbsolutePath());
}
File[] files = dir.listFiles();
if (files != null) {
for (File file : files) {
scripts.add(file.getAbsolutePath());
}
}
}
}
return scripts;
} | @Test
void locateScriptsMulti() {
ArgsMap argsMap = new ArgsMap(new String[] {"script=script1:script2"});
List<String> scripts = Main.locateScripts(argsMap);
assertEquals(2, scripts.size());
} |
protected RepositoryMeta createPurRepositoryMetaRepositoryMeta( String url ) {
RepositoryMeta purRepositoryMeta = null;
try {
Class<?> purRepositoryLocationClass = purPluginClassLoader.loadClass( "org.pentaho.di.repository.pur"
+ ".PurRepositoryLocation" );
Constructor<?> purRepositoryLocationConstructor = purRepositoryLocationClass.getConstructor( String.class );
Object purRepositoryLocation = purRepositoryLocationConstructor.newInstance( url );
Class<?> purRepositoryMetaClass = purPluginClassLoader.loadClass( "org.pentaho.di.repository.pur.PurRepositoryMeta" );
purRepositoryMeta = (RepositoryMeta) purRepositoryMetaClass.newInstance();
Method setRepositoryLocationMethod = purRepositoryMetaClass.getMethod( "setRepositoryLocation", purRepositoryLocationClass );
setRepositoryLocationMethod.invoke( purRepositoryMeta, purRepositoryLocation );
} catch ( Exception e ) {
logger.error( "Unable to instantiate repository meta!" );
}
return purRepositoryMeta;
} | @Test
public void createPurRepositoryMetaRepositoryMetaTest() {
RepositoryMeta repositoryMeta = null;
try {
Mockito.<Class<?>>when( mockClassLoader.loadClass( "org.pentaho.di.repository.pur"
+ ".PurRepositoryLocation" ) ).thenReturn( Class.forName( "org.pentaho.di.repository.pur"
+ ".PurRepositoryLocation" ) );
Mockito.<Class<?>>when( mockClassLoader.loadClass( "org.pentaho.di.repository.pur.PurRepositoryMeta" ) )
.thenReturn( Class.forName( "org.pentaho.di.repository.pur.PurRepositoryMeta" ) );
} catch ( ClassNotFoundException e ) {
e.printStackTrace();
}
repositoryMeta = proxy.createPurRepositoryMetaRepositoryMeta( "SomeUrl" );
assertNotNull( repositoryMeta );
} |
protected boolean isLoadBalancerSheddingBundlesWithPoliciesEnabled(LoadManagerContext context,
NamespaceBundle namespaceBundle) {
if (isolationPoliciesHelper != null
&& isolationPoliciesHelper.hasIsolationPolicy(namespaceBundle.getNamespaceObject())) {
return context.brokerConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled();
}
if (antiAffinityGroupPolicyHelper != null
&& antiAffinityGroupPolicyHelper.hasAntiAffinityGroupPolicy(namespaceBundle.toString())) {
return context.brokerConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled();
}
return true;
} | @Test
public void testIsLoadBalancerSheddingBundlesWithPoliciesEnabled() {
var counter = new UnloadCounter();
TransferShedder transferShedder = new TransferShedder(pulsar, counter, new ArrayList<>(),
isolationPoliciesHelper, antiAffinityGroupPolicyHelper);
var ctx = setupContext();
NamespaceBundle namespaceBundle = mock(NamespaceBundle.class);
doReturn("bundle").when(namespaceBundle).toString();
boolean[][] expects = {
{true, true, true, true},
{true, true, false, false},
{true, false, true, true},
{true, false, false, false},
{false, true, true, true},
{false, true, false, false},
{false, false, true, true},
{false, false, false, true}
};
for (boolean[] expect : expects) {
doReturn(expect[0]).when(isolationPoliciesHelper).hasIsolationPolicy(any());
doReturn(expect[1]).when(antiAffinityGroupPolicyHelper).hasAntiAffinityGroupPolicy(any());
ctx.brokerConfiguration().setLoadBalancerSheddingBundlesWithPoliciesEnabled(expect[2]);
assertEquals(transferShedder.isLoadBalancerSheddingBundlesWithPoliciesEnabled(ctx, namespaceBundle),
expect[3]);
}
} |
public static void pauseConsumers(final Queue<Consumer<byte[]>> consumers) {
consumers.forEach(Consumer::pause);
} | @Test
public void pauseConsumers() {
Consumer<byte[]> consumer = mock(Consumer.class);
Queue<Consumer<byte[]>> consumers = new ConcurrentLinkedQueue<>();
consumers.add(consumer);
PulsarUtils.pauseConsumers(consumers);
verify(consumer).pause();
} |
public List<String> getShowInfo() {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
Table tbl = null;
if (db != null) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
tbl = db.getTable(tableId);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
}
readLock();
try {
List<String> row = Lists.newArrayList();
row.add(String.valueOf(id));
row.add(name);
row.add(TimeUtils.longToTimeString(createTimestamp));
row.add(TimeUtils.longToTimeString(pauseTimestamp));
row.add(TimeUtils.longToTimeString(endTimestamp));
row.add(db == null ? String.valueOf(dbId) : db.getFullName());
row.add(tbl == null ? String.valueOf(tableId) : tbl.getName());
if (state == JobState.RUNNING) {
row.add(substate == JobSubstate.STABLE ? state.name() : substate.name());
} else {
row.add(state.name());
}
row.add(dataSourceType.name());
row.add(String.valueOf(getSizeOfRoutineLoadTaskInfoList()));
row.add(jobPropertiesToJsonString());
row.add(dataSourcePropertiesJsonToString());
row.add(customPropertiesJsonToString());
row.add(getStatistic());
row.add(getProgress().toJsonString());
row.add(getTimestampProgress().toJsonString());
switch (state) {
case PAUSED:
row.add(pauseReason == null ? "" : pauseReason.toString());
break;
case CANCELLED:
row.add(cancelReason == null ? "" : cancelReason.toString());
break;
case RUNNING:
if (substate == JobSubstate.UNSTABLE) {
row.add(stateChangedReason == null ? "" : stateChangedReason.toString());
} else {
row.add("");
}
break;
default:
row.add("");
}
// tracking url
if (!errorLogUrls.isEmpty()) {
row.add(Joiner.on(", ").join(errorLogUrls));
row.add("select tracking_log from information_schema.load_tracking_logs where job_id=" + id);
} else {
row.add("");
row.add("");
}
row.add(otherMsg);
if (RunMode.getCurrentRunMode() == RunMode.SHARED_DATA) {
try {
Warehouse warehouse = GlobalStateMgr.getCurrentState().getWarehouseMgr().getWarehouse(warehouseId);
row.add(warehouse.getName());
} catch (Exception e) {
row.add(e.getMessage());
}
}
row.add(getSourceProgressString());
return row;
} finally {
readUnlock();
}
} | @Test
public void testGetShowInfo() throws UserException {
{
// PAUSE state
KafkaRoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob();
Deencapsulation.setField(routineLoadJob, "state", RoutineLoadJob.JobState.PAUSED);
ErrorReason errorReason = new ErrorReason(InternalErrorCode.INTERNAL_ERR,
TransactionState.TxnStatusChangeReason.OFFSET_OUT_OF_RANGE.toString());
Deencapsulation.setField(routineLoadJob, "pauseReason", errorReason);
List<String> showInfo = routineLoadJob.getShowInfo();
Assert.assertEquals(true, showInfo.stream().filter(entity -> !Strings.isNullOrEmpty(entity))
.anyMatch(entity -> entity.equals(errorReason.toString())));
}
{
// Progress
KafkaRoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob();
Map<Integer, Long> partitionOffsets = Maps.newHashMap();
partitionOffsets.put(Integer.valueOf(0), Long.valueOf(1234));
KafkaProgress kafkaProgress = new KafkaProgress(partitionOffsets);
Deencapsulation.setField(routineLoadJob, "progress", kafkaProgress);
Map<Integer, Long> partitionOffsetTimestamps = Maps.newHashMap();
partitionOffsetTimestamps.put(Integer.valueOf(0), Long.valueOf(1701411708410L));
KafkaProgress kafkaTimestampProgress = new KafkaProgress(partitionOffsetTimestamps);
Deencapsulation.setField(routineLoadJob, "timestampProgress", kafkaTimestampProgress);
routineLoadJob.setPartitionOffset(0, 12345);
List<String> showInfo = routineLoadJob.getShowInfo();
Assert.assertEquals("{\"0\":\"12345\"}", showInfo.get(20));
//The displayed value is the actual value - 1
Assert.assertEquals("{\"0\":\"1233\"}", showInfo.get(14));
Assert.assertEquals("{\"0\":\"1701411708409\"}", showInfo.get(15));
}
{
// UNSTABLE substate
KafkaRoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob();
Map<Integer, Long> partitionOffsetTimestamps = Maps.newHashMap();
partitionOffsetTimestamps.put(Integer.valueOf(0), Long.valueOf(1701411708410L));
KafkaProgress kafkaTimestampProgress = new KafkaProgress(partitionOffsetTimestamps);
Deencapsulation.setField(routineLoadJob, "timestampProgress", kafkaTimestampProgress);
routineLoadJob.updateState(RoutineLoadJob.JobState.RUNNING, null, false);
            // The job is set unstable because the progress is too slow.
routineLoadJob.updateSubstate();
List<String> showInfo = routineLoadJob.getShowInfo();
Assert.assertEquals("UNSTABLE", showInfo.get(7));
// The lag [xxx] of partition [0] exceeds Config.routine_load_unstable_threshold_second [3600]
Assert.assertTrue(showInfo.get(16).contains(
"partition [0] exceeds Config.routine_load_unstable_threshold_second [3600]"));
partitionOffsetTimestamps.put(Integer.valueOf(0), Long.valueOf(System.currentTimeMillis()));
kafkaTimestampProgress = new KafkaProgress(partitionOffsetTimestamps);
Deencapsulation.setField(routineLoadJob, "timestampProgress", kafkaTimestampProgress);
            // The job is set stable because the progress has kept up.
routineLoadJob.updateSubstate();
showInfo = routineLoadJob.getShowInfo();
Assert.assertEquals("RUNNING", showInfo.get(7));
Assert.assertEquals("", showInfo.get(16));
// The job is set stable.
routineLoadJob.updateSubstateStable();
showInfo = routineLoadJob.getShowInfo();
Assert.assertEquals("RUNNING", showInfo.get(7));
Assert.assertEquals("", showInfo.get(16));
}
} |
@Override
public void close() {
try {
restHighLevelClient.close();
} catch (IOException e) {
throw new ElasticsearchException("Could not close ES Rest high level client", e);
}
} | @Test
public void should_rethrow_ex_when_close_client_throws() throws IOException {
doThrow(IOException.class).when(restClient).close();
assertThatThrownBy(() -> underTest.close())
.isInstanceOf(ElasticsearchException.class);
} |
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
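        // Dispatch to the type-specific JDBC accessor for the requested Java type; fall back to getObject for any other type.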
if (boolean.class == type) {
return resultSet.getBoolean(columnIndex);
}
if (byte.class == type) {
return resultSet.getByte(columnIndex);
}
if (short.class == type) {
return resultSet.getShort(columnIndex);
}
if (int.class == type) {
return resultSet.getInt(columnIndex);
}
if (long.class == type) {
return resultSet.getLong(columnIndex);
}
if (float.class == type) {
return resultSet.getFloat(columnIndex);
}
if (double.class == type) {
return resultSet.getDouble(columnIndex);
}
if (String.class == type) {
return resultSet.getString(columnIndex);
}
if (BigDecimal.class == type) {
return resultSet.getBigDecimal(columnIndex);
}
if (byte[].class == type) {
return resultSet.getBytes(columnIndex);
}
if (Date.class == type) {
return resultSet.getDate(columnIndex);
}
if (Time.class == type) {
return resultSet.getTime(columnIndex);
}
if (Timestamp.class == type) {
return resultSet.getTimestamp(columnIndex);
}
if (Blob.class == type) {
return resultSet.getBlob(columnIndex);
}
if (Clob.class == type) {
return resultSet.getClob(columnIndex);
}
if (Array.class == type) {
return resultSet.getArray(columnIndex);
}
return resultSet.getObject(columnIndex);
} | @Test
void assertGetValueByFloat() throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.getFloat(1)).thenReturn(1.0F);
assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, float.class), is(1.0F));
} |
@SuppressWarnings("unchecked")
@Override
public void execute(String mapName, Predicate predicate, Collection<Integer> partitions, Result result) {
runUsingPartitionScanWithoutPaging(mapName, predicate, partitions, result);
if (predicate instanceof PagingPredicateImpl pagingPredicate) {
Map.Entry<Integer, Map.Entry> nearestAnchorEntry = pagingPredicate.getNearestAnchorEntry();
result.orderAndLimit(pagingPredicate, nearestAnchorEntry);
}
} | @Test
public void execute_fail() {
PartitionScanRunner runner = mock(PartitionScanRunner.class);
ParallelPartitionScanExecutor executor = executor(runner);
Predicate<Object, Object> predicate = Predicates.equal("attribute", 1);
QueryResult queryResult = new QueryResult(IterationType.ENTRY, null, null, Long.MAX_VALUE, false);
doThrow(new QueryException()).when(runner).run(anyString(), eq(predicate), anyInt(), isA(QueryResult.class));
List<Integer> list = asList(1, 2, 3);
assertThatThrownBy(() -> executor.execute("Map", predicate, list, queryResult))
.isInstanceOf(QueryException.class);
} |
@Override
public AuthLoginRespVO refreshToken(String refreshToken) {
OAuth2AccessTokenDO accessTokenDO = oauth2TokenService.refreshAccessToken(refreshToken, OAuth2ClientConstants.CLIENT_ID_DEFAULT);
return AuthConvert.INSTANCE.convert(accessTokenDO);
} | @Test
public void testRefreshToken() {
        // Prepare parameters
String refreshToken = randomString();
        // Mock the method behavior
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class);
when(oauth2TokenService.refreshAccessToken(eq(refreshToken), eq("default")))
.thenReturn(accessTokenDO);
        // Invoke
AuthLoginRespVO loginRespVO = authService.refreshToken(refreshToken);
        // Assert
assertPojoEquals(accessTokenDO, loginRespVO);
} |
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
} | @Test
public void testOnWindowExpirationDisallowedParameter() throws Exception {
// Timers are not allowed in OnWindowExpiration
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("Illegal parameter type");
thrown.expectMessage("TimerParameter");
thrown.expectMessage("myTimer");
DoFnSignatures.getSignature(
new DoFn<String, String>() {
@TimerId("foo")
private final TimerSpec myTimer = TimerSpecs.timer(TimeDomain.EVENT_TIME);
@ProcessElement
public void foo() {}
@OnTimer("foo")
public void onFoo() {}
@OnWindowExpiration
public void bar(@TimerId("foo") Timer t) {}
}.getClass());
} |
@Override
protected ExecuteContext doBefore(ExecuteContext context) throws Exception {
LogUtils.printHttpRequestBeforePoint(context);
final InvokerService invokerService = PluginServiceManager.getPluginService(InvokerService.class);
final Optional<Request> rawRequest = getRequest(context);
if (!rawRequest.isPresent()) {
return context;
}
Request request = rawRequest.get();
URI uri = request.url().uri();
if (!PlugEffectWhiteBlackUtils.isHostEqualRealmName(uri.getHost())) {
return context;
}
Map<String, String> hostAndPath = RequestInterceptorUtils.recoverHostAndPath(uri.getPath());
if (!PlugEffectWhiteBlackUtils.isPlugEffect(hostAndPath.get(HttpConstants.HTTP_URI_SERVICE))) {
return context;
}
RequestInterceptorUtils.printRequestLog("OkHttp3", hostAndPath);
AtomicReference<Request> rebuildRequest = new AtomicReference<>();
rebuildRequest.set(request);
invokerService.invoke(
buildInvokerFunc(uri, hostAndPath, request, rebuildRequest, context),
buildExFunc(rebuildRequest),
hostAndPath.get(HttpConstants.HTTP_URI_SERVICE))
.ifPresent(object -> setResultOrThrow(context, object, uri.getPath()));
return context;
} | @Test
public void testRestTemplateInterceptor() throws Exception {
Optional<?> configMapOptional = ReflectUtils.getStaticFieldValue(ConfigManager.class, "CONFIG_MAP");
DiscoveryPluginConfig discoveryPluginConfig = new DiscoveryPluginConfig();
if (configMapOptional.isPresent()) {
Map<String, BaseConfig> configMap = (Map<String, BaseConfig>) configMapOptional.get();
configMap.put(DiscoveryPluginConfig.class.getAnnotation(ConfigTypeKey.class).value(),
discoveryPluginConfig);
}
OkHttpClient client = new OkHttpClient();
Request request = createRequest(url);
ExecuteContext context = ExecuteContext.forMemberMethod(client.newCall(request),
String.class.getDeclaredMethod("trim"), arguments, null,
null);
discoveryPluginConfig.setRealmName(realmName);
initStrategy(PlugEffectWhiteBlackConstants.STRATEGY_ALL, "zookeeper-provider-demo");
interceptor.doBefore(context);
Request requestNew = (Request)context.getRawMemberFieldValue("originalRequest");
Assert.assertEquals(url, requestNew.url().uri().toString());
} |
public static NacosRestTemplate getNacosRestTemplate(Logger logger) {
return getNacosRestTemplate(new DefaultHttpClientFactory(logger));
} | @Test
void testGetNacosRestTemplateWithCustomFactory() {
assertTrue(restMap.isEmpty());
HttpClientBeanHolder.getNacosRestTemplate((Logger) null);
assertEquals(1, restMap.size());
NacosRestTemplate actual = HttpClientBeanHolder.getNacosRestTemplate(mockFactory);
assertEquals(2, restMap.size());
assertEquals(mockRestTemplate, actual);
} |
public static void main(String[] args) {
var wizard = new Wizard();
var goblin = new Goblin();
goblin.printStatus();
wizard.castSpell(goblin::changeSize);
goblin.printStatus();
wizard.castSpell(goblin::changeVisibility);
goblin.printStatus();
wizard.undoLastSpell();
goblin.printStatus();
wizard.undoLastSpell();
goblin.printStatus();
wizard.redoLastSpell();
goblin.printStatus();
wizard.redoLastSpell();
goblin.printStatus();
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
@Override
public Dataset<Row> apply(
final JavaSparkContext jsc,
final SparkSession sparkSession,
final Dataset<Row> rowDataset,
final TypedProperties props) {
final String sqlFile = getStringWithAltKeys(props, SqlTransformerConfig.TRANSFORMER_SQL_FILE);
final FileSystem fs = HadoopFSUtils.getFs(sqlFile, jsc.hadoopConfiguration(), true);
// tmp table name doesn't like dashes
final String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_"));
LOG.info("Registering tmp table: {}", tmpTable);
rowDataset.createOrReplaceTempView(tmpTable);
try (final Scanner scanner = new Scanner(fs.open(new Path(sqlFile)), "UTF-8")) {
Dataset<Row> rows = null;
      // Each SQL statement is separated by a semicolon, hence set that as the delimiter.
scanner.useDelimiter(";");
LOG.info("SQL Query for transformation:");
while (scanner.hasNext()) {
String sqlStr = scanner.next();
sqlStr = sqlStr.replaceAll(SRC_PATTERN, tmpTable).trim();
if (!sqlStr.isEmpty()) {
LOG.info(sqlStr);
// overwrite the same dataset object until the last statement then return.
rows = sparkSession.sql(sqlStr);
}
}
return rows;
} catch (final IOException ioe) {
throw new HoodieTransformExecutionException("Error reading transformer SQL file.", ioe);
} finally {
sparkSession.catalog().dropTempView(tmpTable);
}
} | @Test
public void testSqlFileBasedTransformerIllegalArguments() {
// Test if the class throws illegal argument exception when argument not present.
assertThrows(
IllegalArgumentException.class,
() -> sqlFileTransformer.apply(jsc, sparkSession, inputDatasetRows, props));
} |
static String escape(String input) {
return input.replace("\\", "\\\\").replace("(", "\\(").replace(")", "\\)");
} | @Test
public void testEscapeParentheses() {
assertEquals("\\(bob's name\\)", ClientQuotasImageNode.escape("(bob's name)"));
} |
public static Ip6Address valueOf(byte[] value) {
return new Ip6Address(value);
} | @Test
public void testToStringIPv6() {
Ip6Address ipAddress;
ipAddress =
Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:8888");
assertThat(ipAddress.toString(),
is("1111:2222:3333:4444:5555:6666:7777:8888"));
ipAddress = Ip6Address.valueOf("1111::8888");
assertThat(ipAddress.toString(), is("1111::8888"));
ipAddress = Ip6Address.valueOf("1111::");
assertThat(ipAddress.toString(), is("1111::"));
ipAddress = Ip6Address.valueOf("::8888");
assertThat(ipAddress.toString(), is("::8888"));
ipAddress = Ip6Address.valueOf("::");
assertThat(ipAddress.toString(), is("::"));
ipAddress =
Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
assertThat(ipAddress.toString(),
is("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"));
} |
public static Source source(InputStream in) {
return source(in, new Timeout());
} | @Test public void sourceFromInputStream() throws Exception {
InputStream in = new ByteArrayInputStream(
("a" + repeat('b', Segment.SIZE * 2) + "c").getBytes(UTF_8));
// Source: ab...bc
Source source = Okio.source(in);
Buffer sink = new Buffer();
// Source: b...bc. Sink: abb.
assertEquals(3, source.read(sink, 3));
assertEquals("abb", sink.readUtf8(3));
// Source: b...bc. Sink: b...b.
assertEquals(Segment.SIZE, source.read(sink, 20000));
assertEquals(repeat('b', Segment.SIZE), sink.readUtf8());
// Source: b...bc. Sink: b...bc.
assertEquals(Segment.SIZE - 1, source.read(sink, 20000));
assertEquals(repeat('b', Segment.SIZE - 2) + "c", sink.readUtf8());
// Source and sink are empty.
assertEquals(-1, source.read(sink, 1));
} |
@Override
public AppQueue getAppQueue(HttpServletRequest hsr, String appId)
throws AuthorizationException {
try {
long startTime = clock.getTime();
DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByAppId(appId);
AppQueue queue = interceptor.getAppQueue(hsr, appId);
if (queue != null) {
long stopTime = clock.getTime();
routerMetrics.succeededGetAppQueueRetrieved((stopTime - startTime));
RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_QUEUEINFO,
TARGET_WEB_SERVICE);
return queue;
}
} catch (IllegalArgumentException e) {
routerMetrics.incrGetAppQueueFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_QUEUEINFO,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException(e, "Unable to get queue by appId: %s.", appId);
} catch (YarnException e) {
routerMetrics.incrGetAppQueueFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_QUEUEINFO,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException("getAppQueue error.", e);
}
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_QUEUEINFO,
UNKNOWN, TARGET_WEB_SERVICE, "getAppQueue Failed.");
routerMetrics.incrGetAppQueueFailedRetrieved();
throw new RuntimeException("getAppQueue Failed.");
} | @Test
public void testGetAppQueue() throws IOException, InterruptedException {
String queueName = "queueName";
// Submit application to multiSubCluster
ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
context.setApplicationId(appId.toString());
context.setQueue(queueName);
Assert.assertNotNull(interceptor.submitApplication(context, null));
// Get Queue by application
AppQueue queue = interceptor.getAppQueue(null, appId.toString());
Assert.assertNotNull(queue);
Assert.assertEquals(queueName, queue.getQueue());
} |
@Override
public ThreadPoolPlugin disableThreadPoolPlugin(String pluginId) {
ThreadPoolPlugin removed = enableThreadPoolPlugins.remove(pluginId);
if (Objects.nonNull(removed)) {
managedThreadPoolPluginSupports.values().forEach(support -> support.unregister(pluginId));
}
return removed;
} | @Test
public void testDisableThreadPoolPlugin() {
GlobalThreadPoolPluginManager manager = new DefaultGlobalThreadPoolPluginManager();
manager.enableThreadPoolPlugin(new TestPlugin("1"));
manager.enableThreadPoolPlugin(new TestPlugin("2"));
manager.disableThreadPoolPlugin("2");
Assert.assertEquals(1, manager.getAllEnableThreadPoolPlugins().size());
} |
@SuppressWarnings("unchecked")
public CompletableFuture<Void> onResponse(final FilterRequestContext requestContext,
final FilterResponseContext responseContext)
{
RestLiResponseData<?> responseData = responseContext.getResponseData();
if (responseData.getResponseEnvelope().isErrorResponse())
{
return CompletableFuture.completedFuture(null);
}
if (shouldValidateOnResponse(requestContext))
{
ResourceMethod method = requestContext.getMethodType();
RestLiDataValidator validator = createResponseRestLiDataValidator(requestContext);
switch (method)
{
case GET:
validateSingleResponse(validator, ((GetResponseEnvelope) responseData.getResponseEnvelope()).getRecord());
break;
case CREATE:
if (requestContext.isReturnEntityMethod() && requestContext.isReturnEntityRequested())
{
validateSingleResponse(validator, ((CreateResponseEnvelope) responseData.getResponseEnvelope()).getRecord());
}
break;
case PARTIAL_UPDATE:
if (requestContext.isReturnEntityMethod() && requestContext.isReturnEntityRequested())
{
validateSingleResponse(validator, ((PartialUpdateResponseEnvelope) responseData.getResponseEnvelope()).getRecord());
}
break;
case GET_ALL:
validateCollectionResponse(validator, ((GetAllResponseEnvelope) responseData.getResponseEnvelope()).getCollectionResponse());
break;
case FINDER:
validateCollectionResponse(validator, ((FinderResponseEnvelope) responseData.getResponseEnvelope()).getCollectionResponse());
break;
case BATCH_FINDER:
validateBatchCollectionResponse(validator, ((BatchFinderResponseEnvelope) responseData.getResponseEnvelope()).getItems());
break;
case BATCH_GET:
validateBatchResponse(validator, ((BatchGetResponseEnvelope) responseData.getResponseEnvelope()).getBatchResponseMap());
break;
case BATCH_CREATE:
if (requestContext.isReturnEntityMethod() && requestContext.isReturnEntityRequested())
{
validateCreateCollectionResponse(validator, ((BatchCreateResponseEnvelope) responseData.getResponseEnvelope()).getCreateResponses());
}
break;
case BATCH_PARTIAL_UPDATE:
if (requestContext.isReturnEntityMethod() && requestContext.isReturnEntityRequested())
{
validateBatchResponse(validator, ((BatchPartialUpdateResponseEnvelope) responseData.getResponseEnvelope()).getBatchResponseMap());
}
break;
}
}
return CompletableFuture.completedFuture(null);
} | @Test(dataProvider = "returnEntityValidateOnResponseData")
@SuppressWarnings({"unchecked", "rawtypes"})
public void testReturnEntityValidateOnResponse(ResourceMethod resourceMethod, RestLiResponseData responseData,
boolean isReturnEntityMethod, boolean isReturnEntityRequested)
{
when(filterRequestContext.getMethodType()).thenReturn(resourceMethod);
when(filterRequestContext.isReturnEntityMethod()).thenReturn(isReturnEntityMethod);
when(filterRequestContext.isReturnEntityRequested()).thenReturn(isReturnEntityRequested);
when(filterResponseContext.getResponseData()).thenReturn(responseData);
RestLiValidationFilter validationFilter = new RestLiValidationFilter();
final boolean expectValidateEntity = isReturnEntityMethod && isReturnEntityRequested;
try
{
// Check if validation occurred by catching exceptions for invalid entities
validationFilter.onResponse(filterRequestContext, filterResponseContext);
if (expectValidateEntity)
{
Assert.fail("Expected validation to occur and cause an exception, but no exception was encountered.");
}
}
catch (RestLiServiceException e)
{
if (!expectValidateEntity)
{
Assert.fail("Expected validation to be skipped without exceptions, but encountered exception: " + e.getMessage());
}
Assert.assertEquals(e.getStatus().getCode(), HttpStatus.S_500_INTERNAL_SERVER_ERROR.getCode(), "Expected HTTP status code 500 for this validation failure.");
Assert.assertTrue(e.getMessage().contains("/intField :: notAnInt cannot be coerced to Integer"), "Expected validation error for field \"intField\", but found another error.");
}
} |
@Override
public boolean equals(Object other) {
if (!(other instanceof Versioned)) {
return false;
}
Versioned<V> that = (Versioned) other;
return Objects.equal(this.value, that.value) &&
Objects.equal(this.version, that.version) &&
Objects.equal(this.creationTime, that.creationTime);
} | @Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(stats1, stats1)
.addEqualityGroup(stats2)
.testEquals();
} |
public IdentityProvider getEnabledByKey(String key) {
IdentityProvider identityProvider = providersByKey.get(key);
if (identityProvider != null && IS_ENABLED_FILTER.test(identityProvider)) {
return identityProvider;
}
throw new IllegalArgumentException(String.format("Identity provider %s does not exist or is not enabled", key));
} | @Test
public void return_enabled_provider() {
IdentityProviderRepository underTest = new IdentityProviderRepository(asList(GITHUB, BITBUCKET, DISABLED));
assertThat(underTest.getEnabledByKey(GITHUB.getKey())).isEqualTo(GITHUB);
assertThat(underTest.getEnabledByKey(BITBUCKET.getKey())).isEqualTo(BITBUCKET);
} |
public static Multimap<String, SourceDescription> fetchSourceDescriptions(
final RemoteHostExecutor remoteHostExecutor
) {
final List<SourceDescription> sourceDescriptions = Maps
.transformValues(
remoteHostExecutor.fetchAllRemoteResults().getLeft(),
SourceDescriptionList.class::cast)
.values()
.stream()
.flatMap((rsl) -> rsl.getSourceDescriptions().stream())
.collect(toImmutableList());
return Multimaps.index(sourceDescriptions, SourceDescription::getName);
} | @SuppressWarnings({"unchecked", "rawtypes"})
@Test
public void itShouldReturnRemoteSourceDescriptionsGroupedByName() {
// Given
when(augmenter.fetchAllRemoteResults()).thenReturn(new Pair(response, ImmutableSet.of()));
Multimap<String, SourceDescription> res = RemoteSourceDescriptionExecutor.fetchSourceDescriptions(augmenter);
Map<String, List<SourceDescription>> queryHostCounts = descriptionLists.stream()
.flatMap((v) -> v.getSourceDescriptions().stream())
.collect(Collectors.groupingBy(SourceDescription::getName));
assertThat(res.values(), everyItem(instanceOf(SourceDescription.class)));
response.forEach((host, value) -> value.getSourceDescriptions().forEach(
(sd) -> assertThat(res.get(sd.getName()), hasSize(queryHostCounts.get(sd.getName()).size()))
));
} |
public T send() throws IOException {
return web3jService.send(this, responseType);
} | @Test
public void testShhHasIdentity() throws Exception {
web3j.shhHasIdentity(
"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1")
.send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"shh_hasIdentity\",\"params\":[\"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1\"],\"id\":1}");
} |
@Override
public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) {
if (stateManager.taskType() != TaskType.ACTIVE) {
throw new IllegalStateException("Tried to transition processor context to active but the state manager's " +
"type was " + stateManager.taskType());
}
this.streamTask = streamTask;
this.collector = recordCollector;
this.cache = newCache;
addAllFlushListenersToNewCache();
} | @Test
public void localKeyValueStoreShouldNotAllowInitOrClose() {
foreachSetUp();
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
when(stateManager.getGlobalStore(anyString())).thenReturn(null);
final KeyValueStore<String, Long> keyValueStoreMock = mock(KeyValueStore.class);
when(stateManager.getStore("LocalKeyValueStore")).thenAnswer(answer -> keyValueStoreMock(keyValueStoreMock));
mockStateStoreFlush(keyValueStoreMock);
mockKeyValueStoreOperation(keyValueStoreMock);
context = buildProcessorContextImpl(streamsConfig, stateManager);
final StreamTask task = mock(StreamTask.class);
context.transitionToActive(task, null, null);
mockProcessorNodeWithLocalKeyValueStore();
doTest("LocalKeyValueStore", (Consumer<KeyValueStore<String, Long>>) store -> {
verifyStoreCannotBeInitializedOrClosed(store);
store.flush();
assertTrue(flushExecuted);
store.put("1", 1L);
assertTrue(putExecuted);
store.putIfAbsent("1", 1L);
assertTrue(putIfAbsentExecuted);
store.putAll(Collections.emptyList());
assertTrue(putAllExecuted);
store.delete("1");
assertTrue(deleteExecuted);
assertEquals((Long) VALUE, store.get(KEY));
assertEquals(rangeIter, store.range("one", "two"));
assertEquals(allIter, store.all());
assertEquals(VALUE, store.approximateNumEntries());
});
} |
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public AppInfo get() {
return getAppInfo();
} | @Test
public void testInfo() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("info").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
} |
@Override
public String ping(RedisClusterNode node) {
return execute(node, RedisCommands.PING);
} | @Test
public void testClusterPing() {
RedisClusterNode master = getFirstMaster();
String res = connection.ping(master);
assertThat(res).isEqualTo("PONG");
} |
@Override
public Row deserialize(byte[] message) throws IOException {
return deserialize(message, true);
} | @Test
void testTimestampSpecificSerializeDeserializeNewMapping() throws Exception {
final Tuple4<Class<? extends SpecificRecord>, SpecificRecord, GenericRecord, Row> testData =
AvroTestUtils.getTimestampTestData();
final String schemaString = testData.f1.getSchema().toString();
final AvroRowSerializationSchema serializationSchema =
new AvroRowSerializationSchema(schemaString);
final AvroRowDeserializationSchema deserializationSchema =
new AvroRowDeserializationSchema(schemaString);
final byte[] bytes = serializationSchema.serialize(testData.f3, false);
final Row actual = deserializationSchema.deserialize(bytes, false);
assertThat(actual).isEqualTo(testData.f3);
} |
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
final Map<TaskId, Set<TopicPartition>> standbyTasks) {
log.info("Handle new assignment with:\n" +
"\tNew active tasks: {}\n" +
"\tNew standby tasks: {}\n" +
"\tExisting active tasks: {}\n" +
"\tExisting standby tasks: {}",
activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());
topologyMetadata.addSubscribedTopicsFromAssignment(
activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
logPrefix
);
final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
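        // Lock every currently-owned task that also appears in the new assignment so it is not modified concurrently while being rectified below.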
final Set<TaskId> tasksToLock =
tasks.allTaskIds().stream()
.filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
.collect(Collectors.toSet());
maybeLockTasks(tasksToLock);
// first put aside those unrecognized tasks because of unknown named-topologies
tasks.clearPendingTasksToCreate();
tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));
// first rectify all existing tasks:
// 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
// 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
// 3. otherwise, close them since they are no longer owned
final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
if (stateUpdater == null) {
handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
} else {
handleTasksWithStateUpdater(
activeTasksToCreate,
standbyTasksToCreate,
tasksToRecycle,
tasksToCloseClean,
failedTasks
);
failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
}
final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);
maybeUnlockTasks(tasksToLock);
failedTasks.putAll(taskCloseExceptions);
maybeThrowTaskExceptions(failedTasks);
createNewTasks(activeTasksToCreate, standbyTasksToCreate);
} | @Test
public void shouldKeepReassignedActiveTaskInStateUpdater() {
final StreamTask reassignedActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
when(stateUpdater.getTasks()).thenReturn(mkSet(reassignedActiveTask));
taskManager.handleAssignment(
mkMap(mkEntry(reassignedActiveTask.id(), reassignedActiveTask.inputPartitions())),
Collections.emptyMap()
);
verify(stateUpdater, never()).remove(reassignedActiveTask.id());
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
} |
@Override
public boolean updateReservation(ReservationAllocation reservation)
throws PlanningException {
writeLock.lock();
boolean result = false;
try {
ReservationId resId = reservation.getReservationId();
ReservationAllocation currReservation = getReservationById(resId);
if (currReservation == null) {
String errMsg = "The specified Reservation with ID " + resId
+ " does not exist in the plan";
LOG.error(errMsg);
throw new IllegalArgumentException(errMsg);
}
// validate if we can accept this reservation, throws exception if
// validation fails
policy.validate(this, reservation);
if (!removeReservation(currReservation)) {
LOG.error("Unable to replace reservation: {} from plan.",
reservation.getReservationId());
return result;
}
try {
result = addReservation(reservation, false);
} catch (PlanningException e) {
LOG.error("Unable to update reservation: {} from plan due to {}.",
reservation.getReservationId(), e.getMessage());
}
if (result) {
LOG.info("Successfully updated reservation: {} in plan.",
reservation.getReservationId());
return result;
} else {
// rollback delete
addReservation(currReservation, false);
LOG.info("Rollbacked update reservation: {} from plan.",
reservation.getReservationId());
return result;
}
} finally {
writeLock.unlock();
}
} | @Test
public void testUpdateReservation() {
Plan plan = new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
// First add a reservation
int[] alloc = { 10, 10, 10, 10, 10, 10 };
int start = 100;
ReservationAllocation rAllocation =
createReservationAllocation(reservationID, start, alloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation, false);
} catch (PlanningException e) {
Assert.fail(e.getMessage());
}
doAssertions(plan, rAllocation);
RLESparseResourceAllocation userCons =
plan.getConsumptionForUserOverTime(user, start, start + alloc.length);
for (int i = 0; i < alloc.length; i++) {
assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
plan.getTotalCommittedResources(start + i));
assertEquals(Resource.newInstance(1024 * (alloc[i]), (alloc[i])),
userCons.getCapacityAtTime(start + i));
}
// Now update it
start = 110;
int[] updatedAlloc = { 0, 5, 10, 10, 5, 0 };
rAllocation =
createReservationAllocation(reservationID, start, updatedAlloc, true);
try {
plan.updateReservation(rAllocation);
} catch (PlanningException e) {
Assert.fail(e.getMessage());
}
doAssertions(plan, rAllocation);
userCons = plan.getConsumptionForUserOverTime(user, start,
start + updatedAlloc.length);
for (int i = 0; i < updatedAlloc.length; i++) {
assertEquals(Resource.newInstance(1024 * (updatedAlloc[i] + i),
updatedAlloc[i] + i), plan.getTotalCommittedResources(start + i));
assertEquals(Resource.newInstance(1024 * (updatedAlloc[i] + i),
updatedAlloc[i] + i), userCons.getCapacityAtTime(start + i));
}
} |
public void addAssignmentsForPartitions(final Set<TopicIdPartition> partitions) {
updateAssignments(Objects.requireNonNull(partitions), Collections.emptySet());
} | @Test
public void testAddAssignmentsForPartitions() {
final List<TopicIdPartition> idPartitions = getIdPartitions("sample", 3);
final Map<TopicPartition, Long> endOffsets = idPartitions.stream()
.map(idp -> toRemoteLogPartition(partitioner.metadataPartition(idp)))
.collect(Collectors.toMap(Function.identity(), e -> 0L, (a, b) -> b));
consumer.updateEndOffsets(endOffsets);
consumerTask.addAssignmentsForPartitions(new HashSet<>(idPartitions));
consumerTask.ingestRecords();
for (final TopicIdPartition idPartition : idPartitions) {
assertTrue(consumerTask.isUserPartitionAssigned(idPartition), "Partition " + idPartition + " has not been assigned");
assertTrue(consumerTask.isMetadataPartitionAssigned(partitioner.metadataPartition(idPartition)));
assertTrue(handler.isPartitionLoaded.get(idPartition));
}
} |
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
} | @Test
public void shouldFormatTerminateAllQueries() {
// Given:
final TerminateQuery terminateQuery = TerminateQuery.all(Optional.empty());
// When:
final String formatted = SqlFormatter.formatSql(terminateQuery);
// Then:
assertThat(formatted, is("TERMINATE ALL"));
} |
@Override
public void emit(String emitKey, List<Metadata> metadataList, ParseContext parseContext)
throws IOException, TikaEmitterException {
if (metadataList == null || metadataList.size() < 1) {
return;
}
List<EmitData> emitDataList = new ArrayList<>();
emitDataList.add(new EmitData(new EmitKey("", emitKey), metadataList));
emit(emitDataList);
} | @Test
public void testVarcharTruncation(@TempDir Path tmpDir) throws Exception {
Files.createDirectories(tmpDir.resolve("db"));
Path dbDir = tmpDir.resolve("db/h2");
Path config = tmpDir.resolve("tika-config.xml");
String connectionString = "jdbc:h2:file:" + dbDir.toAbsolutePath();
writeConfig("/configs/tika-config-jdbc-emitter-trunc.xml",
connectionString, config);
EmitterManager emitterManager = EmitterManager.load(config);
Emitter emitter = emitterManager.getEmitter();
List<String[]> data = new ArrayList<>();
data.add(new String[]{"k1", "abcd"});
data.add(new String[]{"k1", "abcdefghijklmnopqrs"});
data.add(new String[]{"k1", "abcdefghijk"});
int id = 0;
for (String[] d : data) {
emitter.emit("id" + id++, Collections.singletonList(m(d)), new ParseContext());
}
int rows = 0;
try (Connection connection = DriverManager.getConnection(connectionString)) {
try (Statement st = connection.createStatement()) {
try (ResultSet rs = st.executeQuery("select * from test")) {
while (rs.next()) {
String s = rs.getString(2);
assertTrue(s.length() < 13);
assertFalse(s.contains("m"));
rows++;
}
}
}
}
assertEquals(3, rows);
} |
@Override
public void finish()
throws IOException
{
printRows(ImmutableList.of(), true);
writer.append(format("(%s row%s)%n", rowCount, (rowCount != 1) ? "s" : ""));
writer.flush();
} | @Test
public void testAlignedPrintingNoRows()
throws Exception
{
StringWriter writer = new StringWriter();
List<String> fieldNames = ImmutableList.of("first", "last");
OutputPrinter printer = new AlignedTablePrinter(fieldNames, writer);
printer.finish();
String expected = "" +
" first | last \n" +
"-------+------\n" +
"(0 rows)\n";
assertEquals(writer.getBuffer().toString(), expected);
} |
@Override
public NetworkClientDelegate.PollResult poll(long currentTimeMs) {
if (!coordinatorRequestManager.coordinator().isPresent() ||
shareMembershipManager.shouldSkipHeartbeat() ||
pollTimer.isExpired()) {
shareMembershipManager.onHeartbeatRequestSkipped();
return NetworkClientDelegate.PollResult.EMPTY;
}
pollTimer.update(currentTimeMs);
if (pollTimer.isExpired() && !shareMembershipManager.isLeavingGroup()) {
logger.warn("Share consumer poll timeout has expired. This means the time between subsequent calls to poll() " +
"was longer than the configured max.poll.interval.ms, which typically implies that " +
"the poll loop is spending too much time processing messages. You can address this " +
"either by increasing max.poll.interval.ms or by reducing the maximum size of batches " +
"returned in poll() with max.poll.records.");
shareMembershipManager.transitionToSendingLeaveGroup(true);
NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, true);
// We can ignore the leave response because we can join before or after receiving the response.
heartbeatRequestState.reset();
heartbeatState.reset();
return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(request));
}
boolean heartbeatNow = shareMembershipManager.shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight();
if (!heartbeatRequestState.canSendRequest(currentTimeMs) && !heartbeatNow) {
return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs));
}
NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, false);
return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(request));
} | @Test
public void testSuccessfulHeartbeatTiming() {
NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size(),
"No heartbeat should be sent while interval has not expired");
assertEquals(heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds()), result.timeUntilNextPollMs);
assertNextHeartbeatTiming(DEFAULT_HEARTBEAT_INTERVAL_MS);
result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size(), "A heartbeat should be sent when interval expires");
NetworkClientDelegate.UnsentRequest inflightReq = result.unsentRequests.get(0);
assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS,
heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds()),
"Heartbeat timer was not reset to the interval when the heartbeat request was sent.");
long partOfInterval = DEFAULT_HEARTBEAT_INTERVAL_MS / 3;
time.sleep(partOfInterval);
result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size(),
"No heartbeat should be sent while only part of the interval has passed");
assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS - partOfInterval,
heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds()),
"Time to next interval was not properly updated.");
inflightReq.handler().onComplete(createHeartbeatResponse(inflightReq, Errors.NONE));
assertNextHeartbeatTiming(DEFAULT_HEARTBEAT_INTERVAL_MS - partOfInterval);
} |
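assertNextHeartbeatTiming is a test helper not shown in this pair; a rough sketch of the behaviour it appears to verify, assuming a MockTime-backed clock (the exact assertions are assumptions):

// Hypothetical helper: advances the mock clock by the expected interval and
// checks that the heartbeat timer counts down accordingly.
private void assertNextHeartbeatTiming(long expectedTimeToNextHeartbeatMs) {
    long timeToNextHeartbeatMs = heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds());
    assertEquals(expectedTimeToNextHeartbeatMs, timeToNextHeartbeatMs,
        "Heartbeat interval was not reset to the expected value");
    time.sleep(timeToNextHeartbeatMs);
    assertEquals(0, heartbeatRequestState.timeToNextHeartbeatMs(time.milliseconds()),
        "The timer should have expired after sleeping the full interval");
}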
public static String serializePublicKey(PublicKey publicKey) throws Exception {
// Serialize the public key
byte[] publicKeyBytes = publicKey.getEncoded();
return Base64.getEncoder().encodeToString(publicKeyBytes);
} | @Test
public void testSerializePublicKey() throws Exception {
KeyPair keyPair = KeyUtil.generateKeyPair("RSA", 2048);
System.out.println("public key = " + KeyUtil.serializePublicKey(keyPair.getPublic()));
System.out.println("private key = " + KeyUtil.serializePrivateKey(keyPair.getPrivate()));
} |
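The test only prints the serialized keys; a short round-trip check using the standard JCA API would make the assertion explicit. The deserializePublicKey helper below is a sketch, not necessarily the project's own method:

import java.security.KeyFactory;
import java.security.PublicKey;
import java.security.spec.X509EncodedKeySpec;
import java.util.Base64;

// Sketch: reverse of serializePublicKey for RSA keys, using the X.509 encoding
// produced by PublicKey#getEncoded().
public static PublicKey deserializePublicKey(String base64) throws Exception {
    byte[] encoded = Base64.getDecoder().decode(base64);
    return KeyFactory.getInstance("RSA").generatePublic(new X509EncodedKeySpec(encoded));
}

// Possible usage in the test:
// PublicKey restored = deserializePublicKey(KeyUtil.serializePublicKey(keyPair.getPublic()));
// assertEquals(keyPair.getPublic(), restored);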
public float getFloat(HazelcastProperty property) {
return Float.valueOf(getString(property));
} | @Test
public void getFloat() {
HazelcastProperty property = new HazelcastProperty("foo", 10.1F);
float foo = defaultProperties.getFloat(property);
assertEquals(10.1F, foo, 0.0001);
} |
public static boolean isConsumer(URL url) {
return url.getProtocol().equalsIgnoreCase(CONSUMER) || url.getPort() == 0;
} | @Test
public void testIsConsumer() {
String address1 = "remote://root:alibaba@127.0.0.1:9090";
URL url1 = UrlUtils.parseURL(address1, null);
String address2 = "consumer://root:alibaba@127.0.0.1:9090";
URL url2 = UrlUtils.parseURL(address2, null);
String address3 = "consumer://root:alibaba@127.0.0.1";
URL url3 = UrlUtils.parseURL(address3, null);
assertFalse(UrlUtils.isConsumer(url1));
assertTrue(UrlUtils.isConsumer(url2));
assertTrue(UrlUtils.isConsumer(url3));
} |
public static List<String> getStackFrameList(final Throwable t, int maxDepth) {
final String stackTrace = getStackTrace(t);
final String linebreak = System.lineSeparator();
final StringTokenizer frames = new StringTokenizer(stackTrace, linebreak);
final List<String> list = new ArrayList<>();
for (int i = 0; i < maxDepth && frames.hasMoreTokens(); i++) {
list.add(frames.nextToken());
}
return list;
} | @Test
void testGetStackFrameList() {
List<String> stackFrameList = ExceptionUtils.getStackFrameList(exception);
Assertions.assertNotEquals(10, stackFrameList.size());
} |
@Override
public String getSinkTableName(Table table) {
String tableName = table.getName();
Map<String, String> sink = config.getSink();
// Add table name mapping logic
String mappingRoute = sink.get(FlinkCDCConfig.TABLE_MAPPING_ROUTES);
if (mappingRoute != null) {
Map<String, String> mappingRules = parseMappingRoute(mappingRoute);
if (mappingRules.containsKey(tableName)) {
tableName = mappingRules.get(tableName);
}
}
tableName = sink.getOrDefault(FlinkCDCConfig.TABLE_PREFIX, "")
+ tableName
+ sink.getOrDefault(FlinkCDCConfig.TABLE_SUFFIX, "");
// table.lower and table.upper can not be true at the same time
if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))
&& Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) {
throw new IllegalArgumentException("table.lower and table.upper can not be true at the same time");
}
if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) {
tableName = tableName.toUpperCase();
}
if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))) {
tableName = tableName.toLowerCase();
}
        // Replace substrings of the table name via a regular expression,
        // configured through table.replace.pattern and table.replace.with
String replacePattern = sink.get(FlinkCDCConfig.TABLE_REPLACE_PATTERN);
String replaceWith = sink.get(FlinkCDCConfig.TABLE_REPLACE_WITH);
if (replacePattern != null && replaceWith != null) {
Pattern pattern = Pattern.compile(replacePattern);
Matcher matcher = pattern.matcher(tableName);
tableName = matcher.replaceAll(replaceWith);
}
// add schema
if (Boolean.parseBoolean(sink.get("table.prefix.schema"))) {
tableName = table.getSchema() + "_" + tableName;
}
return tableName;
} | @Test
public void testGetSinkTableNameWithNoConfigPrefixOrSuffix() {
Map<String, String> sinkConfig = new HashMap<>();
sinkConfig.put("table.prefix", "");
sinkConfig.put("table.suffix", "");
sinkConfig.put("table.lower", "false");
sinkConfig.put("table.upper", "false");
when(config.getSink()).thenReturn(sinkConfig);
Table table = new Table("testTable", "testSchema", null);
String expectedTableName = "testTable";
Assert.assertEquals(expectedTableName, sinkBuilder.getSinkTableName(table));
} |
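parseMappingRoute is referenced by the focal method but not shown; a minimal sketch, assuming the mapping route is a comma-separated list of source:target entries (the format is an assumption, not the project's documented syntax):

import java.util.HashMap;
import java.util.Map;

// Hypothetical parser: "orders:ods_orders,users:ods_users" -> {orders=ods_orders, users=ods_users}
private Map<String, String> parseMappingRoute(String mappingRoute) {
    Map<String, String> rules = new HashMap<>();
    for (String rule : mappingRoute.split(",")) {
        String[] parts = rule.split(":", 2);
        if (parts.length == 2) {
            rules.put(parts[0].trim(), parts[1].trim());
        }
    }
    return rules;
}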
@ShellMethod(key = "compaction showarchived", value = "Shows compaction details for a specific compaction instant")
public String compactionShowArchived(
@ShellOption(value = "--instant", help = "instant time") final String compactionInstantTime,
@ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit,
@ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField,
@ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending,
@ShellOption(value = {"--headeronly"}, help = "Print Header Only",
defaultValue = "false") final boolean headerOnly,
@ShellOption(value = {"--partition"}, help = "Partition value", defaultValue = ShellOption.NULL) final String partition)
throws Exception {
HoodieTableMetaClient client = checkAndGetMetaClient();
HoodieArchivedTimeline archivedTimeline = client.getArchivedTimeline();
HoodieInstant instant = new HoodieInstant(HoodieInstant.State.COMPLETED,
HoodieTimeline.COMPACTION_ACTION, compactionInstantTime);
try {
archivedTimeline.loadCompactionDetailsInMemory(compactionInstantTime);
HoodieCompactionPlan compactionPlan =
TimelineMetadataUtils.deserializeCompactionPlan(archivedTimeline.getInstantDetails(instant).get());
return printCompaction(compactionPlan, sortByField, descending, limit, headerOnly, partition);
} finally {
archivedTimeline.clearInstantDetailsFromMemory(compactionInstantTime);
}
} | @Test
public void testCompactionShowArchived() throws IOException {
generateCompactionInstances();
String instance = "001";
// get compaction plan before compaction
HoodieCompactionPlan plan = TimelineMetadataUtils.deserializeCompactionPlan(
HoodieCLI.getTableMetaClient().reloadActiveTimeline().readCompactionPlanAsBytes(
HoodieTimeline.getCompactionRequestedInstant(instance)).get());
generateArchive();
Object result = shell.evaluate(() -> "compaction showarchived --instant " + instance);
// generate expected
String expected = CompactionCommand.printCompaction(plan, "", false, -1, false, null);
expected = removeNonWordAndStripSpace(expected);
String got = removeNonWordAndStripSpace(result.toString());
assertEquals(expected, got);
} |
static String getAbbreviation(Exception ex,
Integer statusCode,
String storageErrorMessage) {
String result = null;
for (RetryReasonCategory retryReasonCategory : rankedReasonCategories) {
final String abbreviation
= retryReasonCategory.captureAndGetAbbreviation(ex,
statusCode, storageErrorMessage);
if (abbreviation != null) {
result = abbreviation;
}
}
return result;
} | @Test
public void testUnknownHostRetryReason() {
Assertions.assertThat(RetryReason.getAbbreviation(new UnknownHostException(), null, null)).isEqualTo(
UNKNOWN_HOST_EXCEPTION_ABBREVIATION
);
} |
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodeInfo get() {
return getNodeInfo();
} | @Test
public void testNodeInfoSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node")
.path("info/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeInfo(json);
} |
@Override
public void execute() {
boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
if (debugMode) {
log.info("Load balancer enabled: {}, Split enabled: {}.",
conf.isLoadBalancerEnabled(), conf.isLoadBalancerAutoBundleSplitEnabled());
}
if (!isLoadBalancerAutoBundleSplitEnabled()) {
if (debugMode) {
log.info("The load balancer or load balancer split already disabled. Skipping.");
}
return;
}
synchronized (bundleSplitStrategy) {
final Set<SplitDecision> decisions = bundleSplitStrategy.findBundlesToSplit(context, pulsar);
if (debugMode) {
log.info("Split Decisions:", decisions);
}
if (!decisions.isEmpty()) {
// currently following the unloading timeout
var asyncOpTimeoutMs = conf.getNamespaceBundleUnloadingTimeoutMs();
List<CompletableFuture<Void>> futures = new ArrayList<>();
for (SplitDecision decision : decisions) {
if (decision.getLabel() == Success) {
var split = decision.getSplit();
futures.add(
splitManager.waitAsync(
serviceUnitStateChannel.publishSplitEventAsync(split),
split.serviceUnit(),
decision,
asyncOpTimeoutMs, TimeUnit.MILLISECONDS)
);
}
}
try {
FutureUtil.waitForAll(futures)
.get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS);
} catch (Throwable e) {
log.error("Failed to wait for split events to persist.", e);
}
} else {
if (debugMode) {
log.info("BundleSplitStrategy returned no bundles to split.");
}
}
}
if (counter.updatedAt() > counterLastUpdatedAt) {
splitMetrics.set(counter.toMetrics(pulsar.getAdvertisedAddress()));
counterLastUpdatedAt = counter.updatedAt();
}
} | @Test(timeOut = 30 * 1000)
public void testExecuteFailure() {
        AtomicReference<List<Metrics>> reference = new AtomicReference<>();
SplitCounter counter = new SplitCounter();
SplitManager manager = new SplitManager(counter);
SplitScheduler scheduler = new SplitScheduler(pulsar, channel, manager, counter, reference, context, strategy);
doReturn(CompletableFuture.failedFuture(new RuntimeException())).when(channel).publishSplitEventAsync(any());
scheduler.execute();
var counterExpected = new SplitCounter();
counterExpected.update(SplitDecision.Label.Failure, SplitDecision.Reason.Unknown);
counterExpected.update(SplitDecision.Label.Failure, SplitDecision.Reason.Unknown);
verify(channel, times(1)).publishSplitEventAsync(eq(decision1.getSplit()));
verify(channel, times(1)).publishSplitEventAsync(eq(decision2.getSplit()));
assertEquals(reference.get().toString(), counterExpected.toMetrics(pulsar.getAdvertisedAddress()).toString());
} |
public static String get(String urlString, Charset customCharset) {
return HttpRequest.get(urlString).charset(customCharset).execute().body();
} | @Test
@Disabled
public void getNocovTest(){
final String url = "https://qiniu.nocov.cn/medical-manage%2Ftest%2FBANNER_IMG%2F444004467954556928%2F1595215173047icon.png~imgReduce?e=1597081986&token=V2lJYVgQgAv_sbypfEZ0qpKs6TzD1q5JIDVr0Tw8:89cbBkLLwEc9JsMoCLkAEOu820E=";
final String s = HttpUtil.get(url);
Console.log(s);
} |
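The disabled test only exercises the default-charset overload; a minimal example of the charset variant shown in the focal method, useful for pages that are not UTF-8 (the URL is a placeholder):

import java.nio.charset.Charset;

// Sketch: fetch a page whose body is GBK-encoded; the charset only affects decoding of the response body.
String html = HttpUtil.get("https://example.com/legacy-page", Charset.forName("GBK"));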