focal_method | test_case |
---|---|
public void notifyMasterOnline() {
for (ManyPullRequest mpr : this.pullRequestTable.values()) {
if (mpr == null || mpr.isEmpty()) {
continue;
}
for (PullRequest request : mpr.cloneListAndClear()) {
try {
log.info("notify master online, wakeup {} {}", request.getClientChannel(), request.getRequestCommand());
this.brokerController.getPullMessageProcessor().executeRequestWhenWakeup(request.getClientChannel(),
request.getRequestCommand());
} catch (Throwable e) {
log.error("execute request when master online failed.", e);
}
}
}
} | @Test
public void notifyMasterOnlineTest() {
Assertions.assertThatCode(() -> pullRequestHoldService.suspendPullRequest(TEST_TOPIC, DEFAULT_QUEUE_ID, pullRequest)).doesNotThrowAnyException();
Assertions.assertThatCode(() -> pullRequestHoldService.notifyMasterOnline()).doesNotThrowAnyException();
} |
public static <X extends Throwable> void check(boolean expression, Supplier<? extends X> exceptionSupplier) throws X {
if (!expression)
throw exceptionSupplier.get();
} | @Test(expected = ArithmeticException.class)
public void check_false() {
Preconditions.check(false, ArithmeticException::new);
} |
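The Supplier indirection means the exception (and any expensive message) is only constructed when the check actually fails. A standalone sketch of the same lazy-precondition pattern (class and method names here are illustrative):

```java
import java.util.function.Supplier;

public class LazyCheckDemo {
    // The Supplier defers both object allocation and message formatting
    // until the expression is actually false.
    static <X extends Throwable> void check(boolean expression,
                                            Supplier<? extends X> exceptionSupplier) throws X {
        if (!expression) {
            throw exceptionSupplier.get();
        }
    }

    public static void main(String[] args) {
        check(1 + 1 == 2, IllegalStateException::new); // passes, no exception created
        try {
            check(false, () -> new ArithmeticException("boom"));
        } catch (ArithmeticException e) {
            System.out.println("caught: " + e.getMessage()); // caught: boom
        }
    }
}
```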
@Override
public ClassLoaderLease registerClassLoaderLease(JobID jobId) {
synchronized (lockObject) {
return cacheEntries
.computeIfAbsent(jobId, jobID -> new LibraryCacheEntry(jobId))
.obtainLease();
}
} | @Test
public void closingAllLeases_willReleaseUserCodeClassLoader() throws IOException {
final TestingClassLoader classLoader = new TestingClassLoader();
final BlobLibraryCacheManager libraryCacheManager =
new TestingBlobLibraryCacheManagerBuilder()
.setClassLoaderFactory(ignored -> classLoader)
.build();
final JobID jobId = new JobID();
final LibraryCacheManager.ClassLoaderLease classLoaderLease1 =
libraryCacheManager.registerClassLoaderLease(jobId);
final LibraryCacheManager.ClassLoaderLease classLoaderLease2 =
libraryCacheManager.registerClassLoaderLease(jobId);
UserCodeClassLoader userCodeClassLoader =
classLoaderLease1.getOrResolveClassLoader(
Collections.emptyList(), Collections.emptyList());
classLoaderLease1.release();
assertFalse(classLoader.isClosed());
classLoaderLease2.release();
if (wrapsSystemClassLoader) {
assertEquals(userCodeClassLoader.asClassLoader(), ClassLoader.getSystemClassLoader());
assertFalse(classLoader.isClosed());
} else {
assertTrue(classLoader.isClosed());
}
} |
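The test above exercises a reference-counting discipline: the user-code class loader is closed only once every lease obtained for the job has been released. A minimal sketch of that discipline, independent of Flink's classes (all names below are illustrative, not Flink's):

```java
import java.util.concurrent.atomic.AtomicInteger;

class RefCountedResource {
    private final AtomicInteger leases = new AtomicInteger();
    private volatile boolean closed;

    Runnable obtainLease() {
        leases.incrementAndGet();
        return this::release;            // the caller invokes this exactly once
    }

    private void release() {
        if (leases.decrementAndGet() == 0) {
            closed = true;               // stand-in for closing the class loader
        }
    }

    boolean isClosed() { return closed; }

    public static void main(String[] args) {
        RefCountedResource r = new RefCountedResource();
        Runnable lease1 = r.obtainLease();
        Runnable lease2 = r.obtainLease();
        lease1.run();
        System.out.println(r.isClosed()); // false: one lease still held
        lease2.run();
        System.out.println(r.isClosed()); // true: last lease released
    }
}
```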
public static byte[] hexString2Bytes(String hexString) {
if (isSpace(hexString)) return null;
int len = hexString.length();
if (len % 2 != 0) {
hexString = "0" + hexString;
len = len + 1;
}
char[] hexBytes = hexString.toUpperCase().toCharArray();
byte[] ret = new byte[len >> 1];
for (int i = 0; i < len; i += 2) {
ret[i >> 1] = (byte) (hex2Dec(hexBytes[i]) << 4 | hex2Dec(hexBytes[i + 1]));
}
return ret;
} | @Test
public void hexString2Bytes() throws Exception {
TestCase.assertTrue(
Arrays.equals(
mBytes,
ConvertKit.hexString2Bytes(hexString)
)
);
} |
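Note the odd-length handling: the input is left-padded with '0', so "ABC" decodes identically to "0ABC". A condensed, standalone restatement of the conversion that makes this visible:

```java
import java.util.Arrays;

public class HexDemo {
    public static void main(String[] args) {
        // Odd-length input is left-padded with '0', so "ABC" decodes as "0ABC".
        System.out.println(Arrays.toString(decode("ABC")));  // [10, -68]
        System.out.println(Arrays.toString(decode("0ABC"))); // [10, -68]
    }

    // Condensed re-statement of hexString2Bytes above.
    static byte[] decode(String hex) {
        if (hex.length() % 2 != 0) hex = "0" + hex;
        char[] cs = hex.toUpperCase().toCharArray();
        byte[] out = new byte[cs.length >> 1];
        for (int i = 0; i < cs.length; i += 2) {
            out[i >> 1] = (byte) (Character.digit(cs[i], 16) << 4 | Character.digit(cs[i + 1], 16));
        }
        return out;
    }
}
```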
@SuppressWarnings("rawtypes")
@Converter(fallback = true)
public static <T> T convertTo(Class<T> type, Exchange exchange, Object value, TypeConverterRegistry registry) {
if (value instanceof JavaResult.ResultMap) {
for (Object mapValue : ((Map) value).values()) {
if (type.isInstance(mapValue)) {
return type.cast(mapValue);
}
}
}
return null;
} | @Test
public void convertStringResultToStreamSource() throws Exception {
StringResult stringResult = createStringResult("Bajja");
StreamSource streamSource = typeConverter.convertTo(StreamSource.class, stringResult);
BufferedReader reader = new BufferedReader(streamSource.getReader());
assertEquals("Bajja", reader.readLine());
} |
@Override
public void run() {
try {
backgroundJobServer.getJobSteward().notifyThreadOccupied();
MDCMapper.loadMDCContextFromJob(job);
performJob();
} catch (Exception e) {
if (isJobDeletedWhileProcessing(e)) {
// nothing to do anymore as Job is deleted
return;
} else if (isJobServerStopped(e)) {
updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
Thread.currentThread().interrupt();
} else if (isJobNotFoundException(e)) {
updateJobStateToFailedAndRunJobFilters("Job method not found", e);
} else {
updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
}
} finally {
backgroundJobServer.getJobSteward().notifyThreadIdle();
MDC.clear();
}
} | @Test
void allStateChangesArePassingViaTheApplyStateFilterOnSuccess() {
Job job = anEnqueuedJob().build();
when(backgroundJobServer.getBackgroundJobRunner(job)).thenReturn(new BackgroundStaticFieldJobWithoutIocRunner());
BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
backgroundJobPerformer.run();
assertThat(logAllStateChangesFilter.getStateChanges(job)).containsExactly("ENQUEUED->PROCESSING", "PROCESSING->SUCCEEDED");
assertThat(logAllStateChangesFilter.onProcessingIsCalled(job)).isTrue();
assertThat(logAllStateChangesFilter.onProcessingSucceededIsCalled(job)).isTrue();
assertThat(logAllStateChangesFilter.onProcessingFailedIsCalled(job)).isFalse();
} |
static ViewHistoryEntry fromJson(String json) {
return JsonUtil.parse(json, ViewHistoryEntryParser::fromJson);
} | @Test
public void testViewHistoryEntryMissingFields() {
assertThatThrownBy(() -> ViewHistoryEntryParser.fromJson("{}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing int: version-id");
assertThatThrownBy(() -> ViewHistoryEntryParser.fromJson("{\"timestamp-ms\":\"123\"}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing int: version-id");
assertThatThrownBy(() -> ViewHistoryEntryParser.fromJson("{\"version-id\":1}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing long: timestamp-ms");
} |
public static HazelcastInstance getOrCreateHazelcastClient() {
return getOrCreateClientInternal(null);
} | @Test
public void testGetOrCreateHazelcastClientConcurrently() throws ExecutionException, InterruptedException {
String instanceName = randomString();
ClientConfig config = new ClientConfig();
config.setInstanceName(instanceName);
int clientCount = 10;
List<HazelcastInstance> clients = Collections.synchronizedList(new ArrayList<>(clientCount));
List<Future<?>> futures = new ArrayList<>(clientCount);
for (int i = 0; i < clientCount; i++) {
futures.add(spawn(() -> {
clients.add(HazelcastClient.getOrCreateHazelcastClient(config));
}));
}
for (int i = 0; i < clientCount; i++) {
futures.get(i).get();
}
assertEquals(clientCount, clients.size());
for (int i = 1; i < clientCount; i++) {
assertEquals(clients.get(0), clients.get(i));
}
} |
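The test spawns ten threads and asserts that all of them receive the same client instance. One common way to provide that guarantee is ConcurrentHashMap.computeIfAbsent, which runs the factory at most once per key; a minimal sketch of the pattern (not Hazelcast's actual implementation):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class InstanceRegistry<T> {
    private final ConcurrentHashMap<String, T> instances = new ConcurrentHashMap<>();

    T getOrCreate(String name, Function<String, T> factory) {
        // computeIfAbsent is atomic per key: under contention the factory
        // runs at most once and every caller receives its result.
        return instances.computeIfAbsent(name, factory);
    }
}
```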
public static String getSDbl( double Value, int DecPrec ) {
//
String Result = "";
//
if ( Double.isNaN( Value ) ) return "NaN";
//
if ( DecPrec < 0 ) DecPrec = 0;
//
String DFS = "###,###,##0";
//
if ( DecPrec > 0 ) {
int idx = 0;
DFS += ".";
while ( idx < DecPrec ) {
DFS = DFS + "0";
idx ++;
if ( idx > 100 ) break;
}
}
//
// Locale locale = new Locale("en", "UK");
//
DecimalFormatSymbols DcmFrmSmb = new DecimalFormatSymbols( Locale.getDefault());
DcmFrmSmb.setDecimalSeparator('.');
DcmFrmSmb.setGroupingSeparator(' ');
//
DecimalFormat DcmFrm;
//
DcmFrm = new DecimalFormat( DFS, DcmFrmSmb );
//
// DcmFrm.setGroupingSize( 3 );
//
Result = DcmFrm.format( Value );
//
return Result;
} | @Test
public void testgetSDbl() throws Exception {
//
assertEquals( "NaN", BTools.getSDbl( Double.NaN, 0 ) );
assertEquals( "-6", BTools.getSDbl( -5.5D, 0 ) );
assertEquals( "-5.50", BTools.getSDbl( -5.5D, 2 ) );
assertEquals( "-5.30", BTools.getSDbl( -5.3D, 2 ) );
assertEquals( "-5", BTools.getSDbl( -5.3D, 0 ) );
assertEquals( "0.00", BTools.getSDbl( 0D, 2 ) );
assertEquals( "0", BTools.getSDbl( 0D, 0 ) );
assertEquals( "0.30", BTools.getSDbl( 0.3D, 2 ) );
assertEquals( "4.50", BTools.getSDbl( 4.5D, 2 ) );
assertEquals( "4", BTools.getSDbl( 4.5D, 0 ) );
assertEquals( "6", BTools.getSDbl( 5.5D, 0 ) );
assertEquals( "12 345 678", BTools.getSDbl( 12345678D, 0 ) );
//
assertEquals( "-456", BTools.getSDbl( -456D, 0, false ) );
assertEquals( "-456", BTools.getSDbl( -456D, 0, true ) );
assertEquals( "+456", BTools.getSDbl( 456D, 0, true ) );
assertEquals( "456", BTools.getSDbl( 456D, 0, false ) );
assertEquals( " 0", BTools.getSDbl( 0D, 0, true ) );
assertEquals( "0", BTools.getSDbl( 0D, 0, false ) );
//
assertEquals( " 4.50", BTools.getSDbl( 4.5D, 2, false, 6 ) );
assertEquals( " +4.50", BTools.getSDbl( 4.5D, 2, true, 6 ) );
assertEquals( " +456", BTools.getSDbl( 456D, 0, true, 7 ) );
assertEquals( " 456", BTools.getSDbl( 456D, 0, false, 7 ) );
//
} |
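getSDbl pins the decimal separator to '.' and the grouping separator to a space regardless of the default locale, and builds the pattern by appending one '0' per requested decimal place. A condensed standalone equivalent:

```java
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;

public class FormatDemo {
    public static void main(String[] args) {
        DecimalFormatSymbols symbols = new DecimalFormatSymbols(Locale.getDefault());
        symbols.setDecimalSeparator('.');   // locale-independent decimal point
        symbols.setGroupingSeparator(' ');  // spaces between thousands groups
        DecimalFormat fmt = new DecimalFormat("###,###,##0.00", symbols); // DecPrec == 2
        System.out.println(fmt.format(12345678d)); // 12 345 678.00
        System.out.println(fmt.format(-5.5d));     // -5.50
    }
}
```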
public static boolean isDigits(String str) {
if (StringUtils.isEmpty(str)) {
return false;
}
for (int i = 0; i < str.length(); i++) {
if (!Character.isDigit(str.charAt(i))) {
return false;
}
}
return true;
} | @Test
void testIsDigits() {
assertFalse(NumberUtils.isDigits(null));
assertFalse(NumberUtils.isDigits(""));
assertTrue(NumberUtils.isDigits("12345"));
assertFalse(NumberUtils.isDigits("1234.5"));
assertFalse(NumberUtils.isDigits("1ab"));
assertFalse(NumberUtils.isDigits("abc"));
} |
public Optional<Branch> findDefaultBranch() {
return branches.stream().filter(Branch::isDefault).findFirst();
} | @Test
public void findDefaultBranch_givenNoBranches_returnEmptyOptional(){
BranchesList branchesList = new BranchesList();
Optional<Branch> defaultBranch = branchesList.findDefaultBranch();
assertThat(defaultBranch).isNotPresent();
} |
@Override
public Health health() {
Map<String, Health> healths = circuitBreakerRegistry.getAllCircuitBreakers().stream()
.filter(this::isRegisterHealthIndicator)
.collect(Collectors.toMap(CircuitBreaker::getName,
this::mapBackendMonitorState));
Status status = this.statusAggregator.getAggregateStatus(healths.values().stream().map(Health::getStatus).collect(Collectors.toSet()));
return Health.status(status).withDetails(healths).build();
} | @Test
public void healthIndicatorMaxImpactCanBeOverridden() {
CircuitBreaker openCircuitBreaker = mock(CircuitBreaker.class);
CircuitBreaker halfOpenCircuitBreaker = mock(CircuitBreaker.class);
CircuitBreaker closeCircuitBreaker = mock(CircuitBreaker.class);
Map<CircuitBreaker.State, CircuitBreaker> expectedStateToCircuitBreaker = new HashMap<>();
expectedStateToCircuitBreaker.put(OPEN, openCircuitBreaker);
expectedStateToCircuitBreaker.put(HALF_OPEN, halfOpenCircuitBreaker);
expectedStateToCircuitBreaker.put(CLOSED, closeCircuitBreaker);
CircuitBreakerConfigurationProperties.InstanceProperties instanceProperties =
mock(CircuitBreakerConfigurationProperties.InstanceProperties.class);
CircuitBreakerConfigurationProperties circuitBreakerProperties = mock(CircuitBreakerConfigurationProperties.class);
// given
CircuitBreakerRegistry registry = mock(CircuitBreakerRegistry.class);
CircuitBreakerConfig config = mock(CircuitBreakerConfig.class);
CircuitBreaker.Metrics metrics = mock(CircuitBreaker.Metrics.class);
// when
when(registry.getAllCircuitBreakers()).thenReturn(new HashSet<>(expectedStateToCircuitBreaker.values()));
boolean allowHealthIndicatorToFail = false; // do not allow health indicator to fail
expectedStateToCircuitBreaker.forEach(
(state, circuitBreaker) -> setCircuitBreakerWhen(state, circuitBreaker, config, metrics, instanceProperties, circuitBreakerProperties, allowHealthIndicatorToFail));
CircuitBreakersHealthIndicator healthIndicator =
new CircuitBreakersHealthIndicator(registry, circuitBreakerProperties, new SimpleStatusAggregator());
// then
Health health = healthIndicator.health();
then(health.getStatus()).isEqualTo(Status.UP);
then(health.getDetails()).containsKeys(OPEN.name(), HALF_OPEN.name(), CLOSED.name());
assertState(OPEN, new Status("CIRCUIT_OPEN"), health.getDetails());
assertState(HALF_OPEN, new Status("CIRCUIT_HALF_OPEN"), health.getDetails());
assertState(CLOSED, Status.UP, health.getDetails());
} |
@Override
public List<MemberLevelDO> getLevelList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return Collections.emptyList();
}
return memberLevelMapper.selectBatchIds(ids);
} | @Test
public void testGetLevelList() {
// mock data
MemberLevelDO dbLevel = randomPojo(MemberLevelDO.class, o -> { // expected to be returned by the query
o.setName("黄金会员");
o.setStatus(1);
});
memberlevelMapper.insert(dbLevel);
// name does not match
memberlevelMapper.insert(cloneIgnoreId(dbLevel, o -> o.setName("")));
// status does not match
memberlevelMapper.insert(cloneIgnoreId(dbLevel, o -> o.setStatus(0)));
// prepare the parameters
MemberLevelListReqVO reqVO = new MemberLevelListReqVO();
reqVO.setName("黄金会员");
reqVO.setStatus(1);
// invoke
List<MemberLevelDO> list = levelService.getLevelList(reqVO);
// assert
assertEquals(1, list.size());
assertPojoEquals(dbLevel, list.get(0));
} |
@Override
public FSDataOutputStream create(Path path, boolean overwrite, int bufferSize, short replication,
long blockSize, Progressable progress) throws IOException {
String confUmask = mAlluxioConf.getString(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK);
Mode mode = ModeUtils.applyFileUMask(Mode.defaults(), confUmask);
return this.create(path, new FsPermission(mode.toShort()), overwrite, bufferSize, replication,
blockSize, progress);
} | @Test
public void resetContextFromZookeeperToMultiMaster() throws Exception {
URI uri = URI.create(Constants.HEADER + "zk@zkHost:2181/tmp/path.txt");
FileSystem fs = getHadoopFilesystem(org.apache.hadoop.fs.FileSystem.get(uri, getConf()));
assertTrue(fs.mFileSystem.getConf().getBoolean(PropertyKey.ZOOKEEPER_ENABLED));
assertEquals("zkHost:2181", fs.mFileSystem.getConf().get(PropertyKey.ZOOKEEPER_ADDRESS));
uri = URI.create(Constants.HEADER + "host1:19998,host2:19998,host3:19998/tmp/path.txt");
fs = getHadoopFilesystem(org.apache.hadoop.fs.FileSystem.get(uri, getConf()));
assertFalse(fs.mFileSystem.getConf().getBoolean(PropertyKey.ZOOKEEPER_ENABLED));
assertEquals(3,
ConfigurationUtils.getMasterRpcAddresses(fs.mFileSystem.getConf()).size());
assertEquals("host1:19998,host2:19998,host3:19998",
fs.mFileSystem.getConf().get(PropertyKey.MASTER_RPC_ADDRESSES));
} |
public SearchChainRegistry getSearchChainRegistry() {
return executionFactory.searchChainRegistry();
} | @Test
synchronized void testWorkingReconfiguration() throws Exception {
assertJsonResult("http://localhost?query=abc", driver);
// reconfiguration
IOUtils.copyDirectory(new File(testDir, "handlers2"), new File(tempDir), 1);
generateComponentsConfigForActive();
configurer.reloadConfig();
// ...and check the resulting config
SearchHandler newSearchHandler = fetchSearchHandler(configurer);
assertNotSame(searchHandler, newSearchHandler, "Have a new instance of the search handler");
assertNotNull(fetchSearchHandler(configurer).getSearchChainRegistry().getChain("hello"), "Have the new search chain");
assertNull(fetchSearchHandler(configurer).getSearchChainRegistry().getChain("classLoadingError"), "Don't have the new search chain");
try (RequestHandlerTestDriver newDriver = new RequestHandlerTestDriver(newSearchHandler)) {
assertJsonResult("http://localhost?query=abc", newDriver);
}
} |
@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpRequest msg) throws Exception {
CommandContext commandContext = HttpCommandDecoder.decode(msg);
// return 404 when fail to construct command context
if (commandContext == null) {
log.warn(QOS_UNEXPECTED_EXCEPTION, "", "", "can not found commandContext, url: " + msg.uri());
FullHttpResponse response = http(404);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} else {
commandContext.setRemote(ctx.channel());
commandContext.setQosConfiguration(qosConfiguration);
try {
String result = commandExecutor.execute(commandContext);
int httpCode = commandContext.getHttpCode();
FullHttpResponse response = http(httpCode, result);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} catch (NoSuchCommandException ex) {
log.error(QOS_COMMAND_NOT_FOUND, "", "", "can not find command: " + commandContext, ex);
FullHttpResponse response = http(404);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} catch (PermissionDenyException ex) {
log.error(
QOS_PERMISSION_DENY_EXCEPTION,
"",
"",
"permission deny to access command: " + commandContext,
ex);
FullHttpResponse response = http(403);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} catch (Exception qosEx) {
log.error(
QOS_UNEXPECTED_EXCEPTION,
"",
"",
"execute commandContext: " + commandContext + " got exception",
qosEx);
FullHttpResponse response = http(500, qosEx.getMessage());
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
}
}
} | @Test
void test3() throws Exception {
ChannelHandlerContext context = mock(ChannelHandlerContext.class);
ChannelFuture future = mock(ChannelFuture.class);
when(context.writeAndFlush(any(FullHttpResponse.class))).thenReturn(future);
HttpRequest message = Mockito.mock(HttpRequest.class);
when(message.uri()).thenReturn("localhost:80/test");
when(message.method()).thenReturn(HttpMethod.GET);
HttpProcessHandler handler = new HttpProcessHandler(
FrameworkModel.defaultModel(),
QosConfiguration.builder()
.anonymousAccessPermissionLevel(PermissionLevel.NONE.name())
.build());
handler.channelRead0(context, message);
verify(future).addListener(ChannelFutureListener.CLOSE);
ArgumentCaptor<FullHttpResponse> captor = ArgumentCaptor.forClass(FullHttpResponse.class);
verify(context).writeAndFlush(captor.capture());
FullHttpResponse response = captor.getValue();
assertThat(response.status().code(), equalTo(404));
} |
public static PinotFS getLocalPinotFs() {
return new LocalPinotFS();
} | @Test
public void testGetLocalPinotFs() {
assertTrue(MinionTaskUtils.getLocalPinotFs() instanceof LocalPinotFS);
} |
@Override
public V get() throws InterruptedException, ExecutionException {
return resolve(super.get());
} | @Test
public void test_get_Data_withTimeout() throws Exception {
Object value = "value";
DeserializingCompletableFuture<Object> future = new DeserializingCompletableFuture<>(serializationService, deserialize);
future.complete(serializationService.toData(value));
if (deserialize) {
assertEquals(value, future.get(1, TimeUnit.MILLISECONDS));
} else {
assertEquals(serializationService.toData(value), future.get());
}
} |
public static String processPattern(String pattern, TbMsg tbMsg) {
try {
String result = processPattern(pattern, tbMsg.getMetaData());
JsonNode json = JacksonUtil.toJsonNode(tbMsg.getData());
if (json.isObject()) {
Matcher matcher = DATA_PATTERN.matcher(result);
while (matcher.find()) {
String group = matcher.group(2);
String[] keys = group.split("\\.");
JsonNode jsonNode = json;
for (String key : keys) {
if (!StringUtils.isEmpty(key) && jsonNode != null) {
jsonNode = jsonNode.get(key);
} else {
jsonNode = null;
break;
}
}
if (jsonNode != null && jsonNode.isValueNode()) {
result = result.replace(formatDataVarTemplate(group), jsonNode.asText());
}
}
}
return result;
} catch (Exception e) {
throw new RuntimeException("Failed to process pattern!", e);
}
} | @Test
public void testArrayReplacementDoesNotWork() {
String pattern = "ABC ${key} $[key1.key2[0].key3]";
TbMsgMetaData md = new TbMsgMetaData();
md.putValue("key", "metadata_value");
ObjectNode key2Node = JacksonUtil.newObjectNode();
key2Node.put("key3", "value3");
ObjectNode key1Node = JacksonUtil.newObjectNode();
key1Node.set("key2", key2Node);
ObjectNode node = JacksonUtil.newObjectNode();
node.set("key1", key1Node);
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID, md, JacksonUtil.toString(node));
String result = TbNodeUtils.processPattern(pattern, msg);
Assertions.assertEquals("ABC metadata_value $[key1.key2[0].key3]", result);
} |
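The `$[...]` body references are resolved by splitting the path on '.' and doing plain field lookups on the JSON tree, so an array index like `key2[0]` is treated as a (nonexistent) field name and the placeholder is left untouched, which is exactly what the test pins down. A standalone sketch of that dotted-path walk:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class PathDemo {
    public static void main(String[] args) throws Exception {
        JsonNode json = new ObjectMapper().readTree("{\"key1\":{\"key2\":\"v\"}}");
        System.out.println(resolve(json, "key1.key2"));     // v
        System.out.println(resolve(json, "key1.key2[0]"));  // null: "key2[0]" is not a field
    }

    static String resolve(JsonNode node, String path) {
        for (String key : path.split("\\.")) {
            if (node == null) return null;
            node = node.get(key); // plain field lookup; no array-index syntax
        }
        return node != null && node.isValueNode() ? node.asText() : null;
    }
}
```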
@Override
public void run(Namespace namespace, Liquibase liquibase) throws Exception {
final String tag = namespace.getString("tag");
final Integer count = namespace.getInt("count");
final Date date = namespace.get("date");
final boolean dryRun = namespace.getBoolean("dry-run") != null && namespace.getBoolean("dry-run");
final String context = getContext(namespace);
if (Stream.of(tag, count, date).filter(Objects::nonNull).count() != 1) {
throw new IllegalArgumentException("Must specify either a count, a tag, or a date.");
}
if (count != null) {
if (dryRun) {
liquibase.rollback(count, context, new OutputStreamWriter(outputStream, StandardCharsets.UTF_8));
} else {
liquibase.rollback(count, context);
}
} else if (tag != null) {
if (dryRun) {
liquibase.rollback(tag, context, new OutputStreamWriter(outputStream, StandardCharsets.UTF_8));
} else {
liquibase.rollback(tag, context);
}
} else {
if (dryRun) {
liquibase.rollback(date, context, new OutputStreamWriter(outputStream, StandardCharsets.UTF_8));
} else {
liquibase.rollback(date, context);
}
}
} | @Test
void testRollbackToDate() throws Exception {
// Migrate some DDL changes to the database
long migrationDate = System.currentTimeMillis();
migrateCommand.run(null, new Namespace(Map.of()), conf);
try (Handle handle = dbi.open()) {
assertThat(MigrationTestSupport.tableExists(handle, "PERSONS"))
.isTrue();
}
// Rollback both changes (they're after the migration date)
rollbackCommand.run(null, new Namespace(Map.of("date", new Date(migrationDate - 1000))),
conf);
try (Handle handle = dbi.open()) {
assertThat(MigrationTestSupport.tableExists(handle, "PERSONS"))
.isFalse();
}
} |
@Override
public boolean isAvailable() {
try {
return multicastSocket != null;
} catch (Throwable t) {
return false;
}
} | @Test
void testAvailability() {
int port = NetUtils.getAvailablePort(20880 + new Random().nextInt(10000));
MulticastRegistry registry = new MulticastRegistry(URL.valueOf("multicast://224.5.6.8:" + port));
assertTrue(registry.isAvailable());
} |
private Map<String, Object> augmentAndFilterConnectorConfig(String connectorConfigs) throws IOException {
return augmentAndFilterConnectorConfig(connectorConfigs, instanceConfig, secretsProvider,
componentClassLoader, componentType);
} | @Test
public void testSourceConfigParsingPreservesOriginalType() throws Exception {
final Map<String, Object> parsedConfig = JavaInstanceRunnable.augmentAndFilterConnectorConfig(
"{\"ttl\": 9223372036854775807}",
new InstanceConfig(),
new EnvironmentBasedSecretsProvider(),
null,
FunctionDetails.ComponentType.SOURCE
);
Assert.assertEquals(parsedConfig.get("ttl").getClass(), Long.class);
Assert.assertEquals(parsedConfig.get("ttl"), Long.MAX_VALUE);
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void deleteStickerFromSet() {
InputSticker inputSticker = new InputSticker("BQADAgADuAAD7yupS4eB23UmZhGuAg", Sticker.Format.Static, new String[]{"\uD83D\uDE15"});
BaseResponse response = bot.execute(new AddStickerToSet(chatId, stickerSet, inputSticker));
assertTrue(response.isOk());
GetStickerSetResponse setResponse = bot.execute(new GetStickerSet(stickerSet));
int size = setResponse.stickerSet().stickers().length;
Sticker sticker = setResponse.stickerSet().stickers()[size - 1];
response = bot.execute(new DeleteStickerFromSet(sticker.fileId()));
if (!response.isOk()) {
assertEquals(400, response.errorCode());
assertEquals("Bad Request: STICKERSET_NOT_MODIFIED", response.description());
}
} |
@Udf
public <T> T asValue(final T keyColumn) {
return keyColumn;
} | @Test
public void shouldHandlePrimitiveTypes() {
assertThat(udf.asValue(Boolean.TRUE), is(Boolean.TRUE));
assertThat(udf.asValue(Integer.MIN_VALUE), is(Integer.MIN_VALUE));
assertThat(udf.asValue(Long.MAX_VALUE), is(Long.MAX_VALUE));
assertThat(udf.asValue(Double.MAX_VALUE), is(Double.MAX_VALUE));
assertThat(udf.asValue("string"), is("string"));
} |
@Override public Message receive() {
Message message = delegate.receive();
handleReceive(message);
return message;
} | @Test void receive_creates_consumer_span() throws Exception {
ActiveMQTextMessage message = new ActiveMQTextMessage();
receive(message);
MutableSpan consumer = testSpanHandler.takeRemoteSpan(CONSUMER);
assertThat(consumer.name()).isEqualTo("receive");
} |
public static byte[] checkPassword(String passwdString) {
if (Strings.isNullOrEmpty(passwdString)) {
return EMPTY_PASSWORD;
}
byte[] passwd;
passwdString = passwdString.toUpperCase();
passwd = passwdString.getBytes(StandardCharsets.UTF_8);
if (passwd.length != SCRAMBLE_LENGTH_HEX_LENGTH || passwd[0] != PVERSION41_CHAR) {
throw ErrorReportException.report(ErrorCode.ERR_PASSWD_LENGTH, 41);
}
for (int i = 1; i < passwd.length; ++i) {
if (!((passwd[i] <= '9' && passwd[i] >= '0') || passwd[i] >= 'A' && passwd[i] <= 'F')) {
throw ErrorReportException.report(ErrorCode.ERR_PASSWD_LENGTH, 41);
}
}
return passwd;
} | @Test
public void testCheckPassword() {
Assert.assertEquals("*9A6EC51164108A8D3DA3BE3F35A56F6499B6FC32",
new String(MysqlPassword.checkPassword("*9A6EC51164108A8D3DA3BE3F35A56F6499B6FC32")));
Assert.assertEquals("", new String(MysqlPassword.checkPassword(null)));
} |
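checkPassword accepts only MySQL's 4.1 scrambled form; assuming SCRAMBLE_LENGTH_HEX_LENGTH is 41 and PVERSION41_CHAR is '*', the accepted shape is a '*' marker followed by 40 hex digits (case-insensitive on input, since the method uppercases first). A standalone restatement of the shape check under those assumptions:

```java
public class ScrambleCheckDemo {
    // Assumption: 41 == '*' marker plus 40 hex digits (hex-encoded double SHA-1).
    static boolean looksScrambled(String s) {
        if (s == null || s.length() != 41 || s.charAt(0) != '*') return false;
        for (int i = 1; i < s.length(); i++) {
            char c = Character.toUpperCase(s.charAt(i));
            if (!((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F'))) return false;
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(looksScrambled("*9A6EC51164108A8D3DA3BE3F35A56F6499B6FC32")); // true
        System.out.println(looksScrambled("plaintext"));                                 // false
    }
}
```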
@Override
public void execute(Exchange exchange) throws SmppException {
SubmitSm[] submitSms = createSubmitSm(exchange);
List<String> messageIDs = new ArrayList<>(submitSms.length);
String messageID = null;
for (int i = 0; i < submitSms.length; i++) {
SubmitSm submitSm = submitSms[i];
messageID = null;
if (log.isDebugEnabled()) {
log.debug("Sending short message {} for exchange id '{}'...", i, exchange.getExchangeId());
}
try {
SubmitSmResult result = session.submitShortMessage(
submitSm.getServiceType(),
TypeOfNumber.valueOf(submitSm.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(submitSm.getSourceAddrNpi()),
submitSm.getSourceAddr(),
TypeOfNumber.valueOf(submitSm.getDestAddrTon()),
NumberingPlanIndicator.valueOf(submitSm.getDestAddrNpi()),
submitSm.getDestAddress(),
new ESMClass(submitSm.getEsmClass()),
submitSm.getProtocolId(),
submitSm.getPriorityFlag(),
submitSm.getScheduleDeliveryTime(),
submitSm.getValidityPeriod(),
new RegisteredDelivery(submitSm.getRegisteredDelivery()),
submitSm.getReplaceIfPresent(),
DataCodings.newInstance(submitSm.getDataCoding()),
(byte) 0,
submitSm.getShortMessage(),
submitSm.getOptionalParameters());
if (result != null) {
messageID = result.getMessageId();
}
} catch (Exception e) {
throw new SmppException(e);
}
if (messageID != null) {
messageIDs.add(messageID);
}
}
if (log.isDebugEnabled()) {
log.debug("Sent short message for exchange id '{}' and received message ids '{}'",
exchange.getExchangeId(), messageIDs);
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, messageIDs);
message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
} | @Test
public void executeWithConfigurationData() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitSm");
exchange.getIn().setHeader(SmppConstants.ID, "1");
exchange.getIn().setBody(
"1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890");
when(session.submitShortMessage(eq("CMT"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1717"), eq(new ESMClass()), eq((byte) 0),
eq((byte) 1),
(String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
eq(ReplaceIfPresentFlag.DEFAULT.value()),
eq(DataCodings.newInstance((byte) 0)), eq((byte) 0),
eq("1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"
.getBytes())))
.thenReturn(new SubmitSmResult(new MessageId("1"), null));
command.execute(exchange);
assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
assertEquals(1, exchange.getMessage().getHeader(SmppConstants.SENT_MESSAGE_COUNT));
} |
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
if (existsInTfsJar(name)) {
return jarClassLoader.loadClass(name);
}
return super.loadClass(name);
} | @Test
public void canLoadClassFromTopLevelOfJar() throws Exception {
assertThat(nestedJarClassLoader.loadClass(JAR_CLASS))
.isNotNull()
.hasPackage("");
} |
public Map<String, String> getLabels(String labelType) {
if (CollectionUtils.isEmpty(labels)) {
return Collections.emptyMap();
}
Map<String, String> subLabels = labels.get(labelType);
if (CollectionUtils.isEmpty(subLabels)) {
return Collections.emptyMap();
}
return Collections.unmodifiableMap(subLabels);
} | @Test
public void testGetEmptyRouterContext() {
PolarisRouterContext routerContext = new PolarisRouterContext();
assertThat(routerContext.getLabels(RouterConstant.TRANSITIVE_LABELS).size()).isEqualTo(0);
assertThat(routerContext.getLabels(RouterConstant.ROUTER_LABELS).size()).isEqualTo(0);
} |
@PostMapping
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE)
public Boolean createNamespace(@RequestParam("customNamespaceId") String namespaceId,
@RequestParam("namespaceName") String namespaceName,
@RequestParam(value = "namespaceDesc", required = false) String namespaceDesc) {
if (StringUtils.isBlank(namespaceId)) {
namespaceId = UUID.randomUUID().toString();
} else {
namespaceId = namespaceId.trim();
if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) {
return false;
}
if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) {
return false;
}
// check unique
if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) {
return false;
}
}
// contains illegal chars
if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) {
return false;
}
try {
return namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc);
} catch (NacosException e) {
return false;
}
} | @Test
void testCreateNamespaceWithIllegalCustomId() throws Exception {
assertFalse(namespaceController.createNamespace("test.Id", "testName", "testDesc"));
verify(namespaceOperationService, never()).createNamespace("test.Id", "testName", "testDesc");
} |
public void trackReceiveMessageData(String sfDate, String msgId) {
try {
if (mPushHandler.hasMessages(GT_PUSH_MSG) && mGeTuiPushInfoMap.containsKey(msgId)) {
mPushHandler.removeMessages(GT_PUSH_MSG);
SALog.i(TAG, "remove GeTui Push Message");
NotificationInfo push = mGeTuiPushInfoMap.get(msgId);
if (push != null) {
PushAutoTrackHelper.trackGeTuiNotificationClicked(push.title, push.content, sfDate, push.time);
}
mGeTuiPushInfoMap.remove(msgId);
SALog.i(TAG, " onGeTuiReceiveMessage:msg id : " + msgId);
}
} catch (Exception e) {
SALog.printStackTrace(e);
}
} | @Test
public void trackReceiveMessageData() {
SAHelper.initSensors(mApplication);
PushProcess.getInstance().trackReceiveMessageData("sdajh-asdjfhjas", "mock_123213");
} |
@Override
public void afterJob(JobExecution jobExecution) {
LOG.debug("sending after job execution event [{}]...", jobExecution);
producerTemplate.sendBodyAndHeader(endpointUri, jobExecution, EventType.HEADER_KEY, EventType.AFTER.name());
LOG.debug("sent after job execution event");
} | @Test
public void shouldSendAfterJobEvent() throws Exception {
// When
jobExecutionListener.afterJob(jobExecution);
// Then
assertEquals(jobExecution, consumer().receiveBody("seda:eventQueue"));
} |
@Override
public void handleTenantInfo(TenantInfoHandler handler) {
// If the tenant feature is disabled, do nothing
if (isTenantDisable()) {
return;
}
// fetch the tenant
TenantDO tenant = getTenant(TenantContextHolder.getRequiredTenantId());
// run the handler
handler.handle(tenant);
} | @Test
public void testHandleTenantInfo_disable() {
// prepare the parameters
TenantInfoHandler handler = mock(TenantInfoHandler.class);
// mock the disabled state
when(tenantProperties.getEnable()).thenReturn(false);
// invoke
tenantService.handleTenantInfo(handler);
// assert
verify(handler, never()).handle(any());
} |
@Override
public K8sNode removeNode(String hostname) {
checkArgument(!Strings.isNullOrEmpty(hostname), ERR_NULL_HOSTNAME);
K8sNode node = nodeStore.removeNode(hostname);
log.info(String.format(MSG_NODE, hostname, MSG_REMOVED));
return node;
} | @Test(expected = IllegalArgumentException.class)
public void testRemoveNullNode() {
target.removeNode(null);
} |
public void setSourceConnectorWrapper(SourceConnectorWrapper sourceConnectorWrapper) {
this.sourceConnectorWrapper = sourceConnectorWrapper;
} | @Test
void should_require_projectionFn() {
var wrapper = new SourceConnectorWrapper(minimalProperties(), 0, context);
assertThatThrownBy(() -> new ReadKafkaConnectP<>(noEventTime(), null)
.setSourceConnectorWrapper(wrapper))
.isInstanceOf(NullPointerException.class)
.hasMessage("projectionFn is required");
} |
public PackageDefinition getPackageDefinition() {
return packageDefinition;
} | @Test
public void shouldAddErrorIfPackageDoesNotExistsForGivenPackageId() throws Exception {
PipelineConfigSaveValidationContext configSaveValidationContext = mock(PipelineConfigSaveValidationContext.class);
when(configSaveValidationContext.findPackageById(anyString())).thenReturn(mock(PackageRepository.class));
PackageRepository packageRepository = mock(PackageRepository.class);
when(packageRepository.doesPluginExist()).thenReturn(true);
PackageMaterialConfig packageMaterialConfig = new PackageMaterialConfig(new CaseInsensitiveString("package-name"), "package-id", PackageDefinitionMother.create("package-id"));
packageMaterialConfig.getPackageDefinition().setRepository(packageRepository);
packageMaterialConfig.validateTree(configSaveValidationContext);
assertThat(packageMaterialConfig.errors().getAll().size(), is(1));
assertThat(packageMaterialConfig.errors().on(PackageMaterialConfig.PACKAGE_ID), is("Could not find plugin for given package id:[package-id]."));
} |
public static HttpRequestMessage getRequestFromChannel(Channel ch) {
return ch.attr(ATTR_ZUUL_REQ).get();
} | @Test
void headersAllCopied() {
ClientRequestReceiver receiver = new ClientRequestReceiver(null);
EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestEncoder());
PassportLoggingHandler loggingHandler = new PassportLoggingHandler(new DefaultRegistry());
// Required for messages
channel.attr(SourceAddressChannelHandler.ATTR_SERVER_LOCAL_PORT).set(1234);
channel.pipeline().addLast(new HttpServerCodec());
channel.pipeline().addLast(receiver);
channel.pipeline().addLast(loggingHandler);
HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/post");
httpRequest.headers().add("Header1", "Value1");
httpRequest.headers().add("Header2", "Value2");
httpRequest.headers().add("Duplicate", "Duplicate1");
httpRequest.headers().add("Duplicate", "Duplicate2");
channel.writeOutbound(httpRequest);
ByteBuf byteBuf = channel.readOutbound();
channel.writeInbound(byteBuf);
channel.readInbound();
channel.close();
HttpRequestMessage request = ClientRequestReceiver.getRequestFromChannel(channel);
Headers headers = request.getHeaders();
assertEquals(4, headers.size());
assertEquals("Value1", headers.getFirst("Header1"));
assertEquals("Value2", headers.getFirst("Header2"));
List<String> duplicates = headers.getAll("Duplicate");
assertEquals(Arrays.asList("Duplicate1", "Duplicate2"), duplicates);
} |
@Override
public InterpreterResult interpret(String st, InterpreterContext context)
throws InterpreterException {
try {
Properties finalProperties = new Properties();
finalProperties.putAll(getProperties());
Properties newProperties = new Properties();
newProperties.load(new StringReader(st));
for (String key : newProperties.stringPropertyNames()) {
finalProperties.put(key.trim(), newProperties.getProperty(key).trim());
}
LOGGER.debug("Properties for InterpreterGroup: {} is {}", interpreterGroupId, finalProperties);
interpreterSetting.setInterpreterGroupProperties(interpreterGroupId, finalProperties);
return new InterpreterResult(InterpreterResult.Code.SUCCESS);
} catch (IOException e) {
LOGGER.error("Fail to update interpreter setting", e);
return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e));
}
} | @Test
void testRunningAfterOtherInterpreter() throws InterpreterException {
assertTrue(interpreterFactory.getInterpreter("test.conf", executionContext) instanceof ConfInterpreter);
ConfInterpreter confInterpreter = (ConfInterpreter) interpreterFactory.getInterpreter("test.conf", executionContext);
InterpreterContext context = InterpreterContext.builder()
.setNoteId("noteId")
.setParagraphId("paragraphId")
.build();
RemoteInterpreter remoteInterpreter = (RemoteInterpreter) interpreterFactory.getInterpreter("test", executionContext);
InterpreterResult result = remoteInterpreter.interpret("hello world", context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code);
result = confInterpreter.interpret("property_1\tnew_value\nnew_property\tdummy_value", context);
assertEquals(InterpreterResult.Code.ERROR, result.code);
} |
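ConfInterpreter parses the paragraph text with java.util.Properties.load, so keys and values may be separated by '=', ':', or whitespace; that is why the test's body uses tab-separated pairs. For instance:

```java
import java.io.StringReader;
import java.util.Properties;

public class PropsDemo {
    public static void main(String[] args) throws Exception {
        Properties p = new Properties();
        // Properties.load treats '=', ':' and whitespace (here a tab) as separators.
        p.load(new StringReader("property_1\tnew_value\nnew_property\tdummy_value"));
        System.out.println(p.getProperty("property_1"));   // new_value
        System.out.println(p.getProperty("new_property")); // dummy_value
    }
}
```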
@Description("natural logarithm")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double ln(@SqlType(StandardTypes.DOUBLE) double num)
{
return Math.log(num);
} | @Test
public void testLn()
{
for (double doubleValue : DOUBLE_VALUES) {
assertFunction("ln(" + doubleValue + ")", DOUBLE, Math.log(doubleValue));
}
assertFunction("ln(NULL)", DOUBLE, null);
} |
double nextRate(double rate, long periodNanos, long totalPublished, long totalReceived) {
long expected = (long) ((rate / ONE_SECOND_IN_NANOS) * periodNanos);
long published = totalPublished - previousTotalPublished;
long received = totalReceived - previousTotalReceived;
previousTotalPublished = totalPublished;
previousTotalReceived = totalReceived;
if (log.isDebugEnabled()) {
log.debug(
"Current rate: {} -- Publish rate {} -- Receive Rate: {}",
rate,
rate(published, periodNanos),
rate(received, periodNanos));
}
long receiveBacklog = totalPublished - totalReceived;
if (receiveBacklog > receiveBacklogLimit) {
return nextRate(periodNanos, received, expected, receiveBacklog, "Receive");
}
long publishBacklog = expected - published;
if (publishBacklog > publishBacklogLimit) {
return nextRate(periodNanos, published, expected, publishBacklog, "Publish");
}
rampUp();
return rate + (rate * rampingFactor);
} | @Test
void publishBacklog() {
assertThat(rateController.getRampingFactor()).isEqualTo(1);
// no backlog
rate = rateController.nextRate(rate, periodNanos, 10_000, 10_000);
assertThat(rate).isEqualTo(20_000);
assertThat(rateController.getRampingFactor()).isEqualTo(1);
// publish backlog
rate = rateController.nextRate(rate, periodNanos, 15_000, 20_000);
assertThat(rate).isEqualTo(5_000);
assertThat(rateController.getRampingFactor()).isEqualTo(0.5);
} |
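The controller ramps multiplicatively while both backlogs stay under their limits, and on a publish backlog falls back toward the observed rate while halving the ramping factor (20,000 expected vs. 5,000 published yields 5,000 and factor 0.5 in the test). The five-argument nextRate overload used for the cutback is not shown above, so the following compact sketch of the feedback step is an assumption, not the actual implementation:

```java
// Hedged sketch: multiplicative ramp-up with backlog-triggered cutback.
class SimpleRateController {
    private double rampingFactor = 1.0;

    double next(double rate, long expected, long published, long backlogLimit) {
        long backlog = expected - published;
        if (backlog > backlogLimit) {
            rampingFactor = Math.max(rampingFactor / 2, 0.01); // back off the ramp
            return published;            // assumption: fall back to the observed rate
        }
        return rate + rate * rampingFactor; // e.g. 10_000 at factor 1 -> 20_000
    }
}
```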
public static Deserializer<NeighborSolicitation> deserializer() {
return (data, offset, length) -> {
checkInput(data, offset, length, HEADER_LENGTH);
NeighborSolicitation neighborSolicitation = new NeighborSolicitation();
ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
bb.getInt();
bb.get(neighborSolicitation.targetAddress, 0, Ip6Address.BYTE_LENGTH);
if (bb.limit() - bb.position() > 0) {
NeighborDiscoveryOptions options = NeighborDiscoveryOptions.deserializer()
.deserialize(data, bb.position(), bb.limit() - bb.position());
for (NeighborDiscoveryOptions.Option option : options.options()) {
neighborSolicitation.addOption(option.type(), option.data());
}
}
return neighborSolicitation;
};
} | @Test
public void testDeserializeBadInput() throws Exception {
PacketTestUtils.testDeserializeBadInput(NeighborSolicitation.deserializer());
} |
public static String getTagValue( Node n, KettleAttributeInterface code ) {
return getTagValue( n, code.getXmlCode() );
} | @Test
public void getTagValueEmptyTagYieldsEmptyValue() {
System.setProperty( Const.KETTLE_XML_EMPTY_TAG_YIELDS_EMPTY_VALUE, "Y" );
assertEquals( "", XMLHandler.getTagValue( getNode(), "text" ) );
} |
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
return doFlatTransformValues(
toValueTransformerWithKeySupplier(valueTransformerSupplier),
NamedInternal.empty(),
stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueSupplier() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.flatTransformValues(
flatValueTransformerSupplier,
(String[]) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array"));
} |
public static void initRequestEntity(HttpRequestBase requestBase, Object body, Header header) throws Exception {
if (body == null) {
return;
}
if (requestBase instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestBase;
MediaType mediaType = MediaType.valueOf(header.getValue(HttpHeaderConsts.CONTENT_TYPE));
ContentType contentType = ContentType.create(mediaType.getType(), mediaType.getCharset());
HttpEntity entity;
if (body instanceof byte[]) {
entity = new ByteArrayEntity((byte[]) body, contentType);
} else {
entity = new StringEntity(body instanceof String ? (String) body : JacksonUtils.toJson(body),
contentType);
}
request.setEntity(entity);
}
} | @Test
void testInitRequestEntity2() throws Exception {
BaseHttpMethod.HttpGetWithEntity httpRequest = new BaseHttpMethod.HttpGetWithEntity("");
Header header = Header.newInstance();
header.addParam(HttpHeaderConsts.CONTENT_TYPE, "text/html");
HttpUtils.initRequestEntity(httpRequest, Collections.singletonMap("k", "v"), header);
HttpEntity entity = httpRequest.getEntity();
InputStream contentStream = entity.getContent();
byte[] bytes = new byte[contentStream.available()];
contentStream.read(bytes);
assertEquals("{\"k\":\"v\"}", new String(bytes, Constants.ENCODE));
assertEquals(HttpHeaderConsts.CONTENT_TYPE, entity.getContentType().getName());
assertEquals("text/html; charset=UTF-8", entity.getContentType().getValue());
} |
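initRequestEntity chooses the entity from the body's runtime type: a byte[] is wrapped directly, a String is used verbatim, and any other object is serialized to JSON, with the charset taken from the Content-Type header (hence the "text/html; charset=UTF-8" assertion). An illustrative use of the two Apache HttpClient entity types involved:

```java
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import java.nio.charset.StandardCharsets;

public class EntityDemo {
    public static void main(String[] args) throws Exception {
        ContentType html = ContentType.create("text/html", StandardCharsets.UTF_8);
        HttpEntity raw = new ByteArrayEntity(new byte[] {1, 2, 3}, html); // byte[] branch
        HttpEntity text = new StringEntity("{\"k\":\"v\"}", html);        // String/JSON branch
        System.out.println(raw.getContentLength() + " " + text.getContentType().getValue());
        // prints: 3 text/html; charset=UTF-8
    }
}
```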
@Override
public CompletableFuture<Acknowledge> updateJobResourceRequirements(
JobResourceRequirements jobResourceRequirements) {
schedulerNG.updateJobResourceRequirements(jobResourceRequirements);
return CompletableFuture.completedFuture(Acknowledge.get());
} | @Test
public void testSuccessfulResourceRequirementsUpdate() throws Exception {
final CompletableFuture<JobResourceRequirements> schedulerUpdateFuture =
new CompletableFuture<>();
final TestingSchedulerNG scheduler =
TestingSchedulerNG.newBuilder()
.setUpdateJobResourceRequirementsConsumer(schedulerUpdateFuture::complete)
.build();
try (final JobMaster jobMaster =
new JobMasterBuilder(jobGraph, rpcService)
.withConfiguration(configuration)
.withHighAvailabilityServices(haServices)
.withSlotPoolServiceSchedulerFactory(
DefaultSlotPoolServiceSchedulerFactory.create(
TestingSlotPoolServiceBuilder.newBuilder(),
new TestingSchedulerNGFactory(scheduler)))
.createJobMaster()) {
jobMaster.start();
final JobMasterGateway jobMasterGateway =
jobMaster.getSelfGateway(JobMasterGateway.class);
final JobResourceRequirements.Builder jobResourceRequirementsBuilder =
JobResourceRequirements.newBuilder();
for (JobVertex jobVertex : jobGraph.getVertices()) {
jobResourceRequirementsBuilder.setParallelismForJobVertex(jobVertex.getID(), 1, 2);
}
final JobResourceRequirements newRequirements = jobResourceRequirementsBuilder.build();
final CompletableFuture<Acknowledge> jobMasterUpdateFuture =
jobMasterGateway.updateJobResourceRequirements(newRequirements);
assertThatFuture(jobMasterUpdateFuture).eventuallySucceeds();
assertThatFuture(schedulerUpdateFuture).eventuallySucceeds().isEqualTo(newRequirements);
}
} |
boolean convertDeviceProfileForVersion330(JsonNode profileData) {
boolean isUpdated = false;
if (profileData.has("alarms") && !profileData.get("alarms").isNull()) {
JsonNode alarms = profileData.get("alarms");
for (JsonNode alarm : alarms) {
if (alarm.has("createRules")) {
JsonNode createRules = alarm.get("createRules");
for (AlarmSeverity severity : AlarmSeverity.values()) {
if (createRules.has(severity.name())) {
JsonNode spec = createRules.get(severity.name()).get("condition").get("spec");
if (convertDeviceProfileAlarmRulesForVersion330(spec)) {
isUpdated = true;
}
}
}
}
if (alarm.has("clearRule") && !alarm.get("clearRule").isNull()) {
JsonNode spec = alarm.get("clearRule").get("condition").get("spec");
if (convertDeviceProfileAlarmRulesForVersion330(spec)) {
isUpdated = true;
}
}
}
}
return isUpdated;
} | @Test
void convertDeviceProfileAlarmRulesForVersion330NoAlarmNode() throws JsonProcessingException {
JsonNode spec = JacksonUtil.toJsonNode("{ \"configuration\": { \"type\": \"DEFAULT\" } }");
JsonNode expected = JacksonUtil.toJsonNode("{ \"configuration\": { \"type\": \"DEFAULT\" } }");
assertThat(service.convertDeviceProfileForVersion330(spec)).isFalse();
assertThat(spec.toPrettyString()).isEqualTo(expected.toPrettyString());
} |
@SuppressWarnings("rawtypes")
public static ShardingStrategy newInstance(final ShardingStrategyConfiguration shardingStrategyConfig, final ShardingAlgorithm shardingAlgorithm, final String defaultShardingColumn) {
if (shardingStrategyConfig instanceof StandardShardingStrategyConfiguration && shardingAlgorithm instanceof StandardShardingAlgorithm) {
String shardingColumn = ((StandardShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumn();
return new StandardShardingStrategy(null == shardingColumn ? defaultShardingColumn : shardingColumn, (StandardShardingAlgorithm) shardingAlgorithm);
}
if (shardingStrategyConfig instanceof ComplexShardingStrategyConfiguration && shardingAlgorithm instanceof ComplexKeysShardingAlgorithm) {
return new ComplexShardingStrategy(((ComplexShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumns(), (ComplexKeysShardingAlgorithm) shardingAlgorithm);
}
if (shardingStrategyConfig instanceof HintShardingStrategyConfiguration && shardingAlgorithm instanceof HintShardingAlgorithm) {
return new HintShardingStrategy((HintShardingAlgorithm) shardingAlgorithm);
}
return new NoneShardingStrategy();
} | @Test
void assertNewInstanceForStandardShardingStrategyWithDefaultColumnStrategy() {
ShardingStrategy actual = ShardingStrategyFactory.newInstance(mock(StandardShardingStrategyConfiguration.class), mock(CoreStandardShardingAlgorithmFixture.class), "order_id");
assertTrue(actual.getShardingColumns().contains("order_id"));
} |
@Override
public void verify(X509Certificate certificate, Date date) {
logger.debug("Verifying {} issued by {}", certificate.getSubjectX500Principal(),
certificate.getIssuerX500Principal());
// Create trustAnchors
final Set<TrustAnchor> trustAnchors = getTrusted().stream().map(
c -> new TrustAnchor(c, null)
).collect(Collectors.toSet());
if (trustAnchors.isEmpty()) {
throw new VerificationException("No trust anchors available");
}
// Create the selector that specifies the starting certificate
final X509CertSelector selector = new X509CertSelector();
selector.setCertificate(certificate);
// Configure the PKIX certificate builder algorithm parameters
try {
final PKIXBuilderParameters pkixParams = new PKIXBuilderParameters(trustAnchors, selector);
// Set assume date
if (date != null) {
pkixParams.setDate(date);
}
// Add cert store with certificate to check
pkixParams.addCertStore(CertStore.getInstance(
"Collection", new CollectionCertStoreParameters(ImmutableList.of(certificate)), "BC"));
// Add cert store with intermediates
pkixParams.addCertStore(CertStore.getInstance(
"Collection", new CollectionCertStoreParameters(getIntermediates()), "BC"));
// Add cert store with CRLs
pkixParams.addCertStore(CertStore.getInstance(
"Collection", new CollectionCertStoreParameters(getCRLs()), "BC"));
// Toggle to check revocation list
pkixParams.setRevocationEnabled(checkRevocation());
// Build and verify the certification chain
final CertPathBuilder builder = CertPathBuilder.getInstance("PKIX", "BC");
builder.build(pkixParams);
} catch (CertPathBuilderException e) {
throw new VerificationException(
String.format("Invalid certificate %s issued by %s",
certificate.getSubjectX500Principal(), certificate.getIssuerX500Principal()
), e
);
} catch (GeneralSecurityException e) {
throw new CryptoException(
String.format("Could not verify certificate %s issued by %s",
certificate.getSubjectX500Principal(), certificate.getIssuerX500Principal()
), e
);
}
} | @Test
public void shouldVerifyCertificate() {
createCertificateService(
new String[] { "root.crt" }, new String[] { "intermediate.crt"},
new String[0], false
).verify(readCert("normal.crt"));
} |
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) throws KettleJobException,
KettleDatabaseException {
boolean cleanLogRecords = status.equals( LogStatus.END );
String tableName = jobLogTable.getActualTableName();
DatabaseMeta logcon = jobLogTable.getDatabaseMeta();
Database ldb = createDataBase( logcon );
ldb.shareVariablesWith( this );
try {
ldb.connect();
ldb.setCommit( logCommitSize );
ldb.writeLogRecord( jobLogTable, status, this, null );
if ( cleanLogRecords ) {
ldb.cleanupLogRecords( jobLogTable, getJobname() );
}
} catch ( KettleDatabaseException dbe ) {
addErrors( 1 );
throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
} finally {
if ( !ldb.isAutoCommit() ) {
ldb.commitLog( true, jobLogTable );
}
ldb.disconnect();
}
} | @Ignore( "Test is validating against a mock object... not a real test" )
@Test
public void recordsCleanUpMethodIsCalled_JobLogTable() throws Exception {
JobLogTable jobLogTable = JobLogTable.getDefault( mockedVariableSpace, hasDatabasesInterface );
setAllTableParamsDefault( jobLogTable );
doCallRealMethod().when( mockedJob ).writeLogTableInformation( jobLogTable, LogStatus.END );
mockedJob.writeLogTableInformation( jobLogTable, LogStatus.END );
verify( mockedDataBase ).cleanupLogRecords( eq( jobLogTable ), anyString() );
} |
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
String groupId,
String memberId,
int memberEpoch,
String instanceId,
String rackId,
int rebalanceTimeoutMs,
String clientId,
String clientHost,
List<String> subscribedTopicNames,
String assignorName,
List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
final long currentTimeMs = time.milliseconds();
final List<CoordinatorRecord> records = new ArrayList<>();
// Get or create the consumer group.
boolean createIfNotExists = memberEpoch == 0;
final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
throwIfConsumerGroupIsFull(group, memberId);
// Get or create the member.
if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
final ConsumerGroupMember member;
if (instanceId == null) {
member = getOrMaybeSubscribeDynamicConsumerGroupMember(
group,
memberId,
memberEpoch,
ownedTopicPartitions,
createIfNotExists,
false
);
} else {
member = getOrMaybeSubscribeStaticConsumerGroupMember(
group,
memberId,
memberEpoch,
instanceId,
ownedTopicPartitions,
createIfNotExists,
false,
records
);
}
// 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
// record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
// changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
// record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
// changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
.maybeUpdateInstanceId(Optional.ofNullable(instanceId))
.maybeUpdateRackId(Optional.ofNullable(rackId))
.maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
.maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
.maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
.setClientId(clientId)
.setClientHost(clientHost)
.setClassicMemberMetadata(null)
.build();
boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
groupId,
member,
updatedMember,
records
);
int groupEpoch = group.groupEpoch();
Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
SubscriptionType subscriptionType = group.subscriptionType();
if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
// The subscription metadata is updated in two cases:
// 1) The member has updated its subscriptions;
// 2) The refresh deadline has been reached.
subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
subscriptionMetadata = group.computeSubscriptionMetadata(
subscribedTopicNamesMap,
metadataImage.topics(),
metadataImage.cluster()
);
int numMembers = group.numMembers();
if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
numMembers++;
}
subscriptionType = ModernGroup.subscriptionType(
subscribedTopicNamesMap,
numMembers
);
if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
log.info("[GroupId {}] Computed new subscription metadata: {}.",
groupId, subscriptionMetadata);
bumpGroupEpoch = true;
records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
}
if (bumpGroupEpoch) {
groupEpoch += 1;
records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
}
group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
}
// 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
// the existing and the new target assignment is persisted to the partition.
final int targetAssignmentEpoch;
final Assignment targetAssignment;
if (groupEpoch > group.assignmentEpoch()) {
targetAssignment = updateTargetAssignment(
group,
groupEpoch,
member,
updatedMember,
subscriptionMetadata,
subscriptionType,
records
);
targetAssignmentEpoch = groupEpoch;
} else {
targetAssignmentEpoch = group.assignmentEpoch();
targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
}
// 3. Reconcile the member's assignment with the target assignment if the member is not
// fully reconciled yet.
updatedMember = maybeReconcile(
groupId,
updatedMember,
group::currentPartitionEpoch,
targetAssignmentEpoch,
targetAssignment,
ownedTopicPartitions,
records
);
scheduleConsumerGroupSessionTimeout(groupId, memberId);
// Prepare the response.
ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
.setMemberId(updatedMember.memberId())
.setMemberEpoch(updatedMember.memberEpoch())
.setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));
// The assignment is only provided in the following cases:
// 1. The member sent a full request. It does so when joining or rejoining the group with zero
// as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
// (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
// as those must be set in a full request.
// 2. The member's assignment has been updated.
boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
}
return new CoordinatorResult<>(records, response);
} | @Test
public void testRebalanceTimeoutLifecycle() {
String groupId = "fooup";
// Use a static member id as it makes the test easier.
String memberId1 = Uuid.randomUuid().toString();
String memberId2 = Uuid.randomUuid().toString();
Uuid fooTopicId = Uuid.randomUuid();
String fooTopicName = "foo";
MockPartitionAssignor assignor = new MockPartitionAssignor("range");
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(assignor))
.withMetadataImage(new MetadataImageBuilder()
.addTopic(fooTopicId, fooTopicName, 3)
.addRacks()
.build())
.build();
assignor.prepareGroupAssignment(new GroupAssignment(
new HashMap<String, MemberAssignment>() {
{
put(memberId1, new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1, 2)
)));
}
}
));
// Member 1 joins the group.
CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result =
context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId1)
.setMemberEpoch(0)
.setRebalanceTimeoutMs(180000)
.setSubscribedTopicNames(Collections.singletonList("foo"))
.setTopicPartitions(Collections.emptyList()));
assertResponseEquals(
new ConsumerGroupHeartbeatResponseData()
.setMemberId(memberId1)
.setMemberEpoch(1)
.setHeartbeatIntervalMs(5000)
.setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
.setTopicPartitions(Collections.singletonList(
new ConsumerGroupHeartbeatResponseData.TopicPartitions()
.setTopicId(fooTopicId)
.setPartitions(Arrays.asList(0, 1, 2))))),
result.response()
);
assertEquals(
Collections.emptyList(),
context.sleep(result.response().heartbeatIntervalMs())
);
// Prepare next assignment.
assignor.prepareGroupAssignment(new GroupAssignment(
new HashMap<String, MemberAssignment>() {
{
put(memberId1, new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1)
)));
put(memberId2, new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 2)
)));
}
}
));
// Member 2 joins the group.
result = context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId2)
.setMemberEpoch(0)
.setRebalanceTimeoutMs(90000)
.setSubscribedTopicNames(Collections.singletonList("foo"))
.setTopicPartitions(Collections.emptyList()));
assertResponseEquals(
new ConsumerGroupHeartbeatResponseData()
.setMemberId(memberId2)
.setMemberEpoch(2)
.setHeartbeatIntervalMs(5000)
.setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()),
result.response()
);
assertEquals(
Collections.emptyList(),
context.sleep(result.response().heartbeatIntervalMs())
);
        // Member 1 heartbeats and transitions to the unrevoked-partitions state, since it must
        // revoke partition 2. The rebalance timeout is scheduled.
result = context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId1)
.setMemberEpoch(1)
.setRebalanceTimeoutMs(12000)
.setSubscribedTopicNames(Collections.singletonList("foo")));
assertResponseEquals(
new ConsumerGroupHeartbeatResponseData()
.setMemberId(memberId1)
.setMemberEpoch(1)
.setHeartbeatIntervalMs(5000)
.setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
.setTopicPartitions(Collections.singletonList(
new ConsumerGroupHeartbeatResponseData.TopicPartitions()
.setTopicId(fooTopicId)
.setPartitions(Arrays.asList(0, 1))))),
result.response()
);
// Verify that there is a revocation timeout. Keep a reference
// to the timeout for later.
ScheduledTimeout<Void, CoordinatorRecord> scheduledTimeout =
context.assertRebalanceTimeout(groupId, memberId1, 12000);
assertEquals(
Collections.emptyList(),
context.sleep(result.response().heartbeatIntervalMs())
);
// Member 1 acks the revocation. The revocation timeout is cancelled.
result = context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId1)
.setMemberEpoch(1)
.setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatRequestData.TopicPartitions()
.setTopicId(fooTopicId)
.setPartitions(Arrays.asList(0, 1)))));
assertResponseEquals(
new ConsumerGroupHeartbeatResponseData()
.setMemberId(memberId1)
.setMemberEpoch(2)
.setHeartbeatIntervalMs(5000),
result.response()
);
        // Verify that there is no revocation timeout.
context.assertNoRebalanceTimeout(groupId, memberId1);
// Execute the scheduled revocation timeout captured earlier to simulate a
// stale timeout. This should be a no-op.
assertEquals(Collections.emptyList(), scheduledTimeout.operation.generateRecords().records());
} |
public void onCheckpointComplete(long checkpointId) {
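        // Assignments recorded for this checkpoint and any earlier one are now committed and no longer need to be kept.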
assignmentsByCheckpointId.entrySet().removeIf(entry -> entry.getKey() <= checkpointId);
} | @Test
void testOnCheckpointComplete() throws Exception {
final long checkpointId1 = 100L;
final long checkpointId2 = 101L;
SplitAssignmentTracker<MockSourceSplit> tracker = new SplitAssignmentTracker<>();
// Assign some splits to subtask 0 and 1.
tracker.recordSplitAssignment(getSplitsAssignment(2, 0));
// Take the first snapshot.
tracker.onCheckpoint(checkpointId1);
verifyAssignment(
Arrays.asList("0"), tracker.assignmentsByCheckpointId(checkpointId1).get(0));
verifyAssignment(
Arrays.asList("1", "2"), tracker.assignmentsByCheckpointId(checkpointId1).get(1));
// Assign additional splits to subtask 0 and 1.
tracker.recordSplitAssignment(getSplitsAssignment(2, 3));
// Take the second snapshot.
tracker.onCheckpoint(checkpointId2);
verifyAssignment(
Arrays.asList("0"), tracker.assignmentsByCheckpointId(checkpointId1).get(0));
verifyAssignment(
Arrays.asList("1", "2"), tracker.assignmentsByCheckpointId(checkpointId1).get(1));
verifyAssignment(
Arrays.asList("3"), tracker.assignmentsByCheckpointId(checkpointId2).get(0));
verifyAssignment(
Arrays.asList("4", "5"), tracker.assignmentsByCheckpointId(checkpointId2).get(1));
// Complete the first checkpoint.
tracker.onCheckpointComplete(checkpointId1);
assertThat(tracker.assignmentsByCheckpointId(checkpointId1)).isNull();
verifyAssignment(
Arrays.asList("3"), tracker.assignmentsByCheckpointId(checkpointId2).get(0));
verifyAssignment(
Arrays.asList("4", "5"), tracker.assignmentsByCheckpointId(checkpointId2).get(1));
} |
@SuppressWarnings("unchecked")
protected Set<PathSpec> getFields()
{
Object fields = _queryParams.get(RestConstants.FIELDS_PARAM);
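    // The stored value may be a Set<PathSpec>, a serialized mask string, or a DataMap; normalize each to a set of PathSpecs.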
    if (fields == null)
    {
      return Collections.emptySet();
    }
if (fields instanceof Set)
{
return (Set<PathSpec>) fields;
}
else if (fields instanceof String)
{
try
{
MaskTree tree = URIMaskUtil.decodeMaskUriFormat((String) fields);
return tree.getOperations().keySet();
}
catch (IllegalMaskException e)
{
throw new IllegalArgumentException("Field param was a string and it did not represent a serialized mask tree", e);
}
}
else if (fields instanceof DataMap)
{
MaskTree tree = new MaskTree((DataMap) fields);
return tree.getOperations().keySet();
}
throw new IllegalArgumentException("Fields param is of unrecognized type: " + fields.getClass());
} | @Test
public void testStringFieldsParam()
{
GetRequest<TestRecord> getRequest =
generateDummyRequestBuilder().setParam(RestConstants.FIELDS_PARAM, "id").build();
assertEquals(getRequest.getFields(), Collections.singleton(new PathSpec("id")));
} |
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> properties) throws Exception {
AS400ConnectionPool connectionPool;
if (properties.containsKey(CONNECTION_POOL)) {
LOG.trace("AS400ConnectionPool instance specified in the URI - will look it up.");
// We have chosen to handle the connectionPool option ourselves, so
// we must remove it from the given parameter list (see
// http://camel.apache.org/writing-components.html)
String poolId = properties.remove(CONNECTION_POOL).toString();
connectionPool
= EndpointHelper.resolveReferenceParameter(getCamelContext(), poolId, AS400ConnectionPool.class, true);
} else {
LOG.trace("No AS400ConnectionPool instance specified in the URI - one will be provided.");
connectionPool = getConnectionPool();
}
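        // Derive the endpoint type from the object suffix at the end of the path (e.g. ".pgm").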
String type = remaining.substring(remaining.lastIndexOf('.') + 1).toUpperCase();
Jt400Endpoint endpoint = new Jt400Endpoint(uri, this, connectionPool);
setProperties(endpoint, properties);
endpoint.setType(Jt400Type.valueOf(type));
return endpoint;
} | @Test
public void testCreatePgmSecuredEndpoint() throws Exception {
Endpoint endpoint = component
.createEndpoint(
"jt400://user:password@host/qsys.lib/library.lib/queue.pgm?connectionPool=#mockPool&secured=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof Jt400Endpoint);
assertTrue(((Jt400Endpoint) endpoint).isSecured());
} |
@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {
private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;
private Sensor droppedRecordsSensor;
@Override
public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
super.init(context);
final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
Thread.currentThread().getName(),
internalProcessorContext.taskId().toString(),
internalProcessorContext.metrics()
);
store = internalProcessorContext.getStateStore(storeName);
keySchema.init(context);
}
@Override
public void process(final Record<KO, SubscriptionWrapper<K>> record) {
if (record.key() == null && !SubscriptionWrapper.Instruction.PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE.equals(record.value().getInstruction())) {
dropRecord();
return;
}
if (record.value().getVersion() > SubscriptionWrapper.CURRENT_VERSION) {
//Guard against modifications to SubscriptionWrapper. Need to ensure that there is compatibility
//with previous versions to enable rolling upgrades. Must develop a strategy for upgrading
//from older SubscriptionWrapper versions to newer versions.
throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
}
context().forward(
record.withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey()))
.withValue(inferChange(record))
.withTimestamp(record.timestamp())
);
}
private Change<ValueAndTimestamp<SubscriptionWrapper<K>>> inferChange(final Record<KO, SubscriptionWrapper<K>> record) {
if (record.key() == null) {
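                    // A null foreign key has no prior state in the store, so emit the new value with a null old value.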
return new Change<>(ValueAndTimestamp.make(record.value(), record.timestamp()), null);
} else {
return inferBasedOnState(record);
}
}
private Change<ValueAndTimestamp<SubscriptionWrapper<K>>> inferBasedOnState(final Record<KO, SubscriptionWrapper<K>> record) {
final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);
//This store is used by the prefix scanner in ForeignTableJoinProcessorSupplier
if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE) ||
record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
store.delete(subscriptionKey);
} else {
store.put(subscriptionKey, newValue);
}
return new Change<>(newValue, oldValue);
}
private void dropRecord() {
if (context().recordMetadata().isPresent()) {
final RecordMetadata recordMetadata = context().recordMetadata().get();
LOG.warn(
"Skipping record due to null foreign key. "
+ "topic=[{}] partition=[{}] offset=[{}]",
recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset()
);
} else {
LOG.warn(
"Skipping record due to null foreign key. Topic, partition, and offset not known."
);
}
droppedRecordsSensor.record();
}
};
} | @Test
public void shouldPropagateNullIfNoFKValAvailableV1() {
final StoreBuilder<TimestampedKeyValueStore<Bytes, SubscriptionWrapper<String>>> storeBuilder = storeBuilder();
final SubscriptionReceiveProcessorSupplier<String, String> supplier = supplier(storeBuilder);
final Processor<String,
SubscriptionWrapper<String>,
CombinedKey<String, String>,
Change<ValueAndTimestamp<SubscriptionWrapper<String>>>> processor = supplier.get();
stateStore = storeBuilder.build();
context.addStateStore(stateStore);
stateStore.init((StateStoreContext) context, stateStore);
final SubscriptionWrapper<String> oldWrapper = new SubscriptionWrapper<>(
new long[]{1L, 2L},
Instruction.PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE,
PK2,
SubscriptionWrapper.VERSION_1,
1
);
final ValueAndTimestamp<SubscriptionWrapper<String>> oldValue = ValueAndTimestamp.make(oldWrapper, 0);
final Bytes key = COMBINED_KEY_SCHEMA.toBytes(FK, PK1);
stateStore.put(key, oldValue);
processor.init(context);
final SubscriptionWrapper<String> newWrapper = new SubscriptionWrapper<>(
new long[]{1L, 2L},
Instruction.PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE,
PK1,
SubscriptionWrapper.VERSION_1,
1
);
final ValueAndTimestamp<SubscriptionWrapper<String>> newValue = ValueAndTimestamp.make(
newWrapper, 1L);
final Record<String, SubscriptionWrapper<String>> record = new Record<>(
FK,
newWrapper,
1L
);
processor.process(record);
final List<CapturedForward<? extends CombinedKey<String, String>,
? extends Change<ValueAndTimestamp<SubscriptionWrapper<String>>>>> forwarded = context.forwarded();
assertEquals(newValue, stateStore.get(key));
assertEquals(1, forwarded.size());
assertEquals(
record.withKey(new CombinedKey<>(FK, PK1))
.withValue(new Change<>(newValue, oldValue)),
forwarded.get(0).record()
);
} |
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
@Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) {
return containsAtLeastEntriesIn(accumulateMultimap(k0, v0, rest));
} | @Test
public void containsAtLeastVarargFailureMissing() {
ImmutableMultimap<Integer, String> expected =
ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
ListMultimap<Integer, String> actual = LinkedListMultimap.create(expected);
actual.remove(3, "six");
actual.remove(4, "five");
actual.put(3, "nine");
expectFailureWhenTestingThat(actual)
.containsAtLeast(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
assertFailureKeys("missing", "---", "expected to contain at least", "but was");
assertFailureValue("missing", "{3=[six], 4=[five]}");
} |
@Override
public Endpoints endpoints(String uid) {
checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_ENDPOINTS_UID);
return k8sEndpointsStore.endpoints(uid);
} | @Test
public void testGetEndpointsByUid() {
createBasicEndpoints();
assertNotNull("Endpoints did not match", target.endpoints(ENDPOINTS_UID));
assertNull("Endpoints did not match", target.endpoints(UNKNOWN_UID));
} |
@Override
public void callback(CallbackContext context) {
try {
onCallback(context);
} catch (IOException | ExecutionException e) {
throw new IllegalStateException(e);
} catch (InterruptedException e) {
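      // Restore the interrupt flag so callers further up the stack can still observe the interruption.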
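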
Thread.currentThread().interrupt();
throw new IllegalStateException(e);
}
} | @Test
public void callback_whenOrganizationsAreNotDefinedAndUserDoesntBelongToInstallationOrganization_shouldThrow() throws IOException, ExecutionException, InterruptedException {
UserIdentity userIdentity = mock(UserIdentity.class);
CallbackContext context = mockUserNotBelongingToOrganization(userIdentity);
mockInstallations();
assertThatThrownBy(() -> underTest.callback(context))
.isInstanceOf(UnauthorizedException.class)
.hasMessage("'login' must be a member of at least one organization which has installed the SonarQube GitHub app");
} |
@Override
public boolean tryReturnRecordAt(boolean isAtSplitPoint, Long recordStart) {
return tryReturnRecordAt(isAtSplitPoint, recordStart.longValue());
} | @Test
public void testTryReturnRecordContinuesUntilSplitPoint() throws Exception {
OffsetRangeTracker tracker = new OffsetRangeTracker(9, 18);
// Return records with gaps of 2; every 3rd record is a split point.
assertTrue(tracker.tryReturnRecordAt(true, 10));
assertTrue(tracker.tryReturnRecordAt(false, 12));
assertTrue(tracker.tryReturnRecordAt(false, 14));
assertTrue(tracker.tryReturnRecordAt(true, 16));
// Out of range, but not a split point...
assertTrue(tracker.tryReturnRecordAt(false, 18));
assertTrue(tracker.tryReturnRecordAt(false, 20));
// Out of range AND a split point.
assertFalse(tracker.tryReturnRecordAt(true, 22));
} |
public static Instruction popMpls() {
return new L2ModificationInstruction.ModMplsHeaderInstruction(
L2ModificationInstruction.L2SubType.MPLS_POP,
EthType.EtherType.MPLS_UNICAST.ethType());
} | @Test
public void testPopMplsEthertypeMethod() {
final Instruction instruction = Instructions.popMpls(new EthType(1));
        final L2ModificationInstruction.ModMplsHeaderInstruction popHeaderInstruction =
                checkAndConvert(instruction,
                                Instruction.Type.L2MODIFICATION,
                                L2ModificationInstruction.ModMplsHeaderInstruction.class);
        assertThat(popHeaderInstruction.ethernetType().toShort(), is((short) 1));
        assertThat(popHeaderInstruction.subtype(),
                   is(L2ModificationInstruction.L2SubType.MPLS_POP));
} |
public static CommandContext decode(HttpRequest request) {
CommandContext commandContext = null;
if (request != null) {
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(request.uri());
String path = queryStringDecoder.path();
String[] array = path.split("/");
if (array.length == 2) {
String name = array[1];
                // Process GET and POST requests separately: read arguments from the query string for GET and from the body for POST.
if (request.method() == HttpMethod.GET) {
if (queryStringDecoder.parameters().isEmpty()) {
commandContext = CommandContextFactory.newInstance(name);
commandContext.setHttp(true);
} else {
List<String> valueList = new ArrayList<>();
for (List<String> values :
queryStringDecoder.parameters().values()) {
valueList.addAll(values);
}
commandContext =
CommandContextFactory.newInstance(name, valueList.toArray(new String[] {}), true);
}
} else if (request.method() == HttpMethod.POST) {
HttpPostRequestDecoder httpPostRequestDecoder = new HttpPostRequestDecoder(request);
List<String> valueList = new ArrayList<>();
for (InterfaceHttpData interfaceHttpData : httpPostRequestDecoder.getBodyHttpDatas()) {
if (interfaceHttpData.getHttpDataType() == InterfaceHttpData.HttpDataType.Attribute) {
Attribute attribute = (Attribute) interfaceHttpData;
try {
valueList.add(attribute.getValue());
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
}
if (valueList.isEmpty()) {
commandContext = CommandContextFactory.newInstance(name);
commandContext.setHttp(true);
} else {
commandContext =
CommandContextFactory.newInstance(name, valueList.toArray(new String[] {}), true);
}
}
} else if (array.length == 3) {
String name = array[1];
String appName = array[2];
if (request.method() == HttpMethod.GET) {
commandContext = CommandContextFactory.newInstance(name, new String[] {appName}, true);
commandContext.setHttp(true);
}
}
}
return commandContext;
} | @Test
void decodeGet() {
HttpRequest request = mock(HttpRequest.class);
when(request.uri()).thenReturn("localhost:80/test");
when(request.method()).thenReturn(HttpMethod.GET);
CommandContext context = HttpCommandDecoder.decode(request);
assertThat(context.getCommandName(), equalTo("test"));
assertThat(context.isHttp(), is(true));
when(request.uri()).thenReturn("localhost:80/test?a=b&c=d");
context = HttpCommandDecoder.decode(request);
assertThat(context.getArgs(), arrayContaining("b", "d"));
} |
@SafeVarargs
public static <T> List<T> unionAll(Collection<T> coll1, Collection<T> coll2, Collection<T>... otherColls) {
if (CollUtil.isEmpty(coll1) && CollUtil.isEmpty(coll2) && ArrayUtil.isEmpty(otherColls)) {
return new ArrayList<>(0);
}
        // compute the total number of elements
int totalSize = 0;
totalSize += size(coll1);
totalSize += size(coll2);
if (otherColls != null) {
for (final Collection<T> otherColl : otherColls) {
totalSize += size(otherColl);
}
}
        // size the list upfront to avoid repeated resizing
final List<T> res = new ArrayList<>(totalSize);
if (coll1 != null) {
res.addAll(coll1);
}
if (coll2 != null) {
res.addAll(coll2);
}
if (otherColls == null) {
return res;
}
for (final Collection<T> otherColl : otherColls) {
if (otherColl != null) {
res.addAll(otherColl);
}
}
return res;
} | @SuppressWarnings({"ConfusingArgumentToVarargsMethod", "ConstantValue"})
@Test
public void unionAllNullTest() {
final List<String> list1 = new ArrayList<>();
final List<String> list2 = null;
final List<String> list3 = null;
final List<String> list = CollUtil.unionAll(list1, list2, list3);
assertNotNull(list);
final List<String> resList2 = CollUtil.unionAll(null, null, null);
assertNotNull(resList2);
} |
@SuppressWarnings("unchecked")
public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) {
final BiFunction<Visitor<?, ?>, Schema, Object> handler = HANDLER.get(schema.type());
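        // HANDLER maps each schema type to the visitor callback that handles it; an absent entry means the type is unsupported.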
if (handler == null) {
throw new UnsupportedOperationException("Unsupported schema type: " + schema.type());
}
return (S) handler.apply(visitor, schema);
} | @Test
public void shouldVisitPrimitives() {
// Given:
visitor = new Visitor<String, Integer>() {
@Override
public String visitPrimitive(final Schema schema) {
return "Expected";
}
};
primitiveSchemas().forEach(schema -> {
// When:
final String result = SchemaWalker.visit(schema, visitor);
// Then:
assertThat(result, is("Expected"));
});
} |
@Bean
public RetryRegistry retryRegistry(RetryConfigurationProperties retryConfigurationProperties,
EventConsumerRegistry<RetryEvent> retryEventConsumerRegistry,
RegistryEventConsumer<Retry> retryRegistryEventConsumer,
@Qualifier("compositeRetryCustomizer") CompositeCustomizer<RetryConfigCustomizer> compositeRetryCustomizer) {
RetryRegistry retryRegistry = createRetryRegistry(retryConfigurationProperties,
retryRegistryEventConsumer, compositeRetryCustomizer);
registerEventConsumer(retryRegistry, retryEventConsumerRegistry,
retryConfigurationProperties);
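        // Eagerly create a Retry for every configured instance so each is registered before first use.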
retryConfigurationProperties.getInstances()
.forEach((name, properties) ->
retryRegistry.retry(name, retryConfigurationProperties
.createRetryConfig(name, compositeRetryCustomizer)));
return retryRegistry;
} | @Test
public void testRetryRegistry() {
InstanceProperties instanceProperties1 = new InstanceProperties();
instanceProperties1.setMaxAttempts(3);
InstanceProperties instanceProperties2 = new InstanceProperties();
instanceProperties2.setMaxAttempts(2);
RetryConfigurationProperties retryConfigurationProperties = new RetryConfigurationProperties();
retryConfigurationProperties.getInstances().put("backend1", instanceProperties1);
retryConfigurationProperties.getInstances().put("backend2", instanceProperties2);
retryConfigurationProperties.setRetryAspectOrder(200);
RetryConfiguration retryConfiguration = new RetryConfiguration();
DefaultEventConsumerRegistry<RetryEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
RetryRegistry retryRegistry = retryConfiguration
.retryRegistry(retryConfigurationProperties, eventConsumerRegistry,
new CompositeRegistryEventConsumer<>(emptyList()), compositeRetryCustomizerTest());
assertThat(retryConfigurationProperties.getRetryAspectOrder()).isEqualTo(200);
assertThat(retryRegistry.getAllRetries().size()).isEqualTo(2);
Retry retry1 = retryRegistry.retry("backend1");
assertThat(retry1).isNotNull();
assertThat(retry1.getRetryConfig().getMaxAttempts()).isEqualTo(3);
Retry retry2 = retryRegistry.retry("backend2");
assertThat(retry2).isNotNull();
assertThat(retry2.getRetryConfig().getMaxAttempts()).isEqualTo(2);
assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2);
} |
public static DenseSparseMatrix createIdentity(int dimension) {
SparseVector[] newValues = new SparseVector[dimension];
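        // Each row is a sparse vector with a single 1.0 at its own index, i.e. on the diagonal.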
for (int i = 0; i < dimension; i++) {
newValues[i] = new SparseVector(dimension, new int[]{i}, new double[]{1.0});
}
return new DenseSparseMatrix(newValues);
} | @Test
public void testCreateIdentity() {
DenseSparseMatrix identity = DenseSparseMatrix.createIdentity(5);
assertMatrixEquals(new DenseMatrix(new double[][]{new double[]{1.0, 0.0, 0.0, 0.0, 0.0}, new double[]{0.0, 1.0, 0.0, 0.0, 0.0}, new double[]{0.0, 0.0, 1.0, 0.0, 0.0}, new double[]{0.0, 0.0, 0.0, 1.0, 0.0}, new double[]{0.0, 0.0, 0.0, 0.0, 1.0}}),identity);
identity = DenseSparseMatrix.createIdentity(1);
assertMatrixEquals(new DenseMatrix(new double[][]{new double[]{1.0}}),identity);
} |
public static URL parseURL(String address, Map<String, String> defaults) {
if (StringUtils.isEmpty(address)) {
throw new IllegalArgumentException("Address is not allowed to be empty, please re-enter.");
}
String url;
if (address.contains("://") || address.contains(URL_PARAM_STARTING_SYMBOL)) {
url = address;
} else {
String[] addresses = COMMA_SPLIT_PATTERN.split(address);
url = addresses[0];
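            // Any additional comma-separated addresses become the 'backup' parameter of the first address.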
if (addresses.length > 1) {
StringBuilder backup = new StringBuilder();
for (int i = 1; i < addresses.length; i++) {
if (i > 1) {
backup.append(',');
}
backup.append(addresses[i]);
}
url += URL_PARAM_STARTING_SYMBOL + RemotingConstants.BACKUP_KEY + "=" + backup.toString();
}
}
String defaultProtocol = defaults == null ? null : defaults.get(PROTOCOL_KEY);
if (StringUtils.isEmpty(defaultProtocol)) {
defaultProtocol = DUBBO_PROTOCOL;
}
String defaultUsername = defaults == null ? null : defaults.get(USERNAME_KEY);
String defaultPassword = defaults == null ? null : defaults.get(PASSWORD_KEY);
int defaultPort = StringUtils.parseInteger(defaults == null ? null : defaults.get(PORT_KEY));
String defaultPath = defaults == null ? null : defaults.get(PATH_KEY);
Map<String, String> defaultParameters = defaults == null ? null : new HashMap<>(defaults);
if (defaultParameters != null) {
defaultParameters.remove(PROTOCOL_KEY);
defaultParameters.remove(USERNAME_KEY);
defaultParameters.remove(PASSWORD_KEY);
defaultParameters.remove(HOST_KEY);
defaultParameters.remove(PORT_KEY);
defaultParameters.remove(PATH_KEY);
}
URL u = URL.cacheableValueOf(url);
boolean changed = false;
String protocol = u.getProtocol();
String username = u.getUsername();
String password = u.getPassword();
String host = u.getHost();
int port = u.getPort();
String path = u.getPath();
Map<String, String> parameters = new HashMap<>(u.getParameters());
if (StringUtils.isEmpty(protocol)) {
changed = true;
protocol = defaultProtocol;
}
if (StringUtils.isEmpty(username) && StringUtils.isNotEmpty(defaultUsername)) {
changed = true;
username = defaultUsername;
}
if (StringUtils.isEmpty(password) && StringUtils.isNotEmpty(defaultPassword)) {
changed = true;
password = defaultPassword;
}
/*if (u.isAnyHost() || u.isLocalHost()) {
changed = true;
host = NetUtils.getLocalHost();
}*/
if (port <= 0) {
if (defaultPort > 0) {
changed = true;
port = defaultPort;
} else {
changed = true;
port = 9090;
}
}
if (StringUtils.isEmpty(path)) {
if (StringUtils.isNotEmpty(defaultPath)) {
changed = true;
path = defaultPath;
}
}
if (defaultParameters != null && defaultParameters.size() > 0) {
for (Map.Entry<String, String> entry : defaultParameters.entrySet()) {
String key = entry.getKey();
String defaultValue = entry.getValue();
if (StringUtils.isNotEmpty(defaultValue)) {
String value = parameters.get(key);
if (StringUtils.isEmpty(value)) {
changed = true;
parameters.put(key, defaultValue);
}
}
}
}
if (changed) {
u = new ServiceConfigURL(protocol, username, password, host, port, path, parameters);
}
return u;
} | @Test
void testDefaultUrl() {
String address = "127.0.0.1";
URL url = UrlUtils.parseURL(address, null);
assertEquals(localAddress + ":9090", url.getAddress());
assertEquals(9090, url.getPort());
assertEquals("dubbo", url.getProtocol());
assertNull(url.getUsername());
assertNull(url.getPassword());
assertNull(url.getPath());
} |
public List<Periodical> getAll() {
return Lists.newArrayList(periodicals);
} | @Test
public void testGetAll() throws Exception {
periodicals.registerAndStart(periodical);
assertEquals("getAll() did not return all periodicals", Lists.newArrayList(periodical), periodicals.getAll());
} |
public Schema mergeTables(
Map<FeatureOption, MergingStrategy> mergingStrategies,
Schema sourceSchema,
List<SqlNode> derivedColumns,
List<SqlWatermark> derivedWatermarkSpecs,
SqlTableConstraint derivedPrimaryKey) {
SchemaBuilder schemaBuilder =
new SchemaBuilder(
mergingStrategies,
sourceSchema,
(FlinkTypeFactory) validator.getTypeFactory(),
dataTypeFactory,
validator,
escapeExpression);
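        // Append derived features in dependency order: columns first, since watermarks and the primary key may reference them.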
schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns);
schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey);
return schemaBuilder.build();
} | @Test
void mergeOverwritingGeneratedColumnsDuplicate() {
Schema sourceSchema =
Schema.newBuilder()
.column("one", DataTypes.INT())
.columnByExpression("two", "one + 1")
.build();
List<SqlNode> derivedColumns =
Collections.singletonList(computedColumn("two", plus("one", "3")));
Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies();
mergingStrategies.put(FeatureOption.GENERATED, MergingStrategy.OVERWRITING);
Schema mergedSchema =
util.mergeTables(
mergingStrategies,
sourceSchema,
derivedColumns,
Collections.emptyList(),
null);
Schema expectedSchema =
Schema.newBuilder()
.column("one", DataTypes.INT())
.columnByExpression("two", "`one` + 3")
.build();
assertThat(mergedSchema).isEqualTo(expectedSchema);
} |
@Override
@Async
public void updateJobLogResultAsync(Long logId, LocalDateTime endTime, Integer duration, boolean success, String result) {
try {
JobLogDO updateObj = JobLogDO.builder().id(logId).endTime(endTime).duration(duration)
.status(success ? JobLogStatusEnum.SUCCESS.getStatus() : JobLogStatusEnum.FAILURE.getStatus())
.result(result).build();
jobLogMapper.updateById(updateObj);
} catch (Exception ex) {
            log.error("[updateJobLogResultAsync][logId({}) endTime({}) duration({}) success({}) result({})]",
                    logId, endTime, duration, success, result, ex);
}
} | @Test
public void testUpdateJobLogResultAsync_failure() {
        // mock data
JobLogDO log = randomPojo(JobLogDO.class, o -> {
o.setExecuteIndex(1);
o.setStatus(JobLogStatusEnum.RUNNING.getStatus());
});
jobLogMapper.insert(log);
        // prepare parameters
Long logId = log.getId();
LocalDateTime endTime = randomLocalDateTime();
Integer duration = randomInteger();
boolean success = false;
String result = randomString();
        // invoke
jobLogService.updateJobLogResultAsync(logId, endTime, duration, success, result);
        // verify that the record's attributes are correct
JobLogDO dbLog = jobLogMapper.selectById(log.getId());
assertEquals(endTime, dbLog.getEndTime());
assertEquals(duration, dbLog.getDuration());
assertEquals(JobLogStatusEnum.FAILURE.getStatus(), dbLog.getStatus());
assertEquals(result, dbLog.getResult());
} |
public static ProxyBackendHandler newInstance(final DatabaseType databaseType, final String sql, final SQLStatement sqlStatement,
final ConnectionSession connectionSession, final HintValueContext hintValueContext) throws SQLException {
if (sqlStatement instanceof EmptyStatement) {
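            // Empty statements (e.g. a bare semicolon) require no execution and are answered by a skip handler.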
return new SkipBackendHandler(sqlStatement);
}
SQLStatementContext sqlStatementContext = sqlStatement instanceof DistSQLStatement ? new DistSQLStatementContext((DistSQLStatement) sqlStatement)
: new SQLBindEngine(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData(), connectionSession.getCurrentDatabaseName(), hintValueContext).bind(sqlStatement,
Collections.emptyList());
QueryContext queryContext = new QueryContext(sqlStatementContext, sql, Collections.emptyList(), hintValueContext, connectionSession.getConnectionContext(),
ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData());
connectionSession.setQueryContext(queryContext);
return newInstance(databaseType, queryContext, connectionSession, false);
} | @Test
void assertNewInstanceWithRQLStatementInTransaction() throws SQLException {
when(connectionSession.getTransactionStatus().isInTransaction()).thenReturn(true);
String sql = "SHOW DEFAULT SINGLE TABLE STORAGE UNIT";
SQLStatement sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession);
ProxyBackendHandler actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext());
assertThat(actual, instanceOf(DistSQLQueryBackendHandler.class));
} |
@Override
public void start(Callback<None> callback) {
LOG.info("{} enabled", _printName);
Callback<None> prepareWarmUpCallback = new Callback<None>() {
@Override
public void onError(Throwable e) {
if (e instanceof TimeoutException)
{
LOG.info("{} hit timeout: {}ms. The WarmUp will continue in background", _printName, _warmUpTimeoutMillis);
callback.onSuccess(None.none());
}
else
{
LOG.error("{} failed to fetch dual read mode, continuing warmup.", _printName, e);
}
continueWarmUp(callback);
}
@Override
public void onSuccess(None result) {
continueWarmUp(callback);
}
};
_loadBalancer.start(new Callback<None>() {
@Override
public void onError(Throwable e) {
callback.onError(e);
}
@Override
public void onSuccess(None result) {
_allStartTime = _timeSupplier.get();
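                // Hand warm-up off to the executor; the timeout handling above lets start() complete even if warm-up runs long.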
_executorService.submit(() -> prepareWarmUp(prepareWarmUpCallback));
}
});
} | @Test(timeOut = 10000, retryAnalyzer = ThreeRetries.class)
public void testHitTimeout() throws URISyntaxException, InterruptedException, ExecutionException, TimeoutException
{
int NRequests = 5000;
int warmUpTimeout = 2;
int concurrentRequests = 5;
int requestTime = 100;
float requestsPerSecond = 1000 / requestTime * concurrentRequests;
int expectedRequests = (int) (requestsPerSecond * warmUpTimeout);
int deviation = (int) requestsPerSecond; // we allow inaccuracies of 1s
createNServicesIniFiles(NRequests);
TestLoadBalancer balancer = new TestLoadBalancer(requestTime);
AtomicInteger requestCount = balancer.getRequestCount();
LoadBalancer warmUpLoadBalancer = new WarmUpLoadBalancer(balancer, balancer, Executors.newSingleThreadScheduledExecutor(),
_tmpdir.getAbsolutePath(), MY_SERVICES_FS, _FSBasedDownstreamServicesFetcher, warmUpTimeout, concurrentRequests);
FutureCallback<None> callback = new FutureCallback<>();
warmUpLoadBalancer.start(callback);
callback.get();
Assert.assertTrue(expectedRequests - deviation < requestCount.get()
&& expectedRequests + deviation > requestCount.get(),
"Expected # of requests between " + expectedRequests + " +/-" + deviation + ", found:" + requestCount.get());
} |
public static Code httpStatusToGrpcCode(int httpStatusCode) {
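        // Mirrors gRPC's conventional HTTP-to-status mapping: auth failures map to dedicated codes,
        // transient gateway errors to UNAVAILABLE, and malformed or informational responses to INTERNAL.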
if (httpStatusCode >= 100 && httpStatusCode < 200) {
return Code.INTERNAL;
}
if (httpStatusCode == HttpResponseStatus.BAD_REQUEST.code()
|| httpStatusCode == HttpResponseStatus.REQUEST_HEADER_FIELDS_TOO_LARGE.code()) {
return Code.INTERNAL;
} else if (httpStatusCode == HttpResponseStatus.UNAUTHORIZED.code()) {
return Code.UNAUTHENTICATED;
} else if (httpStatusCode == HttpResponseStatus.FORBIDDEN.code()) {
return Code.PERMISSION_DENIED;
} else if (httpStatusCode == HttpResponseStatus.NOT_FOUND.code()) {
return Code.UNIMPLEMENTED;
} else if (httpStatusCode == HttpResponseStatus.BAD_GATEWAY.code()
|| httpStatusCode == HttpResponseStatus.TOO_MANY_REQUESTS.code()
|| httpStatusCode == HttpResponseStatus.SERVICE_UNAVAILABLE.code()
|| httpStatusCode == HttpResponseStatus.GATEWAY_TIMEOUT.code()) {
return Code.UNAVAILABLE;
} else {
return Code.UNKNOWN;
}
} | @Test
void httpStatusToGrpcCode() {
Assertions.assertEquals(Code.UNIMPLEMENTED, TriRpcStatus.httpStatusToGrpcCode(404));
Assertions.assertEquals(
Code.UNAVAILABLE, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.BAD_GATEWAY.code()));
Assertions.assertEquals(
Code.UNAVAILABLE, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.TOO_MANY_REQUESTS.code()));
Assertions.assertEquals(
Code.UNAVAILABLE, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.SERVICE_UNAVAILABLE.code()));
Assertions.assertEquals(
Code.UNAVAILABLE, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.GATEWAY_TIMEOUT.code()));
Assertions.assertEquals(Code.INTERNAL, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.CONTINUE.code()));
Assertions.assertEquals(
Code.INTERNAL,
TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.REQUEST_HEADER_FIELDS_TOO_LARGE.code()));
Assertions.assertEquals(Code.UNKNOWN, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.ACCEPTED.code()));
Assertions.assertEquals(
Code.PERMISSION_DENIED, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.FORBIDDEN.code()));
Assertions.assertEquals(
Code.UNIMPLEMENTED, TriRpcStatus.httpStatusToGrpcCode(HttpResponseStatus.NOT_FOUND.code()));
} |
public static List<String> mergeValues(
ExtensionDirector extensionDirector, Class<?> type, String cfg, List<String> def) {
List<String> defaults = new ArrayList<>();
if (def != null) {
for (String name : def) {
if (extensionDirector.getExtensionLoader(type).hasExtension(name)) {
defaults.add(name);
}
}
}
List<String> names = new ArrayList<>();
// add initial values
String[] configs = (cfg == null || cfg.trim().length() == 0) ? new String[0] : COMMA_SPLIT_PATTERN.split(cfg);
for (String config : configs) {
if (config != null && config.trim().length() > 0) {
names.add(config);
}
}
        // if '-default' is not specified, insert the configured defaults
if (!names.contains(REMOVE_VALUE_PREFIX + DEFAULT_KEY)) {
// add default extension
int i = names.indexOf(DEFAULT_KEY);
if (i > 0) {
names.addAll(i, defaults);
} else {
names.addAll(0, defaults);
}
names.remove(DEFAULT_KEY);
} else {
names.remove(DEFAULT_KEY);
}
        // remove values disabled with the '-' prefix, along with the '-' entry itself
for (String name : new ArrayList<String>(names)) {
if (name.startsWith(REMOVE_VALUE_PREFIX)) {
names.remove(name);
names.remove(name.substring(1));
}
}
return names;
} | @Test
void testMergeValues() {
List<String> merged = ConfigUtils.mergeValues(
ApplicationModel.defaultModel().getExtensionDirector(),
ThreadPool.class,
"aaa,bbb,default.custom",
asList("fixed", "default.limited", "cached"));
assertEquals(asList("fixed", "cached", "aaa", "bbb", "default.custom"), merged);
} |
@Override
public TopicList fetchAllTopicList() throws RemotingException, MQClientException, InterruptedException {
return this.defaultMQAdminExtImpl.fetchAllTopicList();
} | @Test
public void testFetchAllTopicList() throws RemotingException, MQClientException, InterruptedException {
TopicList topicList = defaultMQAdminExt.fetchAllTopicList();
assertThat(topicList.getTopicList().size()).isEqualTo(2);
assertThat(topicList.getTopicList()).contains("topic_one");
} |
public static String getJsonToSave(@Nullable final List<Tab> tabList) {
final JsonStringWriter jsonWriter = JsonWriter.string();
jsonWriter.object();
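        // Output shape: a single JSON object holding one array (under JSON_TABS_ARRAY_KEY) with an entry per tab.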
jsonWriter.array(JSON_TABS_ARRAY_KEY);
if (tabList != null) {
for (final Tab tab : tabList) {
tab.writeJsonOn(jsonWriter);
}
}
jsonWriter.end();
jsonWriter.end();
return jsonWriter.done();
} | @Test
public void testSaveAndReading() throws JsonParserException {
// Saving
final Tab.BlankTab blankTab = new Tab.BlankTab();
final Tab.DefaultKioskTab defaultKioskTab = new Tab.DefaultKioskTab();
final Tab.SubscriptionsTab subscriptionsTab = new Tab.SubscriptionsTab();
final Tab.ChannelTab channelTab = new Tab.ChannelTab(
666, "https://example.org", "testName");
final Tab.KioskTab kioskTab = new Tab.KioskTab(123, "trending_key");
final List<Tab> tabs = Arrays.asList(
blankTab, defaultKioskTab, subscriptionsTab, channelTab, kioskTab);
final String returnedJson = TabsJsonHelper.getJsonToSave(tabs);
// Reading
final JsonObject jsonObject = JsonParser.object().from(returnedJson);
assertTrue(jsonObject.containsKey(JSON_TABS_ARRAY_KEY));
final JsonArray tabsFromArray = jsonObject.getArray(JSON_TABS_ARRAY_KEY);
assertEquals(tabs.size(), tabsFromArray.size());
final Tab.BlankTab blankTabFromReturnedJson = requireNonNull((Tab.BlankTab) Tab.from(
(JsonObject) tabsFromArray.get(0)));
assertEquals(blankTab.getTabId(), blankTabFromReturnedJson.getTabId());
final Tab.DefaultKioskTab defaultKioskTabFromReturnedJson = requireNonNull(
(Tab.DefaultKioskTab) Tab.from((JsonObject) tabsFromArray.get(1)));
assertEquals(defaultKioskTab.getTabId(), defaultKioskTabFromReturnedJson.getTabId());
final Tab.SubscriptionsTab subscriptionsTabFromReturnedJson = requireNonNull(
(Tab.SubscriptionsTab) Tab.from((JsonObject) tabsFromArray.get(2)));
assertEquals(subscriptionsTab.getTabId(), subscriptionsTabFromReturnedJson.getTabId());
final Tab.ChannelTab channelTabFromReturnedJson = requireNonNull((Tab.ChannelTab) Tab.from(
(JsonObject) tabsFromArray.get(3)));
assertEquals(channelTab.getTabId(), channelTabFromReturnedJson.getTabId());
assertEquals(channelTab.getChannelServiceId(),
channelTabFromReturnedJson.getChannelServiceId());
assertEquals(channelTab.getChannelUrl(), channelTabFromReturnedJson.getChannelUrl());
assertEquals(channelTab.getChannelName(), channelTabFromReturnedJson.getChannelName());
final Tab.KioskTab kioskTabFromReturnedJson = requireNonNull((Tab.KioskTab) Tab.from(
(JsonObject) tabsFromArray.get(4)));
assertEquals(kioskTab.getTabId(), kioskTabFromReturnedJson.getTabId());
assertEquals(kioskTab.getKioskServiceId(), kioskTabFromReturnedJson.getKioskServiceId());
assertEquals(kioskTab.getKioskId(), kioskTabFromReturnedJson.getKioskId());
} |
@Override public void pluginAdded( final Object serviceObject ) {
try {
SpoonPluginInterface spoonPluginInterface =
(SpoonPluginInterface) getPluginRegistry().loadClass( (PluginInterface) serviceObject );
if ( plugins.get( serviceObject ) != null ) {
return;
}
SpoonPluginCategories categories = spoonPluginInterface.getClass().getAnnotation( SpoonPluginCategories.class );
if ( categories != null ) {
for ( String cat : categories.value() ) {
          List<SpoonPluginInterface> categoryList =
              pluginCategoryMap.computeIfAbsent( cat, k -> new ArrayList<>() );
          categoryList.add( spoonPluginInterface );
}
}
if ( spoonPluginInterface.getPerspective() != null ) {
getSpoonPerspectiveManager().addPerspective( spoonPluginInterface.getPerspective() );
}
plugins.put( serviceObject, spoonPluginInterface );
} catch ( KettlePluginException e ) {
e.printStackTrace();
}
} | @Test
public void testPluginAdded() throws Exception {
spoonPluginManager.pluginAdded( plugin1 );
verify( spoonPerspectiveManager ).addPerspective( spoonPerspective );
assertEquals( 1, spoonPluginManager.getPlugins().size() );
assertSame( spoonPluginInterface1, spoonPluginManager.getPlugins().get( 0 ) );
} |
@Override
public void eventAdded( KettleLoggingEvent event ) {
Object messageObject = event.getMessage();
checkNotNull( messageObject, "Expected log message to be defined." );
if ( messageObject instanceof LogMessage ) {
LogMessage message = (LogMessage) messageObject;
LoggingObjectInterface loggingObject = logObjProvider.apply( message.getLogChannelId() );
if ( loggingObject == null || ( loggingObject.getObjectType() == GENERAL && "Y".equals( EnvUtil.getSystemProperty( Const.KETTLE_LOG_GENERAL_OBJECTS_TO_DI_LOGGER ) ) ) ) {
// this can happen if logObject has been discarded while log events are still in flight.
logToLogger( diLogger, message.getLevel(),
message.getSubject() + " " + message.getMessage() );
} else if ( loggingObject.getObjectType() == TRANS || loggingObject.getObjectType() == STEP || loggingObject.getObjectType() == DATABASE ) {
logToLogger( transLogger, message.getLevel(), loggingObject, message );
} else if ( loggingObject.getObjectType() == JOB || loggingObject.getObjectType() == JOBENTRY ) {
logToLogger( jobLogger, message.getLevel(), loggingObject, message );
}
}
} | @Test
public void testAddLogEventTrans() {
when( logObjProvider.apply( logChannelId ) ).thenReturn( loggingObject );
when( loggingObject.getObjectType() ).thenReturn( LoggingObjectType.TRANS );
when( loggingObject.getFilename() ).thenReturn( "filename" );
when( message.getLevel() ).thenReturn( LogLevel.BASIC );
listener.eventAdded( logEvent );
verify( transLogger ).info( "[filename] " + msgText );
when( message.getLevel() ).thenReturn( LogLevel.ERROR );
listener.eventAdded( logEvent );
verify( transLogger ).error( "[filename] " + msgText );
verifyNoInteractions( diLogger );
verifyNoInteractions( jobLogger );
} |
@Override
public Num calculate(BarSeries series, Position position) {
if (position.isClosed()) {
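            // Use gross or net profit depending on excludeCosts; only a negative result counts as a loss.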
Num loss = excludeCosts ? position.getGrossProfit() : position.getProfit();
return loss.isNegative() ? loss : series.zero();
}
return series.zero();
} | @Test
public void calculateOnlyWithProfitPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
Trade.buyAt(3, series), Trade.sellAt(5, series));
AnalysisCriterion loss = getCriterion(true);
assertNumEquals(0, loss.calculate(series, tradingRecord));
} |
@PostMapping
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX
+ "namespaces", action = ActionTypes.WRITE, signType = SignType.CONSOLE)
public Result<Boolean> createNamespace(NamespaceForm namespaceForm) throws NacosException {
namespaceForm.validate();
String namespaceId = namespaceForm.getNamespaceId();
String namespaceName = namespaceForm.getNamespaceName();
String namespaceDesc = namespaceForm.getNamespaceDesc();
if (StringUtils.isBlank(namespaceId)) {
namespaceId = UUID.randomUUID().toString();
} else {
namespaceId = namespaceId.trim();
if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"namespaceId [" + namespaceId + "] mismatch the pattern");
}
if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"too long namespaceId, over " + NAMESPACE_ID_MAX_LENGTH);
}
// check unique
if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"the namespaceId is existed, namespaceId: " + namespaceForm.getNamespaceId());
}
}
// contains illegal chars
if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"namespaceName [" + namespaceName + "] contains illegal char");
}
return Result.success(namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc));
} | @Test
void testCreateNamespaceWithNonUniqueId() {
when(namespacePersistService.tenantInfoCountByTenantId("test-id")).thenReturn(1);
NamespaceForm form = new NamespaceForm();
form.setNamespaceId("test-id");
form.setNamespaceDesc("testDesc");
form.setNamespaceName("testName");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
} |
public Operation parseMethod(
Method method,
List<Parameter> globalParameters,
JsonView jsonViewAnnotation) {
JavaType classType = TypeFactory.defaultInstance().constructType(method.getDeclaringClass());
return parseMethod(
classType.getClass(),
method,
globalParameters,
null,
null,
null,
null,
new ArrayList<>(),
Optional.empty(),
new HashSet<>(),
new ArrayList<>(),
false,
null,
null,
jsonViewAnnotation,
null,
null);
} | @Test(description = "Deprecated Method")
public void testDeprecatedMethod() {
Reader reader = new Reader(new OpenAPI());
Method[] methods = DeprecatedFieldsResource.class.getMethods();
Operation deprecatedOperation = reader.parseMethod(methods[0], null, null);
assertNotNull(deprecatedOperation);
assertTrue(deprecatedOperation.getDeprecated());
} |
public static PrimitiveIterator.OfInt all(int pageCount) {
return new IndexIterator(0, pageCount, i -> true, i -> i);
} | @Test
public void testAll() {
assertEquals(IndexIterator.all(10), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
} |
public SourceWithMetadata lookupSource(int globalLineNumber, int sourceColumn)
throws IncompleteSourceWithMetadataException {
LineToSource lts = this.sourceReferences().stream()
.filter(lts1 -> lts1.includeLine(globalLineNumber))
.findFirst()
.orElseThrow(() -> new IllegalArgumentException("can't find the config segment related to line " + globalLineNumber));
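        // Re-base the global (concatenated) line number onto the matching segment so the reported line is local to that source.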
return new SourceWithMetadata(lts.source.getProtocol(), lts.source.getId(),
globalLineNumber + 1 - lts.startLine, sourceColumn, lts.source.getText());
} | @Test
    public void testSourceAndLineRemapping_pipelineDefinedInMultipleFiles() throws IncompleteSourceWithMetadataException {
final SourceWithMetadata[] parts = {
new SourceWithMetadata("file", "/tmp/input", 0, 0, PIPELINE_CONFIG_PART_1),
new SourceWithMetadata("file", "/tmp/output", 0, 0, PIPELINE_CONFIG_PART_2)
};
sut = new PipelineConfig(source, pipelineIdSym, toRubyArray(parts), SETTINGS);
assertEquals("return the line of first segment", 2, (int) sut.lookupSource(2, 0).getLine());
assertEquals("return the id of first segment", "/tmp/input", sut.lookupSource(2, 0).getId());
assertEquals("return the line of second segment", 1, (int) sut.lookupSource(4, 0).getLine());
assertEquals("return the id of second segment", "/tmp/output", sut.lookupSource(4, 0).getId());
} |
@Override
public <T> List<T> toList(DataTable dataTable, Type itemType) {
requireNonNull(dataTable, "dataTable may not be null");
requireNonNull(itemType, "itemType may not be null");
if (dataTable.isEmpty()) {
return emptyList();
}
ListOrProblems<T> result = toListOrProblems(dataTable, itemType);
if (result.hasList()) {
return unmodifiableList(result.getList());
}
throw listNoConverterDefined(
itemType,
result.getProblems());
} | @Test
void convert_to_list__double_column__single_row__throws_exception() {
DataTable table = parse("",
"| 3 | 5 |");
CucumberDataTableException exception = assertThrows(
CucumberDataTableException.class,
() -> converter.toList(table, Integer.class));
assertThat(exception.getMessage(), is("" +
"Can't convert DataTable to List<java.lang.Integer>.\n" +
"Please review these problems:\n" +
"\n" +
" - There was a table cell transformer for java.lang.Integer but the table was too wide to use it.\n" +
" Please reduce the table width to use this converter.\n" +
"\n" +
" - There was no table entry or table row transformer registered for java.lang.Integer.\n" +
" Please consider registering a table entry or row transformer.\n" +
"\n" +
"Note: Usually solving one is enough"));
} |
@Override
public Mono<GetExternalServiceCredentialsResponse> getExternalServiceCredentials(final GetExternalServiceCredentialsRequest request) {
final ExternalServiceCredentialsGenerator credentialsGenerator = this.credentialsGeneratorByType
.get(request.getExternalService());
if (credentialsGenerator == null) {
return Mono.error(Status.INVALID_ARGUMENT.asException());
}
final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();
return rateLimiters.forDescriptor(RateLimiters.For.EXTERNAL_SERVICE_CREDENTIALS).validateReactive(authenticatedDevice.accountIdentifier())
.then(Mono.fromSupplier(() -> {
final ExternalServiceCredentials externalServiceCredentials = credentialsGenerator
.generateForUuid(authenticatedDevice.accountIdentifier());
return GetExternalServiceCredentialsResponse.newBuilder()
.setUsername(externalServiceCredentials.username())
.setPassword(externalServiceCredentials.password())
.build();
}));
} | @Test
public void testRateLimitExceeded() throws Exception {
final Duration retryAfter = MockUtils.updateRateLimiterResponseToFail(
rateLimiters, RateLimiters.For.EXTERNAL_SERVICE_CREDENTIALS, AUTHENTICATED_ACI, Duration.ofSeconds(100), false);
Mockito.reset(ART_CREDENTIALS_GENERATOR);
assertRateLimitExceeded(
retryAfter,
() -> authenticatedServiceStub().getExternalServiceCredentials(
GetExternalServiceCredentialsRequest.newBuilder()
.setExternalService(ExternalServiceType.EXTERNAL_SERVICE_TYPE_ART)
.build()),
ART_CREDENTIALS_GENERATOR
);
} |
@Override
public void onNewActivity(Activity activity) {
} | @Test
public void onNewActivity_activityIsNotTheOneLaunchedByNotifs_dontClearInitialNotification() throws Exception {
Activity activity = mock(Activity.class);
Intent intent = mock(Intent.class);
when(activity.getIntent()).thenReturn(intent);
when(mAppLaunchHelper.isLaunchIntentsActivity(activity)).thenReturn(false);
when(mAppLaunchHelper.isLaunchIntentOfNotification(any(Intent.class))).thenReturn(false);
createUUT().onNewActivity(activity);
verify(InitialNotificationHolder.getInstance(), never()).clear();
} |
public static Instant fromMillisOrIso8601(String time, String fieldName) {
try {
return Instant.ofEpochMilli(Long.parseLong(time));
} catch (NumberFormatException nfe) {
// TODO: copied from PluginConfigurationProcessor, find a way to share better
try {
DateTimeFormatter formatter =
new DateTimeFormatterBuilder()
.append(DateTimeFormatter.ISO_DATE_TIME)
.optionalStart()
.appendOffset("+HHmm", "+0000")
.optionalEnd()
.toFormatter();
return formatter.parse(time, Instant::from);
} catch (DateTimeParseException dtpe) {
throw new IllegalArgumentException(
fieldName
+ " must be a number of milliseconds since epoch or an ISO 8601 formatted date");
}
}
} | @Test
public void testFromMillisOrIso8601_millis() {
Instant parsed = Instants.fromMillisOrIso8601("100", "ignored");
Assert.assertEquals(Instant.ofEpochMilli(100), parsed);
} |
@Override
public ResultSet getColumnPrivileges(final String catalog, final String schema, final String table, final String columnNamePattern) throws SQLException {
return createDatabaseMetaDataResultSet(
getDatabaseMetaData().getColumnPrivileges(getActualCatalog(catalog), getActualSchema(schema), getActualTable(getActualCatalog(catalog), table), columnNamePattern));
} | @Test
void assertGetColumnPrivileges() throws SQLException {
when(databaseMetaData.getColumnPrivileges("test", null, null, null)).thenReturn(resultSet);
assertThat(shardingSphereDatabaseMetaData.getColumnPrivileges("test", null, null, null), instanceOf(DatabaseMetaDataResultSet.class));
} |
public Person create(Person person) {
return persist(person);
} | @Test
void handlesNullFullName() {
assertThatExceptionOfType(ConstraintViolationException.class).isThrownBy(()->
daoTestRule.inTransaction(() -> personDAO.create(new Person(null, "The null", 0))));
} |
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
int type = columnDef.getColumnMeta() >> 8;
int length = columnDef.getColumnMeta() & 0xff;
// unpack type & length, see https://bugs.mysql.com/bug.php?id=37426.
if (0x30 != (type & 0x30)) {
length += ((type & 0x30) ^ 0x30) << 4;
type |= 0x30;
}
switch (MySQLBinaryColumnType.valueOf(type)) {
case ENUM:
return readEnumValue(length, payload);
case SET:
return payload.getByteBuf().readByte();
case STRING:
return new MySQLBinaryString(payload.readStringFixByBytes(readActualLength(length, payload)));
default:
throw new UnsupportedSQLOperationException(MySQLBinaryColumnType.valueOf(type).toString());
}
} | @Test
void assertReadLongStringValue() {
String expected = "test_value";
columnDef.setColumnMeta((MySQLBinaryColumnType.STRING.getValue() ^ ((256 & 0x300) >> 4)) << 8);
when(payload.getByteBuf()).thenReturn(byteBuf);
when(byteBuf.readUnsignedShortLE()).thenReturn(expected.length());
when(payload.readStringFixByBytes(expected.length())).thenReturn(expected.getBytes());
Serializable actual = new MySQLStringBinlogProtocolValue().read(columnDef, payload);
assertInstanceOf(MySQLBinaryString.class, actual);
assertThat(((MySQLBinaryString) actual).getBytes(), is(expected.getBytes()));
} |
@Udf
public String trim(
@UdfParameter(
description = "The string to trim") final String input) {
if (input == null) {
return null;
}
return input.trim();
} | @Test
public void shouldRemoveLeadingWhitespace() {
final String result = udf.trim(" \t Foo Bar");
assertThat(result, is("Foo Bar"));
} |
public static void main(String[] args) {
var simpleWizard = new SimpleWizard();
simpleWizard.smoke();
var advancedWizard = new AdvancedWizard(new SecondBreakfastTobacco());
advancedWizard.smoke();
var advancedSorceress = new AdvancedSorceress();
advancedSorceress.setTobacco(new SecondBreakfastTobacco());
advancedSorceress.smoke();
var injector = Guice.createInjector(new TobaccoModule());
var guiceWizard = injector.getInstance(GuiceWizard.class);
guiceWizard.smoke();
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpRequest msg) throws Exception {
CommandContext commandContext = HttpCommandDecoder.decode(msg);
        // return 404 when the command context cannot be constructed
if (commandContext == null) {
log.warn(QOS_UNEXPECTED_EXCEPTION, "", "", "can not found commandContext, url: " + msg.uri());
FullHttpResponse response = http(404);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} else {
commandContext.setRemote(ctx.channel());
commandContext.setQosConfiguration(qosConfiguration);
try {
String result = commandExecutor.execute(commandContext);
int httpCode = commandContext.getHttpCode();
FullHttpResponse response = http(httpCode, result);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} catch (NoSuchCommandException ex) {
log.error(QOS_COMMAND_NOT_FOUND, "", "", "can not find command: " + commandContext, ex);
FullHttpResponse response = http(404);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} catch (PermissionDenyException ex) {
log.error(
QOS_PERMISSION_DENY_EXCEPTION,
"",
"",
"permission deny to access command: " + commandContext,
ex);
FullHttpResponse response = http(403);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} catch (Exception qosEx) {
log.error(
QOS_UNEXPECTED_EXCEPTION,
"",
"",
"execute commandContext: " + commandContext + " got exception",
qosEx);
FullHttpResponse response = http(500, qosEx.getMessage());
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
}
}
} | @Test
void test2() throws Exception {
ChannelHandlerContext context = mock(ChannelHandlerContext.class);
ChannelFuture future = mock(ChannelFuture.class);
when(context.writeAndFlush(any(FullHttpResponse.class))).thenReturn(future);
HttpRequest message = Mockito.mock(HttpRequest.class);
when(message.uri()).thenReturn("localhost:80/greeting");
when(message.method()).thenReturn(HttpMethod.GET);
HttpProcessHandler handler = new HttpProcessHandler(
FrameworkModel.defaultModel(),
QosConfiguration.builder()
.anonymousAccessPermissionLevel(PermissionLevel.NONE.name())
.build());
handler.channelRead0(context, message);
verify(future).addListener(ChannelFutureListener.CLOSE);
ArgumentCaptor<FullHttpResponse> captor = ArgumentCaptor.forClass(FullHttpResponse.class);
verify(context).writeAndFlush(captor.capture());
FullHttpResponse response = captor.getValue();
assertThat(response.status().code(), equalTo(200));
} |
@Override
public Consumer createConsumer(Processor aProcessor) throws Exception {
// validate that the endpoint is configured properly
if (getMonitorType() != null) {
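// a monitor consumer needs a platform MBean server, an observed attribute, and type-specific settings, so validate them up front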
if (!isPlatformServer()) {
throw new IllegalArgumentException(ERR_PLATFORM_SERVER);
}
if (ObjectHelper.isEmpty(getObservedAttribute())) {
throw new IllegalArgumentException(ERR_OBSERVED_ATTRIBUTE);
}
if (getMonitorType().equals("string")) {
if (ObjectHelper.isEmpty(getStringToCompare())) {
throw new IllegalArgumentException(ERR_STRING_TO_COMPARE);
}
if (!isNotifyDiffer() && !isNotifyMatch()) {
throw new IllegalArgumentException(ERR_STRING_NOTIFY);
}
} else if (getMonitorType().equals("gauge")) {
if (!isNotifyHigh() && !isNotifyLow()) {
throw new IllegalArgumentException(ERR_GAUGE_NOTIFY);
}
if (getThresholdHigh() == null) {
throw new IllegalArgumentException(ERR_THRESHOLD_HIGH);
}
if (getThresholdLow() == null) {
throw new IllegalArgumentException(ERR_THRESHOLD_LOW);
}
}
JMXMonitorConsumer answer = new JMXMonitorConsumer(this, aProcessor);
configureConsumer(answer);
return answer;
} else {
// shouldn't need any other validation.
JMXConsumer answer = new JMXConsumer(this, aProcessor);
configureConsumer(answer);
return answer;
}
} | @Test
public void noNotifyDifferOrNotifyMatch() throws Exception {
JMXEndpoint ep = context.getEndpoint(
"jmx:platform?objectDomain=FooDomain&objectName=theObjectName&monitorType=string&observedAttribute=foo&stringToCompare=foo",
JMXEndpoint.class);
try {
ep.createConsumer(null);
fail("expected exception");
} catch (IllegalArgumentException e) {
assertEquals(JMXEndpoint.ERR_STRING_NOTIFY, e.getMessage());
}
} |
WhitespaceArgumentDelimiter getSqlDelimiter() {
return this.sqlDelimiter;
} | @Test
void testSqlDelimiterCharacters() {
assertTrue(sqlCompleter.getSqlDelimiter().isDelimiterChar("r,", 1));
assertTrue(sqlCompleter.getSqlDelimiter().isDelimiterChar("SS,", 2));
assertTrue(sqlCompleter.getSqlDelimiter().isDelimiterChar(",", 0));
assertTrue(sqlCompleter.getSqlDelimiter().isDelimiterChar("ttt,", 3));
} |
@Override
public MapperResult getGroupIdList(MapperContext context) {
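// Derby has no LIMIT clause; pagination uses OFFSET ... ROWS FETCH NEXT ... ROWS ONLY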
return new MapperResult(
"SELECT group_id FROM config_info WHERE tenant_id ='" + NamespaceUtil.getNamespaceDefaultId()
+ "' GROUP BY group_id OFFSET " + context.getStartRow() + " ROWS FETCH NEXT "
+ context.getPageSize() + " ROWS ONLY", Collections.emptyList());
} | @Test
void testGetGroupIdList() {
MapperResult mapperResult = configInfoMapperByDerby.getGroupIdList(context);
assertEquals(mapperResult.getSql(),
"SELECT group_id FROM config_info WHERE tenant_id ='' GROUP BY group_id OFFSET " + startRow + " ROWS FETCH NEXT " + pageSize
+ " ROWS ONLY");
assertArrayEquals(mapperResult.getParamList().toArray(), emptyObjs);
} |
@Override
public <T_OTHER, OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> connectAndProcess(
NonKeyedPartitionStream<T_OTHER> other,
TwoInputNonBroadcastStreamProcessFunction<T, T_OTHER, OUT> processFunction) {
validateStates(
processFunction.usesStates(),
new HashSet<>(
Arrays.asList(
StateDeclaration.RedistributionMode.NONE,
StateDeclaration.RedistributionMode.IDENTICAL)));
TypeInformation<OUT> outTypeInfo =
StreamUtils.getOutputTypeForTwoInputNonBroadcastProcessFunction(
processFunction,
getType(),
((NonKeyedPartitionStreamImpl<T_OTHER>) other).getType());
TwoInputNonBroadcastProcessOperator<T, T_OTHER, OUT> processOperator =
new TwoInputNonBroadcastProcessOperator<>(processFunction);
Transformation<OUT> outTransformation =
StreamUtils.getTwoInputTransformation(
"TwoInput-Process",
this,
(NonKeyedPartitionStreamImpl<T_OTHER>) other,
outTypeInfo,
processOperator);
environment.addOperator(outTransformation);
return StreamUtils.wrapWithConfigureHandle(
new NonKeyedPartitionStreamImpl<>(environment, outTransformation));
} | @Test
void testConnectBroadcastStream() throws Exception {
ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
NonKeyedPartitionStreamImpl<Long> stream =
new NonKeyedPartitionStreamImpl<>(
env, new TestingTransformation<>("t1", Types.LONG, 1));
stream.connectAndProcess(
new BroadcastStreamImpl<>(env, new TestingTransformation<>("t2", Types.INT, 1)),
new StreamTestUtils.NoOpTwoInputBroadcastStreamProcessFunction());
List<Transformation<?>> transformations = env.getTransformations();
assertThat(transformations).hasSize(1);
assertProcessType(transformations.get(0), TwoInputTransformation.class, Types.LONG);
} |
public synchronized String get() {
ConfidentialStore cs = ConfidentialStore.get();
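// lazily (re)load the secret on first use, or whenever the backing ConfidentialStore instance has changed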
if (secret == null || cs != lastCS) {
lastCS = cs;
try {
byte[] payload = load();
if (payload == null) {
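// nothing persisted yet: generate length/2 random bytes (each byte expands to two hex characters) and store them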
payload = cs.randomBytes(length / 2);
store(payload);
}
secret = Util.toHexString(payload).substring(0, length);
} catch (IOException e) {
throw new Error("Failed to load the key: " + getId(), e);
}
}
return secret;
} | @Test
public void loadingExistingKey() {
HexStringConfidentialKey key1 = new HexStringConfidentialKey("test", 8);
key1.get(); // this causes the key to be generated
// this second key of the same ID will cause it to load the key from the disk
HexStringConfidentialKey key2 = new HexStringConfidentialKey("test", 8);
assertEquals(key1.get(), key2.get());
} |
@Override
public ByteBuf getBytes(int index, byte[] dst) {
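// delegate to the offset-based overload, filling the entire destination array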
getBytes(index, dst, 0, dst.length);
return this;
} | @Test
public void testGetBytesAfterRelease4() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().getBytes(0, new byte[8]);
}
});
} |
public static long getPreMinuteMills(long rightnow) {
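// last millisecond of the previous minute: floor to the start of the current minute, then step back one millisecond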
return rightnow - (rightnow % MILLISECONDS_PER_MINUTE) - 1;
} | @Test
public void getPreMinuteMills() throws Exception {
long now = System.currentTimeMillis();
long pre = DateUtils.getPreMinuteMills(now);
Assert.assertTrue(now - pre < 60000);
} |
@Override
public void checkAuthorization(
final KsqlSecurityContext securityContext,
final MetaStore metaStore,
final Statement statement
) {
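// dispatch on statement type; statements not handled here require no additional authorization checks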
if (statement instanceof Query) {
validateQuery(securityContext, metaStore, (Query)statement);
} else if (statement instanceof InsertInto) {
validateInsertInto(securityContext, metaStore, (InsertInto)statement);
} else if (statement instanceof CreateAsSelect) {
validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
} else if (statement instanceof PrintTopic) {
validatePrintTopic(securityContext, (PrintTopic)statement);
} else if (statement instanceof CreateSource) {
validateCreateSource(securityContext, (CreateSource)statement);
}
} | @Test
public void shouldThrowWhenJoinSelectWithoutReadPermissionsDenied() {
// Given:
givenTopicAccessDenied(KAFKA_TOPIC, AclOperation.READ);
final Statement statement = givenStatement(String.format(
"SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", KAFKA_STREAM_TOPIC, AVRO_STREAM_TOPIC)
);
// When:
final Exception e = assertThrows(
KsqlTopicAuthorizationException.class,
() -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement)
);
// Then:
assertThat(e.getMessage(), containsString(String.format(
"Authorization denied to Read on topic(s): [%s]", KAFKA_TOPIC
)));
} |
@Override
public boolean add(FilteredBlock block) throws VerificationException, PrunedException {
boolean success = super.add(block);
if (success) {
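// only count the block's filtered transactions once the block has been successfully added to the chain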
trackFilteredTransactions(block.getTransactionCount());
}
return success;
} | @Test(expected = VerificationException.class)
public void difficultyTransitions_unexpectedChange() throws Exception {
Context.propagate(new Context(100, Coin.ZERO, false, true));
BlockChain chain = new BlockChain(BitcoinNetwork.MAINNET, new MemoryBlockStore(MAINNET.getGenesisBlock()));
// genesis block is already there
Block prev = chain.getChainHead().getHeader();
Instant newTime = prev.time().plus(Duration.ofMinutes(10));
Block newBlock = prev.createNextBlock(null, 1, newTime, 1);
newBlock.setDifficultyTarget(newBlock.getDifficultyTarget() + 10);
assertTrue(chain.add(newBlock));
} |