focal_method | test_case |
---|---|
public static List<TimeSlot> split(TimeSlot timeSlot, SegmentInMinutes unit) {
TimeSlot normalizedSlot = normalizeToSegmentBoundaries(timeSlot, unit);
return new SlotToSegments().apply(normalizedSlot, unit);
} | @Test
void slotsAreNormalizedBeforeSplitting() {
//given
Instant start = Instant.parse("2023-09-09T00:10:00Z");
Instant end = Instant.parse("2023-09-09T00:59:00Z");
TimeSlot timeSlot = new TimeSlot(start, end);
SegmentInMinutes oneHour = SegmentInMinutes.of(60, FIFTEEN_MINUTES_SEGMENT_DURATION);
//when
List<TimeSlot> segments = Segments.split(timeSlot, oneHour);
//then
assertEquals(1, segments.size());
assertEquals(Instant.parse("2023-09-09T00:00:00Z"), segments.get(0).from());
assertEquals(Instant.parse("2023-09-09T01:00:00Z"), segments.get(0).to());
} |
@Override
public List<Intent> compile(PathIntent intent, List<Intent> installable) {
List<FlowRule> rules = new LinkedList<>();
List<DeviceId> devices = new LinkedList<>();
compile(this, intent, rules, devices);
return ImmutableList.of(new FlowRuleIntent(appId,
intent.key(),
rules,
intent.resources(),
intent.type(),
intent.resourceGroup()
));
} | @Test
public void testCompile() {
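// activating the compiler and compiling the path intent should yield a single FlowRuleIntent with one rule per device on the path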
sut.activate();
List<Intent> compiled = sut.compile(intent, Collections.emptyList());
assertThat(compiled, hasSize(1));
assertThat("key is inherited",
compiled.stream().map(Intent::key).collect(Collectors.toList()),
everyItem(is(intent.key())));
Collection<FlowRule> rules = ((FlowRuleIntent) compiled.get(0)).flowRules();
FlowRule rule1 = rules.stream()
.filter(x -> x.deviceId().equals(d1p0.deviceId()))
.findFirst()
.get();
verifyIdAndPriority(rule1, d1p0.deviceId());
assertThat(rule1.selector(),
is(DefaultTrafficSelector.builder(selector).matchInPort(d1p0.port()).build()));
assertThat(rule1.treatment(),
is(DefaultTrafficTreatment.builder().setOutput(d1p1.port()).build()));
FlowRule rule2 = rules.stream()
.filter(x -> x.deviceId().equals(d2p0.deviceId()))
.findFirst()
.get();
verifyIdAndPriority(rule2, d2p0.deviceId());
assertThat(rule2.selector(),
is(DefaultTrafficSelector.builder(selector).matchInPort(d2p0.port()).build()));
assertThat(rule2.treatment(),
is(DefaultTrafficTreatment.builder().setOutput(d2p1.port()).build()));
FlowRule rule3 = rules.stream()
.filter(x -> x.deviceId().equals(d3p0.deviceId()))
.findFirst()
.get();
verifyIdAndPriority(rule3, d3p1.deviceId());
assertThat(rule3.selector(),
is(DefaultTrafficSelector.builder(selector).matchInPort(d3p1.port()).build()));
assertThat(rule3.treatment(),
is(DefaultTrafficTreatment.builder(treatment).setOutput(d3p0.port()).build()));
sut.deactivate();
} |
public static <T> Object create(Class<T> iface, T implementation,
RetryPolicy retryPolicy) {
return RetryProxy.create(iface,
new DefaultFailoverProxyProvider<T>(iface, implementation),
retryPolicy);
} | @Test
public void testRetryInterruptible() throws Throwable {
final UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryUpToMaximumTimeWithFixedSleep(10, 10, TimeUnit.SECONDS));
final CountDownLatch latch = new CountDownLatch(1);
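// the latch ensures the retrying call is underway on the worker thread before the main thread interrupts it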
final AtomicReference<Thread> futureThread = new AtomicReference<Thread>();
ExecutorService exec = Executors.newSingleThreadExecutor();
Future<Throwable> future = exec.submit(new Callable<Throwable>(){
@Override
public Throwable call() throws Exception {
futureThread.set(Thread.currentThread());
latch.countDown();
try {
unreliable.alwaysFailsWithFatalException();
} catch (UndeclaredThrowableException ute) {
return ute.getCause();
}
return null;
}
});
latch.await();
Thread.sleep(1000); // time to fail and sleep
assertTrue(futureThread.get().isAlive());
futureThread.get().interrupt();
Throwable e = future.get(1, TimeUnit.SECONDS); // should return immediately
assertNotNull(e);
assertEquals(InterruptedIOException.class, e.getClass());
assertEquals("Retry interrupted", e.getMessage());
assertEquals(InterruptedException.class, e.getCause().getClass());
assertEquals("sleep interrupted", e.getCause().getMessage());
} |
@Nullable
public static PipelineBreakerResult executePipelineBreakers(OpChainSchedulerService scheduler,
MailboxService mailboxService, WorkerMetadata workerMetadata, StagePlan stagePlan,
Map<String, String> opChainMetadata, long requestId, long deadlineMs) {
PipelineBreakerContext pipelineBreakerContext = new PipelineBreakerContext();
PipelineBreakerVisitor.visitPlanRoot(stagePlan.getRootNode(), pipelineBreakerContext);
if (!pipelineBreakerContext.getPipelineBreakerMap().isEmpty()) {
try {
// TODO: This PlanRequestContext needs to indicate it is a pre-stage opChain and only listens to pre-stage
// OpChain receive-mail callbacks.
// see also: MailboxIdUtils TODOs, de-couple mailbox id from query information
OpChainExecutionContext opChainExecutionContext =
new OpChainExecutionContext(mailboxService, requestId, deadlineMs, opChainMetadata,
stagePlan.getStageMetadata(), workerMetadata, null);
return execute(scheduler, pipelineBreakerContext, opChainExecutionContext);
} catch (Exception e) {
LOGGER.error("Caught exception executing pipeline breaker for request: {}, stage: {}", requestId,
stagePlan.getStageMetadata().getStageId(), e);
return new PipelineBreakerResult(pipelineBreakerContext.getNodeIdMap(), Collections.emptyMap(),
TransferableBlockUtils.getErrorTransferableBlock(e), null);
}
} else {
return null;
}
} | @Test
public void shouldReturnBlocksUponNormalOperation() {
MailboxReceiveNode mailboxReceiveNode = getPBReceiveNode(1);
StagePlan stagePlan = new StagePlan(mailboxReceiveNode, _stageMetadata);
// when
when(_mailboxService.getReceivingMailbox(MAILBOX_ID_1)).thenReturn(_mailbox1);
Object[] row1 = new Object[]{1, 1};
Object[] row2 = new Object[]{2, 3};
when(_mailbox1.poll()).thenReturn(OperatorTestUtil.block(DATA_SCHEMA, row1),
OperatorTestUtil.block(DATA_SCHEMA, row2),
TransferableBlockUtils.getEndOfStreamTransferableBlock(OperatorTestUtil.getDummyStats(1)));
PipelineBreakerResult pipelineBreakerResult =
PipelineBreakerExecutor.executePipelineBreakers(_scheduler, _mailboxService, _workerMetadata, stagePlan,
ImmutableMap.of(), 0, Long.MAX_VALUE);
// then
// should have single PB result, receive 2 data blocks, EOS block shouldn't be included
Assert.assertNotNull(pipelineBreakerResult);
Assert.assertNull(pipelineBreakerResult.getErrorBlock());
Assert.assertEquals(pipelineBreakerResult.getResultMap().size(), 1);
Assert.assertEquals(pipelineBreakerResult.getResultMap().values().iterator().next().size(), 2);
// should collect stats from previous stage here
Assert.assertNotNull(pipelineBreakerResult.getStageQueryStats());
Assert.assertNotNull(pipelineBreakerResult.getStageQueryStats().getUpstreamStageStats(1),
"Stats for stage 1 should be sent");
} |
public static QueryQueueOptions createFromEnv() {
if (!Config.enable_query_queue_v2) {
return new QueryQueueOptions(false, V2.DEFAULT);
}
V2 v2 = new V2(Config.query_queue_v2_concurrency_level,
BackendResourceStat.getInstance().getNumBes(),
BackendResourceStat.getInstance().getAvgNumHardwareCoresOfBe(),
BackendResourceStat.getInstance().getAvgMemLimitBytes(),
Config.query_queue_v2_num_rows_per_slot,
Config.query_queue_v2_cpu_costs_per_slot);
return new QueryQueueOptions(true, v2);
} | @Test
public void testCreateFromEnv() {
{
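// with query queue v2 disabled, the returned options must report it as disabled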
Config.enable_query_queue_v2 = false;
QueryQueueOptions opts = QueryQueueOptions.createFromEnv();
assertThat(opts.isEnableQueryQueueV2()).isFalse();
}
{
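// with v2 enabled, slot and memory sizing is derived from the registered BEs' core counts and memory limits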
final int numCores = 16;
final long memLimitBytes = 64L * 1024 * 1024 * 1024;
final int numBEs = 2;
final int concurrencyLevel = Config.query_queue_v2_concurrency_level;
BackendResourceStat.getInstance().setNumHardwareCoresOfBe(1, numCores);
BackendResourceStat.getInstance().setMemLimitBytesOfBe(1, memLimitBytes);
BackendResourceStat.getInstance().setNumHardwareCoresOfBe(2, numCores);
BackendResourceStat.getInstance().setMemLimitBytesOfBe(2, memLimitBytes);
Config.enable_query_queue_v2 = true;
QueryQueueOptions opts = QueryQueueOptions.createFromEnv();
assertThat(opts.isEnableQueryQueueV2()).isTrue();
assertThat(opts.v2().getNumWorkers()).isEqualTo(numBEs);
assertThat(opts.v2().getNumRowsPerSlot()).isEqualTo(Config.query_queue_v2_num_rows_per_slot);
assertThat(opts.v2().getTotalSlots()).isEqualTo(concurrencyLevel * numBEs * numCores);
assertThat(opts.v2().getMemBytesPerSlot()).isEqualTo(memLimitBytes / concurrencyLevel / numCores);
assertThat(opts.v2().getTotalSmallSlots()).isEqualTo(numCores);
}
} |
public static Builder builder(Credentials credentials) {
return new Builder(credentials);
} | @Test
public void testCreateWithCredentials() {
Credentials credentials = mock(Credentials.class);
FlexTemplateClient.builder(credentials).build();
// Lack of exception is all we really can test
} |
@Override
public OAuth2AccessTokenDO getAccessToken(String accessToken) {
// Try Redis first
OAuth2AccessTokenDO accessTokenDO = oauth2AccessTokenRedisDAO.get(accessToken);
if (accessTokenDO != null) {
return accessTokenDO;
}
// Not found in Redis; fall back to MySQL
accessTokenDO = oauth2AccessTokenMapper.selectByAccessToken(accessToken);
// If it exists in MySQL and is not expired, write it back to Redis
if (accessTokenDO != null && !DateUtils.isExpired(accessTokenDO.getExpiresTime())) {
oauth2AccessTokenRedisDAO.set(accessTokenDO);
}
return accessTokenDO;
} | @Test
public void testCheckAccessToken_success() {
// mock data (access token)
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class)
.setExpiresTime(LocalDateTime.now().plusDays(1));
oauth2AccessTokenMapper.insert(accessTokenDO);
// prepare parameters
String accessToken = accessTokenDO.getAccessToken();
// call and assert
OAuth2AccessTokenDO result = oauth2TokenService.getAccessToken(accessToken);
// assert
assertPojoEquals(accessTokenDO, result, "createTime", "updateTime", "deleted",
"creator", "updater");
} |
@Override
public MapperResult findConfigInfoLike4PageFetchRows(MapperContext context) {
final String tenant = (String) context.getWhereParameter(FieldConstant.TENANT_ID);
final String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID);
final String group = (String) context.getWhereParameter(FieldConstant.GROUP_ID);
final String appName = (String) context.getWhereParameter(FieldConstant.APP_NAME);
final String content = (String) context.getWhereParameter(FieldConstant.CONTENT);
final String[] types = (String[]) context.getWhereParameter(FieldConstant.TYPE);
WhereBuilder where = new WhereBuilder(
"SELECT id,data_id,group_id,tenant_id,app_name,content,encrypted_data_key,type FROM config_info");
where.like("tenant_id", tenant);
if (StringUtils.isNotBlank(dataId)) {
where.and().like("data_id", dataId);
}
if (StringUtils.isNotBlank(group)) {
where.and().like("group_id", group);
}
if (StringUtils.isNotBlank(appName)) {
where.and().eq("app_name", appName);
}
if (StringUtils.isNotBlank(content)) {
where.and().like("content", content);
}
if (!ArrayUtils.isEmpty(types)) {
where.in("type", types);
}
where.limit(context.getStartRow(), context.getPageSize());
return where.build();
} | @Test
void testFindConfigInfoLike4PageFetchRows() {
MapperResult mapperResult = configInfoMapperByMySql.findConfigInfoLike4PageFetchRows(context);
assertEquals(mapperResult.getSql(), "SELECT id,data_id,group_id,tenant_id,app_name,content,encrypted_data_key,type FROM config_info "
+ "WHERE tenant_id LIKE ? AND app_name = ? LIMIT " + startRow + "," + pageSize);
assertArrayEquals(new Object[] {tenantId, appName}, mapperResult.getParamList().toArray());
} |
@Override
// Camel calls this method if the endpoint isSynchronous(), as the
// KafkaEndpoint creates a SynchronousDelegateProducer for it
public void process(Exchange exchange) throws Exception {
// is the message body a list or something that contains multiple values
Message message = exchange.getIn();
if (transactionId != null) {
startKafkaTransaction(exchange);
}
if (endpoint.getConfiguration().isUseIterator() && isIterable(message.getBody())) {
processIterableSync(exchange, message);
} else {
processSingleMessageSync(exchange, message);
}
} | @Test
public void processRequiresTopicInConfiguration() throws Exception {
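// no topic header is set on the message, so the producer must fall back to the topic from the endpoint configuration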
endpoint.getConfiguration().setTopic("configTopic");
Mockito.when(exchange.getIn()).thenReturn(in);
Mockito.when(exchange.getMessage()).thenReturn(in);
in.setHeader(KafkaConstants.PARTITION_KEY, 4);
in.setHeader(KafkaConstants.KEY, "someKey");
producer.process(exchange);
verifySendMessage("configTopic", "someKey");
assertRecordMetadataExists();
} |
public static TimeWindows ofSizeWithNoGrace(final Duration size) throws IllegalArgumentException {
return ofSizeAndGrace(size, ofMillis(NO_GRACE_PERIOD));
} | @Test
public void windowSizeMustNotBeNegative() {
assertThrows(IllegalArgumentException.class, () -> TimeWindows.ofSizeWithNoGrace(ofMillis(-1)));
} |
@Override
public Boolean authenticate(final Host bookmark, final LoginCallback callback, final CancelCallback cancel) throws BackgroundException {
if(log.isDebugEnabled()) {
log.debug(String.format("Login using challenge response authentication for %s", bookmark));
}
final AtomicBoolean canceled = new AtomicBoolean();
final AtomicBoolean publickey = new AtomicBoolean();
try {
final Credentials credentials = bookmark.getCredentials();
client.auth(credentials.getUsername(), new AuthKeyboardInteractive(new ChallengeResponseProvider() {
private String name = StringUtils.EMPTY;
private String instruction = StringUtils.EMPTY;
/**
* Reply for default password prompt challenge sent
*/
private final AtomicBoolean flag = new AtomicBoolean(false);
@Override
public List<String> getSubmethods() {
return Collections.emptyList();
}
@Override
public void init(final Resource resource, final String name, final String instruction) {
if(log.isDebugEnabled()) {
log.debug(String.format("Initialize with name '%s' and instruction '%s'", name, instruction));
}
if(StringUtils.isNotBlank(instruction)) {
this.instruction = instruction;
}
if(StringUtils.isNotBlank(name)) {
this.name = name;
}
}
@Override
public char[] getResponse(final String prompt, final boolean echo) {
if(log.isDebugEnabled()) {
log.debug(String.format("Reply to challenge name '%s' with instruction '%s' and prompt '%s'",
name, instruction, prompt));
}
if(!flag.get() && DEFAULT_PROMPT_PATTERN.matcher(prompt).matches()) {
if(log.isDebugEnabled()) {
log.debug(String.format("Prompt '%s' matches %s", prompt, DEFAULT_PROMPT_PATTERN));
}
if(StringUtils.isNotBlank(credentials.getPassword())) {
flag.set(true);
return credentials.getPassword().toCharArray();
}
if(log.isDebugEnabled()) {
log.debug(String.format("Prompt for password input with %s", callback));
}
try {
final Credentials input = callback.prompt(bookmark, credentials.getUsername(),
String.format("%s %s", LocaleFactory.localizedString("Login", "Login"), bookmark.getHostname()),
MessageFormat.format(LocaleFactory.localizedString(
"Login {0} with username and password", "Credentials"), BookmarkNameProvider.toString(bookmark)),
// Change of username or service not allowed
new LoginOptions(bookmark.getProtocol()).user(false));
if(input.isPublicKeyAuthentication()) {
credentials.setIdentity(input.getIdentity());
publickey.set(true);
// Return empty response; public key authentication is retried in the IOException handler below
return StringUtils.EMPTY.toCharArray();
}
flag.set(true);
return credentials
.withPassword(input.getPassword())
.withSaved(input.isSaved()).getPassword().toCharArray();
}
catch(LoginCanceledException e) {
canceled.set(true);
// Return empty response when the user cancels
return StringUtils.EMPTY.toCharArray();
}
}
else {
if(log.isDebugEnabled()) {
log.debug(String.format("Prompt for additional credentials with prompt %s", prompt));
}
final StringAppender message = new StringAppender().append(instruction).append(prompt);
// Properly handle an instruction field with embedded newlines. They should also
// be able to display at least 30 characters for the name and prompts.
final Credentials additional;
try {
final StringAppender title = new StringAppender().append(
LocaleFactory.localizedString("Provide additional login credentials", "Credentials")
).append(name);
additional = callback.prompt(bookmark, title.toString(),
message.toString(), new LoginOptions()
.icon(bookmark.getProtocol().disk())
.password(true)
.user(false)
.keychain(false)
.anonymous(true)
);
}
catch(LoginCanceledException e) {
canceled.set(true);
// Return empty response when the user cancels
return StringUtils.EMPTY.toCharArray();
}
// Responses are encoded in ISO-10646 UTF-8.
return additional.getPassword().toCharArray();
}
}
@Override
public boolean shouldRetry() {
return false;
}
}));
}
catch(IOException e) {
if(publickey.get()) {
return new SFTPPublicKeyAuthentication(client).authenticate(bookmark, callback, cancel);
}
if(canceled.get()) {
throw new LoginCanceledException();
}
throw new SFTPExceptionMappingService().map(e);
}
return client.isAuthenticated();
} | @Test(expected = LoginFailureException.class)
@Ignore
public void testAuthenticate() throws Exception {
assertFalse(new SFTPChallengeResponseAuthentication(session.getClient()).authenticate(session.getHost(), new DisabledLoginCallback(), new DisabledCancelCallback()));
} |
public ImmutableSet<String> loadAllMessageStreams(final StreamPermissions streamPermissions) {
return allStreamsProvider.get()
// Unless explicitly queried, exclude event and failure indices by default
// Having these indices in every search, makes sorting almost impossible
// because it triggers https://github.com/Graylog2/graylog2-server/issues/6378
// TODO: this filter could be removed, once we implement https://github.com/Graylog2/graylog2-server/issues/6490
.filter(id -> !NON_MESSAGE_STREAM_IDS.contains(id))
.filter(streamPermissions::canReadStream)
.collect(ImmutableSet.toImmutableSet());
} | @Test
public void filtersDefaultStreams() {
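// the stream supplier yields the excluded non-message streams plus one ordinary stream; only the ordinary one should survive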
final PermittedStreams sut = new PermittedStreams(() -> Streams.concat(NON_MESSAGE_STREAM_IDS.stream(), java.util.stream.Stream.of("i'm ok")));
ImmutableSet<String> result = sut.loadAllMessageStreams(id -> true);
assertThat(result).containsExactly("i'm ok");
} |
@Override
protected SDSApiClient connect(final ProxyFinder proxy, final HostKeyCallback key, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
final HttpClientBuilder configuration = builder.build(proxy, this, prompt);
authorizationService = new OAuth2RequestInterceptor(builder.build(proxy, this, prompt).addInterceptorLast(new HttpRequestInterceptor() {
@Override
public void process(final HttpRequest request, final HttpContext context) {
if(request instanceof HttpRequestWrapper) {
final HttpRequestWrapper wrapper = (HttpRequestWrapper) request;
if(null != wrapper.getTarget()) {
if(StringUtils.equals(wrapper.getTarget().getHostName(), host.getHostname())) {
request.addHeader(HttpHeaders.AUTHORIZATION,
String.format("Basic %s", Base64.getEncoder().encodeToString(String.format("%s:%s", host.getProtocol().getOAuthClientId(), host.getProtocol().getOAuthClientSecret()).getBytes(StandardCharsets.UTF_8))));
}
}
}
}
}).build(), host, prompt) {
@Override
public void process(final HttpRequest request, final HttpContext context) throws HttpException, IOException {
if(request instanceof HttpRequestWrapper) {
final HttpRequestWrapper wrapper = (HttpRequestWrapper) request;
if(null != wrapper.getTarget()) {
if(StringUtils.equals(wrapper.getTarget().getHostName(), host.getHostname())) {
super.process(request, context);
}
}
}
}
}
.withFlowType(SDSProtocol.Authorization.valueOf(host.getProtocol().getAuthorization()) == SDSProtocol.Authorization.password
? OAuth2AuthorizationService.FlowType.PasswordGrant : OAuth2AuthorizationService.FlowType.AuthorizationCode)
.withRedirectUri(CYBERDUCK_REDIRECT_URI.equals(host.getProtocol().getOAuthRedirectUrl()) ? host.getProtocol().getOAuthRedirectUrl() :
Scheme.isURL(host.getProtocol().getOAuthRedirectUrl()) ? host.getProtocol().getOAuthRedirectUrl() : new HostUrlProvider().withUsername(false).withPath(true).get(
host.getProtocol().getScheme(), host.getPort(), null, host.getHostname(), host.getProtocol().getOAuthRedirectUrl())
);
try {
authorizationService.withParameter("user_agent_info", Base64.getEncoder().encodeToString(InetAddress.getLocalHost().getHostName().getBytes(StandardCharsets.UTF_8)));
}
catch(UnknownHostException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
configuration.setServiceUnavailableRetryStrategy(new CustomServiceUnavailableRetryStrategy(host,
new ExecutionCountServiceUnavailableRetryStrategy(new PreconditionFailedResponseInterceptor(host, authorizationService, prompt),
new OAuth2ErrorResponseInterceptor(host, authorizationService))));
if(new HostPreferences(host).getBoolean("sds.limit.requests.enable")) {
configuration.addInterceptorLast(new RateLimitingHttpRequestInterceptor(new DefaultHttpRateLimiter(
new HostPreferences(host).getInteger("sds.limit.requests.second")
)));
}
configuration.addInterceptorLast(authorizationService);
configuration.addInterceptorLast(new HttpRequestInterceptor() {
@Override
public void process(final HttpRequest request, final HttpContext context) {
request.removeHeaders(SDSSession.SDS_AUTH_TOKEN_HEADER);
}
});
final CloseableHttpClient apache = configuration.build();
final SDSApiClient client = new SDSApiClient(apache);
client.setBasePath(new HostUrlProvider().withUsername(false).withPath(true).get(host.getProtocol().getScheme(), host.getPort(),
null, host.getHostname(), host.getProtocol().getContext()));
client.setHttpClient(ClientBuilder.newClient(new ClientConfig()
.property(ClientProperties.SUPPRESS_HTTP_COMPLIANCE_VALIDATION, true)
.register(new InputStreamProvider())
.register(MultiPartFeature.class)
.register(new JSON())
.register(JacksonFeature.class)
.connectorProvider(new HttpComponentsProvider(apache))));
final int timeout = ConnectionTimeoutFactory.get(preferences).getTimeout() * 1000;
client.setConnectTimeout(timeout);
client.setReadTimeout(timeout);
client.setUserAgent(new PreferencesUseragentProvider().get());
return client;
} | @Test(expected = ConnectionRefusedException.class)
public void testProxyNoConnect() throws Exception {
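// route the connection through an unreachable local HTTP proxy so the connect attempt is refused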
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new SDSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/DRACOON (CLI).cyberduckprofile"));
final Host host = new Host(profile, "duck.dracoon.com", new Credentials(
System.getProperties().getProperty("dracoon.user"), System.getProperties().getProperty("dracoon.key")
));
final SDSSession session = new SDSSession(host, new DisabledX509TrustManager(), new DefaultX509KeyManager());
final LoginConnectionService c = new LoginConnectionService(
new DisabledLoginCallback(),
new DisabledHostKeyCallback(),
new DisabledPasswordStore(),
new DisabledProgressListener(),
new ProxyFinder() {
@Override
public Proxy find(final String target) {
return new Proxy(Proxy.Type.HTTP, "localhost", 3128);
}
}
);
c.connect(session, new DisabledCancelCallback());
} |
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
} | @Test
public void shouldUseSourceTopicForCreateExistingTopic() {
// Given:
givenStatement("CREATE STREAM x (FOO VARCHAR) WITH(value_format='avro', kafka_topic='source', partitions=2);");
// When:
injector.inject(statement, builder);
// Then:
verify(builder).withSource(argThat(supplierThatGets(sourceDescription)), any(Supplier.class));
} |
@Override
public TimelineEntity getContainerEntity(ContainerId containerId,
String fields, Map<String, String> filters) throws IOException {
ApplicationId appId = containerId.getApplicationAttemptId().
getApplicationId();
String path = PATH_JOINER.join("clusters", clusterId, "apps",
appId, "entities", YARN_CONTAINER, containerId);
if (fields == null || fields.isEmpty()) {
fields = "INFO";
}
MultivaluedMap<String, String> params = new MultivaluedMapImpl();
params.add("fields", fields);
mergeFilters(params, filters);
ClientResponse response = doGetUri(baseUri, path, params);
TimelineEntity entity = response.getEntity(TimelineEntity.class);
return entity;
} | @Test
void testGetContainer() throws Exception {
ContainerId containerId =
ContainerId.fromString("container_1234_0001_01_000001");
TimelineEntity entity = client.getContainerEntity(containerId,
null, null);
assertEquals("mockContainer1", entity.getId());
} |
@Override
public String toString() {
return "DEFAULT";
} | @Test
public void testToString() {
String expected = "DEFAULT";
assertEquals(expected.trim(), SqlDefaultExpr.get().toString().trim());
} |
@Override
public void updateInstance(String serviceName, String groupName, Instance instance) throws NacosException {
} | @Test
void testUpdateInstance() {
String serviceName = "service1";
String groupName = "group1";
Instance instance = new Instance();
Assertions.assertDoesNotThrow(() -> {
delegate.updateInstance(serviceName, groupName, instance);
});
} |
public static ObjectNode json(Highlights highlights) {
ObjectNode payload = objectNode();
ArrayNode devices = arrayNode();
ArrayNode hosts = arrayNode();
ArrayNode links = arrayNode();
payload.set(DEVICES, devices);
payload.set(HOSTS, hosts);
payload.set(LINKS, links);
highlights.devices().forEach(dh -> devices.add(json(dh)));
highlights.hosts().forEach(hh -> hosts.add(json(hh)));
highlights.links().forEach(lh -> links.add(json(lh)));
Highlights.Amount toSubdue = highlights.subdueLevel();
if (!toSubdue.equals(Highlights.Amount.ZERO)) {
payload.put(SUBDUE, toSubdue.toString());
}
int delay = highlights.delayMs();
if (delay > 0) {
payload.put(DELAY, delay);
}
return payload;
} | @Test
public void badgedDevice() {
Highlights h = new Highlights();
DeviceHighlight dh = new DeviceHighlight(DEV1);
dh.setBadge(NodeBadge.number(7));
h.add(dh);
dh = new DeviceHighlight(DEV2);
dh.setBadge(NodeBadge.glyph(Status.WARN, GID, SOME_MSG));
h.add(dh);
payload = TopoJson.json(h);
// System.out.println(payload);
// dig into the payload, and verify the badges are set on the devices
ArrayNode a = (ArrayNode) payload.get(TopoJson.DEVICES);
ObjectNode d = (ObjectNode) a.get(0);
assertEquals("wrong device id", DEV1, d.get(TopoJson.ID).asText());
ObjectNode b = (ObjectNode) d.get(TopoJson.BADGE);
assertNotNull("missing badge", b);
assertEquals("wrong status code", "i", b.get(TopoJson.STATUS).asText());
assertEquals("wrong text", "7", b.get(TopoJson.TXT).asText());
assertNull("glyph?", b.get(TopoJson.GID));
assertNull("msg?", b.get(TopoJson.MSG));
d = (ObjectNode) a.get(1);
assertEquals("wrong device id", DEV2, d.get(TopoJson.ID).asText());
b = (ObjectNode) d.get(TopoJson.BADGE);
assertNotNull("missing badge", b);
assertEquals("wrong status code", "w", b.get(TopoJson.STATUS).asText());
assertNull("text?", b.get(TopoJson.TXT));
assertEquals("wrong text", GID, b.get(TopoJson.GID).asText());
assertEquals("wrong message", SOME_MSG, b.get(TopoJson.MSG).asText());
} |
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
final Container basicMainContainer =
new ContainerBuilder(flinkPod.getMainContainer())
.addAllToEnv(getSecretEnvs())
.build();
return new FlinkPod.Builder(flinkPod).withMainContainer(basicMainContainer).build();
} | @Test
void testWhetherPodOrContainerIsDecorated() {
final FlinkPod resultFlinkPod = envSecretsDecorator.decorateFlinkPod(baseFlinkPod);
List<EnvVar> envVarList = resultFlinkPod.getMainContainer().getEnv();
assertThat(envVarList).extracting(EnvVar::getName).containsExactly(ENV_NAME);
} |
static Builder newBuilder() {
return new AutoValue_SplunkEventWriter.Builder();
} | @Test
public void eventWriterMissingToken() {
Exception thrown =
assertThrows(
NullPointerException.class,
() -> SplunkEventWriter.newBuilder().withUrl("http://test-url").build());
assertTrue(thrown.getMessage().contains("token needs to be provided"));
} |
public ExitStatus(Options options) {
this.options = options;
} | @Test
void with_failed_passed_scenarios() {
createRuntime();
bus.send(testCaseFinishedWithStatus(Status.FAILED));
bus.send(testCaseFinishedWithStatus(Status.PASSED));
assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x1)));
} |
public static void scheduleLongPolling(Runnable runnable, long initialDelay, long delay, TimeUnit unit) {
LONG_POLLING_EXECUTOR.scheduleWithFixedDelay(runnable, initialDelay, delay, unit);
} | @Test
void testScheduleLongPollingV1() throws InterruptedException {
AtomicInteger atomicInteger = new AtomicInteger();
Runnable runnable = atomicInteger::incrementAndGet;
ConfigExecutor.scheduleLongPolling(runnable, 0, 10, TimeUnit.MILLISECONDS);
TimeUnit.MILLISECONDS.sleep(10);
assertTrue(atomicInteger.get() >= 1);
} |
@Override
public void init(FilterConfig filterConfig) throws ServletException {
String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
configPrefix = (configPrefix != null) ? configPrefix + "." : "";
config = getConfiguration(configPrefix, filterConfig);
String authHandlerName = config.getProperty(AUTH_TYPE, null);
String authHandlerClassName;
if (authHandlerName == null) {
throw new ServletException("Authentication type must be specified: " +
PseudoAuthenticationHandler.TYPE + "|" +
KerberosAuthenticationHandler.TYPE + "|<class>");
}
authHandlerClassName =
AuthenticationHandlerUtil
.getAuthenticationHandlerClassName(authHandlerName);
maxInactiveInterval = Long.parseLong(config.getProperty(
AUTH_TOKEN_MAX_INACTIVE_INTERVAL, "-1")); // By default, disable.
if (maxInactiveInterval > 0) {
maxInactiveInterval *= 1000;
}
validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
* 1000; //10 hours
initializeSecretProvider(filterConfig);
initializeAuthHandler(authHandlerClassName, filterConfig);
cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
cookiePath = config.getProperty(COOKIE_PATH, null);
isCookiePersistent = Boolean.parseBoolean(
config.getProperty(COOKIE_PERSISTENT, "false"));
} | @Test
public void testInit() throws Exception {
// custom secret as inline
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<>(Arrays.asList(AuthenticationFilter.AUTH_TYPE))
.elements());
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(
AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE)).thenReturn(
new SignerSecretProvider() {
@Override
public void init(Properties config, ServletContext servletContext,
long tokenValidity) {
}
@Override
public byte[] getCurrentSecret() {
return null;
}
@Override
public byte[][] getAllSecrets() {
return null;
}
});
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertTrue(filter.isCustomSignerSecretProvider());
} finally {
filter.destroy();
}
// custom secret by file
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
testDir.mkdirs();
String secretValue = "hadoop";
File secretFile = new File(testDir, "http-secret.txt");
Writer writer = new FileWriter(secretFile);
writer.write(secretValue);
writer.close();
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(
AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(
AuthenticationFilter.SIGNATURE_SECRET_FILE))
.thenReturn(secretFile.getAbsolutePath());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET_FILE)).elements());
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(
AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
.thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertFalse(filter.isCustomSignerSecretProvider());
} finally {
filter.destroy();
}
// custom cookie domain and cookie path
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.COOKIE_DOMAIN,
AuthenticationFilter.COOKIE_PATH)).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
Assert.assertEquals(".foo.com", filter.getCookieDomain());
Assert.assertEquals("/bar", filter.getCookiePath());
} finally {
filter.destroy();
}
// authentication handler lifecycle, and custom impl
DummyAuthenticationHandler.reset();
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
Assert.assertTrue(DummyAuthenticationHandler.init);
} finally {
filter.destroy();
Assert.assertTrue(DummyAuthenticationHandler.destroy);
}
// kerberos auth handler
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
ServletContext sc = Mockito.mock(ServletContext.class);
Mockito.when(config.getServletContext()).thenReturn(sc);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
} catch (ServletException ex) {
// Expected
} finally {
Assert.assertEquals(KerberosAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
filter.destroy();
}
} |
public ApplicationBuilder owner(String owner) {
this.owner = owner;
return getThis();
} | @Test
void owner() {
ApplicationBuilder builder = new ApplicationBuilder();
builder.owner("owner");
Assertions.assertEquals("owner", builder.build().getOwner());
} |
public void enter(Wizard wizard) {
LOGGER.info("{} enters the tower.", wizard);
} | @Test
void testEnter() {
final var wizards = List.of(
new Wizard("Gandalf"),
new Wizard("Dumbledore"),
new Wizard("Oz"),
new Wizard("Merlin")
);
var tower = new IvoryTower();
wizards.forEach(tower::enter);
assertTrue(appender.logContains("Gandalf enters the tower."));
assertTrue(appender.logContains("Dumbledore enters the tower."));
assertTrue(appender.logContains("Oz enters the tower."));
assertTrue(appender.logContains("Merlin enters the tower."));
assertEquals(4, appender.getLogSize());
} |
public static Write<PubsubMessage> writeMessages() {
return Write.newBuilder()
.setTopicProvider(null)
.setTopicFunction(null)
.setDynamicDestinations(false)
.build();
} | @Test
public void testWriteMalformedMessagesWithErrorHandler() throws Exception {
OutgoingMessage msg =
OutgoingMessage.of(
com.google.pubsub.v1.PubsubMessage.newBuilder()
.setData(ByteString.copyFromUtf8("foo"))
.build(),
0,
null,
"projects/project/topics/topic1");
try (PubsubTestClientFactory factory =
PubsubTestClient.createFactoryForPublish(null, ImmutableList.of(msg), ImmutableList.of())) {
TimestampedValue<PubsubMessage> pubsubMsg =
TimestampedValue.of(
new PubsubMessage(
msg.getMessage().getData().toByteArray(),
Collections.emptyMap(),
msg.recordId())
.withTopic(msg.topic()),
Instant.ofEpochMilli(msg.getTimestampMsSinceEpoch()));
TimestampedValue<PubsubMessage> failingPubsubMsg =
TimestampedValue.of(
new PubsubMessage(
"foo".getBytes(StandardCharsets.UTF_8),
Collections.emptyMap(),
msg.recordId())
.withTopic("badTopic"),
Instant.ofEpochMilli(msg.getTimestampMsSinceEpoch()));
PCollection<PubsubMessage> messages =
pipeline.apply(
Create.timestamped(ImmutableList.of(pubsubMsg, failingPubsubMsg))
.withCoder(PubsubMessageWithTopicCoder.of()));
messages.setIsBoundedInternal(PCollection.IsBounded.BOUNDED);
ErrorHandler<BadRecord, PCollection<Long>> badRecordErrorHandler =
pipeline.registerBadRecordErrorHandler(new ErrorSinkTransform());
// The most straightforward method to simulate a bad message is to have a format function that
// deterministically fails based on some value
messages.apply(
PubsubIO.writeMessages()
.toBuilder()
.setFormatFn(
(ValueInSingleWindow<PubsubMessage> messageAndWindow) -> {
if (messageAndWindow.getValue().getTopic().equals("badTopic")) {
throw new RuntimeException("expected exception");
}
return messageAndWindow.getValue();
})
.build()
.to("projects/project/topics/topic1")
.withClientFactory(factory)
.withErrorHandler(badRecordErrorHandler));
badRecordErrorHandler.close();
PAssert.thatSingleton(badRecordErrorHandler.getOutput()).isEqualTo(1L);
pipeline.run();
}
} |
@Override
public void check(final SQLStatement sqlStatement) {
ShardingSpherePreconditions.checkState(judgeEngine.isSupported(sqlStatement), () -> new ClusterStateException(getType(), sqlStatement));
} | @Test
void assertExecuteWithSupportedSQL() {
new ReadOnlyProxyState().check(mock(SelectStatement.class));
} |
public ClientSession toClientSession()
{
return new ClientSession(
parseServer(server),
user,
source,
Optional.empty(),
parseClientTags(clientTags),
clientInfo,
catalog,
schema,
TimeZone.getDefault().getID(),
Locale.getDefault(),
toResourceEstimates(resourceEstimates),
toProperties(sessionProperties),
emptyMap(),
emptyMap(),
toExtraCredentials(extraCredentials),
null,
clientRequestTimeout,
disableCompression,
emptyMap(),
emptyMap(),
validateNextUriSource);
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Multiple entries with same key: test.token.foo=bar and test.token.foo=foo")
public void testDuplicateExtraCredentialKey()
{
Console console = singleCommand(Console.class).parse("--extra-credential", "test.token.foo=foo", "--extra-credential", "test.token.foo=bar");
ClientOptions options = console.clientOptions;
options.toClientSession();
} |
public static BufferedImage rotateImage(final BufferedImage image, final double theta)
{
AffineTransform transform = new AffineTransform();
transform.rotate(theta, image.getWidth() / 2.0, image.getHeight() / 2.0);
AffineTransformOp transformOp = new AffineTransformOp(transform, AffineTransformOp.TYPE_BILINEAR);
return transformOp.filter(image, null);
} | @Test
public void rotateImage()
{
// TODO: Test more than 90° rotations
// Evenly-sized images (2x2)
assertTrue(bufferedImagesEqual(BLACK_PIXEL_TOP_RIGHT, ImageUtil.rotateImage(BLACK_PIXEL_TOP_LEFT, Math.PI / 2)));
assertTrue(bufferedImagesEqual(BLACK_PIXEL_BOTTOM_RIGHT, ImageUtil.rotateImage(BLACK_PIXEL_TOP_LEFT, Math.PI)));
assertTrue(bufferedImagesEqual(BLACK_PIXEL_BOTTOM_LEFT, ImageUtil.rotateImage(BLACK_PIXEL_TOP_LEFT, Math.PI * 3 / 2)));
assertTrue(bufferedImagesEqual(BLACK_PIXEL_TOP_LEFT, ImageUtil.rotateImage(BLACK_PIXEL_TOP_LEFT, Math.PI * 2)));
// Unevenly-sized images (2x1); when rotated 90° become (2x2) images
final BufferedImage twoByOneLeft = new BufferedImage(2, 1, BufferedImage.TYPE_INT_ARGB);
twoByOneLeft.setRGB(0, 0, BLACK.getRGB());
final BufferedImage twoByTwoRight = new BufferedImage(2, 1, BufferedImage.TYPE_INT_ARGB);
twoByTwoRight.setRGB(1, 0, BLACK.getRGB());
final BufferedImage oneByTwoTop = new BufferedImage(2, 2, BufferedImage.TYPE_INT_ARGB);
oneByTwoTop.setRGB(1, 0, new Color(0, 0, 0, 127).getRGB());
final BufferedImage oneByTwoBottom = new BufferedImage(2, 2, BufferedImage.TYPE_INT_ARGB);
oneByTwoBottom.setRGB(0, 0, new Color(0, 0, 0, 127).getRGB());
oneByTwoBottom.setRGB(0, 1, BLACK.getRGB());
assertTrue(bufferedImagesEqual(oneByTwoTop, ImageUtil.rotateImage(twoByOneLeft, Math.PI / 2)));
assertTrue(bufferedImagesEqual(twoByTwoRight, ImageUtil.rotateImage(twoByOneLeft, Math.PI)));
assertTrue(bufferedImagesEqual(oneByTwoBottom, ImageUtil.rotateImage(twoByOneLeft, Math.PI * 3 / 2)));
assertTrue(bufferedImagesEqual(twoByOneLeft, ImageUtil.rotateImage(twoByOneLeft, Math.PI * 2)));
} |
public static WindowBytesStoreSupplier persistentWindowStore(final String name,
final Duration retentionPeriod,
final Duration windowSize,
final boolean retainDuplicates) throws IllegalArgumentException {
return persistentWindowStore(name, retentionPeriod, windowSize, retainDuplicates, false);
} | @Test
public void shouldCreateRocksDbWindowStore() {
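// without the timestamped variant, the window store must wrap a plain RocksDBSegmentedBytesStore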
final WindowStore store = Stores.persistentWindowStore("store", ofMillis(1L), ofMillis(1L), false).get();
final StateStore wrapped = ((WrappedStateStore) store).wrapped();
assertThat(store, instanceOf(RocksDBWindowStore.class));
assertThat(wrapped, allOf(not(instanceOf(RocksDBTimestampedSegmentedBytesStore.class)), instanceOf(RocksDBSegmentedBytesStore.class)));
} |
public static NacosLogging getInstance() {
return NacosLoggingInstance.INSTANCE;
} | @Test
void testGetInstance() {
NacosLogging instance = NacosLogging.getInstance();
assertNotNull(instance);
} |
@Override
public void checkAuthorization(
final KsqlSecurityContext securityContext,
final MetaStore metaStore,
final Statement statement
) {
if (statement instanceof Query) {
validateQuery(securityContext, metaStore, (Query)statement);
} else if (statement instanceof InsertInto) {
validateInsertInto(securityContext, metaStore, (InsertInto)statement);
} else if (statement instanceof CreateAsSelect) {
validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
} else if (statement instanceof PrintTopic) {
validatePrintTopic(securityContext, (PrintTopic)statement);
} else if (statement instanceof CreateSource) {
validateCreateSource(securityContext, (CreateSource)statement);
}
} | @Test
public void shouldThrowWhenJoinSelectWithoutSubjectReadPermissionsDenied() {
// Given:
givenSubjectAccessDenied(AVRO_TOPIC + "-value", AclOperation.READ);
final Statement statement = givenStatement(String.format(
"SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", KAFKA_STREAM_TOPIC, AVRO_STREAM_TOPIC)
);
// When:
final Exception e = assertThrows(
KsqlSchemaAuthorizationException.class,
() -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement)
);
// Then:
assertThat(e.getMessage(), containsString(String.format(
"Authorization denied to Read on Schema Registry subject: [%s-value]", AVRO_TOPIC
)));
} |
@Override public Metadata headers() {
return headers;
} | @Test void headers() {
assertThat(response.headers()).isSameAs(headers);
} |
protected boolean nodeValueIsAllowed(Object value) {
return ALLOWED_TYPES.stream().anyMatch(objectPredicate -> objectPredicate.test(value));
} | @Test
void nodeValueIsAllowed_False() {
Object value = Boolean.TRUE;
assertThat(rangeFunction.nodeValueIsAllowed(value))
.withFailMessage(String.format("%s", value)).isFalse();
value = Collections.emptyMap();
assertThat(rangeFunction.nodeValueIsAllowed(value))
.withFailMessage(String.format("%s", value)).isFalse();
value = Collections.emptyList();
assertThat(rangeFunction.nodeValueIsAllowed(value))
.withFailMessage(String.format("%s", value)).isFalse();
} |
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
NoSuchPaddingException, InvalidKeySpecException,
InvalidAlgorithmParameterException,
KeyException, IOException {
return toPrivateKey(keyFile, keyPassword, true);
} | @Test
public void testPkcs1AesEncryptedRsaEmptyPassword() throws Exception {
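// an empty password cannot decrypt the AES-encrypted PKCS#1 key, so parsing is expected to fail with an IOException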
assertThrows(IOException.class, new Executable() {
@Override
public void execute() throws Throwable {
SslContext.toPrivateKey(new File(getClass().getResource("rsa_pkcs1_aes_encrypted.key")
.getFile()), "");
}
});
} |
public long getUnknown_18() {
return unknown_18;
} | @Test
public void testGetUnknown_18() {
assertEquals(TestParameters.VP_UNKNOWN_18, chmLzxcControlData.getUnknown_18());
} |
public static StructType groupingKeyType(Schema schema, Collection<PartitionSpec> specs) {
return buildPartitionProjectionType("grouping key", specs, commonActiveFieldIds(schema, specs));
} | @Test
public void testGroupingKeyTypeWithAddingBackSamePartitionFieldInV1Table() {
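// re-adding "data" in a V1 table yields a new partition field, so only "category" stays common across all specs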
TestTables.TestTable table =
TestTables.create(tableDir, "test", SCHEMA, BY_CATEGORY_DATA_SPEC, V1_FORMAT_VERSION);
table.updateSpec().removeField("data").commit();
table.updateSpec().addField("data").commit();
StructType expectedType =
StructType.of(NestedField.optional(1000, "category", Types.StringType.get()));
StructType actualType = Partitioning.groupingKeyType(table.schema(), table.specs().values());
assertThat(actualType).isEqualTo(expectedType);
} |
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
return fromXmlPartial(toInputStream(partial, UTF_8), o);
} | @Test
void shouldLoadPartialConfigWithEnvironment() throws Exception {
String partialConfigWithPipeline = configWithEnvironments(
"""
<environments>
<environment name='uat'>
<pipelines>
<pipeline name='pipeline1' />
</pipelines>
</environment>
<environment name='prod' />
</environments>""", CONFIG_SCHEMA_VERSION);
PartialConfig partialConfig = xmlLoader.fromXmlPartial(partialConfigWithPipeline, PartialConfig.class);
EnvironmentsConfig environmentsConfig = partialConfig.getEnvironments();
assertThat(environmentsConfig.size()).isEqualTo(2);
assertThat(environmentsConfig.get(0).containsPipeline(new CaseInsensitiveString("pipeline1"))).isTrue();
assertThat(environmentsConfig.get(1).getPipelines().size()).isEqualTo(0);
} |
@Override
public int leaveGroupEpoch() {
return groupInstanceId.isPresent() ?
ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH :
ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH;
} | @Test
public void testLeaveGroupEpoch() {
// Static member should leave the group with epoch -2.
ConsumerMembershipManager membershipManager = createMemberInStableState("instance1");
mockLeaveGroup();
membershipManager.leaveGroup();
verify(subscriptionState).unsubscribe();
assertEquals(MemberState.LEAVING, membershipManager.state());
assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH,
membershipManager.memberEpoch());
// Dynamic member should leave the group with epoch -1.
membershipManager = createMemberInStableState(null);
mockLeaveGroup();
membershipManager.leaveGroup();
verify(subscriptionState).unsubscribe();
assertEquals(MemberState.LEAVING, membershipManager.state());
assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH,
membershipManager.memberEpoch());
} |
public PageListResponse<IndexSetFieldTypeSummary> getIndexSetFieldTypeSummary(final Set<String> streamIds,
final String fieldName,
final Predicate<String> indexSetPermissionPredicate) {
return getIndexSetFieldTypeSummary(streamIds, fieldName, indexSetPermissionPredicate, 1, 50, DEFAULT_SORT.id(), DEFAULT_SORT.direction());
} | @Test
void testFillsSummaryDataProperly() {
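// only the index set permitted by the predicate ("canSee") should contribute to the summary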
Predicate<String> indexSetPermissionPredicate = indexSetID -> indexSetID.contains("canSee");
doReturn(Set.of("canSee", "cannotSee")).when(streamService).indexSetIdsByIds(Set.of("stream_id"));
doReturn(List.of("Stream1", "Stream2")).when(streamService).streamTitlesForIndexSet("canSee");
doReturn(List.of("text", "keyword")).when(indexFieldTypesService).fieldTypeHistory("canSee", "field_name", true);
mockIndexSetConfig("canSee", "Index Set From The Top Of The Universe");
final PageListResponse<IndexSetFieldTypeSummary> summary = toTest.getIndexSetFieldTypeSummary(Set.of("stream_id"), "field_name", indexSetPermissionPredicate);
assertThat(summary.elements())
.isNotNull()
.isEqualTo(List.of(new IndexSetFieldTypeSummary("canSee", "Index Set From The Top Of The Universe", List.of("Stream1", "Stream2"), List.of("text", "keyword"))));
} |
public EndpointConfig setSocketKeepIntervalSeconds(int socketKeepIntervalSeconds) {
Preconditions.checkPositive("socketKeepIntervalSeconds", socketKeepIntervalSeconds);
Preconditions.checkTrue(socketKeepIntervalSeconds < MAX_SOCKET_KEEP_INTERVAL_SECONDS,
"socketKeepIntervalSeconds value " + socketKeepIntervalSeconds + " is outside valid range 1 - 32767");
this.socketKeepIntervalSeconds = socketKeepIntervalSeconds;
return this;
} | @Test
public void testKeepIntervalSecondsValidation() {
EndpointConfig endpointConfig = new EndpointConfig();
Assert.assertThrows(IllegalArgumentException.class, () -> endpointConfig.setSocketKeepIntervalSeconds(0));
Assert.assertThrows(IllegalArgumentException.class, () -> endpointConfig.setSocketKeepIntervalSeconds(32768));
Assert.assertThrows(IllegalArgumentException.class, () -> endpointConfig.setSocketKeepIntervalSeconds(-17));
} |
@Override
public void start(BundleContext bundleContext) throws Exception {
Bundle bundle = bundleContext.getBundle();
pluginRegistryService = bundleContext.getService(bundleContext.getServiceReference(PluginRegistryService.class));
bundleSymbolicName = bundle.getSymbolicName();
pluginId = pluginRegistryService.getPluginIDOfFirstPluginInBundle(bundleSymbolicName);
LoggingService loggingService = bundleContext.getService(bundleContext.getServiceReference(LoggingService.class));
Logger.initialize(loggingService);
getImplementersAndRegister(bundleContext, bundle, pluginRegistryService.extensionClassesInBundle(bundleSymbolicName));
reportErrorsToHealthService();
} | @Test
public void shouldSetupTheLoggerWithTheLoggingServiceAndPluginId() throws Exception {
setupClassesInBundle();
activator.start(context);
Logger logger = Logger.getLoggerFor(DefaultGoPluginActivatorTest.class);
logger.info("INFO");
verify(loggingService).info(PLUGIN_ID, DefaultGoPluginActivatorTest.class.getName(), "INFO");
} |
@Operation(
summary = "Search for the given search keys in the key transparency log",
description = """
Enforced unauthenticated endpoint. Returns a response if all search keys exist in the key transparency log.
"""
)
@ApiResponse(responseCode = "200", description = "All search key lookups were successful", useReturnTypeSchema = true)
@ApiResponse(responseCode = "403", description = "At least one search key lookup to value mapping was invalid")
@ApiResponse(responseCode = "404", description = "At least one search key lookup did not find the key")
@ApiResponse(responseCode = "413", description = "Ratelimited")
@ApiResponse(responseCode = "422", description = "Invalid request format")
@POST
@Path("/search")
@RateLimitedByIp(RateLimiters.For.KEY_TRANSPARENCY_SEARCH_PER_IP)
@Produces(MediaType.APPLICATION_JSON)
public KeyTransparencySearchResponse search(
@ReadOnly @Auth final Optional<AuthenticatedDevice> authenticatedAccount,
@NotNull @Valid final KeyTransparencySearchRequest request) {
// Disallow clients from making authenticated requests to this endpoint
requireNotAuthenticated(authenticatedAccount);
try {
final CompletableFuture<byte[]> aciSearchKeyResponseFuture = keyTransparencyServiceClient.search(
getFullSearchKeyByteString(ACI_PREFIX, request.aci().toCompactByteArray()),
request.lastTreeHeadSize(),
request.distinguishedTreeHeadSize(),
KEY_TRANSPARENCY_RPC_TIMEOUT);
final CompletableFuture<byte[]> e164SearchKeyResponseFuture = request.e164()
.map(e164 -> keyTransparencyServiceClient.search(
getFullSearchKeyByteString(E164_PREFIX, e164.getBytes(StandardCharsets.UTF_8)),
request.lastTreeHeadSize(),
request.distinguishedTreeHeadSize(),
KEY_TRANSPARENCY_RPC_TIMEOUT))
.orElse(CompletableFuture.completedFuture(null));
final CompletableFuture<byte[]> usernameHashSearchKeyResponseFuture = request.usernameHash()
.map(usernameHash -> keyTransparencyServiceClient.search(
getFullSearchKeyByteString(USERNAME_PREFIX, request.usernameHash().get()),
request.lastTreeHeadSize(),
request.distinguishedTreeHeadSize(),
KEY_TRANSPARENCY_RPC_TIMEOUT))
.orElse(CompletableFuture.completedFuture(null));
return CompletableFuture.allOf(aciSearchKeyResponseFuture, e164SearchKeyResponseFuture,
usernameHashSearchKeyResponseFuture)
.thenApply(ignored ->
new KeyTransparencySearchResponse(aciSearchKeyResponseFuture.join(),
Optional.ofNullable(e164SearchKeyResponseFuture.join()),
Optional.ofNullable(usernameHashSearchKeyResponseFuture.join())))
.join();
} catch (final CancellationException exception) {
LOGGER.error("Unexpected cancellation from key transparency service", exception);
throw new ServerErrorException(Response.Status.SERVICE_UNAVAILABLE, exception);
} catch (final CompletionException exception) {
handleKeyTransparencyServiceError(exception);
}
// This is unreachable
return null;
} | @Test
void searchAuthenticated() {
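// the search endpoint is enforced-unauthenticated, so an authenticated request must fail before reaching the key transparency service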
final Invocation.Builder request = resources.getJerseyTest()
.target("/v1/key-transparency/search")
.request()
.header(HttpHeaders.AUTHORIZATION, AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD));
try (Response response = request.post(Entity.json(createSearchRequestJson(ACI, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty())))) {
assertEquals(400, response.getStatus());
}
verify(keyTransparencyServiceClient, never()).search(any(), any(), any(), any());
} |
public B connections(Integer connections) {
this.connections = connections;
return getThis();
} | @Test
void connections() {
InterfaceBuilder builder = new InterfaceBuilder();
builder.connections(1);
Assertions.assertEquals(1, builder.build().getConnections().intValue());
} |
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
switch (outputFormat) {
case JSON:
printAsJson(entityList);
break;
case TABULAR:
final boolean showStatements = entityList.size() > 1;
for (final KsqlEntity ksqlEntity : entityList) {
writer().println();
if (showStatements) {
writer().println(ksqlEntity.getStatementText());
}
printAsTable(ksqlEntity);
}
break;
default:
throw new RuntimeException(String.format(
"Unexpected output format: '%s'",
outputFormat.name()
));
}
} | @Test
public void shouldPrintExplainQueryWithError() {
final long timestamp = 1596644936314L;
// Given:
final QueryDescriptionEntity queryEntity = new QueryDescriptionEntity(
"statement",
new QueryDescription(
new QueryId("id"),
"statement",
Optional.empty(),
ImmutableList.of(
new FieldInfo(
"name",
new SchemaInfo(SqlBaseType.STRING, ImmutableList.of(), null),
Optional.empty())),
ImmutableSet.of("source"),
ImmutableSet.of("sink"),
"topology",
"executionPlan",
ImmutableMap.of("overridden.prop", 42),
ImmutableMap.of(new KsqlHostInfoEntity("foo", 123), KsqlQueryStatus.ERROR),
KsqlQueryType.PERSISTENT,
ImmutableList.of(new QueryError(timestamp, "error", Type.SYSTEM)),
ImmutableSet.of(
new StreamsTaskMetadata(
"test",
Collections.emptySet(),
Optional.empty()
)
),
"consumerGroupId"
)
);
final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of(queryEntity));
// When:
console.printKsqlEntityList(entityList);
// Then:
final String output = terminal.getOutputString();
Approvals.verify(output, approvalOptions);
} |
@Override
public <T> T convert(DataTable dataTable, Type type) {
return convert(dataTable, type, false);
} | @Test
void convert_to_empty_table__empty_table() {
DataTable table = emptyDataTable();
assertSame(table, converter.convert(table, DataTable.class));
} |
@Override
public void execute(final ConnectionSession connectionSession) throws SQLException {
Map<String, String> sessionVariables = extractSessionVariables();
validateSessionVariables(sessionVariables.keySet());
new CharsetSetExecutor(databaseType, connectionSession).set(sessionVariables);
new SessionVariableRecordExecutor(databaseType, connectionSession).recordVariable(sessionVariables);
executeSetGlobalVariablesIfPresent(connectionSession);
} | @Test
void assertSetVariableWithIncorrectScope() {
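// max_connections is a global-only variable, so assigning it without GLOBAL scope must raise ErrorGlobalVariableException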
VariableAssignSegment variableAssignSegment = new VariableAssignSegment();
variableAssignSegment.setVariable(new VariableSegment(0, 0, "max_connections"));
variableAssignSegment.setAssignValue("");
SetStatement setStatement = new MySQLSetStatement();
setStatement.getVariableAssigns().add(variableAssignSegment);
MySQLSetVariableAdminExecutor executor = new MySQLSetVariableAdminExecutor(setStatement);
assertThrows(ErrorGlobalVariableException.class, () -> executor.execute(mock(ConnectionSession.class)));
} |
@Override
public Optional<DispatchEvent> build(final DataChangedEvent event) {
if (Strings.isNullOrEmpty(event.getValue())) {
return Optional.empty();
}
Optional<QualifiedDataSource> qualifiedDataSource = QualifiedDataSourceNode.extractQualifiedDataSource(event.getKey());
if (qualifiedDataSource.isPresent()) {
QualifiedDataSourceState state = new YamlQualifiedDataSourceStateSwapper().swapToObject(YamlEngine.unmarshal(event.getValue(), YamlQualifiedDataSourceState.class));
return Optional.of(new QualifiedDataSourceStateEvent(qualifiedDataSource.get(), state));
}
return Optional.empty();
} | @Test
void assertCreateEnabledQualifiedDataSourceChangedEvent() {
Optional<DispatchEvent> actual = new QualifiedDataSourceDispatchEventBuilder().build(
new DataChangedEvent("/nodes/qualified_data_sources/replica_query_db.readwrite_ds.replica_ds_0", "state: ENABLED\n", Type.ADDED));
assertTrue(actual.isPresent());
QualifiedDataSourceStateEvent actualEvent = (QualifiedDataSourceStateEvent) actual.get();
assertThat(actualEvent.getQualifiedDataSource().getDatabaseName(), is("replica_query_db"));
assertThat(actualEvent.getQualifiedDataSource().getGroupName(), is("readwrite_ds"));
assertThat(actualEvent.getQualifiedDataSource().getDataSourceName(), is("replica_ds_0"));
assertThat(actualEvent.getStatus().getState(), is(DataSourceState.ENABLED));
} |
void writeLogs(OutputStream out, Instant from, Instant to, long maxLines, Optional<String> hostname) {
double fromSeconds = from.getEpochSecond() + from.getNano() / 1e9;
double toSeconds = to.getEpochSecond() + to.getNano() / 1e9;
long linesWritten = 0;
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
for (List<Path> logs : getMatchingFiles(from, to)) {
List<LogLineIterator> logLineIterators = new ArrayList<>();
try {
// Logs in each sub-list contain entries covering the same time interval, so do a merge sort while reading
for (Path log : logs)
logLineIterators.add(new LogLineIterator(log, fromSeconds, toSeconds, hostname));
Iterator<LineWithTimestamp> lines = Iterators.mergeSorted(logLineIterators,
Comparator.comparingDouble(LineWithTimestamp::timestamp));
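// Bound the reordering window: buffer up to 1000 lines in a heap so entries
// that are slightly out of order within a file are re-sorted before writing.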
PriorityQueue<LineWithTimestamp> heap = new PriorityQueue<>(Comparator.comparingDouble(LineWithTimestamp::timestamp));
while (lines.hasNext()) {
heap.offer(lines.next());
if (heap.size() > 1000) {
if (linesWritten++ >= maxLines) return;
writer.write(heap.poll().line);
writer.newLine();
}
}
while ( ! heap.isEmpty()) {
if (linesWritten++ >= maxLines) return;
writer.write(heap.poll().line);
writer.newLine();
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
for (LogLineIterator ll : logLineIterators) {
try { ll.close(); } catch (IOException ignored) { }
}
Exceptions.uncheck(writer::flush);
}
}
} | @Test
void logsForSingleNodeIsRetrieved() {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
LogReader logReader = new LogReader(logDirectory, Pattern.compile(".*"));
logReader.writeLogs(baos, Instant.EPOCH, Instant.EPOCH.plus(Duration.ofDays(2)), 100, Optional.of("node2.com"));
assertEquals(log101 + log100b + log100a + log200, baos.toString(UTF_8));
} |
@Override
public int getOrder() {
return PluginEnum.REDIRECT.getCode();
} | @Test
public void testGetOrder() {
final int result = redirectPlugin.getOrder();
assertThat(PluginEnum.REDIRECT.getCode(), Matchers.is(result));
} |
public static String getKey(String dataId, String group) {
StringBuilder sb = new StringBuilder();
urlEncode(dataId, sb);
sb.append('+');
urlEncode(group, sb);
return sb.toString();
} | @Test
void testGetKeyByThreeParams() {
// Act
final String actual = GroupKey2.getKey(",", ",", "3");
// Assert result
assertEquals(",+,+3", actual);
} |
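// Hedged sketch (not from the original suite): the two-argument overload shown
// above joins the encoded dataId and group with '+'; plain alphanumerics are
// assumed to pass through urlEncode unchanged.
@Test
void testGetKeyByTwoParams() {
    final String actual = GroupKey2.getKey("dataId", "group");
    assertEquals("dataId+group", actual);
}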
@Override
public FileMergingCheckpointStateOutputStream createCheckpointStateOutputStream(
SubtaskKey subtaskKey, long checkpointId, CheckpointedStateScope scope) {
return new FileMergingCheckpointStateOutputStream(
writeBufferSize,
new FileMergingCheckpointStateOutputStream.FileMergingSnapshotManagerProxy() {
PhysicalFile physicalFile;
LogicalFile logicalFile;
@Override
public Tuple2<FSDataOutputStream, Path> providePhysicalFile()
throws IOException {
physicalFile =
getOrCreatePhysicalFileForCheckpoint(
subtaskKey, checkpointId, scope);
return new Tuple2<>(
physicalFile.getOutputStream(), physicalFile.getFilePath());
}
@Override
public SegmentFileStateHandle closeStreamAndCreateStateHandle(
Path filePath, long startPos, long stateSize) throws IOException {
if (physicalFile == null) {
return null;
} else {
// deal with logical file
logicalFile =
createLogicalFile(
physicalFile, startPos, stateSize, subtaskKey);
logicalFile.advanceLastCheckpointId(checkpointId);
// track the logical file
synchronized (lock) {
uploadedStates
.computeIfAbsent(checkpointId, key -> new HashSet<>())
.add(logicalFile);
}
// deal with physicalFile file
returnPhysicalFileForNextReuse(subtaskKey, checkpointId, physicalFile);
return new SegmentFileStateHandle(
physicalFile.getFilePath(),
startPos,
stateSize,
scope,
logicalFile.getFileId());
}
}
@Override
public void closeStreamExceptionally() throws IOException {
if (physicalFile != null) {
if (logicalFile != null) {
discardSingleLogicalFile(logicalFile, checkpointId);
} else {
// The physical file should be closed anyway. This is because the
// last segmented write on this file is likely to have failed, and
// we want to prevent further reusing of this file.
physicalFile.close();
physicalFile.deleteIfNecessary();
}
}
}
});
} | @Test
public void testConcurrentWriting() throws Exception {
long checkpointId = 1;
int numThreads = 12;
int perStreamWriteNum = 128;
Set<Future<SegmentFileStateHandle>> futures = new HashSet<>();
try (FileMergingSnapshotManager fmsm = createFileMergingSnapshotManager(checkpointBaseDir);
CloseableRegistry closeableRegistry = new CloseableRegistry()) {
// write data concurrently
for (int i = 0; i < numThreads; i++) {
futures.add(
CompletableFuture.supplyAsync(
() -> {
FileMergingCheckpointStateOutputStream stream =
fmsm.createCheckpointStateOutputStream(
subtaskKey1,
checkpointId,
CheckpointedStateScope.EXCLUSIVE);
try {
closeableRegistry.registerCloseable(stream);
for (int j = 0; j < perStreamWriteNum; j++) {
stream.write(j);
}
return stream.closeAndGetHandle();
} catch (IOException e) {
throw new RuntimeException(e);
}
}));
}
// assert that multiple segments in the same file were not written concurrently
for (Future<SegmentFileStateHandle> future : futures) {
SegmentFileStateHandle segmentFileStateHandle = future.get();
FSDataInputStream is = segmentFileStateHandle.openInputStream();
closeableRegistry.registerCloseable(is);
int readValue;
int expected = 0;
while ((readValue = is.read()) != -1) {
assertThat(readValue).isEqualTo(expected++);
}
}
}
} |
public boolean isSupported(final SQLStatement sqlStatement) {
for (Class<? extends SQLStatement> each : supportedSQLStatements) {
if (each.isAssignableFrom(sqlStatement.getClass())) {
return true;
}
}
for (Class<? extends SQLStatement> each : unsupportedSQLStatements) {
if (each.isAssignableFrom(sqlStatement.getClass())) {
return false;
}
}
return true;
} | @Test
void assertIsSupportedWithInSupportedList() {
assertTrue(new SQLSupportedJudgeEngine(Collections.singleton(SelectStatement.class), Collections.emptyList()).isSupported(mock(SelectStatement.class)));
} |
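// Hedged companion case (not from the original suite): the allow list is
// consulted first, the deny list second, and anything in neither list
// defaults to supported.
@Test
void assertIsSupportedWithInUnsupportedListAndDefault() {
    SQLSupportedJudgeEngine engine = new SQLSupportedJudgeEngine(Collections.emptyList(), Collections.singleton(SelectStatement.class));
    assertFalse(engine.isSupported(mock(SelectStatement.class)));
    assertTrue(engine.isSupported(mock(SQLStatement.class)));
}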
@Override
public BackgroundException map(final IOException e) {
final StringBuilder buffer = new StringBuilder();
this.append(buffer, e.getMessage());
if(e instanceof FTPConnectionClosedException) {
return new ConnectionRefusedException(buffer.toString(), e);
}
if(e instanceof FTPException) {
return this.handle((FTPException) e, buffer);
}
if(e instanceof MalformedServerReplyException) {
return new InteroperabilityException(buffer.toString(), e);
}
return new DefaultIOExceptionMappingService().map(e);
} | @Test
public void testFile() {
assertTrue(new FTPExceptionMappingService().map(new FTPException(550, "")) instanceof NotfoundException);
} |
protected Destination createDestination(String destName) throws JMSException {
String simpleName = getSimpleName(destName);
byte destinationType = getDestinationType(destName);
if (destinationType == ActiveMQDestination.QUEUE_TYPE) {
LOG.info("Creating queue: {}", destName);
return getSession().createQueue(simpleName);
} else if (destinationType == ActiveMQDestination.TOPIC_TYPE) {
LOG.info("Creating topic: {}", destName);
return getSession().createTopic(simpleName);
} else {
return createTemporaryDestination(destName);
}
} | @Test
public void testCreateDestination() throws JMSException {
assertDestinationNameType("dest", TOPIC_TYPE,
asAmqDest(jmsClient.createDestination("dest")));
} |
public synchronized GpuDeviceInformation parseXml(String xmlContent)
throws YarnException {
InputSource inputSource = new InputSource(new StringReader(xmlContent));
SAXSource source = new SAXSource(xmlReader, inputSource);
try {
return (GpuDeviceInformation) unmarshaller.unmarshal(source);
} catch (JAXBException e) {
String msg = "Failed to parse XML output of " +
GPU_SCRIPT_REFERENCE + "!";
LOG.error(msg, e);
throw new YarnException(msg, e);
}
} | @Test
public void testParse() throws IOException, YarnException {
File f = new File("src/test/resources/nvidia-smi-sample-output.xml");
String s = FileUtils.readFileToString(f, StandardCharsets.UTF_8);
GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
GpuDeviceInformation info = parser.parseXml(s);
assertEquals("375.66", info.getDriverVersion());
assertEquals(2, info.getGpus().size());
assertFirstGpu(info.getGpus().get(0));
assertSecondGpu(info.getGpus().get(1));
} |
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
} | @Test
public void shouldGenerateName() {
// Given:
givenStatement("CREATE STREAM x AS SELECT * FROM SOURCE;");
// When:
injector.inject(statement, builder);
// Then:
verify(builder).withName("X");
} |
public static Locale createLocale( String localeCode ) {
if ( Utils.isEmpty( localeCode ) ) {
return null;
}
StringTokenizer parser = new StringTokenizer( localeCode, "_" );
if ( parser.countTokens() == 2 ) {
return new Locale( parser.nextToken(), parser.nextToken() );
}
if ( parser.countTokens() == 3 ) {
return new Locale( parser.nextToken(), parser.nextToken(), parser.nextToken() );
}
return new Locale( localeCode );
} | @Test
public void createLocale_Null() throws Exception {
assertNull( EnvUtil.createLocale( null ) );
} |
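// Hedged companion case (not from the original suite) covering the tokenizer
// branches above: two tokens yield language and country, three add a variant,
// and a single token becomes a bare language code.
@Test
public void createLocale_Tokens() throws Exception {
    assertEquals( new Locale( "en", "US" ), EnvUtil.createLocale( "en_US" ) );
    assertEquals( new Locale( "de", "CH", "1901" ), EnvUtil.createLocale( "de_CH_1901" ) );
    assertEquals( new Locale( "fr" ), EnvUtil.createLocale( "fr" ) );
}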
static void scan(Class<?> aClass, BiConsumer<Method, Annotation> consumer) {
// prevent unnecessary checking of Object methods
if (Object.class.equals(aClass)) {
return;
}
if (!isInstantiable(aClass)) {
return;
}
for (Method method : safelyGetMethods(aClass)) {
scan(consumer, aClass, method);
}
} | @Test
void scan_ignores_non_instantiable_class() {
MethodScanner.scan(NonStaticInnerClass.class, backend);
assertThat(scanResult, empty());
} |
public static TypeInformation<?> readTypeInfo(String typeString) {
final List<Token> tokens = tokenize(typeString);
final TokenConverter converter = new TokenConverter(typeString, tokens);
return converter.convert();
} | @Test
void testWriteComplexTypes() {
testReadAndWrite("ROW<f0 DECIMAL, f1 TINYINT>", Types.ROW(Types.BIG_DEC, Types.BYTE));
testReadAndWrite(
"ROW<hello DECIMAL, world TINYINT>",
Types.ROW_NAMED(new String[] {"hello", "world"}, Types.BIG_DEC, Types.BYTE));
testReadAndWrite(
"POJO<org.apache.flink.table.utils.TypeStringUtilsTest$TestPojo>",
TypeExtractor.createTypeInfo(TestPojo.class));
testReadAndWrite(
"ANY<org.apache.flink.table.utils.TypeStringUtilsTest$TestNoPojo>",
TypeExtractor.createTypeInfo(TestNoPojo.class));
testReadAndWrite(
"MAP<VARCHAR, ROW<f0 DECIMAL, f1 TINYINT>>",
Types.MAP(Types.STRING, Types.ROW(Types.BIG_DEC, Types.BYTE)));
testReadAndWrite(
"MULTISET<ROW<f0 DECIMAL, f1 TINYINT>>",
new MultisetTypeInfo<>(Types.ROW(Types.BIG_DEC, Types.BYTE)));
testReadAndWrite("PRIMITIVE_ARRAY<TINYINT>", Types.PRIMITIVE_ARRAY(Types.BYTE));
testReadAndWrite(
"OBJECT_ARRAY<POJO<org.apache.flink.table.utils.TypeStringUtilsTest$TestPojo>>",
Types.OBJECT_ARRAY(TypeExtractor.createTypeInfo(TestPojo.class)));
// test escaping
assertThat(TypeStringUtils.readTypeInfo("ROW<`he \nllo` DECIMAL, world TINYINT>"))
.isEqualTo(
Types.ROW_NAMED(
new String[] {"he \nllo", "world"},
Types.BIG_DEC,
Types.BYTE));
assertThat(TypeStringUtils.readTypeInfo("ROW<`he``llo` DECIMAL, world TINYINT>"))
.isEqualTo(
Types.ROW_NAMED(
new String[] {"he`llo", "world"}, Types.BIG_DEC, Types.BYTE));
// test backward compatibility with brackets ()
assertThat(TypeStringUtils.readTypeInfo("ROW(`he \nllo` DECIMAL, world TINYINT)"))
.isEqualTo(
Types.ROW_NAMED(
new String[] {"he \nllo", "world"},
Types.BIG_DEC,
Types.BYTE));
// test nesting
testReadAndWrite(
"ROW<singleton ROW<f0 INT>, twoField ROW<`Field 1` ROW<f0 DECIMAL>, `Field``s 2` VARCHAR>>",
Types.ROW_NAMED(
new String[] {"singleton", "twoField"},
Types.ROW(Types.INT),
Types.ROW_NAMED(
new String[] {"Field 1", "Field`s 2"},
Types.ROW(Types.BIG_DEC),
Types.STRING)));
testWrite(
"ROW<f0 DECIMAL, f1 TIMESTAMP, f2 TIME, f3 DATE>",
Types.ROW_NAMED(
new String[] {"f0", "f1", "f2", "f3"},
Types.BIG_DEC,
Types.LOCAL_DATE_TIME,
Types.LOCAL_TIME,
Types.LOCAL_DATE));
} |
static Schema sortKeySchema(Schema schema, SortOrder sortOrder) {
List<SortField> sortFields = sortOrder.fields();
int size = sortFields.size();
List<Types.NestedField> transformedFields = Lists.newArrayListWithCapacity(size);
for (int i = 0; i < size; ++i) {
int sourceFieldId = sortFields.get(i).sourceId();
Types.NestedField sourceField = schema.findField(sourceFieldId);
Preconditions.checkArgument(
sourceField != null, "Cannot find source field: %s", sourceFieldId);
Type transformedType = sortFields.get(i).transform().getResultType(sourceField.type());
// There could be multiple transformations on the same source column, like in the PartitionKey
// case. To resolve the collision, field id is set to transform index and field name is set to
// sourceFieldName_transformIndex
Types.NestedField transformedField =
Types.NestedField.of(
i,
sourceField.isOptional(),
sourceField.name() + '_' + i,
transformedType,
sourceField.doc());
transformedFields.add(transformedField);
}
return new Schema(transformedFields);
} | @Test
public void testResultSchema() {
Schema schema =
new Schema(
Types.NestedField.required(1, "id", Types.StringType.get()),
Types.NestedField.required(2, "ratio", Types.DoubleType.get()),
Types.NestedField.optional(
3,
"user",
Types.StructType.of(
Types.NestedField.required(11, "name", Types.StringType.get()),
Types.NestedField.required(12, "ts", Types.TimestampType.withoutZone()),
Types.NestedField.optional(13, "device_id", Types.UUIDType.get()),
Types.NestedField.optional(
14,
"location",
Types.StructType.of(
Types.NestedField.required(101, "lat", Types.FloatType.get()),
Types.NestedField.required(102, "long", Types.FloatType.get()),
Types.NestedField.required(103, "blob", Types.BinaryType.get()))))));
SortOrder sortOrder =
SortOrder.builderFor(schema)
.asc("ratio")
.sortBy(Expressions.hour("user.ts"), SortDirection.ASC, NullOrder.NULLS_FIRST)
.sortBy(
Expressions.bucket("user.device_id", 16), SortDirection.ASC, NullOrder.NULLS_FIRST)
.sortBy(
Expressions.truncate("user.location.blob", 16),
SortDirection.ASC,
NullOrder.NULLS_FIRST)
.build();
assertThat(SortKeyUtil.sortKeySchema(schema, sortOrder).asStruct())
.isEqualTo(
Types.StructType.of(
Types.NestedField.required(0, "ratio_0", Types.DoubleType.get()),
Types.NestedField.required(1, "ts_1", Types.IntegerType.get()),
Types.NestedField.optional(2, "device_id_2", Types.IntegerType.get()),
Types.NestedField.required(3, "blob_3", Types.BinaryType.get())));
} |
public static String segmentIdHex(String segIdStr) {
int segId = Integer.parseInt(segIdStr);
return String.format("%06x", segId).toLowerCase();
} | @Test
public void testSegmentIdHex() {
assertEquals("000001", segmentIdHex("1"));
assertEquals("00000a", segmentIdHex("10"));
assertEquals("ffffff", segmentIdHex("16777215"));
} |
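// Hedged edge case (not from the original suite): %06x zero-pads to a minimum
// of six digits but never truncates, so ids above 0xffffff widen to seven.
@Test
public void testSegmentIdHexOverflow() {
    assertEquals("1000000", segmentIdHex("16777216"));
}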
public static GroupInstruction createGroup(final GroupId groupId) {
checkNotNull(groupId, "GroupId cannot be null");
return new GroupInstruction(groupId);
} | @Test
public void testCreateGroupMethod() {
final Instruction instruction = Instructions.createGroup(groupId1);
final Instructions.GroupInstruction groupInstruction =
checkAndConvert(instruction,
Instruction.Type.GROUP,
Instructions.GroupInstruction.class);
assertThat(groupInstruction.groupId(), is(equalTo(groupId1)));
} |
@Override
public void moveCenter(double moveHorizontal, double moveVertical) {
this.moveCenterAndZoom(moveHorizontal, moveVertical, (byte) 0, true);
} | @Test
public void moveCenterTest() {
MapViewPosition mapViewPosition = new MapViewPosition(new FixedTileSizeDisplayModel(256));
mapViewPosition.moveCenter(
MercatorProjection.getMapSize((byte) 0, new FixedTileSizeDisplayModel(256).getTileSize()) / -360d, 0);
MapPosition mapPosition = mapViewPosition.getMapPosition();
Assert.assertEquals(0, mapPosition.latLong.latitude, 0);
Assert.assertEquals(1, mapPosition.latLong.longitude, 1.0E-14);
Assert.assertEquals(0, mapPosition.zoomLevel);
} |
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
} | @Test
public void shouldFormatLikePredicate() {
final LikePredicate predicate = new LikePredicate(new StringLiteral("string"), new StringLiteral("*"), Optional.empty());
assertThat(ExpressionFormatter.formatExpression(predicate), equalTo("('string' LIKE '*')"));
} |
public static ResourceType determineResourceType(final String resourceName) {
for ( Map.Entry<String, ResourceType> entry : CACHE.entrySet() ) {
if (resourceName.endsWith(entry.getKey())) {
if (entry.getValue().equals(ResourceType.DRT)) {
LOG.warn("DRT (Drools Rule Template) is deprecated. Please consider drools-decisiontables or third party templating features.");
}
return entry.getValue();
}
}
return null;
} | @Test
public void testDetermineResourceType() {
assertThat(ResourceType.determineResourceType("test.drl.xls")).isEqualTo(ResourceType.DTABLE);
assertThat(ResourceType.determineResourceType("test.xls")).isNull();
} |
public void validate(CreateReviewAnswerRequest request) {
Question question = questionRepository.findById(request.questionId())
.orElseThrow(() -> new SubmittedQuestionNotFoundException(request.questionId()));
validateNotIncludingOptions(request);
validateQuestionRequired(question, request);
validateLength(request);
} | @Test
void 필수_텍스트형_질문에_응답을_하지_않으면_예외가_발생한다() { // a required text question left unanswered throws
// given
Question savedQuestion
= questionRepository.save(new Question(true, QuestionType.TEXT, "질문", "가이드라인", 1));
CreateReviewAnswerRequest request = new CreateReviewAnswerRequest(savedQuestion.getId(), null, null);
// when, then
assertThatCode(() -> createTextAnswerRequestValidator.validate(request))
.isInstanceOf(RequiredQuestionNotAnsweredException.class);
} |
@Override
public Enumeration<URL> getResources(String name) throws IOException {
List<URL> resources = new ArrayList<>();
ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
log.trace("Received request to load resources '{}'", name);
for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
switch (classLoadingSource) {
case APPLICATION:
if (getParent() != null) {
resources.addAll(Collections.list(getParent().getResources(name)));
}
break;
case PLUGIN:
resources.addAll(Collections.list(findResources(name)));
break;
case DEPENDENCIES:
resources.addAll(findResourcesFromDependencies(name));
break;
}
}
return Collections.enumeration(resources);
} | @Test
void parentFirstGetResourcesNonExisting() throws IOException {
assertFalse(parentFirstPluginClassLoader.getResources("META-INF/non-existing-file").hasMoreElements());
} |
@Override
public List<String> getServerList() {
return serverList.isEmpty() ? serversFromEndpoint : serverList;
} | @Test
void testConstructWithAddrTryToRefresh()
throws InvocationTargetException, NoSuchMethodException, IllegalAccessException, NoSuchFieldException {
Properties properties = new Properties();
properties.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848,127.0.0.1:8849");
serverListManager = new ServerListManager(properties);
List<String> serverList = serverListManager.getServerList();
assertEquals(2, serverList.size());
assertEquals("127.0.0.1:8848", serverList.get(0));
assertEquals("127.0.0.1:8849", serverList.get(1));
mockThreadInvoke(serverListManager, false);
serverList = serverListManager.getServerList();
assertEquals(2, serverList.size());
assertEquals("127.0.0.1:8848", serverList.get(0));
assertEquals("127.0.0.1:8849", serverList.get(1));
} |
public static Environment of(@NonNull Properties props) {
var environment = new Environment();
environment.props = props;
return environment;
} | @Test
public void testOf() {
Environment environment = Environment.of("application.properties");
Optional<String> version = Objects.requireNonNull(environment).get("app.version");
String lang = environment.get("app.lang", "cn");
assertEquals("0.0.2", version.orElse(""));
assertEquals("cn", lang);
environment = Environment.of("classpath:application.properties");
version = Objects.requireNonNull(environment).get("app.version");
lang = environment.get("app.lang", "cn");
assertEquals("0.0.2", version.orElse(""));
assertEquals("cn", lang);
} |
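// Hedged sketch of the Properties overload shown above; get(key, default) is
// assumed to behave as in the resource-based test.
@Test
public void testOfProperties() {
    Properties props = new Properties();
    props.setProperty("app.lang", "en");
    Environment environment = Environment.of(props);
    assertEquals("en", environment.get("app.lang", "cn"));
}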
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
Configuration conf = FlinkOptions.fromMap(context.getCatalogTable().getOptions());
StoragePath path = new StoragePath(conf.getOptional(FlinkOptions.PATH).orElseThrow(() ->
new ValidationException("Option [path] should not be empty.")));
setupTableOptions(conf.getString(FlinkOptions.PATH), conf);
ResolvedSchema schema = context.getCatalogTable().getResolvedSchema();
setupConfOptions(conf, context.getObjectIdentifier(), context.getCatalogTable(), schema);
return new HoodieTableSource(
SerializableSchema.create(schema),
path,
context.getCatalogTable().getPartitionKeys(),
conf.getString(FlinkOptions.PARTITION_DEFAULT_NAME),
conf);
} | @Test
void testSetupHoodieKeyOptionsForSource() {
this.conf.setString(FlinkOptions.RECORD_KEY_FIELD, "dummyField");
this.conf.setString(FlinkOptions.KEYGEN_CLASS_NAME, "dummyKeyGenClass");
// definition with simple primary key and partition path
ResolvedSchema schema1 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.BIGINT())
.field("ts", DataTypes.TIMESTAMP(3))
.primaryKey("f0")
.build();
final MockContext sourceContext1 = MockContext.getInstance(this.conf, schema1, "f2");
final HoodieTableSource tableSource1 = (HoodieTableSource) new HoodieTableFactory().createDynamicTableSource(sourceContext1);
final Configuration conf1 = tableSource1.getConf();
assertThat(conf1.get(FlinkOptions.RECORD_KEY_FIELD), is("f0"));
assertThat(conf1.get(FlinkOptions.KEYGEN_CLASS_NAME), is("dummyKeyGenClass"));
// definition with complex primary keys and partition paths
this.conf.removeConfig(FlinkOptions.KEYGEN_CLASS_NAME);
ResolvedSchema schema2 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20).notNull())
.field("f2", DataTypes.TIMESTAMP(3))
.field("ts", DataTypes.TIMESTAMP(3))
.primaryKey("f0", "f1")
.build();
final MockContext sourceContext2 = MockContext.getInstance(this.conf, schema2, "f2");
final HoodieTableSource tableSource2 = (HoodieTableSource) new HoodieTableFactory().createDynamicTableSource(sourceContext2);
final Configuration conf2 = tableSource2.getConf();
assertThat(conf2.get(FlinkOptions.RECORD_KEY_FIELD), is("f0,f1"));
assertThat(conf2.get(FlinkOptions.KEYGEN_CLASS_NAME), is(ComplexAvroKeyGenerator.class.getName()));
// definition with complex primary keys and empty partition paths
this.conf.removeConfig(FlinkOptions.KEYGEN_CLASS_NAME);
final MockContext sourceContext3 = MockContext.getInstance(this.conf, schema2, "");
final HoodieTableSource tableSource3 = (HoodieTableSource) new HoodieTableFactory().createDynamicTableSource(sourceContext3);
final Configuration conf3 = tableSource3.getConf();
assertThat(conf3.get(FlinkOptions.RECORD_KEY_FIELD), is("f0,f1"));
assertThat(conf3.get(FlinkOptions.KEYGEN_CLASS_NAME), is(NonpartitionedAvroKeyGenerator.class.getName()));
} |
@Override
public void rotate(IndexSet indexSet) {
indexRotator.rotate(indexSet, this::shouldRotate);
} | @Test
public void shouldRotateThrowsISEIfIndexSetIdIsNull() {
when(indexSet.getConfig()).thenReturn(indexSetConfig);
when(indexSetConfig.id()).thenReturn(null);
when(indexSet.getNewestIndex()).thenReturn(IGNORED);
expectedException.expect(IllegalStateException.class);
expectedException.expectMessage("Index set ID must not be null or empty");
rotationStrategy.rotate(indexSet);
} |
public int getSubmitReservationFailedRetrieved() {
return numSubmitReservationFailedRetrieved.value();
} | @Test
public void testGetSubmitReservationRetrievedFailed() {
long totalBadBefore = metrics.getSubmitReservationFailedRetrieved();
badSubCluster.getSubmitReservationFailed();
Assert.assertEquals(totalBadBefore + 1,
metrics.getSubmitReservationFailedRetrieved());
} |
@Override
public Repositories listRepositories(String appUrl, AccessToken accessToken, String organization, @Nullable String query, int page, int pageSize) {
checkPageArgs(page, pageSize);
String searchQuery = "fork:true+org:" + organization;
if (query != null) {
searchQuery = query.replace(" ", "+") + "+" + searchQuery;
}
try {
Repositories repositories = new Repositories();
GetResponse response = githubApplicationHttpClient.get(appUrl, accessToken, String.format("/search/repositories?q=%s&page=%s&per_page=%s", searchQuery, page, pageSize));
Optional<GsonRepositorySearch> gsonRepositories = response.getContent().map(content -> GSON.fromJson(content, GsonRepositorySearch.class));
if (!gsonRepositories.isPresent()) {
return repositories;
}
repositories.setTotal(gsonRepositories.get().getTotalCount());
if (gsonRepositories.get().getItems() != null) {
repositories.setRepositories(gsonRepositories.get().getItems().stream()
.map(GsonGithubRepository::toRepository)
.toList());
}
return repositories;
} catch (Exception e) {
throw new IllegalStateException(format("Failed to list all repositories of '%s' accessible by user access token on '%s' using query '%s'", organization, appUrl, searchQuery),
e);
}
} | @Test
public void listRepositories_fail_if_pageSize_out_of_bounds() {
UserAccessToken token = new UserAccessToken("token");
assertThatThrownBy(() -> underTest.listRepositories(appUrl, token, "test", null, 1, 0))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("'pageSize' must be a value larger than 0 and smaller or equal to 100.");
assertThatThrownBy(() -> underTest.listRepositories("", token, "test", null, 1, 101))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("'pageSize' must be a value larger than 0 and smaller or equal to 100.");
} |
public TryDefinition doFinally() {
popBlock();
FinallyDefinition answer = new FinallyDefinition();
addOutput(answer);
pushBlock(answer);
return this;
} | @Test
public void doFinallyTest() {
TryDefinition tryDefinition = new TryDefinition();
CatchDefinition catchDefinition = new CatchDefinition();
FinallyDefinition finallyDefinition = new FinallyDefinition();
tryDefinition.addOutput(new ToDefinition("mock:1"));
catchDefinition.setExceptions(List.of("java.lang.Exception"));
catchDefinition.addOutput(new ToDefinition("mock:2"));
finallyDefinition.addOutput(new ToDefinition("mock:3"));
tryDefinition.addOutput(catchDefinition);
tryDefinition.addOutput(finallyDefinition);
Assertions.assertDoesNotThrow(tryDefinition::preCreateProcessor);
TryDefinition tryDefinition1 = tryDefinition.copyDefinition();
Assertions.assertDoesNotThrow(tryDefinition1::preCreateProcessor);
FinallyDefinition finallyDefinition1 = new FinallyDefinition();
finallyDefinition.addOutput(new ToDefinition("mock:4"));
tryDefinition.addOutput(finallyDefinition1);
Assertions.assertThrows(IllegalArgumentException.class, tryDefinition::preCreateProcessor);
} |
@Udf
public <T> Map<String, T> union(
@UdfParameter(description = "first map to union") final Map<String, T> map1,
@UdfParameter(description = "second map to union") final Map<String, T> map2) {
final List<Map<String, T>> nonNullInputs =
Stream.of(map1, map2)
.filter(Objects::nonNull)
.collect(Collectors.toList());
if (nonNullInputs.isEmpty()) {
return null;
}
final Map<String, T> output = new HashMap<>();
nonNullInputs
.forEach(output::putAll);
return output;
} | @Test
public void shouldUnionMapWithNulls() {
final Map<String, String> input1 = Maps.newHashMap();
input1.put("one", "apple");
input1.put("two", "banana");
input1.put("three", "cherry");
final Map<String, String> input2 = Maps.newHashMap();
input2.put("foo", "bar");
input2.put(null, null);
input2.put("baz", null);
final Map<String, String> result = udf.union(input1, input2);
assertThat(result.size(), is(6));
assertThat(result.get("two"), is("banana"));
assertThat(result.get("foo"), is("bar"));
assertThat(result.get("baz"), is(nullValue()));
assertThat(result.keySet(), containsInAnyOrder("one", "two", "three", null, "foo", "baz"));
} |
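// Hedged companion case (not from the original suite): putAll runs in argument
// order, so on a key collision the value from the second map wins.
@Test
public void shouldFavorSecondMapOnKeyCollision() {
    final Map<String, String> result = udf.union(
        Collections.singletonMap("k", "first"),
        Collections.singletonMap("k", "second"));
    assertThat(result.get("k"), is("second"));
}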
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
} | @Test
public void shouldGetTriFunctionVariadic() throws NoSuchMethodException {
final Type type = getClass().getDeclaredMethod("triFunctionType", TriFunction.class)
.getGenericParameterTypes()[0];
final ParamType schema = UdfUtil.getVarArgsSchemaFromType(type);
assertThat(schema, instanceOf(LambdaType.class));
assertThat(((LambdaType) schema).inputTypes(), equalTo(ImmutableList.of(ParamTypes.LONG, ParamTypes.INTEGER, ParamTypes.BOOLEAN)));
assertThat(((LambdaType) schema).returnType(), equalTo(ParamTypes.BOOLEAN));
} |
@Override
public TapiNepRef getNepRef(TapiNepRef nepRef) throws NoSuchElementException {
updateCache();
TapiNepRef ret = null;
try {
ret = tapiNepRefList.stream()
.filter(nepRef::equals)
.findFirst().get();
} catch (NoSuchElementException e) {
log.error("Nep not found of {}", nepRef);
throw e;
}
return ret;
} | @Test(expected = NoSuchElementException.class)
public void testGetNepRefWithSipIdWhenEmpty() {
tapiResolver.getNepRef(sipId);
} |
Set<String> getRetry() {
return retry;
} | @Test
public void determineRetryWhenSetToMultiple() {
Athena2QueryHelper helper = athena2QueryHelperWithRetry("exhausted,generic");
assertEquals(new HashSet<>(Arrays.asList("exhausted", "generic")), helper.getRetry());
} |
public static String join(Iterable<?> iterable, String separator) {
return join( iterable, separator, null );
} | @Test
public void testJoin() {
assertThat( Strings.join( new ArrayList<String>(), "-" ) ).isEqualTo( "" );
assertThat( Strings.join( Arrays.asList( "Hello", "World" ), "-" ) ).isEqualTo( "Hello-World" );
assertThat( Strings.join( Arrays.asList( "Hello" ), "-" ) ).isEqualTo( "Hello" );
} |
public CompletableFuture<LookupResult> createLookupResult(String candidateBroker, boolean authoritativeRedirect,
final String advertisedListenerName) {
CompletableFuture<LookupResult> lookupFuture = new CompletableFuture<>();
try {
checkArgument(StringUtils.isNotBlank(candidateBroker), "Lookup broker can't be null %s", candidateBroker);
String path = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + candidateBroker;
localBrokerDataCache.get(path).thenAccept(reportData -> {
if (reportData.isPresent()) {
LocalBrokerData lookupData = reportData.get();
if (StringUtils.isNotBlank(advertisedListenerName)) {
AdvertisedListener listener = lookupData.getAdvertisedListeners().get(advertisedListenerName);
if (listener == null) {
lookupFuture.completeExceptionally(
new PulsarServerException(
"the broker do not have " + advertisedListenerName + " listener"));
} else {
URI url = listener.getBrokerServiceUrl();
URI urlTls = listener.getBrokerServiceUrlTls();
lookupFuture.complete(new LookupResult(lookupData.getWebServiceUrl(),
lookupData.getWebServiceUrlTls(), url == null ? null : url.toString(),
urlTls == null ? null : urlTls.toString(), authoritativeRedirect));
}
} else {
lookupFuture.complete(new LookupResult(lookupData.getWebServiceUrl(),
lookupData.getWebServiceUrlTls(), lookupData.getPulsarServiceUrl(),
lookupData.getPulsarServiceUrlTls(), authoritativeRedirect));
}
} else {
lookupFuture.completeExceptionally(new MetadataStoreException.NotFoundException(path));
}
}).exceptionally(ex -> {
lookupFuture.completeExceptionally(ex);
return null;
});
} catch (Exception e) {
lookupFuture.completeExceptionally(e);
}
return lookupFuture;
} | @Test
public void testLoadReportDeserialize() throws Exception {
final String candidateBroker1 = "localhost:8000";
String broker1Url = "pulsar://localhost:6650";
final String candidateBroker2 = "localhost:3000";
String broker2Url = "pulsar://localhost:6660";
LoadReport lr = new LoadReport("http://" + candidateBroker1, null, broker1Url, null);
LocalBrokerData ld = new LocalBrokerData("http://" + candidateBroker2, null, broker2Url, null);
String path1 = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker1);
String path2 = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker2);
pulsar.getLocalMetadataStore().put(path1,
ObjectMapperFactory.getMapper().writer().writeValueAsBytes(lr),
Optional.empty(),
EnumSet.of(CreateOption.Ephemeral)
).join();
pulsar.getLocalMetadataStore().put(path2,
ObjectMapperFactory.getMapper().writer().writeValueAsBytes(ld),
Optional.empty(),
EnumSet.of(CreateOption.Ephemeral)
).join();
LookupResult result1 = pulsar.getNamespaceService().createLookupResult(candidateBroker1, false, null).get();
// update to new load manager
LoadManager oldLoadManager = pulsar.getLoadManager()
.getAndSet(new ModularLoadManagerWrapper(new ModularLoadManagerImpl()));
oldLoadManager.stop();
LookupResult result2 = pulsar.getNamespaceService().createLookupResult(candidateBroker2, false, null).get();
Assert.assertEquals(result1.getLookupData().getBrokerUrl(), broker1Url);
Assert.assertEquals(result2.getLookupData().getBrokerUrl(), broker2Url);
System.out.println(result2);
} |
@GetMapping(value = "/node/self/health")
@Secured(action = ActionTypes.READ, resource = "nacos/admin", signType = SignType.CONSOLE)
public Result<String> selfHealth() {
return Result.success(nacosClusterOperationService.selfHealth());
} | @Test
void testSelfHealth() {
String selfHealth = "UP";
when(nacosClusterOperationService.selfHealth()).thenReturn(selfHealth);
Result<String> result = nacosClusterControllerV2.selfHealth();
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
assertEquals(selfHealth, result.getData());
} |
protected void processFileContents(List<String> fileLines, String filePath, Engine engine) throws AnalysisException {
fileLines.stream()
.map(fileLine -> fileLine.split("(,|=>)"))
.map(requires -> {
//LOGGER.debug("perl scanning file:" + fileLine);
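// requires[0] begins with the literal keyword "requires" (8 characters);
// strip it, then drop the quotes around the module name.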
final String fqName = requires[0].substring(8)
.replace("'", "")
.replace("\"", "")
.trim();
final String version;
if (requires.length == 1) {
version = "0";
} else {
final Matcher matcher = VERSION_PATTERN.matcher(requires[1]);
if (matcher.find()) {
version = matcher.group(1);
} else {
version = "0";
}
}
final int pos = fqName.lastIndexOf("::");
final String namespace;
final String name;
if (pos > 0) {
namespace = fqName.substring(0, pos);
name = fqName.substring(pos + 2);
} else {
namespace = null;
name = fqName;
}
final Dependency dependency = new Dependency(true);
final File f = new File(filePath);
dependency.setFileName(f.getName());
dependency.setFilePath(filePath);
dependency.setActualFilePath(filePath);
dependency.setDisplayFileName("'" + fqName + "', '" + version + "'");
dependency.setEcosystem(Ecosystem.PERL);
dependency.addEvidence(EvidenceType.VENDOR, "cpanfile", "requires", fqName, Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.PRODUCT, "cpanfile", "requires", fqName, Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VERSION, "cpanfile", "requires", version, Confidence.HIGHEST);
Identifier id = null;
try {
//note - namespace might be null and that's okay.
final PackageURL purl = PackageURLBuilder.aPackageURL()
.withType("cpan")
.withNamespace(namespace)
.withName(name)
.withVersion(version)
.build();
id = new PurlIdentifier(purl, Confidence.HIGH);
} catch (MalformedPackageURLException ex) {
LOGGER.debug("Error building package url for " + fqName + "; using generic identifier instead.", ex);
id = new GenericIdentifier("cpan:" + fqName + "::" + version, Confidence.HIGH);
}
dependency.setVersion(version);
dependency.setName(fqName);
dependency.addSoftwareIdentifier(id);
//sha1sum is used for anchor links in the HtML report
dependency.setSha1sum(Checksum.getSHA1Checksum(id.getValue()));
return dependency;
}).forEachOrdered(engine::addDependency);
} | @Test
public void testProcessDefaultZero() throws AnalysisException {
List<String> dependencyLines = Arrays.asList("requires 'JSON'");
PerlCpanfileAnalyzer instance = new PerlCpanfileAnalyzer();
Engine engine = new Engine(getSettings());
instance.processFileContents(dependencyLines, "./cpanfile", engine);
assertEquals(1, engine.getDependencies().length);
Dependency dep = engine.getDependencies()[0];
assertEquals("'JSON', '0'", dep.getDisplayFileName());
assertEquals("0", dep.getVersion());
assertEquals("pkg:cpan/JSON@0", dep.getSoftwareIdentifiers().iterator().next().getValue());
} |
@Override
public OAuth2CodeDO createAuthorizationCode(Long userId, Integer userType, String clientId,
List<String> scopes, String redirectUri, String state) {
OAuth2CodeDO codeDO = new OAuth2CodeDO().setCode(generateCode())
.setUserId(userId).setUserType(userType)
.setClientId(clientId).setScopes(scopes)
.setExpiresTime(LocalDateTime.now().plusSeconds(TIMEOUT))
.setRedirectUri(redirectUri).setState(state);
oauth2CodeMapper.insert(codeDO);
return codeDO;
} | @Test
public void testCreateAuthorizationCode() {
// prepare parameters
Long userId = randomLongId();
Integer userType = RandomUtil.randomEle(UserTypeEnum.values()).getValue();
String clientId = randomString();
List<String> scopes = Lists.newArrayList("read", "write");
String redirectUri = randomString();
String state = randomString();
// invoke
OAuth2CodeDO codeDO = oauth2CodeService.createAuthorizationCode(userId, userType, clientId,
scopes, redirectUri, state);
// assert
OAuth2CodeDO dbCodeDO = oauth2CodeMapper.selectByCode(codeDO.getCode());
assertPojoEquals(codeDO, dbCodeDO, "createTime", "updateTime", "deleted");
assertEquals(userId, codeDO.getUserId());
assertEquals(userType, codeDO.getUserType());
assertEquals(clientId, codeDO.getClientId());
assertEquals(scopes, codeDO.getScopes());
assertEquals(redirectUri, codeDO.getRedirectUri());
assertEquals(state, codeDO.getState());
assertFalse(DateUtils.isExpired(codeDO.getExpiresTime()));
} |
public ConnectionFileName createChildName( String name, FileType type ) {
String childAbsPath = getConnectionFileNameUtils().ensureTrailingSeparator( getPath() ) + name;
return new ConnectionFileName( connection, childAbsPath, type );
} | @Test
public void testCreateChildNameOfSubFolderReturnsFileNameWithCorrectPath() {
ConnectionFileName parentFileName = new ConnectionFileName( "connection", "/folder", FileType.FOLDER );
assertEquals( "pvfs://connection/folder", parentFileName.getURI() );
ConnectionFileName childFileName = parentFileName.createChildName( "child", FileType.FOLDER );
assertEquals( "pvfs://connection/folder/child", childFileName.getURI() );
} |
public boolean greaterThanOrEqualTo(final int major, final int minor, final int series) {
if (this.major < major) {
return false;
}
if (this.major > major) {
return true;
}
if (this.minor < minor) {
return false;
}
if (this.minor > minor) {
return true;
}
return this.series >= series;
} | @Test
void assertGreaterThan() {
MySQLServerVersion actual = new MySQLServerVersion("5.7.12");
assertTrue(actual.greaterThanOrEqualTo(4, 0, 0));
assertTrue(actual.greaterThanOrEqualTo(5, 6, 0));
assertTrue(actual.greaterThanOrEqualTo(5, 7, 11));
} |
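// Hedged companion case (not from the original suite): raising any of the
// three bounds past 5.7.12 flips the comparison.
@Test
void assertNotGreaterThan() {
    MySQLServerVersion actual = new MySQLServerVersion("5.7.12");
    assertFalse(actual.greaterThanOrEqualTo(6, 0, 0));
    assertFalse(actual.greaterThanOrEqualTo(5, 8, 0));
    assertFalse(actual.greaterThanOrEqualTo(5, 7, 13));
}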
@VisibleForTesting
int getNumAllocatedSlotsOfGroup(long groupId) {
return allocatedGroupIdToSlotCount.getOrDefault(groupId, 0);
} | @Test
public void testReleaseSlot() throws InterruptedException {
DefaultSlotSelectionStrategy strategy =
new DefaultSlotSelectionStrategy(() -> false, (groupId) -> false);
SlotTracker slotTracker = new SlotTracker(ImmutableList.of(strategy));
LogicalSlot slot1 = generateSlot(1, 0);
LogicalSlot slot2 = generateSlot(1, 0);
// 1.1 require slot1.
assertThat(slotTracker.requireSlot(slot1)).isTrue();
assertThat(slotTracker.getSlots()).hasSize(1);
// 1.2 release slot1.
slotTracker.releaseSlot(slot1.getSlotId());
assertThat(slotTracker.getNumAllocatedSlots()).isZero();
assertThat(strategy.getNumAllocatedSlotsOfGroup(0)).isZero();
// 2.1 require and allocate slot2.
assertThat(slotTracker.requireSlot(slot2)).isTrue();
assertThat(slotTracker.getSlots()).hasSize(1);
slotTracker.allocateSlot(slot2);
assertThat(slotTracker.getNumAllocatedSlots()).isOne();
assertThat(strategy.getNumAllocatedSlotsOfGroup(0)).isOne();
// 2.2 require slot1.
assertThat(slotTracker.requireSlot(slot1)).isTrue();
assertThat(slotTracker.getSlots()).hasSize(2);
// 2.3 release slot1.
slotTracker.releaseSlot(slot1.getSlotId());
assertThat(slotTracker.getNumAllocatedSlots()).isOne();
assertThat(strategy.getNumAllocatedSlotsOfGroup(0)).isOne();
// 2.4 release slot2
Thread.sleep(1200); // give enough interval time to trigger sweepEmptyGroups.
assertThat(slotTracker.releaseSlot(slot2.getSlotId())).isSameAs(slot2);
assertThat(strategy.getNumAllocatedSlotsOfGroup(0)).isZero();
} |
@Override
public boolean mayHaveMergesPending(String bucketSpace, int contentNodeIndex) {
if (!stats.hasUpdatesFromAllDistributors()) {
return true;
}
ContentNodeStats nodeStats = stats.getStats().getNodeStats(contentNodeIndex);
if (nodeStats != null) {
ContentNodeStats.BucketSpaceStats bucketSpaceStats = nodeStats.getBucketSpace(bucketSpace);
return (bucketSpaceStats != null &&
bucketSpaceStats.mayHaveBucketsPending(minMergeCompletionRatio));
}
return true;
} | @Test
void valid_bucket_space_stats_may_have_merges_pending() {
Fixture f = Fixture.fromBucketsPending(1);
assertTrue(f.mayHaveMergesPending("default", 1));
} |
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<StorageLocation> dataDirs, StartupOption startOpt) throws IOException {
if (addStorageLocations(datanode, nsInfo, dataDirs, startOpt).isEmpty()) {
throw new IOException("All specified directories have failed to load.");
}
} | @Test
public void testRecoverTransitionReadFailure() throws IOException {
final int numLocations = 3;
List<StorageLocation> locations =
createStorageLocations(numLocations, true);
try {
storage.recoverTransitionRead(mockDN, nsInfo, locations, START_OPT);
fail("An IOException should throw: all StorageLocations are NON_EXISTENT");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"All specified directories have failed to load.", e);
}
assertEquals(0, storage.getNumStorageDirs());
} |
List<CounterRequestContext> getOrderedRootCurrentContexts() {
final List<CounterRequestContext> contextList = new ArrayList<>(
rootCurrentContextsByThreadId.size());
for (final CounterRequestContext rootCurrentContext : rootCurrentContextsByThreadId
.values()) {
contextList.add(rootCurrentContext.clone());
}
if (contextList.size() > 1) {
contextList.sort(new CounterRequestContextComparator(System.currentTimeMillis()));
}
return contextList;
} | @Test
public void testGetOrderedRootCurrentContexts() {
counter.unbindContext();
final String requestName = "root context";
final int nbRootContexts = 100; // 100 to cover every case of the sort comparator
bindRootContexts(requestName, counter, nbRootContexts);
// addRequest so that the map branch in CounterRequestContext.clone is taken
counter.addRequest(requestName, 100, 100, 100, false, 1000);
final List<CounterRequestContext> rootContexts = counter.getOrderedRootCurrentContexts();
assertEquals("contexts size", nbRootContexts + 1, rootContexts.size());
assertEquals("context name", requestName, rootContexts.get(0).getRequestName());
final String string = rootContexts.get(0).toString();
assertNotNull("toString not null", string);
assertFalse("toString not empty", string.isEmpty());
} |
@Override
public Endpoint<Http2LocalFlowController> local() {
return localEndpoint;
} | @Test
public void clientLocalIncrementAndGetStreamShouldRespectOverflow() throws Http2Exception {
incrementAndGetStreamShouldRespectOverflow(client.local(), MAX_VALUE);
} |
public static boolean parseBoolean(final String value) {
return booleanStringMatches(value, true);
} | @Test
public void shouldParseTrueAsTrue() {
assertThat(SqlBooleans.parseBoolean("tRue"), is(true));
assertThat(SqlBooleans.parseBoolean("trU"), is(true));
assertThat(SqlBooleans.parseBoolean("tr"), is(true));
assertThat(SqlBooleans.parseBoolean("T"), is(true));
assertThat(SqlBooleans.parseBoolean("t"), is(true));
} |
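// Hedged sketch: matching is assumed to be prefix-based against "true", so
// strings that are not prefixes of it should parse as false.
@Test
public void shouldNotParseNonPrefixesAsTrue() {
    assertThat(SqlBooleans.parseBoolean("truex"), is(false));
    assertThat(SqlBooleans.parseBoolean("false"), is(false));
}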
@SuppressWarnings("unchecked")
// visible for testing
public StitchRequestBody createStitchRequestBody(final Message inMessage) {
if (inMessage.getBody() instanceof StitchRequestBody) {
return createStitchRequestBodyFromStitchRequestBody(inMessage.getBody(StitchRequestBody.class), inMessage);
}
if (inMessage.getBody() instanceof StitchMessage) {
return createStitchRequestBodyFromStitchMessages(Collections.singletonList(inMessage.getBody(StitchMessage.class)),
inMessage);
}
if (inMessage.getBody() instanceof Iterable) {
return createStitchRequestBodyFromIterable(inMessage.getBody(Iterable.class), inMessage);
}
if (inMessage.getBody() instanceof Map) {
return createStitchRecordFromMap(inMessage.getBody(Map.class), inMessage);
}
throw new IllegalArgumentException("Message body data `" + inMessage.getBody() + "` type is not supported");
} | @Test
void testIfCreateIfMapSet() {
final StitchConfiguration configuration = new StitchConfiguration();
final Map<String, Object> properties = new LinkedHashMap<>();
properties.put("id", Collections.singletonMap("type", "integer"));
properties.put("name", Collections.singletonMap("type", "string"));
properties.put("age", Collections.singletonMap("type", "integer"));
properties.put("has_magic", Collections.singletonMap("type", "boolean"));
final Map<String, Object> message = new LinkedHashMap<>();
message.put(StitchMessage.DATA, Collections.singletonMap("id", 2));
message.put(StitchMessage.SEQUENCE, 1L);
final Map<String, Object> data = new LinkedHashMap<>();
data.put(StitchRequestBody.TABLE_NAME, "my_table");
data.put(StitchRequestBody.SCHEMA, Collections.singletonMap("properties", properties));
data.put(StitchRequestBody.MESSAGES,
Collections.singletonList(message));
data.put(StitchRequestBody.KEY_NAMES, Collections.singletonList("test_key"));
final Exchange exchange = new DefaultExchange(context);
exchange.getMessage().setBody(data);
final StitchProducerOperations operations = new StitchProducerOperations(new TestClient(), configuration);
final String createdJson
= JsonUtils.convertMapToJson(operations.createStitchRequestBody(exchange.getMessage()).toMap());
assertEquals("{\"table_name\":\"my_table\",\"schema\":{\"properties\":{\"id\":{\"type\":\"integer\"},"
+ "\"name\":{\"type\":\"string\"},\"age\":{\"type\":\"integer\"},\"has_magic\""
+ ":{\"type\":\"boolean\"}}},\"messages\":[{\"action\":\"upsert\",\"sequence\":1,"
+ "\"data\":{\"id\":2}}],\"key_names\":[\"test_key\"]}",
createdJson);
} |
@Override
public void accept(Props props) {
if (isClusterEnabled(props)) {
checkClusterProperties(props);
}
} | @Test
public void accept_throws_MessageException_if_a_cluster_forbidden_property_is_defined_in_a_cluster_search_node() {
TestAppSettings settings = new TestAppSettings(of(
CLUSTER_ENABLED.getKey(), "true",
CLUSTER_NODE_TYPE.getKey(), "search",
"sonar.search.host", "localhost"));
Props props = settings.getProps();
ClusterSettings clusterSettings = new ClusterSettings(network);
assertThatThrownBy(() -> clusterSettings.accept(props))
.isInstanceOf(MessageException.class)
.hasMessage("Properties [sonar.search.host] are not allowed when running SonarQube in cluster mode.");
} |
@Override
public String baseUrl() {
return "/";
} | @Test
public void baseUrl_is_always_slash() {
assertThat(underTest.baseUrl()).isEqualTo("/");
} |
@Override
public void validateTransientQuery(
final SessionConfig config,
final ExecutionPlan executionPlan,
final Collection<QueryMetadata> runningQueries
) {
validateCacheBytesUsage(
runningQueries.stream()
.filter(q -> q instanceof TransientQueryMetadata)
.collect(Collectors.toList()),
config,
config.getConfig(false)
.getLong(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING_TRANSIENT)
);
} | @Test
public void shouldLimitBufferCacheLimitForTransientQueries() {
// Given:
final SessionConfig config = configWithLimitsTransient(5, OptionalLong.of(30));
// When/Then:
assertThrows(
KsqlException.class,
() -> queryValidator.validateTransientQuery(config, plan, queries)
);
} |
public static <T> PCollections<T> pCollections() {
return new PCollections<>();
} | @Test
public void testIncompatibleWindowFnPropagationFailure() {
p.enableAbandonedNodeEnforcement(false);
PCollection<String> input1 =
p.apply("CreateInput1", Create.of("Input1"))
.apply("Window1", Window.into(FixedWindows.of(Duration.standardMinutes(1))));
PCollection<String> input2 =
p.apply("CreateInput2", Create.of("Input2"))
.apply("Window2", Window.into(FixedWindows.of(Duration.standardMinutes(2))));
try {
PCollectionList.of(input1).and(input2).apply(Flatten.pCollections());
Assert.fail("Exception should have been thrown");
} catch (IllegalStateException e) {
Assert.assertTrue(
e.getMessage().startsWith("Inputs to Flatten had incompatible window windowFns"));
}
} |
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
return doSharding(availableTargetNames, Range.singleton(shardingValue.getValue())).stream().findFirst().orElse(null);
} | @Test
void assertRangeDoShardingByHours() {
int stepAmount = 2;
IntervalShardingAlgorithm algorithm = createAlgorithm("HH:mm:ss.SSS", "02:00:00.000",
"13:00:00.000", "HHmm", stepAmount, "Hours");
Collection<String> availableTablesForJDBCTimeDataSources = new LinkedList<>();
for (int i = 2; i < 13; i++) {
availableTablesForJDBCTimeDataSources.add(String.format("t_order_%02d%02d", i, 0));
}
Collection<String> actualAsLocalTime = algorithm.doSharding(availableTablesForJDBCTimeDataSources,
createShardingValue(LocalTime.of(2, 25, 27), LocalTime.of(12, 25, 27)));
assertThat(actualAsLocalTime.size(), is(6));
Collection<String> actualAsOffsetTime = algorithm.doSharding(availableTablesForJDBCTimeDataSources,
createShardingValue(OffsetTime.of(2, 25, 27, 0, OffsetDateTime.now().getOffset()),
OffsetTime.of(12, 25, 27, 0, OffsetDateTime.now().getOffset())));
assertThat(actualAsOffsetTime.size(), is(6));
} |
public void disableAutoProvisioning(String configId) {
setProvisioningMode(configId, "JIT");
} | @Test
public void disableAutoProvisioning_shouldNotFail() {
assertThatNoException().isThrownBy(() -> gitlabConfigurationService.disableAutoProvisioning("configId"));
} |
public void close() {
inFlightUnloadRequest.forEach((bundle, future) -> {
if (!future.isDone()) {
String msg = String.format("Unloading bundle: %s, but the unload manager already closed.", bundle);
log.warn(msg);
future.completeExceptionally(new IllegalStateException(msg));
}
});
inFlightUnloadRequest.clear();
} | @Test
public void testClose() throws IllegalAccessException {
UnloadCounter counter = new UnloadCounter();
UnloadManager manager = new UnloadManager(counter, "mockBrokerId");
var unloadDecision =
new UnloadDecision(new Unload("broker-1", "bundle-1"), Success, Admin);
CompletableFuture<Void> future =
manager.waitAsync(CompletableFuture.completedFuture(null),
"bundle-1", unloadDecision,5, TimeUnit.SECONDS);
Map<String, CompletableFuture<Void>> inFlightUnloadRequestMap = getInFlightUnloadRequestMap(manager);
assertEquals(inFlightUnloadRequestMap.size(), 1);
manager.close();
assertEquals(inFlightUnloadRequestMap.size(), 0);
try {
future.get();
fail();
} catch (Exception ex) {
assertTrue(ex.getCause() instanceof IllegalStateException);
}
assertEquals(counter.getBreakdownCounters().get(Failure).get(Unknown).get(), 1);
} |