focal_method | test_case
---|---
public Rule<ProjectNode> projectNodeRule()
{
return new PullUpExpressionInLambdaProjectNodeRule();
} | @Test
public void testInvalidSwitchWhenExpression()
{
tester().assertThat(new PullUpExpressionInLambdaRules(getFunctionManager()).projectNodeRule())
.setSystemProperty(PULL_EXPRESSION_FROM_LAMBDA_ENABLED, "true")
.on(p ->
{
p.variable("arr", new ArrayType(VARCHAR));
p.variable("arr2", new ArrayType(VARCHAR));
return p.project(
Assignments.builder().put(p.variable("expr", VARCHAR), p.rowExpression(
"transform(arr, x -> concat(case when contains(arr2, x) then '*' when arr2 is null then '+' else ' ' end, x))")).build(),
p.values(p.variable("arr", new ArrayType(VARCHAR)), p.variable("arr2", new ArrayType(VARCHAR))));
}).doesNotFire();
} |
public static void main(String[] args) {
var king = new OrcKing();
king.makeRequest(new Request(RequestType.DEFEND_CASTLE, "defend castle"));
king.makeRequest(new Request(RequestType.TORTURE_PRISONER, "torture prisoner"));
king.makeRequest(new Request(RequestType.COLLECT_TAX, "collect tax"));
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
@Override
public long getNextTimeOffset(int timesShown) {
final Calendar calendar = mCalendarProvider.getInstance();
final int dayOfYear = calendar.get(Calendar.DAY_OF_YEAR);
if (dayOfYear < mFirstNoticeStart) {
// should start showing at May start
return ONE_DAY * (mFirstNoticeStart - dayOfYear);
} else if (dayOfYear < mFirstNoticeEnd) {
// inside notice period. Notify every day
return ONE_DAY;
} else if (dayOfYear < mSecondNoticeStart) {
// should start showing at November start
return ONE_DAY * (mSecondNoticeStart - dayOfYear);
} else if (dayOfYear < mSecondNoticeEnd) {
// inside notice period. Notify every day
return ONE_DAY;
} else {
// till next May
return ONE_DAY * (365 + mFirstNoticeStart - dayOfYear);
}
} | @Test
public void testHappyPath() {
Calendar instance = Calendar.getInstance();
PeriodsTimeProvider underTest = new PeriodsTimeProvider(() -> instance, 100, 115, 200, 230);
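// Notice windows per the constructor args above: days 100-115 (first) and days 200-230 (second).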
instance.set(2020, Calendar.JANUARY, 1, 10, 10, 10);
Assert.assertEquals(99 * ONE_DAY, underTest.getNextTimeOffset(0));
instance.set(Calendar.DAY_OF_YEAR, 98);
Assert.assertEquals(2 * ONE_DAY, underTest.getNextTimeOffset(1));
instance.set(Calendar.DAY_OF_YEAR, 100);
Assert.assertEquals(1 * ONE_DAY, underTest.getNextTimeOffset(2));
instance.set(Calendar.DAY_OF_YEAR, 101);
Assert.assertEquals(1 * ONE_DAY, underTest.getNextTimeOffset(3));
instance.set(Calendar.DAY_OF_YEAR, 112);
Assert.assertEquals(1 * ONE_DAY, underTest.getNextTimeOffset(4));
instance.set(Calendar.DAY_OF_YEAR, 114);
Assert.assertEquals(1 * ONE_DAY, underTest.getNextTimeOffset(5));
instance.set(Calendar.DAY_OF_YEAR, 115);
Assert.assertEquals(85 * ONE_DAY, underTest.getNextTimeOffset(6));
instance.set(Calendar.DAY_OF_YEAR, 116);
Assert.assertEquals(84 * ONE_DAY, underTest.getNextTimeOffset(1));
instance.set(Calendar.DAY_OF_YEAR, 198);
Assert.assertEquals(2 * ONE_DAY, underTest.getNextTimeOffset(2));
instance.set(Calendar.DAY_OF_YEAR, 200);
Assert.assertEquals(1 * ONE_DAY, underTest.getNextTimeOffset(3));
instance.set(Calendar.DAY_OF_YEAR, 204);
Assert.assertEquals(1 * ONE_DAY, underTest.getNextTimeOffset(4));
instance.set(Calendar.DAY_OF_YEAR, 228);
Assert.assertEquals(1 * ONE_DAY, underTest.getNextTimeOffset(5));
instance.set(Calendar.DAY_OF_YEAR, 230);
Assert.assertEquals((365 - 230 + 100) * ONE_DAY, underTest.getNextTimeOffset(6));
} |
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
getRawMapping().setConf(conf);
} | @Test
public void testResolve() throws IOException {
File mapFile = File.createTempFile(getClass().getSimpleName() +
".testResolve", ".txt");
Files.asCharSink(mapFile, StandardCharsets.UTF_8).write(
hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n");
mapFile.deleteOnExit();
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile.getCanonicalPath());
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals("/rack1", result.get(0));
assertEquals("/rack2", result.get(1));
} |
public String create(T entity) {
ensureValidScope(entity);
return insertedIdAsString(collection.insertOne(entity));
} | @Test
void testInvalidScopeFails() {
// Invalid scopes not registered with the EntityScopeService should not be allowed.
final ScopedDTO invalidScoped = ScopedDTO.builder().name("test").scope("INVALID").build();
assertThatThrownBy(() -> scopedEntityMongoUtils.create(invalidScoped))
.isExactlyInstanceOf(IllegalArgumentException.class);
assertThat(collection.countDocuments()).isEqualTo(0L);
} |
public static String escapeIdentifier(String identifier) {
return '`' + identifier.replace("`", "``") + '`';
} | @Test
public void testEscapeIdentifierNoSpecialCharacters() {
assertEquals("`asdasd asd ad`", SingleStoreUtil.escapeIdentifier("asdasd asd ad"));
} |
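A hedged companion sketch, not part of the source dataset: since the focal method doubles any embedded backtick before wrapping, an identifier containing one should escape as below (hypothetical test name; assumes the same SingleStoreUtil and JUnit assertEquals already in scope).
@Test
public void testEscapeIdentifierWithEmbeddedBacktick() {
    // "a`b" -> inner backtick doubled, then wrapped: `a``b`
    assertEquals("`a``b`", SingleStoreUtil.escapeIdentifier("a`b"));
}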
public static <T extends SearchablePlugin> List<T> search(Collection<T> searchablePlugins, String query)
{
return searchablePlugins.stream()
.filter(plugin -> Text.matchesSearchTerms(SPLITTER.split(query.toLowerCase()), plugin.getKeywords()))
.sorted(comparator(query))
.collect(Collectors.toList());
} | @Test
public void searchOrdersPinnedItemsFirstIfThereAreNoExactMatches()
{
List<SearchablePlugin> results = PluginSearch.search(plugins.values(), "integrat");
assertThat(results, contains(plugins.get("Grand Exchange"), plugins.get("Discord")));
} |
public static String createApiVersionUrl(String baseUrl, ExtensionJson json) {
return createApiVersionUrl(baseUrl, json.namespace, json.name, json.targetPlatform, json.version);
} | @Test
public void testCreateApiVersionUrlNoTarget() throws Exception {
var baseUrl = "http://localhost/";
assertThat(UrlUtil.createApiVersionUrl(baseUrl, "foo", "bar", null, "1.0.0"))
.isEqualTo("http://localhost/api/foo/bar/1.0.0");
} |
@ApiOperation(value = "Get Device Profile names (getDeviceProfileNames)",
notes = "Returns a set of unique device profile names owned by the tenant."
+ TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/deviceProfile/names", method = RequestMethod.GET)
@ResponseBody
public List<EntityInfo> getDeviceProfileNames(
@Parameter(description = "Flag indicating whether to retrieve exclusively the names of device profiles that are referenced by tenant's devices.")
@RequestParam(value = "activeOnly", required = false, defaultValue = "false") boolean activeOnly) throws ThingsboardException {
SecurityUser user = getCurrentUser();
TenantId tenantId = user.getTenantId();
return checkNotNull(deviceProfileService.findDeviceProfileNamesByTenantId(tenantId, activeOnly));
} | @Test
public void testGetDeviceProfileNames() throws Exception {
var pageLink = new PageLink(Integer.MAX_VALUE);
var deviceProfileInfos = doGetTypedWithPageLink("/api/deviceProfileInfos?",
new TypeReference<PageData<DeviceProfileInfo>>() {
}, pageLink);
Assert.assertNotNull("Device Profile Infos page data is null!", deviceProfileInfos);
Assert.assertEquals("Device Profile Infos Page data is empty! Expected to have default profile created!", 1, deviceProfileInfos.getTotalElements());
List<EntityInfo> expectedDeviceProfileNames = deviceProfileInfos.getData().stream()
.map(info -> new EntityInfo(info.getId(), info.getName()))
.sorted(Comparator.comparing(EntityInfo::getName))
.collect(Collectors.toList());
var deviceProfileNames = doGetTyped("/api/deviceProfile/names", new TypeReference<List<EntityInfo>>() {
});
Assert.assertNotNull("Device Profile Names list is null!", deviceProfileNames);
Assert.assertFalse("Device Profile Names list is empty!", deviceProfileNames.isEmpty());
Assert.assertEquals(expectedDeviceProfileNames, deviceProfileNames);
Assert.assertEquals(1, deviceProfileNames.size());
Assert.assertEquals(DEFAULT_DEVICE_TYPE, deviceProfileNames.get(0).getName());
int count = 3;
for (int i = 0; i < count; i++) {
Device device = new Device();
device.setName("DeviceName" + i);
device.setType("DeviceProfileName" + i);
Device savedDevice = doPost("/api/device", device, Device.class);
Assert.assertNotNull(savedDevice);
}
deviceProfileInfos = doGetTypedWithPageLink("/api/deviceProfileInfos?",
new TypeReference<>() {
}, pageLink);
Assert.assertNotNull("Device Profile Infos page data is null!", deviceProfileInfos);
Assert.assertEquals("Device Profile Infos Page data is empty! Expected to have default profile created + count value!", 1 + count, deviceProfileInfos.getTotalElements());
expectedDeviceProfileNames = deviceProfileInfos.getData().stream()
.map(info -> new EntityInfo(info.getId(), info.getName()))
.sorted(Comparator.comparing(EntityInfo::getName))
.collect(Collectors.toList());
deviceProfileNames = doGetTyped("/api/deviceProfile/names", new TypeReference<>() {
});
Assert.assertNotNull("Device Profile Names list is null!", deviceProfileNames);
Assert.assertFalse("Device Profile Names list is empty!", deviceProfileNames.isEmpty());
Assert.assertEquals(expectedDeviceProfileNames, deviceProfileNames);
Assert.assertEquals(1 + count, deviceProfileNames.size());
deviceProfileNames = doGetTyped("/api/deviceProfile/names?activeOnly=true", new TypeReference<>() {
});
Assert.assertNotNull("Device Profile Names list is null!", deviceProfileNames);
Assert.assertFalse("Device Profile Names list is empty!", deviceProfileNames.isEmpty());
var expectedDeviceProfileNamesWithoutDefault = expectedDeviceProfileNames.stream()
.filter(entityInfo -> !entityInfo.getName().equals(DEFAULT_DEVICE_TYPE))
.collect(Collectors.toList());
Assert.assertEquals(expectedDeviceProfileNamesWithoutDefault, deviceProfileNames);
Assert.assertEquals(count, deviceProfileNames.size());
} |
public static List<Endpoint> listenerListToEndPoints(
String input,
Map<ListenerName, SecurityProtocol> nameToSecurityProto
) {
return listenerListToEndPoints(input, n -> {
SecurityProtocol result = nameToSecurityProto.get(n);
if (result == null) {
throw new IllegalArgumentException("No security protocol defined for listener " + n.value());
}
return result;
});
} | @Test
public void testAnotherListenerListToEndPointsWithNonDefaultProtoMap() {
Map<ListenerName, SecurityProtocol> map = new HashMap<>();
map.put(new ListenerName("CONTROLLER"), SecurityProtocol.PLAINTEXT);
assertEquals(Arrays.asList(
new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "example.com", 9093)),
SocketServerConfigs.listenerListToEndPoints("CONTROLLER://example.com:9093",
map));
} |
public static int stringHash(Client client) {
String s = buildUniqueString(client);
if (s == null) {
return 0;
}
return s.hashCode();
} | @Test
void testRevision0() {
assertEquals(-1713189600L, DistroUtils.stringHash(client0));
} |
@Override
public boolean acceptsURL(String url) throws SQLException {
return DriverUri.acceptsURL(url);
} | @Test
public void testDriverInvalidUri() throws Exception {
// Invalid prefix
for (String invalidPrefixUri :
Arrays.asList(
"flink://localhost:8888/catalog_name/database_name?sessionId=123&key1=val1&key2=val2",
"jdbc::flink://localhost:8888/catalog_name/database_name?sessionId=123&key1=val1&key2=val2",
"jdbc::flink//localhost:8888/catalog_name/database_name?sessionId=123&key1=val1&key2=val2")) {
assertFalse(DriverUri.acceptsURL(invalidPrefixUri));
assertThrowsExactly(
SQLException.class,
() -> DriverUri.create(invalidPrefixUri, new Properties()),
String.format(
"Flink JDBC URL[%s] must start with [jdbc:flink:]", invalidPrefixUri));
}
// Without host or port
String noPortUri = "jdbc:flink://localhost/catalog";
assertThrowsExactly(
SQLException.class,
() -> DriverUri.create(noPortUri, new Properties()),
String.format("No port specified in uri: %s", noPortUri));
Properties properties = new Properties();
properties.setProperty("key3", "val33");
for (String dupPropUri :
Arrays.asList(
"jdbc:flink://localhost:8088/catalog?key1=val1&key2=val2&key3=val3",
"jdbc:flink://localhost:8088/catalog?key1=val1&key2=val2&key1=val3")) {
assertThrowsExactly(
SQLException.class,
() -> DriverUri.create(dupPropUri, properties),
"Connection property 'key3' is both in the URL and an argument");
}
} |
public static int checkLessThan(int n, int expected, String name)
{
if (n >= expected)
{
throw new IllegalArgumentException(name + ": " + n + " (expected: < " + expected + ')');
}
return n;
} | @Test(expected = IllegalArgumentException.class)
public void checkLessThanMustFailIfArgumentIsGreaterThanExpected()
{
RangeUtil.checkLessThan(1, 0, "var");
} |
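A hedged boundary-case sketch, not in the source: checkLessThan rejects with >=, so an argument equal to the bound must also throw (hypothetical test name; same JUnit 4 expected-exception style as above).
@Test(expected = IllegalArgumentException.class)
public void checkLessThanMustFailIfArgumentEqualsExpected()
{
    RangeUtil.checkLessThan(0, 0, "var");
}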
public static Date string2Date(String date) {
if (date == null) {
return null;
}
int year = Integer.parseInt(date.substring(0, 2));
int month = Integer.parseInt(date.substring(2, 4));
int day = Integer.parseInt(date.substring(4, 6));
int hour = Integer.parseInt(date.substring(6, 8));
int minute = Integer.parseInt(date.substring(8, 10));
int second = Integer.parseInt(date.substring(10, 12));
Calendar cal = Calendar.getInstance();
cal.set(convertTwoDigitYear(year), month - 1, day, hour, minute, second);
cal.set(Calendar.MILLISECOND, 0);
return cal.getTime();
} | @Test
public void string2Date() {
Date date = SmppUtils.string2Date("-300101010000004+");
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
assertEquals(5, calendar.get(Calendar.YEAR));
assertEquals(11, calendar.get(Calendar.MONTH));
assertEquals(10, calendar.get(Calendar.DAY_OF_MONTH));
assertEquals(10, calendar.get(Calendar.HOUR));
assertEquals(10, calendar.get(Calendar.MINUTE));
assertEquals(0, calendar.get(Calendar.SECOND));
} |
@VisibleForTesting
static boolean isCompressed(String contentEncoding) {
return contentEncoding.contains(HttpHeaderValues.GZIP.toString())
|| contentEncoding.contains(HttpHeaderValues.DEFLATE.toString())
|| contentEncoding.contains(HttpHeaderValues.BR.toString())
|| contentEncoding.contains(HttpHeaderValues.COMPRESS.toString());
} | @Test
void detectsBR() {
assertTrue(HttpUtils.isCompressed("br"));
} |
@SuppressWarnings("rawtypes")
public Result waitUntilDone(Config config) {
return finishOrTimeout(
config,
new Supplier[] {() -> false},
() -> jobIsDone(config.project(), config.region(), config.jobId()));
} | @Test
public void testWaitUntilDoneTimeout() throws IOException {
when(client.getJobStatus(any(), any(), any())).thenReturn(JobState.RUNNING);
Result result = new PipelineOperator(client).waitUntilDone(DEFAULT_CONFIG);
assertThat(result).isEqualTo(Result.TIMEOUT);
} |
static boolean shouldUpdate(AmazonInfo newInfo, AmazonInfo oldInfo) {
if (newInfo.getMetadata().isEmpty()) {
logger.warn("Newly resolved AmazonInfo is empty, skipping an update cycle");
} else if (!newInfo.equals(oldInfo)) {
if (isBlank(newInfo.get(AmazonInfo.MetaDataKey.instanceId))) {
logger.warn("instanceId is blank, skipping an update cycle");
return false;
} else if (isBlank(newInfo.get(AmazonInfo.MetaDataKey.localIpv4))) {
logger.warn("localIpv4 is blank, skipping an update cycle");
return false;
} else {
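// Key-set diff: after removing the intersection (held by the variable named "union"), newKeys contains additions and oldKeys contains removals.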
Set<String> newKeys = new HashSet<>(newInfo.getMetadata().keySet());
Set<String> oldKeys = new HashSet<>(oldInfo.getMetadata().keySet());
Set<String> union = new HashSet<>(newKeys);
union.retainAll(oldKeys);
newKeys.removeAll(union);
oldKeys.removeAll(union);
for (String key : newKeys) {
logger.info("Adding new metadata {}={}", key, newInfo.getMetadata().get(key));
}
for (String key : oldKeys) {
logger.info("Removing old metadata {}={}", key, oldInfo.getMetadata().get(key));
}
}
return true;
}
return false;
} | @Test
public void testAmazonInfoNoUpdateIfEqual() {
AmazonInfo oldInfo = (AmazonInfo) instanceInfo.getDataCenterInfo();
AmazonInfo newInfo = copyAmazonInfo(instanceInfo);
assertThat(RefreshableAmazonInfoProvider.shouldUpdate(newInfo, oldInfo), is(false));
} |
@Override
public RuntimeException handleFault(String failureMessage, Throwable cause) {
if (cause == null) {
log.error("Encountered {} fault: {}", type, failureMessage);
} else {
log.error("Encountered {} fault: {}", type, failureMessage, cause);
}
try {
action.run();
} catch (Throwable e) {
log.error("Failed to run LoggingFaultHandler action.", e);
}
return new FaultHandlerException(failureMessage, cause);
} | @Test
public void testHandleExceptionInAction() {
LoggingFaultHandler handler = new LoggingFaultHandler("test", () -> {
throw new RuntimeException("action failed");
});
handler.handleFault("uh oh"); // should not throw
handler.handleFault("uh oh", new RuntimeException("yikes")); // should not throw
} |
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options) {
final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size());
final CreatableTopicCollection topics = new CreatableTopicCollection();
for (NewTopic newTopic : newTopics) {
if (topicNameIsUnrepresentable(newTopic.name())) {
KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
newTopic.name() + "' cannot be represented in a request."));
topicFutures.put(newTopic.name(), future);
} else if (!topicFutures.containsKey(newTopic.name())) {
topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
topics.add(newTopic.convertToCreatableTopic());
}
}
if (!topics.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getCreateTopicsCall(options, topicFutures, topics,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new CreateTopicsResult(new HashMap<>(topicFutures));
} | @Test
public void testTimeoutWithoutMetadata() throws Exception {
try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, mockBootstrapCluster(),
newStrMap(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10"))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareCreateTopicsResponse("myTopic", Errors.NONE));
KafkaFuture<Void> future = env.adminClient().createTopics(
singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
new CreateTopicsOptions().timeoutMs(1000)).all();
TestUtils.assertFutureError(future, TimeoutException.class);
}
} |
public static Double interpolateCourse(Double c1, Double c2, double fraction) {
if (c1 == null || c2 == null) {
return null;
}
checkArgument(VALID_COURSE_RANGE.contains(c1), "The 1st course: " + c1 + " is not in range");
checkArgument(VALID_COURSE_RANGE.contains(c2), "The 2nd course: " + c2 + " is not in range");
checkArgument(VALID_FRACTION_RANGE.contains(fraction), "The fraction: " + fraction + " is not in range");
double angleDelta = Spherical.angleDifference(c2, c1);
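// angleDifference yields the signed smallest rotation from c1 to c2, so interpolating 178 -> 181 passes through 180 rather than sweeping the long way around 0/360 (the test below confirms this).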
Double course = c1 + interpolate(0.0, angleDelta, fraction);
return Spherical.mod(course, 360.0d);
} | @Test
public void testInterpolateCourse_bug() {
//A bug was found when interpolating between a Point with course 178 and a Point with course 181
double TOL = 0.001;
assertEquals(
178.0,
interpolateCourse(178.0, 181.0, 0.0),
TOL
);
assertEquals(
181.0,
interpolateCourse(178.0, 181.0, 1.0),
TOL
);
//this assertion failed ... result was 106.6
assertEquals(
178.6,
interpolateCourse(178.0, 181.0, 0.2),
TOL
);
} |
public Map<String, String> confirm(RdaConfirmRequest params) {
AppSession appSession = appSessionService.getSession(params.getAppSessionId());
AppAuthenticator appAuthenticator = appAuthenticatorService.findByUserAppId(appSession.getUserAppId());
if(!checkSecret(params, appSession) || !checkAccount(params, appSession)){
appSession.setRdaSessionStatus("ABORTED");
appSessionService.save(appSession);
return Map.of("arrivalStatus", "NOK");
}
if(checkAndProcessError(params, appSession)){
appSessionService.save(appSession);
return Map.of("arrivalStatus", "OK");
}
if (!switchService.digidAppSwitchEnabled()) {
digidClient.remoteLog("853",
Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true));
appSession.setRdaSessionStatus("REFUTED");
} else if (!switchService.digidRdaSwitchEnabled()){
digidClient.remoteLog("579",
Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true));
appSession.setRdaSessionStatus("REFUTED");
} else if (params.isVerified() && (SCANNING.equals(appSession.getRdaSessionStatus()) ||
SCANNING_FOREIGN.equals(appSession.getRdaSessionStatus()))) {
appSession.setRdaSessionStatus("VERIFIED");
appAuthenticator.setSubstantieelActivatedAt(ZonedDateTime.now());
appAuthenticator.setSubstantieelDocumentType(params.getDocumentType().toLowerCase());
if (appAuthenticator.getWidActivatedAt() == null) {
appAuthenticator.setIssuerType("rda");
}
storeIdCheckDocument(params.getDocumentNumber(), params.getDocumentType(), appSession.getAccountId(), appAuthenticator.getUserAppId());
if (ID_CHECK_ACTION.equals(appSession.getRdaAction())) {
digidClient.remoteLog("1321",
Map.of("document_type", params.getDocumentType().toLowerCase(), lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId()));
} else {
digidClient.remoteLog("848",
Map.of("document_type", params.getDocumentType().toLowerCase(),
lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
}
appAuthenticatorService.save(appAuthenticator);
if(appSession.getFlow().equals(UpgradeLoginLevel.NAME)) {
digidClient.sendNotificationMessage(appSession.getAccountId(), "ED024", "SMS20");
logger.debug("Sending notify email ED024 / SMS20 for device {}", appAuthenticator.getDeviceName());
}
}
appSession.setAppAuthenticationLevel(appAuthenticator.getAuthenticationLevel());
appSessionService.save(appSession);
return Map.of("arrivalStatus", "OK");
} | @Test
void checkSecretError(){
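// A mismatching secret makes checkSecret fail, so the session is aborted and arrivalStatus is NOK.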
rdaConfirmRequest.setSecret("secret2");
when(appSessionService.getSession(any())).thenReturn(appSession);
when(appAuthenticatorService.findByUserAppId(any())).thenReturn(appAuthenticator);
Map<String, String> result = rdaService.confirm(rdaConfirmRequest);
assertEquals("ABORTED", appSession.getRdaSessionStatus());
assertEquals("NOK", result.get("arrivalStatus"));
} |
@POST
@ApiOperation("Create a new view")
@AuditEvent(type = ViewsAuditEventTypes.VIEW_CREATE)
public ViewDTO create(@ApiParam @Valid @NotNull(message = "View is mandatory") ViewDTO dto,
@Context UserContext userContext,
@Context SearchUser searchUser) throws ValidationException {
if (dto.type().equals(ViewDTO.Type.DASHBOARD) && !searchUser.canCreateDashboards()) {
throw new ForbiddenException("User is not allowed to create new dashboards.");
}
validateIntegrity(dto, searchUser, true);
final User user = userContext.getUser();
var result = dbService.saveWithOwner(dto.toBuilder().owner(searchUser.username()).build(), user);
recentActivityService.create(result.id(), result.type().equals(ViewDTO.Type.DASHBOARD) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH, searchUser);
return result;
} | @Test
public void throwsExceptionWhenCreatingSearchWithFilterThatUserIsNotAllowedToSee() {
final ViewsResource viewsResource = createViewsResource(
mock(ViewService.class),
mock(StartPageService.class),
mock(RecentActivityService.class),
mock(ClusterEventBus.class),
new ReferencedSearchFiltersHelper(),
searchFilterVisibilityChecker(Collections.singletonList("<<You cannot see this filter>>")),
EMPTY_VIEW_RESOLVERS,
SEARCH
);
Assertions.assertThatThrownBy(() -> viewsResource.create(TEST_DASHBOARD_VIEW, mock(UserContext.class), SEARCH_USER))
.isInstanceOf(BadRequestException.class)
.hasMessageContaining("View cannot be saved, as it contains Search Filters which you are not privileged to view : [<<You cannot see this filter>>]");
} |
public void start() {
// Start the thread that creates connections asynchronously
this.creator.start();
// Schedule a task to remove stale connection pools and sockets
long recycleTimeMs = Math.min(
poolCleanupPeriodMs, connectionCleanupPeriodMs);
LOG.info("Cleaning every {} seconds",
TimeUnit.MILLISECONDS.toSeconds(recycleTimeMs));
this.cleaner.scheduleAtFixedRate(
new CleanupTask(), 0, recycleTimeMs, TimeUnit.MILLISECONDS);
// Mark the manager as running
this.running = true;
} | @Test
public void testConnectionCreatorWithException() throws Exception {
// Create a bad connection pool pointing to unresolvable namenode address.
ConnectionPool badPool = new ConnectionPool(
conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f,
ClientProtocol.class, null);
BlockingQueue<ConnectionPool> queue = new ArrayBlockingQueue<>(1);
queue.add(badPool);
ConnectionManager.ConnectionCreator connectionCreator =
new ConnectionManager.ConnectionCreator(queue);
connectionCreator.setDaemon(true);
connectionCreator.start();
// Wait to make sure the async thread is scheduled and picks the pool off the queue.
GenericTestUtils.waitFor(queue::isEmpty, 50, 5000);
// At this point connection creation task should be definitely picked up.
assertTrue(queue.isEmpty());
// At this point connection thread should still be alive.
assertTrue(connectionCreator.isAlive());
// Stop the thread as test is successful at this point
connectionCreator.interrupt();
} |
public static <InputT> UsingBuilder<InputT> of(PCollection<InputT> input) {
return new UsingBuilder<>(DEFAULT_NAME, input);
} | @Test
public void testBuild_ImplicitName() {
final PCollection<String> dataset = TestUtils.createMockDataset(TypeDescriptors.strings());
final Split.Output<String> split =
Split.of(dataset).using((UnaryPredicate<String>) what -> true).output();
final Filter positive = (Filter) TestUtils.getProducer(split.positive());
assertTrue(positive.getName().isPresent());
assertEquals(Split.DEFAULT_NAME + Split.POSITIVE_FILTER_SUFFIX, positive.getName().get());
final Filter negative = (Filter) TestUtils.getProducer(split.negative());
assertTrue(negative.getName().isPresent());
assertEquals(Split.DEFAULT_NAME + Split.NEGATIVE_FILTER_SUFFIX, negative.getName().get());
} |
@Override
public Sensor addRateTotalSensor(final String scopeName,
final String entityName,
final String operationName,
final Sensor.RecordingLevel recordingLevel,
final String... tags) {
final String threadId = Thread.currentThread().getName();
final Map<String, String> tagMap = customizedTags(threadId, scopeName, entityName, tags);
return customInvocationRateAndCountSensor(
threadId,
groupNameFromScope(scopeName),
entityName,
operationName,
tagMap,
recordingLevel
);
} | @Test
public void shouldAddRateTotalSensor() {
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);
shouldAddCustomSensor(
streamsMetrics.addRateTotalSensor(SCOPE_NAME, ENTITY_NAME, OPERATION_NAME, RecordingLevel.DEBUG),
streamsMetrics,
Arrays.asList(OPERATION_NAME + TOTAL_SUFFIX, OPERATION_NAME + RATE_SUFFIX)
);
} |
@Override
public InMemoryReaderIterator iterator() throws IOException {
return new InMemoryReaderIterator();
} | @Test
public void testProgressReporting() throws Exception {
List<Integer> elements = Arrays.asList(33, 44, 55, 66, 77, 88);
// Should initially read elements at indices: 44@1, 55@2, 66@3, 77@4
Coder<Integer> coder = BigEndianIntegerCoder.of();
InMemoryReader<Integer> inMemoryReader =
new InMemoryReader<>(encodedElements(elements, coder), 1, 4, coder);
try (InMemoryReader<Integer>.InMemoryReaderIterator iterator = inMemoryReader.iterator()) {
assertNull(iterator.getProgress());
assertEquals(3, iterator.getRemainingParallelism(), 0.0);
assertTrue(iterator.start());
Assert.assertEquals(
ReaderTestUtils.positionAtIndex(1L),
ReaderTestUtils.positionFromProgress(iterator.getProgress()));
assertEquals(3, iterator.getRemainingParallelism(), 0.0);
assertTrue(iterator.advance());
Assert.assertEquals(
ReaderTestUtils.positionAtIndex(2L),
ReaderTestUtils.positionFromProgress(iterator.getProgress()));
assertEquals(2, iterator.getRemainingParallelism(), 0.0);
assertTrue(iterator.advance());
Assert.assertEquals(
ReaderTestUtils.positionAtIndex(3L),
ReaderTestUtils.positionFromProgress(iterator.getProgress()));
assertEquals(1, iterator.getRemainingParallelism(), 0.0);
assertFalse(iterator.advance());
}
} |
@Override
public Mono<AccessToken> requestToken(AuthorizationCodeTokenRequest request) {
return Mono
.justOrEmpty(request.code())
.map(this::getRedisKey)
.flatMap(redis.opsForValue()::get)
.switchIfEmpty(Mono.error(() -> new OAuth2Exception(ErrorType.ILLEGAL_CODE)))
//Remove the code (it is single-use)
.flatMap(cache -> redis.opsForValue().delete(getRedisKey(cache.getCode())).thenReturn(cache))
.flatMap(cache -> {
if (!request.getClient().getClientId().equals(cache.getClientId())) {
return Mono.error(new OAuth2Exception(ErrorType.ILLEGAL_CLIENT_ID));
}
return accessTokenManager
.createAccessToken(cache.getClientId(), cache.getAuthentication(), false)
.flatMap(token -> new OAuth2GrantedEvent(request.getClient(),
token,
cache.getAuthentication(),
cache.getScope(),
GrantType.authorization_code,
request.getParameters())
.publish(eventPublisher)
.onErrorResume(err -> accessTokenManager
.removeToken(cache.getClientId(), token.getAccessToken())
.then(Mono.error(err)))
.thenReturn(token));
})
;
} | @Test
public void testRequestToken() {
StaticApplicationContext context = new StaticApplicationContext();
context.refresh();
context.start();
DefaultAuthorizationCodeGranter codeGranter = new DefaultAuthorizationCodeGranter(
new RedisAccessTokenManager(RedisHelper.factory), context, RedisHelper.factory
);
OAuth2Client client = new OAuth2Client();
client.setClientId("test");
client.setClientSecret("test");
SimpleAuthentication authentication = new SimpleAuthentication();
authentication.setUser(SimpleUser
.builder()
.id("test")
.build());
codeGranter
.requestCode(new AuthorizationCodeRequest(client, authentication, Collections.emptyMap()))
.doOnNext(System.out::println)
.flatMap(response -> codeGranter
.requestToken(new AuthorizationCodeTokenRequest(client, Collections.singletonMap("code", response.getCode()))))
.doOnNext(System.out::println)
.as(StepVerifier::create)
.expectNextCount(1)
.verifyComplete();
} |
public String toEncodedString() {
StringBuilder sb = new StringBuilder();
try {
for (Map.Entry<String, String> entry : entries()) {
sb.append(URLEncoder.encode(entry.getKey(), "UTF-8"));
if (!Strings.isNullOrEmpty(entry.getValue())) {
sb.append('=');
sb.append(URLEncoder.encode(entry.getValue(), "UTF-8"));
} else if (isTrailingEquals(entry.getKey())) {
sb.append('=');
}
sb.append('&');
}
// Remove trailing '&'.
if (sb.length() > 0 && '&' == sb.charAt(sb.length() - 1)) {
sb.deleteCharAt(sb.length() - 1);
}
} catch (UnsupportedEncodingException e) {
// Won't happen.
e.printStackTrace();
}
return sb.toString();
} | @Test
void testToEncodedString() {
HttpQueryParams qp = new HttpQueryParams();
qp.add("k'1", "v1&");
assertEquals("k%271=v1%26", qp.toEncodedString());
qp = new HttpQueryParams();
qp.add("k+", "\n");
assertEquals("k%2B=%0A", qp.toEncodedString());
} |
public void download(String pluginKey, Version version) {
Optional<UpdateCenter> updateCenter = updateCenterMatrixFactory.getUpdateCenter(true);
if (updateCenter.isPresent()) {
List<Release> installablePlugins = updateCenter.get().findInstallablePlugins(pluginKey, version);
checkRequest(!installablePlugins.isEmpty(), "Error while downloading plugin '%s' with version '%s'. No compatible plugin found.", pluginKey, version.getName());
for (Release release : installablePlugins) {
try {
downloadRelease(release);
} catch (Exception e) {
String message = String.format("Fail to download the plugin (%s, version %s) from %s (error is : %s)",
release.getArtifact().getKey(), release.getVersion().getName(), release.getDownloadUrl(), e.getMessage());
LOG.debug(message, e);
throw new IllegalStateException(message, e);
}
}
}
} | @Test
public void fail_if_no_compatible_plugin_found() {
assertThatThrownBy(() -> pluginDownloader.download("foo", create("1.0")))
.isInstanceOf(BadRequestException.class);
} |
public boolean lockRecordRecentlyCreated(String lockName) {
return lockRecords.contains(lockName);
} | @Test
void shouldNotLie() {
assertThat(lockRecordRegistry.lockRecordRecentlyCreated(NAME)).isFalse();
} |
public ReferenceBuilder<T> interfaceClass(Class<?> interfaceClass) {
this.interfaceClass = interfaceClass;
return getThis();
} | @Test
void interfaceClass() {
ReferenceBuilder builder = new ReferenceBuilder();
builder.interfaceClass(DemoService.class);
Assertions.assertEquals(DemoService.class, builder.build().getInterfaceClass());
} |
public TTextFileDesc toThrift() {
TTextFileDesc desc = new TTextFileDesc();
if (fieldDelim != null) {
desc.setField_delim(fieldDelim);
}
if (lineDelim != null) {
desc.setLine_delim(lineDelim);
}
if (collectionDelim != null) {
desc.setCollection_delim(collectionDelim);
}
if (mapkeyDelim != null) {
desc.setMapkey_delim(mapkeyDelim);
}
desc.setSkip_header_line_count(skipHeaderLineCount);
return desc;
} | @Test
public void testToThrift() {
TextFileFormatDesc desc = new TextFileFormatDesc(null, null, null, null);
TTextFileDesc tTextFileDesc = desc.toThrift();
Assert.assertFalse(tTextFileDesc.isSetField_delim());
Assert.assertFalse(tTextFileDesc.isSetLine_delim());
Assert.assertFalse(tTextFileDesc.isSetCollection_delim());
Assert.assertFalse(tTextFileDesc.isSetMapkey_delim());
Assert.assertTrue(tTextFileDesc.isSetSkip_header_line_count());
Assert.assertEquals(0, tTextFileDesc.getSkip_header_line_count());
desc = new TextFileFormatDesc("a", "b", "c", "d", 10);
tTextFileDesc = desc.toThrift();
Assert.assertTrue(tTextFileDesc.isSetField_delim());
Assert.assertTrue(tTextFileDesc.isSetLine_delim());
Assert.assertTrue(tTextFileDesc.isSetCollection_delim());
Assert.assertTrue(tTextFileDesc.isSetMapkey_delim());
Assert.assertEquals("a", tTextFileDesc.getField_delim());
Assert.assertEquals("b", tTextFileDesc.getLine_delim());
Assert.assertEquals("c", tTextFileDesc.getCollection_delim());
Assert.assertEquals("d", tTextFileDesc.getMapkey_delim());
Assert.assertEquals(10, tTextFileDesc.getSkip_header_line_count());
} |
@Override
public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) {
AbstractConfig config = new AbstractConfig(CONFIG_DEF, connectorConfig);
String filename = config.getString(FILE_CONFIG);
if (filename == null || filename.isEmpty()) {
throw new ConnectException("Offsets cannot be modified if the '" + FILE_CONFIG + "' configuration is unspecified. " +
"This is because stdin is used for input and offsets are not tracked.");
}
// This connector makes use of a single source partition at a time which represents the file that it is configured to read from.
// However, there could also be source partitions from previous configurations of the connector.
for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : offsets.entrySet()) {
Map<String, ?> offset = partitionOffset.getValue();
if (offset == null) {
// We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
// want to prevent users from being able to clean it up using the REST API
continue;
}
if (!offset.containsKey(POSITION_FIELD)) {
throw new ConnectException("Offset objects should either be null or contain the key '" + POSITION_FIELD + "'");
}
// The 'position' in the offset represents the position in the file's byte stream and should be a non-negative long value
if (!(offset.get(POSITION_FIELD) instanceof Long)) {
throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset is expected to be a Long value");
}
long offsetPosition = (Long) offset.get(POSITION_FIELD);
if (offsetPosition < 0) {
throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset should be a non-negative value");
}
Map<String, ?> partition = partitionOffset.getKey();
if (partition == null) {
throw new ConnectException("Partition objects cannot be null");
}
if (!partition.containsKey(FILENAME_FIELD)) {
throw new ConnectException("Partition objects should contain the key '" + FILENAME_FIELD + "'");
}
}
// Let the task check whether the actual value for the offset position is valid for the configured file on startup
return true;
} | @Test
public void testAlterOffsetsStdin() {
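// Without the file config the connector reads from stdin and offsets are not tracked, so altering offsets must be rejected.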
sourceProperties.remove(FileStreamSourceConnector.FILE_CONFIG);
Map<Map<String, ?>, Map<String, ?>> offsets = Collections.singletonMap(
Collections.singletonMap(FILENAME_FIELD, FILENAME),
Collections.singletonMap(POSITION_FIELD, 0L)
);
assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, offsets));
} |
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
@Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
if (desiredSegmentSizeBytes <= 0) {
desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
}
long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;
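// Acceptable segment-size band: [50%, 150%] of the desired size; results outside it trigger the row-target corrections below.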
if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null
if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case
long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
_latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
} else {
final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment zk metadata is not available, setting threshold for {} as {}", newSegmentName,
autotuneInitialRows);
return autotuneInitialRows;
}
}
final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
if (committingSegmentSizeBytes <= 0 // repair segment case
|| SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
committingSegmentDescriptor.getStopReason())) {
String reason = committingSegmentSizeBytes <= 0 //
? "Committing segment size is not available" //
: "Committing segment is due to force-commit";
final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
reason, newSegmentName, targetNumRows);
return targetNumRows;
}
final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}",
newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold,
committingSegmentSizeBytes);
double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
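// Rows-per-byte observed for the committing segment; blended into the running ratio with fixed weights below.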
if (_latestSegmentRowsToSizeRatio > 0) {
_latestSegmentRowsToSizeRatio =
CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
} else {
_latestSegmentRowsToSizeRatio = currentRatio;
}
// If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
// We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
// to hit the row limit next time around.
//
// If the size of the committing segment is higher than the desired segment size, then the administrator has
// set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
// limit.
//
// TODO: add feature to adjust time threshold as well
// If we set the new threshold to numRowsConsumed, we might keep oscillating between doubling the limit and
// hitting the time threshold. If we set the new threshold to
// committingSegmentZKMetadata.getSizeThresholdToFlushSegment(), we might end up using far more memory than
// required for the segment. Using a minor bump strategy until we add the feature to adjust time, we only
// slightly bump the threshold based on numRowsConsumed.
if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
long currentNumRows = numRowsConsumed;
StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
if (timeThresholdMillis < timeConsumed) {
// The administrator has reduced the time threshold. Adjust the
// number of rows to match the average consumption rate on the partition.
currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
.append(". ");
}
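// Bump slightly (the test below exercises this path: 10_000 rows * 1.1 -> 11_000).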
long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
logStringBuilder.append("Setting segment size for {} as {}");
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(),
newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
}
long targetSegmentNumRows;
if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
} else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
targetSegmentNumRows = numRowsConsumed / 2;
} else {
if (_latestSegmentRowsToSizeRatio > 0) {
targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
} else {
targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
}
}
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment size {}, current ratio {}, setting threshold for {} as {}",
committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
} | @Test
public void testApplyMultiplierToTotalDocsWhenTimeThresholdNotReached() {
long currentTime = 1640216032391L;
Clock clock = Clock.fixed(java.time.Instant.ofEpochMilli(currentTime), ZoneId.of("UTC"));
SegmentFlushThresholdComputer computer = new SegmentFlushThresholdComputer(clock);
StreamConfig streamConfig = mock(StreamConfig.class);
when(streamConfig.getFlushThresholdSegmentSizeBytes()).thenReturn(300_0000L);
when(streamConfig.getFlushThresholdTimeMillis()).thenReturn(MILLISECONDS.convert(6, TimeUnit.HOURS));
CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class);
when(committingSegmentDescriptor.getSegmentSizeBytes()).thenReturn(200_0000L);
SegmentZKMetadata committingSegmentZKMetadata = mock(SegmentZKMetadata.class);
when(committingSegmentZKMetadata.getTotalDocs()).thenReturn(10_000L);
when(committingSegmentZKMetadata.getSizeThresholdToFlushSegment()).thenReturn(20_000);
when(committingSegmentZKMetadata.getCreationTime()).thenReturn(
currentTime - MILLISECONDS.convert(1, TimeUnit.HOURS));
int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor, committingSegmentZKMetadata,
"events3__0__0__20211222T1646Z");
// totalDocs * 1.1
// 10000 * 1.1
assertEquals(threshold, 11_000);
} |
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
for (final GarbageCollectorMXBean gc : garbageCollectors) {
final String name = WHITESPACE.matcher(gc.getName()).replaceAll("-");
gauges.put(name(name, "count"), (Gauge<Long>) gc::getCollectionCount);
gauges.put(name(name, "time"), (Gauge<Long>) gc::getCollectionTime);
}
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasAGaugeForGcCounts() {
final Gauge<Long> gauge = (Gauge<Long>) metrics.getMetrics().get("PS-OldGen.count");
assertThat(gauge.getValue())
.isEqualTo(1L);
} |
@SuppressWarnings("unchecked")
public static <T> boolean containsAll(T[] array, T... values) {
for (T value : values) {
if (false == contains(array, value)) {
return false;
}
}
return true;
} | @Test
public void containsAllTest() {
Integer[] a = {1, 2, 3, 4, 3, 6};
boolean contains = ArrayUtil.containsAll(a, 4, 2, 6);
assertTrue(contains);
contains = ArrayUtil.containsAll(a, 1, 2, 3, 5);
assertFalse(contains);
} |
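A hedged edge-case sketch, not in the source: with an empty varargs list the loop body never executes, so containsAll is vacuously true (hypothetical test name; assumes the same ArrayUtil and assertTrue in scope).
@Test
public void containsAllEmptyValuesTest() {
    Integer[] a = {1, 2, 3};
    assertTrue(ArrayUtil.containsAll(a));
}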
public ContainerLaunchContext completeContainerLaunch() throws IOException {
String cmdStr = ServiceUtils.join(commands, " ", false);
log.debug("Completed setting up container command {}", cmdStr);
containerLaunchContext.setCommands(commands);
//env variables
if (log.isDebugEnabled()) {
log.debug("Environment variables");
for (Map.Entry<String, String> envPair : envVars.entrySet()) {
log.debug(" \"{}\"=\"{}\"", envPair.getKey(), envPair.getValue());
}
}
containerLaunchContext.setEnvironment(envVars);
//service data
if (log.isDebugEnabled()) {
log.debug("Service Data size");
for (Map.Entry<String, ByteBuffer> entry : serviceData.entrySet()) {
log.debug("\"{}\"=> {} bytes of data", entry.getKey(),
entry.getValue().array().length);
}
}
containerLaunchContext.setServiceData(serviceData);
// resources
dumpLocalResources();
containerLaunchContext.setLocalResources(localResources);
//tokens
if (context.tokens != null) {
containerLaunchContext.setTokens(context.tokens.duplicate());
}
if(yarnDockerMode){
Map<String, String> env = containerLaunchContext.getEnvironment();
env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
env.put("YARN_CONTAINER_RUNTIME_DOCKER_IMAGE", dockerImage);
if (ServiceUtils.isSet(dockerNetwork)) {
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
dockerNetwork);
}
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME",
dockerHostname);
if (runPrivilegedContainer) {
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
"true");
}
if (!mountPaths.isEmpty()) {
StringBuilder sb = new StringBuilder();
if (env.get(ENV_DOCKER_CONTAINER_MOUNTS) != null) {
// user specified mounts in the spec
sb.append(env.get(ENV_DOCKER_CONTAINER_MOUNTS));
}
for (Entry<String, String> mount : mountPaths.entrySet()) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(mount.getKey()).append(":")
.append(mount.getValue()).append(":ro");
}
env.put(ENV_DOCKER_CONTAINER_MOUNTS, sb.toString());
}
log.info("yarn docker env var has been set {}",
containerLaunchContext.getEnvironment().toString());
}
return containerLaunchContext;
} | @Test
public void testDockerContainerMounts() throws IOException {
launcher.yarnDockerMode = true;
launcher.envVars.put(AbstractLauncher.ENV_DOCKER_CONTAINER_MOUNTS,
"s1:t1:ro");
launcher.mountPaths.put("s2", "t2");
launcher.completeContainerLaunch();
String dockerContainerMounts = launcher.containerLaunchContext
.getEnvironment().get(AbstractLauncher.ENV_DOCKER_CONTAINER_MOUNTS);
Assert.assertEquals("s1:t1:ro,s2:t2:ro", dockerContainerMounts);
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
if(containerService.isContainer(file)) {
final PathAttributes attributes = new PathAttributes();
if(log.isDebugEnabled()) {
log.debug(String.format("Read location for bucket %s", file));
}
attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
return attributes;
}
if(file.getType().contains(Path.Type.upload)) {
final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
if(append.append) {
return new PathAttributes().withSize(append.offset);
}
throw new NotfoundException(file.getAbsolute());
}
try {
PathAttributes attr;
final Path bucket = containerService.getContainer(file);
try {
attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(ServiceException e) {
switch(e.getResponseCode()) {
case 405:
if(log.isDebugEnabled()) {
log.debug(String.format("Mark file %s as delete marker", file));
}
// Only DELETE method is allowed for delete markers
attr = new PathAttributes();
attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
attr.setDuplicate(true);
return attr;
}
throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
if(StringUtils.isNotBlank(attr.getVersionId())) {
if(log.isDebugEnabled()) {
log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
}
// Determine if latest version
try {
final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
if(null != latest) {
if(log.isDebugEnabled()) {
log.debug(String.format("Found later version %s for %s", latest, file));
}
// Duplicate if not latest version
attr.setDuplicate(!latest.equals(attr.getVersionId()));
}
}
catch(ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(failure instanceof NotfoundException) {
attr.setDuplicate(true);
}
else {
throw failure;
}
}
}
return attr;
}
catch(NotfoundException e) {
if(file.isDirectory()) {
if(log.isDebugEnabled()) {
log.debug(String.format("Search for common prefix %s", file));
}
// File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
try {
new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
}
catch(ListCanceledException l) {
// Found common prefix
return PathAttributes.EMPTY;
}
catch(NotfoundException n) {
throw e;
}
// Found common prefix
return PathAttributes.EMPTY;
}
throw e;
}
} | @Test
public void testRedirectWithNoLocationHeader() throws Exception {
final Path container = new Path("profiles.cyberduck.io", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(container, "S3 (HTTP).cyberduckprofile", EnumSet.of(Path.Type.file));
final S3AttributesFinderFeature f = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session));
f.find(test);
assertTrue(session.getClient().getRegionEndpointCache().containsRegionForBucketName("profiles.cyberduck.io"));
assertEquals("eu-west-1", session.getClient().getRegionEndpointCache().getRegionForBucketName("profiles.cyberduck.io"));
} |
@Override
public Mono<Void> backup(Backup backup) {
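// Three-step pipeline in a temp dir: export extension stores, copy the work dir, then zip everything into the backup file; the temp dir is cleaned up in all cases.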
return Mono.usingWhen(
createTempDir("halo-full-backup-", scheduler),
tempDir -> backupExtensions(tempDir)
.then(Mono.defer(() -> backupWorkDir(tempDir)))
.then(Mono.defer(() -> packageBackup(tempDir, backup))),
tempDir -> deleteRecursivelyAndSilently(tempDir, scheduler)
);
} | @Test
void backupTest() throws IOException {
Files.writeString(tempDir.resolve("fake-file"), "halo", StandardOpenOption.CREATE_NEW);
var extensionStores = List.of(
createExtensionStore("fake-extension-store", "fake-data")
);
when(repository.findAll()).thenReturn(Flux.fromIterable(extensionStores));
when(haloProperties.getWorkDir()).thenReturn(tempDir);
when(backupRoot.get()).thenReturn(tempDir.resolve("backups"));
var startTimestamp = Instant.now();
var backup = createRunningBackup("fake-backup", startTimestamp);
StepVerifier.create(migrationService.backup(backup))
.verifyComplete();
verify(repository).findAll();
// 1. backup workdir
// 2. package backup
verify(haloProperties).getWorkDir();
verify(backupRoot).get();
var status = backup.getStatus();
var datetimePart = migrationService.getDateTimeFormatter().format(startTimestamp);
assertEquals(datetimePart + "-fake-backup.zip", status.getFilename());
var backupFile = migrationService.getBackupsRoot()
.resolve(status.getFilename());
assertTrue(Files.exists(backupFile));
assertEquals(Files.size(backupFile), status.getSize());
var target = tempDir.resolve("target");
try (var zis = new ZipInputStream(
Files.newInputStream(backupFile, StandardOpenOption.READ))) {
FileUtils.unzip(zis, tempDir.resolve("target"));
}
var extensionsFile = target.resolve("extensions.data");
var workdir = target.resolve("workdir");
assertTrue(Files.exists(extensionsFile));
assertTrue(Files.exists(workdir));
var objectMapper = migrationService.getObjectMapper();
var gotExtensionStores = objectMapper.readValue(extensionsFile.toFile(),
new TypeReference<List<ExtensionStore>>() {
});
assertEquals(gotExtensionStores, extensionStores);
assertEquals("halo", Files.readString(workdir.resolve("fake-file")));
} |
@Override
public ObjectNode encode(Instruction instruction, CodecContext context) {
checkNotNull(instruction, "Instruction cannot be null");
return new EncodeInstructionCodecHelper(instruction, context).encode();
} | @Test
public void modOduSignalIdInstructionTest() {
OduSignalId oduSignalId = OduSignalId.oduSignalId(1, 8, new byte[] {8, 0, 0, 0, 0, 0, 0, 0, 0, 0});
L1ModificationInstruction.ModOduSignalIdInstruction instruction =
(L1ModificationInstruction.ModOduSignalIdInstruction)
Instructions.modL1OduSignalId(oduSignalId);
ObjectNode instructionJson =
instructionCodec.encode(instruction, context);
assertThat(instructionJson, matchesInstruction(instruction));
} |
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
} | @Test
public void shouldNotCastDecimalTooBig() {
// When:
final Exception e = assertThrows(
ArithmeticException.class,
() -> cast(new BigDecimal(10), 2, 1)
);
// Then:
assertThat(e.getMessage(), containsString("Numeric field overflow"));
} |
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
throws IOException
{
return createFromImage(document, image, 0.75f);
} | @Test
void testCreateFromImageUSHORT_555_RGB() throws IOException
{
PDDocument document = new PDDocument();
BufferedImage image = ImageIO.read(JPEGFactoryTest.class.getResourceAsStream("jpeg.jpg"));
// create an USHORT_555_RGB image
int width = image.getWidth();
int height = image.getHeight();
BufferedImage rgbImage = new BufferedImage(width, height, BufferedImage.TYPE_USHORT_555_RGB);
Graphics ag = rgbImage.getGraphics();
ag.drawImage(image, 0, 0, null);
ag.dispose();
for (int x = 0; x < rgbImage.getWidth(); ++x)
{
for (int y = 0; y < rgbImage.getHeight(); ++y)
{
rgbImage.setRGB(x, y, (rgbImage.getRGB(x, y) & 0xFFFFFF) | ((y / 10 * 10) << 24));
}
}
PDImageXObject ximage = JPEGFactory.createFromImage(document, rgbImage);
validate(ximage, 8, width, height, "jpg", PDDeviceRGB.INSTANCE.getName());
assertNull(ximage.getSoftMask());
doWritePDF(document, ximage, TESTRESULTSDIR, "jpeg-ushort555rgb.pdf");
} |
@Override
public ChannelFuture removeListener(GenericFutureListener<? extends Future<? super Void>> listener) {
super.removeListener(listener);
return this;
} | @Test
public void shouldNotDoAnythingOnRemove() {
Channel channel = Mockito.mock(Channel.class);
CompleteChannelFuture future = new CompleteChannelFutureImpl(channel);
ChannelFutureListener l = Mockito.mock(ChannelFutureListener.class);
future.removeListener(l);
Mockito.verifyNoMoreInteractions(l);
Mockito.verifyZeroInteractions(channel);
} |
public static Date getCurrentDate() {
return new Date();
} | @Test
public void testGetCurrentDate() {
Date currentDate = DateUtil.getCurrentDate();
Assertions.assertNotNull(currentDate);
} |
@Override
public AdjacencyMatrix setWeight(int source, int target, double weight) {
graph[source][target] = weight;
if (!digraph) {
graph[target][source] = weight;
}
return this;
} | @Test
public void testSetWeight() {
System.out.println("setWeight");
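// g4 is directed (the reverse edge keeps its old weight); g8 is undirected (the weight is mirrored).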
g4.setWeight(1, 4, 5.7);
assertEquals(5.7, g4.getWeight(1, 4), 1E-10);
assertEquals(1.0, g4.getWeight(4, 1), 1E-10);
g8.setWeight(1, 4, 5.7);
assertEquals(5.7, g8.getWeight(1, 4), 1E-10);
assertEquals(5.7, g8.getWeight(4, 1), 1E-10);
} |
public static String getTypeName(final int type) {
switch (type) {
case START_EVENT_V3:
return "Start_v3";
case STOP_EVENT:
return "Stop";
case QUERY_EVENT:
return "Query";
case ROTATE_EVENT:
return "Rotate";
case INTVAR_EVENT:
return "Intvar";
case LOAD_EVENT:
return "Load";
case NEW_LOAD_EVENT:
return "New_load";
case SLAVE_EVENT:
return "Slave";
case CREATE_FILE_EVENT:
return "Create_file";
case APPEND_BLOCK_EVENT:
return "Append_block";
case DELETE_FILE_EVENT:
return "Delete_file";
case EXEC_LOAD_EVENT:
return "Exec_load";
case RAND_EVENT:
return "RAND";
case XID_EVENT:
return "Xid";
case USER_VAR_EVENT:
return "User var";
case FORMAT_DESCRIPTION_EVENT:
return "Format_desc";
case TABLE_MAP_EVENT:
return "Table_map";
case PRE_GA_WRITE_ROWS_EVENT:
return "Write_rows_event_old";
case PRE_GA_UPDATE_ROWS_EVENT:
return "Update_rows_event_old";
case PRE_GA_DELETE_ROWS_EVENT:
return "Delete_rows_event_old";
case WRITE_ROWS_EVENT_V1:
return "Write_rows_v1";
case UPDATE_ROWS_EVENT_V1:
return "Update_rows_v1";
case DELETE_ROWS_EVENT_V1:
return "Delete_rows_v1";
case BEGIN_LOAD_QUERY_EVENT:
return "Begin_load_query";
case EXECUTE_LOAD_QUERY_EVENT:
return "Execute_load_query";
case INCIDENT_EVENT:
return "Incident";
case HEARTBEAT_LOG_EVENT:
case HEARTBEAT_LOG_EVENT_V2:
return "Heartbeat";
case IGNORABLE_LOG_EVENT:
return "Ignorable";
case ROWS_QUERY_LOG_EVENT:
return "Rows_query";
case WRITE_ROWS_EVENT:
return "Write_rows";
case UPDATE_ROWS_EVENT:
return "Update_rows";
case DELETE_ROWS_EVENT:
return "Delete_rows";
case GTID_LOG_EVENT:
return "Gtid";
case ANONYMOUS_GTID_LOG_EVENT:
return "Anonymous_Gtid";
case PREVIOUS_GTIDS_LOG_EVENT:
return "Previous_gtids";
case PARTIAL_UPDATE_ROWS_EVENT:
return "Update_rows_partial";
            case TRANSACTION_CONTEXT_EVENT:
                return "Transaction_context";
            case VIEW_CHANGE_EVENT:
                return "view_change";
            case XA_PREPARE_LOG_EVENT:
                return "Xa_prepare";
            case TRANSACTION_PAYLOAD_EVENT:
                return "transaction_payload";
default:
return "Unknown type:" + type;
}
} | @Test
public void getTypeNameInputPositiveOutputNotNull21() {
// Arrange
final int type = 33;
// Act
final String actual = LogEvent.getTypeName(type);
// Assert result
Assert.assertEquals("Gtid", actual);
} |
public Class<T> getConfigurationClass() {
return Generics.getTypeParameter(getClass(), Configuration.class);
} | @Test
void canDetermineConfiguration() throws Exception {
assertThat(new PoserApplication().getConfigurationClass())
.isSameAs(FakeConfiguration.class);
} |
@Override
public void checkBeforeUpdate(final AlterReadwriteSplittingRuleStatement sqlStatement) {
ReadwriteSplittingRuleStatementChecker.checkAlteration(database, sqlStatement.getRules(), rule.getConfiguration());
} | @Test
void assertCheckSQLStatementWithoutToBeAlteredLoadBalancers() {
when(database.getRuleMetaData().findRules(any())).thenReturn(Collections.emptyList());
executor.setDatabase(database);
ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class);
when(rule.getConfiguration()).thenReturn(createCurrentRuleConfiguration());
executor.setRule(rule);
assertThrows(ServiceProviderNotFoundException.class, () -> executor.checkBeforeUpdate(createSQLStatement("INVALID_TYPE")));
} |
@Override
public void decorateRouteContext(final RouteContext routeContext, final QueryContext queryContext, final ShardingSphereDatabase database, final BroadcastRule broadcastRule,
final ConfigurationProperties props, final ConnectionContext connectionContext) {
SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
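        // Route by statement category: TCL fans out to every database, DDL gets
        // dedicated handling, and resource-group DAL as well as non-single-table
        // DCL fan out to every database instance.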
if (sqlStatement instanceof TCLStatement) {
routeToAllDatabase(routeContext, broadcastRule);
}
if (sqlStatement instanceof DDLStatement) {
decorateRouteContextWhenDDLStatement(routeContext, queryContext, database, broadcastRule);
}
if (sqlStatement instanceof DALStatement && isResourceGroupStatement(sqlStatement)) {
routeToAllDatabaseInstance(routeContext, database, broadcastRule);
}
if (sqlStatement instanceof DCLStatement && !isDCLForSingleTable(queryContext.getSqlStatementContext())) {
routeToAllDatabaseInstance(routeContext, database, broadcastRule);
}
} | @Test
void assertDecorateBroadcastRouteContextWithSingleDataSource() {
BroadcastRuleConfiguration currentConfig = mock(BroadcastRuleConfiguration.class);
when(currentConfig.getTables()).thenReturn(Collections.singleton("t_order"));
BroadcastRule broadcastRule = new BroadcastRule(currentConfig, DefaultDatabase.LOGIC_NAME, Collections.singletonMap("foo_ds", new MockedDataSource()), Collections.emptyList());
RouteContext routeContext = new RouteContext();
routeContext.getRouteUnits().add(new RouteUnit(new RouteMapper("foo_ds", "foo_ds"), Lists.newArrayList()));
BroadcastSQLRouter sqlRouter = (BroadcastSQLRouter) OrderedSPILoader.getServices(SQLRouter.class, Collections.singleton(broadcastRule)).get(broadcastRule);
sqlRouter.decorateRouteContext(routeContext, createQueryContext(),
mockSingleDatabase(), broadcastRule, new ConfigurationProperties(new Properties()), new ConnectionContext(Collections::emptySet));
Iterator<String> routedDataSourceNames = routeContext.getActualDataSourceNames().iterator();
assertThat(routedDataSourceNames.next(), is("foo_ds"));
} |
private void unregister(ConnectPoint connectPoint, HandlerRegistration registration) {
synchronized (packetHandlers) {
packetHandlers.remove(connectPoint, registration);
if (packetHandlers.isEmpty()) {
cancelPackets();
}
}
} | @Test
public void testUnregister() {
// Register a handler and verify the registration is there
neighbourManager.registerNeighbourHandler(CP1, HANDLER, APP_ID);
assertTrue(verifyRegistration(CP1, HANDLER, APP_ID));
// Unregister the handler but supply a different connect point
neighbourManager.unregisterNeighbourHandler(CP2, HANDLER, APP_ID);
// Verify the original registration is still there on the original
// connect point
assertTrue(verifyRegistration(CP1, HANDLER, APP_ID));
assertTrue(verifyNoRegistration(CP2));
// Unregister the handler from the original connect point
neighbourManager.unregisterNeighbourHandler(CP1, HANDLER, APP_ID);
// Verify that it is gone
assertTrue(verifyNoRegistration(CP1));
} |
protected boolean isBoolean(String field, FieldPresence presence) {
return isBoolean(object, field, presence);
} | @Test
public void isBoolean() {
assertTrue("is not proper boolean", cfg.isBoolean(BOOLEAN, MANDATORY));
assertTrue("did not detect missing field",
expectInvalidField(() -> cfg.isBoolean("none", MANDATORY)));
assertTrue("is not proper boolean", cfg.isBoolean("none", OPTIONAL));
assertTrue("did not detect bad boolean",
expectInvalidField(() -> cfg.isBoolean(TEXT, MANDATORY)));
} |
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
List<AclEntry> foundAclSpecEntries =
Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
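    // First pass: walk the existing ACL; entries whose key matches an ACL spec
    // entry are replaced by the spec entry, all others are carried over. Mask
    // entries are held aside in providedMask instead of being added directly.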
for (AclEntry existingEntry: existingAcl) {
AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
if (aclSpecEntry != null) {
foundAclSpecEntries.add(aclSpecEntry);
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
// ACL spec entries that were not replacements are new additions.
for (AclEntry newEntry: aclSpec) {
if (Collections.binarySearch(foundAclSpecEntries, newEntry,
ACL_ENTRY_COMPARATOR) < 0) {
scopeDirty.add(newEntry.getScope());
if (newEntry.getType() == MASK) {
providedMask.put(newEntry.getScope(), newEntry);
maskDirty.add(newEntry.getScope());
} else {
aclBuilder.add(newEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
} | @Test
public void testMergeAclEntriesUnchanged() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", ALL))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", ALL))
.add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, "sales", ALL))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", ALL),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, GROUP, "sales", ALL),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE));
assertEquals(existing, mergeAclEntries(existing, aclSpec));
} |
public ElasticAgentInformationDTO getElasticAgentInformationDTO(ElasticAgentInformation elasticAgentInformation) {
return elasticAgentInformationConverterV5.toDTO(elasticAgentInformation);
} | @Test
public void shouldGetRequestBodyForMigrateCall_withOldConfig() throws CryptoException {
ConfigurationProperty property1 = new ConfigurationProperty(new ConfigurationKey("key"), new ConfigurationValue("value"));
ConfigurationProperty property2 = new ConfigurationProperty(new ConfigurationKey("key2"), new EncryptedConfigurationValue(new GoCipher().encrypt("password")));
Configuration configuration = new Configuration();
configuration.add(property1);
configuration.add(property2);
Map<String, String> pluginSettings = configuration.getConfigurationAsMap(true);
List<ClusterProfile> clusterProfiles = new ArrayList<>();
clusterProfiles.add(new ClusterProfile("prod-cluster", "plugin_id"));
List<ElasticProfile> elasticAgentProfiles = new ArrayList<>();
elasticAgentProfiles.add(new ElasticProfile("profile_id", "prod-cluster", new ConfigurationProperty(new ConfigurationKey("some_key"), new ConfigurationValue("some_value")), new ConfigurationProperty(new ConfigurationKey("some_key2"), new EncryptedConfigurationValue(new GoCipher().encrypt("some_value2")))));
ElasticAgentInformation elasticAgentInformation = new ElasticAgentInformation(pluginSettings, clusterProfiles, elasticAgentProfiles);
ElasticAgentInformationDTO elasticAgentInformationDTO = new ElasticAgentExtensionConverterV5().getElasticAgentInformationDTO(elasticAgentInformation);
String requestBody = elasticAgentInformationDTO.toJSON().toString();
String expectedRequestBody = "{" +
" \"plugin_settings\":{" +
" \"key2\":\"password\", " +
" \"key\":\"value\"" +
" }," +
" \"cluster_profiles\":[" +
" {" +
" \"id\":\"prod-cluster\"," +
" \"plugin_id\":\"plugin_id\"," +
" \"properties\":{" +
" }" +
" }" +
" ]," +
" \"elastic_agent_profiles\":[" +
" {" +
" \"id\":\"profile_id\"," +
" \"plugin_id\":\"plugin_id\"," +
" \"cluster_profile_id\": \"prod-cluster\"," +
" \"properties\":{" +
" \"some_key\":\"some_value\", " +
" \"some_key2\":\"some_value2\"" +
" }" +
" }" +
" ]" +
"}\n";
assertThatJson(expectedRequestBody).isEqualTo(requestBody);
} |
@Override
public CompletableFuture<JobID> submitJob(@Nonnull JobGraph jobGraph) {
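        // Stage 1: serialize the JobGraph to a temporary file off the caller thread.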
CompletableFuture<java.nio.file.Path> jobGraphFileFuture =
CompletableFuture.supplyAsync(
() -> {
try {
final java.nio.file.Path jobGraphFile =
Files.createTempFile(
"flink-jobgraph-" + jobGraph.getJobID(), ".bin");
try (ObjectOutputStream objectOut =
new ObjectOutputStream(
Files.newOutputStream(jobGraphFile))) {
objectOut.writeObject(jobGraph);
}
return jobGraphFile;
} catch (IOException e) {
throw new CompletionException(
new FlinkException("Failed to serialize JobGraph.", e));
}
},
executorService);
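        // Stage 2: gather the job graph file, user jars and local artifacts as
        // uploads, and build the multipart submit request body.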
CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture =
jobGraphFileFuture.thenApply(
jobGraphFile -> {
List<String> jarFileNames = new ArrayList<>(8);
List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames =
new ArrayList<>(8);
Collection<FileUpload> filesToUpload = new ArrayList<>(8);
filesToUpload.add(
new FileUpload(
jobGraphFile, RestConstants.CONTENT_TYPE_BINARY));
for (Path jar : jobGraph.getUserJars()) {
jarFileNames.add(jar.getName());
filesToUpload.add(
new FileUpload(
Paths.get(jar.toUri()),
RestConstants.CONTENT_TYPE_JAR));
}
for (Map.Entry<String, DistributedCache.DistributedCacheEntry>
artifacts : jobGraph.getUserArtifacts().entrySet()) {
final Path artifactFilePath =
new Path(artifacts.getValue().filePath);
try {
// Only local artifacts need to be uploaded.
if (!artifactFilePath.getFileSystem().isDistributedFS()) {
artifactFileNames.add(
new JobSubmitRequestBody.DistributedCacheFile(
artifacts.getKey(),
artifactFilePath.getName()));
filesToUpload.add(
new FileUpload(
Paths.get(artifactFilePath.getPath()),
RestConstants.CONTENT_TYPE_BINARY));
}
} catch (IOException e) {
throw new CompletionException(
new FlinkException(
"Failed to get the FileSystem of artifact "
+ artifactFilePath
+ ".",
e));
}
}
final JobSubmitRequestBody requestBody =
new JobSubmitRequestBody(
jobGraphFile.getFileName().toString(),
jarFileNames,
artifactFileNames);
return Tuple2.of(
requestBody, Collections.unmodifiableCollection(filesToUpload));
});
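        // Stage 3: send the submit request, retrying on connection problems or
        // service-unavailable responses.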
final CompletableFuture<JobSubmitResponseBody> submissionFuture =
requestFuture.thenCompose(
requestAndFileUploads -> {
LOG.info(
"Submitting job '{}' ({}).",
jobGraph.getName(),
jobGraph.getJobID());
return sendRetriableRequest(
JobSubmitHeaders.getInstance(),
EmptyMessageParameters.getInstance(),
requestAndFileUploads.f0,
requestAndFileUploads.f1,
isConnectionProblemOrServiceUnavailable(),
(receiver, error) -> {
if (error != null) {
LOG.warn(
"Attempt to submit job '{}' ({}) to '{}' has failed.",
jobGraph.getName(),
jobGraph.getJobID(),
receiver,
error);
} else {
LOG.info(
"Successfully submitted job '{}' ({}) to '{}'.",
jobGraph.getName(),
jobGraph.getJobID(),
receiver);
}
});
});
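        // Whether submission succeeded or failed, delete the temporary JobGraph file.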
submissionFuture
.exceptionally(ignored -> null) // ignore errors
.thenCompose(ignored -> jobGraphFileFuture)
.thenAccept(
jobGraphFile -> {
try {
Files.delete(jobGraphFile);
} catch (IOException e) {
LOG.warn("Could not delete temporary file {}.", jobGraphFile, e);
}
});
return submissionFuture
.thenApply(ignore -> jobGraph.getJobID())
.exceptionally(
(Throwable throwable) -> {
throw new CompletionException(
new JobSubmissionException(
jobGraph.getJobID(),
"Failed to submit JobGraph.",
ExceptionUtils.stripCompletionException(throwable)));
});
} | @Test
void testJobSubmissionFailureCauseForwardedToClient() throws Exception {
try (final TestRestServerEndpoint restServerEndpoint =
createRestServerEndpoint(new SubmissionFailingHandler())) {
try (RestClusterClient<?> restClusterClient =
createRestClusterClient(restServerEndpoint.getServerAddress().getPort())) {
restClusterClient
.submitJob(jobGraph)
.thenCompose(restClusterClient::requestJobResult)
.get()
.toJobExecutionResult(ClassLoader.getSystemClassLoader());
} catch (final Exception e) {
assertThat(
ExceptionUtils.findThrowableWithMessage(
e, "RestHandlerException: expected"))
.isPresent();
return;
}
fail("Should failed with exception");
}
} |
public DdlCommandResult execute(
final String sql,
final DdlCommand ddlCommand,
final boolean withQuery,
final Set<SourceName> withQuerySources
) {
return execute(sql, ddlCommand, withQuery, withQuerySources, false);
} | @Test
public void shouldDropMissingType() {
// Given:
metaStore.deleteType("type");
// When:
final DdlCommandResult result = cmdExec.execute(SQL_TEXT, dropType, false, NO_QUERY_SOURCES);
// Then:
assertThat("Expected successful execution", result.isSuccess());
assertThat(result.getMessage(), is("Type 'type' does not exist"));
} |
public static Map<String, Class<?>> compile(Map<String, String> classNameSourceMap, ClassLoader classLoader) {
return compile(classNameSourceMap, classLoader, null);
} | @Test
public void compileAndLoadClass() throws Exception {
Map<String, String> source = singletonMap("org.kie.memorycompiler.ExampleClass", EXAMPLE_CLASS);
Map<String, Class<?>> compiled = KieMemoryCompiler.compile(source, this.getClass().getClassLoader());
Class<?> exampleClazz = compiled.get("org.kie.memorycompiler.ExampleClass");
assertThat(exampleClazz).isNotNull();
Object instance = exampleClazz.getDeclaredConstructors()[0].newInstance();
Method sumMethod = exampleClazz.getMethod("sum", Integer.class, Integer.class);
Object result = sumMethod.invoke(instance, 2, 3);
assertThat(result).isEqualTo(5);
} |
Record convert(Object data) {
return convert(data, null);
} | @Test
public void testMissingColumnDetectionMapNested() {
Table table = mock(Table.class);
when(table.schema()).thenReturn(ID_SCHEMA);
RecordConverter converter = new RecordConverter(table, config);
Map<String, Object> nestedData = createNestedMapData();
SchemaUpdate.Consumer consumer = new SchemaUpdate.Consumer();
converter.convert(nestedData, consumer);
Collection<AddColumn> addCols = consumer.addColumns();
assertThat(addCols).hasSize(1);
AddColumn addCol = addCols.iterator().next();
assertThat(addCol.name()).isEqualTo("st");
StructType addedType = addCol.type().asStructType();
assertThat(addedType.fields()).hasSize(MAPPED_CNT);
assertTypesAddedFromMap(col -> addedType.field(col).type());
} |
public boolean accepts( String fileName ) {
if ( fileName == null || fileName.indexOf( '.' ) == -1 ) {
return false;
}
String extension = fileName.substring( fileName.lastIndexOf( '.' ) + 1 );
return extension.equals( "ktr" );
} | @Test
public void testAccepts() throws Exception {
assertFalse( transFileListener.accepts( null ) );
assertFalse( transFileListener.accepts( "NoDot" ) );
assertTrue( transFileListener.accepts( "Trans.ktr" ) );
assertTrue( transFileListener.accepts( ".ktr" ) );
} |
public NumericIndicator down() {
return down;
} | @Test
public void testCreation() {
final AroonFacade facade = new AroonFacade(data, 5);
assertEquals(data, facade.down().getBarSeries());
} |
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
// Set of Visited Schemas
IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
// Stack that contains the Schemas to process and afterVisitNonTerminal
// functions.
// Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
// Using Either<...> has a cost we want to avoid...
Deque<Object> dq = new ArrayDeque<>();
dq.push(start);
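    // Iterative depth-first traversal: Schema entries on the deque are schemas
    // still to visit; Supplier entries are deferred post-visit callbacks for
    // non-terminal schemas that have already been entered.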
Object current;
while ((current = dq.poll()) != null) {
if (current instanceof Supplier) {
// We are executing a non-terminal post visit.
SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
switch (action) {
case CONTINUE:
break;
case SKIP_SIBLINGS:
while (dq.peek() instanceof Schema) {
dq.remove();
}
break;
case TERMINATE:
return visitor.get();
case SKIP_SUBTREE:
default:
throw new UnsupportedOperationException("Invalid action " + action);
}
} else {
Schema schema = (Schema) current;
boolean terminate;
if (visited.containsKey(schema)) {
terminate = visitTerminal(visitor, schema, dq);
} else {
Schema.Type type = schema.getType();
switch (type) {
case ARRAY:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
visited.put(schema, schema);
break;
case RECORD:
terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
.collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
visited.put(schema, schema);
break;
case UNION:
terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
visited.put(schema, schema);
break;
case MAP:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
visited.put(schema, schema);
break;
default:
terminate = visitTerminal(visitor, schema, dq);
break;
}
}
if (terminate) {
return visitor.get();
}
}
}
return visitor.get();
} | @Test
public void testVisit3() {
String s3 = "{\"type\": \"record\", \"name\": \"ss1\", \"fields\": [" + "{\"name\": \"f1\", \"type\": \"int\"}"
+ "]}";
Assert.assertEquals("ss1.", Schemas.visit(new Schema.Parser().parse(s3), new TestVisitor()));
} |
@Override
public void process() {
JMeterContext context = getThreadContext();
Sampler sam = context.getCurrentSampler();
SampleResult res = context.getPreviousResult();
HTTPSamplerBase sampler;
HTTPSampleResult result;
if (!(sam instanceof HTTPSamplerBase) || !(res instanceof HTTPSampleResult)) {
log.info("Can't apply HTML Link Parser when the previous" + " sampler run is not an HTTP Request.");
return;
} else {
sampler = (HTTPSamplerBase) sam;
result = (HTTPSampleResult) res;
}
List<HTTPSamplerBase> potentialLinks = new ArrayList<>();
String responseText = result.getResponseDataAsString();
int index = responseText.indexOf('<'); // $NON-NLS-1$
if (index == -1) {
index = 0;
}
if (log.isDebugEnabled()) {
log.debug("Check for matches against: "+sampler.toString());
}
Document html = (Document) HtmlParsingUtils.getDOM(responseText.substring(index));
addAnchorUrls(html, result, sampler, potentialLinks);
addFormUrls(html, result, sampler, potentialLinks);
addFramesetUrls(html, result, sampler, potentialLinks);
if (!potentialLinks.isEmpty()) {
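            // Pick one matching link at random and rewrite the current sampler
            // (domain, path, arguments, protocol) to follow it.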
HTTPSamplerBase url = potentialLinks.get(ThreadLocalRandom.current().nextInt(potentialLinks.size()));
if (log.isDebugEnabled()) {
log.debug("Selected: "+url.toString());
}
sampler.setDomain(url.getDomain());
sampler.setPath(url.getPath());
if (url.getMethod().equals(HTTPConstants.POST)) {
for (JMeterProperty jMeterProperty : sampler.getArguments()) {
Argument arg = (Argument) jMeterProperty.getObjectValue();
modifyArgument(arg, url.getArguments());
}
} else {
sampler.setArguments(url.getArguments());
}
sampler.setProtocol(url.getProtocol());
} else {
log.debug("No matches found");
}
} | @Test
public void testSimpleParse4() throws Exception {
HTTPSamplerBase config = makeUrlConfig("/subdir/index\\..*");
HTTPSamplerBase context = makeContext("http://www.apache.org/subdir/previous.html");
String responseText = "<html><head><title>Test page</title></head><body>"
+ "<A HREF=\"index.html\">Goto index page</A></body></html>";
HTTPSampleResult result = new HTTPSampleResult();
result.setResponseData(responseText, null);
result.setSampleLabel(context.toString());
result.setURL(context.getUrl());
jmctx.setCurrentSampler(context);
jmctx.setCurrentSampler(config);
jmctx.setPreviousResult(result);
parser.process();
String newUrl = config.getUrl().toString();
assertEquals("http://www.apache.org/subdir/index.html", newUrl);
} |
static String trimFieldsAndRemoveEmptyFields(String str) {
char[] chars = str.toCharArray();
char[] res = new char[chars.length];
/*
* set when reading the first non trimmable char after a separator char (or the beginning of the string)
* unset when reading a separator
*/
boolean inField = false;
boolean inQuotes = false;
int i = 0;
int resI = 0;
for (; i < chars.length; i++) {
boolean isSeparator = chars[i] == ',';
if (!inQuotes && isSeparator) {
// exiting field (may already be unset)
inField = false;
if (resI > 0) {
resI = retroTrim(res, resI);
}
} else {
boolean isTrimmed = !inQuotes && istrimmable(chars[i]);
if (isTrimmed && !inField) {
// we haven't meet any non trimmable char since the last separator yet
continue;
}
boolean isEscape = isEscapeChar(chars[i]);
if (isEscape) {
inQuotes = !inQuotes;
}
// add separator as we already had one field
if (!inField && resI > 0) {
res[resI] = ',';
resI++;
}
// register in field (may already be set)
inField = true;
// copy current char
res[resI] = chars[i];
resI++;
}
}
// inQuotes can only be true at this point if quotes are unbalanced
if (!inQuotes) {
// trim end of str
resI = retroTrim(res, resI);
}
return new String(res, 0, resI);
} | @Test
@UseDataProvider("emptyAndtrimmable")
public void trimFieldsAndRemoveEmptyFields_ignores_empty_fields_and_trims_fields(String empty, String trimmable) {
String expected = trimmable.trim();
assertThat(empty.trim()).isEmpty();
assertThat(trimFieldsAndRemoveEmptyFields(trimmable)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(trimmable + ',' + empty)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(trimmable + ",," + empty)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(empty + ',' + trimmable)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(empty + ",," + trimmable)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(empty + ',' + trimmable + ',' + empty)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(empty + ",," + trimmable + ",,," + empty)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(trimmable + ',' + empty + ',' + empty)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(trimmable + ",," + empty + ",,," + empty)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(empty + ',' + empty + ',' + trimmable)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(empty + ",,,," + empty + ",," + trimmable)).isEqualTo(expected);
assertThat(trimFieldsAndRemoveEmptyFields(trimmable + ',' + trimmable)).isEqualTo(expected + ',' + expected);
assertThat(trimFieldsAndRemoveEmptyFields(trimmable + ',' + trimmable + ',' + trimmable)).isEqualTo(expected + ',' + expected + ',' + expected);
assertThat(trimFieldsAndRemoveEmptyFields(trimmable + "," + trimmable + ',' + trimmable)).isEqualTo(expected + ',' + expected + ',' + expected);
} |
@Override
public List<ProviderGroup> subscribe(ConsumerConfig config) {
String directUrl = config.getDirectUrl();
if (StringUtils.isNotBlank(directUrl)) {
List<ConsumerConfig> listeners = notifyListeners.get(directUrl);
if (listeners == null) {
notifyListeners.putIfAbsent(directUrl, new CopyOnWriteArrayList<>());
listeners = notifyListeners.get(directUrl);
}
listeners.add(config);
ProviderGroup directGroup = getDirectGroup(directUrl);
ArrayList<ProviderGroup> providerGroups = new ArrayList<>();
providerGroups.add(directGroup);
return providerGroups;
} else {
return null;
}
} | @Test
public void testSubscribe() {
ConsumerConfig<Object> consumerConfig = new ConsumerConfig<>();
String directUrl = "bolt://alipay.com";
consumerConfig.setDirectUrl(directUrl);
List<ProviderGroup> providerGroups = domainRegistry.subscribe(consumerConfig);
assertTrue(domainRegistry.notifyListeners.containsKey(directUrl));
assertSame(consumerConfig, domainRegistry.notifyListeners.get(directUrl).get(0));
assertEquals(1, providerGroups.size());
ProviderGroup providerGroup = providerGroups.get(0);
assertEquals(RpcConstants.ADDRESS_DIRECT_GROUP, providerGroup.getName());
ConsumerConfig<Object> notDirect = new ConsumerConfig<>();
notDirect.setDirectUrl("");
domainRegistry.subscribe(notDirect);
assertFalse(domainRegistry.notifyListeners.containsKey(""));
ConsumerConfig<Object> ipConfig = new ConsumerConfig<>();
String ip = "bolt://127.0.0.1";
ipConfig.setDirectUrl(ip);
List<ProviderGroup> ipProviderGroup = domainRegistry.subscribe(ipConfig);
assertTrue(domainRegistry.notifyListeners.containsKey(directUrl));
assertSame(consumerConfig, domainRegistry.notifyListeners.get(directUrl).get(0));
assertEquals(1, ipProviderGroup.size());
ProviderGroup ipGroup = ipProviderGroup.get(0);
assertEquals(RpcConstants.ADDRESS_DIRECT_GROUP, ipGroup.getName());
assertEquals(1, ipGroup.getProviderInfos().size());
assertEquals("127.0.0.1", ipGroup.getProviderInfos().get(0).getHost());
} |
boolean hasFile(String fileReference) {
return hasFile(new FileReference(fileReference));
} | @Test
public void requireThatExistingFileIsFound() throws IOException {
String dir = "123";
writeFile(dir);
assertTrue(fileServer.hasFile(dir));
} |
@Override
public Processor<K, Change<V>, KO, SubscriptionWrapper<K>> get() {
return new UnbindChangeProcessor();
} | @Test
public void leftJoinShouldPropagateDeletionOfAPrimaryKeyThatHadNullFK() {
final MockInternalNewProcessorContext<String, SubscriptionWrapper<String>> context = new MockInternalNewProcessorContext<>();
leftJoinProcessor.init(context);
context.setRecordMetadata("topic", 0, 0);
leftJoinProcessor.process(new Record<>(pk, new Change<>(null, new LeftValue(null)), 0));
assertThat(context.forwarded().size(), is(1));
assertThat(
context.forwarded().get(0).record(),
is(new Record<>(null, new SubscriptionWrapper<>(null, PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0))
);
} |
@Override
public void consume(Update update) {
super.consume(update);
} | @Test
void sendsLocalityViolation() {
Update update = mockFullUpdate(bot, USER, "/group");
bot.consume(update);
verify(silent, times(1)).send(format("Sorry, %s-only feature.", "group"), USER.getId());
} |
public void updateBatchSize(int size) {
batchSizesUpdater.accept(size);
} | @Test
public void testUpdateBatchSize() {
MetricsRegistry registry = new MetricsRegistry();
try (FakeMetadataLoaderMetrics fakeMetrics = new FakeMetadataLoaderMetrics(registry)) {
fakeMetrics.metrics.updateBatchSize(50);
assertEquals(50, fakeMetrics.batchSize.get());
}
} |
public static void delete(final File file, final boolean ignoreFailures)
{
if (file.exists())
{
if (file.isDirectory())
{
final File[] files = file.listFiles();
if (null != files)
{
for (final File f : files)
{
delete(f, ignoreFailures);
}
}
}
if (!file.delete() && !ignoreFailures)
{
try
{
Files.delete(file.toPath());
}
catch (final IOException ex)
{
LangUtil.rethrowUnchecked(ex);
}
}
}
} | @Test
void deleteIgnoreFailuresShouldThrowExceptionIfDeleteOfAFileFails()
{
final File file = mock(File.class);
when(file.exists()).thenReturn(true);
when(file.delete()).thenReturn(false);
assertThrows(NullPointerException.class, () -> IoUtil.delete(file, false));
} |
public static int bytesToShortLE(byte[] bytes, int off) {
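        // Example (matches the test below): little-endian bytes {0xC7, 0xCF}
        // decode to -12345, because the high byte is sign-extended by the shift.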
return (bytes[off + 1] << 8) + (bytes[off] & 255);
} | @Test
public void testBytesToShortLE() {
assertEquals(-12345,
ByteUtils.bytesToShortLE(SHORT_12345_LE, 0));
} |
@Override
public int getOriginalPort() {
try {
return getOriginalPort(getContext(), getHeaders(), getPort());
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
} | @Test
void getOriginalPort_EmptyXFFPort() throws URISyntaxException {
Headers headers = new Headers();
headers.add(HttpHeaderNames.X_FORWARDED_PORT, "");
// Default to using server port
assertEquals(9999, HttpRequestMessageImpl.getOriginalPort(new SessionContext(), headers, 9999));
} |
public static String resolveIpAddress(String hostname) throws UnknownHostException {
Preconditions.checkNotNull(hostname, "hostname");
Preconditions.checkArgument(!hostname.isEmpty(),
"Cannot resolve IP address for empty hostname");
return InetAddress.getByName(hostname).getHostAddress();
} | @Test
public void resolveIpAddress() throws UnknownHostException {
assertEquals(NetworkAddressUtils.resolveIpAddress("localhost"), "127.0.0.1");
assertEquals(NetworkAddressUtils.resolveIpAddress("127.0.0.1"), "127.0.0.1");
} |
public ChannelFuture close() {
return close(ctx().newPromise());
} | @Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void writingAfterClosedChannelDoesNotNPE() throws InterruptedException {
EventLoopGroup group = new NioEventLoopGroup(2);
Channel serverChannel = null;
Channel clientChannel = null;
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Throwable> writeFailCauseRef = new AtomicReference<Throwable>();
try {
ServerBootstrap sb = new ServerBootstrap();
sb.group(group);
sb.channel(NioServerSocketChannel.class);
sb.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
}
});
Bootstrap bs = new Bootstrap();
bs.group(group);
bs.channel(NioSocketChannel.class);
bs.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.pipeline().addLast(new Lz4FrameEncoder());
}
});
serverChannel = sb.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
clientChannel = bs.connect(serverChannel.localAddress()).syncUninterruptibly().channel();
final Channel finalClientChannel = clientChannel;
clientChannel.eventLoop().execute(new Runnable() {
@Override
public void run() {
finalClientChannel.close();
final int size = 27;
ByteBuf buf = ByteBufAllocator.DEFAULT.buffer(size, size);
finalClientChannel.writeAndFlush(buf.writerIndex(buf.writerIndex() + size))
.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
try {
writeFailCauseRef.set(future.cause());
} finally {
latch.countDown();
}
}
});
}
});
latch.await();
Throwable writeFailCause = writeFailCauseRef.get();
assertNotNull(writeFailCause);
Throwable writeFailCauseCause = writeFailCause.getCause();
if (writeFailCauseCause != null) {
assertThat(writeFailCauseCause, is(not(instanceOf(NullPointerException.class))));
}
} finally {
if (serverChannel != null) {
serverChannel.close();
}
if (clientChannel != null) {
clientChannel.close();
}
group.shutdownGracefully();
}
} |
@NonNull @VisibleForTesting
static String[] getPermissionsStrings(int requestCode) {
switch (requestCode) {
case CONTACTS_PERMISSION_REQUEST_CODE -> {
return new String[] {Manifest.permission.READ_CONTACTS};
}
case NOTIFICATION_PERMISSION_REQUEST_CODE -> {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
return new String[] {Manifest.permission.POST_NOTIFICATIONS};
} else {
return new String[0];
}
}
default -> throw new IllegalArgumentException("Unknown request code " + requestCode);
}
} | @Test
public void testGetPermissionsStringsContacts() {
Assert.assertArrayEquals(
new String[] {Manifest.permission.READ_CONTACTS},
PermissionRequestHelper.getPermissionsStrings(
PermissionRequestHelper.CONTACTS_PERMISSION_REQUEST_CODE));
} |
public static String getSystemProperty(String key) {
try {
return System.getProperty(key);
} catch (Throwable t) {
return null;
}
} | @Test
public void testGetSystemProperty() {
Assert.assertNull(EagleEyeCoreUtils.getSystemProperty(null));
Assert.assertNull(EagleEyeCoreUtils.getSystemProperty("foo"));
} |
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
} | @Test
public void kTableNonMaterializedMapValuesShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.mapValues((readOnlyKey, value) -> null);
final TopologyDescription describe = builder.build().describe();
assertEquals("Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-MAPVALUES-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-MAPVALUES-0000000003 (stores: [])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n\n", describe.toString());
} |
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getMainConsumerConfigs(final String groupId, final String clientId, final int threadIdx) {
final Map<String, Object> consumerProps = getCommonConsumerConfigs();
// Get main consumer override configs
final Map<String, Object> mainConsumerProps = originalsWithPrefix(MAIN_CONSUMER_PREFIX);
consumerProps.putAll(mainConsumerProps);
// this is a hack to work around StreamsConfig constructor inside StreamsPartitionAssignor to avoid casting
consumerProps.put(APPLICATION_ID_CONFIG, groupId);
// add group id, client id with stream client id prefix, and group instance id
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
final String groupInstanceId = (String) consumerProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
// Suffix each thread consumer with thread.id to enforce uniqueness of group.instance.id.
if (groupInstanceId != null) {
consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId + "-" + threadIdx);
}
// add configs required for stream partition assignor
consumerProps.put(UPGRADE_FROM_CONFIG, getString(UPGRADE_FROM_CONFIG));
consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG));
consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG));
consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG));
consumerProps.put(ACCEPTABLE_RECOVERY_LAG_CONFIG, getLong(ACCEPTABLE_RECOVERY_LAG_CONFIG));
consumerProps.put(MAX_WARMUP_REPLICAS_CONFIG, getInt(MAX_WARMUP_REPLICAS_CONFIG));
consumerProps.put(PROBING_REBALANCE_INTERVAL_MS_CONFIG, getLong(PROBING_REBALANCE_INTERVAL_MS_CONFIG));
consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamsPartitionAssignor.class.getName());
consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, getString(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, getList(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG));
consumerProps.put(TASK_ASSIGNOR_CLASS_CONFIG, getString(TASK_ASSIGNOR_CLASS_CONFIG));
// disable auto topic creation
consumerProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
// verify that producer batch config is no larger than segment size, then add topic configs required for creating topics
final Map<String, Object> topicProps = originalsWithPrefix(TOPIC_PREFIX, false);
final Map<String, Object> producerProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames());
if (topicProps.containsKey(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)) &&
producerProps.containsKey(ProducerConfig.BATCH_SIZE_CONFIG)) {
final int segmentSize = Integer.parseInt(topicProps.get(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)).toString());
final int batchSize = Integer.parseInt(producerProps.get(ProducerConfig.BATCH_SIZE_CONFIG).toString());
if (segmentSize < batchSize) {
throw new IllegalArgumentException(String.format("Specified topic segment size %d is is smaller than the configured producer batch size %d, this will cause produced batch not able to be appended to the topic",
segmentSize,
batchSize));
}
}
consumerProps.putAll(topicProps);
return consumerProps;
} | @Test
public void shouldBeSupportNonPrefixedConsumerConfigs() {
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG, 1);
final StreamsConfig streamsConfig = new StreamsConfig(props);
final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx);
assertEquals("earliest", consumerConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
assertEquals(1, consumerConfigs.get(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG));
} |
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
protobuf.setName("System");
setAttribute(protobuf, "Server ID", server.getId());
setAttribute(protobuf, "Version", getVersion());
setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel());
setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode());
setAttribute(protobuf, "Container", containerSupport.isRunningInContainer());
setAttribute(protobuf, "External Users and Groups Provisioning", commonSystemInformation.getManagedInstanceProviderName());
setAttribute(protobuf, "External User Authentication", commonSystemInformation.getExternalUserAuthentication());
addIfNotEmpty(protobuf, "Accepted external identity providers",
commonSystemInformation.getEnabledIdentityProviders());
addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up",
commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders());
setAttribute(protobuf, "High Availability", false);
setAttribute(protobuf, "Official Distribution", officialDistribution.check());
setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication());
setAttribute(protobuf, "Home Dir", config.get(PATH_HOME.getKey()).orElse(null));
setAttribute(protobuf, "Data Dir", config.get(PATH_DATA.getKey()).orElse(null));
setAttribute(protobuf, "Temp Dir", config.get(PATH_TEMP.getKey()).orElse(null));
setAttribute(protobuf, "Processors", Runtime.getRuntime().availableProcessors());
return protobuf.build();
} | @Test
public void toProtobuf_whenExternalUserAuthentication_shouldWriteIt() {
when(commonSystemInformation.getExternalUserAuthentication()).thenReturn("LDAP");
ProtobufSystemInfo.Section protobuf = underTest.toProtobuf();
assertThatAttributeIs(protobuf, "External User Authentication", "LDAP");
} |
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
} | @Test
public void testMultipleMissingGettersThrows() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(
"missing property methods on [org.apache.beam.sdk.options."
+ "PipelineOptionsFactoryTest$MissingMultipleGetters]");
expectedException.expectMessage("getter for property [object] of type [java.lang.Object]");
expectedException.expectMessage("getter for property [otherObject] of type [java.lang.Object]");
PipelineOptionsFactory.as(MissingMultipleGetters.class);
} |
@Override
public void afterMethod(final TargetAdviceObject target, final TargetAdviceMethod method, final Object[] args, final Object result, final String pluginType) {
MetricsCollectorRegistry.<CounterMetricsCollector>get(config, pluginType).inc();
} | @Test
void assertCountExecuteErrors() {
TargetAdviceObjectFixture targetObject = new TargetAdviceObjectFixture();
new ExecuteErrorsCountAdvice().afterMethod(targetObject, mock(TargetAdviceMethod.class), new Object[]{}, null, "FIXTURE");
assertThat(MetricsCollectorRegistry.get(config, "FIXTURE").toString(), is("1"));
} |
@Override
public boolean registerThreadPoolPluginSupport(@NonNull ThreadPoolPluginSupport support) {
if (!managedThreadPoolPluginSupports.containsKey(support.getThreadPoolId())) {
enableThreadPoolPluginRegistrars.values().forEach(registrar -> registrar.doRegister(support));
enableThreadPoolPlugins.values().forEach(support::tryRegister);
managedThreadPoolPluginSupports.put(support.getThreadPoolId(), support);
return true;
}
return false;
} | @Test
public void testRegisterThreadPoolPluginSupport() {
GlobalThreadPoolPluginManager manager = new DefaultGlobalThreadPoolPluginManager();
Assert.assertTrue(manager.enableThreadPoolPlugin(new TestPlugin("1")));
TestSupport support = new TestSupport("1");
Assert.assertTrue(manager.registerThreadPoolPluginSupport(support));
Assert.assertFalse(manager.registerThreadPoolPluginSupport(support));
Assert.assertEquals(1, support.getAllPlugins().size());
// incremental update
manager.enableThreadPoolPlugin(new TestPlugin("2"));
manager.enableThreadPoolPluginRegistrar(new TestRegistrar("1"));
Assert.assertEquals(3, support.getAllPlugins().size());
} |
public void shutdown(long awaitTerminateMillis) {
this.scheduledExecutorService.shutdown();
ThreadUtils.shutdownGracefully(this.consumeExecutor, awaitTerminateMillis, TimeUnit.MILLISECONDS);
} | @Test
public void testShutdown() throws IllegalAccessException {
popService.shutdown(3000L);
Field scheduledExecutorServiceField = FieldUtils.getDeclaredField(popService.getClass(), "scheduledExecutorService", true);
Field consumeExecutorField = FieldUtils.getDeclaredField(popService.getClass(), "consumeExecutor", true);
ScheduledExecutorService scheduledExecutorService = (ScheduledExecutorService) scheduledExecutorServiceField.get(popService);
ThreadPoolExecutor consumeExecutor = (ThreadPoolExecutor) consumeExecutorField.get(popService);
assertTrue(scheduledExecutorService.isShutdown());
assertTrue(scheduledExecutorService.isTerminated());
assertTrue(consumeExecutor.isShutdown());
assertTrue(consumeExecutor.isTerminated());
} |
@Override
public Collection<GroupDto> getGroups() {
return emptySet();
} | @Test
public void getGroups() {
assertThat(githubWebhookUserSession.getGroups()).isEmpty();
} |
@Override
public QualityGate findEffectiveQualityGate(Project project) {
return findQualityGate(project).orElseGet(this::findDefaultQualityGate);
} | @Test
public void findQualityGate_by_project_found() {
QualityGateDto qualityGateDto = new QualityGateDto();
qualityGateDto.setUuid(QUALITY_GATE_DTO.getUuid());
qualityGateDto.setName(QUALITY_GATE_DTO.getName());
when(qualityGateDao.selectByProjectUuid(any(), any())).thenReturn(qualityGateDto);
when(qualityGateConditionDao.selectForQualityGate(any(), eq(SOME_UUID))).thenReturn(ImmutableList.of(CONDITION_1, CONDITION_2));
when(metricRepository.getOptionalByUuid(METRIC_UUID_1)).thenReturn(Optional.empty());
when(metricRepository.getOptionalByUuid(METRIC_UUID_2)).thenReturn(Optional.of(METRIC_2));
QualityGate result = underTest.findEffectiveQualityGate(mock(Project.class));
assertThat(result.getUuid()).isEqualTo(QUALITY_GATE_DTO.getUuid());
assertThat(result.getName()).isEqualTo(QUALITY_GATE_DTO.getName());
} |
@Override
public void process() {
JMeterContext context = getThreadContext();
Sampler sam = context.getCurrentSampler();
SampleResult res = context.getPreviousResult();
HTTPSamplerBase sampler;
HTTPSampleResult result;
if (!(sam instanceof HTTPSamplerBase) || !(res instanceof HTTPSampleResult)) {
log.info("Can't apply HTML Link Parser when the previous" + " sampler run is not an HTTP Request.");
return;
} else {
sampler = (HTTPSamplerBase) sam;
result = (HTTPSampleResult) res;
}
List<HTTPSamplerBase> potentialLinks = new ArrayList<>();
String responseText = result.getResponseDataAsString();
int index = responseText.indexOf('<'); // $NON-NLS-1$
if (index == -1) {
index = 0;
}
if (log.isDebugEnabled()) {
log.debug("Check for matches against: "+sampler.toString());
}
Document html = (Document) HtmlParsingUtils.getDOM(responseText.substring(index));
addAnchorUrls(html, result, sampler, potentialLinks);
addFormUrls(html, result, sampler, potentialLinks);
addFramesetUrls(html, result, sampler, potentialLinks);
if (!potentialLinks.isEmpty()) {
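            // Pick one matching link at random and rewrite the current sampler
            // (domain, path, arguments, protocol) to follow it.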
HTTPSamplerBase url = potentialLinks.get(ThreadLocalRandom.current().nextInt(potentialLinks.size()));
if (log.isDebugEnabled()) {
log.debug("Selected: "+url.toString());
}
sampler.setDomain(url.getDomain());
sampler.setPath(url.getPath());
if (url.getMethod().equals(HTTPConstants.POST)) {
for (JMeterProperty jMeterProperty : sampler.getArguments()) {
Argument arg = (Argument) jMeterProperty.getObjectValue();
modifyArgument(arg, url.getArguments());
}
} else {
sampler.setArguments(url.getArguments());
}
sampler.setProtocol(url.getProtocol());
} else {
log.debug("No matches found");
}
} | @Test
public void testSimpleParse5() throws Exception {
HTTPSamplerBase config = makeUrlConfig("/subdir/index\\.h.*");
HTTPSamplerBase context = makeContext("http://www.apache.org/subdir/one/previous.html");
String responseText = "<html><head><title>Test page</title></head><body>"
+ "<a href=\"../index.html\">Goto index page</a></body></html>";
HTTPSampleResult result = new HTTPSampleResult();
result.setResponseData(responseText, null);
result.setSampleLabel(context.toString());
result.setURL(context.getUrl());
jmctx.setCurrentSampler(context);
jmctx.setCurrentSampler(config);
jmctx.setPreviousResult(result);
parser.process();
String newUrl = config.getUrl().toString();
assertEquals("http://www.apache.org/subdir/index.html", newUrl);
} |
@Override
public Object toKsqlRow(final Schema connectSchema, final Object connectData) {
if (connectData == null) {
return null;
}
return toKsqlValue(schema, connectSchema, connectData, "");
} | @Test
public void shouldTranslateMissingStructFieldToNull() {
// Given:
final Schema structSchema = SchemaBuilder
.struct()
.field("INT", SchemaBuilder.OPTIONAL_INT32_SCHEMA)
.optional()
.build();
final Schema rowSchema = SchemaBuilder
.struct()
.field("STRUCT", structSchema)
.optional()
.build();
final Schema dataRowSchema = SchemaBuilder
.struct()
.field("OTHER", SchemaBuilder.OPTIONAL_INT32_SCHEMA)
.optional()
.build();
final Struct connectStruct = new Struct(dataRowSchema);
connectStruct.put("OTHER", 123);
final ConnectDataTranslator connectToKsqlTranslator = new ConnectDataTranslator(rowSchema);
// When:
final Struct row = (Struct) connectToKsqlTranslator.toKsqlRow(dataRowSchema, connectStruct);
// Then:
assertThat(row.schema(), is(rowSchema));
assertThat(row.get("STRUCT"), is(nullValue()));
} |
public static ExtensibleLoadManagerImpl get(LoadManager loadManager) {
if (!(loadManager instanceof ExtensibleLoadManagerWrapper loadManagerWrapper)) {
throw new IllegalArgumentException("The load manager should be 'ExtensibleLoadManagerWrapper'.");
}
return loadManagerWrapper.get();
} | @Test(timeOut = 30 * 1000)
public void testRoleChange() throws Exception {
makePrimaryAsLeader();
var leader = primaryLoadManager;
var follower = secondaryLoadManager;
BrokerLoadData brokerLoadExpected = new BrokerLoadData();
SystemResourceUsage usage = new SystemResourceUsage();
var cpu = new ResourceUsage(1.0, 100.0);
String key = "b1";
usage.setCpu(cpu);
brokerLoadExpected.update(usage, 0, 0, 0, 0, 0, 0, conf);
String bundle = "public/default/0x00000000_0xffffffff";
TopBundlesLoadData topBundlesExpected = new TopBundlesLoadData();
topBundlesExpected.getTopBundlesLoadData().clear();
topBundlesExpected.getTopBundlesLoadData().add(new TopBundlesLoadData.BundleLoadData(bundle, new NamespaceBundleStats()));
follower.getBrokerLoadDataStore().pushAsync(key, brokerLoadExpected);
follower.getTopBundlesLoadDataStore().pushAsync(bundle, topBundlesExpected);
Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> {
assertNotNull(FieldUtils.readDeclaredField(leader.getTopBundlesLoadDataStore(), "tableView", true));
assertNull(FieldUtils.readDeclaredField(follower.getTopBundlesLoadDataStore(), "tableView", true));
for (String internalTopic : ExtensibleLoadManagerImpl.INTERNAL_TOPICS) {
assertTrue(leader.pulsar.getBrokerService().getTopicReference(internalTopic)
.isPresent());
assertTrue(follower.pulsar.getBrokerService().getTopicReference(internalTopic)
.isEmpty());
assertTrue(leader.pulsar.getNamespaceService()
.isServiceUnitOwnedAsync(TopicName.get(internalTopic)).get());
assertFalse(follower.pulsar.getNamespaceService()
.isServiceUnitOwnedAsync(TopicName.get(internalTopic)).get());
}
var actualBrokerLoadLeader = leader.getBrokerLoadDataStore().get(key);
if (actualBrokerLoadLeader.isPresent()) {
assertEquals(actualBrokerLoadLeader.get(), brokerLoadExpected);
}
var actualTopBundlesLeader = leader.getTopBundlesLoadDataStore().get(bundle);
if (actualTopBundlesLeader.isPresent()) {
assertEquals(actualTopBundlesLeader.get(), topBundlesExpected);
}
var actualBrokerLoadFollower = follower.getBrokerLoadDataStore().get(key);
if (actualBrokerLoadFollower.isPresent()) {
assertEquals(actualBrokerLoadFollower.get(), brokerLoadExpected);
}
});
makeSecondaryAsLeader();
var leader2 = secondaryLoadManager;
var follower2 = primaryLoadManager;
brokerLoadExpected.update(usage, 1, 0, 0, 0, 0, 0, conf);
topBundlesExpected.getTopBundlesLoadData().get(0).stats().msgRateIn = 1;
follower.getBrokerLoadDataStore().pushAsync(key, brokerLoadExpected);
follower.getTopBundlesLoadDataStore().pushAsync(bundle, topBundlesExpected);
Awaitility.await().atMost(30, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(() -> {
assertNotNull(FieldUtils.readDeclaredField(leader2.getTopBundlesLoadDataStore(), "tableView", true));
assertNull(FieldUtils.readDeclaredField(follower2.getTopBundlesLoadDataStore(), "tableView", true));
for (String internalTopic : ExtensibleLoadManagerImpl.INTERNAL_TOPICS) {
assertTrue(leader2.pulsar.getBrokerService().getTopicReference(internalTopic)
.isPresent());
assertTrue(follower2.pulsar.getBrokerService().getTopicReference(internalTopic)
.isEmpty());
assertTrue(leader2.pulsar.getNamespaceService()
.isServiceUnitOwnedAsync(TopicName.get(internalTopic)).get());
assertFalse(follower2.pulsar.getNamespaceService()
.isServiceUnitOwnedAsync(TopicName.get(internalTopic)).get());
}
var actualBrokerLoadLeader = leader2.getBrokerLoadDataStore().get(key);
assertEquals(actualBrokerLoadLeader.get(), brokerLoadExpected);
var actualTopBundlesLeader = leader2.getTopBundlesLoadDataStore().get(bundle);
assertEquals(actualTopBundlesLeader.get(), topBundlesExpected);
var actualBrokerLoadFollower = follower2.getBrokerLoadDataStore().get(key);
assertEquals(actualBrokerLoadFollower.get(), brokerLoadExpected);
});
} |
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {
String propertyTypeName = getTypeName(node);
JType type;
if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {
type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
} else if (node.has("existingJavaType")) {
String typeName = node.path("existingJavaType").asText();
if (isPrimitive(typeName, jClassContainer.owner())) {
type = primitiveType(typeName, jClassContainer.owner());
} else {
type = resolveType(jClassContainer, typeName);
}
} else if (propertyTypeName.equals("string")) {
type = jClassContainer.owner().ref(String.class);
} else if (propertyTypeName.equals("number")) {
type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("integer")) {
type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("boolean")) {
type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("array")) {
type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
} else {
type = jClassContainer.owner().ref(Object.class);
}
if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
} else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
}
return type;
} | @Test
public void applyGeneratesBigDecimal() {
JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName());
ObjectNode objectNode = new ObjectMapper().createObjectNode();
objectNode.put("type", "number");
when(config.isUseBigDecimals()).thenReturn(true);
JType result = rule.apply("fooBar", objectNode, null, jpackage, null);
assertThat(result.fullName(), is(BigDecimal.class.getName()));
} |
public static UriTemplate create(String template, Charset charset) {
return new UriTemplate(template, true, charset);
} | @Test
void encodeLiterals() {
String template = "https://www.example.com/A Team";
UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
String expandedTemplate = uriTemplate.expand(Collections.emptyMap());
assertThat(expandedTemplate).isEqualToIgnoringCase("https://www.example.com/A%20Team");
} |
public static KTableHolder<GenericKey> build(
final KGroupedTableHolder groupedTable,
final TableAggregate aggregate,
final RuntimeBuildContext buildContext,
final MaterializedFactory materializedFactory) {
return build(
groupedTable,
aggregate,
buildContext,
materializedFactory,
new AggregateParamsFactory()
);
} | @Test
public void shouldBuildAggregatorParamsCorrectlyForAggregate() {
// When:
aggregate.build(planBuilder, planInfo);
// Then:
verify(aggregateParamsFactory).createUndoable(
INPUT_SCHEMA,
NON_AGG_COLUMNS,
functionRegistry,
FUNCTIONS,
KsqlConfig.empty()
);
} |
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return true;
}
try {
final Path found = this.search(file, listener);
return found != null;
}
catch(NotfoundException e) {
if(log.isDebugEnabled()) {
log.debug(String.format("Parent directory for file %s not found", file));
}
return false;
}
} | @Test
public void testFindPlaceholder() throws Exception {
assertTrue(new DefaultFindFeature(new NullSession(new Host(new TestProtocol())) {
@Override
public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
return new AttributedList<>(Collections.singletonList(new Path("/a/b", EnumSet.of(Path.Type.directory, Path.Type.placeholder))));
}
}).find(new Path("/a/b", EnumSet.of(Path.Type.directory))));
} |
public static void main(String[] args) {
// Loading the Logback configuration
loadLoggerConfiguration();
// Getting the bar series
BarSeries series = CsvTradesLoader.loadBitstampSeries();
// Building the trading strategy
Strategy strategy = CCICorrectionStrategy.buildStrategy(series);
// Running the strategy
BarSeriesManager seriesManager = new BarSeriesManager(series);
seriesManager.run(strategy);
// Unload the Logback configuration
unloadLoggerConfiguration();
} | @Test
public void test() {
StrategyExecutionLogging.main(null);
} |
protected static int getMaxRowSize( List<List<?>> results ) {
return results.stream().mapToInt( List::size ).max().getAsInt();
} | @Test
public void testFastJsonReaderGetMaxRowSize() throws KettleException {
List<List<Integer>> mainList = new ArrayList<>();
List<Integer> l1 = new ArrayList<>();
List<Integer> l2 = new ArrayList<>();
List<Integer> l3 = new ArrayList<>();
l1.add( 1 );
l2.add( 1 );
l2.add( 2 );
l3.add( 1 );
l3.add( 2 );
l3.add( 3 );
mainList.add( l1 );
mainList.add( l2 );
mainList.add( l3 );
assertEquals( 3, FastJsonReader.getMaxRowSize( Collections.singletonList( mainList ) ) );
} |
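
A self-contained worked sketch of the same computation on plain jagged lists (a hypothetical demo class, not part of FastJsonReader). One edge case worth noting: max() yields an empty OptionalInt for an empty outer list, so getAsInt() would throw NoSuchElementException — an observation about the stream idiom, not a documented contract of the method.

import java.util.Arrays;
import java.util.List;

class MaxRowSizeDemo {
    public static void main(String[] args) {
        List<List<?>> results = Arrays.asList(
                Arrays.asList(1),
                Arrays.asList(1, 2),
                Arrays.asList(1, 2, 3));
        // the widest row wins: prints 3, matching the assertion above
        System.out.println(results.stream().mapToInt(List::size).max().getAsInt());
    }
}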
public String getSubscriptionsForChannel(String subscribeChannel) {
return filterService.getFiltersForChannel(subscribeChannel).stream()
.map(PrioritizedFilter::toString)
.collect(Collectors.joining(",\n", "[", "]"));
} | @Test
void testGetSubscriptionsForChannel() {
String channel = "test";
service.getSubscriptionsForChannel(channel);
Mockito.verify(filterService, Mockito.times(1)).getFiltersForChannel(channel);
} |
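
The joining collector above produces a bracketed, comma-plus-newline separated list; a minimal standalone sketch of the same idiom (plain JDK streams, independent of the filter service):

import java.util.stream.Collectors;
import java.util.stream.Stream;

class JoinSketch {
    public static void main(String[] args) {
        String out = Stream.of("filterA", "filterB")
                .collect(Collectors.joining(",\n", "[", "]"));
        // prints "[filterA," on one line and "filterB]" on the next
        System.out.println(out);
    }
}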
public static void destroyAllShellProcesses() {
synchronized (CHILD_SHELLS) {
for (Shell shell : CHILD_SHELLS.keySet()) {
if (shell.getProcess() != null) {
shell.getProcess().destroy();
}
}
CHILD_SHELLS.clear();
}
} | @Test(timeout=120000)
public void testDestroyAllShellProcesses() throws Throwable {
Assume.assumeFalse(WINDOWS);
StringBuilder sleepCommand = new StringBuilder();
sleepCommand.append("sleep 200");
String[] shellCmd = {"bash", "-c", sleepCommand.toString()};
final ShellCommandExecutor shexc1 = new ShellCommandExecutor(shellCmd);
final ShellCommandExecutor shexc2 = new ShellCommandExecutor(shellCmd);
Thread shellThread1 = new Thread() {
@Override
public void run() {
try {
shexc1.execute();
} catch(IOException ioe) {
// ignore IOException from thread interrupt
}
}
};
Thread shellThread2 = new Thread() {
@Override
public void run() {
try {
shexc2.execute();
} catch(IOException ioe) {
// ignore IOException from thread interrupt
}
}
};
shellThread1.start();
shellThread2.start();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return shexc1.getProcess() != null;
}
}, 10, 10000);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return shexc2.getProcess() != null;
}
}, 10, 10000);
Shell.destroyAllShellProcesses();
shexc1.getProcess().waitFor();
shexc2.getProcess().waitFor();
} |
public static void setSyncInterval(JobConf job, int syncIntervalInBytes) {
job.setInt(SYNC_INTERVAL_KEY, syncIntervalInBytes);
} | @Test
void setSyncInterval() {
JobConf jobConf = new JobConf();
int newSyncInterval = 100000;
AvroOutputFormat.setSyncInterval(jobConf, newSyncInterval);
assertEquals(newSyncInterval, jobConf.getInt(AvroOutputFormat.SYNC_INTERVAL_KEY, -1));
} |
public static Http2MultiplexCodecBuilder forServer(ChannelHandler childHandler) {
return new Http2MultiplexCodecBuilder(true, childHandler);
} | @Test
public void testUnsharableHandler() {
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() throws Throwable {
Http2MultiplexCodecBuilder.forServer(new UnsharableChannelHandler());
}
});
} |
@ConstantFunction(name = "subtract", argTypes = {SMALLINT, SMALLINT}, returnType = SMALLINT, isMonotonic = true)
public static ConstantOperator subtractSmallInt(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createSmallInt((short) Math.subtractExact(first.getSmallint(), second.getSmallint()));
} | @Test
public void subtractSmallInt() {
assertEquals(0,
ScalarOperatorFunctions.subtractSmallInt(O_SI_10, O_SI_10).getSmallint());
} |
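
One subtlety worth a worked example: Math.subtractExact operates on the short operands widened to int, so it can never throw for SMALLINT inputs; it is the final (short) cast that silently wraps out-of-range results. The sketch below covers the arithmetic only, not the ConstantOperator API.

class SmallIntWrapDemo {
    public static void main(String[] args) {
        short a = Short.MIN_VALUE; // -32768
        short b = 1;
        // subtractExact sees -32768 - 1 = -32769, well inside int range,
        // so no ArithmeticException is raised; the cast wraps it to 32767
        short result = (short) Math.subtractExact(a, b);
        System.out.println(result); // 32767
    }
}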
@Override
public void accept(Props props) {
File homeDir = props.nonNullValueAsFile(PATH_HOME.getKey());
Provider provider = resolveProviderAndEnforceNonnullJdbcUrl(props);
String driverPath = driverPath(homeDir, provider);
props.set(JDBC_DRIVER_PATH.getKey(), driverPath);
} | @Test
public void checkAndComplete_sets_driver_path_for_mssql() throws Exception {
File driverFile = new File(homeDir, "lib/jdbc/mssql/sqljdbc4.jar");
FileUtils.touch(driverFile);
Props props = newProps(JDBC_URL.getKey(), "jdbc:sqlserver://localhost/sonar;SelectMethod=Cursor");
underTest.accept(props);
assertThat(props.nonNullValueAsFile(JDBC_DRIVER_PATH.getKey())).isEqualTo(driverFile);
} |
@Override
public TableDataConsistencyCheckResult swapToObject(final YamlTableDataConsistencyCheckResult yamlConfig) {
if (null == yamlConfig) {
return null;
}
if (!Strings.isNullOrEmpty(yamlConfig.getIgnoredType())) {
return new TableDataConsistencyCheckResult(TableDataConsistencyCheckIgnoredType.valueOf(yamlConfig.getIgnoredType()));
}
return new TableDataConsistencyCheckResult(yamlConfig.isMatched());
} | @Test
void assertSwapToObjectWithNullYamlTableDataConsistencyCheckResult() {
assertNull(yamlTableDataConsistencyCheckResultSwapper.swapToObject((YamlTableDataConsistencyCheckResult) null));
} |
@Override
public String getTaskType() {
return "null";
} | @Test
public void shouldKnowItsType() {
assertThat(new NullTask().getTaskType(), is("null"));
} |