focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k) |
---|---|
@Override
public AwsProxyResponse handle(Throwable ex) {
log.error("Called exception handler for:", ex);
// Print the stack trace as well, in case there is no appender configured or we are running inside SAM local, where the
// output needs to go to stderr.
ex.printStackTrace();
if (ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException) {
return new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR));
} else {
return new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR));
}
}
|
@Test
void streamHandle_InvalidRequestEventException_responseString()
throws IOException {
ByteArrayOutputStream respStream = new ByteArrayOutputStream();
exceptionHandler.handle(new InvalidRequestEventException(INVALID_REQUEST_MESSAGE, null), respStream);
assertNotNull(respStream);
assertTrue(respStream.size() > 0);
AwsProxyResponse resp = objectMapper.readValue(new ByteArrayInputStream(respStream.toByteArray()), AwsProxyResponse.class);
assertNotNull(resp);
String body = objectMapper.writeValueAsString(new ErrorModel(AwsProxyExceptionHandler.INTERNAL_SERVER_ERROR));
assertEquals(body, resp.getBody());
}
|
public static ObjectNode convertFromGHResponse(GHResponse ghResponse, TranslationMap translationMap, Locale locale,
DistanceConfig distanceConfig) {
ObjectNode json = JsonNodeFactory.instance.objectNode();
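// Note: the response layout produced here (routes, waypoints, code, uuid) mirrors the Mapbox Directions API format.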
if (ghResponse.hasErrors())
throw new IllegalStateException(
"If the response has errors, you should use the method NavigateResponseConverter#convertFromGHResponseError");
PointList waypoints = ghResponse.getBest().getWaypoints();
final ArrayNode routesJson = json.putArray("routes");
List<ResponsePath> paths = ghResponse.getAll();
for (int i = 0; i < paths.size(); i++) {
ResponsePath path = paths.get(i);
ObjectNode pathJson = routesJson.addObject();
putRouteInformation(pathJson, path, i, translationMap, locale, distanceConfig);
}
final ArrayNode waypointsJson = json.putArray("waypoints");
for (int i = 0; i < waypoints.size(); i++) {
ObjectNode waypointJson = waypointsJson.addObject();
// TODO get names
waypointJson.put("name", "");
putLocation(waypoints.getLat(i), waypoints.getLon(i), waypointJson);
}
json.put("code", "Ok");
// TODO: Maybe we need a different format... uuid: "cji4ja4f8004o6xrsta8w4p4h"
json.put("uuid", UUID.randomUUID().toString().replaceAll("-", ""));
return json;
}
|
@Test
@Disabled
public void alternativeRoutesTest() {
GHResponse rsp = hopper.route(new GHRequest(42.554851, 1.536198, 42.510071, 1.548128).setProfile(profile)
.setAlgorithm(Parameters.Algorithms.ALT_ROUTE));
assertEquals(2, rsp.getAll().size());
ObjectNode json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.ENGLISH, distanceConfig);
JsonNode routes = json.get("routes");
assertEquals(2, routes.size());
assertEquals("GraphHopper Route 0", routes.get(0).get("legs").get(0).get("summary").asText());
assertEquals("Avinguda Sant Antoni, CG-3", routes.get(1).get("legs").get(0).get("summary").asText());
}
|
public static void validateValue(Schema schema, Object value) {
validateValue(null, schema, value);
}
|
@Test
public void testValidateValueMismatchArray() {
assertThrows(DataException.class,
() -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList("a", "b", "c")));
}
|
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
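// Touch creates an empty file, so attach the checksum of a zero-byte stream to the status before delegating.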
return super.touch(file, status.withChecksum(write.checksum(file, status).compute(new NullInputStream(0L), status)));
}
|
@Test
public void testTouchVersioning() throws Exception {
final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path file = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
final String version1 = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(file, new TransferStatus()).attributes().getVersionId();
assertNotNull(version1);
assertEquals(version1, new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(file).getVersionId());
final String version2 = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(file, new TransferStatus()).attributes().getVersionId();
assertNotNull(version2);
assertEquals(version2, new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(file).getVersionId());
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(file));
assertTrue(new DefaultFindFeature(session).find(file));
assertTrue(new DefaultFindFeature(session).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version1))));
assertTrue(new DefaultFindFeature(session).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version2))));
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version1))));
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version2))));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(new Path(file).withAttributes(PathAttributes.EMPTY)), new DisabledLoginCallback(), new Delete.DisabledCallback());
// Versioned files are not deleted; a delete marker is added instead
assertTrue(new DefaultFindFeature(session).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version1))));
assertTrue(new DefaultFindFeature(session).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version2))));
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version1))));
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(new Path(file.getParent(), file.getName(), file.getType(),
new PathAttributes(file.attributes()).withVersionId(version2))));
}
|
@PostMapping
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE)
public Boolean createNamespace(@RequestParam("customNamespaceId") String namespaceId,
@RequestParam("namespaceName") String namespaceName,
@RequestParam(value = "namespaceDesc", required = false) String namespaceDesc) {
if (StringUtils.isBlank(namespaceId)) {
namespaceId = UUID.randomUUID().toString();
} else {
namespaceId = namespaceId.trim();
if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) {
return false;
}
if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) {
return false;
}
// check unique
if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) {
return false;
}
}
// reject namespace names that contain illegal characters
if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) {
return false;
}
try {
return namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc);
} catch (NacosException e) {
return false;
}
}
|
@Test
void testCreateNamespaceWithAutoId() throws Exception {
assertFalse(namespaceController.createNamespace("", "testName", "testDesc"));
verify(namespaceOperationService).createNamespace(
matches("[A-Za-z\\d]{8}-[A-Za-z\\d]{4}-[A-Za-z\\d]{4}-[A-Za-z\\d]{4}-[A-Za-z\\d]{12}"), eq("testName"), eq("testDesc"));
}
|
public static ResourceModel processResource(final Class<?> resourceClass)
{
return processResource(resourceClass, null);
}
|
@Test(expectedExceptions = ResourceConfigException.class)
public void failsOnNonPublicCreateMethod()
{
@RestLiCollection(name = "nonPublicCreateMethod")
class LocalClass extends CollectionResourceTemplate<Long, EmptyRecord>
{
@RestMethod.Create
CreateResponse protectedCreate(EmptyRecord entity)
{
return new CreateResponse(HttpStatus.S_200_OK);
}
}
RestLiAnnotationReader.processResource(LocalClass.class);
Assert.fail("#addCrudResourceMethod should fail throwing a ResourceConfigException");
}
|
@Deprecated
public static DnsServerAddresses defaultAddresses() {
return DefaultDnsServerAddressStreamProvider.defaultAddresses();
}
|
@Test
public void testDefaultAddresses() {
assertThat(defaultAddressList().size(), is(greaterThan(0)));
}
|
public static java.util.regex.Pattern compilePattern(String expression) {
return compilePattern(expression, 0);
}
|
@Test
void testCompilePatternInvalid() {
assertThrows(PatternSyntaxException.class, () -> JMeterUtils.compilePattern("[missing closing bracket"));
}
|
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
DecimalColumnStatsDataInspector aggregateData = decimalInspectorFromStats(aggregateColStats);
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(newColStats);
Decimal lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData));
if (lowValue != null) {
aggregateData.setLowValue(lowValue);
}
Decimal highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData));
if (highValue != null) {
aggregateData.setHighValue(highValue);
}
aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator();
NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator();
List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst);
aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(),
ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs()));
aggregateData.setNdvEstimator(ndvEstimatorsList.get(0));
KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator();
KllHistogramEstimator newKllEst = newData.getHistogramEstimator();
aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst));
aggregateColStats.getStatsData().setDecimalStats(aggregateData);
}
|
@Test
public void testMergeNonNullValues() {
ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Decimal.class)
.low(DECIMAL_1)
.high(DECIMAL_1)
.numNulls(2)
.numDVs(1)
.hll(2)
.kll(2)
.build());
ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Decimal.class)
.low(DECIMAL_3)
.high(DECIMAL_3)
.numNulls(3)
.numDVs(1)
.hll(3)
.kll(3)
.build());
merger.merge(aggrObj, newObj);
newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Decimal.class)
.low(DECIMAL_1)
.high(DECIMAL_1)
.numNulls(1)
.numDVs(1)
.hll(1, 1)
.kll(1, 1)
.build());
merger.merge(aggrObj, newObj);
ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Decimal.class)
.low(DECIMAL_1)
.high(DECIMAL_3)
.numNulls(6)
.numDVs(3)
.hll(2, 3, 1, 1)
.kll(2, 3, 1, 1)
.build();
assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
}
|
public static Locale setupRuntimeLocale() {
Locale systemDefault = Locale.getDefault();
log.info("System Default Locale: [{}]", systemDefault);
// TODO: Review- do we need to store the system default anywhere?
// Useful to log the "user.*" properties for debugging...
Set<String> pn = new HashSet<>(System.getProperties().stringPropertyNames());
pn.removeIf(f -> !(f.startsWith("user.")));
for (String ukey : pn) {
log.debug(" {}: {}", ukey, System.getProperty(ukey));
}
String language = System.getProperty(USER_LANGUAGE);
String country = System.getProperty(USER_COUNTRY);
log.info("Language: [{}], Country: [{}]", language, country);
Locale runtime = new Locale(language != null ? language : EMPTY,
country != null ? country : EMPTY);
String override = System.getenv(ONOS_LOCALE);
if (override != null) {
log.warn("Override with ONOS_LOCALE: [{}]", override);
runtime = localeFromString(override);
}
log.info("Setting runtime locale to: [{}]", runtime);
Locale.setDefault(runtime);
return runtime;
}
|
@Test
public void runtimeLocale() {
title("runtimeLocale");
Locale runtime = LionUtils.setupRuntimeLocale();
print("locale is [%s]", runtime);
// NOTE:
// Yeah, I know, "a unit test without asserts is not a unit test".
//
// But it would NOT be a good idea to assert the locale results in
// this method, because that is dependent on an environment variable.
//
// This method is here to allow manual verification of the Locale
// e.g. when running tests from IntelliJ, and setting the
// ONOS_LOCALE env.var. via the "Edit Configurations..." dialog.
}
|
public Result resolve(List<PluginDescriptor> plugins) {
// create graphs
dependenciesGraph = new DirectedGraph<>();
dependentsGraph = new DirectedGraph<>();
// populate graphs
Map<String, PluginDescriptor> pluginByIds = new HashMap<>();
for (PluginDescriptor plugin : plugins) {
addPlugin(plugin);
pluginByIds.put(plugin.getPluginId(), plugin);
}
log.debug("Graph: {}", dependenciesGraph);
// get a sorted list of dependencies
List<String> sortedPlugins = dependenciesGraph.reverseTopologicalSort();
log.debug("Plugins order: {}", sortedPlugins);
// create the result object
Result result = new Result(sortedPlugins);
resolved = true;
if (sortedPlugins != null) { // no cyclic dependency
// detect not found dependencies
for (String pluginId : sortedPlugins) {
if (!pluginByIds.containsKey(pluginId)) {
result.addNotFoundDependency(pluginId);
}
}
}
// check dependencies versions
for (PluginDescriptor plugin : plugins) {
String pluginId = plugin.getPluginId();
String existingVersion = plugin.getVersion();
List<String> dependents = getDependents(pluginId);
while (!dependents.isEmpty()) {
String dependentId = dependents.remove(0);
PluginDescriptor dependent = pluginByIds.get(dependentId);
String requiredVersion = getDependencyVersionSupport(dependent, pluginId);
boolean ok = checkDependencyVersion(requiredVersion, existingVersion);
if (!ok) {
result.addWrongDependencyVersion(new WrongDependencyVersion(pluginId, dependentId, existingVersion, requiredVersion));
}
}
}
return result;
}
|
@Test
void wrongDependencyVersion() {
PluginDescriptor pd1 = new DefaultPluginDescriptor()
.setPluginId("p1")
// .setDependencies("p2@2.0.0"); // simple version
.setDependencies("p2@>=1.5.0 & <1.6.0"); // range version
PluginDescriptor pd2 = new DefaultPluginDescriptor()
.setPluginId("p2")
.setPluginVersion("1.4.0");
List<PluginDescriptor> plugins = new ArrayList<>();
plugins.add(pd1);
plugins.add(pd2);
DependencyResolver.Result result = resolver.resolve(plugins);
assertFalse(result.getWrongVersionDependencies().isEmpty());
}
|
public String getName() {
return name;
}
|
@Test
void testWithoutEndpointContextPath() throws NacosException {
Properties properties = new Properties();
String endpoint = "127.0.0.1";
properties.setProperty(PropertyKeyConst.ENDPOINT, endpoint);
String endpointPort = "9090";
properties.setProperty(PropertyKeyConst.ENDPOINT_PORT, endpointPort);
String contextPath = "/contextPath";
properties.setProperty(PropertyKeyConst.CONTEXT_PATH, contextPath);
final NacosClientProperties clientProperties = NacosClientProperties.PROTOTYPE.derive(properties);
ServerListManager serverListManager = new ServerListManager(clientProperties);
String endpointContextPath = "/endpointContextPath";
assertFalse(serverListManager.addressServerUrl.contains(endpointContextPath));
assertTrue(serverListManager.addressServerUrl.contains(contextPath));
assertFalse(serverListManager.getName().contains("endpointContextPath"));
assertTrue(serverListManager.getName().contains("contextPath"));
}
|
@Override
public void setMonochrome(boolean monochrome) {
formats = monochrome ? monochrome() : ansi();
}
|
@Test
void should_handle_background() {
Feature feature = TestFeatureParser.parse("path/test.feature", "" +
"Feature: feature name\n" +
" Background: background name\n" +
" Given first step\n" +
" Scenario: s1\n" +
" Then second step\n" +
" Scenario: s2\n" +
" Then third step\n");
ByteArrayOutputStream out = new ByteArrayOutputStream();
Runtime.builder()
.withFeatureSupplier(new StubFeatureSupplier(feature))
.withAdditionalPlugins(new PrettyFormatter(out))
.withRuntimeOptions(new RuntimeOptionsBuilder().setMonochrome().build())
.withBackendSupplier(new StubBackendSupplier(
new StubStepDefinition("first step", "path/step_definitions.java:3"),
new StubStepDefinition("second step", "path/step_definitions.java:7"),
new StubStepDefinition("third step", "path/step_definitions.java:11")))
.build()
.run();
assertThat(out, bytes(equalToCompressingWhiteSpace("" +
"\n" +
"Scenario: s1 # path/test.feature:4\n" +
" Given first step # path/step_definitions.java:3\n" +
" Then second step # path/step_definitions.java:7\n" +
"\n" +
"Scenario: s2 # path/test.feature:6\n" +
" Given first step # path/step_definitions.java:3\n" +
" Then third step # path/step_definitions.java:11\n")));
}
|
static Result coerceUserList(
final Collection<Expression> expressions,
final ExpressionTypeManager typeManager
) {
return coerceUserList(expressions, typeManager, Collections.emptyMap());
}
|
@Test
public void shouldCoerceToBigIntIfStringNumericTooWideForInt() {
// Given:
final ImmutableList<Expression> expressions = ImmutableList.of(
new IntegerLiteral(10),
new StringLiteral("1234567890000")
);
// When:
final Result result = CoercionUtil.coerceUserList(expressions, typeManager);
// Then:
assertThat(result.commonType(), is(Optional.of(SqlTypes.BIGINT)));
assertThat(result.expressions(), is(ImmutableList.of(
new LongLiteral(10),
new LongLiteral(1234567890000L)
)));
}
|
@VisibleForTesting
DeletionMeta globalLogCleanup(long size) throws Exception {
List<Path> workerDirs = new ArrayList<>(workerLogs.getAllWorkerDirs());
Set<Path> aliveWorkerDirs = workerLogs.getAliveWorkerDirs();
return directoryCleaner.deleteOldestWhileTooLarge(workerDirs, size, false, aliveWorkerDirs);
}
|
@Test
public void testGlobalLogCleanup() throws Exception {
long nowMillis = Time.currentTimeMillis();
try (TmpPath testDir = new TmpPath()) {
Files.createDirectories(testDir.getFile().toPath());
Path rootDir = createDir(testDir.getFile().toPath(), "workers-artifacts");
Path topo1Dir = createDir(rootDir, "topo1");
Path topo2Dir = createDir(rootDir, "topo2");
// note that port1Dir is an active worker directory containing active logs
Path port1Dir = createDir(topo1Dir, "port1");
Path port2Dir = createDir(topo1Dir, "port2");
Path port3Dir = createDir(topo2Dir, "port3");
IntStream.range(0, 10)
.forEach(idx -> createFile(port1Dir, "A" + idx + ".log", nowMillis + 100L * idx, 200));
IntStream.range(0, 10)
.forEach(idx -> createFile(port2Dir, "B" + idx, nowMillis + 100L * idx, 200));
IntStream.range(0, 10)
.forEach(idx -> createFile(port3Dir, "C" + idx, nowMillis + 100L * idx, 200));
Map<String, Object> conf = Utils.readStormConfig();
StormMetricsRegistry metricRegistry = new StormMetricsRegistry();
WorkerLogs stubbedWorkerLogs = new WorkerLogs(conf, rootDir, metricRegistry) {
@Override
public SortedSet<Path> getAliveWorkerDirs() {
return new TreeSet<>(Collections.singletonList(port1Dir));
}
};
LogCleaner logCleaner = new LogCleaner(conf, stubbedWorkerLogs, new DirectoryCleaner(metricRegistry), rootDir, metricRegistry);
int deletedFiles = logCleaner.globalLogCleanup(2400).deletedFiles;
assertEquals(18, deletedFiles);
}
}
|
public static DateTimeFormatter getShortMillsFormatter() {
return SHORT_MILLS;
}
|
@Test
void assertGetShortMillsFormatter() {
assertThat(DateTimeFormatterFactory.getShortMillsFormatter().parse("1970-01-01 00:00:00.0").toString(), is("{},ISO resolved to 1970-01-01T00:00"));
}
|
public static L3ModificationInstruction modL3Src(IpAddress addr) {
checkNotNull(addr, "Src l3 IPv4 address cannot be null");
return new ModIPInstruction(L3SubType.IPV4_SRC, addr);
}
|
@Test
public void testModL3SrcMethod() {
final Instruction instruction = Instructions.modL3Src(ip41);
final L3ModificationInstruction.ModIPInstruction modIPInstruction =
checkAndConvert(instruction,
Instruction.Type.L3MODIFICATION,
L3ModificationInstruction.ModIPInstruction.class);
assertThat(modIPInstruction.ip(), is(equalTo(ip41)));
assertThat(modIPInstruction.subtype(),
is(equalTo(L3ModificationInstruction.L3SubType.IPV4_SRC)));
}
|
private void invokeToLeader(final String group, final Message request, final int timeoutMillis,
FailoverClosure closure) {
try {
final Endpoint leaderIp = Optional.ofNullable(getLeader(group))
.orElseThrow(() -> new NoLeaderException(group)).getEndpoint();
cliClientService.getRpcClient().invokeAsync(leaderIp, request, new InvokeCallback() {
@Override
public void complete(Object o, Throwable ex) {
if (Objects.nonNull(ex)) {
closure.setThrowable(ex);
closure.run(new Status(RaftError.UNKNOWN, ex.getMessage()));
return;
}
if (!((Response)o).getSuccess()) {
closure.setThrowable(new IllegalStateException(((Response) o).getErrMsg()));
closure.run(new Status(RaftError.UNKNOWN, ((Response) o).getErrMsg()));
return;
}
closure.setResponse((Response) o);
closure.run(Status.OK());
}
@Override
public Executor executor() {
return RaftExecutor.getRaftCliServiceExecutor();
}
}, timeoutMillis);
} catch (Exception e) {
closure.setThrowable(e);
closure.run(new Status(RaftError.UNKNOWN, e.toString()));
}
}
|
@Test
void testInvokeToLeader()
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, RemotingException, InterruptedException {
when(cliClientServiceMock.getRpcClient()).thenReturn(rpcClient);
setLeaderAs(peerId1);
int timeout = 3000;
Method invokeToLeaderMethod = JRaftServer.class.getDeclaredMethod("invokeToLeader", String.class, Message.class, int.class,
FailoverClosure.class);
invokeToLeaderMethod.setAccessible(true);
invokeToLeaderMethod.invoke(server, groupId, this.readRequest, timeout, null);
verify(cliClientServiceMock).getRpcClient();
verify(rpcClient).invokeAsync(eq(peerId1.getEndpoint()), eq(readRequest), any(InvokeCallback.class), any(long.class));
}
|
@Nonnull
@Override
public CreatedAggregations<AggregationBuilder> doCreateAggregation(Direction direction, String name, Pivot pivot, Time timeSpec, OSGeneratedQueryContext queryContext, Query query) {
AggregationBuilder root = null;
AggregationBuilder leaf = null;
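// One histogram builder is created per time field; the first becomes the root aggregation and each
// subsequent builder is nested as a sub-aggregation of the previous leaf.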
final Interval interval = timeSpec.interval();
final TimeRange timerange = query.timerange();
if (interval instanceof AutoInterval autoInterval
&& isAllMessages(timerange)) {
for (String timeField : timeSpec.fields()) {
final AutoDateHistogramAggregationBuilder builder = new AutoDateHistogramAggregationBuilder(name)
.field(timeField)
.setNumBuckets((int) (BASE_NUM_BUCKETS / autoInterval.scaling()))
.format(DATE_TIME_FORMAT);
if (root == null && leaf == null) {
root = builder;
leaf = builder;
} else {
leaf.subAggregation(builder);
leaf = builder;
}
}
} else {
for (String timeField : timeSpec.fields()) {
final DateHistogramInterval dateHistogramInterval = new DateHistogramInterval(interval.toDateInterval(query.effectiveTimeRange(pivot)).toString());
final List<BucketOrder> ordering = orderListForPivot(pivot, queryContext, defaultOrder);
final DateHistogramAggregationBuilder builder = AggregationBuilders.dateHistogram(name)
.field(timeField)
.order(ordering)
.format(DATE_TIME_FORMAT);
setInterval(builder, dateHistogramInterval);
if (root == null && leaf == null) {
root = builder;
leaf = builder;
} else {
leaf.subAggregation(builder);
leaf = builder;
}
}
}
return CreatedAggregations.create(root, leaf);
}
|
@Test
public void timeSpecIntervalIsCalculatedOnQueryTimeRangeIfNoPivotTimeRange() throws InvalidRangeParametersException {
final ArgumentCaptor<TimeRange> timeRangeCaptor = ArgumentCaptor.forClass(TimeRange.class);
when(interval.toDateInterval(timeRangeCaptor.capture())).thenReturn(DateInterval.days(1));
when(pivot.timerange()).thenReturn(Optional.empty());
when(query.timerange()).thenReturn(RelativeRange.create(2323));
this.osTimeHandler.doCreateAggregation(BucketSpecHandler.Direction.Row, "foobar", pivot, time, queryContext, query);
final TimeRange argumentTimeRange = timeRangeCaptor.getValue();
assertThat(argumentTimeRange).isEqualTo(RelativeRange.create(2323));
}
|
private void rebalance() {
int activeNodes = (int) clusterService.getNodes()
.stream()
.filter(node -> clusterService.getState(node.id()).isActive())
.count();
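// Each node's fair share: at most ceil(NUM_PARTITIONS / activeNodes) partitions.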
int myShare = (int) Math.ceil((double) NUM_PARTITIONS / activeNodes);
// First make sure this node is a candidate for all partitions.
IntStream.range(0, NUM_PARTITIONS)
.mapToObj(this::getPartitionPath)
.map(leadershipService::getLeadership)
.filter(leadership -> !leadership.candidates().contains(localNodeId))
.map(Leadership::topic)
.forEach(leadershipService::runForLeadership);
List<String> myPartitions = IntStream.range(0, NUM_PARTITIONS)
.mapToObj(this::getPartitionPath)
.map(leadershipService::getLeadership)
.filter(Objects::nonNull)
.filter(leadership -> localNodeId.equals(leadership.leaderNodeId()))
.map(Leadership::topic)
.collect(Collectors.toList());
int relinquish = myPartitions.size() - myShare;
for (int i = 0; i < relinquish; i++) {
String topic = myPartitions.get(i);
// Wait till all active nodes are in contention for partition ownership.
// This avoids too many relinquish/reclaim cycles.
if (leadershipService.getCandidates(topic).size() == activeNodes) {
leadershipService.withdraw(topic);
executor.schedule(() -> recontest(topic), BACKOFF_TIME, TimeUnit.SECONDS);
}
}
}
|
@Test
public void testRebalance() {
// We have all the partitions so we'll need to relinquish some
setUpLeadershipService(WorkPartitionManager.NUM_PARTITIONS);
leadershipService.withdraw(anyString());
expectLastCall().times(7);
replay(leadershipService);
partitionManager.activate();
// trigger rebalance
partitionManager.doRebalance();
verify(leadershipService);
}
|
@Override
public Map<K, V> getCachedMap() {
return localCacheView.getCachedMap();
}
|
@Test
public void testSizeCache() {
RLocalCachedMap<String, Integer> map = redisson.getLocalCachedMap(LocalCachedMapOptions.name("test"));
Map<String, Integer> cache = map.getCachedMap();
map.put("12", 1);
map.put("14", 2);
map.put("15", 3);
assertThat(cache.size()).isEqualTo(3);
assertThat(map.size()).isEqualTo(3);
}
|
static String getAbbreviation(Exception ex,
Integer statusCode,
String storageErrorMessage) {
String result = null;
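// Categories are iterated in rank order and a later match overwrites an earlier one,
// so the last matching category in the ranked list wins.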
for (RetryReasonCategory retryReasonCategory : rankedReasonCategories) {
final String abbreviation
= retryReasonCategory.captureAndGetAbbreviation(ex,
statusCode, storageErrorMessage);
if (abbreviation != null) {
result = abbreviation;
}
}
return result;
}
|
@Test
public void testConnectionTimeoutRetryReason() {
SocketTimeoutException connectionTimeoutException = new SocketTimeoutException(CONNECTION_TIMEOUT_JDK_MESSAGE);
Assertions.assertThat(RetryReason.getAbbreviation(connectionTimeoutException, null, null)).isEqualTo(
CONNECTION_TIMEOUT_ABBREVIATION
);
}
|
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
final long timestamp = clock.getTime() / 1000;
// oh it'd be lovely to use Java 7 here
try {
graphite.connect();
for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
reportGauge(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Counter> entry : counters.entrySet()) {
reportCounter(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
reportHistogram(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Meter> entry : meters.entrySet()) {
reportMetered(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Timer> entry : timers.entrySet()) {
reportTimer(entry.getKey(), entry.getValue(), timestamp);
}
graphite.flush();
} catch (IOException e) {
LOGGER.warn("Unable to report to Graphite", graphite, e);
} finally {
try {
graphite.close();
} catch (IOException e1) {
LOGGER.warn("Error closing Graphite", graphite, e1);
}
}
}
|
@Test
public void sendsMetricAttributesAsTagsIfEnabled() throws Exception {
final Counter counter = mock(Counter.class);
when(counter.getCount()).thenReturn(100L);
getReporterThatSendsMetricAttributesAsTags().report(map(),
map("counter", counter),
map(),
map(),
map());
final InOrder inOrder = inOrder(graphite);
inOrder.verify(graphite).connect();
inOrder.verify(graphite).send("prefix.counter;metricattribute=count", "100", timestamp);
inOrder.verify(graphite).flush();
inOrder.verify(graphite).close();
verifyNoMoreInteractions(graphite);
}
|
public static CatalogTable buildWithConfig(Config config) {
ReadonlyConfig readonlyConfig = ReadonlyConfig.fromConfig(config);
return buildWithConfig(readonlyConfig);
}
|
@Test
public void testDefaultTablePath() throws FileNotFoundException, URISyntaxException {
String path = getTestConfigFile("/conf/default_tablepath.conf");
Config config = ConfigFactory.parseFile(new File(path));
Config source = config.getConfigList("source").get(0);
ReadonlyConfig sourceReadonlyConfig = ReadonlyConfig.fromConfig(source);
CatalogTable catalogTable = CatalogTableUtil.buildWithConfig(sourceReadonlyConfig);
Assertions.assertEquals(
TablePath.DEFAULT.getDatabaseName(), catalogTable.getTablePath().getDatabaseName());
Assertions.assertEquals(
TablePath.DEFAULT.getSchemaName(), catalogTable.getTablePath().getSchemaName());
Assertions.assertEquals(
TablePath.DEFAULT.getTableName(), catalogTable.getTablePath().getTableName());
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String pgDataType = typeDefine.getDataType().toLowerCase();
switch (pgDataType) {
case PG_BOOLEAN:
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case PG_BOOLEAN_ARRAY:
builder.dataType(ArrayType.BOOLEAN_ARRAY_TYPE);
break;
case PG_SMALLSERIAL:
case PG_SMALLINT:
builder.dataType(BasicType.SHORT_TYPE);
break;
case PG_SMALLINT_ARRAY:
builder.dataType(ArrayType.SHORT_ARRAY_TYPE);
break;
case PG_INTEGER:
case PG_SERIAL:
builder.dataType(BasicType.INT_TYPE);
break;
case PG_INTEGER_ARRAY:
builder.dataType(ArrayType.INT_ARRAY_TYPE);
break;
case PG_BIGINT:
case PG_BIGSERIAL:
builder.dataType(BasicType.LONG_TYPE);
break;
case PG_BIGINT_ARRAY:
builder.dataType(ArrayType.LONG_ARRAY_TYPE);
break;
case PG_REAL:
builder.dataType(BasicType.FLOAT_TYPE);
break;
case PG_REAL_ARRAY:
builder.dataType(ArrayType.FLOAT_ARRAY_TYPE);
break;
case PG_DOUBLE_PRECISION:
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case PG_DOUBLE_PRECISION_ARRAY:
builder.dataType(ArrayType.DOUBLE_ARRAY_TYPE);
break;
case PG_NUMERIC:
DecimalType decimalType;
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale());
} else {
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
}
builder.dataType(decimalType);
break;
case PG_MONEY:
// money ranges from -92233720368547758.08 to +92233720368547758.07; with the sign that is 20 digits, so we
// use precision 30 to store it safely
DecimalType moneyDecimalType;
moneyDecimalType = new DecimalType(30, 2);
builder.dataType(moneyDecimalType);
builder.columnLength(30L);
builder.scale(2);
break;
case PG_CHAR:
case PG_CHARACTER:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
builder.sourceType(pgDataType);
} else {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
}
break;
case PG_VARCHAR:
case PG_CHARACTER_VARYING:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.sourceType(pgDataType);
} else {
builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
}
break;
case PG_TEXT:
builder.dataType(BasicType.STRING_TYPE);
break;
case PG_UUID:
builder.dataType(BasicType.STRING_TYPE);
builder.sourceType(pgDataType);
builder.columnLength(128L);
break;
case PG_JSON:
case PG_JSONB:
case PG_XML:
case PG_GEOMETRY:
case PG_GEOGRAPHY:
builder.dataType(BasicType.STRING_TYPE);
break;
case PG_CHAR_ARRAY:
case PG_VARCHAR_ARRAY:
case PG_TEXT_ARRAY:
builder.dataType(ArrayType.STRING_ARRAY_TYPE);
break;
case PG_BYTEA:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case PG_DATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case PG_TIME:
case PG_TIME_TZ:
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIME_SCALE) {
builder.scale(MAX_TIME_SCALE);
log.warn(
"The scale of time type is larger than {}, it will be truncated to {}",
MAX_TIME_SCALE,
MAX_TIME_SCALE);
} else {
builder.scale(typeDefine.getScale());
}
break;
case PG_TIMESTAMP:
case PG_TIMESTAMP_TZ:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIMESTAMP_SCALE) {
builder.scale(MAX_TIMESTAMP_SCALE);
log.warn(
"The scale of timestamp type is larger than {}, it will be truncated to {}",
MAX_TIMESTAMP_SCALE,
MAX_TIMESTAMP_SCALE);
} else {
builder.scale(typeDefine.getScale());
}
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
identifier(), typeDefine.getDataType(), typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertChar() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("bpchar")
.dataType("bpchar")
.build();
Column column = PostgresTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
Assertions.assertEquals(4, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("bpchar(10)")
.dataType("bpchar")
.length(10L)
.build();
column = PostgresTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
Assertions.assertEquals(40, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
|
@Override
public void finished(boolean allStepsExecuted) {
if (postProjectAnalysisTasks.length == 0) {
return;
}
ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED);
for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) {
executeTask(projectAnalysis, postProjectAnalysisTask);
}
}
|
@Test
@UseDataProvider("booleanValues")
public void logStatistics_add_fails_with_IAE_if_same_key_with_exact_case_added_twice(boolean allStepsExecuted) {
underTest.finished(allStepsExecuted);
verify(postProjectAnalysisTask).finished(taskContextCaptor.capture());
PostProjectAnalysisTask.LogStatistics logStatistics = taskContextCaptor.getValue().getLogStatistics();
String key = RandomStringUtils.randomAlphabetic(10);
logStatistics.add(key, new Object());
assertThat(catchThrowable(() -> logStatistics.add(key, "bar")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Statistic with key [" + key + "] is already present");
}
|
public Bson parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
final Filter filter = singleFilterParser.parseSingleExpression(filterExpression, attributes);
return filter.toBson();
}
|
@Test
void parsesFilterExpressionCorrectlyForDateRanges() {
final String fromString = "2012-12-12 12:12:12";
final String toString = "2022-12-12 12:12:12";
final List<EntityAttribute> entityAttributes = List.of(EntityAttribute.builder()
.id("created_at")
.title("Creation Date")
.type(SearchQueryField.Type.DATE)
.filterable(true)
.build());
assertEquals(
Filters.and(
Filters.gte("created_at",
new DateTime(2012, 12, 12, 12, 12, 12, DateTimeZone.UTC).toDate()),
Filters.lte("created_at",
new DateTime(2022, 12, 12, 12, 12, 12, DateTimeZone.UTC).toDate())
),
toTest.parseSingleExpression("created_at:" + fromString + RANGE_VALUES_SEPARATOR + toString,
entityAttributes
));
}
|
public static ResourceModel processResource(final Class<?> resourceClass)
{
return processResource(resourceClass, null);
}
|
@Test(expectedExceptions = ResourceConfigException.class)
public void failsOnInvalidActionReturnType() {
@RestLiCollection(name = "invalidReturnType")
class LocalClass extends CollectionResourceTemplate<Long, EmptyRecord> {
@Action(name = "invalidReturnType")
public Object invalidReturnType(@ActionParam(value = "someId") String someId) {
return null;
}
}
RestLiAnnotationReader.processResource(LocalClass.class);
Assert.fail("#validateActionReturnType should fail throwing a ResourceConfigException");
}
|
@Override
public PinotDataBuffer newBuffer(String column, IndexType<?, ?, ?> type, long sizeBytes)
throws IOException {
return allocNewBufferInternal(column, type, sizeBytes, type.getId().toLowerCase() + ".create");
}
|
@Test(expectedExceptions = RuntimeException.class)
public void testWriteExisting()
throws Exception {
try (SingleFileIndexDirectory columnDirectory = new SingleFileIndexDirectory(TEMP_DIR, _segmentMetadata,
ReadMode.mmap)) {
columnDirectory.newBuffer("column1", StandardIndexes.dictionary(), 1024);
}
try (SingleFileIndexDirectory columnDirectory = new SingleFileIndexDirectory(TEMP_DIR, _segmentMetadata,
ReadMode.mmap)) {
columnDirectory.newBuffer("column1", StandardIndexes.dictionary(), 1024);
}
}
|
public static ValueReference ofNullable(@Nullable String value) {
if (value == null) {
return null;
} else {
return of(value);
}
}
|
@Test
public void testOfNullable() {
assertThat(ValueReference.ofNullable("test")).isNotNull();
assertThat(ValueReference.ofNullable((String) null)).isNull();
assertThat(ValueReference.ofNullable(TestEnum.A)).isNotNull();
assertThat(ValueReference.ofNullable((TestEnum) null)).isNull();
}
|
@ApiOperation(value = "Get a single group", tags = { "Groups" })
@ApiResponses(value = {
@ApiResponse(code = 200, message = "Indicates the group exists and is returned."),
@ApiResponse(code = 404, message = "Indicates the requested group does not exist.")
})
@GetMapping(value = "/identity/groups/{groupId}", produces = "application/json")
public GroupResponse getGroup(@ApiParam(name = "groupId") @PathVariable String groupId) {
return restResponseFactory.createGroupResponse(getGroupFromRequest(groupId));
}
|
@Test
public void testGetGroup() throws Exception {
try {
Group testGroup = identityService.newGroup("testgroup");
testGroup.setName("Test group");
testGroup.setType("Test type");
identityService.saveGroup(testGroup);
CloseableHttpResponse response = executeRequest(
new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_GROUP, "testgroup")), HttpStatus.SC_OK);
JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThat(responseNode).isNotNull();
assertThatJson(responseNode)
.when(Option.IGNORING_EXTRA_FIELDS)
.isEqualTo("{"
+ " id: 'testgroup',"
+ " name: 'Test group',"
+ " type: 'Test type',"
+ " url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_GROUP, testGroup.getId()) + "'"
+ "}");
Group createdGroup = identityService.createGroupQuery().groupId("testgroup").singleResult();
assertThat(createdGroup).isNotNull();
assertThat(createdGroup.getName()).isEqualTo("Test group");
assertThat(createdGroup.getType()).isEqualTo("Test type");
} finally {
try {
identityService.deleteGroup("testgroup");
} catch (Throwable ignore) {
// Ignore, since the group may not have been created in the test
// or may already have been deleted
}
}
}
|
public static String relativize(String root, final String path) {
if(StringUtils.isBlank(root)) {
return path;
}
if(StringUtils.equals(root, path)) {
return StringUtils.EMPTY;
}
if(!StringUtils.equals(root, String.valueOf(Path.DELIMITER))) {
if(!StringUtils.endsWith(root, String.valueOf(Path.DELIMITER))) {
root = root + Path.DELIMITER;
}
}
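// Strip everything up to and including the first occurrence of root within path.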
if(StringUtils.contains(path, root)) {
return StringUtils.substring(path, path.indexOf(root) + root.length());
}
return path;
}
|
@Test
public void testRelativize() {
assertEquals("", PathRelativizer.relativize("/r", "/r"));
assertEquals("/", PathRelativizer.relativize("/r", "/r//"));
assertEquals("a", PathRelativizer.relativize("/", "/a"));
assertEquals("/b/path", PathRelativizer.relativize("/a", "/b/path"));
assertEquals("path", PathRelativizer.relativize("/a", "/a/path"));
assertEquals("path", PathRelativizer.relativize("/a/", "/a/path"));
assertEquals("path/", PathRelativizer.relativize("/a", "/a/path/"));
assertEquals("a/path", PathRelativizer.relativize("public_html", "/home/user/public_html/a/path"));
assertEquals("/home/user/public_html/a/path", PathRelativizer.relativize(null, "/home/user/public_html/a/path"));
}
|
EventLoopGroup getSharedOrCreateEventLoopGroup(EventLoopGroup eventLoopGroupShared) {
if (eventLoopGroupShared != null) {
return eventLoopGroupShared;
}
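// No shared group was supplied; create our own and keep a reference to it.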
return this.eventLoopGroup = new NioEventLoopGroup();
}
|
@Test
public void givenSharedEventLoop_whenGetEventLoop_ThenReturnShared() {
eventLoop = mock(EventLoopGroup.class);
assertThat(client.getSharedOrCreateEventLoopGroup(eventLoop), is(eventLoop));
}
|
@ScalarOperator(CAST)
@LiteralParameters("x")
@SqlType("varchar(x)")
public static Slice castToVarchar(@SqlType(StandardTypes.DOUBLE) double value)
{
return utf8Slice(String.valueOf(value));
}
|
@Test
public void testCastToVarchar()
{
assertFunction("cast(37.7E0 as varchar)", VARCHAR, "37.7");
assertFunction("cast(17.1E0 as varchar)", VARCHAR, "17.1");
}
|
@Override
public String render(String text) {
if (StringUtils.isBlank(text)) {
return "";
}
if (regex.isEmpty() || link.isEmpty()) {
Comment comment = new Comment();
comment.escapeAndAdd(text);
return comment.render();
}
try {
Matcher matcher = Pattern.compile(regex).matcher(text);
int start = 0;
Comment comment = new Comment();
while (hasMatch(matcher)) {
comment.escapeAndAdd(text.substring(start, matcher.start()));
comment.add(dynamicLink(matcher));
start = matcher.end();
}
comment.escapeAndAdd(text.substring(start));
return comment.render();
} catch (PatternSyntaxException e) {
LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
}
return text;
}
|
@Test
public void shouldEscapeDynamicLink() {
String link = "http://jira.example.com/${ID}";
String regex = "^ABC-[^ ]+";
trackingTool = new DefaultCommentRenderer(link, regex);
String result = trackingTool.render("ABC-\"><svg/onload=\"alert(1)");
assertThat(result,
is("<a href=\"http://jira.example.com/ABC-"><svg/onload="alert(1)\" " +
"target=\"story_tracker\">ABC-"><svg/onload="alert(1)</a>"));
}
|
public StructuralByteArray(byte[] value) {
this.value = value;
}
|
@Test
public void testStructuralByteArray() throws Exception {
assertEquals(
new StructuralByteArray("test string".getBytes(StandardCharsets.UTF_8)),
new StructuralByteArray("test string".getBytes(StandardCharsets.UTF_8)));
assertFalse(
new StructuralByteArray("test string".getBytes(StandardCharsets.UTF_8))
.equals(new StructuralByteArray("diff string".getBytes(StandardCharsets.UTF_8))));
}
|
public static SchemaKStream<?> buildSource(
final PlanBuildContext buildContext,
final DataSource dataSource,
final QueryContext.Stacker contextStacker
) {
final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
switch (dataSource.getDataSourceType()) {
case KSTREAM:
return windowed
? buildWindowedStream(
buildContext,
dataSource,
contextStacker
) : buildStream(
buildContext,
dataSource,
contextStacker
);
case KTABLE:
return windowed
? buildWindowedTable(
buildContext,
dataSource,
contextStacker
) : buildTable(
buildContext,
dataSource,
contextStacker
);
default:
throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType());
}
}
|
@Test
public void shouldReplaceTableSourceV2WithMatchingPseudoColumnVersion() {
// Given:
givenNonWindowedTable();
givenExistingQueryWithOldPseudoColumnVersion(tableSource);
// When:
final SchemaKStream<?> result = SchemaKSourceFactory.buildSource(
buildContext,
dataSource,
contextStacker
);
// Then:
assertThat(((TableSource) result.getSourceStep()).getPseudoColumnVersion(), equalTo(LEGACY_PSEUDOCOLUMN_VERSION_NUMBER));
assertValidSchema(result);
}
|
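// Returns the index of the greatest element <= v, or -1 if v is smaller than every element (as exercised by the test below).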
public int floor(T v) {
return Boundary.FLOOR.apply(find(v));
}
|
@Test
public void testFloor() {
assertEquals(-1, l.floor("A"));
assertEquals(0, l.floor("B"));
assertEquals(0, l.floor("C"));
assertEquals(1, l.floor("D"));
assertEquals(1, l.floor("E"));
assertEquals(2, l.floor("F"));
assertEquals(2, l.floor("G"));
}
|
public ClientAuth getClientAuth() {
String clientAuth = getString(SSL_CLIENT_AUTHENTICATION_CONFIG);
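// The deprecated SSL_CLIENT_AUTH_CONFIG is only honored when the newer SSL_CLIENT_AUTHENTICATION_CONFIG has not been supplied.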
if (originals().containsKey(SSL_CLIENT_AUTH_CONFIG)) {
if (originals().containsKey(SSL_CLIENT_AUTHENTICATION_CONFIG)) {
log.warn(
"The {} configuration is deprecated. Since a value has been supplied for the {} "
+ "configuration, that will be used instead",
SSL_CLIENT_AUTH_CONFIG,
SSL_CLIENT_AUTHENTICATION_CONFIG
);
} else {
log.warn(
"The configuration {} is deprecated and should be replaced with {}",
SSL_CLIENT_AUTH_CONFIG,
SSL_CLIENT_AUTHENTICATION_CONFIG
);
clientAuth = getBoolean(SSL_CLIENT_AUTH_CONFIG)
? SSL_CLIENT_AUTHENTICATION_REQUIRED
: SSL_CLIENT_AUTHENTICATION_NONE;
}
}
return getClientAuth(clientAuth);
}
|
@Test
public void shouldResolveClientAuthenticationRequest() {
// Given:
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.put(KsqlRestConfig.SSL_CLIENT_AUTHENTICATION_CONFIG,
KsqlRestConfig.SSL_CLIENT_AUTHENTICATION_REQUESTED)
.build()
);
// When:
final ClientAuth clientAuth = config.getClientAuth();
// Then:
assertThat(clientAuth, is(ClientAuth.REQUEST));
}
|
@Override
public Set<byte[]> zDiff(byte[]... sets) {
List<Object> args = new ArrayList<>(sets.length + 1);
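// ZDIFF expects the number of keys first, followed by the keys themselves.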
args.add(sets.length);
args.addAll(Arrays.asList(sets));
return write(sets[0], ByteArrayCodec.INSTANCE, ZDIFF, args.toArray());
}
|
@Test
public void testZDiff() {
StringRedisTemplate redisTemplate = new StringRedisTemplate();
redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
redisTemplate.afterPropertiesSet();
redisTemplate.boundZSetOps("test").add("1", 10);
redisTemplate.boundZSetOps("test").add("2", 20);
redisTemplate.boundZSetOps("test").add("3", 30);
redisTemplate.boundZSetOps("test").add("4", 30);
redisTemplate.boundZSetOps("test2").add("5", 50);
redisTemplate.boundZSetOps("test2").add("2", 20);
redisTemplate.boundZSetOps("test2").add("3", 30);
redisTemplate.boundZSetOps("test2").add("6", 60);
Set<String> objs = redisTemplate.boundZSetOps("test").difference("test2");
assertThat(objs).hasSize(2);
}
|
public boolean setResolution(DefaultIssue issue, @Nullable String resolution, IssueChangeContext context) {
if (!Objects.equals(resolution, issue.resolution())) {
issue.setFieldChange(context, RESOLUTION, issue.resolution(), resolution);
issue.setResolution(resolution);
issue.setUpdateDate(context.date());
issue.setChanged(true);
issue.setSendNotifications(true);
return true;
}
return false;
}
|
@Test
void not_change_resolution() {
issue.setResolution(Issue.RESOLUTION_FIXED);
boolean updated = underTest.setResolution(issue, Issue.RESOLUTION_FIXED, context);
assertThat(updated).isFalse();
assertThat(issue.resolution()).isEqualTo(Issue.RESOLUTION_FIXED);
assertThat(issue.currentChange()).isNull();
assertThat(issue.mustSendNotifications()).isFalse();
}
|
@Override
public SQLParserRuleConfiguration swapToObject(final YamlSQLParserRuleConfiguration yamlConfig) {
CacheOption parseTreeCacheOption = null == yamlConfig.getParseTreeCache()
? DefaultSQLParserRuleConfigurationBuilder.PARSE_TREE_CACHE_OPTION
: cacheOptionSwapper.swapToObject(yamlConfig.getParseTreeCache());
CacheOption sqlStatementCacheOption = null == yamlConfig.getSqlStatementCache()
? DefaultSQLParserRuleConfigurationBuilder.SQL_STATEMENT_CACHE_OPTION
: cacheOptionSwapper.swapToObject(yamlConfig.getSqlStatementCache());
return new SQLParserRuleConfiguration(parseTreeCacheOption, sqlStatementCacheOption);
}
|
@Test
void assertSwapToObjectWithDefaultConfig() {
YamlSQLParserRuleConfiguration yamlConfig = new YamlSQLParserRuleConfiguration();
SQLParserRuleConfiguration actual = new YamlSQLParserRuleConfigurationSwapper().swapToObject(yamlConfig);
assertThat(actual.getParseTreeCache().getInitialCapacity(), is(128));
assertThat(actual.getParseTreeCache().getMaximumSize(), is(1024L));
assertThat(actual.getSqlStatementCache().getInitialCapacity(), is(2000));
assertThat(actual.getSqlStatementCache().getMaximumSize(), is(65535L));
}
|
public static <T> CompletableFuture<T> addTimeoutHandling(CompletableFuture<T> future, Duration timeout,
ScheduledExecutorService executor,
Supplier<Throwable> exceptionSupplier) {
ScheduledFuture<?> scheduledFuture = executor.schedule(() -> {
if (!future.isDone()) {
future.completeExceptionally(exceptionSupplier.get());
}
}, timeout.toMillis(), TimeUnit.MILLISECONDS);
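// Cancel the pending timeout task once the future completes, so it does not linger in the executor.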
future.whenComplete((res, exception) -> scheduledFuture.cancel(false));
return future;
}
|
@Test
public void testTimeoutHandlingNoTimeout() throws ExecutionException, InterruptedException {
CompletableFuture<Void> future = new CompletableFuture<>();
@Cleanup("shutdownNow")
ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
FutureUtil.addTimeoutHandling(future, Duration.ofMillis(100), executor, () -> new Exception());
future.complete(null);
future.get();
}
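// A minimal sketch (not from the original suite) of the complementary timeout path, assuming the same
// FutureUtil API as above and a JUnit-style assertThrows: the future is never completed, so the
// scheduled task should complete it exceptionally after roughly 100 ms.
@Test
public void testTimeoutHandlingWithTimeout() {
CompletableFuture<Void> future = new CompletableFuture<>();
@Cleanup("shutdownNow")
ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
FutureUtil.addTimeoutHandling(future, Duration.ofMillis(100), executor, TimeoutException::new);
// get() surfaces the supplied TimeoutException wrapped in an ExecutionException
assertThrows(ExecutionException.class, () -> future.get(1, TimeUnit.SECONDS));
}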
|
@Override
public void resetLocal() {
this.min = Integer.MAX_VALUE;
}
|
@Test
void testResetLocal() {
IntMinimum min = new IntMinimum();
int value = 13;
min.add(value);
assertThat(min.getLocalValue().intValue()).isEqualTo(value);
min.resetLocal();
assertThat(min.getLocalValue().intValue()).isEqualTo(Integer.MAX_VALUE);
}
|
public String name() {
return name;
}
|
@Test
void nameCannotContainSpaces() {
Assertions.assertThrows(IllegalArgumentException.class, () -> DefaultBot.getDefaultBuilder().name("test test").build());
}
|
@Bean
public BulkheadRegistry bulkheadRegistry(
BulkheadConfigurationProperties bulkheadConfigurationProperties,
EventConsumerRegistry<BulkheadEvent> bulkheadEventConsumerRegistry,
RegistryEventConsumer<Bulkhead> bulkheadRegistryEventConsumer,
@Qualifier("compositeBulkheadCustomizer") CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer) {
BulkheadRegistry bulkheadRegistry = createBulkheadRegistry(bulkheadConfigurationProperties,
bulkheadRegistryEventConsumer, compositeBulkheadCustomizer);
registerEventConsumer(bulkheadRegistry, bulkheadEventConsumerRegistry,
bulkheadConfigurationProperties);
bulkheadConfigurationProperties.getInstances().forEach((name, properties) ->
bulkheadRegistry
.bulkhead(name, bulkheadConfigurationProperties
.createBulkheadConfig(properties, compositeBulkheadCustomizer,
name)));
return bulkheadRegistry;
}
|
@Test
public void testCreateBulkHeadRegistryWithSharedConfigs() {
//Given
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties defaultProperties = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
defaultProperties.setMaxConcurrentCalls(3);
defaultProperties.setMaxWaitDuration(Duration.ofMillis(50L));
assertThat(defaultProperties.getEventConsumerBufferSize()).isNull();
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties sharedProperties = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
sharedProperties.setMaxConcurrentCalls(2);
sharedProperties.setMaxWaitDuration(Duration.ofMillis(100L));
assertThat(sharedProperties.getEventConsumerBufferSize()).isNull();
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties backendWithDefaultConfig = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
backendWithDefaultConfig.setBaseConfig("default");
backendWithDefaultConfig.setMaxWaitDuration(Duration.ofMillis(200L));
assertThat(backendWithDefaultConfig.getEventConsumerBufferSize()).isNull();
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties backendWithSharedConfig = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
backendWithSharedConfig.setBaseConfig("sharedConfig");
backendWithSharedConfig.setMaxWaitDuration(Duration.ofMillis(300L));
assertThat(backendWithSharedConfig.getEventConsumerBufferSize()).isNull();
BulkheadConfigurationProperties bulkheadConfigurationProperties = new BulkheadConfigurationProperties();
bulkheadConfigurationProperties.getConfigs().put("default", defaultProperties);
bulkheadConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties);
bulkheadConfigurationProperties.getInstances()
.put("backendWithDefaultConfig", backendWithDefaultConfig);
bulkheadConfigurationProperties.getInstances()
.put("backendWithSharedConfig", backendWithSharedConfig);
BulkheadConfiguration bulkheadConfiguration = new BulkheadConfiguration();
DefaultEventConsumerRegistry<BulkheadEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
//When
BulkheadRegistry bulkheadRegistry = bulkheadConfiguration
.bulkheadRegistry(bulkheadConfigurationProperties, eventConsumerRegistry,
new CompositeRegistryEventConsumer<>(emptyList()),
new CompositeCustomizer<>(Collections.emptyList()));
//Then
assertThat(bulkheadRegistry.getAllBulkheads().size()).isEqualTo(2);
// Should get default config and overwrite max calls and wait time
Bulkhead bulkhead1 = bulkheadRegistry.bulkhead("backendWithDefaultConfig");
assertThat(bulkhead1).isNotNull();
assertThat(bulkhead1.getBulkheadConfig().getMaxConcurrentCalls()).isEqualTo(3);
assertThat(bulkhead1.getBulkheadConfig().getMaxWaitDuration().toMillis()).isEqualTo(200L);
// Should get shared config and overwrite wait time
Bulkhead bulkhead2 = bulkheadRegistry.bulkhead("backendWithSharedConfig");
assertThat(bulkhead2).isNotNull();
assertThat(bulkhead2.getBulkheadConfig().getMaxConcurrentCalls()).isEqualTo(2);
assertThat(bulkhead2.getBulkheadConfig().getMaxWaitDuration().toMillis()).isEqualTo(300L);
// Unknown backend should get default config of Registry
Bulkhead bulkhead3 = bulkheadRegistry.bulkhead("unknownBackend");
assertThat(bulkhead3).isNotNull();
assertThat(bulkhead3.getBulkheadConfig().getMaxWaitDuration().toMillis()).isEqualTo(50L);
assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3);
}
|
public Transfer create(final CommandLine input, final Host host, final Path remote, final List<TransferItem> items)
throws BackgroundException {
final Transfer transfer;
final TerminalAction type = TerminalActionFinder.get(input);
if(null == type) {
throw new BackgroundException(LocaleFactory.localizedString("Unknown"), "Unknown transfer type");
}
switch(type) {
case download:
if(StringUtils.containsAny(remote.getName(), '*')) {
transfer = new DownloadTransfer(host, items, new DownloadGlobFilter(remote.getName()));
}
else {
transfer = new DownloadTransfer(host, items);
}
if(input.hasOption(TerminalOptionsBuilder.Params.nochecksum.name())) {
final DownloadFilterOptions options = new DownloadFilterOptions(host);
options.checksum = Boolean.parseBoolean(input.getOptionValue(TerminalOptionsBuilder.Params.nochecksum.name()));
((DownloadTransfer) transfer).withOptions(options);
}
break;
case upload:
transfer = new UploadTransfer(host, items);
if(input.hasOption(TerminalOptionsBuilder.Params.nochecksum.name())) {
final UploadFilterOptions options = new UploadFilterOptions(host);
options.checksum = Boolean.parseBoolean(input.getOptionValue(TerminalOptionsBuilder.Params.nochecksum.name()));
((UploadTransfer) transfer).withOptions(options);
}
break;
case synchronize:
transfer = new SyncTransfer(host, items.iterator().next());
break;
default:
throw new BackgroundException(LocaleFactory.localizedString("Unknown"),
String.format("Unknown transfer type %s", type.name()));
}
if(input.hasOption(TerminalOptionsBuilder.Params.throttle.name())) {
try {
transfer.setBandwidth(Float.parseFloat(input.getOptionValue(TerminalOptionsBuilder.Params.throttle.name())));
}
catch(NumberFormatException ignore) {
//
}
}
return transfer;
}
|
@Test
public void testFilter() throws Exception {
final CommandLineParser parser = new PosixParser();
final Transfer transfer = new TerminalTransferFactory().create(parser.parse(TerminalOptionsBuilder.options(), new String[]{"--download", "rackspace://cdn.cyberduck.ch/remote/*.css"}),
new Host(new SwiftProtocol()), new Path("/remote/*.css", EnumSet.of(Path.Type.directory)), Collections.<TransferItem>emptyList());
assertEquals(Transfer.Type.download, transfer.getType());
final PathCache cache = new PathCache(1);
transfer.withCache(cache);
cache.clear();
cache.put(new Path("/remote", EnumSet.of(Path.Type.directory)), new AttributedList<Path>(Collections.singletonList(new Path("/remote/file.css", EnumSet.of(Path.Type.file)))));
assertFalse(transfer.list(null, new Path("/remote", EnumSet.of(Path.Type.directory)), new Local("/tmp"), new DisabledListProgressListener()).isEmpty());
cache.clear();
cache.put(new Path("/remote", EnumSet.of(Path.Type.directory)), new AttributedList<Path>(Collections.singletonList(new Path("/remote/file.png", EnumSet.of(Path.Type.file)))));
assertTrue(transfer.list(null, new Path("/remote", EnumSet.of(Path.Type.directory)), new Local("/tmp"), new DisabledListProgressListener()).isEmpty());
}
|
public static Formatter forNumbers(@Nonnull String format) {
return new NumberFormat(format);
}
|
@Test
public void testRounding() {
Formatter f = forNumbers("FM0.9");
check(0.15, f, "0.2");
f = forNumbers("FM.99");
check(0.015, f, ".02");
f = forNumbers("FM99");
check(9.9, f, "10");
}
|
@VisibleForTesting
Path stringToPath(String s) {
try {
URI uri = new URI(s);
return new Path(uri.getScheme(), uri.getAuthority(), uri.getPath());
} catch (URISyntaxException e) {
throw new IllegalArgumentException(
"Error parsing argument." + " Argument must be a valid URI: " + s, e);
}
}
|
@Test
public void testStringToPath() throws IOException {
Configuration conf = new Configuration();
JobResourceUploader uploader =
new JobResourceUploader(FileSystem.getLocal(conf), false);
Assert.assertEquals("Failed: absolute, no scheme, with fragment",
"/testWithFragment.txt",
uploader.stringToPath("/testWithFragment.txt#fragment.txt").toString());
Assert.assertEquals("Failed: absolute, with scheme, with fragment",
"file:/testWithFragment.txt",
uploader.stringToPath("file:///testWithFragment.txt#fragment.txt")
.toString());
Assert.assertEquals("Failed: relative, no scheme, with fragment",
"testWithFragment.txt",
uploader.stringToPath("testWithFragment.txt#fragment.txt").toString());
Assert.assertEquals("Failed: relative, no scheme, no fragment",
"testWithFragment.txt",
uploader.stringToPath("testWithFragment.txt").toString());
Assert.assertEquals("Failed: absolute, with scheme, no fragment",
"file:/testWithFragment.txt",
uploader.stringToPath("file:///testWithFragment.txt").toString());
}
|
public static boolean isNotNull(Object value) {
return value != null;
}
|
@SuppressWarnings({"ConstantConditions", "SimplifiableJUnitAssertion"})
@Test
public void isNotNull() {
assertEquals(true, TernaryLogic.isNotNull(false));
assertEquals(true, TernaryLogic.isNotNull(true));
assertEquals(false, TernaryLogic.isNotNull(null));
assertEquals(true, TernaryLogic.isNotNull(new Object()));
}
|
static KiePMMLFieldColumnPair getKiePMMLFieldColumnPair(final FieldColumnPair fieldColumnPair) {
return new KiePMMLFieldColumnPair(fieldColumnPair.getField(),
getKiePMMLExtensions(fieldColumnPair.getExtensions()),
fieldColumnPair.getColumn());
}
|
@Test
void getKiePMMLFieldColumnPair() {
final FieldColumnPair toConvert = getRandomFieldColumnPair();
final KiePMMLFieldColumnPair retrieved = KiePMMLFieldColumnPairInstanceFactory.getKiePMMLFieldColumnPair(toConvert);
commonVerifyKiePMMLFieldColumnPair(retrieved, toConvert);
}
|
public void setOuterJoinType(OuterJoinType outerJoinType) {
this.outerJoinType = outerJoinType;
}
|
@Test
void testFullOuterJoinWithEmptyLeftInput() throws Exception {
final List<String> leftInput = Collections.emptyList();
final List<String> rightInput = Arrays.asList("foo", "bar", "foobar");
baseOperator.setOuterJoinType(OuterJoinOperatorBase.OuterJoinType.FULL);
List<String> expected = Arrays.asList("null,bar", "null,foo", "null,foobar");
testOuterJoin(leftInput, rightInput, expected);
}
|
public Response downloadLogFile(String host, String fileName, String user) throws IOException {
workerLogs.setLogFilePermission(fileName);
return logFileDownloadHelper.downloadFile(host, fileName, user, false);
}
|
@Test
public void testDownloadLogFileTraversal() throws IOException {
try (TmpPath rootPath = new TmpPath()) {
LogviewerLogDownloadHandler handler = createHandlerTraversalTests(rootPath.getFile().toPath());
Response topoAResponse = handler.downloadLogFile("host","../nimbus.log", "user");
Utils.forceDelete(rootPath.toString());
assertThat(topoAResponse.getStatus(), is(Response.Status.NOT_FOUND.getStatusCode()));
}
}
|
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getMainConsumerConfigs(final String groupId, final String clientId, final int threadIdx) {
final Map<String, Object> consumerProps = getCommonConsumerConfigs();
// Get main consumer override configs
final Map<String, Object> mainConsumerProps = originalsWithPrefix(MAIN_CONSUMER_PREFIX);
consumerProps.putAll(mainConsumerProps);
// this is a hack to work around StreamsConfig constructor inside StreamsPartitionAssignor to avoid casting
consumerProps.put(APPLICATION_ID_CONFIG, groupId);
// add group id, client id with stream client id prefix, and group instance id
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
final String groupInstanceId = (String) consumerProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
// Suffix each thread consumer with thread.id to enforce uniqueness of group.instance.id.
if (groupInstanceId != null) {
consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId + "-" + threadIdx);
}
// add configs required for stream partition assignor
consumerProps.put(UPGRADE_FROM_CONFIG, getString(UPGRADE_FROM_CONFIG));
consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG));
consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG));
consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG));
consumerProps.put(ACCEPTABLE_RECOVERY_LAG_CONFIG, getLong(ACCEPTABLE_RECOVERY_LAG_CONFIG));
consumerProps.put(MAX_WARMUP_REPLICAS_CONFIG, getInt(MAX_WARMUP_REPLICAS_CONFIG));
consumerProps.put(PROBING_REBALANCE_INTERVAL_MS_CONFIG, getLong(PROBING_REBALANCE_INTERVAL_MS_CONFIG));
consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamsPartitionAssignor.class.getName());
consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, getString(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, getList(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG));
consumerProps.put(TASK_ASSIGNOR_CLASS_CONFIG, getString(TASK_ASSIGNOR_CLASS_CONFIG));
// disable auto topic creation
consumerProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
// verify that producer batch config is no larger than segment size, then add topic configs required for creating topics
final Map<String, Object> topicProps = originalsWithPrefix(TOPIC_PREFIX, false);
final Map<String, Object> producerProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames());
if (topicProps.containsKey(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)) &&
producerProps.containsKey(ProducerConfig.BATCH_SIZE_CONFIG)) {
final int segmentSize = Integer.parseInt(topicProps.get(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)).toString());
final int batchSize = Integer.parseInt(producerProps.get(ProducerConfig.BATCH_SIZE_CONFIG).toString());
if (segmentSize < batchSize) {
throw new IllegalArgumentException(String.format("Specified topic segment size %d is is smaller than the configured producer batch size %d, this will cause produced batch not able to be appended to the topic",
segmentSize,
batchSize));
}
}
consumerProps.putAll(topicProps);
return consumerProps;
}
|
@Test
public void shouldNotSetInternalThrowOnFetchStableOffsetUnsupportedConfigToFalseInConsumerForEosV2() {
props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE_V2);
final StreamsConfig streamsConfig = new StreamsConfig(props);
final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx);
assertThat(consumerConfigs.get("internal.throw.on.fetch.stable.offset.unsupported"), is(true));
}
|
protected static int castToIntSafely(long value) {
if (value > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
}
return Long.valueOf(value).intValue();
}
|
@Test
void testCastToIntSafely() {
assertEquals(0, Resource.castToIntSafely(0));
assertEquals(1, Resource.castToIntSafely(1));
assertEquals(Integer.MAX_VALUE,
Resource.castToIntSafely(Integer.MAX_VALUE));
assertEquals(Integer.MAX_VALUE,
Resource.castToIntSafely(Integer.MAX_VALUE + 1L),
"Cast to Integer.MAX_VALUE if the long is greater than "
+ "Integer.MAX_VALUE");
assertEquals(Integer.MAX_VALUE,
Resource.castToIntSafely(Long.MAX_VALUE),
"Cast to Integer.MAX_VALUE if the long is greater than "
+ "Integer.MAX_VALUE");
}
|
@Bean("CiConfiguration")
public CiConfiguration provide(Configuration configuration, CiVendor[] ciVendors) {
boolean disabled = configuration.getBoolean(PROP_DISABLED).orElse(false);
if (disabled) {
return new EmptyCiConfiguration();
}
List<CiVendor> detectedVendors = Arrays.stream(ciVendors)
.filter(CiVendor::isDetected)
.toList();
if (detectedVendors.size() > 1) {
List<String> names = detectedVendors.stream().map(CiVendor::getName).toList();
throw MessageException.of("Multiple CI environments are detected: " + names + ". Please check environment variables or set property " + PROP_DISABLED + " to true.");
}
if (detectedVendors.size() == 1) {
CiVendor vendor = detectedVendors.get(0);
LOG.info("Auto-configuring with CI '{}'", vendor.getName());
return vendor.loadConfiguration();
}
return new EmptyCiConfiguration();
}
|
@Test
public void configuration_defined_by_ci_vendor() {
CiConfiguration ciConfiguration = underTest.provide(cli.asConfig(), new CiVendor[]{new DisabledCiVendor("vendor1"), new EnabledCiVendor("vendor2")});
assertThat(ciConfiguration.getScmRevision()).hasValue(EnabledCiVendor.SHA);
}
|
GroupFilter groupFilter() {
return getConfiguredInstance(GROUP_FILTER_CLASS, GroupFilter.class);
}
|
@Test
public void testGroupMatching() {
MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps("groups", "group1"));
        assertTrue(config.groupFilter().shouldReplicateGroup("group1"),
            "group1 matching property configuration failed");
        assertFalse(config.groupFilter().shouldReplicateGroup("group2"),
            "group2 matching property configuration failed");
}
|
public static HostRestrictingAuthorizationFilter
initializeState(Configuration conf) {
String confName = HostRestrictingAuthorizationFilter.HDFS_CONFIG_PREFIX +
HostRestrictingAuthorizationFilter.RESTRICTION_CONFIG;
String confValue = conf.get(confName);
// simply pass a blank value if we do not have one set
confValue = (confValue == null ? "" : confValue);
Map<String, String> confMap =
ImmutableMap.of(HostRestrictingAuthorizationFilter.RESTRICTION_CONFIG
, confValue);
FilterConfig fc =
new DatanodeHttpServer.MapBasedFilterConfig(
HostRestrictingAuthorizationFilter.class.getName(), confMap);
HostRestrictingAuthorizationFilter hostRestrictingAuthorizationFilter =
new HostRestrictingAuthorizationFilter();
try {
hostRestrictingAuthorizationFilter.init(fc);
} catch (ServletException e) {
throw new IllegalStateException(
"Failed to initialize HostRestrictingAuthorizationFilter.", e);
}
return hostRestrictingAuthorizationFilter;
}
|
@Test
public void testMultipleAcceptedGETsOneChannel() {
Configuration conf = new Configuration();
conf.set(CONFNAME, "*,*,/allowed");
HostRestrictingAuthorizationFilter filter =
HostRestrictingAuthorizationFilterHandler.initializeState(conf);
EmbeddedChannel channel = new CustomEmbeddedChannel("127.0.0.1", 1006,
new HostRestrictingAuthorizationFilterHandler(filter));
FullHttpRequest allowedHttpRequest =
new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
HttpMethod.GET,
WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_one?op=OPEN");
FullHttpRequest allowedHttpRequest2 =
new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
HttpMethod.GET,
WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_two?op=OPEN");
FullHttpRequest allowedHttpRequest3 =
new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
HttpMethod.GET,
WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN");
assertTrue("Should successfully accept request",
channel.writeInbound(allowedHttpRequest));
assertTrue("Should successfully accept request, second time",
channel.writeInbound(allowedHttpRequest2));
assertTrue("Should successfully accept request, third time",
channel.writeInbound(allowedHttpRequest3));
}
|
public final AccessControlEntry entry() {
return entry;
}
|
@Test
public void shouldThrowOnMatchPatternType() {
assertThrows(IllegalArgumentException.class,
() -> new AclBinding(new ResourcePattern(ResourceType.TOPIC, "foo", PatternType.MATCH), ACL1.entry()));
}
|
public static DynamicVoters parse(String input) {
input = input.trim();
List<DynamicVoter> voters = new ArrayList<>();
for (String voterString : input.split(",")) {
if (!voterString.isEmpty()) {
voters.add(DynamicVoter.parse(voterString));
}
}
return new DynamicVoters(voters);
}
|
@Test
public void testParsingSingleDynamicVoter() {
assertEquals(new DynamicVoters(Arrays.asList(
new DynamicVoter(
Uuid.fromString("K90IZ-0DRNazJ49kCZ1EMQ"),
2,
"localhost",
(short) 8020))),
DynamicVoters.parse("2@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ"));
}
|
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetPartialGenericFunctionVariadic() throws NoSuchMethodException {
// Given:
final Type genericType = getClass().getMethod("partialGenericFunctionType").getGenericReturnType();
// When:
final ParamType returnType = UdfUtil.getVarArgsSchemaFromType(genericType);
// Then:
assertThat(returnType, is(LambdaType.of(ImmutableList.of(ParamTypes.LONG), GenericType.of("U"))));
}
|
static byte[] deriveEnc(byte[] seed, int offset, int length) {
final MessageDigest md = DigestUtils.digest("SHA1");
md.update(seed, offset, length);
md.update(new byte[] {0, 0, 0, 1});
return Arrays.copyOfRange(md.digest(), 0, 16);
}
|
@Test
public void shouldDeriveEncryptionKey() {
assertEquals(
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS",
ByteArrayUtils.prettyHex(TDEASecureMessaging.deriveEnc(
Hex.decode("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"),0, 16)
)
);
}
|
@Override
public Name getLocation(final Path file) throws BackgroundException {
final Path container = containerService.getContainer(file);
if(container.isRoot()) {
return unknown;
}
if(cache.containsKey(container)) {
return cache.get(container);
}
if(Location.unknown.equals(new SwiftRegion(container.attributes().getRegion()))) {
final SwiftRegion region = new SwiftRegion(session.getHost().getRegion());
if(Location.unknown.equals(region)) {
final Client client = session.getClient();
for(Region r : client.getRegions()) {
try {
cache.put(container, new SwiftRegion(client.getContainerInfo(r, container.getName()).getRegion().getRegionId()));
}
catch(ContainerNotFoundException | AuthorizationException e) {
log.warn(String.format("Failure finding container %s in region %s", container, r.getRegionId()));
}
catch(GenericException e) {
if(e.getHttpStatusCode() == HttpStatus.SC_SERVICE_UNAVAILABLE) {
log.warn(String.format("Ignore failure %s for region %s", e, region));
continue;
}
throw new SwiftExceptionMappingService().map(e);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
}
if(!cache.containsKey(container)) {
throw new NotfoundException(container.getAbsolute());
}
}
else {
cache.put(container, region);
}
}
else {
final SwiftRegion r = new SwiftRegion(container.attributes().getRegion());
cache.put(container, r);
}
return cache.get(container);
}
|
@Test
public void testFindLocation() throws Exception {
assertEquals(new SwiftLocationFeature.SwiftRegion("IAD"), new SwiftLocationFeature(session).getLocation(
new Path("cdn.duck.sh", EnumSet.of(Path.Type.volume, Path.Type.directory))));
assertEquals(unknown, new SwiftLocationFeature(session).getLocation(
new Path("/", EnumSet.of(Path.Type.volume, Path.Type.directory))));
}
|
@Deprecated
public static String updateSerializedOptions(
String serializedOptions, Map<String, String> runtimeValues) {
ObjectNode root, options;
try {
root = PipelineOptionsFactory.MAPPER.readValue(serializedOptions, ObjectNode.class);
options = (ObjectNode) root.get("options");
checkNotNull(options, "Unable to locate 'options' in %s", serializedOptions);
} catch (IOException e) {
throw new RuntimeException(String.format("Unable to parse %s", serializedOptions), e);
}
for (Map.Entry<String, String> entry : runtimeValues.entrySet()) {
options.put(entry.getKey(), entry.getValue());
}
try {
return PipelineOptionsFactory.MAPPER.writeValueAsString(root);
} catch (IOException e) {
throw new RuntimeException("Unable to parse re-serialize options", e);
}
}
|
@Test
public void testUpdateSerialize() throws Exception {
TestOptions submitOptions = PipelineOptionsFactory.as(TestOptions.class);
String serializedOptions = MAPPER.writeValueAsString(submitOptions);
String updatedOptions =
ValueProviders.updateSerializedOptions(serializedOptions, ImmutableMap.of("string", "bar"));
TestOptions runtime =
MAPPER.readValue(updatedOptions, PipelineOptions.class).as(TestOptions.class);
assertEquals("bar", runtime.getString());
}
|
@Override
public void childEntriesWillBecomeVisible(final Entry submenu) {
UserRole userRole = currentUserRole();
childEntriesWillBecomeVisible(submenu, userRole);
}
|
@Test
public void dontActivateSelectOnPopup_forNotCheckSelectionOnPopup() {
Entry menuEntry = new Entry();
Entry actionEntry = new Entry();
menuEntry.addChild(actionEntry);
final AFreeplaneAction someAction = Mockito.mock(AFreeplaneAction.class);
when(someAction.checkSelectionOnPopup()).thenReturn(false);
when(someAction.isEnabled()).thenReturn(true);
new EntryAccessor().setAction(actionEntry, someAction);
final ActionStatusUpdater actionSelectListener = new ActionStatusUpdater();
actionSelectListener.childEntriesWillBecomeVisible(menuEntry, UserRole.EDITOR);
verify(someAction, never()).setSelected();
}
|
@Override
@SuppressWarnings("unchecked")
public synchronized boolean addAll(Collection<? extends E> c) {
if (c instanceof BitList) {
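            // Fast path: OR the underlying bit set directly and copy over any tail-list elements.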
rootSet.or(((BitList<? extends E>) c).rootSet);
if (((BitList<? extends E>) c).hasMoreElementInTailList()) {
for (E e : ((BitList<? extends E>) c).tailList) {
addToTailList(e);
}
}
return true;
}
return super.addAll(c);
}
|
@Test
void testAddAll() {
List<String> list = Arrays.asList("A", "B", "C");
BitList<String> bitList1 = new BitList<>(list);
BitList<String> bitList2 = new BitList<>(list);
bitList1.removeAll(list);
Assertions.assertEquals(0, bitList1.size());
bitList1.addAll(bitList2);
Assertions.assertEquals(3, bitList1.size());
Assertions.assertFalse(bitList1.hasMoreElementInTailList());
bitList1.addAll(bitList2);
Assertions.assertEquals(3, bitList1.size());
}
|
@Override
public boolean accept(final Path file, final Local local, final TransferStatus parent) throws BackgroundException {
if(super.accept(file, local, parent)) {
if(local.isFile()) {
if(parent.isExists()) {
if(find.find(file)) {
final PathAttributes attributes = attribute.find(file);
if(attributes.getSize() == local.attributes().getSize()) {
if(Checksum.NONE != attributes.getChecksum()) {
final ChecksumCompute compute = ChecksumComputeFactory.get(attributes.getChecksum().algorithm);
if(compute.compute(local.getInputStream(), parent).equals(attributes.getChecksum())) {
if(log.isInfoEnabled()) {
log.info(String.format("Skip file %s with checksum %s", file, attributes.getChecksum()));
}
return false;
}
log.warn(String.format("Checksum mismatch for %s and %s", file, local));
}
else {
if(log.isInfoEnabled()) {
log.info(String.format("Skip file %s with remote size %d", file, attributes.getSize()));
}
// No need to resume completed transfers
return false;
}
}
}
}
}
return true;
}
return false;
}
|
@Test
public void testAccept() throws Exception {
final ResumeFilter f = new ResumeFilter(new DisabledUploadSymlinkResolver(), new NullSession(new Host(new TestProtocol())));
assertTrue(f.accept(new Path("t", EnumSet.of(Path.Type.file)), new NullLocal("a") {
@Override
public boolean exists() {
return true;
}
@Override
public LocalAttributes attributes() {
return new LocalAttributes(this.getAbsolute()) {
@Override
public long getSize() {
return 1L;
}
};
}
}, new TransferStatus().exists(true)));
}
|
public static Path copyContent(Path src, Path target, CopyOption... options) throws IORuntimeException {
Assert.notNull(src, "Src path must be not null !");
Assert.notNull(target, "Target path must be not null !");
try {
Files.walkFileTree(src, new CopyVisitor(src, target, options));
} catch (IOException e) {
throw new IORuntimeException(e);
}
return target;
}
|
@Test
@Disabled
public void copyContentTest(){
PathUtil.copyContent(
Paths.get("d:/Red2_LYY"),
Paths.get("d:/test/aaa/")
);
}
|
@Override
public void execute(MigrationStatusListener listener) {
MigrationContainer migrationContainer = new MigrationContainerImpl(serverContainer, MigrationStepsExecutorImpl.class);
try {
MigrationStepsExecutor stepsExecutor = migrationContainer.getComponentByType(MigrationStepsExecutor.class);
Optional<Long> lastMigrationNumber = migrationHistory.getLastMigrationNumber();
List<RegisteredMigrationStep> steps = lastMigrationNumber
.map(i -> migrationSteps.readFrom(i + 1))
.orElse(migrationSteps.readAll());
listener.onMigrationsStart(steps.size());
stepsExecutor.execute(steps, listener);
} finally {
migrationContainer.cleanup();
}
}
|
@Test
void execute_execute_steps_from_last_migration_number_plus_1() {
when(migrationHistory.getLastMigrationNumber()).thenReturn(Optional.of(50L));
List<RegisteredMigrationStep> steps = singletonList(new RegisteredMigrationStep(1, "doo", TestMigrationStep.class));
when(migrationSteps.readFrom(51)).thenReturn(steps);
when(migrationSteps.readAll()).thenReturn(steps);
underTest.execute(new NoOpMigrationStatusListener());
verify(migrationSteps).readFrom(51);
assertThat(stepRegistry.stepRan).isTrue();
}
|
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
throw new UnsupportedSQLOperationException("PostgreSQLStringArrayBinaryProtocolValue.write()");
}
|
@Test
void assertWrite() {
assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().write(new PostgreSQLPacketPayload(null, StandardCharsets.UTF_8), "val"));
}
|
public static Collection<SubquerySegment> getSubquerySegments(final SelectStatement selectStatement) {
List<SubquerySegment> result = new LinkedList<>();
extractSubquerySegments(result, selectStatement);
return result;
}
|
@Test
void assertGetSubquerySegmentsInProjection() {
ColumnSegment left = new ColumnSegment(41, 48, new IdentifierValue("order_id"));
ColumnSegment right = new ColumnSegment(52, 62, new IdentifierValue("order_id"));
SelectStatement subquerySelectStatement = mock(SelectStatement.class);
when(subquerySelectStatement.getWhere()).thenReturn(Optional.of(new WhereSegment(35, 62, new BinaryOperationExpression(41, 62, left, right, "=", "order_id = oi.order_id"))));
SubquerySegment subquerySegment = new SubquerySegment(7, 63, subquerySelectStatement, "");
SubqueryProjectionSegment subqueryProjectionSegment = new SubqueryProjectionSegment(subquerySegment, "(SELECT status FROM t_order WHERE order_id = oi.order_id)");
SelectStatement selectStatement = mock(SelectStatement.class);
ProjectionsSegment projections = new ProjectionsSegment(7, 79);
when(selectStatement.getProjections()).thenReturn(projections);
projections.getProjections().add(subqueryProjectionSegment);
Collection<SubquerySegment> actual = SubqueryExtractUtils.getSubquerySegments(selectStatement);
assertThat(actual.size(), is(1));
assertThat(actual.iterator().next(), is(subquerySegment));
}
|
public static void boundsCheck(int capacity, int index, int length) {
if (capacity < 0 || index < 0 || length < 0 || (index > (capacity - length))) {
throw new IndexOutOfBoundsException(String.format("index=%d, length=%d, capacity=%d", index, length, capacity));
}
}
|
@Test(expected = IndexOutOfBoundsException.class)
public void boundsCheck_whenMoreThanCapacity() {
ArrayUtils.boundsCheck(100, 0, 110);
}
|
public static String trim(final String str, final char ch) {
if (isEmpty(str)) {
return null;
}
final char[] chars = str.toCharArray();
int i = 0, j = chars.length - 1;
// noinspection StatementWithEmptyBody
for (; i < chars.length && chars[i] == ch; i++) {
}
// noinspection StatementWithEmptyBody
for (; j > 0 && chars[j] == ch; j--) {
}
return new String(chars, i, j - i + 1);
}
|
@Test
public void testTrim() {
assertEquals(StringUtil.trim("aaabcdefaaa", 'a'), "bcdef");
assertEquals(StringUtil.trim("bcdef", 'a'), "bcdef");
assertEquals(StringUtil.trim("abcdef", 'a'), "bcdef");
assertEquals(StringUtil.trim("abcdef", 'f'), "abcde");
}
|
@ApiOperation(value = "Create Or update Tenant Profile (saveTenantProfile)",
notes = "Create or update the Tenant Profile. When creating tenant profile, platform generates Tenant Profile Id as " + UUID_WIKI_LINK +
"The newly created Tenant Profile Id will be present in the response. " +
"Specify existing Tenant Profile Id id to update the Tenant Profile. " +
"Referencing non-existing Tenant Profile Id will cause 'Not Found' error. " +
"\n\nUpdate of the tenant profile configuration will cause immediate recalculation of API limits for all affected Tenants. " +
"\n\nThe **'profileData'** object is the part of Tenant Profile that defines API limits and Rate limits. " +
"\n\nYou have an ability to define maximum number of devices ('maxDevice'), assets ('maxAssets') and other entities. " +
"You may also define maximum number of messages to be processed per month ('maxTransportMessages', 'maxREExecutions', etc). " +
"The '*RateLimit' defines the rate limits using simple syntax. For example, '1000:1,20000:60' means up to 1000 events per second but no more than 20000 event per minute. " +
"Let's review the example of tenant profile data below: " +
"\n\n" + MARKDOWN_CODE_BLOCK_START +
"{\n" +
" \"name\": \"Your name\",\n" +
" \"description\": \"Your description\",\n" +
" \"isolatedTbRuleEngine\": false,\n" +
" \"profileData\": {\n" +
" \"configuration\": {\n" +
" \"type\": \"DEFAULT\",\n" +
" \"maxDevices\": 0,\n" +
" \"maxAssets\": 0,\n" +
" \"maxCustomers\": 0,\n" +
" \"maxUsers\": 0,\n" +
" \"maxDashboards\": 0,\n" +
" \"maxRuleChains\": 0,\n" +
" \"maxResourcesInBytes\": 0,\n" +
" \"maxOtaPackagesInBytes\": 0,\n" +
" \"maxResourceSize\": 0,\n" +
" \"transportTenantMsgRateLimit\": \"1000:1,20000:60\",\n" +
" \"transportTenantTelemetryMsgRateLimit\": \"1000:1,20000:60\",\n" +
" \"transportTenantTelemetryDataPointsRateLimit\": \"1000:1,20000:60\",\n" +
" \"transportDeviceMsgRateLimit\": \"20:1,600:60\",\n" +
" \"transportDeviceTelemetryMsgRateLimit\": \"20:1,600:60\",\n" +
" \"transportDeviceTelemetryDataPointsRateLimit\": \"20:1,600:60\",\n" +
" \"transportGatewayMsgRateLimit\": \"20:1,600:60\",\n" +
" \"transportGatewayTelemetryMsgRateLimit\": \"20:1,600:60\",\n" +
" \"transportGatewayTelemetryDataPointsRateLimit\": \"20:1,600:60\",\n" +
" \"transportGatewayDeviceMsgRateLimit\": \"20:1,600:60\",\n" +
" \"transportGatewayDeviceTelemetryMsgRateLimit\": \"20:1,600:60\",\n" +
" \"transportGatewayDeviceTelemetryDataPointsRateLimit\": \"20:1,600:60\",\n" +
" \"maxTransportMessages\": 10000000,\n" +
" \"maxTransportDataPoints\": 10000000,\n" +
" \"maxREExecutions\": 4000000,\n" +
" \"maxJSExecutions\": 5000000,\n" +
" \"maxDPStorageDays\": 0,\n" +
" \"maxRuleNodeExecutionsPerMessage\": 50,\n" +
" \"maxEmails\": 0,\n" +
" \"maxSms\": 0,\n" +
" \"maxCreatedAlarms\": 1000,\n" +
" \"defaultStorageTtlDays\": 0,\n" +
" \"alarmsTtlDays\": 0,\n" +
" \"rpcTtlDays\": 0,\n" +
" \"queueStatsTtlDays\": 0,\n" +
" \"ruleEngineExceptionsTtlDays\": 0,\n" +
" \"warnThreshold\": 0\n" +
" }\n" +
" },\n" +
" \"default\": false\n" +
"}" +
MARKDOWN_CODE_BLOCK_END +
"Remove 'id', from the request body example (below) to create new Tenant Profile entity." +
SYSTEM_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('SYS_ADMIN')")
@RequestMapping(value = "/tenantProfile", method = RequestMethod.POST)
@ResponseBody
public TenantProfile saveTenantProfile(@Parameter(description = "A JSON value representing the tenant profile.")
@RequestBody TenantProfile tenantProfile) throws ThingsboardException {
TenantProfile oldProfile;
if (tenantProfile.getId() == null) {
accessControlService.checkPermission(getCurrentUser(), Resource.TENANT_PROFILE, Operation.CREATE);
oldProfile = null;
} else {
oldProfile = checkTenantProfileId(tenantProfile.getId(), Operation.WRITE);
}
return tbTenantProfileService.save(getTenantId(), tenantProfile, oldProfile);
}
|
@Test
public void testSaveTenantProfile() throws Exception {
loginSysAdmin();
Mockito.reset(tbClusterService);
TenantProfile tenantProfile = this.createTenantProfile("Tenant Profile");
TenantProfile savedTenantProfile = doPost("/api/tenantProfile", tenantProfile, TenantProfile.class);
Assert.assertNotNull(savedTenantProfile);
Assert.assertNotNull(savedTenantProfile.getId());
Assert.assertTrue(savedTenantProfile.getCreatedTime() > 0);
Assert.assertEquals(tenantProfile.getName(), savedTenantProfile.getName());
Assert.assertEquals(tenantProfile.getDescription(), savedTenantProfile.getDescription());
Assert.assertEquals(tenantProfile.getProfileData(), savedTenantProfile.getProfileData());
Assert.assertEquals(tenantProfile.isDefault(), savedTenantProfile.isDefault());
Assert.assertEquals(tenantProfile.isIsolatedTbRuleEngine(), savedTenantProfile.isIsolatedTbRuleEngine());
testBroadcastEntityStateChangeEventTimeManyTimeTenantProfile(savedTenantProfile, ComponentLifecycleEvent.CREATED, 1);
savedTenantProfile.setName("New tenant profile");
doPost("/api/tenantProfile", savedTenantProfile, TenantProfile.class);
TenantProfile foundTenantProfile = doGet("/api/tenantProfile/" + savedTenantProfile.getId().getId().toString(), TenantProfile.class);
Assert.assertEquals(foundTenantProfile.getName(), savedTenantProfile.getName());
testBroadcastEntityStateChangeEventTimeManyTimeTenantProfile(savedTenantProfile, ComponentLifecycleEvent.UPDATED, 1);
}
|
@Override
public void unsubscribe() {
acquireAndEnsureOpen();
try {
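            // Drop any buffered records so they are not returned after unsubscribing.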
fetchBuffer.retainAll(Collections.emptySet());
Timer timer = time.timer(Long.MAX_VALUE);
UnsubscribeEvent unsubscribeEvent = new UnsubscribeEvent(calculateDeadlineMs(timer));
applicationEventHandler.add(unsubscribeEvent);
log.info("Unsubscribing all topics or patterns and assigned partitions {}",
subscriptions.assignedPartitions());
try {
processBackgroundEvents(unsubscribeEvent.future(), timer);
log.info("Unsubscribed all topics or patterns and assigned partitions");
} catch (TimeoutException e) {
log.error("Failed while waiting for the unsubscribe event to complete");
}
resetGroupMetadata();
} catch (Exception e) {
log.error("Unsubscribe failed", e);
throw e;
} finally {
release();
}
}
|
@Test
void testReaperInvokedInUnsubscribe() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
verify(backgroundEventReaper).reap(time.milliseconds());
}
|
@Override
public int choosePartition(Message<?> msg, TopicMetadata metadata) {
// If the message has a key, it supersedes the single partition routing policy
if (msg.hasKey()) {
return signSafeMod(hash.makeHash(msg.getKey()), metadata.numPartitions());
}
return partitionIndex;
}
|
@Test
public void testChoosePartitionWithoutKey() {
Message<?> msg = mock(Message.class);
when(msg.getKey()).thenReturn(null);
SinglePartitionMessageRouterImpl router = new SinglePartitionMessageRouterImpl(1234, HashingScheme.JavaStringHash);
assertEquals(1234, router.choosePartition(msg, new TopicMetadataImpl(2468)));
}
|
public static String fromParts(String... parts) {
return fromPartsAndSeparator(ID_SEPARATOR, parts);
}
|
@Test
void fromParts() {
String id = IdUtils.fromParts("namespace", "flow");
assertThat(id, notNullValue());
assertThat(id, is("namespace_flow"));
String idWithNull = IdUtils.fromParts(null, "namespace", "flow");
assertThat(idWithNull, notNullValue());
assertThat(idWithNull, is("namespace_flow"));
}
|
public static long parseLong(String number) {
if (StrUtil.isBlank(number)) {
return 0L;
}
if (number.startsWith("0x")) {
            // a leading "0x" denotes a hexadecimal number, e.g. 0x04
return Long.parseLong(number.substring(2), 16);
}
try {
return Long.parseLong(number);
} catch (NumberFormatException e) {
return parseNumber(number).longValue();
}
}
|
@Test
public void parseLongTest2() {
// -------------------------- Parse failed -----------------------
final Long v1 = NumberUtil.parseLong(null, null);
assertNull(v1);
final Long v2 = NumberUtil.parseLong(StrUtil.EMPTY, null);
assertNull(v2);
final Long v3 = NumberUtil.parseLong("L3221", 1233L);
assertEquals(1233L, v3);
// -------------------------- Parse success -----------------------
final Long v4 = NumberUtil.parseLong("1233L", null);
assertEquals(1233L, v4);
}
|
@Override
public KTable<K, VOut> aggregate(final Initializer<VOut> initializer,
final Materialized<K, VOut, KeyValueStore<Bytes, byte[]>> materialized) {
return aggregate(initializer, NamedInternal.empty(), materialized);
}
|
@Test
    public void shouldNotHaveNullNamedOnAggregateWithMaterialized() {
assertThrows(NullPointerException.class, () -> cogroupedStream.aggregate(STRING_INITIALIZER, null, Materialized.as("store")));
}
|
public static ParsedCommand parse(
// CHECKSTYLE_RULES.ON: CyclomaticComplexity
final String sql, final Map<String, String> variables) {
validateSupportedStatementType(sql);
final String substituted;
try {
substituted = VariableSubstitutor.substitute(KSQL_PARSER.parse(sql).get(0), variables);
} catch (ParseFailedException e) {
throw new MigrationException(String.format(
"Failed to parse the statement. Statement: %s. Reason: %s",
sql, e.getMessage()));
}
final SqlBaseParser.SingleStatementContext statementContext = KSQL_PARSER.parse(substituted)
.get(0).getStatement();
final boolean isStatement = StatementType.get(statementContext.statement().getClass())
== StatementType.STATEMENT;
return new ParsedCommand(substituted,
isStatement ? Optional.empty() : Optional.of(new AstBuilder(TypeRegistry.EMPTY)
.buildStatement(statementContext)));
}
|
@Test
public void shouldParseTerminateStatement() {
// When:
List<CommandParser.ParsedCommand> commands = parse("terminate some_query_id;");
// Then:
assertThat(commands.size(), is(1));
        assertThat(commands.get(0).getStatement().isPresent(), is(false));
assertThat(commands.get(0).getCommand(), is("terminate some_query_id;"));
}
|
public static int indexOfNonWhiteSpace(CharSequence seq, int offset) {
for (; offset < seq.length(); ++offset) {
if (!Character.isWhitespace(seq.charAt(offset))) {
return offset;
}
}
return -1;
}
|
@Test
public void testIndexOfNonWhiteSpace() {
assertEquals(-1, indexOfNonWhiteSpace("", 0));
assertEquals(-1, indexOfNonWhiteSpace(" ", 0));
assertEquals(-1, indexOfNonWhiteSpace(" \t", 0));
assertEquals(-1, indexOfNonWhiteSpace(" \t\r\n", 0));
assertEquals(2, indexOfNonWhiteSpace(" \tfoo\r\n", 0));
assertEquals(2, indexOfNonWhiteSpace(" \tfoo\r\n", 1));
assertEquals(4, indexOfNonWhiteSpace(" \tfoo\r\n", 4));
assertEquals(-1, indexOfNonWhiteSpace(" \tfoo\r\n", 10));
assertEquals(-1, indexOfNonWhiteSpace(" \tfoo\r\n", Integer.MAX_VALUE));
}
|
@SuppressWarnings("unchecked")
public E lookup(final int key)
{
@DoNotSub int size = this.size;
final int[] keys = this.keys;
final Object[] values = this.values;
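        // Linear scan; entries are kept ordered from most to least recently used.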
for (@DoNotSub int i = 0; i < size; i++)
{
if (key == keys[i])
{
final E value = (E)values[i];
makeMostRecent(key, value, i);
return value;
}
}
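        // Cache miss: create a new value, evicting the least recently used entry if the cache is full.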
final E value = factory.apply(key);
if (value != null)
{
if (capacity == size)
{
closer.accept((E)values[size - 1]);
}
else
{
size++;
this.size = size;
}
makeMostRecent(key, value, size - 1);
}
return value;
}
|
@Test
void shouldSupportKeyOfZero()
{
final AutoCloseable actual = cache.lookup(0);
assertSame(lastValue, actual);
assertNotNull(lastValue);
}
|
@Override
public String getRmAppPageUrlBase(ApplicationId appId)
throws IOException, YarnException {
SubClusterId scid = federationFacade.getApplicationHomeSubCluster(appId);
createSubclusterIfAbsent(scid);
SubClusterInfo subClusterInfo = subClusters.get(scid).getLeft();
String scheme = WebAppUtils.getHttpSchemePrefix(getConf());
return StringHelper.pjoin(scheme + subClusterInfo.getRMWebServiceAddress(), "cluster", "app");
}
|
@Test
public void testGetRmAppPageUrlBase() throws IOException, YarnException {
testHelper(true);
String scheme = WebAppUtils.getHttpSchemePrefix(conf);
Assert.assertEquals(fetcher.getRmAppPageUrlBase(appId1),
StringHelper.pjoin(scheme + clusterInfo1.getRMWebServiceAddress(), "cluster", "app"));
Assert.assertEquals(fetcher.getRmAppPageUrlBase(appId2),
StringHelper.pjoin(scheme + clusterInfo2.getRMWebServiceAddress(), "cluster", "app"));
}
|
static PiPreEntry translate(Group group, PiPipeconf pipeconf, Device device)
throws PiTranslationException {
checkNotNull(group);
final List<OutputInstruction> outInstructions = Lists.newArrayList();
int truncateMaxLen = PiCloneSessionEntry.DO_NOT_TRUNCATE;
for (GroupBucket bucket : group.buckets().buckets()) {
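            // Each bucket must contain exactly one OUTPUT instruction and at most one
            // TRUNCATE instruction (TRUNCATE is allowed only for CLONE groups).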
int numInstructionsInBucket = bucket.treatment().allInstructions().size();
List<OutputInstruction> outputs =
getInstructions(bucket, Instruction.Type.OUTPUT, OutputInstruction.class);
List<TruncateInstruction> truncates =
getInstructions(bucket, Instruction.Type.TRUNCATE, TruncateInstruction.class);
if (outputs.size() != 1) {
throw new PiTranslationException(
"support only groups with just one OUTPUT instruction per bucket");
}
outInstructions.add(outputs.get(0));
if (truncates.size() != 0) {
if (group.type() != GroupDescription.Type.CLONE) {
throw new PiTranslationException("only CLONE group support truncate instruction");
}
if (truncates.size() != 1) {
throw new PiTranslationException(
"support only groups with just one TRUNCATE instruction per bucket");
}
int truncateInstMaxLen = truncates.get(0).maxLen();
if (truncateMaxLen != PiCloneSessionEntry.DO_NOT_TRUNCATE &&
truncateMaxLen != truncateInstMaxLen) {
throw new PiTranslationException("all TRUNCATE instruction must be the same in a CLONE group");
}
truncateMaxLen = truncateInstMaxLen;
} else if (truncateMaxLen != PiCloneSessionEntry.DO_NOT_TRUNCATE) {
// No truncate instruction found in this bucket, but previous bucket contains one.
throw new PiTranslationException("all TRUNCATE instruction must be the same in a CLONE group");
}
if (numInstructionsInBucket != outputs.size() + truncates.size()) {
throw new PiTranslationException("bucket contains unsupported instruction(s)");
}
}
switch (group.type()) {
case ALL:
return PiMulticastGroupEntry.builder()
.withGroupId(group.id().id())
.addReplicas(getReplicas(outInstructions, device))
.build();
case CLONE:
return PiCloneSessionEntry.builder()
.withSessionId(group.id().id())
.addReplicas(getReplicas(outInstructions, device))
.withMaxPacketLengthBytes(truncateMaxLen)
.build();
default:
throw new PiTranslationException(format(
"group type %s not supported", group.type()));
}
}
|
@Test
public void testInvalidPreGroups() {
try {
PiReplicationGroupTranslatorImpl
.translate(INVALID_ALL_GROUP, null, null);
Assert.fail("Did not get expected exception.");
} catch (PiTranslationException ex) {
Assert.assertEquals("support only groups with just one OUTPUT instruction per bucket", ex.getMessage());
}
try {
PiReplicationGroupTranslatorImpl
.translate(INVALID_ALL_GROUP_2, null, null);
Assert.fail("Did not get expected exception.");
} catch (PiTranslationException ex) {
Assert.assertEquals("only CLONE group support truncate instruction", ex.getMessage());
}
try {
PiReplicationGroupTranslatorImpl
.translate(INVALID_ALL_GROUP_3, null, null);
Assert.fail("Did not get expected exception.");
} catch (PiTranslationException ex) {
Assert.assertEquals("bucket contains unsupported instruction(s)", ex.getMessage());
}
try {
PiReplicationGroupTranslatorImpl
.translate(INVALID_CLONE_GROUP, null, null);
Assert.fail("Did not get expected exception.");
} catch (PiTranslationException ex) {
Assert.assertEquals("support only groups with just one OUTPUT instruction per bucket", ex.getMessage());
}
try {
PiReplicationGroupTranslatorImpl
.translate(INVALID_CLONE_GROUP_2, null, null);
Assert.fail("Did not get expected exception.");
} catch (PiTranslationException ex) {
Assert.assertEquals("all TRUNCATE instruction must be the same in a CLONE group", ex.getMessage());
}
try {
PiReplicationGroupTranslatorImpl
.translate(INVALID_CLONE_GROUP_3, null, null);
Assert.fail("Did not get expected exception.");
} catch (PiTranslationException ex) {
Assert.assertEquals("support only groups with just one TRUNCATE instruction per bucket", ex.getMessage());
}
try {
PiReplicationGroupTranslatorImpl
.translate(INVALID_CLONE_GROUP_4, null, null);
Assert.fail("Did not get expected exception.");
} catch (PiTranslationException ex) {
Assert.assertEquals("all TRUNCATE instruction must be the same in a CLONE group", ex.getMessage());
}
}
|
public boolean isPresent(final UUID accountUuid, final byte deviceId) {
return checkPresenceTimer.record(() ->
presenceCluster.withCluster(connection ->
connection.sync().exists(getPresenceKey(accountUuid, deviceId))) == 1);
}
|
@Test
void testIsPresent() {
final UUID accountUuid = UUID.randomUUID();
final byte deviceId = 1;
assertFalse(clientPresenceManager.isPresent(accountUuid, deviceId));
clientPresenceManager.setPresent(accountUuid, deviceId, NO_OP);
assertTrue(clientPresenceManager.isPresent(accountUuid, deviceId));
}
|
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
@Test
public void usingExactEquality_containsNoneOf_primitiveFloatArray_failure() {
expectFailureWhenTestingThat(array(1.1f, 2.2f, 3.3f))
.usingExactEquality()
.containsNoneOf(array(99.99f, 2.2f));
assertFailureKeys(
"value of",
"expected not to contain any of",
"testing whether",
"but contained",
"corresponding to",
"---",
"full contents");
assertFailureValue("expected not to contain any of", "[" + 99.99f + ", " + 2.2f + "]");
assertFailureValue("but contained", "[" + 2.2f + "]");
assertFailureValue("corresponding to", Float.toString(2.2f));
}
|
public static boolean hasDirectChild(Element parent, String namespace, String tag) {
NodeList children = parent.getElementsByTagNameNS(namespace, tag);
for (int i = 0; i < children.getLength(); i++) {
Node child = children.item(i);
if (child.getNodeType() == Node.ELEMENT_NODE && child.getParentNode() == parent) {
return true;
}
}
return false;
}
|
@Test
void hasDirectChild() {
assertTrue(XmlUtil.hasDirectChild(parent, "http://example.com", "child"));
assertFalse(XmlUtil.hasDirectChild(parent, "http://example.com", "nonExistentChild"));
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
try {
return new MantaObjectAttributeAdapter(session)
.toAttributes(session.getClient().head(file.getAbsolute()));
}
catch(MantaException e) {
throw new MantaExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
catch(MantaClientHttpResponseException e) {
throw new MantaHttpExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
}
|
@Test
public void testFindFile() throws Exception {
final Path file = randomFile();
new MantaTouchFeature(session).touch(file, new TransferStatus().withMime("x-application/cyberduck"));
final PathAttributes attributes = new MantaAttributesFinderFeature(session).find(file);
assertNotNull(attributes);
assertEquals(-1L, attributes.getCreationDate());
assertNotEquals(-1L, attributes.getModificationDate());
assertNotNull(attributes.getETag());
new MantaDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static String truncateMessageLineLength(Object message) {
return truncateMessageLineLength(message, MAX_TRUNCATED_LENGTH);
}
|
@Test
public void truncateSpecificLength() throws Exception {
String s = CommonUtils.randomAlphaNumString(LogUtils.MAX_TRUNCATED_LENGTH);
for (int length = 1; length < LogUtils.MAX_TRUNCATED_LENGTH; length++) {
String truncated = LogUtils.truncateMessageLineLength(s, length);
assertTrue(truncated.startsWith(s.substring(0, length) + " ..."));
}
}
|
public <T> void postForm(String url, Header header, Query query, Map<String, String> bodyValues, Type responseType,
Callback<T> callback) {
execute(url, HttpMethod.POST,
new RequestHttpEntity(header.setContentType(MediaType.APPLICATION_FORM_URLENCODED), query, bodyValues),
responseType, callback);
}
|
@Test
void testPostForm() throws Exception {
Header header = Header.newInstance().setContentType(MediaType.APPLICATION_XML);
restTemplate.postForm(TEST_URL, header, new HashMap<>(), String.class, mockCallback);
verify(requestClient).execute(any(), eq("POST"), any(), any(), eq(mockCallback));
assertEquals(MediaType.APPLICATION_FORM_URLENCODED, header.getValue(HttpHeaderConsts.CONTENT_TYPE));
}
|
public static String toJson(final Object obj, boolean prettyFormat) {
return JSON.toJSONString(obj, prettyFormat);
}
|
@Test
public void testToJson_prettyString() {
RemotingSerializable serializable = new RemotingSerializable() {
private List<String> stringList = Arrays.asList("a", "o", "e", "i", "u", "v");
public List<String> getStringList() {
return stringList;
}
public void setStringList(List<String> stringList) {
this.stringList = stringList;
}
};
String prettyString = serializable.toJson(true);
assertThat(prettyString).isEqualTo("{\n" +
"\t\"stringList\":[\n" +
"\t\t\"a\",\n" +
"\t\t\"o\",\n" +
"\t\t\"e\",\n" +
"\t\t\"i\",\n" +
"\t\t\"u\",\n" +
"\t\t\"v\"\n" +
"\t]\n" +
"}");
}
|
@Override
public UpdateNodeResourceResponse updateNodeResource(UpdateNodeResourceRequest request)
throws YarnException, IOException {
// parameter verification.
if (request == null) {
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing UpdateNodeResource request.", null);
}
String subClusterId = request.getSubClusterId();
if (StringUtils.isBlank(subClusterId)) {
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing UpdateNodeResource SubClusterId.", null);
}
try {
long startTime = clock.getTime();
RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
new Class[]{UpdateNodeResourceRequest.class}, new Object[]{request});
Collection<UpdateNodeResourceResponse> updateNodeResourceResps =
remoteMethod.invokeConcurrent(this, UpdateNodeResourceResponse.class, subClusterId);
if (CollectionUtils.isNotEmpty(updateNodeResourceResps)) {
long stopTime = clock.getTime();
routerMetrics.succeededUpdateNodeResourceRetrieved(stopTime - startTime);
return UpdateNodeResourceResponse.newInstance();
}
} catch (YarnException e) {
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
RouterServerUtil.logAndThrowException(e,
"Unable to updateNodeResource due to exception. " + e.getMessage());
}
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
throw new YarnException("Unable to updateNodeResource.");
}
|
@Test
public void testUpdateNodeResourceEmptyRequest() throws Exception {
// null request1.
LambdaTestUtils.intercept(YarnException.class, "Missing UpdateNodeResource request.",
() -> interceptor.updateNodeResource(null));
// null request2.
Map<NodeId, ResourceOption> nodeResourceMap = new HashMap<>();
UpdateNodeResourceRequest request = UpdateNodeResourceRequest.newInstance(nodeResourceMap);
LambdaTestUtils.intercept(YarnException.class, "Missing UpdateNodeResource SubClusterId.",
() -> interceptor.updateNodeResource(request));
}
|
public boolean downloadIfNecessary(final DownloadableFile downloadableFile) {
boolean updated = false;
boolean downloaded = false;
        while (!updated) {
            try {
                fetchUpdateCheckHeaders(downloadableFile);
                if (downloadableFile.doesNotExist() || !downloadableFile.isChecksumEquals(getMd5())) {
                    PerfTimer timer = PerfTimer.start("Downloading new " + downloadableFile + " with md5 signature: " + md5);
                    downloaded = download(downloadableFile);
                    timer.stop();
                }
                updated = true;
            } catch (Exception e) {
                try {
                    int period = Integer.parseInt(System.getProperty("sleep.for.download", DEFAULT_FAILED_DOWNLOAD_SLEEP_MS));
                    LOG.error("Couldn't update {}. Sleeping for {}s. Error: ", downloadableFile, TimeUnit.SECONDS.convert(period, TimeUnit.MILLISECONDS), e);
                    Thread.sleep(period);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                }
            }
        }
        return downloaded;
}
|
@Test
public void shouldReturnTrueIfTheFileIsDownloaded() {
ServerBinaryDownloader downloader = new ServerBinaryDownloader(new GoAgentServerHttpClientBuilder(null, SslVerificationMode.NONE, null, null, null), ServerUrlGeneratorMother.generatorFor("localhost", server.getPort()));
assertThat(downloader.downloadIfNecessary(DownloadableFile.AGENT), is(true));
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
final Region region = regionService.lookup(file);
try {
if(containerService.isContainer(file)) {
final ContainerInfo info = session.getClient().getContainerInfo(region,
containerService.getContainer(file).getName());
final PathAttributes attributes = new PathAttributes();
attributes.setSize(info.getTotalSize());
attributes.setRegion(info.getRegion().getRegionId());
return attributes;
}
final ObjectMetadata metadata;
try {
try {
metadata = session.getClient().getObjectMetaData(region,
containerService.getContainer(file).getName(), containerService.getKey(file));
}
catch(GenericException e) {
throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
}
catch(NotfoundException e) {
if(file.isDirectory()) {
// Directory placeholder file may be missing. Still return empty attributes when we find children
try {
new SwiftObjectListService(session).list(file, new CancellingListProgressListener());
}
catch(ListCanceledException l) {
// Found common prefix
return PathAttributes.EMPTY;
}
catch(NotfoundException n) {
throw e;
}
// Common prefix only
return PathAttributes.EMPTY;
}
// Try to find pending large file upload
final Write.Append append = new SwiftLargeObjectUploadFeature(session, regionService, new SwiftWriteFeature(session, regionService)).append(file, new TransferStatus());
if(append.append) {
return new PathAttributes().withSize(append.offset);
}
throw e;
}
if(file.isDirectory()) {
if(!StringUtils.equals(SwiftDirectoryFeature.DIRECTORY_MIME_TYPE, metadata.getMimeType())) {
throw new NotfoundException(String.format("File %s has set MIME type %s but expected %s",
file.getAbsolute(), metadata.getMimeType(), SwiftDirectoryFeature.DIRECTORY_MIME_TYPE));
}
}
if(file.isFile()) {
if(StringUtils.equals(SwiftDirectoryFeature.DIRECTORY_MIME_TYPE, metadata.getMimeType())) {
throw new NotfoundException(String.format("File %s has set MIME type %s",
file.getAbsolute(), metadata.getMimeType()));
}
}
return this.toAttributes(metadata);
}
catch(GenericException e) {
throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
}
|
@Test
public void testFindRoot() throws Exception {
final SwiftAttributesFinderFeature f = new SwiftAttributesFinderFeature(session);
assertEquals(PathAttributes.EMPTY, f.find(new Path("/", EnumSet.of(Path.Type.directory))));
}
|
@Override
public PageResult<DiyTemplateDO> getDiyTemplatePage(DiyTemplatePageReqVO pageReqVO) {
return diyTemplateMapper.selectPage(pageReqVO);
}
|
@Test
    @Disabled // TODO replace the null values with the required ones, then remove the @Disabled annotation
public void testGetDiyTemplatePage() {
        // mock data
        DiyTemplateDO dbDiyTemplate = randomPojo(DiyTemplateDO.class, o -> { // the record the query should find
o.setName(null);
o.setUsed(null);
o.setUsedTime(null);
o.setRemark(null);
o.setPreviewPicUrls(null);
o.setProperty(null);
o.setCreateTime(null);
});
diyTemplateMapper.insert(dbDiyTemplate);
        // test: name does not match
        diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setName(null)));
        // test: used does not match
        diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setUsed(null)));
        // test: usedTime does not match
        diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setUsedTime(null)));
        // test: remark does not match
        diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setRemark(null)));
        // test: previewPicUrls does not match
        diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setPreviewPicUrls(null)));
        // test: property does not match
        diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setProperty(null)));
        // test: createTime does not match
        diyTemplateMapper.insert(cloneIgnoreId(dbDiyTemplate, o -> o.setCreateTime(null)));
        // prepare parameters
DiyTemplatePageReqVO reqVO = new DiyTemplatePageReqVO();
reqVO.setName(null);
reqVO.setUsed(null);
reqVO.setUsedTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
        // invoke
PageResult<DiyTemplateDO> pageResult = diyTemplateService.getDiyTemplatePage(reqVO);
        // assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbDiyTemplate, pageResult.getList().get(0));
}
|
@Override
public Map<String, String> properties() {
return Collections.unmodifiableMap(systemPropertiesConfigParser.parse(systemPropertiesProvider.get()));
}
|
@Test
public void testNonStringSystemPropertyValue() {
String key = getClass().getSimpleName();
systemProperties.put(key, true);
assertNull(provider.properties().get(key));
}
|
public void updateInstanceMetadata(Service service, String metadataId, InstanceMetadata instanceMetadata) {
instanceMetadataMap.computeIfAbsent(service, k -> new ConcurrentHashMap<>(INITIAL_CAPACITY)).put(metadataId, instanceMetadata);
}
|
@Test
void testUpdateInstanceMetadata() throws NoSuchFieldException, IllegalAccessException {
InstanceMetadata instanceMetadata = new InstanceMetadata();
Class<InstanceMetadata> instanceMetadataClass = InstanceMetadata.class;
Field enabled = instanceMetadataClass.getDeclaredField("enabled");
enabled.setAccessible(true);
enabled.set(instanceMetadata, false);
namingMetadataManager.updateInstanceMetadata(service, METADATA_ID, instanceMetadata);
Optional<InstanceMetadata> optional = namingMetadataManager.getInstanceMetadata(service, METADATA_ID);
assertTrue(optional.isPresent());
assertNotNull(optional.get());
assertFalse(optional.get().isEnabled());
}
|
@Override
public boolean equals(Object other) {
return other instanceof SourceAndTarget && toString().equals(other.toString());
}
|
@Test
public void testEquals() {
SourceAndTarget sourceAndTarget = new SourceAndTarget("source", "target");
SourceAndTarget sourceAndTarget2 = new SourceAndTarget("source", "target");
SourceAndTarget sourceAndTarget3 = new SourceAndTarget("error-source", "target");
assertEquals(sourceAndTarget, sourceAndTarget2);
assertNotEquals(sourceAndTarget, sourceAndTarget3);
class FakeSourceAndTarget {
private final String source;
private final String target;
public FakeSourceAndTarget(String source, String target) {
this.source = source;
this.target = target;
}
@Override
public String toString() {
return source + "->" + target;
}
}
FakeSourceAndTarget fakeSourceAndTarget = new FakeSourceAndTarget("source", "target");
assertNotEquals(sourceAndTarget, fakeSourceAndTarget);
}
|
@Override
public void setPreparedStatementValue( DatabaseMeta databaseMeta, PreparedStatement preparedStatement, int index,
Object data ) throws KettleDatabaseException {
try {
if ( data != null ) {
preparedStatement.setTimestamp( index, getTimestamp( data ) );
} else {
preparedStatement.setNull( index, java.sql.Types.TIMESTAMP );
}
} catch ( Exception e ) {
throw new KettleDatabaseException( toStringMeta() + " : Unable to set value on prepared statement on index "
+ index, e );
}
}
|
@Test
public void testSetPreparedStatementValue() throws Exception {
ValueMetaTimestamp vm = new ValueMetaTimestamp();
PreparedStatement ps = mock( PreparedStatement.class );
doAnswer( (Answer<Object>) invocationOnMock -> {
Object ts = invocationOnMock.getArguments()[ 1 ];
return ts.toString();
} ).when( ps ).setTimestamp( anyInt(), any( Timestamp.class ) );
try {
vm.setPreparedStatementValue( mock( DatabaseMeta.class ), ps, 0, null );
} catch ( KettleDatabaseException ex ) {
fail( "Check PDI-11547" );
}
}
|
public static SubscriptionData build(final String topic, final String subString,
final String type) throws Exception {
if (ExpressionType.TAG.equals(type) || type == null) {
return buildSubscriptionData(topic, subString);
}
if (StringUtils.isEmpty(subString)) {
throw new IllegalArgumentException("Expression can't be null! " + type);
}
SubscriptionData subscriptionData = new SubscriptionData();
subscriptionData.setTopic(topic);
subscriptionData.setSubString(subString);
subscriptionData.setExpressionType(type);
return subscriptionData;
}
|
@Test
public void testBuildSQL() {
try {
SubscriptionData subscriptionData = FilterAPI.build(
"TOPIC", "a is not null", ExpressionType.SQL92
);
assertThat(subscriptionData).isNotNull();
assertThat(subscriptionData.getTopic()).isEqualTo("TOPIC");
assertThat(subscriptionData.getExpressionType()).isEqualTo(ExpressionType.SQL92);
} catch (Exception e) {
e.printStackTrace();
assertThat(Boolean.FALSE).isTrue();
}
}
|