focal_method
| test_case
|
---|---|
@Override
public PMML_MODEL getPMMLModelType(){
return PMML_MODEL.TREE_MODEL;
}
|
@Test
void getPMMLModelType() {
assertThat(evaluator.getPMMLModelType()).isEqualTo(PMML_MODEL.TREE_MODEL);
}
|
@VisibleForTesting
static Map<String, Object> serializableHeaders(Map<String, Object> headers) {
Map<String, Object> returned = new HashMap<>();
if (headers != null) {
for (Map.Entry<String, Object> h : headers.entrySet()) {
Object value = h.getValue();
if (value instanceof List<?>) {
// Transformation for List type headers
value =
((List<?>) value)
.stream().map(RabbitMqMessage::getTransformedValue).collect(Collectors.toList());
} else if (!(value instanceof Serializable)) {
value = getTransformedValue(value);
}
returned.put(h.getKey(), value);
}
}
return returned;
}
|
@Test
public void testSerializableHeadersWithLongStringValues() {
Map<String, Object> rawHeaders = new HashMap<>();
String key1 = "key1", key2 = "key2", value1 = "value1", value2 = "value2";
rawHeaders.put(key1, LongStringHelper.asLongString(value1));
rawHeaders.put(key2, LongStringHelper.asLongString(value2.getBytes(StandardCharsets.UTF_8)));
Map<String, Object> serializedHeaders = RabbitMqMessage.serializableHeaders(rawHeaders);
assertEquals(value1, serializedHeaders.get(key1));
assertEquals(value2, serializedHeaders.get(key2));
}
|
public double calculateMinPercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) {
if (LOG.isTraceEnabled()) {
LOG.trace("Calculating min percentage used by. Used Mem: {} Total Mem: {}"
+ " Used Normalized Resources: {} Total Normalized Resources: {}", totalMemoryMb, usedMemoryMb,
toNormalizedMap(), used.toNormalizedMap());
}
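// min tracks the smallest used/total ratio seen across memory, CPU and all other resource kinds.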
double min = 1.0;
if (usedMemoryMb > totalMemoryMb) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalMemoryMb != 0.0) {
min = Math.min(min, usedMemoryMb / totalMemoryMb);
}
double totalCpu = getTotalCpu();
if (used.getTotalCpu() > totalCpu) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalCpu != 0.0) {
min = Math.min(min, used.getTotalCpu() / totalCpu);
}
if (used.otherResources.length > otherResources.length) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
for (int i = 0; i < otherResources.length; i++) {
if (otherResources[i] == 0.0) {
//Skip any resources where the total is 0; the percent used for such a resource isn't meaningful.
//We fall back to prioritizing by cpu, memory and any other resources by ignoring this value.
continue;
}
if (i >= used.otherResources.length) {
//Resources missing from used are using none of that resource
return 0;
}
if (used.otherResources[i] > otherResources[i]) {
String info = String.format("%s, %f > %f", getResourceNameForResourceIndex(i), used.otherResources[i], otherResources[i]);
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb, info);
}
min = Math.min(min, used.otherResources[i] / otherResources[i]);
}
return min * 100.0;
}
|
@Test
public void testCalculateMinWithTooLittleResourceInTotal() {
Map<String, Double> allResourcesMap = new HashMap<>();
allResourcesMap.put(Constants.COMMON_CPU_RESOURCE_NAME, 2.0);
allResourcesMap.put(gpuResourceName, 1.0);
NormalizedResources resources = new NormalizedResources(normalize(allResourcesMap));
Map<String, Double> usedResourcesMap = new HashMap<>();
usedResourcesMap.put(Constants.COMMON_CPU_RESOURCE_NAME, 1.0);
usedResourcesMap.put(gpuResourceName, 5.0);
NormalizedResources usedResources = new NormalizedResources(normalize(usedResourcesMap));
assertThrows(IllegalArgumentException.class, () -> resources.calculateMinPercentageUsedBy(usedResources, 4, 1));
}
|
@Override
protected void copy(List<HadoopResourceId> srcResourceIds, List<HadoopResourceId> destResourceIds)
throws IOException {
for (int i = 0; i < srcResourceIds.size(); ++i) {
// this enforces src and dest file systems to match
final org.apache.hadoop.fs.FileSystem fs =
srcResourceIds.get(i).toPath().getFileSystem(configuration);
// Unfortunately HDFS FileSystems don't support a native copy operation so we are forced
// to use the inefficient implementation found in FileUtil which copies all the bytes through
// the local machine.
//
// HDFS FileSystem does define a concat method, but we could only find DFSFileSystem
// implementing it, and DFSFileSystem implements concat by deleting the srcs afterwards, which
// is not what we want. Also, all the other FileSystem implementations we saw threw
// UnsupportedOperationException within concat.
final boolean success =
FileUtil.copy(
fs,
srcResourceIds.get(i).toPath(),
fs,
destResourceIds.get(i).toPath(),
false,
true,
fs.getConf());
if (!success) {
// Defensive coding as this should not happen in practice
throw new IOException(
String.format(
"Unable to copy resource %s to %s. No further information provided by underlying filesystem.",
srcResourceIds.get(i).toPath(), destResourceIds.get(i).toPath()));
}
}
}
|
@Test
public void testCopy() throws Exception {
create("testFileA", "testDataA".getBytes(StandardCharsets.UTF_8));
create("testFileB", "testDataB".getBytes(StandardCharsets.UTF_8));
fileSystem.copy(
ImmutableList.of(testPath("testFileA"), testPath("testFileB")),
ImmutableList.of(testPath("copyTestFileA"), testPath("copyTestFileB")));
assertArrayEquals("testDataA".getBytes(StandardCharsets.UTF_8), read("testFileA", 0));
assertArrayEquals("testDataB".getBytes(StandardCharsets.UTF_8), read("testFileB", 0));
assertArrayEquals("testDataA".getBytes(StandardCharsets.UTF_8), read("copyTestFileA", 0));
assertArrayEquals("testDataB".getBytes(StandardCharsets.UTF_8), read("copyTestFileB", 0));
}
|
public void performSortOperation(int option, List<File> pdf) {
switch (option) {
case DATE_INDEX:
sortFilesByDateNewestToOldest(pdf);
break;
case NAME_INDEX:
sortByNameAlphabetical(pdf);
break;
case SIZE_INCREASING_ORDER_INDEX:
sortFilesBySizeIncreasingOrder(pdf);
break;
case SIZE_DECREASING_ORDER_INDEX:
sortFilesBySizeDecreasingOrder(pdf);
break;
}
}
|
@Test
public void shouldReturnArraySortedAlphabetically() throws IOException {
// given
// (for some reason sorting mocks doesn't work)
List<String> paths = getPaths();
mFiles = new ArrayList<>(paths.size());
for (String item : paths) {
File f = new File(item);
f.createNewFile();
mFiles.add(f);
}
File[] expected = new File[]{mFiles.get(5), mFiles.get(3), mFiles.get(4),
mFiles.get(0), mFiles.get(1), mFiles.get(2)};
// when
mInstance.performSortOperation(mInstance.NAME_INDEX, mFiles);
// then
Assert.assertEquals(asList(expected), mFiles);
}
|
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
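// Reflectively parsing a DoFn is done at most once per class; results are memoized in signatureCache.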
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
@Test
public void testStateParameterNoAnnotation() throws Exception {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("missing StateId annotation");
thrown.expectMessage("myProcessElement");
thrown.expectMessage("index 1");
thrown.expectMessage(not(mentionsTimers()));
DoFnSignatures.getSignature(
new DoFn<KV<String, Integer>, Long>() {
@ProcessElement
public void myProcessElement(ProcessContext context, ValueState<Integer> noAnnotation) {}
}.getClass());
}
|
String upload(File report) {
LOG.debug("Upload report");
long startTime = System.currentTimeMillis();
Part filePart = new Part(MediaTypes.ZIP, report);
PostRequest post = new PostRequest("api/ce/submit")
.setMediaType(MediaTypes.PROTOBUF)
.setParam("projectKey", moduleHierarchy.root().key())
.setParam("projectName", moduleHierarchy.root().getOriginalName())
.setPart("report", filePart);
ciConfiguration.getDevOpsPlatformInfo().ifPresent(devOpsPlatformInfo -> {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(DEVOPS_PLATFORM_URL, devOpsPlatformInfo.getUrl()));
post.setParam(CHARACTERISTIC, buildCharacteristicParam(DEVOPS_PLATFORM_PROJECT_IDENTIFIER, devOpsPlatformInfo.getProjectIdentifier()));
});
String branchName = branchConfiguration.branchName();
if (branchName != null) {
if (branchConfiguration.branchType() != PULL_REQUEST) {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(CeTaskCharacteristics.BRANCH, branchName));
post.setParam(CHARACTERISTIC, buildCharacteristicParam(BRANCH_TYPE, branchConfiguration.branchType().name()));
} else {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(CeTaskCharacteristics.PULL_REQUEST, branchConfiguration.pullRequestKey()));
}
}
WsResponse response;
try {
post.setWriteTimeOutInMs(properties.reportPublishTimeout() * 1000);
response = wsClient.call(post);
} catch (Exception e) {
throw new IllegalStateException("Failed to upload report: " + e.getMessage(), e);
}
try {
response.failIfNotSuccessful();
} catch (HttpException e) {
throw MessageException.of(String.format("Server failed to process report. Please check server logs: %s", DefaultScannerWsClient.createErrorMessage(e)));
}
try (InputStream protobuf = response.contentStream()) {
return Ce.SubmitResponse.parser().parseFrom(protobuf).getTaskId();
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
long stopTime = System.currentTimeMillis();
LOG.info("Analysis report uploaded in " + (stopTime - startTime) + "ms");
}
}
|
@Test
public void upload_error_message() {
HttpException ex = new HttpException("url", 404, "{\"errors\":[{\"msg\":\"Organization with key 'MyOrg' does not exist\"}]}");
WsResponse response = mock(WsResponse.class);
when(response.failIfNotSuccessful()).thenThrow(ex);
when(wsClient.call(any(WsRequest.class))).thenThrow(new IllegalStateException("timeout"));
assertThatThrownBy(() -> underTest.upload(reportTempFolder.newFile()))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Failed to upload report: timeout");
}
|
public ForComputation forComputation(String computation) {
return new ForComputation(computation);
}
|
@Test
public void testBadCoderEquality() throws Exception {
WindmillStateCache.ForKeyAndFamily keyCache1 =
cache.forComputation(COMPUTATION).forKey(COMPUTATION_KEY, 0L, 0L).forFamily(STATE_FAMILY);
StateTag<TestState> tag = new TestStateTagWithBadEquality("tag1");
keyCache1.put(StateNamespaces.global(), tag, new TestState("g1"), 1);
keyCache1.persist();
keyCache1 =
cache.forComputation(COMPUTATION).forKey(COMPUTATION_KEY, 0L, 1L).forFamily(STATE_FAMILY);
assertEquals(Optional.of(new TestState("g1")), keyCache1.get(StateNamespaces.global(), tag));
assertEquals(
Optional.of(new TestState("g1")),
keyCache1.get(StateNamespaces.global(), new TestStateTagWithBadEquality("tag1")));
}
|
@Override
public void handle(ContainersLauncherEvent event) {
// TODO: ContainersLauncher launches containers one by one!!
Container container = event.getContainer();
ContainerId containerId = container.getContainerId();
switch (event.getType()) {
case LAUNCH_CONTAINER:
Application app =
context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
ContainerLaunch launch =
new ContainerLaunch(context, getConfig(), dispatcher, exec, app,
event.getContainer(), dirsHandler, containerManager);
containerLauncher.submit(launch);
running.put(containerId, launch);
break;
case RELAUNCH_CONTAINER:
app = context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
ContainerRelaunch relaunch =
new ContainerRelaunch(context, getConfig(), dispatcher, exec, app,
event.getContainer(), dirsHandler, containerManager);
containerLauncher.submit(relaunch);
running.put(containerId, relaunch);
break;
case RECOVER_CONTAINER:
app = context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
launch = new RecoveredContainerLaunch(context, getConfig(), dispatcher,
exec, app, event.getContainer(), dirsHandler, containerManager);
containerLauncher.submit(launch);
running.put(containerId, launch);
break;
case RECOVER_PAUSED_CONTAINER:
app = context.getApplications().get(
containerId.getApplicationAttemptId().getApplicationId());
launch = new RecoverPausedContainerLaunch(context, getConfig(),
dispatcher, exec, app, event.getContainer(), dirsHandler,
containerManager);
containerLauncher.submit(launch);
break;
case CLEANUP_CONTAINER:
cleanup(event, containerId, true);
break;
case CLEANUP_CONTAINER_FOR_REINIT:
cleanup(event, containerId, false);
break;
case SIGNAL_CONTAINER:
SignalContainersLauncherEvent signalEvent =
(SignalContainersLauncherEvent) event;
ContainerLaunch runningContainer = running.get(containerId);
if (runningContainer == null) {
// Container not launched. So nothing needs to be done.
LOG.info("Container " + containerId + " not running, nothing to signal.");
return;
}
try {
runningContainer.signalContainer(signalEvent.getCommand());
} catch (IOException e) {
LOG.warn("Got exception while signaling container " + containerId
+ " with command " + signalEvent.getCommand());
}
break;
case PAUSE_CONTAINER:
ContainerLaunch launchedContainer = running.get(containerId);
if (launchedContainer == null) {
// Container not launched. So nothing needs to be done.
return;
}
// Pause the container
try {
launchedContainer.pauseContainer();
} catch (Exception e) {
LOG.info("Got exception while pausing container: " +
StringUtils.stringifyException(e));
}
break;
case RESUME_CONTAINER:
ContainerLaunch launchCont = running.get(containerId);
if (launchCont == null) {
// Container not launched. So nothing needs to be done.
return;
}
// Resume the container.
try {
launchCont.resumeContainer();
} catch (Exception e) {
LOG.info("Got exception while resuming container: " +
StringUtils.stringifyException(e));
}
break;
}
}
|
@SuppressWarnings("unchecked")
@Test
public void testLaunchContainerEvent()
throws IllegalArgumentException {
Map<ContainerId, ContainerLaunch> dummyMap = spy.running;
when(event.getType())
.thenReturn(ContainersLauncherEventType.LAUNCH_CONTAINER);
assertEquals(0, dummyMap.size());
spy.handle(event);
assertEquals(1, dummyMap.size());
Mockito.verify(containerLauncher, Mockito.times(1))
.submit(Mockito.any(ContainerLaunch.class));
}
|
@Override
public StringBuffer format(double number, StringBuffer toAppendTo, FieldPosition pos) {
int initLength = toAppendTo.length();
super.format(number, toAppendTo, pos);
return pad(toAppendTo, initLength);
}
|
@Test
void format() {
PaddingDecimalFormat format = new PaddingDecimalFormat("0.0", 7);
assertThat(format.format(1L)).isEqualTo("    1.0");
assertThat(format.format(1000L)).isEqualTo(" 1000.0");
assertThat(format.format(10000000L)).isEqualTo("10000000.0");
}
|
public String cleanupDwgString(String dwgString) {
String cleanString = dwgString;
StringBuilder sb = new StringBuilder();
//Strip off start/stop underline/overstrike/strike throughs
Matcher m = Pattern.compile(underlineStrikeThrough).matcher(cleanString);
while (m.find()) {
if (! m.group(1).endsWith("\\")) {
m.appendReplacement(sb, "");
}
}
m.appendTail(sb);
cleanString = sb.toString();
//Strip off semi-colon ended markers
m = Pattern.compile(endMarks).matcher(cleanString);
sb.setLength(0);
while (m.find()) {
if (! m.group(1).endsWith("\\")) {
m.appendReplacement(sb, "");
}
}
m.appendTail(sb);
cleanString = sb.toString();
//new line marker \\P replace with actual new line
m = Pattern.compile(newLine).matcher(cleanString);
sb.setLength(0);
while (m.find()) {
if (m.group(1).endsWith("P")) {
m.appendReplacement(sb, "\n");
}
}
m.appendTail(sb);
cleanString = sb.toString();
//stacking fractions
m = Pattern.compile(stackFrac).matcher(cleanString);
sb.setLength(0);
while (m.find()) {
if (m.group(1) == null) {
m.appendReplacement(sb, m.group(2) + "/" + m.group(3));
}
}
m.appendTail(sb);
cleanString = sb.toString();
//strip brackets around text, make sure they aren't escaped
m = Pattern.compile(curlyBraces).matcher(cleanString);
sb.setLength(0);
while (m.find()) {
if (m.group(1) == null) {
m.appendReplacement(sb, "");
}
}
m.appendTail(sb);
cleanString = sb.toString();
//now get rid of escape characters
cleanString = cleanString.replaceAll(escapeChars, "");
//now unescape backslash
cleanString = cleanString.replaceAll("(\\\\\\\\)", "\\\\");
return cleanString;
}
|
@Test
public void testStackedFractions() {
String formatted = "abc \\S+0,8^+0,1; efg";
DWGReadFormatRemover dwgReadFormatter = new DWGReadFormatRemover();
String expected = "abc +0,8/+0,1 efg";
assertEquals(expected, dwgReadFormatter.cleanupDwgString(formatted));
}
|
Optional<String> getServiceProviderPrivateKey() {
return configuration.get(SERVICE_PROVIDER_PRIVATE_KEY);
}
|
@Test
public void return_service_provider_private_key() {
settings.setProperty("sonar.auth.saml.sp.privateKey.secured", "my_private_secret_private_key");
assertThat(underTest.getServiceProviderPrivateKey()).hasValue("my_private_secret_private_key");
}
|
@Override
public String getHelpMessage() {
return HELP;
}
|
@Test
public void shouldGetHelp() {
assertThat(cmd.getHelpMessage(), is(
"run script <path_to_sql_file>:" + System.lineSeparator()
+ "\tLoad and run the statements in the supplied file." + System.lineSeparator()
+ "\tNote: the file must be UTF-8 encoded."));
}
|
public Map<String, Diff> diffs() {
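// Return the recorded diffs, decoding the ASSIGNEE entry on the way out; other entries are returned as stored.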
if (diffs.containsKey(ASSIGNEE)) {
Map<String, Diff> result = new LinkedHashMap<>(diffs);
result.put(ASSIGNEE, decode(result.get(ASSIGNEE)));
return result;
}
return diffs;
}
|
@Test
public void diffs_should_be_empty_by_default() {
assertThat(diffs.diffs()).isEmpty();
}
|
public RestResponse<KsqlEntityList> postKsqlRequest(
final String ksql,
final Map<String, ?> requestProperties,
final Optional<Long> previousCommandSeqNum
) {
return post(
KSQL_PATH,
createKsqlRequest(ksql, requestProperties, previousCommandSeqNum),
r -> deserialize(r.getBody(), KsqlEntityList.class)
);
}
|
@Test
public void shouldUseTimeout() {
// Given:
ksqlTarget = new KsqlTarget(httpClient, socketAddress, localProperties, authHeader, HOST,
ImmutableMap.of(), 300L);
// When:
executor.submit(() -> {
try {
ksqlTarget.postKsqlRequest("some ksql;", Collections.emptyMap(), Optional.empty());
} catch (Exception e) {
// ignore response error since this test is just testing headers on the outgoing request
}
});
assertThatEventually(requestStarted::get, is(true));
handlerCaptor.getValue().handle(Buffer.buffer());
// Then:
verify(httpClient).request(requestOptionsCaptor.capture(), any());
assertThat(requestOptionsCaptor.getValue().getTimeout(), is(300L));
}
|
@Override
public double read() {
return gaugeSource.read();
}
|
@Test
public void whenLongGaugeField() {
SomeObject someObject = new SomeObject();
metricsRegistry.registerStaticMetrics(someObject, "foo");
DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo.longField");
assertEquals(someObject.longField, gauge.read(), 0.1);
}
|
@Override
public ManifestIdentifier identify(Config config) {
Path manifestFile = getFileFromProperty("android_merged_manifest");
Path resourcesDir = getFileFromProperty("android_merged_resources");
Path assetsDir = getFileFromProperty("android_merged_assets");
Path apkFile = getFileFromProperty("android_resource_apk");
String packageName = properties.getProperty("android_custom_package");
String manifestConfig = config.manifest();
if (Config.NONE.equals(manifestConfig)) {
Logger.info(
"@Config(manifest = Config.NONE) specified while using Build System API, ignoring");
} else if (!Config.DEFAULT_MANIFEST_NAME.equals(manifestConfig)) {
manifestFile = getResource(manifestConfig);
}
if (!Config.DEFAULT_RES_FOLDER.equals(config.resourceDir())) {
resourcesDir = getResource(config.resourceDir());
}
if (!Config.DEFAULT_ASSET_FOLDER.equals(config.assetDir())) {
assetsDir = getResource(config.assetDir());
}
if (!Config.DEFAULT_PACKAGE_NAME.equals(config.packageName())) {
packageName = config.packageName();
}
List<ManifestIdentifier> libraryDirs = emptyList();
if (config.libraries().length > 0) {
Logger.info("@Config(libraries) specified while using Build System API, ignoring");
}
return new ManifestIdentifier(
packageName, manifestFile, resourcesDir, assetsDir, libraryDirs, apkFile);
}
|
@Test
public void identify_withResourceApk() {
Properties properties = new Properties();
properties.put("android_merged_manifest", "gradle/AndroidManifest.xml");
properties.put("android_merged_resources", "gradle/res");
properties.put("android_merged_assets", "gradle/assets");
properties.put("android_resource_apk", "gradle/resources.ap_");
DefaultManifestFactory factory = new DefaultManifestFactory(properties);
ManifestIdentifier identifier = factory.identify(Config.Builder.defaults().build());
AndroidManifest manifest = RobolectricTestRunner.createAndroidManifest(identifier);
assertThat(manifest.getAndroidManifestFile())
.isEqualTo(Paths.get("gradle/AndroidManifest.xml"));
assertThat(manifest.getResDirectory()).isEqualTo(Paths.get("gradle/res"));
assertThat(manifest.getAssetsDirectory()).isEqualTo(Paths.get("gradle/assets"));
assertThat(manifest.getApkFile()).isEqualTo(Paths.get("gradle/resources.ap_"));
}
|
public Application load(HeliumPackage packageInfo, ApplicationContext context)
throws Exception {
if (packageInfo.getType() != HeliumType.APPLICATION) {
throw new ApplicationException(
"Can't instantiate " + packageInfo.getType() + " package using ApplicationLoader");
}
// check if already loaded
RunningApplication key =
new RunningApplication(packageInfo, context.getNoteId(), context.getParagraphId());
// get resource required by this package
ResourceSet resources = findRequiredResourceSet(packageInfo.getResources(),
context.getNoteId(), context.getParagraphId());
// load class
Class<Application> appClass = loadClass(packageInfo);
// instantiate
ClassLoader oldcl = Thread.currentThread().getContextClassLoader();
ClassLoader cl = appClass.getClassLoader();
Thread.currentThread().setContextClassLoader(cl);
try {
Constructor<Application> constructor = appClass.getConstructor(ApplicationContext.class);
Application app = new ClassLoaderApplication(constructor.newInstance(context), cl);
return app;
} catch (Exception e) {
throw new ApplicationException(e);
} finally {
Thread.currentThread().setContextClassLoader(oldcl);
}
}
|
@Test
void loadUnloadApplication() throws Exception {
// given
LocalResourcePool resourcePool = new LocalResourcePool("pool1");
DependencyResolver dep =
new DependencyResolver(tmpDir.getAbsolutePath(), ZeppelinConfiguration.load());
ApplicationLoader appLoader = new ApplicationLoader(resourcePool, dep);
HeliumPackage pkg1 = createPackageInfo(MockApplication1.class.getName(), "artifact1");
ApplicationContext context1 = createContext("note1", "paragraph1", "app1");
// when load application
MockApplication1 app = (MockApplication1) ((ClassLoaderApplication)
appLoader.load(pkg1, context1)).getInnerApplication();
// then
assertFalse(app.isUnloaded());
assertEquals(0, app.getNumRun());
// when unload
app.unload();
// then
assertTrue(app.isUnloaded());
assertEquals(0, app.getNumRun());
}
|
public SchemaMapping fromParquet(MessageType parquetSchema) {
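// Convert field-by-field, keeping the per-field TypeMappings alongside the resulting Arrow schema.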
List<Type> fields = parquetSchema.getFields();
List<TypeMapping> mappings = fromParquet(fields);
List<Field> arrowFields = fields(mappings);
return new SchemaMapping(new Schema(arrowFields), parquetSchema, mappings);
}
|
@Test
public void testParquetFixedBinaryToArrow() {
MessageType parquet = Types.buildMessage()
.addField(Types.optional(FIXED_LEN_BYTE_ARRAY).length(12).named("a"))
.named("root");
Schema expected = new Schema(asList(field("a", new ArrowType.Binary())));
Assert.assertEquals(expected, converter.fromParquet(parquet).getArrowSchema());
}
|
public void syncTableMeta(String dbName, String tableName, boolean forceDeleteData) throws DdlException {
Database db = GlobalStateMgr.getCurrentState().getDb(dbName);
if (db == null) {
throw new DdlException(String.format("db %s does not exist.", dbName));
}
Table table = db.getTable(tableName);
if (table == null) {
throw new DdlException(String.format("table %s does not exist.", tableName));
}
if (!table.isCloudNativeTableOrMaterializedView()) {
throw new DdlException("only support cloud table or cloud mv.");
}
syncTableMetaAndColocationInfoInternal(db, (OlapTable) table, forceDeleteData);
}
|
@Test
@Ignore
public void testSyncTableMeta() throws Exception {
long dbId = 100;
long tableId = 1000;
List<Long> shards = new ArrayList<>();
new MockUp<GlobalStateMgr>() {
@Mock
public Database getDb(String dbName) {
return new Database(dbId, dbName);
}
@Mock
public Database getDb(long id) {
return new Database(id, "aaa");
}
@Mock
public List<Long> getDbIds() {
return Lists.newArrayList(dbId);
}
};
List<Column> baseSchema = new ArrayList<>();
KeysType keysType = KeysType.AGG_KEYS;
PartitionInfo partitionInfo = new PartitionInfo(PartitionType.RANGE);
DistributionInfo defaultDistributionInfo = new HashDistributionInfo();
Table table = new LakeTable(tableId, "bbb", baseSchema, keysType, partitionInfo, defaultDistributionInfo);
new MockUp<Database>() {
@Mock
public Table getTable(String tableName) {
return table;
}
@Mock
public Table getTable(long tableId) {
return table;
}
@Mock
public List<Table> getTables() {
return Lists.newArrayList(table);
}
};
new MockUp<MaterializedIndex>() {
@Mock
public List<Tablet> getTablets() {
List<Tablet> tablets = new ArrayList<>();
tablets.add(new LakeTablet(111));
tablets.add(new LakeTablet(222));
tablets.add(new LakeTablet(333));
return tablets;
}
};
new MockUp<PhysicalPartition>() {
@Mock
public long getShardGroupId() {
return 444;
}
};
new MockUp<StarOSAgent>() {
@Mock
public List<Long> listShard(long groupId) throws DdlException {
return shards;
}
@Mock
public void deleteShards(Set<Long> shardIds) throws DdlException {
shards.removeAll(shardIds);
}
};
new MockUp<ColocateTableIndex>() {
@Mock
public boolean isLakeColocateTable(long tableId) {
return true;
}
@Mock
public void updateLakeTableColocationInfo(OlapTable olapTable, boolean isJoin,
GroupId expectGroupId) throws DdlException {
return;
}
};
new MockUp<SystemInfoService>() {
@Mock
public ComputeNode getBackendOrComputeNode(long nodeId) {
return null;
}
};
shards.clear();
shards.add(111L);
shards.add(222L);
shards.add(333L);
starMgrMetaSyncer.syncTableMeta("db", "table", true);
Assert.assertEquals(3, shards.size());
shards.clear();
shards.add(111L);
shards.add(222L);
shards.add(333L);
shards.add(444L);
starMgrMetaSyncer.syncTableMetaAndColocationInfo();
Assert.assertEquals(3, shards.size());
Assert.assertEquals((long) shards.get(0), 111L);
Assert.assertEquals((long) shards.get(1), 222L);
Assert.assertEquals((long) shards.get(2), 333L);
}
|
public boolean isIncluded(Path absolutePath, Path relativePath, InputFile.Type type) {
PathPattern[] inclusionPatterns = InputFile.Type.MAIN == type ? mainInclusionsPattern : testInclusionsPattern;
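// An empty inclusion list for this file type means every file is included.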
if (inclusionPatterns.length == 0) {
return true;
}
for (PathPattern pattern : inclusionPatterns) {
if (pattern.match(absolutePath, relativePath)) {
return true;
}
}
return false;
}
|
@Test
public void should_keepLegacyValue_when_legacyAndAliasPropertiesAreUsedForTestInclusions() {
settings.setProperty(PROJECT_TESTS_INCLUSIONS_PROPERTY, "**/*Dao.java");
settings.setProperty(PROJECT_TEST_INCLUSIONS_PROPERTY, "**/*Dto.java");
AbstractExclusionFilters filter = new AbstractExclusionFilters(analysisWarnings, settings.asConfig()::getStringArray) {
};
IndexedFile indexedFile = new DefaultIndexedFile("foo", moduleBaseDir, "test/main/java/com/mycompany/FooDao.java", null);
assertThat(filter.isIncluded(indexedFile.path(), Paths.get(indexedFile.relativePath()), InputFile.Type.TEST)).isFalse();
indexedFile = new DefaultIndexedFile("foo", moduleBaseDir, "test/main/java/com/mycompany/FooDto.java", null);
assertThat(filter.isIncluded(indexedFile.path(), Paths.get(indexedFile.relativePath()), InputFile.Type.TEST)).isTrue();
String expectedWarn = "Use of sonar.test.inclusions and sonar.tests.inclusions at the same time. sonar.test.inclusions is taken into account. Consider updating your configuration";
assertThat(logTester.logs(Level.WARN)).hasSize(1)
.contains(expectedWarn);
verify(analysisWarnings).addUnique(expectedWarn);
}
|
public static void run(Path source, Path target) throws IOException {
final List<Path> existingPaths = collectExistingPaths(target);
existingPaths.remove(target); // exclude the target, we don't want to remove it in following step
deletePaths(existingPaths);
copyFiles(source, target);
}
|
@Test
void run() throws IOException {
FullDirSync.run(source, target);
List<Path> afterSyncState = new ArrayList<>();
Files.walkFileTree(target, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
if(!target.equals(dir)) {
afterSyncState.add(dir);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
afterSyncState.add(file);
return super.visitFile(file, attrs);
}
});
Assertions.assertThat(afterSyncState)
.extracting(target::relativize)
.extracting(Path::toString)
.hasSize(4)
.contains("a.txt", "b.txt", "subdir", "subdir/c.txt");
Assertions.assertThat(afterSyncState)
.filteredOn(Files::isDirectory)
.allSatisfy(p -> {
final Set<PosixFilePermission> permission = Files.getPosixFilePermissions(p);
Assertions.assertThat(permission).containsExactlyInAnyOrderElementsOf(FullDirSync.DIRECTORY_PERMISSIONS);
});
Assertions.assertThat(afterSyncState)
.filteredOn(Files::isRegularFile)
.allSatisfy(p -> {
final Set<PosixFilePermission> permission = Files.getPosixFilePermissions(p);
Assertions.assertThat(permission).containsExactlyInAnyOrderElementsOf(FullDirSync.FILE_PERMISSIONS);
});
}
|
public CompletableFuture<InetSocketAddress> resolveAndCheckTargetAddress(String hostAndPort) {
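// Split host and port at the last ':' so host names that themselves contain colons are not cut short.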
int pos = hostAndPort.lastIndexOf(':');
String host = hostAndPort.substring(0, pos);
int port = Integer.parseInt(hostAndPort.substring(pos + 1));
if (!isPortAllowed(port)) {
return FutureUtil.failedFuture(
new TargetAddressDeniedException("Given port in '" + hostAndPort + "' isn't allowed."));
} else if (!isHostAllowed(host)) {
return FutureUtil.failedFuture(
new TargetAddressDeniedException("Given host in '" + hostAndPort + "' isn't allowed."));
} else {
return NettyFutureUtil.toCompletableFuture(
inetSocketAddressResolver.resolve(InetSocketAddress.createUnresolved(host, port)))
.thenCompose(resolvedAddress -> {
CompletableFuture<InetSocketAddress> result = new CompletableFuture<>();
if (isIPAddressAllowed(resolvedAddress)) {
result.complete(resolvedAddress);
} else {
result.completeExceptionally(new TargetAddressDeniedException(
"The IP address of the given host and port '" + hostAndPort + "' isn't allowed."));
}
return result;
});
}
}
|
@Test
public void shouldAllowIPv6Address() throws Exception {
BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator(
createMockedAddressResolver("fd4d:801b:73fa:abcd:0000:0000:0000:0001"),
"*"
, "fd4d:801b:73fa:abcd::/64"
, "6650");
brokerProxyValidator.resolveAndCheckTargetAddress("myhost.mydomain:6650").get();
}
|
@Override
public int countChars(Note note) {
String titleAndContent = note.getTitle() + "\n" + note.getContent();
return (int) Stream.of(sanitizeTextForWordsAndCharsCount(note, titleAndContent).split(""))
.map(String::trim)
.filter(s -> !s.isEmpty())
.count();
}
|
@Test
public void countChars() {
Note note = getNote(1L, "one two", "three four five\nAnother line");
assertEquals(30, new DefaultWordCounter().countChars(note));
}
|
public void put(String key, String val) throws IllegalArgumentException {
if (key == null) {
throw new IllegalArgumentException("key cannot be null");
}
Map<String, String> oldMap = copyOnThreadLocal.get();
Integer lastOp = getAndSetLastOperation(WRITE_OPERATION);
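// Copy-on-write: if the last operation was a read (or no map exists yet), mutate a fresh copy
// so readers holding the old map never observe a concurrent modification.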
if (wasLastOpReadOrNull(lastOp) || oldMap == null) {
Map<String, String> newMap = duplicateAndInsertNewMap(oldMap);
newMap.put(key, val);
} else {
oldMap.put(key, val);
}
}
|
@Test
public void nearSimultaneousPutsShouldNotCauseConcurrentModificationException() throws InterruptedException {
// For the weirdest reason, modifications to mdcAdapter must be done
// before the definition anonymous ChildThread class below. Otherwise, the
// map in the child thread, the one contained in mdcAdapter.copyOnInheritThreadLocal,
// is null. How strange is that?
// let the map have lots of elements so that copying it takes time
for (int i = 0; i < 2048; i++) {
mdcAdapter.put("k" + i, "v" + i);
}
ChildThread childThread = new ChildThread(mdcAdapter, null, null) {
@Override
public void run() {
for (int i = 0; i < 16; i++) {
mdcAdapter.put("ck" + i, "cv" + i);
Thread.yield();
}
successful = true;
}
};
childThread.start();
Thread.sleep(1);
for (int i = 0; i < 16; i++) {
mdcAdapter.put("K" + i, "V" + i);
}
childThread.join();
assertTrue(childThread.successful);
}
|
@Override
protected void parse(final ProtocolFactory protocols, final Local file) throws AccessDeniedException {
NSDictionary serialized = NSDictionary.dictionaryWithContentsOfFile(file.getAbsolute());
if(null == serialized) {
throw new LocalAccessDeniedException(String.format("Invalid bookmark file %s", file));
}
this.parse(protocols, serialized);
}
|
@Test
public void testParse() throws AccessDeniedException {
FlowBookmarkCollection c = new FlowBookmarkCollection();
assertEquals(0, c.size());
c.parse(new ProtocolFactory(new HashSet<>(Arrays.asList(new TestProtocol(Scheme.ftp), new TestProtocol(Scheme.ftps), new TestProtocol(Scheme.https)))), new Local("src/test/resources/com.fivedetails.Bookmarks.plist"));
assertEquals(3, c.size());
}
|
@Override
public long getOffsetInQueueByTime(String topic, int queueId, long timestamp) {
return this.getOffsetInQueueByTime(topic, queueId, timestamp, BoundaryType.LOWER);
}
|
@Test
public void testGetOffsetInQueueByTime() {
final int totalCount = 10;
int queueId = 0;
String topic = "FooBar";
AppendMessageResult[] appendMessageResults = putMessages(totalCount, topic, queueId, true);
//Thread.sleep(10);
StoreTestUtil.waitCommitLogReput((DefaultMessageStore) messageStore);
ConsumeQueueInterface consumeQueue = getDefaultMessageStore().findConsumeQueue(topic, queueId);
for (AppendMessageResult appendMessageResult : appendMessageResults) {
long offset = messageStore.getOffsetInQueueByTime(topic, queueId, appendMessageResult.getStoreTimestamp());
CqUnit cqUnit = consumeQueue.get(offset);
assertThat(cqUnit.getPos()).isEqualTo(appendMessageResult.getWroteOffset());
assertThat(cqUnit.getSize()).isEqualTo(appendMessageResult.getWroteBytes());
}
}
|
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
String authorization =
request.getHeader(HttpConstants.AUTHORIZATION_HEADER);
if (authorization == null
|| !AuthenticationHandlerUtil.matchAuthScheme(HttpConstants.BASIC,
authorization)) {
response.setHeader(WWW_AUTHENTICATE, HttpConstants.BASIC);
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
if (authorization == null) {
logger.trace("Basic auth starting");
} else {
logger.warn("'" + HttpConstants.AUTHORIZATION_HEADER
+ "' does not start with '" + HttpConstants.BASIC + "' : {}",
authorization);
}
} else {
authorization =
authorization.substring(HttpConstants.BASIC.length()).trim();
final Base64 base64 = new Base64(0);
// As per RFC7617, UTF-8 charset should be used for decoding.
String[] credentials = new String(base64.decode(authorization),
StandardCharsets.UTF_8).split(":", 2);
if (credentials.length == 2) {
token = authenticateUser(credentials[0], credentials[1]);
response.setStatus(HttpServletResponse.SC_OK);
}
}
return token;
}
|
@Test(timeout = 60000)
public void testRequestWithWrongCredentials() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
final Base64 base64 = new Base64(0);
String credentials = base64.encodeToString("bjones:foo123".getBytes());
String authHeader = HttpConstants.BASIC + " " + credentials;
Mockito.when(request.getHeader(HttpConstants.AUTHORIZATION_HEADER))
.thenReturn(authHeader);
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
// Expected
} catch (Exception ex) {
Assert.fail();
}
}
|
@Override
public void run() {
try {
// Step1. Get Current Time.
Date now = new Date();
LOG.info("SubClusterCleaner at {}.", now);
Map<SubClusterId, SubClusterInfo> subClusters = federationFacade.getSubClusters(true);
for (Map.Entry<SubClusterId, SubClusterInfo> subCluster : subClusters.entrySet()) {
// Step2. Get information about subClusters.
SubClusterId subClusterId = subCluster.getKey();
SubClusterInfo subClusterInfo = subCluster.getValue();
SubClusterState subClusterState = subClusterInfo.getState();
long lastHeartBeatTime = subClusterInfo.getLastHeartBeat();
// We Only Check SubClusters in NEW and RUNNING states
if (subClusterState.isUsable()) {
long heartBeatInterval = now.getTime() - lastHeartBeatTime;
try {
// HeartBeat Interval Exceeds Expiration Time
if (heartBeatInterval > heartbeatExpirationMillis) {
LOG.info("Deregister SubCluster {} in state {} last heartbeat at {}.",
subClusterId, subClusterState, new Date(lastHeartBeatTime));
federationFacade.deregisterSubCluster(subClusterId, SubClusterState.SC_LOST);
}
} catch (YarnException e) {
LOG.error("deregisterSubCluster failed on SubCluster {}.", subClusterId, e);
}
} else {
LOG.debug("SubCluster {} in state {} last heartbeat at {}, " +
"heartbeat interval < 30mins, no need for Deregister.",
subClusterId, subClusterState, new Date(lastHeartBeatTime));
}
}
} catch (Throwable e) {
LOG.error("SubClusterCleaner Fails.", e);
}
}
|
@Test
public void testSubClustersWithOutHeartBeat()
throws InterruptedException, TimeoutException, YarnException {
// We set up this unit test by setting the status of all subClusters to RUNNING
// and manually expiring each subCluster's heartbeat.
// At that point, the number of active SubClusters is 0.
Map<SubClusterId, SubClusterInfo> subClustersMap = facade.getSubClusters(false);
// Step1. Manually set subCluster heartbeat expiration.
// subCluster has no heartbeat, and all subClusters will expire.
subClustersMap.keySet().forEach(subClusterId ->
stateStore.setExpiredHeartbeat(subClusterId, EXPIRATION_TIME));
// Step2. Run the Cleaner to change the status of the expired SubCluster to SC_LOST.
cleaner.run();
// Step3. All clusters have expired,
// so the current Federation has no active subClusters.
int count = facade.getActiveSubClustersCount();
Assert.assertEquals(0, count);
// Step4. Check Active SubCluster Status.
// We want all subClusters to be SC_LOST.
subClustersMap.values().forEach(subClusterInfo -> {
SubClusterState subClusterState = subClusterInfo.getState();
Assert.assertEquals(SubClusterState.SC_LOST, subClusterState);
});
}
|
List<PluginConfiguration> getAuthConfigMetadata(String pluginId) {
return pluginRequestHelper.submitRequest(pluginId, REQUEST_GET_AUTH_CONFIG_METADATA, new DefaultPluginInteractionCallback<>() {
@Override
public List<PluginConfiguration> onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return getMessageConverter(resolvedExtensionVersion).getPluginConfigMetadataResponseFromBody(responseBody);
}
});
}
|
@Test
void shouldTalkToPlugin_To_GetPluginConfigurationMetadata() {
String responseBody = "[{\"key\":\"username\",\"metadata\":{\"required\":true,\"secure\":false}},{\"key\":\"password\",\"metadata\":{\"required\":true,\"secure\":true}}]";
when(pluginManager.submitTo(eq(PLUGIN_ID), eq(AUTHORIZATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody));
List<PluginConfiguration> authConfigMetadata = authorizationExtension.getAuthConfigMetadata(PLUGIN_ID);
assertRequest(requestArgumentCaptor.getValue(), AUTHORIZATION_EXTENSION, "2.0", REQUEST_GET_AUTH_CONFIG_METADATA, null);
assertThat(authConfigMetadata.size()).isEqualTo(2);
assertThat(authConfigMetadata).hasSize(2)
.contains(
new PluginConfiguration("username", new Metadata(true, false)),
new PluginConfiguration("password", new Metadata(true, true))
);
}
|
@Deprecated
public static SofaRequest buildSofaRequest(Class<?> clazz, String method, Class[] argTypes, Object[] args) {
SofaRequest request = new SofaRequest();
request.setInterfaceName(clazz.getName());
request.setMethodName(method);
request.setMethodArgs(args == null ? CodecUtils.EMPTY_OBJECT_ARRAY : args);
request.setMethodArgSigs(ClassTypeUtils.getTypeStrs(argTypes, true));
return request;
}
|
@Test
public void buildSofaRequest1() throws Exception {
Method method = Number.class.getMethod("intValue");
SofaRequest request = MessageBuilder.buildSofaRequest(Number.class, method,
new Class[0], StringUtils.EMPTY_STRING_ARRAY);
Assert.assertEquals(request.getInterfaceName(), Number.class.getName());
Assert.assertEquals(request.getMethodName(), "intValue");
Assert.assertArrayEquals(StringUtils.EMPTY_STRING_ARRAY, request.getMethodArgs());
Assert.assertArrayEquals(StringUtils.EMPTY_STRING_ARRAY, request.getMethodArgSigs());
method = Comparable.class.getMethod("compareTo", Object.class);
request = MessageBuilder.buildSofaRequest(Comparable.class, method,
new Class[] { Object.class }, new Object[] { null });
Assert.assertEquals(request.getInterfaceName(), Comparable.class.getName());
Assert.assertEquals(request.getMethodName(), "compareTo");
Assert.assertArrayEquals(request.getMethodArgs(), new Object[] { null });
Assert.assertArrayEquals(request.getMethodArgSigs(), new String[] { "java.lang.Object" });
}
|
public static DateTime convertToDateTime(@Nonnull Object value) {
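// Normalize the supported temporal types to Joda DateTime: zoned values keep their zone, offset and
// instant values become UTC, local values are interpreted in the system default zone, and strings are
// parsed with the ES date format.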
if (value instanceof DateTime) {
return (DateTime) value;
}
if (value instanceof Date) {
return new DateTime(value, DateTimeZone.UTC);
} else if (value instanceof ZonedDateTime) {
final DateTimeZone dateTimeZone = DateTimeZone.forTimeZone(TimeZone.getTimeZone(((ZonedDateTime) value).getZone()));
return new DateTime(Date.from(((ZonedDateTime) value).toInstant()), dateTimeZone);
} else if (value instanceof OffsetDateTime) {
return new DateTime(Date.from(((OffsetDateTime) value).toInstant()), DateTimeZone.UTC);
} else if (value instanceof LocalDateTime) {
final LocalDateTime localDateTime = (LocalDateTime) value;
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof LocalDate) {
final LocalDate localDate = (LocalDate) value;
final LocalDateTime localDateTime = localDate.atStartOfDay();
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof Instant) {
return new DateTime(Date.from((Instant) value), DateTimeZone.UTC);
} else if (value instanceof String) {
return ES_DATE_FORMAT_FORMATTER.parseDateTime((String) value);
} else {
throw new IllegalArgumentException("Value of invalid type <" + value.getClass().getSimpleName() + "> provided");
}
}
|
@Test
void convertFromInstant() {
final long currentTimeMillis = System.currentTimeMillis();
final Instant input = Instant.ofEpochMilli(currentTimeMillis);
final DateTime output = DateTimeConverter.convertToDateTime(input);
final DateTime expectedOutput = new DateTime(currentTimeMillis, DateTimeZone.UTC);
assertThat(output).isEqualTo(expectedOutput);
}
|
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
}
|
@Test
public void testMissingMultipleSettersThrows() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(
"missing property methods on [org.apache.beam.sdk.options."
+ "PipelineOptionsFactoryTest$MissingMultipleSetters]");
expectedException.expectMessage("setter for property [object] of type [java.lang.Object]");
expectedException.expectMessage("setter for property [otherObject] of type [java.lang.Object]");
PipelineOptionsFactory.as(MissingMultipleSetters.class);
}
|
@Override
public void initializeState(StateInitializationContext context) throws Exception {
if (isPartitionCommitTriggerEnabled()) {
partitionCommitPredicate =
PartitionCommitPredicate.create(conf, getUserCodeClassloader(), partitionKeys);
}
currentNewPartitions = new HashSet<>();
newPartitions = new TreeMap<>();
committablePartitions = new HashSet<>();
inProgressPartitions = new HashMap<>();
super.initializeState(context);
}
|
@Test
void testFailover() throws Exception {
OperatorSubtaskState state;
try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create()) {
harness.setup();
harness.initializeEmptyState();
harness.open();
harness.processElement(row("1"), 0);
harness.processElement(row("2"), 0);
harness.processElement(row("2"), 0);
state = harness.snapshot(1, 1);
harness.processElement(row("3"), 0);
harness.processElement(row("4"), 0);
harness.notifyOfCompletedCheckpoint(1);
List<String> partitions = collect(harness);
assertThat(partitions).containsExactly("1", "2");
}
// first retry, no partition {1, 2} records
try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create()) {
harness.setup();
harness.initializeState(state);
harness.open();
harness.processElement(row("3"), 0);
harness.processElement(row("4"), 0);
state = harness.snapshot(2, 2);
harness.notifyOfCompletedCheckpoint(2);
List<String> partitions = collect(harness);
assertThat(partitions).containsExactly("1", "2", "3", "4");
}
// second retry, partition {4} repeat
try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create()) {
harness.setup();
harness.initializeState(state);
harness.open();
harness.processElement(row("4"), 0);
harness.processElement(row("5"), 0);
state = harness.snapshot(3, 3);
harness.notifyOfCompletedCheckpoint(3);
List<String> partitions = collect(harness);
assertThat(partitions).containsExactly("3", "4", "5");
}
// third retry, multiple snapshots
try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create()) {
harness.setup();
harness.initializeState(state);
harness.open();
harness.processElement(row("6"), 0);
harness.processElement(row("7"), 0);
harness.snapshot(4, 4);
harness.processElement(row("8"), 0);
harness.snapshot(5, 5);
harness.processElement(row("9"), 0);
harness.snapshot(6, 6);
harness.notifyOfCompletedCheckpoint(5);
List<String> partitions = collect(harness);
// should not contain partition {9}
assertThat(partitions).containsExactly("4", "5", "6", "7", "8");
}
}
|
public static <T> Map<T, T> reverse(Map<T, T> map) {
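// Re-emit every entry with key and value swapped; the wrapper's setValue is unsupported.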
return edit(map, t -> new Entry<T, T>() {
@Override
public T getKey() {
return t.getValue();
}
@Override
public T getValue() {
return t.getKey();
}
@Override
public T setValue(T value) {
throw new UnsupportedOperationException("Unsupported setValue method !");
}
});
}
|
@Test
public void reverseTest() {
final Map<String, String> map = MapUtil.newHashMap();
map.put("a", "1");
map.put("b", "2");
map.put("c", "3");
map.put("d", "4");
final Map<String, String> map2 = MapUtil.reverse(map);
assertEquals("a", map2.get("1"));
assertEquals("b", map2.get("2"));
assertEquals("c", map2.get("3"));
assertEquals("d", map2.get("4"));
}
|
public void startCluster() throws ClusterEntrypointException {
LOG.info("Starting {}.", getClass().getSimpleName());
try {
FlinkSecurityManager.setFromConfiguration(configuration);
PluginManager pluginManager =
PluginUtils.createPluginManagerFromRootFolder(configuration);
configureFileSystems(configuration, pluginManager);
SecurityContext securityContext = installSecurityContext(configuration);
ClusterEntrypointUtils.configureUncaughtExceptionHandler(configuration);
securityContext.runSecured(
(Callable<Void>)
() -> {
runCluster(configuration, pluginManager);
return null;
});
} catch (Throwable t) {
final Throwable strippedThrowable =
ExceptionUtils.stripException(t, UndeclaredThrowableException.class);
try {
// clean up any partial state
shutDownAsync(
ApplicationStatus.FAILED,
ShutdownBehaviour.GRACEFUL_SHUTDOWN,
ExceptionUtils.stringifyException(strippedThrowable),
false)
.get(
INITIALIZATION_SHUTDOWN_TIMEOUT.toMilliseconds(),
TimeUnit.MILLISECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
strippedThrowable.addSuppressed(e);
}
throw new ClusterEntrypointException(
String.format(
"Failed to initialize the cluster entrypoint %s.",
getClass().getSimpleName()),
strippedThrowable);
}
}
|
@Test
public void testWorkingDirectoryIsNotDeletedWhenStoppingClusterEntrypoint() throws Exception {
final File workingDirBase = TEMPORARY_FOLDER.newFolder();
final ResourceID resourceId = new ResourceID("foobar");
configureWorkingDirectory(flinkConfig, workingDirBase, resourceId);
final File workingDir =
ClusterEntrypointUtils.generateJobManagerWorkingDirectoryFile(
flinkConfig, resourceId);
try (final TestingEntryPoint testingEntryPoint =
new TestingEntryPoint.Builder().setConfiguration(flinkConfig).build()) {
testingEntryPoint.startCluster();
}
assertTrue(
"The working directory has been deleted when the cluster entrypoint shut down. This should not happen.",
workingDir.exists());
}
|
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
if (!joinKey.isForeignKey()) {
ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient());
}
final JoinerFactory joinerFactory = new JoinerFactory(
buildContext,
this,
buildContext.buildNodeContext(getId().toString()));
return joinerFactory.getJoiner(left.getNodeOutputType(), right.getNodeOutputType()).join();
}
|
@Test
public void shouldPerformTableToTableLeftJoin() {
// Given:
setupTable(left, leftSchemaKTable);
setupTable(right, rightSchemaKTable);
final JoinNode joinNode = new JoinNode(nodeId, LEFT, joinKey, true, left,
right, empty(),"KAFKA");
// When:
joinNode.buildStream(planBuildContext);
// Then:
verify(leftSchemaKTable).leftJoin(
rightSchemaKTable,
SYNTH_KEY,
CONTEXT_STACKER
);
}
|
@Override
public Object[] toArray() {
return new Object[0];
}
|
@Test
public void testToArray1() throws Exception {
Object[] array = es.toArray(new Integer[1]);
assertEquals(1, array.length);
assertEquals(null, array[0]);
array = es.toArray(new Integer[0]);
assertEquals(0, array.length);
}
|
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
return parse(properties::get);
}
|
@Test
void should_parse_filter_tag() {
properties.put(Constants.FILTER_TAGS_PROPERTY_NAME, "@No and not @Never");
RuntimeOptions options = cucumberPropertiesParser.parse(properties).build();
List<String> tagExpressions = options.getTagExpressions().stream()
.map(Object::toString)
.collect(toList());
assertThat(tagExpressions, contains("( @No and not ( @Never ) )"));
}
|
public HtmlEmail createEmail(T report) throws MalformedURLException, EmailException {
HtmlEmail email = new HtmlEmail();
setEmailSettings(email);
addReportContent(email, report);
return email;
}
|
@Test
public void support_ssl() throws Exception {
BasicEmail basicEmail = new BasicEmail(Set.of("noreply@nowhere"));
when(emailSettings.getSecureConnection()).thenReturn("SSL");
when(emailSettings.getSmtpHost()).thenReturn("smtphost");
when(emailSettings.getSmtpPort()).thenReturn(466);
when(emailSettings.getFrom()).thenReturn("noreply@nowhere");
when(emailSettings.getSmtpUsername()).thenReturn("login");
when(emailSettings.getSmtpPassword()).thenReturn("pwd");
MultiPartEmail email = sender.createEmail(basicEmail);
assertThat(email.isSSLOnConnect()).isTrue();
assertThat(email.isStartTLSEnabled()).isFalse();
assertThat(email.isStartTLSRequired()).isFalse();
assertThat(email.getHostName()).isEqualTo("smtphost");
assertThat(email.getSmtpPort()).isEqualTo("466");
assertThat(email.getSslSmtpPort()).isEqualTo("466");
}
|
public static String safeSubstring(String target, int start, int end) {
if (target == null) {
return null;
}
int slen = target.length();
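// Treat negative, inverted or out-of-range bounds as unsafe and return null instead of throwing.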
if (start < 0 || end <= 0 || end <= start || slen < start || slen < end) {
return null;
}
return target.substring(start, end);
}
|
@Test
public void testSafeSubstring() {
assertNull(Tools.safeSubstring(null, 10, 20));
assertNull(Tools.safeSubstring("", 10, 20));
assertNull(Tools.safeSubstring("foo", -1, 2));
assertNull(Tools.safeSubstring("foo", 1, 0));
assertNull(Tools.safeSubstring("foo", 5, 2));
assertNull(Tools.safeSubstring("foo", 1, 1));
assertNull(Tools.safeSubstring("foo", 2, 1));
assertEquals("justatest", Tools.safeSubstring("justatest", 0, 9));
assertEquals("tat", Tools.safeSubstring("justatest", 3, 6));
assertEquals("just", Tools.safeSubstring("justatest", 0, 4));
assertEquals("atest", Tools.safeSubstring("justatest", 4, 9));
}
|
static List<CircuitBreaker> getDefaultCircuitBreakers(String resourceName) {
if (rules == null || rules.isEmpty()) {
return null;
}
List<CircuitBreaker> circuitBreakers = DefaultCircuitBreakerRuleManager.circuitBreakers.get(resourceName);
if (circuitBreakers == null && !rules.isEmpty() && !excludedResource.contains(resourceName)) {
circuitBreakers = new ArrayList<>();
for (DegradeRule rule : rules) {
circuitBreakers.add(DefaultCircuitBreakerRuleManager.newCircuitBreakerFrom(rule));
}
DefaultCircuitBreakerRuleManager.circuitBreakers.put(resourceName, circuitBreakers);
return circuitBreakers;
}
return circuitBreakers;
}
|
@Test
public void testGetDefaultCircuitBreakers() {
String resourceName = RESOURCE_NAME + "I";
assertFalse(DegradeRuleManager.hasConfig(resourceName));
List<CircuitBreaker> defaultCircuitBreakers1 = DefaultCircuitBreakerRuleManager.getDefaultCircuitBreakers(
resourceName);
assertNotNull(defaultCircuitBreakers1);
List<CircuitBreaker> defaultCircuitBreakers2 = DefaultCircuitBreakerRuleManager.getDefaultCircuitBreakers(
resourceName);
assertSame(defaultCircuitBreakers1, defaultCircuitBreakers2);
}
|
public DubboShutdownHook(ApplicationModel applicationModel) {
super("DubboShutdownHook");
this.applicationModel = applicationModel;
Assert.notNull(this.applicationModel, "ApplicationModel is null");
ignoreListenShutdownHook = Boolean.parseBoolean(
ConfigurationUtils.getProperty(applicationModel, CommonConstants.IGNORE_LISTEN_SHUTDOWN_HOOK));
if (ignoreListenShutdownHook) {
logger.info(
CommonConstants.IGNORE_LISTEN_SHUTDOWN_HOOK + " configured, will ignore add shutdown hook to jvm.");
}
}
|
@Test
public void testDubboShutdownHook() {
Assertions.assertNotNull(dubboShutdownHook);
Assertions.assertLinesMatch(asList("DubboShutdownHook"), asList(dubboShutdownHook.getName()));
Assertions.assertFalse(dubboShutdownHook.getRegistered());
}
|
public Optional<Node> localCorpusDispatchTarget() {
if (localCorpusDispatchTarget == null) return Optional.empty();
// Only use direct dispatch if the local group has sufficient coverage
Group localSearchGroup = groups.get(localCorpusDispatchTarget.group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
// Only use direct dispatch if the local search node is not down
if (localCorpusDispatchTarget.isWorking() == Boolean.FALSE) return Optional.empty();
return Optional.of(localCorpusDispatchTarget);
}
|
@Test
void requireThatVipStatusDownWhenLocalIsDown() {
try (State test = new State("cluster.1", 1, HostName.getLocalhost(), "b")) {
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
assertTrue(test.searchCluster.localCorpusDispatchTarget().isPresent());
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
test.numDocsPerNode.get(0).set(-1);
test.waitOneFullPingRound();
assertFalse(test.vipStatus.isInRotation());
test.numDocsPerNode.get(0).set(1);
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
test.numDocsPerNode.get(1).set(-1);
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
test.numDocsPerNode.get(0).set(-1);
test.numDocsPerNode.get(1).set(-1);
test.waitOneFullPingRound();
assertFalse(test.vipStatus.isInRotation());
test.numDocsPerNode.get(1).set(1);
test.waitOneFullPingRound();
assertFalse(test.vipStatus.isInRotation());
test.numDocsPerNode.get(0).set(1);
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
}
}
|
public static void setMetadata(
Context context, NotificationCompat.Builder notification, int type) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
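// From Android O onwards a notification must target an existing channel, so create it first.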
switch (type) {
case TYPE_NORMAL:
createNormalChannel(context);
break;
case TYPE_FTP:
createFtpChannel(context);
break;
default:
throw new IllegalArgumentException("Unrecognized type:" + type);
}
} else {
switch (type) {
case TYPE_NORMAL:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
notification.setCategory(Notification.CATEGORY_SERVICE);
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
notification.setPriority(Notification.PRIORITY_MIN);
}
break;
case TYPE_FTP:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
notification.setCategory(Notification.CATEGORY_SERVICE);
notification.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
notification.setPriority(Notification.PRIORITY_MAX);
}
break;
default:
throw new IllegalArgumentException("Unrecognized type:" + type);
}
}
}
|
@Test
@Config(sdk = {KITKAT}) // max sdk is N
public void testFtpNotification() {
NotificationCompat.Builder builder =
new NotificationCompat.Builder(context, CHANNEL_FTP_ID)
.setContentTitle("FTP server test")
.setContentText("FTP listening at 127.0.0.1:22")
.setSmallIcon(R.drawable.ic_ftp_light)
.setTicker(context.getString(R.string.ftp_notif_starting))
.setOngoing(true)
.setOnlyAlertOnce(true);
NotificationConstants.setMetadata(context, builder, TYPE_FTP);
Notification result = builder.build();
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
assertEquals(Notification.CATEGORY_SERVICE, result.category);
assertEquals(Notification.VISIBILITY_PUBLIC, result.visibility);
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
assertEquals(Notification.PRIORITY_MAX, result.priority);
} else {
assertEquals(Notification.PRIORITY_DEFAULT, result.priority);
}
}
|
public void consume(Inbox inbox) {
ensureNotDone();
if (limit <= 0) {
done.compareAndSet(null, new ResultLimitReachedException());
ensureNotDone();
}
while (offset > 0 && inbox.poll() != null) {
offset--;
}
for (JetSqlRow row; (row = (JetSqlRow) inbox.peek()) != null && rows.offer(row); ) {
inbox.remove();
if (limit != Long.MAX_VALUE) {
limit -= 1;
if (limit < 1) {
done.compareAndSet(null, new ResultLimitReachedException());
ensureNotDone();
}
}
}
}
|
@Test
public void when_nextItemWhileWaiting_then_hasNextReturns() throws Exception {
initProducer(false);
Future<?> future = spawn(() -> {
assertThat(iterator.hasNext(1, DAYS)).isEqualTo(YES);
assertThat((int) iterator.next().get(0)).isEqualTo(42);
});
sleepMillis(50); // sleep so that the thread starts blocking in `hasNext`
inbox.queue().add(jetRow(42));
producer.consume(inbox);
assertThat(inbox).isEmpty();
future.get();
}
|
@Override
public byte[] serialize() {
byte[] optionsData = null;
if (this.options.hasOptions()) {
optionsData = this.options.serialize();
}
int optionsLength = 0;
if (optionsData != null) {
optionsLength = optionsData.length;
}
final byte[] data = new byte[HEADER_LENGTH + optionsLength];
final ByteBuffer bb = ByteBuffer.wrap(data);
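        // 4-byte reserved field precedes the target address in an ICMPv6 Neighbor Solicitation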
bb.putInt(0);
bb.put(this.targetAddress, 0, Ip6Address.BYTE_LENGTH);
if (optionsData != null) {
bb.put(optionsData);
}
return data;
}
|
@Test
public void testSerialize() {
NeighborSolicitation ns = new NeighborSolicitation();
ns.setTargetAddress(TARGET_ADDRESS);
ns.addOption(NeighborDiscoveryOptions.TYPE_TARGET_LL_ADDRESS,
MAC_ADDRESS.toBytes());
assertArrayEquals(ns.serialize(), bytePacket);
}
|
public static long readVLong(ByteData arr, long position) {
byte b = arr.get(position++);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as long");
long value = b & 0x7F;
while ((b & 0x80) != 0) {
b = arr.get(position++);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
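
The reader above consumes the high-order 7-bit group first: each byte contributes 7 payload bits, the 0x80 bit flags "more bytes follow", and a lone 0x80 byte is the null sentinel. A minimal standalone sketch of the same decode over a plain byte[] (decodeVLong is a hypothetical mirror written for illustration, not part of the library), with a worked example:

public static long decodeVLong(byte[] arr, int position) {
    byte b = arr[position++];
    if (b == (byte) 0x80)
        throw new RuntimeException("Attempting to read null value as long");
    long value = b & 0x7F;
    while ((b & 0x80) != 0) {
        b = arr[position++];
        value = (value << 7) | (b & 0x7F);
    }
    return value;
}

// decodeVLong(new byte[] {(byte) 0x82, 0x2C}, 0) == 300:
// 0x82 carries the high group (2) plus the continuation bit, 0x2C is the
// final group (44), and 2 * 128 + 44 == 300.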
|
@Test(expected = EOFException.class)
public void testReadVLongTruncatedInputStream() throws IOException {
InputStream is = new ByteArrayInputStream(BYTES_TRUNCATED);
VarInt.readVLong(is);
}
|
@Override
public void removeAll() {
map.removeAll(Predicates.alwaysTrue());
}
|
@Test(expected = MethodNotAvailableException.class)
public void testRemoveAllWithKeys() {
adapter.removeAll(singleton(42));
}
|
@NonNull
public ConnectionFileName getConnectionRootFileName( @NonNull VFSConnectionDetails details ) {
String connectionName = details.getName();
if ( StringUtils.isEmpty( connectionName ) ) {
throw new IllegalArgumentException( "Unnamed connection" );
}
return new ConnectionFileName( connectionName );
}
|
@Test( expected = IllegalArgumentException.class )
public void testGetConnectionRootFileNameThrowsIllegalArgumentGivenConnectionHasNullName() {
when( vfsConnectionDetails.getName() ).thenReturn( null );
vfsConnectionManagerHelper.getConnectionRootFileName( vfsConnectionDetails );
}
|
public static boolean isWide(@Nullable Type type) {
if (type == null) return false;
return Type.DOUBLE_TYPE.equals(type) || Type.LONG_TYPE.equals(type);
}
|
@Test
void testIsWide() {
assertTrue(Types.isWide(Type.getType("D")));
assertTrue(Types.isWide(Type.getType("J")));
//
assertFalse(Types.isWide(Type.getType("V")));
assertFalse(Types.isWide(Type.getType("Z")));
assertFalse(Types.isWide(Type.getType("B")));
assertFalse(Types.isWide(Type.getType("C")));
assertFalse(Types.isWide(Type.getType("S")));
assertFalse(Types.isWide(Type.getType("I")));
assertFalse(Types.isWide(Type.getType("F")));
assertFalse(Types.isWide(Type.getType("[D")));
assertFalse(Types.isWide(Type.getType("[J")));
assertFalse(Types.isWide(Type.getType("Ljava/lang/String;")));
}
|
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
}
|
@Test
void atLiteralDuration() {
String inputExpression = "@\"P2Y2M\"";
BaseNode bool = parse(inputExpression);
assertThat(bool).isInstanceOf(AtLiteralNode.class);
assertThat(bool.getResultType()).isEqualTo(BuiltInType.DURATION);
assertLocation(inputExpression, bool);
}
|
public static double estimateDistanceInFeet(Point p1, Point p2, long maxTimeDeltaInMillisec) {
        //using synthetic Points to estimate physical distances is numerically unstable
verifyTimeDeltaIsSmall(p1, p2, maxTimeDeltaInMillisec);
Instant avgTime = Time.averageTime(p1.time(), p2.time());
//this metric ignores time and only reflects distance (measured in feet)
PointDistanceMetric metric = new PointDistanceMetric(0.0, 1.0);
return metric.distanceBtw(
projectPointAtNewTime(p1, avgTime),
projectPointAtNewTime(p2, avgTime)
);
}
|
@Test
public void testEstimateDistanceInFeet_tooFarApartInTime() {
//1 knot -- due east
Point testPoint = (new PointBuilder())
.time(Instant.EPOCH)
.latLong(0.0, 0.0)
.altitude(Distance.ofFeet(0.0))
.speedInKnots(1.0)
.courseInDegrees(90.0)
.build();
//1 knot -- due west (1 minute later)
Point testPoint2 = (new PointBuilder())
.time(Instant.EPOCH.plusSeconds(60L))
.latLong(0.0, 0.0)
.altitude(Distance.ofFeet(0.0))
.speedInKnots(1.0)
.courseInDegrees(270.0)
.build();
long MAX_TIME_DELTA = 1000L;
assertThrows(
IllegalArgumentException.class,
() -> Distances.estimateDistanceInFeet(testPoint, testPoint2, MAX_TIME_DELTA),
"Should have failed because 1 minute is too much"
);
}
|
public static String getPackageName(Class<?> clazz) {
return getPackageName(clazz.getName());
}
|
@Test
public void testGetPackageName() {
String packageName = ClassUtils.getPackageName(AbstractMap.class);
assertEquals("java.util", packageName);
}
|
public static DynamicMessage messageFromGenericRecord(
Descriptor descriptor,
GenericRecord record,
@Nullable String changeType,
long changeSequenceNum) {
return messageFromGenericRecord(
descriptor, record, changeType, Long.toHexString(changeSequenceNum));
}
|
@Test
public void testMessageFromGenericRecord() throws Exception {
Descriptors.Descriptor descriptor =
TableRowToStorageApiProto.getDescriptorFromTableSchema(
AvroGenericRecordToStorageApiProto.protoTableSchemaFromAvroSchema(NESTED_SCHEMA),
true,
false);
DynamicMessage msg =
AvroGenericRecordToStorageApiProto.messageFromGenericRecord(
descriptor, nestedRecord, null, -1);
assertEquals(2, msg.getAllFields().size());
Map<String, Descriptors.FieldDescriptor> fieldDescriptors =
descriptor.getFields().stream()
.collect(Collectors.toMap(Descriptors.FieldDescriptor::getName, Functions.identity()));
DynamicMessage nestedMsg = (DynamicMessage) msg.getField(fieldDescriptors.get("nested"));
assertBaseRecord(nestedMsg, baseProtoExpectedFields);
}
|
public CsvData read() throws IORuntimeException {
return read(this.reader, false);
}
|
@Test
public void readDisableCommentTest() {
final CsvReader reader = CsvUtil.getReader(CsvReadConfig.defaultConfig().disableComment());
final CsvData read = reader.read(ResourceUtil.getUtf8Reader("test.csv"));
final CsvRow row = read.getRow(0);
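        // the '#' row below translates to "this is a comment line; it should be ignored when reading";
        // with comment handling disabled it is returned as ordinary data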
assertEquals("# 这是一行注释,读取时应忽略", row.get(0));
}
|
Map<Path, Set<Integer>> changedLines() {
return tracker.changedLines();
}
|
@Test
public void do_not_count_deleted_line() throws IOException {
String example = "Index: sample1\n"
+ "===================================================================\n"
+ "--- a/sample1\n"
+ "+++ b/sample1\n"
+ "@@ -1 +0,0 @@\n"
+ "-deleted line\n";
printDiff(example);
assertThat(underTest.changedLines()).isEmpty();
}
|
public synchronized TableId createTable(String tableName, Schema schema)
throws BigQueryResourceManagerException {
return createTable(tableName, schema, System.currentTimeMillis() + 3600000); // 1h
}
|
@Test
public void testCreateTableShouldThrowErrorWhenTableNameIsNotValid() {
assertThrows(IllegalArgumentException.class, () -> testManager.createTable("", schema));
}
|
@Override
public double getValue(double quantile) {
if (quantile < 0.0 || quantile > 1.0 || Double.isNaN(quantile)) {
throw new IllegalArgumentException(quantile + " is not in [0..1]");
}
if (values.length == 0) {
return 0.0;
}
final double pos = quantile * (values.length + 1);
final int index = (int) pos;
if (index < 1) {
return values[0];
}
if (index >= values.length) {
return values[values.length - 1];
}
final double lower = values[index - 1];
final double upper = values[index];
return lower + (pos - floor(pos)) * (upper - lower);
}
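
A worked pass through the interpolation above (the sample is hypothetical; assume values is the sorted array {1, 2, 3, 4, 5}):

// getValue(0.75)
//   pos    = 0.75 * (5 + 1) = 4.5
//   index  = 4, which lies inside [1, values.length)
//   lower  = values[3] = 4, upper = values[4] = 5
//   result = 4 + (4.5 - floor(4.5)) * (5 - 4) = 4.5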
|
@Test(expected = IllegalArgumentException.class)
public void disallowsNotANumberQuantile() {
snapshot.getValue(Double.NaN);
}
|
public static byte[] copyOf(byte[] original) {
return Arrays.copyOf(original, original.length);
}
|
@Test
public void copyOf() {
byte[] input = new byte[] {1, 2, 3};
assertThat(Tools.copyOf(input), is(equalTo(input)));
assertNotSame(input, Tools.copyOf(input));
}
|
@RequestMapping("/error")
public ModelAndView handleError(HttpServletRequest request) {
Object status = request.getAttribute(RequestDispatcher.ERROR_STATUS_CODE);
ModelAndView modelAndView = new ModelAndView();
if (status != null) {
int statusCode = Integer.parseInt(status.toString());
if (statusCode == HttpStatus.NOT_FOUND.value()) {
modelAndView.setStatus(HttpStatus.OK);
modelAndView.setViewName("forward:/ui/index.html");
return modelAndView;
}
modelAndView.setStatus(HttpStatus.valueOf(statusCode));
}
return modelAndView;
}
|
@Test
void handleError_ReturnsModelAndViewWithStatusCode() {
HttpServletRequest request = mock(HttpServletRequest.class);
when(request.getAttribute(RequestDispatcher.ERROR_STATUS_CODE))
.thenReturn(HttpStatus.INTERNAL_SERVER_ERROR.value());
ModelAndView modelAndView = new FrontendRedirector().handleError(request);
assertEquals(HttpStatus.INTERNAL_SERVER_ERROR, modelAndView.getStatus());
assertNull(modelAndView.getViewName());
}
|
@Override
public UltraLogLog getInitialAggregatedValue(Object rawValue) {
UltraLogLog initialValue;
if (rawValue instanceof byte[]) {
byte[] bytes = (byte[]) rawValue;
initialValue = deserializeAggregatedValue(bytes);
} else {
initialValue = UltraLogLog.create(_p);
addObjectToSketch(rawValue, initialValue);
}
return initialValue;
}
|
@Test
public void getInitialValueShouldSupportDifferentTypes() {
DistinctCountULLValueAggregator agg = new DistinctCountULLValueAggregator(Collections.emptyList());
assertEquals(roundedEstimate(agg.getInitialAggregatedValue(12345)), 1.0);
assertEquals(roundedEstimate(agg.getInitialAggregatedValue(12345L)), 1.0);
assertEquals(roundedEstimate(agg.getInitialAggregatedValue(12.345f)), 1.0);
assertEquals(roundedEstimate(agg.getInitialAggregatedValue(12.345d)), 1.0);
assertThrows(() -> agg.getInitialAggregatedValue(new Object()));
}
|
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
String authorization =
request.getHeader(HttpConstants.AUTHORIZATION_HEADER);
if (authorization == null
|| !AuthenticationHandlerUtil.matchAuthScheme(HttpConstants.BASIC,
authorization)) {
response.setHeader(WWW_AUTHENTICATE, HttpConstants.BASIC);
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
if (authorization == null) {
logger.trace("Basic auth starting");
} else {
logger.warn("'" + HttpConstants.AUTHORIZATION_HEADER
+ "' does not start with '" + HttpConstants.BASIC + "' : {}",
authorization);
}
} else {
authorization =
authorization.substring(HttpConstants.BASIC.length()).trim();
final Base64 base64 = new Base64(0);
// As per RFC7617, UTF-8 charset should be used for decoding.
String[] credentials = new String(base64.decode(authorization),
StandardCharsets.UTF_8).split(":", 2);
if (credentials.length == 2) {
token = authenticateUser(credentials[0], credentials[1]);
response.setStatus(HttpServletResponse.SC_OK);
}
}
return token;
}
|
@Test(timeout = 60000)
public void testRequestWithIncompleteAuthorization() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(HttpConstants.AUTHORIZATION_HEADER))
.thenReturn(HttpConstants.BASIC);
Assert.assertNull(handler.authenticate(request, response));
}
|
public static KeyStore newStoreCopyContent(KeyStore originalKeyStore,
char[] currentPassword,
final char[] newPassword) throws GeneralSecurityException, IOException {
if (newPassword == null) {
throw new IllegalArgumentException("new password cannot be null");
}
KeyStore newKeyStore = KeyStore.getInstance(PKCS12);
newKeyStore.load(null, newPassword);
final Enumeration<String> aliases = originalKeyStore.aliases();
while (aliases.hasMoreElements()) {
String alias = aliases.nextElement();
if (originalKeyStore.entryInstanceOf(alias, KeyStore.PrivateKeyEntry.class)) {
newKeyStore.setKeyEntry(
alias,
originalKeyStore.getKey(alias, currentPassword),
newPassword,
originalKeyStore.getCertificateChain(alias)
);
} else if (originalKeyStore.entryInstanceOf(alias, KeyStore.TrustedCertificateEntry.class)) {
newKeyStore.setCertificateEntry(alias, originalKeyStore.getCertificate(alias));
} else if (originalKeyStore.entryInstanceOf(alias, KeyStore.SecretKeyEntry.class)) {
newKeyStore.setEntry(alias,
originalKeyStore.getEntry(alias, new KeyStore.PasswordProtection(currentPassword)),
new KeyStore.PasswordProtection(newPassword)
);
}
}
return newKeyStore;
}
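
A minimal usage sketch for rotating a store password with the method above (file names and passwords are hypothetical; assumes java.io.*, java.nio.file.* and java.security.KeyStore imports):

KeyStore original = KeyStore.getInstance("PKCS12");
try (InputStream in = Files.newInputStream(Path.of("keystore.p12"))) {
    original.load(in, "oldPass".toCharArray());
}
KeyStore rotated = KeystoreUtils.newStoreCopyContent(
        original, "oldPass".toCharArray(), "newPass".toCharArray());
try (OutputStream out = Files.newOutputStream(Path.of("keystore-rotated.p12"))) {
    rotated.store(out, "newPass".toCharArray());
}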
|
@Test
    void testMovingManyEntriesOfTheSameType() throws Exception {
final char[] oldPassword = "oldPass".toCharArray();
final char[] newPassword = "newPass".toCharArray();
KeyStore originalKeyStore = KeyStore.getInstance(PKCS12);
originalKeyStore.load(null, oldPassword);
CertRequest req = CertRequest.selfSigned("localhost")
.validity(Duration.ZERO);
final KeyPair keyPair1 = CertificateGenerator.generate(req);
originalKeyStore.setKeyEntry("privkey1", keyPair1.privateKey(), oldPassword, new Certificate[]{keyPair1.certificate()});
final KeyPair keyPair2 = CertificateGenerator.generate(req);
originalKeyStore.setKeyEntry("privkey2", keyPair2.privateKey(), oldPassword, new Certificate[]{keyPair2.certificate()});
final KeyPair keyPair3 = CertificateGenerator.generate(req);
originalKeyStore.setKeyEntry("privkey3", keyPair3.privateKey(), oldPassword, new Certificate[]{keyPair3.certificate()});
final KeyStore newKeyStore = KeystoreUtils.newStoreCopyContent(originalKeyStore, oldPassword, newPassword);
assertEquals(keyPair1.privateKey(), newKeyStore.getKey("privkey1", newPassword));
assertEquals(keyPair2.privateKey(), newKeyStore.getKey("privkey2", newPassword));
assertEquals(keyPair3.privateKey(), newKeyStore.getKey("privkey3", newPassword));
}
|
@Override
public Stream<FileSlice> getLatestMergedFileSlicesBeforeOrOn(String partitionPath, String maxInstantTime) {
return execute(partitionPath, maxInstantTime, preferredView::getLatestMergedFileSlicesBeforeOrOn,
(path, instantTime) -> getSecondaryView().getLatestMergedFileSlicesBeforeOrOn(path, instantTime));
}
|
@Test
public void testGetLatestMergedFileSlicesBeforeOrOn() {
Stream<FileSlice> actual;
Stream<FileSlice> expected = testFileSliceStream;
String partitionPath = "/table2";
String maxInstantTime = "2020-01-01";
when(primary.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime))
.thenReturn(testFileSliceStream);
actual = fsView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime);
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime))
.thenThrow(new RuntimeException());
when(secondary.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime))
.thenReturn(testFileSliceStream);
actual = fsView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime);
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime))
.thenReturn(testFileSliceStream);
actual = fsView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime);
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime))
.thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, maxInstantTime);
});
}
|
@UdafFactory(description = "Compute average of column with type Integer.",
aggregateSchema = "STRUCT<SUM integer, COUNT bigint>")
public static TableUdaf<Integer, Struct, Double> averageInt() {
return getAverageImplementation(
0,
STRUCT_INT,
(sum, newValue) -> sum.getInt32(SUM) + newValue,
(sum, count) -> sum.getInt32(SUM) / count,
(sum1, sum2) -> sum1.getInt32(SUM) + sum2.getInt32(SUM),
(sum, valueToUndo) -> sum.getInt32(SUM) - valueToUndo);
}
|
@Test
public void shouldAverageInts() {
final TableUdaf<Integer, Struct, Double> udaf = AverageUdaf.averageInt();
Struct agg = udaf.initialize();
final int[] values = new int[] {1, 1, 1, 1, 1};
for (final int thisValue : values) {
agg = udaf.aggregate(thisValue, agg);
}
final double avg = udaf.map(agg);
assertThat(1.0, equalTo(avg));
}
|
static String calculateBillingProjectId(Optional<String> configParentProjectId, Optional<Credentials> credentials)
{
if (configParentProjectId.isPresent()) {
return configParentProjectId.get();
}
        // All other credential types (User, AppEngine, GCE, CloudShell, etc.) take it from the environment
if (credentials.isPresent() && credentials.get() instanceof ServiceAccountCredentials) {
return ((ServiceAccountCredentials) credentials.get()).getProjectId();
}
return BigQueryOptions.getDefaultProjectId();
}
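
The precedence the method implements, traced with hypothetical values:

// 1. explicit config wins:   configParentProjectId = Optional.of("my-proj")     -> "my-proj"
// 2. service-account creds:  ServiceAccountCredentials with projectId "sa-proj" -> "sa-proj"
// 3. everything else:        BigQueryOptions.getDefaultProjectId()  (environment default)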
|
@Test
public void testCredentialsOnly()
throws Exception
{
String projectId = BigQueryConnectorModule.calculateBillingProjectId(Optional.empty(), credentials());
assertThat(projectId).isEqualTo("presto-bq-credentials-test");
}
|
@Override public final Object unwrap() {
return delegate;
}
|
@Test void unwrap() {
assertThat(wrapper.unwrap())
.isEqualTo(request);
}
|
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public AppInfo get() {
return getAppInfo();
}
|
@Test
public void testAMSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
}
|
@Override
public String toString() {
return "ResourceConfig{" +
"url=" + url +
", id='" + id + '\'' +
", resourceType=" + resourceType +
'}';
}
|
@Test
public void when_attachNonexistentFileWithPath_then_throwsException() {
// Given
String path = Paths.get("/i/do/not/exist").toString();
// Then
expectedException.expect(JetException.class);
expectedException.expectMessage("Not an existing, readable file: " + path);
// When
config.attachFile(path);
}
|
private DefaultFuture(Channel channel, Request request, int timeout) {
this.channel = channel;
this.request = request;
this.id = request.getId();
this.timeout = timeout > 0 ? timeout : channel.getUrl().getPositiveParameter(TIMEOUT_KEY, DEFAULT_TIMEOUT);
// put into waiting map.
FUTURES.put(id, this);
CHANNELS.put(id, channel);
}
|
@Test
@Disabled
public void timeoutNotSend() throws Exception {
final DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
        System.out.println(
                "before the future is created, time is : " + LocalDateTime.now().format(formatter));
// timeout after 5 seconds.
DefaultFuture f = defaultFuture(5000);
while (!f.isDone()) {
// spin
Thread.sleep(100);
}
        System.out.println(
                "after the future has timed out, time is : " + LocalDateTime.now().format(formatter));
        // get() will throw a timeout exception because the future has timed out.
try {
f.get();
} catch (Exception e) {
            Assertions.assertTrue(
                    e.getCause() instanceof TimeoutException, "caught exception is not a timeout exception!");
System.out.println(e.getMessage());
}
}
|
public void putStats(String route, String cause) {
if (route == null) {
route = "UNKNOWN_ROUTE";
}
route = route.replace("/", "_");
ConcurrentHashMap<String, ErrorStatsData> statsMap = routeMap.get(route);
if (statsMap == null) {
statsMap = new ConcurrentHashMap<String, ErrorStatsData>();
routeMap.putIfAbsent(route, statsMap);
}
ErrorStatsData sd = statsMap.get(cause);
if (sd == null) {
sd = new ErrorStatsData(route, cause);
ErrorStatsData sd1 = statsMap.putIfAbsent(cause, sd);
if (sd1 != null) {
sd = sd1;
} else {
MonitorRegistry.getInstance().registerObject(sd);
}
}
sd.update();
}
|
@Test
void testPutStats() {
ErrorStatsManager sm = new ErrorStatsManager();
assertNotNull(sm);
sm.putStats("test", "cause");
assertNotNull(sm.routeMap.get("test"));
ConcurrentHashMap<String, ErrorStatsData> map = sm.routeMap.get("test");
ErrorStatsData sd = map.get("cause");
assertEquals(1, sd.getCount());
sm.putStats("test", "cause");
assertEquals(2, sd.getCount());
}
|
@Override
public long getTtl() {
return expiryMetadata.getTtl();
}
|
@Test
public void test_getTtl() {
assertEquals(Long.MAX_VALUE, view.getTtl());
}
|
@Bean
public PluginDataHandler resilience4JHandler() {
return new Resilience4JHandler();
}
|
@Test
public void testResilience4JHandler() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(Resilience4JPluginConfiguration.class))
.withBean(Resilience4JPluginConfigurationTest.class)
.withPropertyValues("debug=true")
.run(context -> {
Resilience4JHandler handler = context.getBean("resilience4JHandler", Resilience4JHandler.class);
assertNotNull(handler);
});
}
|
@Override
public LanguageDetector loadModels() {
// FUTURE when the "language-detector" project supports short profiles, check if
// isShortText() returns true and switch to those.
languages = DEFAULT_LANGUAGES;
if (languageProbabilities != null) {
detector = createDetector(DEFAULT_LANGUAGE_PROFILES, languageProbabilities);
} else {
detector = DEFAULT_DETECTOR;
}
return this;
}
|
@Test
@Timeout(5000)
public void testOptimaizeRegexBug() throws Exception {
//confirm TIKA-2777 doesn't affect langdetect's Optimaize
LanguageDetector detector = new OptimaizeLangDetector().setShortText(false).loadModels();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 50000; i++) {
sb.append('a');
}
detector.detect(sb.toString());
}
|
@Description("removes whitespace from the beginning and end of a string")
@ScalarFunction("trim")
@LiteralParameters("x")
@SqlType("char(x)")
public static Slice charTrim(@SqlType("char(x)") Slice slice)
{
return trim(slice);
}
|
@Test
public void testCharTrim()
{
assertFunction("TRIM(CAST('' AS CHAR(20)))", createCharType(20), padRight("", 20));
assertFunction("TRIM(CAST(' hello ' AS CHAR(9)))", createCharType(9), padRight("hello", 9));
assertFunction("TRIM(CAST(' hello' AS CHAR(7)))", createCharType(7), padRight("hello", 7));
assertFunction("TRIM(CAST('hello ' AS CHAR(7)))", createCharType(7), padRight("hello", 7));
assertFunction("TRIM(CAST(' hello world ' AS CHAR(13)))", createCharType(13), padRight("hello world", 13));
assertFunction("TRIM(CAST('\u4FE1\u5FF5 \u7231 \u5E0C\u671B \u2028 ' AS CHAR(10)))", createCharType(10), padRight("\u4FE1\u5FF5 \u7231 \u5E0C\u671B", 10));
assertFunction("TRIM(CAST('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ' AS CHAR(9)))", createCharType(9), padRight("\u4FE1\u5FF5 \u7231 \u5E0C\u671B", 9));
assertFunction("TRIM(CAST(' \u4FE1\u5FF5 \u7231 \u5E0C\u671B ' AS CHAR(9)))", createCharType(9), padRight("\u4FE1\u5FF5 \u7231 \u5E0C\u671B", 9));
assertFunction("TRIM(CAST(' \u4FE1\u5FF5 \u7231 \u5E0C\u671B' AS CHAR(9)))", createCharType(9), padRight("\u4FE1\u5FF5 \u7231 \u5E0C\u671B", 9));
assertFunction("TRIM(CAST(' \u2028 \u4FE1\u5FF5 \u7231 \u5E0C\u671B' AS CHAR(10)))", createCharType(10), padRight("\u4FE1\u5FF5 \u7231 \u5E0C\u671B", 10));
}
|
@Subscribe
public void publishClusterEvent(Object event) {
if (event instanceof DeadEvent) {
LOG.debug("Skipping DeadEvent on cluster event bus");
return;
}
final String className = AutoValueUtils.getCanonicalName(event.getClass());
final ClusterEvent clusterEvent = ClusterEvent.create(nodeId.getNodeId(), className, Collections.singleton(nodeId.getNodeId()), event);
try {
final String id = dbCollection.save(clusterEvent, WriteConcern.JOURNALED).getSavedId();
// We are handling a locally generated event, so we can speed up processing by posting it to the local event
// bus immediately. Due to having added the local node id to its list of consumers, it will not be picked up
// by the db cursor again, avoiding double processing of the event. See #11263 for details.
serverEventBus.post(event);
LOG.debug("Published cluster event with ID <{}> and type <{}>", id, className);
} catch (MongoException e) {
LOG.error("Couldn't publish cluster event of type <" + className + ">", e);
}
}
|
@Test
public void localEventIsPostedToServerBusImmediately() {
SimpleEvent event = new SimpleEvent("test");
clusterEventPeriodical.publishClusterEvent(event);
verify(serverEventBus, times(1)).post(event);
}
|
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object o) {
Object span = request.getAttribute(SpanCustomizer.class.getName());
if (span instanceof SpanCustomizer) handlerParser.preHandle(request, o, (SpanCustomizer) span);
return true;
}
|
@Test void preHandle_parses() {
when(request.getAttribute("brave.SpanCustomizer")).thenReturn(span);
interceptor.preHandle(request, response, controller);
verify(request).getAttribute("brave.SpanCustomizer");
verify(parser).preHandle(request, controller, span);
verifyNoMoreInteractions(request, response, parser, span);
}
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testFetcherSessionEpochUpdate() throws Exception {
buildFetcher(2);
MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
client.updateMetadata(initialMetadataResponse);
assignFromUser(Collections.singleton(tp0));
subscriptions.seek(tp0, 0L);
AtomicInteger fetchesRemaining = new AtomicInteger(1000);
executorService = Executors.newSingleThreadExecutor();
Future<?> future = executorService.submit(() -> {
long nextOffset = 0;
long nextEpoch = 0;
while (fetchesRemaining.get() > 0) {
synchronized (consumerClient) {
if (!client.requests().isEmpty()) {
ClientRequest request = client.requests().peek();
FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
int epoch = fetchRequest.metadata().epoch();
assertTrue(epoch == 0 || epoch == nextEpoch,
String.format("Unexpected epoch expected %d got %d", nextEpoch, epoch));
nextEpoch++;
LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
responseMap.put(tidp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(nextOffset + 2)
.setLastStableOffset(nextOffset + 2)
.setLogStartOffset(0)
.setRecords(buildRecords(nextOffset, 2, nextOffset)));
nextOffset += 2;
client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
consumerClient.poll(time.timer(0));
}
}
}
return fetchesRemaining.get();
});
long nextFetchOffset = 0;
while (fetchesRemaining.get() > 0 && !future.isDone()) {
if (sendFetches() == 1) {
synchronized (consumerClient) {
consumerClient.poll(time.timer(0));
}
}
if (fetcher.hasCompletedFetches()) {
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
if (!fetchedRecords.isEmpty()) {
fetchesRemaining.decrementAndGet();
List<ConsumerRecord<byte[], byte[]>> records = fetchedRecords.get(tp0);
assertEquals(2, records.size());
assertEquals(nextFetchOffset, records.get(0).offset());
assertEquals(nextFetchOffset + 1, records.get(1).offset());
nextFetchOffset += 2;
}
assertTrue(fetchRecords().isEmpty());
}
}
assertEquals(0, future.get());
}
|
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
}
|
@Test
public void timeWindowedCogroupedZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
// override the default store into in-memory
final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
builder.stream("input-topic")
.groupByKey()
.cogroup((key, value, aggregate) -> value)
.windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(1)))
.aggregate(() -> "");
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topology: my-topology:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
" Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
" --> COGROUPKSTREAM-MERGE-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
" --> none\n" +
" <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
|
static boolean fieldMatch(Object repoObj, Object filterObj) {
return filterObj == null || repoObj.equals(filterObj);
}
|
@Test
public void testFieldMatchWithNullFilterObjShouldReturnTrue() {
assertTrue(Utilities.fieldMatch("repoObject", null));
}
|
protected Invoker<T> select(
LoadBalance loadbalance, Invocation invocation, List<Invoker<T>> invokers, List<Invoker<T>> selected)
throws RpcException {
if (CollectionUtils.isEmpty(invokers)) {
return null;
}
String methodName = invocation == null ? StringUtils.EMPTY_STRING : RpcUtils.getMethodName(invocation);
boolean sticky =
invokers.get(0).getUrl().getMethodParameter(methodName, CLUSTER_STICKY_KEY, DEFAULT_CLUSTER_STICKY);
// ignore overloaded method
if (stickyInvoker != null && !invokers.contains(stickyInvoker)) {
stickyInvoker = null;
}
// ignore concurrency problem
if (sticky && stickyInvoker != null && (selected == null || !selected.contains(stickyInvoker))) {
if (availableCheck && stickyInvoker.isAvailable()) {
return stickyInvoker;
}
}
Invoker<T> invoker = doSelect(loadbalance, invocation, invokers, selected);
if (sticky) {
stickyInvoker = invoker;
}
return invoker;
}
|
@Test
void testSelectAgainAndCheckAvailable() {
LoadBalance lb = ExtensionLoader.getExtensionLoader(LoadBalance.class).getExtension(RoundRobinLoadBalance.NAME);
initlistsize5();
{
            // Boundary condition test.
selectedInvokers.clear();
selectedInvokers.add(invoker1);
selectedInvokers.add(invoker2);
selectedInvokers.add(invoker3);
selectedInvokers.add(invoker5);
Invoker sinvoker = cluster.select(lb, invocation, invokers, selectedInvokers);
Assertions.assertSame(sinvoker, invoker4);
}
{
            // Boundary condition test.
selectedInvokers.clear();
selectedInvokers.add(invoker2);
selectedInvokers.add(invoker3);
selectedInvokers.add(invoker4);
selectedInvokers.add(invoker5);
Invoker sinvoker = cluster.select(lb, invocation, invokers, selectedInvokers);
Assertions.assertTrue(sinvoker == invoker2 || sinvoker == invoker4);
}
{
            // Boundary condition test.
for (int i = 0; i < 100; i++) {
selectedInvokers.clear();
Invoker sinvoker = cluster.select(lb, invocation, invokers, selectedInvokers);
Assertions.assertTrue(sinvoker == invoker2 || sinvoker == invoker4);
}
}
{
            // Boundary condition test.
for (int i = 0; i < 100; i++) {
selectedInvokers.clear();
selectedInvokers.add(invoker1);
selectedInvokers.add(invoker3);
selectedInvokers.add(invoker5);
Invoker sinvoker = cluster.select(lb, invocation, invokers, selectedInvokers);
Assertions.assertTrue(sinvoker == invoker2 || sinvoker == invoker4);
}
}
{
            // Boundary condition test.
for (int i = 0; i < 100; i++) {
selectedInvokers.clear();
selectedInvokers.add(invoker1);
selectedInvokers.add(invoker3);
selectedInvokers.add(invoker2);
selectedInvokers.add(invoker4);
selectedInvokers.add(invoker5);
Invoker sinvoker = cluster.select(lb, invocation, invokers, selectedInvokers);
Assertions.assertTrue(sinvoker == invoker2 || sinvoker == invoker4);
}
}
}
|
@Override
public void suspend(Throwable cause) {
context.goToFinished(context.getArchivedExecutionGraph(JobStatus.SUSPENDED, cause));
}
|
@Test
void testSuspendTransitionsToFinished() {
FlinkException expectedException = new FlinkException("This is a test exception");
TestingStateWithoutExecutionGraph state = new TestingStateWithoutExecutionGraph(ctx, LOG);
ctx.setExpectFinished(
archivedExecutionGraph -> {
assertThat(archivedExecutionGraph.getState()).isEqualTo(JobStatus.SUSPENDED);
assertThat(archivedExecutionGraph.getFailureInfo()).isNotNull();
assertThat(
archivedExecutionGraph
.getFailureInfo()
.getException()
.deserializeError(this.getClass().getClassLoader()))
.isEqualTo(expectedException);
});
state.suspend(expectedException);
}
|
public final TraceContext decorate(TraceContext context) {
long traceId = context.traceId(), spanId = context.spanId();
E claimed = null;
int existingIndex = -1, extraLength = context.extra().size();
for (int i = 0; i < extraLength; i++) {
Object next = context.extra().get(i);
if (next instanceof Extra) {
Extra nextExtra = (Extra) next;
// Don't interfere with other instances or subtypes
if (nextExtra.factory != this) continue;
if (claimed == null && nextExtra.tryToClaim(traceId, spanId)) {
claimed = (E) nextExtra;
continue;
}
if (existingIndex == -1) {
existingIndex = i;
} else {
Platform.get().log("BUG: something added redundant extra instances %s", context, null);
return context;
}
}
}
// Easiest when there is neither existing state to assign, nor need to change context.extra()
if (claimed != null && existingIndex == -1) {
return context;
}
// If context.extra() didn't have an unclaimed extra instance, create one for this context.
if (claimed == null) {
claimed = create();
if (claimed == null) {
Platform.get().log("BUG: create() returned null", null);
return context;
}
claimed.tryToClaim(traceId, spanId);
}
TraceContext.Builder builder = context.toBuilder().clearExtra().addExtra(claimed);
for (int i = 0; i < extraLength; i++) {
Object next = context.extra().get(i);
if (i == existingIndex) {
E existing = (E) next;
// If the claimed extra instance was new or had no changes, simply assign existing to it
if (claimed.state == initialState) {
claimed.state = existing.state;
} else if (existing.state != initialState) {
claimed.mergeStateKeepingOursOnConflict(existing);
}
} else if (!next.equals(claimed)) {
builder.addExtra(next);
}
}
return builder.build();
}
|
@Test void decorate_claimsContext() {
assertExtraClaimed(propagationFactory.decorate(context));
}
|
@Override
public void submit(VplsOperation vplsOperation) {
if (isLeader) {
// Only leader can execute operation
addVplsOperation(vplsOperation);
}
}
|
@Test
public void testSubmitUpdateHostOperation() {
vplsOperationManager.hostService = new EmptyHostService();
VplsData vplsData = VplsData.of(VPLS1);
vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
VplsOperation vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.ADD);
vplsOperationManager.submit(vplsOperation);
delay(1000);
vplsOperationManager.hostService = new TestHostService();
vplsData = VplsData.of(VPLS1);
vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
vplsData.state(VplsData.VplsState.UPDATING);
vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.UPDATE);
vplsOperationManager.submit(vplsOperation);
assertAfter(OPERATION_DELAY, OPERATION_DURATION, () -> {
Collection<VplsData> vplss = vplsOperationManager.vplsStore.getAllVpls();
VplsData result = vplss.iterator().next();
VplsData expected = VplsData.of(VPLS1);
expected.addInterfaces(ImmutableSet.of(V100H1, V100H2));
expected.state(VplsData.VplsState.ADDED);
assertEquals(1, vplss.size());
assertEquals(expected, result);
assertEquals(4, vplsOperationManager.intentService.getIntentCount());
});
}
|
public Bandwidth add(Bandwidth value) {
if (value instanceof LongBandwidth) {
return Bandwidth.bps(this.bps + ((LongBandwidth) value).bps);
}
return Bandwidth.bps(this.bps + value.bps());
}
|
@Test
public void testAdd() {
final long add = billion + one;
Bandwidth expected = Bandwidth.kbps(add);
assertThat(big.add(small), is(expected));
final double notLongAdd = 1001.0;
Bandwidth notLongExpected = Bandwidth.kbps(notLongAdd);
assertThat(notLongSmall.add(notLongBig), is(notLongExpected));
}
|
public static List<String> getDefaultProtocols() throws NoSuchAlgorithmException, KeyManagementException
{
// TODO Might want to cache the result. It's unlikely to change at runtime.
final SSLContext context = getUninitializedSSLContext();
context.init( null, null, null );
return Arrays.asList( context.createSSLEngine().getEnabledProtocols() );
}
|
@Test
public void testHasDefaultProtocols() throws Exception
{
// Setup fixture.
// (not needed)
// Execute system under test.
final Collection<String> result = EncryptionArtifactFactory.getDefaultProtocols();
// Verify results.
assertFalse( result.isEmpty() );
}
|
@VisibleForTesting
static void validateWorkerSettings(DataflowPipelineWorkerPoolOptions workerOptions) {
DataflowPipelineOptions dataflowOptions = workerOptions.as(DataflowPipelineOptions.class);
validateSdkContainerImageOptions(workerOptions);
GcpOptions gcpOptions = workerOptions.as(GcpOptions.class);
Preconditions.checkArgument(
gcpOptions.getZone() == null || gcpOptions.getWorkerRegion() == null,
"Cannot use option zone with workerRegion. Prefer either workerZone or workerRegion.");
Preconditions.checkArgument(
gcpOptions.getZone() == null || gcpOptions.getWorkerZone() == null,
"Cannot use option zone with workerZone. Prefer workerZone.");
Preconditions.checkArgument(
gcpOptions.getWorkerRegion() == null || gcpOptions.getWorkerZone() == null,
"workerRegion and workerZone options are mutually exclusive.");
boolean hasExperimentWorkerRegion = false;
if (dataflowOptions.getExperiments() != null) {
for (String experiment : dataflowOptions.getExperiments()) {
if (experiment.startsWith("worker_region")) {
hasExperimentWorkerRegion = true;
break;
}
}
}
Preconditions.checkArgument(
!hasExperimentWorkerRegion || gcpOptions.getWorkerRegion() == null,
"Experiment worker_region and option workerRegion are mutually exclusive.");
Preconditions.checkArgument(
!hasExperimentWorkerRegion || gcpOptions.getWorkerZone() == null,
"Experiment worker_region and option workerZone are mutually exclusive.");
if (gcpOptions.getZone() != null) {
LOG.warn("Option --zone is deprecated. Please use --workerZone instead.");
gcpOptions.setWorkerZone(gcpOptions.getZone());
gcpOptions.setZone(null);
}
}
|
@Test
public void testExperimentRegionAndWorkerZoneMutuallyExclusive() {
DataflowPipelineWorkerPoolOptions options =
PipelineOptionsFactory.as(DataflowPipelineWorkerPoolOptions.class);
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
ExperimentalOptions.addExperiment(dataflowOptions, "worker_region=us-west1");
options.setWorkerZone("us-east1-b");
assertThrows(
IllegalArgumentException.class, () -> DataflowRunner.validateWorkerSettings(options));
}
|
public static CompletionStage<List<MultipartFile>> read(HttpServerRequest r) {
var contentType = r.headers().getFirst("content-type");
if (contentType == null) {
throw HttpServerResponseException.of(400, "content-type header is required");
}
var m = boundaryPattern.matcher(contentType);
if (!m.matches()) {
throw HttpServerResponseException.of(400, "content-type header is invalid");
}
var boundary = m.group("boundary");
var future = new CompletableFuture<List<MultipartFile>>();
var decoder = new MultipartDecoder(boundary, future);
r.body().subscribe(decoder);
return future;
}
|
@Test
void insomniaMultipart() {
var body = """
--X-INSOMNIA-BOUNDARY\r
Content-Disposition: form-data; name="field1"\r
Content-Type: text/plain\r
\r
value1\r
--X-INSOMNIA-BOUNDARY\r
Content-Disposition: form-data; name="field2"; filename="example.txt"\r
Content-Type: text/plain\r
\r
value2\r
--X-INSOMNIA-BOUNDARY--\r
\r""".getBytes(StandardCharsets.UTF_8);
var f = Flux.<ByteBuffer>create(sink -> {
var i = 0;
while (i < body.length) {
var len = Math.min(ThreadLocalRandom.current().nextInt(10), body.length - i);
var buf = ByteBuffer.wrap(body, i, len);
sink.next(buf);
i += len;
}
sink.complete();
});
var request = new SimpleHttpServerRequest("POST", "/", f, new Map.Entry[]{
Map.entry("content-type", "multipart/form-data; boundary=X-INSOMNIA-BOUNDARY")
}, Map.of());
var result = MultipartReader.read(request)
.toCompletableFuture().join();
assertThat(result)
.satisfies(part -> {
assertThat(part.name()).isEqualTo("field1");
assertThat(part.fileName()).isNull();
assertThat(part.contentType()).isEqualTo("text/plain");
assertThat(part.content()).asString(StandardCharsets.UTF_8).isEqualTo("value1");
}, Index.atIndex(0))
.satisfies(part -> {
assertThat(part.name()).isEqualTo("field2");
assertThat(part.fileName()).isEqualTo("example.txt");
assertThat(part.contentType()).isEqualTo("text/plain");
assertThat(part.content()).asString(StandardCharsets.UTF_8).isEqualTo("value2");
}, Index.atIndex(1));
}
|
public static void checkKeyParam(String dataId, String group) throws NacosException {
if (StringUtils.isBlank(dataId) || !ParamUtils.isValid(dataId)) {
throw new NacosException(NacosException.CLIENT_INVALID_PARAM, DATAID_INVALID_MSG);
}
if (StringUtils.isBlank(group) || !ParamUtils.isValid(group)) {
throw new NacosException(NacosException.CLIENT_INVALID_PARAM, GROUP_INVALID_MSG);
}
}
|
@Test
void testCheckKeyParam3() throws NacosException {
String dataId = "b";
String group = "c";
ParamUtils.checkKeyParam(Arrays.asList(dataId), group);
try {
group = "c";
ParamUtils.checkKeyParam(new ArrayList<String>(), group);
fail();
} catch (NacosException e) {
assertEquals("dataIds invalid", e.getMessage());
}
try {
dataId = "";
group = "c";
ParamUtils.checkKeyParam(Arrays.asList(dataId), group);
fail();
} catch (NacosException e) {
assertEquals("dataId invalid", e.getMessage());
}
try {
dataId = "b";
group = "";
ParamUtils.checkKeyParam(Arrays.asList(dataId), group);
fail();
} catch (NacosException e) {
assertEquals("group invalid", e.getMessage());
}
}
|
@Override
public <T> List<SearchResult<T>> search(SearchRequest request, Class<T> typeFilter) {
SearchSession<T> session = new SearchSession<>(request, Collections.singleton(typeFilter));
if (request.inParallel()) {
ForkJoinPool commonPool = ForkJoinPool.commonPool();
getProviderTasks(request, session).stream().map(commonPool::submit).forEach(ForkJoinTask::join);
} else {
getProviderTasks(request, session).forEach(Runnable::run);
}
return session.getResults();
}
|
@Test
public void testNodeLabel() {
GraphGenerator generator = GraphGenerator.build().generateTinyGraph();
generator.getGraph().getNode(GraphGenerator.FIRST_NODE).setLabel("foo");
SearchRequest request = buildRequest("foo", generator);
Collection<SearchResult<Node>> results = controller.search(request, Node.class);
Assert.assertEquals(1, results.size());
SearchResult<Node> result = results.iterator().next();
Assert.assertEquals(GraphGenerator.FIRST_NODE, result.getResult().getId());
}
|
@Override
public SchemaResult getValueSchema(
final Optional<String> topicName,
final Optional<Integer> schemaId,
final FormatInfo expectedFormat,
final SerdeFeatures serdeFeatures
) {
return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false);
}
|
@Test
public void shouldThrowFromGetValueSchemaOnOtherRestExceptions() throws Exception {
// Given:
when(srClient.getLatestSchemaMetadata(any()))
.thenThrow(new RestClientException("failure", 1, 1));
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> supplier.getValueSchema(Optional.of(TOPIC_NAME),
Optional.empty(), expectedFormat, SerdeFeatures.of())
);
// Then:
assertThat(e.getMessage(), containsString("Schema registry fetch for topic "
+ "value request failed for topic: " + TOPIC_NAME));
}
|
public static String getSanitizedPackageName(String modelName) {
return modelName.replaceAll("[^A-Za-z0-9.]", "").toLowerCase();
}
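
A quick worked example (the input string is hypothetical): the regex strips every character outside [A-Za-z0-9.], then the result is lower-cased.

// getSanitizedPackageName("My-Model v1.2") -> "mymodelv1.2"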
|
@Test
void getSanitizedPackageName() {
packageNameMap.forEach((originalName, expectedName) -> assertThat(KiePMMLModelUtils.getSanitizedPackageName(originalName)).isEqualTo(expectedName));
}
|
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
}
|
@Test
public void kTableNamedMaterializedFilterShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.filter((key, value) -> false, Materialized.as("store-name"));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-FILTER-0000000003\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KTABLE-FILTER-0000000003 (stores: [store-name])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
"\n",
describe.toString());
}
|
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
long size = request.getContentLengthLong();
if (size > maxSize || isChunked(request)) {
            // The size is either unknown or too large
HttpServletResponse httpResponse = (HttpServletResponse) response;
httpResponse.sendError(HttpServletResponse.SC_BAD_REQUEST, "Bad Request");
} else {
chain.doFilter(request, response);
}
}
|
@Test
public void testDoFilterInvokeChainDoFilter() throws ServletException, IOException {
MaxRequestSizeFilter maxRequestSizeFilter = new MaxRequestSizeFilter(MAX_SIZE);
FilterChain spyFilterChain = Mockito.spy(FilterChain.class);
ServletRequest spyHttpServletRequest = Mockito.spy(ServletRequest.class);
ServletResponse spyHttpServletResponse = Mockito.spy(ServletResponse.class);
Mockito.doReturn(LEGAL_SIZE).when(spyHttpServletRequest).getContentLengthLong();
maxRequestSizeFilter.doFilter(spyHttpServletRequest, spyHttpServletResponse, spyFilterChain);
        Mockito.verify(spyFilterChain).doFilter(spyHttpServletRequest, spyHttpServletResponse);
}
|
private int usage(String[] args) {
err(
"Usage : [load | create] <classname>");
err(
" [locate | print] <resourcename>]");
err("The return codes are:");
explainResult(SUCCESS,
"The operation was successful");
explainResult(E_GENERIC,
"Something went wrong");
explainResult(E_USAGE,
"This usage message was printed");
explainResult(E_NOT_FOUND,
"The class or resource was not found");
explainResult(E_LOAD_FAILED,
"The class was found but could not be loaded");
explainResult(E_CREATE_FAILED,
"The class was loaded, but an instance of it could not be created");
return E_USAGE;
}
|
@Test
public void testUsage() throws Throwable {
run(FindClass.E_USAGE, "org.apache.hadoop.util.TestFindClass");
}
|
@VisibleForTesting
void setKeyACLs(Configuration conf) {
Map<String, HashMap<KeyOpType, AccessControlList>> tempKeyAcls =
new HashMap<String, HashMap<KeyOpType,AccessControlList>>();
Map<String, String> allKeyACLS =
conf.getValByRegex(KMSConfiguration.KEY_ACL_PREFIX_REGEX);
for (Map.Entry<String, String> keyAcl : allKeyACLS.entrySet()) {
String k = keyAcl.getKey();
// this should be of type "key.acl.<KEY_NAME>.<OP_TYPE>"
int keyNameStarts = KMSConfiguration.KEY_ACL_PREFIX.length();
int keyNameEnds = k.lastIndexOf(".");
if (keyNameStarts >= keyNameEnds) {
LOG.warn("Invalid key name '{}'", k);
} else {
String aclStr = keyAcl.getValue();
String keyName = k.substring(keyNameStarts, keyNameEnds);
String keyOp = k.substring(keyNameEnds + 1);
KeyOpType aclType = null;
try {
aclType = KeyOpType.valueOf(keyOp);
} catch (IllegalArgumentException e) {
LOG.warn("Invalid key Operation '{}'", keyOp);
}
if (aclType != null) {
          // On the assumption this will be single-threaded; otherwise we would
          // need a ConcurrentHashMap.
HashMap<KeyOpType,AccessControlList> aclMap =
tempKeyAcls.get(keyName);
if (aclMap == null) {
aclMap = new HashMap<KeyOpType, AccessControlList>();
tempKeyAcls.put(keyName, aclMap);
}
aclMap.put(aclType, new AccessControlList(aclStr));
LOG.info("KEY_NAME '{}' KEY_OP '{}' ACL '{}'",
keyName, aclType, aclStr);
}
}
}
keyAcls = tempKeyAcls;
final Map<KeyOpType, AccessControlList> tempDefaults = new HashMap<>();
final Map<KeyOpType, AccessControlList> tempWhitelists = new HashMap<>();
for (KeyOpType keyOp : KeyOpType.values()) {
parseAclsWithPrefix(conf, KMSConfiguration.DEFAULT_KEY_ACL_PREFIX,
keyOp, tempDefaults);
parseAclsWithPrefix(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX,
keyOp, tempWhitelists);
}
defaultKeyAcls = tempDefaults;
whitelistKeyAcls = tempWhitelists;
}
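
A minimal configuration sketch using the same prefixes as the test below (the key name and principals are hypothetical):

Configuration conf = new Configuration(false);
conf.set(KEY_ACL + "payroll-key.READ", "alice,bob");           // key.acl.<KEY_NAME>.<OP_TYPE>
conf.set(DEFAULT_KEY_ACL_PREFIX + "READ", "ops");              // fallback for keys with no per-key ACL
conf.set(WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "kmsadmin"); // whitelist consulted in addition
acls.setKeyACLs(conf);                                         // rebuilds and swaps in all three ACL maps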
|
@Test
public void testKeyAclReload() {
Configuration conf = new Configuration(false);
conf.set(DEFAULT_KEY_ACL_PREFIX + "READ", "read1");
conf.set(DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "");
conf.set(DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK", "*");
conf.set(DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK", "decrypt1");
conf.set(KEY_ACL + "testuser1.ALL", "testkey1");
conf.set(WHITELIST_KEY_ACL_PREFIX + "READ", "admin_read1");
conf.set(WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "");
conf.set(WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK", "*");
conf.set(WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK", "admin_decrypt1");
final KMSACLs acls = new KMSACLs(conf);
// update config and hot-reload.
conf.set(DEFAULT_KEY_ACL_PREFIX + "READ", "read2");
conf.set(DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "mgmt1,mgmt2");
conf.set(DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK", "");
conf.set(DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK", "decrypt2");
conf.set(KEY_ACL + "testkey1.ALL", "testkey1,testkey2");
conf.set(WHITELIST_KEY_ACL_PREFIX + "READ", "admin_read2");
conf.set(WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "admin_mgmt,admin_mgmt1");
conf.set(WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK", "");
conf.set(WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK", "admin_decrypt2");
acls.setKeyACLs(conf);
assertDefaultKeyAcl(acls, KeyOpType.READ, "read2");
assertDefaultKeyAcl(acls, KeyOpType.MANAGEMENT, "mgmt1", "mgmt2");
assertDefaultKeyAcl(acls, KeyOpType.GENERATE_EEK);
assertDefaultKeyAcl(acls, KeyOpType.DECRYPT_EEK, "decrypt2");
assertKeyAcl("testuser1", acls, KeyOpType.ALL, "testkey1");
assertWhitelistKeyAcl(acls, KeyOpType.READ, "admin_read2");
assertWhitelistKeyAcl(acls, KeyOpType.MANAGEMENT,
"admin_mgmt", "admin_mgmt1");
assertWhitelistKeyAcl(acls, KeyOpType.GENERATE_EEK);
assertWhitelistKeyAcl(acls, KeyOpType.DECRYPT_EEK, "admin_decrypt2");
// reloading same config, nothing should change.
acls.setKeyACLs(conf);
assertDefaultKeyAcl(acls, KeyOpType.READ, "read2");
assertDefaultKeyAcl(acls, KeyOpType.MANAGEMENT, "mgmt1", "mgmt2");
assertDefaultKeyAcl(acls, KeyOpType.GENERATE_EEK);
assertDefaultKeyAcl(acls, KeyOpType.DECRYPT_EEK, "decrypt2");
assertKeyAcl("testuser1", acls, KeyOpType.ALL, "testkey1");
assertWhitelistKeyAcl(acls, KeyOpType.READ, "admin_read2");
assertWhitelistKeyAcl(acls, KeyOpType.MANAGEMENT,
"admin_mgmt", "admin_mgmt1");
assertWhitelistKeyAcl(acls, KeyOpType.GENERATE_EEK);
assertWhitelistKeyAcl(acls, KeyOpType.DECRYPT_EEK, "admin_decrypt2");
// test wildcard.
conf.set(DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK", "*");
acls.setKeyACLs(conf);
AccessControlList acl = acls.defaultKeyAcls.get(KeyOpType.DECRYPT_EEK);
Assert.assertTrue(acl.isAllAllowed());
Assert.assertTrue(acl.getUsers().isEmpty());
// everything else should still be the same.
assertDefaultKeyAcl(acls, KeyOpType.READ, "read2");
assertDefaultKeyAcl(acls, KeyOpType.MANAGEMENT, "mgmt1", "mgmt2");
assertDefaultKeyAcl(acls, KeyOpType.GENERATE_EEK);
assertKeyAcl("testuser1", acls, KeyOpType.ALL, "testkey1");
assertWhitelistKeyAcl(acls, KeyOpType.READ, "admin_read2");
assertWhitelistKeyAcl(acls, KeyOpType.MANAGEMENT,
"admin_mgmt", "admin_mgmt1");
assertWhitelistKeyAcl(acls, KeyOpType.GENERATE_EEK);
assertWhitelistKeyAcl(acls, KeyOpType.DECRYPT_EEK, "admin_decrypt2");
// test new configuration should clear other items
conf = new Configuration();
conf.set(DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK", "new");
acls.setKeyACLs(conf);
assertDefaultKeyAcl(acls, KeyOpType.DECRYPT_EEK, "new");
Assert.assertTrue(acls.keyAcls.isEmpty());
Assert.assertTrue(acls.whitelistKeyAcls.isEmpty());
Assert.assertEquals("Got unexpected sized acls:"
+ acls.defaultKeyAcls, 1, acls.defaultKeyAcls.size());
}
|
public EndpointResponse getServerMetadata() {
return EndpointResponse.ok(serverMetadata);
}
|
@Test
public void shouldReturnServerMetadata() {
// When:
final EndpointResponse response = serverMetadataResource.getServerMetadata();
// Then:
assertThat(response.getStatus(), equalTo(200));
assertThat(response.getEntity(), instanceOf(ServerMetadata.class));
final ServerMetadata serverMetadata = (ServerMetadata)response.getEntity();
assertThat(
serverMetadata,
equalTo(new ServerMetadata(
AppInfo.getVersion(),
ServerClusterId.of(KAFKA_CLUSTER_ID, KSQL_SERVICE_ID))
)
);
}
|
public static byte[] serialize(final Object obj) throws IOException {
return SERIALIZER_REF.get().serialize(obj);
}
|
@Test
public void testClassFullyQualifiedNameSerialization() throws IOException {
DeleteRecord deleteRecord = DeleteRecord.create(new HoodieKey("key", "partition"));
List<Pair<DeleteRecord, Long>> deleteRecordList = new ArrayList<>();
deleteRecordList.add(Pair.of(deleteRecord, -1L));
HoodieDeleteBlock deleteBlock = new HoodieDeleteBlock(deleteRecordList, false, Collections.emptyMap());
byte[] firstBytes = SerializationUtils.serialize(deleteBlock);
byte[] secondBytes = SerializationUtils.serialize(deleteBlock);
assertNotSame(firstBytes, secondBytes);
// NOTE: Here we assert that Kryo doesn't optimize out the fully-qualified class-name
// and always writes it out
assertEquals(ByteBuffer.wrap(firstBytes), ByteBuffer.wrap(secondBytes));
}
|
public static void checkMock(Class<?> interfaceClass, AbstractInterfaceConfig config) {
String mock = config.getMock();
if (ConfigUtils.isEmpty(mock)) {
return;
}
String normalizedMock = MockInvoker.normalizeMock(mock);
if (normalizedMock.startsWith(RETURN_PREFIX)) {
normalizedMock = normalizedMock.substring(RETURN_PREFIX.length()).trim();
try {
                // Check whether the mock value is legal; if it is illegal, throw an exception
MockInvoker.parseMockValue(normalizedMock);
} catch (Exception e) {
throw new IllegalStateException(
"Illegal mock return in <dubbo:service/reference ... " + "mock=\"" + mock + "\" />");
}
} else if (normalizedMock.startsWith(THROW_PREFIX)) {
normalizedMock = normalizedMock.substring(THROW_PREFIX.length()).trim();
if (ConfigUtils.isNotEmpty(normalizedMock)) {
try {
// Check whether the mock value is legal
MockInvoker.getThrowable(normalizedMock);
} catch (Exception e) {
throw new IllegalStateException(
"Illegal mock throw in <dubbo:service/reference ... " + "mock=\"" + mock + "\" />");
}
}
} else {
            // Check whether the mock class is an implementation of the interfaceClass, and whether it has a default
            // constructor
MockInvoker.getMockObject(config.getScopeModel().getExtensionDirector(), normalizedMock, interfaceClass);
}
}
|
@Test
void checkMock3() {
Assertions.assertThrows(IllegalStateException.class, () -> {
InterfaceConfig interfaceConfig = new InterfaceConfig();
interfaceConfig.setMock(GreetingMock2.class.getName());
ConfigValidationUtils.checkMock(Greeting.class, interfaceConfig);
});
}
|