focal_method | test_case
---|---|
@Override
public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
// NB: all task management is already handled by:
// org.apache.kafka.streams.processor.internals.StreamsPartitionAssignor.onAssignment
if (assignmentErrorCode.get() == AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.code()) {
log.error("Received error code {}. {}",
AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.codeName(),
AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.description());
taskManager.handleRebalanceComplete();
throw new MissingSourceTopicException("One or more source topics were missing during rebalance");
} else if (assignmentErrorCode.get() == AssignorError.VERSION_PROBING.code()) {
log.info("Received version probing code {}", AssignorError.VERSION_PROBING);
} else if (assignmentErrorCode.get() == AssignorError.ASSIGNMENT_ERROR.code()) {
log.error("Received error code {}", AssignorError.ASSIGNMENT_ERROR);
taskManager.handleRebalanceComplete();
throw new TaskAssignmentException("Hit an unexpected exception during task assignment phase of rebalance");
} else if (assignmentErrorCode.get() == AssignorError.SHUTDOWN_REQUESTED.code()) {
log.error("A Kafka Streams client in this Kafka Streams application is requesting to shutdown the application");
taskManager.handleRebalanceComplete();
streamThread.shutdownToError();
return;
} else if (assignmentErrorCode.get() != AssignorError.NONE.code()) {
log.error("Received unknown error code {}", assignmentErrorCode.get());
throw new TaskAssignmentException("Hit an unrecognized exception during rebalance");
}
streamThread.setState(State.PARTITIONS_ASSIGNED);
streamThread.setPartitionAssignedTime(time.milliseconds());
taskManager.handleRebalanceComplete();
} | @Test
public void shouldThrowTaskAssignmentExceptionOnUnrecognizedErrorCode() {
assignmentErrorCode.set(Integer.MAX_VALUE);
final TaskAssignmentException exception = assertThrows(
TaskAssignmentException.class,
() -> streamsRebalanceListener.onPartitionsAssigned(Collections.emptyList())
);
assertThat(exception.getMessage(), is("Hit an unrecognized exception during rebalance"));
} |
static SerializableFunction<LinkedHashMap<String, Double>,
LinkedHashMap<String, Double>> getProbabilityMapFunction(final RegressionModel.NormalizationMethod normalizationMethod,
final boolean isBinary) {
if (UNSUPPORTED_NORMALIZATION_METHODS.contains(normalizationMethod)) {
throw new KiePMMLInternalException(String.format("Unsupported NormalizationMethod %s",
normalizationMethod));
} else {
return getProbabilityMapFunctionSupported(normalizationMethod, isBinary);
}
} | @Test
void getProbabilityMapUnsupportedFunction() {
KiePMMLClassificationTableFactory.UNSUPPORTED_NORMALIZATION_METHODS.forEach(normalizationMethod -> {
String expected = String.format("Unsupported NormalizationMethod %s",
normalizationMethod);
// assertThatThrownBy fails the test if no exception is thrown, unlike the
// original try/catch pattern, which passed silently in that case
assertThatThrownBy(() -> KiePMMLClassificationTableFactory.getProbabilityMapFunction(normalizationMethod, false))
.isInstanceOf(KiePMMLInternalException.class)
.hasMessage(expected);
assertThatThrownBy(() -> KiePMMLClassificationTableFactory.getProbabilityMapFunction(normalizationMethod, true))
.isInstanceOf(KiePMMLInternalException.class)
.hasMessage(expected);
});
} |
@Override
public boolean isNetworkRequestEnable() {
return false;
} | @Test
public void isNetworkRequestEnable() {
Assert.assertFalse(mSensorsAPI.isNetworkRequestEnable());
} |
public void cancel() {
if (isOpen()) {
LOG.debug("Cancelling stream ({})", mDescription);
mCanceled = true;
mRequestObserver.cancel("Request is cancelled by user.", null);
}
} | @Test
public void cancel() throws Exception {
mStream.cancel();
assertTrue(mStream.isCanceled());
assertFalse(mStream.isOpen());
verify(mRequestObserver).cancel(any(String.class), eq(null));
} |
public TolerantDoubleComparison isNotWithin(double tolerance) {
return new TolerantDoubleComparison() {
@Override
public void of(double expected) {
Double actual = DoubleSubject.this.actual;
checkNotNull(
actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
checkTolerance(tolerance);
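// Fail when the two values are NOT far enough apart, i.e. when they fall within the tolerance of each other.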
if (!notEqualWithinTolerance(actual, expected, tolerance)) {
failWithoutActual(
fact("expected not to be", doubleToString(expected)),
butWas(),
fact("within tolerance", doubleToString(tolerance)));
}
}
};
} | @Test
public void isNotWithinZeroTolerance() {
double max = Double.MAX_VALUE;
assertThatIsNotWithinFails(max, 0.0, max);
assertThatIsNotWithinFails(NEARLY_MAX, 0.0, NEARLY_MAX);
assertThat(max).isNotWithin(0.0).of(NEARLY_MAX);
assertThat(NEARLY_MAX).isNotWithin(0.0).of(max);
double min = Double.MIN_VALUE;
assertThatIsNotWithinFails(min, 0.0, min);
assertThatIsNotWithinFails(OVER_MIN, 0.0, OVER_MIN);
assertThat(min).isNotWithin(0.0).of(OVER_MIN);
assertThat(OVER_MIN).isNotWithin(0.0).of(min);
} |
@Override
public <T> T detach(T attachedObject) {
addExpireListener(commandExecutor);
Map<String, Object> alreadyDetached = new HashMap<String, Object>();
return detach(attachedObject, alreadyDetached);
} | @Test
public void testDetach() {
RLiveObjectService service = redisson.getLiveObjectService();
TestClass ts = new TestClass(new ObjectId(100));
ts.setValue("VALUE");
ts.setCode("CODE");
TestClass merged = service.merge(ts);
assertEquals("VALUE", merged.getValue());
assertEquals("CODE", merged.getCode());
TestClass detach = service.detach(merged);
assertEquals(ts, detach);
} |
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest request) {
String method = request.getMethod();
URI uri = request.getUri();
for (Rule rule : rules) {
if (rule.matches(method, uri)) {
log.log(Level.FINE, () ->
String.format("Request '%h' with method '%s' and uri '%s' matched rule '%s'", request, method, uri, rule.name));
return responseFor(request, rule.name, rule.response);
}
}
return responseFor(request, "default", defaultResponse);
} | @Test
void performs_action_on_first_matching_rule() throws IOException {
RuleBasedFilterConfig config = new RuleBasedFilterConfig.Builder()
.dryrun(false)
.defaultRule(new DefaultRule.Builder()
.action(DefaultRule.Action.Enum.ALLOW))
.rule(new Rule.Builder()
.name("first")
.pathExpressions("/path-to-resource")
.methods(Rule.Methods.Enum.DELETE)
.action(Rule.Action.Enum.BLOCK)
.blockResponseCode(403))
.rule(new Rule.Builder()
.name("second")
.pathExpressions("/path-to-resource")
.methods(Rule.Methods.Enum.GET)
.action(Rule.Action.Enum.BLOCK)
.blockResponseCode(404))
.build();
Metric metric = mock(Metric.class);
RuleBasedRequestFilter filter = new RuleBasedRequestFilter(metric, config);
MockResponseHandler responseHandler = new MockResponseHandler();
filter.filter(request("GET", "http://myserver:80/path-to-resource"), responseHandler);
assertBlocked(responseHandler, metric, 404, "");
} |
public abstract void calculateIV(byte[] initIV, long counter, byte[] IV); | @Test(timeout=120000)
public void testCalculateIV() throws Exception {
JceAesCtrCryptoCodec codec = new JceAesCtrCryptoCodec();
codec.setConf(conf);
SecureRandom sr = new SecureRandom();
byte[] initIV = new byte[16];
byte[] IV = new byte[16];
long iterations = 1000;
long counter = 10000;
// Overflow test, IV: 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff
for(int i = 0; i < 8; i++) {
initIV[8 + i] = (byte)0xff;
}
for(long j = 0; j < counter; j++) {
assertIVCalculation(codec, initIV, j, IV);
}
// Random IV and counter sequence test
for(long i = 0; i < iterations; i++) {
sr.nextBytes(initIV);
for(long j = 0; j < counter; j++) {
assertIVCalculation(codec, initIV, j, IV);
}
}
// Random IV and random counter test
for(long i = 0; i < iterations; i++) {
sr.nextBytes(initIV);
for(long j = 0; j < counter; j++) {
long c = sr.nextLong();
assertIVCalculation(codec, initIV, c, IV);
}
}
} |
@Override
public void disableCaching() {
close(regularDbSession, "regular");
close(batchDbSession, "batch");
regularDbSession.remove();
batchDbSession.remove();
CACHING_ENABLED.remove();
} | @Test
void disableCaching_has_no_effect_if_enabledCaching_has_not_been_called() {
underTest.disableCaching();
verifyNoMoreInteractions(myBatis);
} |
@Override
public void endInput() throws Exception {
endCompaction(Long.MAX_VALUE);
snapshotState(Long.MAX_VALUE);
clearExpiredFiles(Long.MAX_VALUE);
} | @Test
void testEndInput() throws Exception {
Path f0 = newFile(".uncompacted-f0", 3);
Path f1 = newFile(".uncompacted-f1", 4);
Path f2 = newFile(".uncompacted-f2", 2);
FileSystem fs = f0.getFileSystem();
runCompact(
harness -> {
harness.setup();
harness.open();
harness.processElement(new CompactionUnit(0, "p0", Arrays.asList(f0, f1)), 0);
harness.processElement(
new CompactionUnit(1, "p0", Collections.singletonList(f2)), 0);
// test without snapshot
harness.endInput();
// check all compacted file generated
assertThat(fs.exists(new Path(folder, "compacted-f0"))).isTrue();
assertThat(fs.exists(new Path(folder, "compacted-f2"))).isTrue();
// check all temp files have been deleted
assertThat(fs.exists(f0)).isFalse();
assertThat(fs.exists(f1)).isFalse();
assertThat(fs.exists(f2)).isFalse();
});
} |
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
map.put(
MetricCollectors.RESOURCE_LABEL_PREFIX
+ StreamsConfig.APPLICATION_ID_CONFIG,
applicationId
);
// Streams client metrics aren't used in Confluent deployment
possiblyConfigureConfluentTelemetry(map);
return Collections.unmodifiableMap(map);
} | @Test
public void shouldSetMonitoringInterceptorConfigProperties() {
final KsqlConfig ksqlConfig = new KsqlConfig(Collections.singletonMap(
"confluent.monitoring.interceptor.topic", "foo"));
final Object result
= ksqlConfig.getKsqlStreamConfigProps().get("confluent.monitoring.interceptor.topic");
assertThat(result, equalTo("foo"));
} |
public static List<URL> parseURLs(String address, Map<String, String> defaults) {
if (StringUtils.isEmpty(address)) {
throw new IllegalArgumentException("Address is not allowed to be empty, please re-enter.");
}
String[] addresses = REGISTRY_SPLIT_PATTERN.split(address);
if (addresses == null || addresses.length == 0) {
throw new IllegalArgumentException(
"Addresses is not allowed to be empty, please re-enter."); // here won't be empty
}
List<URL> registries = new ArrayList<>();
for (String addr : addresses) {
registries.add(parseURL(addr, defaults));
}
return registries;
} | @Test
void testParseUrlsAddressNull() {
String exceptionMessage = "Address is not allowed to be empty, please re-enter.";
IllegalArgumentException illegalArgumentException = assertThrows(IllegalArgumentException.class, () -> UrlUtils.parseURLs(null, null));
assertEquals(exceptionMessage, illegalArgumentException.getMessage());
} |
@ApiOperation(value = "Get paginated list of API action resources")
@GetMapping("/page")
public ApiPageResult<IPage<BaseAction>> page(PageForm pageForm){
return ApiPageResult.success(baseActionService.page(new Page(pageForm.getPageNum(), pageForm.getPageSize())));
} | @Test
void page() {
} |
@Override
public boolean shouldFire(TriggerStateMachine.TriggerContext context) {
return false;
} | @Test
public void falseAfterEndOfWindow() throws Exception {
triggerTester.injectElements(TimestampedValue.of(1, new Instant(1)));
IntervalWindow window =
new IntervalWindow(new Instant(0), new Instant(0).plus(Duration.standardMinutes(5)));
assertThat(triggerTester.shouldFire(window), is(false));
triggerTester.advanceInputWatermark(BoundedWindow.TIMESTAMP_MAX_VALUE);
assertThat(triggerTester.shouldFire(window), is(false));
} |
public void setSQLServerInstance( String instanceName ) {
// This setting is also covered/persisted by the JDBC option MS SQL Server / instancename / <somevalue>
// We want to set <somevalue>
// --> MSSQL.instancename
if ( ( instanceName != null ) && ( instanceName.length() > 0 ) ) {
addExtraOption( getPluginId(), "instance", instanceName );
}
} | @Test
public void setSQLServerInstanceTest() {
DatabaseMeta dbmeta = new DatabaseMeta();
DatabaseInterface mssqlServerDatabaseMeta = new MSSQLServerDatabaseMeta();
mssqlServerDatabaseMeta.setPluginId( "MSSQL" );
DatabaseInterface mssqlServerNativeDatabaseMeta = new MSSQLServerNativeDatabaseMeta();
mssqlServerNativeDatabaseMeta.setPluginId( "MSSQLNATIVE" );
dbmeta.setDatabaseInterface( mssqlServerDatabaseMeta );
dbmeta.setSQLServerInstance( "" );
assertEquals( dbmeta.getSQLServerInstance(), null );
dbmeta.setSQLServerInstance( "instance1" );
assertEquals( dbmeta.getSQLServerInstance(), "instance1" );
dbmeta.setDatabaseInterface( mssqlServerNativeDatabaseMeta );
dbmeta.setSQLServerInstance( "" );
assertEquals( dbmeta.getSQLServerInstance(), null );
dbmeta.setSQLServerInstance( "instance1" );
assertEquals( dbmeta.getSQLServerInstance(), "instance1" );
} |
GooglePubsubLiteConsumer(GooglePubsubLiteEndpoint endpoint, Processor processor) {
super(endpoint, processor);
this.endpoint = endpoint;
this.processor = processor;
this.subscribers = Collections.synchronizedList(new LinkedList<>());
String loggerId = endpoint.getLoggerId();
if (Strings.isNullOrEmpty(loggerId)) {
loggerId = this.getClass().getName();
}
localLog = LoggerFactory.getLogger(loggerId);
} | @Test
public void testGooglePubsubLiteConsumer() {
when(endpoint.getCamelContext()).thenReturn(context);
GooglePubsubLiteConsumer consumer = new GooglePubsubLiteConsumer(endpoint, processor);
assertNotNull(consumer);
} |
public Frequency multiply(long value) {
return new Frequency(this.frequency * value);
} | @Test
public void testMultiply() {
Frequency frequency = Frequency.ofMHz(1000);
long factor = 5;
Frequency expected = Frequency.ofGHz(5);
assertThat(frequency.multiply(factor), is(expected));
} |
@Nullable
public PasswordAlgorithm forPassword(String hashedPassword) {
for (PasswordAlgorithm passwordAlgorithm : passwordAlgorithms.values()) {
if (passwordAlgorithm.supports(hashedPassword))
return passwordAlgorithm;
}
return null;
} | @Test
public void testForPasswordShouldReturnSecondAlgorithm() throws Exception {
when(passwordAlgorithm1.supports(anyString())).thenReturn(false);
when(passwordAlgorithm2.supports(anyString())).thenReturn(true);
final PasswordAlgorithmFactory passwordAlgorithmFactory = new PasswordAlgorithmFactory(passwordAlgorithms, passwordAlgorithm2);
assertThat(passwordAlgorithmFactory.forPassword("foobar")).isEqualTo(passwordAlgorithm2);
} |
public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException {
if ( dbMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) );
}
if ( rsMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) );
}
try {
return dbMetaData.getDriverMajorVersion() > 3 ? rsMetaData.getColumnLabel( index ) : rsMetaData.getColumnName( index );
} catch ( Exception e ) {
throw new KettleDatabaseException( String.format( "%s: %s", BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e );
}
} | @Test
public void testGetLegacyColumnNameDriverLessOrEqualToThreeFieldNoAliasText() throws Exception {
DatabaseMetaData databaseMetaData = mock( DatabaseMetaData.class );
doReturn( 3 ).when( databaseMetaData ).getDriverMajorVersion();
assertEquals( "NoAliasText", new MySQLDatabaseMeta().getLegacyColumnName( databaseMetaData, getResultSetMetaData(), 6 ) );
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
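// For each sample, accumulate the shrunken tree outputs so prediction[i][j] holds the fit after the first i+1 trees.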
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testCpuLS() {
test(Loss.ls(), "CPU", CPU.formula, CPU.data, 60.5335);
} |
public boolean hasRequiredResourceManagers()
{
return currentResourceManagerCount >= resourceManagerMinCountActive;
} | @Test
public void testHasRequiredResourceManagers()
throws InterruptedException
{
assertFalse(monitor.hasRequiredResourceManagers());
for (int i = numResourceManagers.get(); i < DESIRED_RESOURCE_MANAGER_COUNT_ACTIVE; i++) {
addResourceManager(nodeManager);
}
assertTrue(monitor.hasRequiredResourceManagers());
} |
public static ArrayByteIterable comparableSetToEntry(@NotNull final ComparableSet object) {
return BINDING.objectToEntry(object);
} | @Test(expected = ExodusException.class)
public void empty() {
ComparableSetBinding.comparableSetToEntry(new ComparableSet());
} |
String offsetSyncsTopic() {
String otherClusterAlias = SOURCE_CLUSTER_ALIAS_DEFAULT.equals(offsetSyncsTopicLocation())
? targetClusterAlias()
: sourceClusterAlias();
return replicationPolicy().offsetSyncsTopic(otherClusterAlias);
} | @Test
public void testOffsetSyncsTopic() {
// Invalid location
Map<String, String> connectorProps = makeProps("offset-syncs.topic.location", "something");
assertThrows(ConfigException.class, () -> new MirrorSourceConfig(connectorProps));
connectorProps.put("offset-syncs.topic.location", "source");
MirrorSourceConfig config = new MirrorSourceConfig(connectorProps);
assertEquals("mm2-offset-syncs.target2.internal", config.offsetSyncsTopic());
connectorProps.put("offset-syncs.topic.location", "target");
config = new MirrorSourceConfig(connectorProps);
assertEquals("mm2-offset-syncs.source1.internal", config.offsetSyncsTopic());
// Default to source
connectorProps.remove("offset-syncs.topic.location");
config = new MirrorSourceConfig(connectorProps);
assertEquals("mm2-offset-syncs.target2.internal", config.offsetSyncsTopic());
} |
@Override
public int hashCode()
{
return HashCodeBuilder.reflectionHashCode(this);
} | @Test
public void testHashCode() {
PathSpecSet pss1 = PathSpecSet.of(THREE_FIELD_MODEL_FIELD1, THREE_FIELD_MODEL_FIELD2);
PathSpecSet pss2 = PathSpecSet.of(THREE_FIELD_MODEL_FIELD1, THREE_FIELD_MODEL_FIELD2);
Assert.assertEquals(pss1.hashCode(), pss2.hashCode());
} |
public void process() {
LOGGER.debug("Beginning Composer lock processing");
try {
final JsonObject composer = jsonReader.readObject();
if (composer.containsKey("packages")) {
LOGGER.debug("Found packages");
final JsonArray packages = composer.getJsonArray("packages");
for (JsonObject pkg : packages.getValuesAs(JsonObject.class)) {
if (pkg.containsKey("name")) {
final String groupName = pkg.getString("name");
if (groupName.indexOf('/') >= 0 && groupName.indexOf('/') <= groupName.length() - 1) {
if (pkg.containsKey("version")) {
final String group = groupName.substring(0, groupName.indexOf('/'));
final String project = groupName.substring(groupName.indexOf('/') + 1);
String version = pkg.getString("version");
// Some version numbers begin with v, which doesn't end up matching CPEs
if (version.startsWith("v")) {
version = version.substring(1);
}
LOGGER.debug("Got package {}/{}/{}", group, project, version);
composerDependencies.add(new ComposerDependency(group, project, version));
} else {
LOGGER.debug("Group/package {} does not have a version", groupName);
}
} else {
LOGGER.debug("Got a dependency with no name");
}
}
}
}
} catch (JsonParsingException jsonpe) {
throw new ComposerException("Error parsing stream", jsonpe);
} catch (JsonException jsone) {
throw new ComposerException("Error reading stream", jsone);
} catch (IllegalStateException ise) {
throw new ComposerException("Illegal state in composer stream", ise);
} catch (ClassCastException cce) {
throw new ComposerException("Not exactly composer lock", cce);
}
} | @Test(expected = ComposerException.class)
public void testNotPackagesArray() throws Exception {
String input = "{\"packages\":\"eleventy\"}";
ComposerLockParser clp = new ComposerLockParser(new ByteArrayInputStream(input.getBytes(Charset.defaultCharset())));
clp.process();
} |
public synchronized void addAll(T... nodes) {
for (T node : nodes) {
addInternal(node, defaultReplication);
}
refreshTable();
} | @Test
@Ignore("Helper test for performance, no assertion")
public void speed() {
Map<String, Integer> data = new CopyOnWriteMap.Hash<>();
for (int i = 0; i < 1000; i++) {
data.put("node" + i, 100);
}
data.put("tail", 100);
long start = System.currentTimeMillis();
for (int j = 0; j < 10; j++) {
ConsistentHash<String> b = new ConsistentHash<>();
b.addAll(data);
}
System.out.println(System.currentTimeMillis() - start);
} |
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
boolean useJavaCC = "--useJavaCC".equals(getArg(args, 0, null));
if (args.size() > (useJavaCC ? 3 : 2)
|| (args.size() == 1 && (args.get(0).equals("--help") || args.get(0).equals("-help")))) {
err.println("Usage: idl [--useJavaCC] [in [out]]");
err.println();
err.println("If an output path is not specified, outputs to stdout.");
err.println("If no input or output is specified, takes input from");
err.println("stdin and outputs to stdout.");
err.println("The special path \"-\" may also be specified to refer to");
err.println("stdin and stdout.");
return -1;
}
String inputName = getArg(args, useJavaCC ? 1 : 0, "-");
File inputFile = "-".equals(inputName) ? null : new File(inputName);
String outputName = getArg(args, useJavaCC ? 2 : 1, "-");
File outputFile = "-".equals(outputName) ? null : new File(outputName);
Schema m = null;
Protocol p;
if (useJavaCC) {
// noinspection deprecation
try (Idl parser = new Idl(inputFile)) {
p = parser.CompilationUnit();
for (String warning : parser.getWarningsAfterParsing()) {
err.println("Warning: " + warning);
}
}
} else {
IdlReader parser = new IdlReader();
IdlFile idlFile = inputFile == null ? parser.parse(in) : parser.parse(inputFile.toPath());
for (String warning : idlFile.getWarnings()) {
err.println("Warning: " + warning);
}
p = idlFile.getProtocol();
m = idlFile.getMainSchema();
}
PrintStream parseOut = out;
if (outputFile != null) {
parseOut = new PrintStream(Files.newOutputStream(outputFile.toPath()));
}
if (m == null && p == null) {
err.println("Error: the IDL file does not contain a schema nor a protocol.");
return 1;
}
try {
parseOut.print(m == null ? p.toString(true) : m.toString(true));
} finally {
if (parseOut != out) // Close only the newly created FileOutputStream
parseOut.close();
}
return 0;
} | @Test
public void testWriteIdlAsSchema() throws Exception {
String idl = "src/test/idl/schema.avdl";
String protocol = "src/test/idl/schema.avsc";
String outfile = "target/test-schema.avsc";
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
List<String> arglist = Arrays.asList(idl, outfile);
new IdlTool().run(null, null, new PrintStream(buffer), arglist);
assertEquals(readFileAsString(protocol), readFileAsString(outfile));
String warnings = readPrintStreamBuffer(buffer);
assertEquals("Warning: Line 1, char 1: Ignoring out-of-place documentation comment."
+ "\nDid you mean to use a multiline comment ( /* ... */ ) instead?", warnings);
} |
public static String kbToHumanReadable(long kb) {
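// Input is a size in kilobytes; repeatedly divide by KB (1024) while stepping through the suffix characters in UNITS (k, m, g, t, per the tests below).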
int unit = 1;
while (kb >= KB && unit < UNITS.length() - 1) {
kb /= KB;
unit++;
}
String kbString = String.valueOf(kb);
return kbString + UNITS.charAt(unit);
} | @Test
void kbToHumanReadable() {
Assertions.assertEquals("0k", OsUtils.kbToHumanReadable(0L));
Assertions.assertEquals("1001k", OsUtils.kbToHumanReadable(1001L));
Assertions.assertEquals("1m", OsUtils.kbToHumanReadable(1024L));
Assertions.assertEquals("1023m", OsUtils.kbToHumanReadable(1024L * 1024L - 1L));
Assertions.assertEquals("1g", OsUtils.kbToHumanReadable(1024L * 1024L));
Assertions.assertEquals("1t", OsUtils.kbToHumanReadable(1024L * 1024L * 1024L));
} |
@Override
public void onHeartbeatSuccess(ShareGroupHeartbeatResponseData response) {
if (response.errorCode() != Errors.NONE.code()) {
String errorMessage = String.format(
"Unexpected error in Heartbeat response. Expected no error, but received: %s",
Errors.forCode(response.errorCode())
);
throw new IllegalArgumentException(errorMessage);
}
MemberState state = state();
if (state == MemberState.LEAVING) {
log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
"already leaving the group.", memberId, memberEpoch);
return;
}
if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
"to leave the group and completed the leave operation. ", memberId, memberEpoch);
return;
}
if (isNotInGroup()) {
log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
" so it's not a member of the group. ", memberId, state);
return;
}
// Update the group member id label in the client telemetry reporter if the member id has
// changed. Initially the member id is empty, and it is updated when the member joins the
// group. This is done here to avoid updating the label on every heartbeat response. Also
// check if the member id is null, as the schema defines it as nullable.
if (response.memberId() != null && !response.memberId().equals(memberId)) {
clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
}
this.memberId = response.memberId();
updateMemberEpoch(response.memberEpoch());
ShareGroupHeartbeatResponseData.Assignment assignment = response.assignment();
if (assignment != null) {
if (!state.canHandleNewAssignment()) {
// New assignment received but member is in a state where it cannot take new
// assignments (ex. preparing to leave the group)
log.debug("Ignoring new assignment {} received from server because member is in {} state.",
assignment, state);
return;
}
Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
assignment.topicPartitions().forEach(topicPartition -> newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
processAssignmentReceived(newAssignment);
}
} | @Test
public void testTransitionToReconcilingIfEmptyAssignmentReceived() {
ShareMembershipManager membershipManager = createMembershipManagerJoiningGroup();
assertEquals(MemberState.JOINING, membershipManager.state());
ShareGroupHeartbeatResponse responseWithoutAssignment = createShareGroupHeartbeatResponse(new Assignment());
membershipManager.onHeartbeatSuccess(responseWithoutAssignment.data());
assertEquals(MemberState.RECONCILING, membershipManager.state());
ShareGroupHeartbeatResponse responseWithAssignment =
createShareGroupHeartbeatResponse(createAssignment(true));
membershipManager.onHeartbeatSuccess(responseWithAssignment.data());
assertEquals(MemberState.RECONCILING, membershipManager.state());
} |
@Override
public void configure(String encodedAuthParamString) {
checkArgument(isNotBlank(encodedAuthParamString), "authParams must not be empty");
try {
setAuthParams(AuthenticationUtil.configureFromJsonString(encodedAuthParamString));
} catch (IOException e) {
throw new IllegalArgumentException("Failed to parse authParams", e);
}
} | @Test
public void testCopperArgos() throws Exception {
@Cleanup
AuthenticationAthenz caAuth = new AuthenticationAthenz();
Field ztsClientField = caAuth.getClass().getDeclaredField("ztsClient");
ztsClientField.setAccessible(true);
ztsClientField.set(caAuth, new MockZTSClient("dummy"));
ObjectMapper jsonMapper = ObjectMapperFactory.create();
Map<String, String> authParamsMap = new HashMap<>();
authParamsMap.put("providerDomain", "test_provider");
authParamsMap.put("ztsUrl", "https://localhost:4443/");
try {
caAuth.configure(jsonMapper.writeValueAsString(authParamsMap));
fail("Should not succeed if some required parameters are missing");
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
authParamsMap.put("x509CertChain", "data:application/x-pem-file;base64,aW52YWxpZAo=");
try {
caAuth.configure(jsonMapper.writeValueAsString(authParamsMap));
fail("'data' scheme url should not be accepted");
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
authParamsMap.put("x509CertChain", "file:./src/test/resources/copper_argos_client.crt");
try {
caAuth.configure(jsonMapper.writeValueAsString(authParamsMap));
fail("Should not succeed if 'privateKey' or 'caCert' is missing");
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
authParamsMap.put("privateKey", "./src/test/resources/copper_argos_client.key");
authParamsMap.put("caCert", "./src/test/resources/copper_argos_ca.crt");
caAuth.configure(jsonMapper.writeValueAsString(authParamsMap));
Field x509CertChainPathField = caAuth.getClass().getDeclaredField("x509CertChainPath");
x509CertChainPathField.setAccessible(true);
String actualX509CertChainPath = (String) x509CertChainPathField.get(caAuth);
assertFalse(actualX509CertChainPath.startsWith("file:"));
assertFalse(actualX509CertChainPath.startsWith("./"));
assertTrue(actualX509CertChainPath.endsWith("/src/test/resources/copper_argos_client.crt"));
Field privateKeyPathField = caAuth.getClass().getDeclaredField("privateKeyPath");
privateKeyPathField.setAccessible(true);
String actualPrivateKeyPath = (String) privateKeyPathField.get(caAuth);
assertFalse(actualPrivateKeyPath.startsWith("file:"));
assertFalse(actualPrivateKeyPath.startsWith("./"));
assertTrue(actualPrivateKeyPath.endsWith("/src/test/resources/copper_argos_client.key"));
Field caCertPathField = caAuth.getClass().getDeclaredField("caCertPath");
caCertPathField.setAccessible(true);
String actualCaCertPath = (String) caCertPathField.get(caAuth);
assertFalse(actualCaCertPath.startsWith("file:"));
assertFalse(actualCaCertPath.startsWith("./"));
assertTrue(actualCaCertPath.endsWith("/src/test/resources/copper_argos_ca.crt"));
} |
public void collect(K key, V value, int partition) throws IOException {
kvPusher.collect(key, value, partition);
} | @Test
public void testCollect() throws IOException {
this.handler = new NativeCollectorOnlyHandler(taskContext, nativeHandler, pusher, combiner);
handler.collect(new BytesWritable(), new BytesWritable(), 100);
handler.close();
handler.close();
Mockito.verify(pusher, Mockito.times(1)).collect(any(BytesWritable.class),
any(BytesWritable.class), anyInt());
Mockito.verify(pusher, Mockito.times(1)).close();
Mockito.verify(combiner, Mockito.times(1)).close();
Mockito.verify(nativeHandler, Mockito.times(1)).close();
} |
File putIfAbsent(String userId, boolean saveToDisk) throws IOException {
String idKey = getIdStrategy().keyFor(userId);
String directoryName = idToDirectoryNameMap.get(idKey);
File directory = null;
if (directoryName == null) {
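// Double-checked locking: re-read the map under the lock so only one thread creates the directory for a new user.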
synchronized (this) {
directoryName = idToDirectoryNameMap.get(idKey);
if (directoryName == null) {
directory = createDirectoryForNewUser(userId);
directoryName = directory.getName();
idToDirectoryNameMap.put(idKey, directoryName);
if (saveToDisk) {
save();
}
}
}
}
return directory == null ? new File(usersDirectory, directoryName) : directory;
} | @Test
public void testDirectoryFormatSingleCharacter() throws IOException {
UserIdMapper mapper = createUserIdMapper(IdStrategy.CASE_INSENSITIVE);
String user1 = ".";
File directory1 = mapper.putIfAbsent(user1, true);
assertThat(directory1.getName(), startsWith("_"));
} |
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
if (statsEnabled) {
stats.log(deviceStateServiceMsg);
}
stateService.onQueueMsg(deviceStateServiceMsg, callback);
} | @Test
public void givenProcessingSuccess_whenForwardingActivityMsgToStateService_thenOnSuccessCallbackIsCalled() {
// GIVEN
var activityMsg = TransportProtos.DeviceActivityProto.newBuilder()
.setTenantIdMSB(tenantId.getId().getMostSignificantBits())
.setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
.setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
.setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
.setLastActivityTime(time)
.build();
doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(activityMsg, tbCallbackMock);
// WHEN
defaultTbCoreConsumerServiceMock.forwardToStateService(activityMsg, tbCallbackMock);
// THEN
then(stateServiceMock).should().onDeviceActivity(tenantId, deviceId, time);
then(tbCallbackMock).should().onSuccess();
then(tbCallbackMock).should(never()).onFailure(any());
} |
@Override
public void updateLoadBalancer(KubevirtLoadBalancer lb) {
checkNotNull(lb, ERR_NULL_LOAD_BALANCER);
checkArgument(!Strings.isNullOrEmpty(lb.name()), ERR_NULL_LOAD_BALANCER_NAME);
kubevirtLoadBalancerStore.updateLoadBalancer(lb);
log.info(String.format(MSG_LOAD_BALANCER, lb.name(), MSG_UPDATED));
} | @Test(expected = IllegalArgumentException.class)
public void testUpdateUnregisteredLoadBalancer() {
target.updateLoadBalancer(LB);
} |
public Map<String, String> getExtendData() {
return extendData;
} | @Test
void testGetExtendData() {
Map<String, String> extendData = serviceMetadata.getExtendData();
assertNotNull(extendData);
assertEquals(0, extendData.size());
} |
@Override
public void getPipeline(
GetJobPipelineRequest request, StreamObserver<GetJobPipelineResponse> responseObserver) {
LOG.trace("{} {}", GetJobPipelineRequest.class.getSimpleName(), request);
String invocationId = request.getJobId();
try {
JobInvocation invocation = getInvocation(invocationId);
RunnerApi.Pipeline pipeline = invocation.getPipeline();
GetJobPipelineResponse response =
GetJobPipelineResponse.newBuilder().setPipeline(pipeline).build();
responseObserver.onNext(response);
responseObserver.onCompleted();
} catch (StatusRuntimeException | StatusException e) {
responseObserver.onError(e);
} catch (Exception e) {
String errMessage =
String.format("Encountered Unexpected Exception for Invocation %s", invocationId);
LOG.error(errMessage, e);
responseObserver.onError(Status.INTERNAL.withCause(e).asException());
}
} | @Test
public void testGetPipelineIsSuccessful() throws Exception {
prepareAndRunJob();
JobApi.GetJobPipelineRequest request =
JobApi.GetJobPipelineRequest.newBuilder().setJobId(TEST_JOB_ID).build();
RecordingObserver<JobApi.GetJobPipelineResponse> recorder = new RecordingObserver<>();
service.getPipeline(request, recorder);
assertThat(recorder.isSuccessful(), is(true));
assertThat(recorder.values, hasSize(1));
JobApi.GetJobPipelineResponse response = recorder.values.get(0);
assertThat(response.getPipeline(), is(TEST_PIPELINE));
} |
@Override
public Http2Headers decodeHeaders(int streamId, ByteBuf headerBlock) throws Http2Exception {
try {
final Http2Headers headers = newHeaders();
hpackDecoder.decode(streamId, headerBlock, headers, validateHeaders);
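// Exponentially weighted moving average of observed header counts (presumably used by newHeaders() to pre-size header objects).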
headerArraySizeAccumulator = HEADERS_COUNT_WEIGHT_NEW * headers.size() +
HEADERS_COUNT_WEIGHT_HISTORICAL * headerArraySizeAccumulator;
return headers;
} catch (Http2Exception e) {
throw e;
} catch (Throwable e) {
// Default handler for any other types of errors that may have occurred. For example,
// the Header builder throws IllegalArgumentException if the key or value was invalid
// for any reason (e.g. the key was an invalid pseudo-header).
throw connectionError(COMPRESSION_ERROR, e, "Error decoding headers: %s", e.getMessage());
}
} | @Test
public void decodingTrailersTeHeaderMustNotFailValidation() throws Exception {
// The TE header is expressly allowed to have the value "trailers".
ByteBuf buf = null;
try {
buf = encode(b(":method"), b("GET"), b("te"), b("trailers"));
Http2Headers headers = decoder.decodeHeaders(1, buf); // This must not throw.
assertThat(headers.get(HttpHeaderNames.TE)).isEqualToIgnoringCase(HttpHeaderValues.TRAILERS);
} finally {
ReferenceCountUtil.release(buf);
}
} |
public SimpleMain(CamelContext camelContext) {
super(camelContext);
} | @Test
public void testSimpleMain() throws Exception {
List<String> events = new ArrayList<>();
CamelContext context = new DefaultCamelContext();
SimpleMain main = new SimpleMain(context);
main.configure().addRoutesBuilder(new MyRouteBuilder());
main.addMainListener(new MainListenerSupport() {
@Override
public void beforeInitialize(BaseMainSupport main) {
events.add("beforeInitialize");
}
@Override
public void beforeConfigure(BaseMainSupport main) {
events.add("beforeConfigure");
}
@Override
public void afterConfigure(BaseMainSupport main) {
events.add("afterConfigure");
}
@Override
public void beforeStart(BaseMainSupport main) {
events.add("beforeStart");
}
@Override
public void afterStart(BaseMainSupport main) {
events.add("afterStart");
}
@Override
public void beforeStop(BaseMainSupport main) {
events.add("beforeStop");
}
@Override
public void afterStop(BaseMainSupport main) {
events.add("afterStop");
}
});
main.start();
try {
assertSame(context, main.getCamelContext());
MockEndpoint endpoint = context.getEndpoint("mock:results", MockEndpoint.class);
endpoint.expectedMinimumMessageCount(1);
context.createProducerTemplate().sendBody("direct:start", "<message>1</message>");
endpoint.assertIsSatisfied();
} finally {
main.stop();
}
assertTrue(events.contains("beforeInitialize"));
assertTrue(events.contains("beforeConfigure"));
assertTrue(events.contains("afterConfigure"));
assertTrue(events.contains("beforeStart"));
assertTrue(events.contains("afterStart"));
assertTrue(events.contains("beforeStop"));
assertTrue(events.contains("afterStop"));
} |
public static <T, V> Collection<V> flatCollect(
Iterable<T> iterable,
Function<? super T, ? extends Iterable<V>> function)
{
return FJIterate.flatCollect(iterable, function, false);
} | @Test
public void flatCollect()
{
this.iterables.each(this::basicFlatCollect);
} |
public boolean contains(int value) {
return contains(Util.toUnsignedLong(value));
} | @Test
public void testContains() {
// empty
Assert.assertFalse(emptyBitmap.contains(1));
// single value
Assert.assertTrue(singleBitmap.contains(1));
Assert.assertFalse(singleBitmap.contains(2));
// bitmap
Assert.assertTrue(largeBitmap.contains(1));
Assert.assertFalse(largeBitmap.contains(100));
// set
Assert.assertTrue(mediumBitmap.contains(1));
Assert.assertFalse(mediumBitmap.contains(20));
} |
public final boolean checkIfExecuted(String input) {
return this.validator.isExecuted(Optional.of(ByteString.copyFromUtf8(input)));
} | @Test
public void checkIfExecuted_withOptional_executesValidator() {
TestValidatorIsCalledValidator testValidator = new TestValidatorIsCalledValidator();
Payload payload = new Payload("my-payload", testValidator, PAYLOAD_ATTRIBUTES, CONFIG);
payload.checkIfExecuted(Optional.empty());
assertTrue(testValidator.wasCalled);
} |
@Override
public Integer doCall() throws Exception {
CommandLineHelper.createPropertyFile();
if (configuration.split("=").length == 1) {
printer().println("Configuration parameter not in key=value format");
return 1;
}
CommandLineHelper.loadProperties(properties -> {
String key = StringHelper.before(configuration, "=").trim();
String value = StringHelper.after(configuration, "=").trim();
properties.put(key, value);
CommandLineHelper.storeProperties(properties, printer());
});
return 0;
} | @Test
public void shouldOverwriteConfig() throws Exception {
UserConfigHelper.createUserConfig("foo=bar");
ConfigSet command = new ConfigSet(new CamelJBangMain().withPrinter(printer));
command.configuration = "foo=baz";
command.doCall();
Assertions.assertEquals("", printer.getOutput());
CommandLineHelper.loadProperties(properties -> {
Assertions.assertEquals(1, properties.size());
Assertions.assertEquals("baz", properties.get("foo"));
});
} |
public synchronized Counter findCounter(String group, String name) {
if (name.equals("MAP_INPUT_BYTES")) {
LOG.warn("Counter name MAP_INPUT_BYTES is deprecated. " +
"Use FileInputFormatCounters as group name and " +
" BYTES_READ as counter name instead");
return findCounter(FileInputFormatCounter.BYTES_READ);
}
String newGroupKey = getNewGroupKey(group);
if (newGroupKey != null) {
group = newGroupKey;
}
return getGroup(group).getCounterForName(name);
} | @SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
GroupFactory groupFactory = new GroupFactoryForTest();
FrameworkGroupFactory frameworkGroupFactory =
groupFactory.newFrameworkGroupFactory(JobCounter.class);
Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");
FrameworkCounterGroup counterGroup =
(FrameworkCounterGroup) group.getUnderlyingGroup();
org.apache.hadoop.mapreduce.Counter count1 =
counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
Assert.assertNotNull(count1);
// Verify no exception get thrown when finding an unknown counter
org.apache.hadoop.mapreduce.Counter count2 =
counterGroup.findCounter("Unknown");
Assert.assertNull(count2);
} |
@Override
public void deleteProject(Long id) {
// validate that the project exists
validateProjectExists(id);
// delete it
goViewProjectMapper.deleteById(id);
} | @Test
public void testDeleteProject_success() {
// mock data
GoViewProjectDO dbGoViewProject = randomPojo(GoViewProjectDO.class);
goViewProjectMapper.insert(dbGoViewProject);// @Sql: insert an existing record first
// prepare parameters
Long id = dbGoViewProject.getId();
// invoke
goViewProjectService.deleteProject(id);
// verify the data no longer exists
assertNull(goViewProjectMapper.selectById(id));
} |
private RemotingCommand getProducerConnectionList(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
final GetProducerConnectionListRequestHeader requestHeader =
(GetProducerConnectionListRequestHeader) request.decodeCommandCustomHeader(GetProducerConnectionListRequestHeader.class);
ProducerConnection bodydata = new ProducerConnection();
Map<Channel, ClientChannelInfo> channelInfoHashMap =
this.brokerController.getProducerManager().getGroupChannelTable().get(requestHeader.getProducerGroup());
if (channelInfoHashMap != null) {
Iterator<Map.Entry<Channel, ClientChannelInfo>> it = channelInfoHashMap.entrySet().iterator();
while (it.hasNext()) {
ClientChannelInfo info = it.next().getValue();
Connection connection = new Connection();
connection.setClientId(info.getClientId());
connection.setLanguage(info.getLanguage());
connection.setVersion(info.getVersion());
connection.setClientAddr(RemotingHelper.parseChannelRemoteAddr(info.getChannel()));
bodydata.getConnectionSet().add(connection);
}
byte[] body = bodydata.encode();
response.setBody(body);
response.setCode(ResponseCode.SUCCESS);
response.setRemark(null);
return response;
}
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark("the producer group[" + requestHeader.getProducerGroup() + "] not exist");
return response;
} | @Test
public void testGetProducerConnectionList() throws RemotingCommandException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_PRODUCER_CONNECTION_LIST, null);
request.addExtField("producerGroup", "ProducerGroupId");
RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
} |
public static <T> T uncheckIOExceptions(CallableRaisingIOE<T> call) {
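// unchecked() converts IOExceptions raised by the callable into UncheckedIOExceptions; per the test below, an already-unchecked exception propagates unchanged.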
return call.unchecked();
} | @Test
public void testUncheckIOExceptionsUnchecked() throws Throwable {
final UncheckedIOException raised = new UncheckedIOException(
new IOException("text"));
final UncheckedIOException ex = intercept(UncheckedIOException.class, "text", () ->
uncheckIOExceptions(() -> {
throw raised;
}));
Assertions.assertThat(ex)
.describedAs("Propagated Exception %s", ex)
.isSameAs(raised);
} |
protected void updateCurrentDir() {
String prevCurrentDir = variables.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY );
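// Precedence for the new current directory: repository directory when attached to a repository, else the filename directory when loaded from a file, else keep the existing value.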
String currentDir = variables.getVariable(
repository != null
? Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY
: filename != null
? Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY
: Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY );
variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, currentDir );
fireCurrentDirectoryChanged( prevCurrentDir, currentDir );
} | @Test
public void testUpdateCurrentDirWithFilename( ) {
JobMeta jobMetaTest = new JobMeta( );
jobMetaTest.setFilename( "hasFilename" );
jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "Original value defined at run execution" );
jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "file:///C:/SomeFilenameDirectory" );
jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, "/SomeRepDirectory" );
jobMetaTest.updateCurrentDir();
assertEquals( "file:///C:/SomeFilenameDirectory", jobMetaTest.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) );
} |
public int format(String... args) throws UsageException {
CommandLineOptions parameters = processArgs(args);
if (parameters.version()) {
errWriter.println(versionString());
return 0;
}
if (parameters.help()) {
throw new UsageException();
}
JavaFormatterOptions options =
JavaFormatterOptions.builder()
.style(parameters.aosp() ? Style.AOSP : Style.GOOGLE)
.formatJavadoc(parameters.formatJavadoc())
.build();
if (parameters.stdin()) {
return formatStdin(parameters, options);
} else {
return formatFiles(parameters, options);
}
} | @Test
public void importRemoveErrorParseError() throws Exception {
Locale backupLocale = Locale.getDefault();
try {
Locale.setDefault(Locale.ROOT);
String[] input = {
"import java.util.ArrayList;", //
"import java.util.List;",
"class Test {",
"}}",
};
StringWriter out = new StringWriter();
StringWriter err = new StringWriter();
Main main =
new Main(
new PrintWriter(out, true),
new PrintWriter(err, true),
new ByteArrayInputStream(joiner.join(input).getBytes(UTF_8)));
assertThat(main.format("-")).isEqualTo(1);
assertThat(err.toString()).contains("<stdin>:4:3: error: class, interface");
} finally {
Locale.setDefault(backupLocale);
}
} |
public static <T> RetryTransformer<T> of(Retry retry) {
return new RetryTransformer<>(retry);
} | @Test
public void returnOnCompleteUsingSingle() throws InterruptedException {
RetryConfig config = retryConfig();
Retry retry = Retry.of("testName", config);
given(helloWorldService.returnHelloWorld())
.willReturn("Hello world")
.willThrow(new HelloWorldException())
.willThrow(new HelloWorldException())
.willReturn("Hello world");
Single.fromCallable(helloWorldService::returnHelloWorld)
.compose(RetryTransformer.of(retry))
.test()
.await()
.assertValueCount(1)
.assertValues("Hello world")
.assertComplete();
Single.fromCallable(helloWorldService::returnHelloWorld)
.compose(RetryTransformer.of(retry))
.test()
.await()
.assertValueCount(1)
.assertValues("Hello world")
.assertComplete();
then(helloWorldService).should(times(4)).returnHelloWorld();
Retry.Metrics metrics = retry.getMetrics();
assertThat(metrics.getNumberOfSuccessfulCallsWithoutRetryAttempt()).isEqualTo(1);
assertThat(metrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(1);
assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
} |
public void reloadTpsControlRule(String pointName, boolean external) {
NotifyCenter.publishEvent(new TpsControlRuleChangeEvent(pointName, external));
} | @Test
void testReloadTpsControlRule() throws Exception {
String localRuleStorageBaseDir = EnvUtils.getNacosHome() + File.separator + "tmpTps" + File.separator + "tps" + File.separator;
ControlConfigs.getInstance().setLocalRuleStorageBaseDir(localRuleStorageBaseDir);
resetRuleStorageProxy();
final ControlManagerCenter controlManagerCenter = ControlManagerCenter.getInstance();
TpsControlRule tpsControlRule = new TpsControlRule();
tpsControlRule.setPointName("test");
RuleDetail ruleDetail = new RuleDetail();
ruleDetail.setMaxCount(100);
ruleDetail.setRuleName("test");
ruleDetail.setMonitorType(MonitorType.INTERCEPT.getType());
ruleDetail.setPeriod(TimeUnit.SECONDS);
tpsControlRule.setPointRule(ruleDetail);
String ruleContent = JacksonUtils.toJson(tpsControlRule);
controlManagerCenter.getRuleStorageProxy().getLocalDiskStorage().saveTpsRule("test", ruleContent);
controlManagerCenter.getTpsControlManager().applyTpsRule("test", tpsControlRule);
TpsControlRule testTpsControlRule = controlManagerCenter.getTpsControlManager().getRules().get("test");
assertEquals(100, testTpsControlRule.getPointRule().getMaxCount());
assertEquals("test", testTpsControlRule.getPointRule().getRuleName());
TpsControlRule tpsControlRule2 = new TpsControlRule();
tpsControlRule2.setPointName("test");
RuleDetail ruleDetail2 = new RuleDetail();
ruleDetail2.setMaxCount(200);
ruleDetail2.setRuleName("test2");
ruleDetail2.setMonitorType(MonitorType.INTERCEPT.getType());
ruleDetail2.setPeriod(TimeUnit.SECONDS);
tpsControlRule2.setPointRule(ruleDetail2);
String ruleContent2 = JacksonUtils.toJson(tpsControlRule2);
controlManagerCenter.getRuleStorageProxy().getLocalDiskStorage().saveTpsRule("test", ruleContent2);
controlManagerCenter.reloadTpsControlRule("test", false);
// wait for the async rule-change event to be processed
TimeUnit.SECONDS.sleep(1);
TpsControlRule testTpsControlRule2 = controlManagerCenter.getTpsControlManager().getRules().get("test");
assertEquals(200, testTpsControlRule2.getPointRule().getMaxCount());
assertEquals("test2", testTpsControlRule2.getPointRule().getRuleName());
} |
@Bean(PRODUCER_BEAN_NAME)
@ConditionalOnMissingBean(DefaultMQProducer.class)
@ConditionalOnProperty(prefix = "rocketmq", value = {"name-server", "producer.group"})
public DefaultMQProducer defaultMQProducer(RocketMQProperties rocketMQProperties) {
RocketMQProperties.Producer producerConfig = rocketMQProperties.getProducer();
String nameServer = rocketMQProperties.getNameServer();
String groupName = producerConfig.getGroup();
Assert.hasText(nameServer, "[rocketmq.name-server] must not be null");
Assert.hasText(groupName, "[rocketmq.producer.group] must not be null");
String accessChannel = rocketMQProperties.getAccessChannel();
String ak = rocketMQProperties.getProducer().getAccessKey();
String sk = rocketMQProperties.getProducer().getSecretKey();
boolean isEnableMsgTrace = rocketMQProperties.getProducer().isEnableMsgTrace();
String customizedTraceTopic = rocketMQProperties.getProducer().getCustomizedTraceTopic();
DefaultMQProducer producer = RocketMQUtil.createDefaultMQProducer(groupName, ak, sk, isEnableMsgTrace, customizedTraceTopic);
producer.setNamesrvAddr(nameServer);
if (StringUtils.hasLength(accessChannel)) {
producer.setAccessChannel(AccessChannel.valueOf(accessChannel));
}
producer.setSendMsgTimeout(producerConfig.getSendMessageTimeout());
producer.setRetryTimesWhenSendFailed(producerConfig.getRetryTimesWhenSendFailed());
producer.setRetryTimesWhenSendAsyncFailed(producerConfig.getRetryTimesWhenSendAsyncFailed());
producer.setMaxMessageSize(producerConfig.getMaxMessageSize());
producer.setCompressMsgBodyOverHowmuch(producerConfig.getCompressMessageBodyThreshold());
producer.setRetryAnotherBrokerWhenNotStoreOK(producerConfig.isRetryNextServer());
producer.setUseTLS(producerConfig.isTlsEnable());
if (StringUtils.hasText(producerConfig.getNamespace())) {
producer.setNamespace(producerConfig.getNamespace());
}
if (StringUtils.hasText(producerConfig.getNamespaceV2())) {
producer.setNamespaceV2(producerConfig.getNamespaceV2());
}
producer.setInstanceName(producerConfig.getInstanceName());
log.info("a producer ({}) init on namesrv {}", groupName, nameServer);
return producer;
} | @Test
public void testDefaultMQProducer() {
runner.withPropertyValues("rocketmq.name-server=127.0.0.1:9876",
"rocketmq.producer.group=spring_rocketmq").
run((context) -> {
assertThat(context).hasSingleBean(DefaultMQProducer.class);
});
} |
@VisibleForTesting
static void validateDefaultTopicFormats(final KsqlConfig config) {
validateTopicFormat(config, KsqlConfig.KSQL_DEFAULT_KEY_FORMAT_CONFIG, "key");
validateTopicFormat(config, KsqlConfig.KSQL_DEFAULT_VALUE_FORMAT_CONFIG, "value");
} | @Test
public void shouldFailOnInvalidDefaultValueFormat() {
// Given:
final KsqlConfig config = configWith(ImmutableMap.of(
KsqlConfig.KSQL_DEFAULT_VALUE_FORMAT_CONFIG, "bad"
));
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> KsqlServerMain.validateDefaultTopicFormats(config)
);
// Then:
assertThat(e.getMessage(), containsString(
"Invalid value for config '" + KsqlConfig.KSQL_DEFAULT_VALUE_FORMAT_CONFIG + "': bad"));
} |
static boolean toBoolean(final JsonNode object) {
if (object instanceof BooleanNode) {
return object.booleanValue();
}
throw invalidConversionException(object, SqlBaseType.BOOLEAN);
} | @Test
public void shouldNotIncludeValueInExceptionWhenFailingToBoolean() {
try {
// When:
JsonSerdeUtils.toBoolean(JsonNodeFactory.instance.textNode("personal info: do not log me"));
fail("Invalid test: should throw");
} catch (final Exception e) {
assertThat(ExceptionUtils.getStackTrace(e), not(containsString("personal info")));
}
} |
void consumeAll(Consumer<T> consumer) throws InterruptedException {
waitForData();
try {
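// Snapshot the queue size once; elements enqueued while consuming are left for a later call.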
for (int i = size(); i-- > 0; ) {
consumer.consume(front()); // can take forever
_dequeue();
}
}
finally {
clearConsumerLock();
}
} | @Test public void testConsumeAll() throws Exception {
final int capacity = 64; // arbitrary
final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
for (int i = 0; i < capacity; ++i) {
assertTrue("should enqueue", q.enqueue(i));
}
assertTrue("should not enqueue", !q.enqueue(capacity));
final Runnable trigger = mock(Runnable.class);
q.consumeAll(new Consumer<Integer>() {
private int expected = 0;
@Override public void consume(Integer e) {
assertEquals("element", expected++, (int) e);
trigger.run();
}
});
verify(trigger, times(capacity)).run();
} |
public static KafkaPool fromCrd(
Reconciliation reconciliation,
Kafka kafka,
KafkaNodePool pool,
NodeIdAssignment idAssignment,
Storage oldStorage,
OwnerReference ownerReference,
SharedEnvironmentProvider sharedEnvironmentProvider
) {
ModelUtils.validateComputeResources(pool.getSpec().getResources(), "KafkaNodePool.spec.resources");
StorageUtils.validatePersistentStorage(pool.getSpec().getStorage(), "KafkaNodePool.spec.storage");
KafkaPool result = new KafkaPool(reconciliation, kafka, pool, componentName(kafka, pool), ownerReference, idAssignment, sharedEnvironmentProvider);
result.gcLoggingEnabled = isGcLoggingEnabled(kafka, pool);
result.jvmOptions = pool.getSpec().getJvmOptions() != null ? pool.getSpec().getJvmOptions() : kafka.getSpec().getKafka().getJvmOptions();
result.resources = pool.getSpec().getResources() != null ? pool.getSpec().getResources() : kafka.getSpec().getKafka().getResources();
result.processRoles = new HashSet<>(pool.getSpec().getRoles());
if (oldStorage != null) {
Storage newStorage = pool.getSpec().getStorage();
StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, idAssignment.current(), idAssignment.desired());
if (diff.issuesDetected()) {
LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " +
"changing the deleteClaim flag, " +
"changing the kraftMetadata flag (but only one one volume can be marked to store the KRaft metadata log at a time), " +
"adding volumes to Jbod storage or removing volumes from Jbod storage, " +
"each volume in Jbod storage should have an unique ID, " +
"changing overrides to nodes which do not exist yet, " +
"and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the KafkaNodePool resource {}/{} contains changes which are not allowed. As a " +
"result, all storage changes will be ignored. Use DEBUG level logging for more information " +
"about the detected changes.", pool.getMetadata().getNamespace(), pool.getMetadata().getName());
Condition warning = StatusUtils.buildWarningCondition("KafkaStorage",
"The desired Kafka storage configuration in the KafkaNodePool resource " + pool.getMetadata().getNamespace() + "/" + pool.getMetadata().getName() + " contains changes which are not allowed. As a " +
"result, all storage changes will be ignored. Use DEBUG level logging for more information " +
"about the detected changes.");
result.warningConditions.add(warning);
result.setStorage(oldStorage);
} else {
if (!VolumeUtils.kraftMetadataPath(oldStorage).equals(VolumeUtils.kraftMetadataPath(newStorage))) {
// The volume for the KRaft metadata log is changing. We should log it.
LOGGER.warnCr(reconciliation, "The KRaft metadata log for KafkaNodePool {}/{} will be moved from volume {} to volume {}.", pool.getMetadata().getNamespace(), pool.getMetadata().getName(), VolumeUtils.kraftMetadataPath(oldStorage), VolumeUtils.kraftMetadataPath(newStorage));
}
result.setStorage(newStorage);
}
} else {
result.setStorage(pool.getSpec().getStorage());
}
// Adds the warnings about unknown or deprecated fields
result.warningConditions.addAll(StatusUtils.validate(reconciliation, pool));
if (pool.getSpec().getTemplate() != null) {
KafkaNodePoolTemplate template = pool.getSpec().getTemplate();
result.templatePersistentVolumeClaims = template.getPersistentVolumeClaim();
result.templatePodSet = template.getPodSet();
result.templatePod = template.getPod();
result.templatePerBrokerService = template.getPerPodService();
result.templatePerBrokerRoute = template.getPerPodRoute();
result.templatePerBrokerIngress = template.getPerPodIngress();
result.templateContainer = template.getKafkaContainer();
result.templateInitContainer = template.getInitContainer();
} else if (kafka.getSpec().getKafka().getTemplate() != null) {
KafkaClusterTemplate template = kafka.getSpec().getKafka().getTemplate();
result.templatePersistentVolumeClaims = template.getPersistentVolumeClaim();
result.templatePodSet = template.getPodSet();
result.templatePod = template.getPod();
result.templatePerBrokerService = template.getPerPodService();
result.templatePerBrokerRoute = template.getPerPodRoute();
result.templatePerBrokerIngress = template.getPerPodIngress();
result.templateContainer = template.getKafkaContainer();
result.templateInitContainer = template.getInitContainer();
}
return result;
} | @Test
public void testResourceValidation() {
KafkaNodePool pool = new KafkaNodePoolBuilder(POOL)
.editSpec()
.withResources(new ResourceRequirementsBuilder()
.withRequests(Map.of("cpu", new Quantity("4"), "memory", new Quantity("-16Gi")))
.withLimits(Map.of("cpu", new Quantity("2"), "memory", new Quantity("16Gi")))
.build())
.endSpec()
.build();
InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> KafkaPool.fromCrd(
Reconciliation.DUMMY_RECONCILIATION,
KAFKA,
pool,
new NodeIdAssignment(Set.of(10, 11, 13), Set.of(10, 11, 13), Set.of(), Set.of(), Set.of()),
new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build(),
OWNER_REFERENCE,
SHARED_ENV_PROVIDER
));
assertThat(ex.getMessage(), containsString("KafkaNodePool.spec.resources cpu request must be <= limit"));
assertThat(ex.getMessage(), containsString("KafkaNodePool.spec.resources memory request must be > zero"));
} |
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
final ModelId modelId = entityDescriptor.id();
try {
final Output output = outputService.load(modelId.id());
return Optional.of(exportNativeEntity(output, entityDescriptorIds));
} catch (NotFoundException e) {
LOG.debug("Couldn't find output {}", entityDescriptor, e);
return Optional.empty();
}
} | @Test
public void exportEntity() {
final ImmutableMap<String, Object> configuration = ImmutableMap.of(
"some-setting", "foobar"
);
final OutputImpl output = OutputImpl.create(
"01234567890",
"Output Title",
"org.graylog2.outputs.LoggingOutput",
"admin",
configuration,
new Date(0L),
null
);
final EntityDescriptor descriptor = EntityDescriptor.create(output.getId(), ModelTypes.OUTPUT_V1);
final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
final Entity entity = facade.exportNativeEntity(output, entityDescriptorIds);
assertThat(entity).isInstanceOf(EntityV1.class);
assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
assertThat(entity.type()).isEqualTo(ModelTypes.OUTPUT_V1);
final EntityV1 entityV1 = (EntityV1) entity;
final OutputEntity outputEntity = objectMapper.convertValue(entityV1.data(), OutputEntity.class);
assertThat(outputEntity.title()).isEqualTo(ValueReference.of("Output Title"));
assertThat(outputEntity.type()).isEqualTo(ValueReference.of("org.graylog2.outputs.LoggingOutput"));
assertThat(outputEntity.configuration()).containsEntry("some-setting", ValueReference.of("foobar"));
} |
@VisibleForTesting
static CompostState determineCompostUsed(String chatMessage)
{
if (!chatMessage.contains("compost"))
{
return null;
}
Matcher matcher;
if ((matcher = COMPOST_USED_ON_PATCH.matcher(chatMessage)).matches() ||
(matcher = FERTILE_SOIL_CAST.matcher(chatMessage)).find() ||
(matcher = ALREADY_TREATED.matcher(chatMessage)).matches() ||
(matcher = INSPECT_PATCH.matcher(chatMessage)).matches())
{
String compostGroup = matcher.group("compostType");
switch (compostGroup)
{
case "ultra":
return CompostState.ULTRACOMPOST;
case "super":
return CompostState.SUPERCOMPOST;
default:
return CompostState.COMPOST;
}
}
return null;
} | @Test
public void determineCompostUsed_returnsAppropriateCompostValues()
{
// invalid
collector.checkThat(
CompostTracker.determineCompostUsed("This is not a farming chat message."),
is((CompostState) null)
);
collector.checkThat(
CompostTracker.determineCompostUsed("Contains word compost but is not examine message."),
is((CompostState) null)
);
// inspect
collector.checkThat(
CompostTracker.determineCompostUsed("This is an allotment. The soil has been treated with supercompost. The patch is empty and weeded."),
is(CompostState.SUPERCOMPOST)
);
// fertile soil on existing patch
collector.checkThat(
CompostTracker.determineCompostUsed("This patch has already been fertilised with ultracompost - the spell can't make it any more fertile."),
is(CompostState.ULTRACOMPOST)
);
// fertile soil on cleared patch
collector.checkThat(
CompostTracker.determineCompostUsed("The herb patch has been treated with supercompost."),
is(CompostState.SUPERCOMPOST)
);
// bucket on cleared patch
collector.checkThat(
CompostTracker.determineCompostUsed("You treat the herb patch with ultracompost."),
is(CompostState.ULTRACOMPOST)
);
collector.checkThat(
CompostTracker.determineCompostUsed("You treat the tree patch with compost."),
is(CompostState.COMPOST)
);
collector.checkThat(
CompostTracker.determineCompostUsed("You treat the fruit tree patch with supercompost."),
is(CompostState.SUPERCOMPOST)
);
} |
public static JavaVersion parse(String version)
{
Matcher matcher = LEGACY_PATTERN.matcher(version);
if (matcher.matches()) {
int major = Integer.parseInt(matcher.group("MAJOR"));
int minor = Optional.ofNullable(matcher.group("MINOR"))
.map(Integer::parseInt)
.orElse(0);
String update = matcher.group("UPDATE");
if (update == null) {
return new JavaVersion(major, minor);
}
return new JavaVersion(major, minor, OptionalInt.of(Integer.parseInt(update)));
}
matcher = PATTERN.matcher(version);
if (matcher.matches()) {
int major = Integer.parseInt(matcher.group("MAJOR"));
int minor = Optional.ofNullable(matcher.group("MINOR"))
.map(Integer::parseInt)
.orElse(0);
return new JavaVersion(major, minor);
}
throw new IllegalArgumentException(format("Cannot parse version %s", version));
} | @Test
public void testParseLegacy()
{
assertEquals(JavaVersion.parse("1.8"), new JavaVersion(8, 0));
assertEquals(JavaVersion.parse("1.8.0"), new JavaVersion(8, 0));
assertEquals(JavaVersion.parse("1.8.0_5"), new JavaVersion(8, 0, OptionalInt.of(5)));
assertEquals(JavaVersion.parse("1.8.0_20"), new JavaVersion(8, 0, OptionalInt.of(20)));
assertEquals(JavaVersion.parse("1.8.1_25"), new JavaVersion(8, 1, OptionalInt.of(25)));
assertEquals(JavaVersion.parse("1.8.0_60-ea"), new JavaVersion(8, 0, OptionalInt.of(60)));
assertEquals(JavaVersion.parse("1.8.0_111-internal"), new JavaVersion(8, 0, OptionalInt.of(111)));
} |
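
Note: the LEGACY_PATTERN and PATTERN constants are defined outside this pair. A minimal reconstruction of the legacy pattern, consistent with the behavior exercised by testParseLegacy (named groups MAJOR, MINOR and UPDATE, plus an optional "-ea"/"-internal" style suffix), is sketched below; the exact regex in the real source may differ.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LegacyVersionPatternSketch {
    // Hypothetical reconstruction of LEGACY_PATTERN; the group names match parse() above.
    private static final Pattern LEGACY_PATTERN =
            Pattern.compile("1\\.(?<MAJOR>\\d+)(\\.(?<MINOR>\\d+))?(_(?<UPDATE>\\d+))?(-.+)?");

    public static void main(String[] args) {
        Matcher matcher = LEGACY_PATTERN.matcher("1.8.0_60-ea");
        if (matcher.matches()) {
            // prints: major=8, minor=0, update=60
            System.out.printf("major=%s, minor=%s, update=%s%n",
                    matcher.group("MAJOR"), matcher.group("MINOR"), matcher.group("UPDATE"));
        }
    }
}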
@Override
public long estimate() {
final double raw = (1 / computeE()) * alpha() * m * m;
return applyRangeCorrection(raw);
} | @Test
public void testAlpha_withMemoryFootprintOf32() {
DenseHyperLogLogEncoder encoder = new DenseHyperLogLogEncoder(5);
encoder.estimate();
} |
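
For context, the raw estimate computed in estimate() has the shape of the standard HyperLogLog estimator (the textbook formula, not something stated in this code base): with m registers M[1..m],

    E = \alpha_m \cdot m^2 \cdot \Big( \sum_{j=1}^{m} 2^{-M[j]} \Big)^{-1}

so computeE() presumably returns the harmonic sum \sum_j 2^{-M[j]}, alpha() the bias-correction constant \alpha_m, and applyRangeCorrection(raw) the small- and large-range corrections from Flajolet et al.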
protected SaveFederationQueuePolicyRequest parsePolicy(String policy) throws YarnException {
String[] policyItems = policy.split(SEMICOLON);
if (policyItems == null || policyItems.length != 4) {
throw new YarnException("The policy cannot be empty or the policy is incorrect. \n" +
" Required information to provide: queue,router weight,amrm weight,headroomalpha \n" +
" eg. root.a;SC-1:0.7,SC-2:0.3;SC-1:0.7,SC-2:0.3;1.0");
}
String queue = policyItems[0];
String routerWeight = policyItems[1];
String amrmWeight = policyItems[2];
String headroomalpha = policyItems[3];
LOG.info("Policy: [Queue = {}, RouterWeight = {}, AmRmWeight = {}, Headroomalpha = {}]",
queue, routerWeight, amrmWeight, headroomalpha);
checkSubClusterQueueWeightRatioValid(routerWeight);
checkSubClusterQueueWeightRatioValid(amrmWeight);
checkHeadRoomAlphaValid(headroomalpha);
FederationQueueWeight federationQueueWeight =
FederationQueueWeight.newInstance(routerWeight, amrmWeight, headroomalpha);
String policyManager = getConf().get(YarnConfiguration.FEDERATION_POLICY_MANAGER,
YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER);
SaveFederationQueuePolicyRequest request = SaveFederationQueuePolicyRequest.newInstance(
queue, federationQueueWeight, policyManager);
return request;
} | @Test
public void testParsePolicy() throws Exception {
// Case1, If policy is empty.
String errMsg1 = "The policy cannot be empty or the policy is incorrect. \n" +
" Required information to provide: queue,router weight,amrm weight,headroomalpha \n" +
" eg. root.a;SC-1:0.7,SC-2:0.3;SC-1:0.7,SC-2:0.3;1.0";
LambdaTestUtils.intercept(YarnException.class, errMsg1, () -> rmAdminCLI.parsePolicy(""));
// Case2, If policy is incomplete, We need 4 items, but only 2 of them are provided.
LambdaTestUtils.intercept(YarnException.class, errMsg1,
() -> rmAdminCLI.parsePolicy("root.a;SC-1:0.1,SC-2:0.9;"));
// Case3, If policy is incomplete, The weight of a subcluster is missing.
String errMsg2 = "The subClusterWeight cannot be empty, " +
"and the subClusterWeight size must be 2. (eg.SC-1,0.2)";
LambdaTestUtils.intercept(YarnException.class, errMsg2,
() -> rmAdminCLI.parsePolicy("root.a;SC-1:0.1,SC-2;SC-1:0.1,SC-2;0.3,1.0"));
// Case4, The policy is complete, but the sum of weights for each subcluster is not equal to 1.
String errMsg3 = "The sum of ratios for all subClusters must be equal to 1.";
LambdaTestUtils.intercept(YarnException.class, errMsg3,
() -> rmAdminCLI.parsePolicy("root.a;SC-1:0.1,SC-2:0.8;SC-1:0.1,SC-2;0.3,1.0"));
// If policy is root.a;SC-1:0.7,SC-2:0.3;SC-1:0.7,SC-2:0.3;1.0
String policy = "root.a;SC-1:0.7,SC-2:0.3;SC-1:0.6,SC-2:0.4;1.0";
SaveFederationQueuePolicyRequest request = rmAdminCLI.parsePolicy(policy);
FederationQueueWeight federationQueueWeight = request.getFederationQueueWeight();
assertNotNull(federationQueueWeight);
assertEquals("SC-1:0.7,SC-2:0.3", federationQueueWeight.getRouterWeight());
assertEquals("SC-1:0.6,SC-2:0.4", federationQueueWeight.getAmrmWeight());
assertEquals("1.0", federationQueueWeight.getHeadRoomAlpha());
} |
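
The checkSubClusterQueueWeightRatioValid failures exercised in Case3 and Case4 above can be illustrated with a simplified, self-contained sketch of that validation (a hypothetical stand-in for the YARN helper, throwing IllegalArgumentException instead of YarnException for brevity): each comma-separated item must be a "subCluster:ratio" pair, and the ratios must sum to 1.

public class QueueWeightCheckSketch {
    // Simplified stand-in for checkSubClusterQueueWeightRatioValid.
    static void checkWeightRatios(String weight) {
        double sum = 0;
        for (String item : weight.split(",")) {
            String[] pair = item.split(":");
            if (pair.length != 2) {
                // mirrors Case3 above: a subcluster is missing its weight
                throw new IllegalArgumentException("The subClusterWeight cannot be empty, "
                        + "and the subClusterWeight size must be 2. (eg.SC-1,0.2)");
            }
            sum += Double.parseDouble(pair[1]);
        }
        if (Math.abs(sum - 1.0) > 1e-6) {
            // mirrors Case4 above: ratios must sum to 1
            throw new IllegalArgumentException("The sum of ratios for all subClusters must be equal to 1.");
        }
    }

    public static void main(String[] args) {
        checkWeightRatios("SC-1:0.7,SC-2:0.3"); // passes
        checkWeightRatios("SC-1:0.1,SC-2:0.8"); // throws: ratios sum to 0.9
    }
}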
@Override
public ValidationTaskResult validateImpl(Map<String, String> optionMap)
throws InterruptedException {
String hadoopVersion;
try {
hadoopVersion = getHadoopVersion();
} catch (IOException e) {
return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
String.format("Failed to get hadoop version:%n%s.", ExceptionUtils.asPlainText(e)),
"Please check if hadoop is on your PATH.");
}
String version = mConf.getString(PropertyKey.UNDERFS_VERSION);
for (String prefix : new String[] {CDH_PREFIX, HADOOP_PREFIX}) {
if (version.startsWith(prefix)) {
version = version.substring(prefix.length());
break;
}
}
if (hadoopVersion.contains(version)) {
return new ValidationTaskResult(ValidationUtils.State.OK, getName(),
String.format("Hadoop version %s contains UFS version defined in alluxio %s=%s.",
hadoopVersion, PropertyKey.UNDERFS_VERSION, version),
"");
}
return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(),
String.format("Hadoop version %s does not match %s=%s.",
hadoopVersion, PropertyKey.UNDERFS_VERSION, version),
String.format("Please configure %s to match the HDFS version.",
PropertyKey.UNDERFS_VERSION));
} | @Test
public void versionMatched() throws Exception {
PowerMockito.mockStatic(ShellUtils.class);
String[] cmd = new String[]{"hadoop", "version"};
BDDMockito.given(ShellUtils.execCommand(cmd)).willReturn("Hadoop 2.6");
CONF.set(PropertyKey.UNDERFS_VERSION, "2.6");
HdfsVersionValidationTask task = new HdfsVersionValidationTask(CONF);
ValidationTaskResult result = task.validateImpl(ImmutableMap.of());
assertEquals(ValidationUtils.State.OK, result.getState());
} |
public static boolean isBlank(String str) {
return StringUtils.isBlank(str);
} | @Test
public void testIsBlank() {
assertThat(UtilAll.isBlank("Hello ")).isFalse();
assertThat(UtilAll.isBlank(" Hello")).isFalse();
assertThat(UtilAll.isBlank("He llo")).isFalse();
assertThat(UtilAll.isBlank(" ")).isTrue();
assertThat(UtilAll.isBlank("Hello")).isFalse();
} |
public boolean same(final KsqlVersion version) {
return isAtLeast(version) && version.isAtLeast(this);
} | @Test
public void shouldCompareCpVersionToStandaloneVersionInSame() {
// known mappings
assertThat(new KsqlVersion("6.0.").same(new KsqlVersion("0.10.")), is(true));
assertThat(new KsqlVersion("0.10.").same(new KsqlVersion("6.0.")), is(true));
assertThat(new KsqlVersion("6.1.").same(new KsqlVersion("0.14.")), is(true));
assertThat(new KsqlVersion("0.14.").same(new KsqlVersion("6.1.")), is(true));
assertThat(new KsqlVersion("6.0.").same(new KsqlVersion("0.14.")), is(false));
assertThat(new KsqlVersion("0.10.").same(new KsqlVersion("6.1.")), is(false));
// unknown mappings
assertThat(new KsqlVersion("5.0.").same(new KsqlVersion("0.10.")), is(false));
assertThat(new KsqlVersion("6.0.").same(new KsqlVersion("0.11.")), is(false));
assertThat(new KsqlVersion("6.2.").same(new KsqlVersion("0.17.")), is(false));
} |
@Override
public CompletableFuture<PopResult> popMessage(ProxyContext ctx, AddressableMessageQueue messageQueue,
PopMessageRequestHeader requestHeader, long timeoutMillis) {
requestHeader.setBornTime(System.currentTimeMillis());
RemotingCommand request = LocalRemotingCommand.createRequestCommand(RequestCode.POP_MESSAGE, requestHeader, ctx.getLanguage());
CompletableFuture<RemotingCommand> future = new CompletableFuture<>();
SimpleChannel channel = channelManager.createInvocationChannel(ctx);
InvocationContext invocationContext = new InvocationContext(future);
channel.registerInvocationContext(request.getOpaque(), invocationContext);
ChannelHandlerContext simpleChannelHandlerContext = channel.getChannelHandlerContext();
try {
RemotingCommand response = brokerController.getPopMessageProcessor().processRequest(simpleChannelHandlerContext, request);
if (response != null) {
invocationContext.handle(response);
channel.eraseInvocationContext(request.getOpaque());
}
} catch (Exception e) {
future.completeExceptionally(e);
channel.eraseInvocationContext(request.getOpaque());
log.error("Failed to process popMessage command", e);
}
return future.thenApply(r -> {
PopStatus popStatus;
List<MessageExt> messageExtList = new ArrayList<>();
switch (r.getCode()) {
case ResponseCode.SUCCESS:
popStatus = PopStatus.FOUND;
ByteBuffer byteBuffer = ByteBuffer.wrap(r.getBody());
messageExtList = MessageDecoder.decodesBatch(
byteBuffer,
true,
false,
true
);
break;
case ResponseCode.POLLING_FULL:
popStatus = PopStatus.POLLING_FULL;
break;
case ResponseCode.POLLING_TIMEOUT:
case ResponseCode.PULL_NOT_FOUND:
popStatus = PopStatus.POLLING_NOT_FOUND;
break;
default:
throw new ProxyException(ProxyExceptionCode.INTERNAL_SERVER_ERROR, r.getRemark());
}
PopResult popResult = new PopResult(popStatus, messageExtList);
PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) r.readCustomHeader();
if (popStatus == PopStatus.FOUND) {
Map<String, Long> startOffsetInfo;
Map<String, List<Long>> msgOffsetInfo;
Map<String, Integer> orderCountInfo;
popResult.setInvisibleTime(responseHeader.getInvisibleTime());
popResult.setPopTime(responseHeader.getPopTime());
startOffsetInfo = ExtraInfoUtil.parseStartOffsetInfo(responseHeader.getStartOffsetInfo());
msgOffsetInfo = ExtraInfoUtil.parseMsgOffsetInfo(responseHeader.getMsgOffsetInfo());
orderCountInfo = ExtraInfoUtil.parseOrderCountInfo(responseHeader.getOrderCountInfo());
// <topicMark@queueId, msg queueOffset>
Map<String, List<Long>> sortMap = new HashMap<>(16);
for (MessageExt messageExt : messageExtList) {
                    // The value of POP_CK is used to determine whether this is a pop retry,
                    // because the topic could be rewritten by the broker.
String key = ExtraInfoUtil.getStartOffsetInfoMapKey(messageExt.getTopic(),
messageExt.getProperty(MessageConst.PROPERTY_POP_CK), messageExt.getQueueId());
if (!sortMap.containsKey(key)) {
sortMap.put(key, new ArrayList<>(4));
}
sortMap.get(key).add(messageExt.getQueueOffset());
}
Map<String, String> map = new HashMap<>(5);
for (MessageExt messageExt : messageExtList) {
if (startOffsetInfo == null) {
                        // if the command is popMsg, we should set the checkpoint info into the extraInfo field
// find pop ck offset
String key = messageExt.getTopic() + messageExt.getQueueId();
                        if (!map.containsKey(key)) {
map.put(key, ExtraInfoUtil.buildExtraInfo(messageExt.getQueueOffset(), responseHeader.getPopTime(), responseHeader.getInvisibleTime(), responseHeader.getReviveQid(),
messageExt.getTopic(), messageQueue.getBrokerName(), messageExt.getQueueId()));
}
messageExt.getProperties().put(MessageConst.PROPERTY_POP_CK, map.get(key) + MessageConst.KEY_SEPARATOR + messageExt.getQueueOffset());
} else {
if (messageExt.getProperty(MessageConst.PROPERTY_POP_CK) == null) {
String key = ExtraInfoUtil.getStartOffsetInfoMapKey(messageExt.getTopic(), messageExt.getQueueId());
int index = sortMap.get(key).indexOf(messageExt.getQueueOffset());
Long msgQueueOffset = msgOffsetInfo.get(key).get(index);
if (msgQueueOffset != messageExt.getQueueOffset()) {
log.warn("Queue offset [{}] of msg is strange, not equal to the stored in msg, {}", msgQueueOffset, messageExt);
}
messageExt.getProperties().put(MessageConst.PROPERTY_POP_CK,
ExtraInfoUtil.buildExtraInfo(startOffsetInfo.get(key), responseHeader.getPopTime(), responseHeader.getInvisibleTime(),
responseHeader.getReviveQid(), messageExt.getTopic(), messageQueue.getBrokerName(), messageExt.getQueueId(), msgQueueOffset)
);
if (requestHeader.isOrder() && orderCountInfo != null) {
Integer count = orderCountInfo.get(key);
if (count != null && count > 0) {
messageExt.setReconsumeTimes(count);
}
}
}
}
messageExt.getProperties().computeIfAbsent(MessageConst.PROPERTY_FIRST_POP_TIME, k -> String.valueOf(responseHeader.getPopTime()));
                    messageExt.setBrokerName(messageQueue.getBrokerName());
messageExt.setTopic(messageQueue.getTopic());
}
}
return popResult;
});
} | @Test
public void testPopMessageWriteAndFlush() throws Exception {
int reviveQueueId = 1;
long popTime = System.currentTimeMillis();
long invisibleTime = 3000L;
long startOffset = 100L;
long restNum = 0L;
StringBuilder startOffsetStringBuilder = new StringBuilder();
StringBuilder messageOffsetStringBuilder = new StringBuilder();
List<MessageExt> messageExtList = new ArrayList<>();
List<Long> messageOffsetList = new ArrayList<>();
MessageExt message1 = buildMessageExt(topic, 0, startOffset);
messageExtList.add(message1);
messageOffsetList.add(startOffset);
byte[] body1 = MessageDecoder.encode(message1, false);
MessageExt message2 = buildMessageExt(topic, 0, startOffset + 1);
messageExtList.add(message2);
messageOffsetList.add(startOffset + 1);
ExtraInfoUtil.buildStartOffsetInfo(startOffsetStringBuilder, topic, queueId, startOffset);
ExtraInfoUtil.buildMsgOffsetInfo(messageOffsetStringBuilder, topic, queueId, messageOffsetList);
byte[] body2 = MessageDecoder.encode(message2, false);
ByteBuffer byteBuffer1 = ByteBuffer.wrap(body1);
ByteBuffer byteBuffer2 = ByteBuffer.wrap(body2);
ByteBuffer b3 = ByteBuffer.allocate(byteBuffer1.limit() + byteBuffer2.limit());
b3.put(byteBuffer1);
b3.put(byteBuffer2);
PopMessageRequestHeader requestHeader = new PopMessageRequestHeader();
requestHeader.setInvisibleTime(invisibleTime);
Mockito.when(popMessageProcessorMock.processRequest(Mockito.any(SimpleChannelHandlerContext.class), Mockito.argThat(argument -> {
boolean first = argument.getCode() == RequestCode.POP_MESSAGE;
boolean second = argument.readCustomHeader() instanceof PopMessageRequestHeader;
return first && second;
}))).thenAnswer(invocation -> {
SimpleChannelHandlerContext simpleChannelHandlerContext = invocation.getArgument(0);
RemotingCommand request = invocation.getArgument(1);
RemotingCommand response = RemotingCommand.createResponseCommand(PopMessageResponseHeader.class);
response.setOpaque(request.getOpaque());
response.setCode(ResponseCode.SUCCESS);
response.setBody(b3.array());
PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) response.readCustomHeader();
responseHeader.setStartOffsetInfo(startOffsetStringBuilder.toString());
responseHeader.setMsgOffsetInfo(messageOffsetStringBuilder.toString());
responseHeader.setInvisibleTime(requestHeader.getInvisibleTime());
responseHeader.setPopTime(popTime);
responseHeader.setRestNum(restNum);
responseHeader.setReviveQid(reviveQueueId);
simpleChannelHandlerContext.writeAndFlush(response);
return null;
});
MessageQueue messageQueue = new MessageQueue(topic, brokerName, queueId);
CompletableFuture<PopResult> future = localMessageService.popMessage(proxyContext, new AddressableMessageQueue(messageQueue, ""), requestHeader, 1000L);
PopResult popResult = future.get();
assertThat(popResult.getPopTime()).isEqualTo(popTime);
assertThat(popResult.getInvisibleTime()).isEqualTo(invisibleTime);
assertThat(popResult.getPopStatus()).isEqualTo(PopStatus.FOUND);
assertThat(popResult.getRestNum()).isEqualTo(restNum);
assertThat(popResult.getMsgFoundList().size()).isEqualTo(messageExtList.size());
for (int i = 0; i < popResult.getMsgFoundList().size(); i++) {
assertMessageExt(popResult.getMsgFoundList().get(i), messageExtList.get(i));
}
} |
@NotNull
public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) {
        // Fetch from the DB first, because the code exists only once and can be used only once.
        // During social login, when no User is bound yet, a bind login is required, in which case the code needs to be used twice
SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state);
if (socialUser != null) {
return socialUser;
}
        // Fetch via a remote request
AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state);
Assert.notNull(authUser, "三方用户不能为空");
        // Save to the DB
socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid());
if (socialUser == null) {
socialUser = new SocialUserDO();
}
        socialUser.setType(socialType).setCode(code).setState(state) // the code + state fields must be saved so they remain queryable later
                .setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken()).setRawTokenInfo(toJsonString(authUser.getToken()))
.setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar()).setRawUserInfo(toJsonString(authUser.getRawUserInfo()));
if (socialUser.getId() == null) {
socialUserMapper.insert(socialUser);
} else {
socialUserMapper.updateById(socialUser);
}
return socialUser;
} | @Test
public void testAuthSocialUser_notNull() {
        // mock data
SocialUserDO socialUser = randomPojo(SocialUserDO.class,
o -> o.setType(SocialTypeEnum.GITEE.getType()).setCode("tudou").setState("yuanma"));
socialUserMapper.insert(socialUser);
        // prepare parameters
Integer socialType = SocialTypeEnum.GITEE.getType();
Integer userType = randomEle(SocialTypeEnum.values()).getType();
String code = "tudou";
String state = "yuanma";
        // invoke
SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state);
        // assert
assertPojoEquals(socialUser, result);
} |
public boolean isEmpty() {
return results.isEmpty();
} | @Test
public void empty() {
assertTrue(result.isEmpty());
} |
@VisibleForTesting
static String extractUrn(MonitoringInfoSpecs.Enum value) {
return value.getValueDescriptor().getOptions().getExtension(monitoringInfoSpec).getUrn();
} | @Test
public void testUniqueUrnsDefinedForAllSpecs() {
Multimap<String, MonitoringInfoSpecs.Enum> urnToEnum = ArrayListMultimap.create();
for (MonitoringInfoSpecs.Enum value : MonitoringInfoSpecs.Enum.values()) {
if (value != MonitoringInfoSpecs.Enum.UNRECOGNIZED) {
urnToEnum.put(extractUrn(value), value);
}
}
for (String urn : ImmutableSet.copyOf(urnToEnum.keySet())) {
if (urnToEnum.get(urn).size() == 1) {
urnToEnum.removeAll(urn);
}
}
assertThat(urnToEnum.entries(), Matchers.empty());
} |
@Override
protected Map<String, RowMetaInterface> getInputRowMetaInterfaces( GetXMLDataMeta meta ) {
Map<String, RowMetaInterface> inputRows = getInputFields( meta );
if ( inputRows == null ) {
inputRows = new HashMap<>();
}
// Get some boolean flags from the meta for easier access
boolean isInFields = meta.isInFields();
boolean isAFile = meta.getIsAFile();
boolean isAUrl = meta.isReadUrl();
// only add resource fields if we are NOT getting the xml or file from a field
if ( !isInFields || isAFile || isAUrl ) {
RowMetaInterface stepFields = getOutputFields( meta );
RowMetaInterface clone = stepFields.clone();
// if there are previous steps providing data, we should remove them from the set of "resource" fields
for ( RowMetaInterface rowMetaInterface : inputRows.values() ) {
for ( ValueMetaInterface valueMetaInterface : rowMetaInterface.getValueMetaList() ) {
try {
clone.removeValueMeta( valueMetaInterface.getName() );
} catch ( KettleValueException e ) {
// could not find it in the output, skip it
}
}
}
inputRows.put( RESOURCE, clone );
}
return inputRows;
} | @Test
public void testGetInputRowMetaInterfaces_isInFields() throws Exception {
when( parentTransMeta.getPrevStepNames( Mockito.<StepMeta>any() ) ).thenReturn( null );
RowMetaInterface rowMetaInterface = mock( RowMetaInterface.class );
lenient().doReturn( rowMetaInterface ).when( analyzer ).getOutputFields( meta );
when( meta.isInFields() ).thenReturn( true );
when( meta.getIsAFile() ).thenReturn( false );
when( meta.isReadUrl() ).thenReturn( false );
Map<String, RowMetaInterface> rowMetaInterfaces = analyzer.getInputRowMetaInterfaces( meta );
assertNotNull( rowMetaInterfaces );
assertEquals( 0, rowMetaInterfaces.size() );
} |
@Override
public void registerStore(final StateStore store,
final StateRestoreCallback stateRestoreCallback,
final CommitCallback commitCallback) {
final String storeName = store.name();
// TODO (KAFKA-12887): we should not trigger user's exception handler for illegal-argument but always
// fail-crash; in this case we would not need to immediately close the state store before throwing
if (CHECKPOINT_FILE_NAME.equals(storeName)) {
store.close();
throw new IllegalArgumentException(format("%sIllegal store name: %s, which collides with the pre-defined " +
"checkpoint file name", logPrefix, storeName));
}
if (stores.containsKey(storeName)) {
store.close();
throw new IllegalArgumentException(format("%sStore %s has already been registered.", logPrefix, storeName));
}
if (stateRestoreCallback instanceof StateRestoreListener) {
log.warn("The registered state restore callback is also implementing the state restore listener interface, " +
"which is not expected and would be ignored");
}
final StateStoreMetadata storeMetadata = isLoggingEnabled(storeName) ?
new StateStoreMetadata(
store,
getStorePartition(storeName),
stateRestoreCallback,
commitCallback,
converterForStore(store)) :
new StateStoreMetadata(store, commitCallback);
// register the store first, so that if later an exception is thrown then eventually while we call `close`
// on the state manager this state store would be closed as well
stores.put(storeName, storeMetadata);
if (!stateUpdaterEnabled) {
maybeRegisterStoreWithChangelogReader(storeName);
}
log.debug("Registered state store {} to its state manager", storeName);
} | @Test
public void shouldThrowProcessorStateExceptionOnFlushIfStoreThrowsAnException() {
final RuntimeException exception = new RuntimeException("KABOOM!");
final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE);
final MockKeyValueStore stateStore = new MockKeyValueStore(persistentStoreName, true) {
@Override
public void flush() {
throw exception;
}
};
stateManager.registerStore(stateStore, stateStore.stateRestoreCallback, null);
final ProcessorStateException thrown = assertThrows(ProcessorStateException.class, stateManager::flush);
assertEquals(exception, thrown.getCause());
} |
@Override
public SarifSchema210 deserialize(Path reportPath) {
try {
return mapper
.enable(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION)
.addHandler(new DeserializationProblemHandler() {
@Override
public Object handleInstantiationProblem(DeserializationContext ctxt, Class<?> instClass, Object argument, Throwable t) throws IOException {
if (!instClass.equals(SarifSchema210.Version.class)) {
return NOT_HANDLED;
}
throw new UnsupportedSarifVersionException(format(UNSUPPORTED_VERSION_MESSAGE_TEMPLATE, argument), t);
}
})
.readValue(reportPath.toFile(), SarifSchema210.class);
} catch (UnsupportedSarifVersionException e) {
throw new IllegalStateException(e.getMessage(), e);
} catch (JsonMappingException | JsonParseException e) {
throw new IllegalStateException(format(SARIF_JSON_SYNTAX_ERROR, reportPath), e);
} catch (IOException e) {
throw new IllegalStateException(format(SARIF_REPORT_ERROR, reportPath), e);
}
} | @Test
public void deserialize() throws URISyntaxException {
URL sarifResource = requireNonNull(getClass().getResource("eslint-sarif210.json"));
Path sarif = Paths.get(sarifResource.toURI());
SarifSchema210 deserializationResult = serializer.deserialize(sarif);
verifySarif(deserializationResult);
} |
@Override
public void close() {
isRunning.set(false);
getListener().onClose(this);
} | @Test
public void shouldCallbackOnClose() {
// when:
sandboxed.close();
// then:
verify(listener).onClose(sandboxed);
} |
@Override
public void upgrade() {
if (clusterConfigService.get(MigrationCompleted.class) != null) {
LOG.debug("Migration already completed!");
return;
}
final List<SearchPivotLimitMigration> pivotLimitMigrations = StreamSupport.stream(this.searches.find().spliterator(), false)
.flatMap(document -> {
final String searchId = document.get("_id", ObjectId.class).toHexString();
final List<Document> queries = document.get("queries", Collections.emptyList());
return EntryStream.of(queries)
.flatMap(entry -> {
final Integer queryIndex = entry.getKey();
final List<Document> searchTypes = entry.getValue().get("search_types", Collections.emptyList());
return EntryStream.of(searchTypes)
.filter(searchType -> "pivot".equals(searchType.getValue().getString("type")))
.flatMap(searchTypeEntry -> {
final Document searchType = searchTypeEntry.getValue();
final Integer searchTypeIndex = searchTypeEntry.getKey();
final boolean hasRowLimit = searchType.containsKey("row_limit");
final boolean hasColumnLimit = searchType.containsKey("column_limit");
final Optional<Integer> rowLimit = Optional.ofNullable(searchType.getInteger("row_limit"));
final Optional<Integer> columnLimit = Optional.ofNullable(searchType.getInteger("column_limit"));
if (searchTypeIndex != null && (hasRowLimit || hasColumnLimit)) {
return Stream.of(new SearchPivotLimitMigration(searchId, queryIndex, searchTypeIndex, rowLimit, columnLimit));
}
return Stream.empty();
});
});
})
.collect(Collectors.toList());
final List<WriteModel<Document>> operations = pivotLimitMigrations.stream()
.flatMap(pivotMigration -> {
final ImmutableList.Builder<WriteModel<Document>> builder = ImmutableList.builder();
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$unset", doc(pivotPath(pivotMigration) + ".row_limit", 1))
)
);
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$set", doc(pivotPath(pivotMigration) + ".row_groups.$[pivot].limit", pivotMigration.rowLimit.orElse(DEFAULT_LIMIT))),
matchValuePivots
)
);
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$unset", doc(pivotPath(pivotMigration) + ".column_limit", 1))
)
);
builder.add(
updateSearch(
pivotMigration.searchId(),
doc("$set", doc(pivotPath(pivotMigration) + ".column_groups.$[pivot].limit", pivotMigration.columnLimit.orElse(DEFAULT_LIMIT))),
matchValuePivots
)
);
return builder.build().stream();
})
.collect(Collectors.toList());
if (!operations.isEmpty()) {
LOG.debug("Updating {} search types ...", pivotLimitMigrations.size());
this.searches.bulkWrite(operations);
}
clusterConfigService.write(new MigrationCompleted(pivotLimitMigrations.size()));
} | @Test
@MongoDBFixtures("V20230113095301_MigrateGlobalPivotLimitsToGroupingsInSearchesTest_empty.json")
void notMigratingAnythingIfNoSearchesArePresent() {
this.migration.upgrade();
assertThat(migrationCompleted().migratedSearchTypes()).isZero();
} |
public IssuesChangesNotification newIssuesChangesNotification(Set<DefaultIssue> issues, Map<String, UserDto> assigneesByUuid) {
AnalysisChange change = new AnalysisChange(analysisMetadataHolder.getAnalysisDate());
Set<ChangedIssue> changedIssues = issues.stream()
.map(issue -> new ChangedIssue.Builder(issue.key())
.setAssignee(getAssignee(issue.assignee(), assigneesByUuid))
.setNewStatus(issue.status())
.setNewIssueStatus(issue.status() != null ? IssueStatus.of(issue.status(), issue.resolution()) : null)
.setRule(getRuleByRuleKey(issue.ruleKey()))
.setProject(getProject())
.build())
.collect(Collectors.toSet());
return issuesChangesSerializer.serialize(new IssuesChangesNotificationBuilder(changedIssues, change));
} | @Test
public void newIssuesChangesNotification_fails_with_ISE_if_treeRootHolder_is_empty() {
RuleKey ruleKey = RuleKey.of("foo", "bar");
DefaultIssue issue = new DefaultIssue()
.setRuleKey(ruleKey);
Map<String, UserDto> assigneesByUuid = nonEmptyAssigneesByUuid();
ruleRepository.add(ruleKey);
analysisMetadata.setAnalysisDate(new Random().nextLong());
assertThatThrownBy(() -> underTest.newIssuesChangesNotification(ImmutableSet.of(issue), assigneesByUuid))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Holder has not been initialized yet");
} |
@Override
public void updateSettings(Map<String, String> settings, AuthenticationInfo subject) {
if (settings == null || settings.isEmpty()) {
LOGGER.error("Cannot update {} with empty settings", this.getClass().getName());
return;
}
        String newNotebookDirectoryPath = StringUtils.EMPTY;
        if (settings.containsKey("Notebook Path")) {
            newNotebookDirectoryPath = settings.get("Notebook Path");
        }
        if (StringUtils.isBlank(newNotebookDirectoryPath)) {
            LOGGER.error("Notebook path is invalid");
            return;
        }
        LOGGER.warn("{} will change notebook dir from {} to {}",
                subject.getUser(), this.rootNotebookFolder, newNotebookDirectoryPath);
        try {
            setNotebookDirectory(newNotebookDirectoryPath);
} catch (IOException e) {
LOGGER.error("Cannot update notebook directory", e);
}
} | @Test
void testUpdateSettings() throws IOException {
List<NotebookRepoSettingsInfo> repoSettings = notebookRepo.getSettings(AuthenticationInfo.ANONYMOUS);
assertEquals(1, repoSettings.size());
NotebookRepoSettingsInfo settingInfo = repoSettings.get(0);
assertEquals("Notebook Path", settingInfo.name);
assertEquals(notebookRepo.rootNotebookFolder, settingInfo.selected);
createNewNote("{}", "id2", "my_project/name2");
assertEquals(1, notebookRepo.list(AuthenticationInfo.ANONYMOUS).size());
String newNotebookDir = "/tmp/zeppelin/vfs_notebookrepo2";
FileUtils.forceMkdir(new File(newNotebookDir));
Map<String, String> newSettings = ImmutableMap.of("Notebook Path", newNotebookDir);
notebookRepo.updateSettings(newSettings, AuthenticationInfo.ANONYMOUS);
assertEquals(0, notebookRepo.list(AuthenticationInfo.ANONYMOUS).size());
} |
@Override
public Failure parse(XmlPullParser parser, int initialDepth, XmlEnvironment xmlEnvironment) throws XmlPullParserException, IOException, SmackParsingException {
Failure.CompressFailureError compressFailureError = null;
StanzaError stanzaError = null;
XmlEnvironment failureXmlEnvironment = XmlEnvironment.from(parser, xmlEnvironment);
outerloop: while (true) {
XmlPullParser.Event eventType = parser.next();
switch (eventType) {
case START_ELEMENT:
String name = parser.getName();
String namespace = parser.getNamespace();
switch (namespace) {
case Failure.NAMESPACE:
compressFailureError = Failure.CompressFailureError.valueOf(name.replace("-", "_"));
if (compressFailureError == null) {
LOGGER.warning("Unknown element in " + Failure.NAMESPACE + ": " + name);
}
break;
case StreamOpen.CLIENT_NAMESPACE:
case StreamOpen.SERVER_NAMESPACE:
switch (name) {
case StanzaError.ERROR:
stanzaError = PacketParserUtils.parseError(parser, failureXmlEnvironment);
break;
default:
LOGGER.warning("Unknown element in " + namespace + ": " + name);
break;
}
break;
}
break;
case END_ELEMENT:
if (parser.getDepth() == initialDepth) {
break outerloop;
}
break;
default: // fall out
}
}
return new Failure(compressFailureError, stanzaError);
} | @Test
public void withStanzaErrrorFailureTest() throws Exception {
final String xml = "<failure xmlns='http://jabber.org/protocol/compress'>"
+ "<setup-failed/>"
+ "<error xmlns='jabber:client' type='modify'>"
+ "<bad-request xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>"
+ "</error>"
+ "</failure>";
final XmlPullParser parser = PacketParserUtils.getParserFor(xml);
final Failure failure = FailureProvider.INSTANCE.parse(parser);
assertEquals(Failure.CompressFailureError.setup_failed, failure.getCompressFailureError());
final StanzaError error = failure.getStanzaError();
assertEquals(Condition.bad_request, error.getCondition());
} |
public static void checkState(boolean isValid, String message) throws IllegalStateException {
if (!isValid) {
throw new IllegalStateException(message);
}
} | @Test
public void testCheckStateWithoutArguments() {
try {
Preconditions.checkState(true, "Test message");
} catch (IllegalStateException e) {
Assert.fail("Should not throw exception when isValid is true");
}
try {
Preconditions.checkState(false, "Test message");
Assert.fail("Should throw exception when isValid is false");
} catch (IllegalStateException e) {
Assert.assertEquals("Should format message", "Test message", e.getMessage());
}
} |
@SuppressWarnings("unchecked")
public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) {
final BiFunction<Visitor<?, ?>, Schema, Object> handler = HANDLER.get(schema.type());
if (handler == null) {
throw new UnsupportedOperationException("Unsupported schema type: " + schema.type());
}
return (S) handler.apply(visitor, schema);
} | @Test
public void shouldVisitBytes() {
// Given:
final Schema schema = Schema.OPTIONAL_BYTES_SCHEMA;
when(visitor.visitBytes(any())).thenReturn("Expected");
// When:
final String result = SchemaWalker.visit(schema, visitor);
// Then:
verify(visitor).visitBytes(same(schema));
assertThat(result, is("Expected"));
} |
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
String tableNameSuffix = String.valueOf(doSharding(parseDate(shardingValue.getValue())));
return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, tableNameSuffix, shardingValue.getDataNodeInfo()).orElse(null);
} | @Test
void assertRangeDoShardingWithAllRange() {
List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3", "t_order_4");
Collection<String> actual = shardingAlgorithm.doSharding(availableTargetNames,
new RangeShardingValue<>("t_order", "create_time", DATA_NODE_INFO, Range.closed("2019-01-01 00:00:00", "2020-01-01 00:00:15")));
assertThat(actual.size(), is(5));
} |
public PrivateKey convertPrivateKey(final String privatePemKey) {
StringReader keyReader = new StringReader(privatePemKey);
try {
PrivateKeyInfo privateKeyInfo = PrivateKeyInfo
.getInstance(new PEMParser(keyReader).readObject());
return new JcaPEMKeyConverter().getPrivateKey(privateKeyInfo);
} catch (IOException exception) {
throw new RuntimeException(exception);
}
} | @Test
void givenEmptyPrivateKey_whenConvertPrivateKey_thenThrowRuntimeException() {
// Given
String emptyPrivatePemKey = "";
// When & Then
assertThatThrownBy(() -> KeyConverter.convertPrivateKey(emptyPrivatePemKey))
.isInstanceOf(RuntimeException.class)
.hasCauseInstanceOf(PEMException.class)
.hasMessageContaining("PEMException");
} |
Path getImageDirectory(ImageReference imageReference) {
// Replace ':' and '@' with '!' to avoid directory-naming restrictions
String replacedReference =
imageReference.toStringWithQualifier().replace(':', '!').replace('@', '!');
// Split image reference on '/' to build directory structure
Iterable<String> directories = Splitter.on('/').split(replacedReference);
Path destination = getImagesDirectory();
for (String dir : directories) {
destination = destination.resolve(dir);
}
return destination;
} | @Test
public void testGetImageDirectory() throws InvalidImageReferenceException {
Path imagesDirectory = Paths.get("cache", "directory", "images");
Assert.assertEquals(imagesDirectory, TEST_CACHE_STORAGE_FILES.getImagesDirectory());
Assert.assertEquals(
imagesDirectory.resolve("reg.istry/repo/sitory!tag"),
TEST_CACHE_STORAGE_FILES.getImageDirectory(
ImageReference.parse("reg.istry/repo/sitory:tag")));
Assert.assertEquals(
imagesDirectory.resolve(
"reg.istry/repo!sha256!aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
TEST_CACHE_STORAGE_FILES.getImageDirectory(
ImageReference.parse(
"reg.istry/repo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")));
Assert.assertEquals(
imagesDirectory.resolve("reg.istry!5000/repo/sitory!tag"),
TEST_CACHE_STORAGE_FILES.getImageDirectory(
ImageReference.parse("reg.istry:5000/repo/sitory:tag")));
} |
public static Field p(String fieldName) {
return SELECT_ALL_FROM_SOURCES_ALL.where(fieldName);
} | @Test
void contains_near_onear_with_annotation() {
{
String q = Q.p("f1").containsNear(A.a("distance", 5), "p1", "p2", "p3")
.build();
assertEquals(q, "yql=select * from sources * where f1 contains ([{\"distance\":5}]near(\"p1\", \"p2\", \"p3\"))");
}
{
String q = Q.p("f1").containsOnear(A.a("distance", 5), "p1", "p2", "p3")
.build();
assertEquals(q, "yql=select * from sources * where f1 contains ([{\"distance\":5}]onear(\"p1\", \"p2\", \"p3\"))");
}
} |
@Override
public Optional<IdentityProvider> getIdentityProvider() {
return Optional.empty();
} | @Test
public void getIdentityProvider() {
assertThat(githubWebhookUserSession.getIdentityProvider()).isEmpty();
} |
@Override
public String getName() {
return "NPM CPE Analyzer";
} | @Test
public void testGetName() {
NpmCPEAnalyzer instance = new NpmCPEAnalyzer();
String expResult = "NPM CPE Analyzer";
String result = instance.getName();
assertEquals(expResult, result);
} |
public int remap(int var, int size) {
if ((var & REMAP_FLAG) != 0) {
return unmask(var);
}
int offset = var - argsSize;
if (offset < 0) {
// self projection for method arguments
return var;
}
if (offset >= mapping.length) {
mapping = Arrays.copyOf(mapping, Math.max(mapping.length * 2, offset + 1));
}
int mappedVar = mapping[offset];
int unmasked = unmask(mappedVar);
boolean isRemapped = ((mappedVar & REMAP_FLAG) != 0);
if (size == 2) {
if ((mappedVar & DOUBLE_SLOT_FLAG) == 0) {
// no double slot mapping over an int slot;
// must re-map unless the int slot is the last used one or there is a free double-ext slot
isRemapped = false;
}
} else {
// size == 1
if ((mappedVar & DOUBLE_SLOT_FLAG_2) != 0) {
// no mapping over a previously 2-slot value
isRemapped = false;
} else if ((mappedVar & DOUBLE_SLOT_FLAG) != 0) {
// the previously second part of the double slot is free to reuse
mapping[unmasked + 1] = (unmasked + 1) | REMAP_FLAG;
}
}
if (!isRemapped) {
mappedVar = remapVar(newVarIdxInternal(size), size);
setMapping(offset, mappedVar, size);
}
unmasked = unmask(mappedVar);
// adjust the mapping pointer if remapping with variable occupying 2 slots
nextMappedVar = Math.max(unmasked + size, nextMappedVar);
return unmasked;
} | @Test
public void remapOutOfOrder() {
assertEquals(0, instance.remap(2, 1));
assertEquals(1, instance.remap(0, 2));
assertEquals(3, instance.remap(3, 1));
} |
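
The flag constants used by remap() are defined elsewhere; the sketch below (a hypothetical bit layout chosen purely for illustration) shows the masking idea: high bits of a mapping entry tag a slot as already remapped and as the first or second half of a two-slot long/double value, and unmask() strips those tags to recover the physical slot index. This also explains the test above: remap(0, 2) reserves two physical slots (1 and 2), which is why the later remap(3, 1) lands on slot 3.

public class SlotFlagSketch {
    // Hypothetical flag layout, for illustration only.
    static final int REMAP_FLAG = 0x40000000;         // entry already holds a valid mapping
    static final int DOUBLE_SLOT_FLAG = 0x20000000;   // first half of a long/double value
    static final int DOUBLE_SLOT_FLAG_2 = 0x10000000; // second half of a long/double value

    static int unmask(int var) {
        return var & ~(REMAP_FLAG | DOUBLE_SLOT_FLAG | DOUBLE_SLOT_FLAG_2);
    }

    public static void main(String[] args) {
        int entry = 1 | REMAP_FLAG | DOUBLE_SLOT_FLAG; // slot 1 holds the first half of a long
        System.out.println(unmask(entry));             // prints 1
        System.out.println((entry & REMAP_FLAG) != 0); // prints true
    }
}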
public int add(Object o) {
HollowTypeMapper typeMapper = getTypeMapper(o.getClass(), null, null);
return typeMapper.write(o);
} | @Test
public void testBasic() throws IOException {
HollowObjectMapper mapper = new HollowObjectMapper(writeStateEngine);
mapper.add(new TypeA("two", 2, new TypeB((short) 20, 20000000L, 2.2f, "two".toCharArray(), new byte[]{2, 2, 2}),
Collections.<TypeC>emptySet()));
mapper.add(new TypeA("one", 1, new TypeB((short) 10, 10000000L, 1.1f, "one".toCharArray(), new byte[]{1, 1, 1}),
new HashSet<TypeC>(Arrays.asList(new TypeC('d', map("one.1", 1, "one.2", 1, 1, "one.3", 1, 2, 3))))));
roundTripSnapshot();
Assert.assertEquals("{\"a1\": \"two\",\"a2\": 2,\"b\": {\"b1\": 20,\"b2\": 20000000,\"b3\": 2.2,\"b4\": \"two\",\"b5\": [2, 2, 2]},\"cList\": []}",
new HollowRecordJsonStringifier(false, true).stringify(readStateEngine, "TypeA", 0));
//System.out.println("---------------------------------");
//System.out.println(new HollowRecordJsonStringifier(false, true).stringify(readStateEngine, "TypeA", 1));
} |
public List<RoleDO> getRoles() {
return ((Collection<?>) getSource())
.stream()
.map(RoleDO.class::cast)
.collect(Collectors.toList());
} | @Test
public void testGetRoles() {
List<RoleDO> roles = batchRoleDeletedEventTest.getRoles();
assertEquals(roles, roleDOList);
List<RoleDO> emptyRoles = batchRoleDeletedEventEmptySourceTest.getRoles();
assertEquals(emptyRoles, emptyRoleDOList);
} |
@Override
public Predicate visit(OrPredicate orPredicate, IndexRegistry indexes) {
Predicate[] originalInnerPredicates = orPredicate.predicates;
if (originalInnerPredicates == null || originalInnerPredicates.length < MINIMUM_NUMBER_OF_OR_TO_REPLACE) {
return orPredicate;
}
InternalListMultiMap<String, Integer> candidates = findAndGroupCandidates(originalInnerPredicates);
if (candidates == null) {
return orPredicate;
}
int toBeRemoved = 0;
boolean modified = false;
Predicate[] target = originalInnerPredicates;
for (Map.Entry<String, List<Integer>> candidate : candidates.entrySet()) {
String attribute = candidate.getKey();
List<Integer> positions = candidate.getValue();
if (positions.size() < MINIMUM_NUMBER_OF_OR_TO_REPLACE) {
continue;
}
if (!modified) {
modified = true;
target = createCopy(target);
}
toBeRemoved = replaceForAttribute(attribute, target, positions, toBeRemoved);
}
Predicate[] newInnerPredicates = replaceInnerPredicates(target, toBeRemoved);
return getOrCreateFinalPredicate(orPredicate, originalInnerPredicates, newInnerPredicates);
} | @Test
public void whenEmptyPredicate_thenReturnItself() {
OrPredicate or = new OrPredicate((Predicate[]) null);
OrPredicate result = (OrPredicate) visitor.visit(or, indexes);
assertThat(or).isEqualTo(result);
} |
@Override
public boolean filter(Message msg) {
if (msg.getSourceInputId() == null) {
return false;
}
for (final Map.Entry<String, String> field : staticFields.getOrDefault(msg.getSourceInputId(), Collections.emptyList())) {
if (!msg.hasField(field.getKey())) {
msg.addField(field.getKey(), field.getValue());
} else {
LOG.debug("Message already contains field [{}]. Not overwriting.", field.getKey());
}
}
return false;
} | @Test
@SuppressForbidden("Executors#newSingleThreadExecutor() is okay for tests")
public void testFilter() throws Exception {
Message msg = messageFactory.createMessage("hello", "junit", Tools.nowUTC());
msg.setSourceInputId("someid");
when(input.getId()).thenReturn("someid");
when(inputService.all()).thenReturn(Collections.singletonList(input));
when(inputService.find(eq("someid"))).thenReturn(input);
when(inputService.getStaticFields(eq(input)))
.thenReturn(Collections.singletonList(Maps.immutableEntry("foo", "bar")));
final StaticFieldFilter filter = new StaticFieldFilter(inputService, new EventBus(), Executors.newSingleThreadScheduledExecutor());
filter.lifecycleChanged(Lifecycle.STARTING);
filter.filter(msg);
assertEquals("hello", msg.getMessage());
assertEquals("junit", msg.getSource());
assertEquals("bar", msg.getField("foo"));
} |
public boolean eval(StructLike data) {
return new EvalVisitor().eval(data);
} | @Test
public void testNotNaN() {
Evaluator evaluator = new Evaluator(STRUCT, notNaN("y"));
assertThat(evaluator.eval(TestHelpers.Row.of(1, Double.NaN, 3))).as("NaN is NaN").isFalse();
assertThat(evaluator.eval(TestHelpers.Row.of(1, 2.0, 3))).as("2 is not NaN").isTrue();
Evaluator structEvaluator = new Evaluator(STRUCT, notNaN("s5.s6.f"));
assertThat(
structEvaluator.eval(
TestHelpers.Row.of(
1, 2, 3, null, TestHelpers.Row.of(TestHelpers.Row.of(Float.NaN)))))
.as("NaN is NaN")
.isFalse();
assertThat(
structEvaluator.eval(
TestHelpers.Row.of(1, 2, 3, null, TestHelpers.Row.of(TestHelpers.Row.of(4F)))))
.as("4F is not NaN")
.isTrue();
} |
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
SAExposureData that = (SAExposureData) o;
return exposureConfig.equals(that.exposureConfig) && properties.toString().equals(that.properties.toString()) && event.equals(that.event) && exposureIdentifier.equals(that.exposureIdentifier);
} | @Test
public void testEquals() {
} |
@Override
public long findRegion(int subpartition, int bufferIndex, boolean loadToCache) {
// first of all, find the region from current writing region group.
RegionGroup regionGroup = currentRegionGroup[subpartition];
if (regionGroup != null) {
long regionOffset =
findRegionInRegionGroup(subpartition, bufferIndex, regionGroup, loadToCache);
if (regionOffset != -1) {
return regionOffset;
}
}
// next, find the region from finished region groups.
TreeMap<Integer, RegionGroup> subpartitionRegionGroupMetaTreeMap =
subpartitionFinishedRegionGroupMetas.get(subpartition);
// all region groups with a minBufferIndex less than or equal to this target buffer index
// may contain the target region.
for (RegionGroup meta :
subpartitionRegionGroupMetaTreeMap.headMap(bufferIndex, true).values()) {
long regionOffset =
findRegionInRegionGroup(subpartition, bufferIndex, meta, loadToCache);
if (regionOffset != -1) {
return regionOffset;
}
}
return -1;
} | @Test
void testFindNonExistentRegion() throws Exception {
CompletableFuture<Void> cachedRegionFuture = new CompletableFuture<>();
try (FileDataIndexSpilledRegionManager<TestingFileDataIndexRegion> spilledRegionManager =
createSpilledRegionManager(
(ignore1, ignore2) -> cachedRegionFuture.complete(null))) {
long regionOffset = spilledRegionManager.findRegion(0, 0, true);
assertThat(regionOffset).isEqualTo(-1);
assertThat(cachedRegionFuture).isNotCompleted();
}
} |
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
// check the magic value
if (!records.hasMatchingMagic(toMagic))
return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
else
// Do in-place validation, offset assignment and maybe set timestamp
return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
} else
return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
} | @Test
public void testRelativeOffsetAssignmentCompressedV1() {
long now = System.currentTimeMillis();
Compression compression = Compression.gzip().build();
MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V1, now, compression);
long offset = 1234567;
checkOffsets(records, 0);
MemoryRecords compressedMessagesWithOffset = new LogValidator(
records,
topicPartition,
time,
CompressionType.GZIP,
compression,
false,
RecordBatch.MAGIC_VALUE_V1,
TimestampType.CREATE_TIME,
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords;
checkOffsets(compressedMessagesWithOffset, offset);
} |
public int compare(Object o1, Object o2, Schema s) {
return compare(o1, o2, s, false);
} | @Test
void compare() {
// Prepare a schema for testing.
Field integerField = new Field("test", Schema.create(Type.INT), null, null);
List<Field> fields = new ArrayList<>();
fields.add(integerField);
Schema record = Schema.createRecord("test", null, null, false);
record.setFields(fields);
ByteArrayOutputStream b1 = new ByteArrayOutputStream(5);
ByteArrayOutputStream b2 = new ByteArrayOutputStream(5);
BinaryEncoder b1Enc = EncoderFactory.get().binaryEncoder(b1, null);
BinaryEncoder b2Enc = EncoderFactory.get().binaryEncoder(b2, null);
// Prepare two different datums
Record testDatum1 = new Record(record);
testDatum1.put(0, 1);
Record testDatum2 = new Record(record);
testDatum2.put(0, 2);
GenericDatumWriter<Record> gWriter = new GenericDatumWriter<>(record);
Integer start1 = 0, start2 = 0;
try {
// Write two datums in each stream
// and get the offset length after the first write in each.
gWriter.write(testDatum1, b1Enc);
b1Enc.flush();
start1 = b1.size();
gWriter.write(testDatum1, b1Enc);
b1Enc.flush();
b1.close();
gWriter.write(testDatum2, b2Enc);
b2Enc.flush();
start2 = b2.size();
gWriter.write(testDatum2, b2Enc);
b2Enc.flush();
b2.close();
// Compare to check if offset-based compare works right.
assertEquals(-1, BinaryData.compare(b1.toByteArray(), start1, b2.toByteArray(), start2, record));
} catch (IOException e) {
fail("IOException while writing records to output stream.");
}
} |
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
RedisClient entry = getEntry(node);
RFuture<List<String>> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
List<String> list = syncFuture(f);
return CONVERTER.convert(list.toArray(new String[list.size()]));
} | @Test
public void testGetClientList() {
RedisClusterNode master = getFirstMaster();
List<RedisClientInfo> list = connection.getClientList(master);
assertThat(list.size()).isGreaterThan(10);
} |
public NonClosedTracking<RAW, BASE> trackNonClosed(Input<RAW> rawInput, Input<BASE> baseInput) {
NonClosedTracking<RAW, BASE> tracking = NonClosedTracking.of(rawInput, baseInput);
// 1. match by rule, line, line hash and message
match(tracking, LineAndLineHashAndMessage::new);
// 2. match issues with same rule, same line and same line hash, but not necessarily with same message
match(tracking, LineAndLineHashKey::new);
// 3. detect code moves by comparing blocks of codes
detectCodeMoves(rawInput, baseInput, tracking);
// 4. match issues with same rule, same message and same line hash
match(tracking, LineHashAndMessageKey::new);
// 5. match issues with same rule, same line and same message
match(tracking, LineAndMessageKey::new);
// 6. match issues with same rule and same line hash but different line and different message.
// See SONAR-2812
match(tracking, LineHashKey::new);
return tracking;
} | @Test
public void similar_issues_except_rule_do_not_match() {
FakeInput baseInput = new FakeInput("H1");
baseInput.createIssueOnLine(1, RULE_SYSTEM_PRINT, "msg");
FakeInput rawInput = new FakeInput("H1");
Issue raw = rawInput.createIssueOnLine(1, RULE_UNUSED_LOCAL_VARIABLE, "msg");
Tracking<Issue, Issue> tracking = tracker.trackNonClosed(rawInput, baseInput);
assertThat(tracking.baseFor(raw)).isNull();
} |
@Override
public Serde<List<?>> getSerde(
final PersistenceSchema schema,
final Map<String, String> formatProperties,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> srClientFactory,
final boolean isKey) {
FormatProperties.validateProperties(name(), formatProperties, getSupportedProperties());
SerdeUtils.throwOnUnsupportedFeatures(schema.features(), supportedFeatures());
if (!schema.columns().isEmpty()) {
throw new KsqlException("The '" + NAME
+ "' format can only be used when no columns are defined. Got: " + schema.columns());
}
return new KsqlVoidSerde<>();
} | @Test
public void shouldReturnVoidSerde() {
// When:
final Serde<List<?>> serde = format.getSerde(schema, formatProps, ksqlConfig, srClientFactory, false);
// Then:
assertThat(serde, instanceOf(KsqlVoidSerde.class));
} |
@Override
public KvMetadata resolveMetadata(
boolean isKey,
List<MappingField> resolvedFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(resolvedFields, isKey);
PortableId portableId = getPortableId(fieldsByPath, options, isKey);
ClassDefinition classDefinition = resolveClassDefinition(portableId, getFields(fieldsByPath),
serializationService.getPortableContext());
List<TableField> fields = new ArrayList<>();
for (Entry<QueryPath, MappingField> entry : fieldsByPath.entrySet()) {
QueryPath path = entry.getKey();
QueryDataType type = entry.getValue().type();
String name = entry.getValue().name();
fields.add(new MapTableField(name, type, false, path));
}
maybeAddDefaultField(isKey, resolvedFields, fields, QueryDataType.OBJECT);
return new KvMetadata(
fields,
GenericQueryTargetDescriptor.DEFAULT,
new PortableUpsertTargetDescriptor(classDefinition)
);
} | @Test
@Parameters({
"true, __key",
"false, this"
})
public void test_resolveMetadata(boolean key, String prefix) {
KvMetadata metadata = INSTANCE.resolveMetadata(
key,
asList(
field("boolean", QueryDataType.BOOLEAN, prefix + ".boolean"),
field("byte", QueryDataType.TINYINT, prefix + ".byte"),
field("short", QueryDataType.SMALLINT, prefix + ".short"),
field("int", QueryDataType.INT, prefix + ".int"),
field("long", QueryDataType.BIGINT, prefix + ".long"),
field("float", QueryDataType.REAL, prefix + ".float"),
field("double", QueryDataType.DOUBLE, prefix + ".double"),
field("decimal", QueryDataType.DECIMAL, prefix + ".decimal"),
field("string", QueryDataType.VARCHAR, prefix + ".string"),
field("time", QueryDataType.TIME, prefix + ".time"),
field("date", QueryDataType.DATE, prefix + ".date"),
field("timestamp", QueryDataType.TIMESTAMP, prefix + ".timestamp"),
field("timestampTz", QueryDataType.TIMESTAMP_WITH_TZ_OFFSET_DATE_TIME, prefix + ".timestampTz")
),
ImmutableMap.of(
(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID), "1",
(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID), "2",
(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION), "3"
),
new DefaultSerializationServiceBuilder().build()
);
assertThat(metadata.getFields()).containsExactly(
new MapTableField("boolean", QueryDataType.BOOLEAN, false, QueryPath.create(prefix + ".boolean")),
new MapTableField("byte", QueryDataType.TINYINT, false, QueryPath.create(prefix + ".byte")),
new MapTableField("short", QueryDataType.SMALLINT, false, QueryPath.create(prefix + ".short")),
new MapTableField("int", QueryDataType.INT, false, QueryPath.create(prefix + ".int")),
new MapTableField("long", QueryDataType.BIGINT, false, QueryPath.create(prefix + ".long")),
new MapTableField("float", QueryDataType.REAL, false, QueryPath.create(prefix + ".float")),
new MapTableField("double", QueryDataType.DOUBLE, false, QueryPath.create(prefix + ".double")),
new MapTableField("decimal", QueryDataType.DECIMAL, false, QueryPath.create(prefix + ".decimal")),
new MapTableField("string", QueryDataType.VARCHAR, false, QueryPath.create(prefix + ".string")),
new MapTableField("time", QueryDataType.TIME, false, QueryPath.create(prefix + ".time")),
new MapTableField("date", QueryDataType.DATE, false, QueryPath.create(prefix + ".date")),
new MapTableField("timestamp", QueryDataType.TIMESTAMP, false, QueryPath.create(prefix + ".timestamp")),
new MapTableField(
"timestampTz",
QueryDataType.TIMESTAMP_WITH_TZ_OFFSET_DATE_TIME,
false,
QueryPath.create(prefix + ".timestampTz")
),
new MapTableField(prefix, QueryDataType.OBJECT, true, QueryPath.create(prefix))
);
assertThat(metadata.getQueryTargetDescriptor()).isEqualTo(GenericQueryTargetDescriptor.DEFAULT);
assertThat(metadata.getUpsertTargetDescriptor())
.isEqualToComparingFieldByField(new PortableUpsertTargetDescriptor(
new ClassDefinitionBuilder(1, 2, 3)
.addBooleanField("boolean")
.addByteField("byte")
.addShortField("short")
.addIntField("int")
.addLongField("long")
.addFloatField("float")
.addDoubleField("double")
.addDecimalField("decimal")
.addStringField("string")
.addTimeField("time")
.addDateField("date")
.addTimestampField("timestamp")
.addTimestampWithTimezoneField("timestampTz")
.build()
));
} |
public MyNewIssuesNotification newMyNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
verifyAssigneesByUuid(assigneesByUuid);
return new MyNewIssuesNotification(new DetailsSupplierImpl(assigneesByUuid));
} | @Test
public void newMyNewIssuesNotification_DetailsSupplier_getRuleDefinitionByRuleKey_returns_name_and_language_from_RuleRepository() {
RuleKey rulekey1 = RuleKey.of("foo", "bar");
RuleKey rulekey2 = RuleKey.of("foo", "donut");
RuleKey rulekey3 = RuleKey.of("no", "language");
DumbRule rule1 = ruleRepository.add(rulekey1).setName("rule1").setLanguage("lang1");
DumbRule rule2 = ruleRepository.add(rulekey2).setName("rule2").setLanguage("lang2");
DumbRule rule3 = ruleRepository.add(rulekey3).setName("rule3");
MyNewIssuesNotification underTest = this.underTest.newMyNewIssuesNotification(emptyMap());
DetailsSupplier detailsSupplier = readDetailsSupplier(underTest);
assertThat(detailsSupplier.getRuleDefinitionByRuleKey(rulekey1))
.contains(new RuleDefinition(rule1.getName(), rule1.getLanguage()));
assertThat(detailsSupplier.getRuleDefinitionByRuleKey(rulekey2))
.contains(new RuleDefinition(rule2.getName(), rule2.getLanguage()));
assertThat(detailsSupplier.getRuleDefinitionByRuleKey(rulekey3))
.contains(new RuleDefinition(rule3.getName(), null));
assertThat(detailsSupplier.getRuleDefinitionByRuleKey(RuleKey.of("donut", "foo")))
.isEmpty();
} |
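
The test above pins down the supplier's contract: a name plus a nullable language for known keys, and an empty result for unknown ones. A hypothetical sketch of such a lookup follows; the findByKey repository method is an assumption, not the real SonarQube API.

import java.util.Optional;

Optional<RuleDefinition> getRuleDefinitionByRuleKey(RuleKey ruleKey) {
    // Unknown keys yield Optional.empty(); the language may legitimately be null.
    return ruleRepository.findByKey(ruleKey)
            .map(rule -> new RuleDefinition(rule.getName(), rule.getLanguage()));
}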
public AStarBidirection setApproximation(WeightApproximator approx) {
weightApprox = new BalancedWeightApproximator(approx);
return this;
} | @Test
void infeasibleApproximator_noException() {
// An infeasible approximator means that the weight of the entries polled from the priority queue does not
// increase monotonically. Here we deliberately choose the approximations and edge distances such that the fwd
// search first explores the 0-1-2-3-4 branch, then polls node 10 which causes an update for node 2, but the
// search stops before node 2 is polled again such that nodes 3 and 4 cannot be updated, because the bwd search
// already arrived and the stopping criterion is fulfilled. Node 2 still remains in the queue at this point.
// This means the resulting path contains the invalid search tree branch 2(old)-3-4 and is not the shortest path,
// because the SPTEntry for node 3 still points to the outdated/deleted entry for node 2.
// We do not expect an exception, though, because for an infeasible approximator we cannot expect optimal paths.
DecimalEncodedValue speedEnc = new DecimalEncodedValueImpl("speed", 2, 1, true);
EncodingManager em = EncodingManager.start().add(speedEnc).build();
BaseGraph graph = new BaseGraph.Builder(em).create();
// 0-1----2-3-4----5-6-7-8-9
//    \  /
//     10
graph.edge(0, 1).set(speedEnc, 1, 0).setDistance(100);
// the distance 1-2 is longer than 1-10-2
// we deliberately use 2-1 as storage direction, even though the edge points from 1 to 2, because this way
// we can reproduce the 'Calculating time should not require to read speed from edge in wrong direction' error
// from #2600
graph.edge(2, 1).setDistance(300).set(speedEnc, 0, 1);
graph.edge(2, 3).set(speedEnc, 1, 0).setDistance(100);
graph.edge(3, 4).set(speedEnc, 1, 0).setDistance(100);
// distance 4-5 is very long
graph.edge(4, 5).set(speedEnc, 1, 0).setDistance(10_000);
graph.edge(5, 6).set(speedEnc, 1, 0).setDistance(100);
graph.edge(6, 7).set(speedEnc, 1, 0).setDistance(100);
graph.edge(7, 8).set(speedEnc, 1, 0).setDistance(100);
graph.edge(8, 9).set(speedEnc, 1, 0).setDistance(100);
graph.edge(1, 10).set(speedEnc, 1, 0).setDistance(100);
graph.edge(10, 2).set(speedEnc, 1, 0).setDistance(100);
Weighting weighting = new SpeedWeighting(speedEnc);
AStarBidirection algo = new AStarBidirection(graph, weighting, TraversalMode.NODE_BASED);
algo.setApproximation(new InfeasibleApproximator());
Path path = algo.calcPath(0, 9);
// the path is not the shortest path, but the suboptimal one we get for this approximator
assertEquals(11_000, path.getDistance());
assertEquals(IntArrayList.from(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), path.calcNodes());
// this returns the correct path
Dijkstra dijkstra = new Dijkstra(graph, weighting, TraversalMode.NODE_BASED);
Path optimalPath = dijkstra.calcPath(0, 9);
assertEquals(10_900, optimalPath.getDistance());
assertEquals(IntArrayList.from(0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9), optimalPath.calcNodes());
} |
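
The BalancedWeightApproximator created in setApproximation combines a forward estimate (towards the target) with a reverse estimate (towards the source) so that the two searches use consistent potentials. The class below is a hypothetical sketch of that balancing idea, following Ikeda et al.; names and details differ from GraphHopper's actual implementation.

import com.graphhopper.routing.weighting.WeightApproximator;

class BalancedApproximation {
    private final WeightApproximator forward; // estimates remaining weight to the target
    private final WeightApproximator reverse; // estimates remaining weight to the source

    BalancedApproximation(WeightApproximator forward, WeightApproximator reverse) {
        this.forward = forward;
        this.reverse = reverse;
    }

    // forwardPotential(v) + backwardPotential(v) == 0 for every node, which keeps
    // the forward and backward searches consistent with each other.
    double forwardPotential(int node) {
        return 0.5 * (forward.approximate(node) - reverse.approximate(node));
    }

    double backwardPotential(int node) {
        return 0.5 * (reverse.approximate(node) - forward.approximate(node));
    }
}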
@Override
public ExecutionResult toExecutionResult(String responseBody) {
ExecutionResult executionResult = new ExecutionResult();
ArrayList<String> exceptions = new ArrayList<>();
try {
Map result = (Map) GSON.fromJson(responseBody, Object.class);
if (!(result.containsKey("success") && result.get("success") instanceof Boolean)) {
exceptions.add("The Json for Execution Result must contain a not-null 'success' field of type Boolean");
}
if (result.containsKey("message") && (!(result.get("message") instanceof String))) {
exceptions.add("If the 'message' key is present in the Json for Execution Result, it must contain a not-null message of type String");
}
if (!exceptions.isEmpty()) {
throw new RuntimeException(StringUtils.join(exceptions, ", "));
}
if ((Boolean) result.get("success")) {
executionResult.withSuccessMessages((String) result.get("message"));
} else {
executionResult.withErrorMessages((String) result.get("message"));
}
return executionResult;
} catch (Exception e) {
LOGGER.error("Error occurred while converting the Json to Execution Result. Error: {}. The Json received was '{}'.", e.getMessage(), responseBody);
throw new RuntimeException(String.format("Error occurred while converting the Json to Execution Result. Error: %s.", e.getMessage()));
}
} | @Test
public void shouldConstructExecutionResultFromSuccessfulExecutionResponse() {
GoPluginApiResponse response = mock(GoPluginApiResponse.class);
when(response.responseBody()).thenReturn("{\"success\":true,\"message\":\"message1\"}");
ExecutionResult result = new JsonBasedTaskExtensionHandler_V1().toExecutionResult(response.responseBody());
assertThat(result.isSuccessful(), is(true));
assertThat(result.getMessagesForDisplay(), is("message1"));
} |
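
The failure branch is symmetric. A sketch of a companion test (the test name is ours; it relies only on the APIs already used above):

@Test
public void shouldConstructExecutionResultFromFailedExecutionResponse() {
    ExecutionResult result = new JsonBasedTaskExtensionHandler_V1()
            .toExecutionResult("{\"success\":false,\"message\":\"script exited 1\"}");
    assertThat(result.isSuccessful(), is(false));
    assertThat(result.getMessagesForDisplay(), is("script exited 1"));
}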