focal_method | test_case |
---|---|
public void editOptions( int index ) {
if ( index + 1 == optionsParameterTree.getRows() ) {
// editing the last row, so add a new one below
Object[][] values = optionsParameterTree.getValues();
Object[] row = values[ values.length - 1 ];
if ( row != null && ( !StringUtils.isEmpty( (String) row[ 0 ] ) || !StringUtils.isEmpty( (String) row[ 1 ] ) ) ) {
// actually have something in the current last row
XulTreeRow newRow = optionsParameterTree.getRootChildren().addNewRow();
newRow.addCellText( 0, "" );
newRow.addCellText( 1, "" );
}
}
} | @Test
public void testEditOptions() throws Exception {
} |
public final StringSubject hasMessageThat() {
StandardSubjectBuilder check = check("getMessage()");
if (actual instanceof ErrorWithFacts && ((ErrorWithFacts) actual).facts().size() > 1) {
check =
check.withMessage(
"(Note from Truth: When possible, instead of asserting on the full message, assert"
+ " about individual facts by using ExpectFailure.assertThat.)");
}
return check.that(checkNotNull(actual).getMessage());
} | @Test
public void hasMessageThat_NullMessageHasMessage_failure() {
NullPointerException npe = new NullPointerException(null);
expectFailureWhenTestingThat(npe).hasMessageThat().isEqualTo("message");
} |
public static Map<String, String> parseMap(String str) {
if (str != null) {
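      // Records are key=value pairs separated by commas and/or whitespace;
      // keys and values are trimmed before insertion.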
StringTokenizer tok = new StringTokenizer(str, ", \t\n\r");
HashMap<String, String> map = new HashMap<>();
while (tok.hasMoreTokens()) {
String record = tok.nextToken();
int endIndex = record.indexOf('=');
if (endIndex == -1) {
throw new RuntimeException("Failed to parse Map from String");
}
String key = record.substring(0, endIndex);
String value = record.substring(endIndex + 1);
map.put(key.trim(), value.trim());
}
return Collections.unmodifiableMap(map);
} else {
return Collections.emptyMap();
}
} | @Test
public void testParseMapEmptyValue() {
String stringMap = "key1=value1\n" +
"key2=";
Map<String, String> m = parseMap(stringMap);
assertThat(m, aMapWithSize(2));
assertThat(m, hasEntry("key1", "value1"));
assertThat(m, hasEntry("key2", ""));
} |
@Override
public int getResultSetHoldability() {
return 0;
} | @Test
void assertGetResultSetHoldability() {
assertThat(metaData.getResultSetHoldability(), is(0));
} |
@Override
public IClonableStepAnalyzer newInstance() {
return new JsonInputAnalyzer();
} | @Test
public void testNewInstance(){
JsonInputAnalyzer analyzer = new JsonInputAnalyzer();
assertTrue( analyzer.newInstance().getClass().equals(JsonInputAnalyzer.class));
} |
public static boolean isRuncContainerRequested(Configuration daemonConf,
Map<String, String> env) {
String type = (env == null)
? null : env.get(ContainerRuntimeConstants.ENV_CONTAINER_TYPE);
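    // Fall back to the daemon-wide configured runtime type when the
    // container environment does not specify one.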
if (type == null) {
type = daemonConf.get(YarnConfiguration.LINUX_CONTAINER_RUNTIME_TYPE);
}
return type != null && type.equals(
ContainerRuntimeConstants.CONTAINER_RUNTIME_RUNC);
} | @Test
public void testSelectRuncContainerTypeWithDefaultSet() {
Map<String, String> envRuncType = new HashMap<>();
Map<String, String> envOtherType = new HashMap<>();
conf.set(YarnConfiguration.LINUX_CONTAINER_RUNTIME_TYPE, "default");
envRuncType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE,
ContainerRuntimeConstants.CONTAINER_RUNTIME_RUNC);
envOtherType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "other");
Assert.assertFalse(RuncContainerRuntime
.isRuncContainerRequested(conf, null));
Assert.assertTrue(RuncContainerRuntime
.isRuncContainerRequested(conf, envRuncType));
Assert.assertFalse(RuncContainerRuntime
.isRuncContainerRequested(conf, envOtherType));
} |
public void lockClusterState(ClusterStateChange stateChange, Address initiator, UUID txnId, long leaseTime,
int memberListVersion, long partitionStateStamp) {
Preconditions.checkNotNull(stateChange);
clusterServiceLock.lock();
try {
if (!node.getNodeExtension().isStartCompleted()) {
throw new IllegalStateException("Can not lock cluster state! Startup is not completed yet!");
}
if (node.getClusterService().getClusterJoinManager().isMastershipClaimInProgress()) {
throw new IllegalStateException("Can not lock cluster state! Mastership claim is in progress!");
}
if (stateChange.isOfType(Version.class)) {
validateNodeCompatibleWith((Version) stateChange.getNewState());
validateClusterVersionChange((Version) stateChange.getNewState());
}
checkMemberListVersion(memberListVersion);
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
lockOrExtendClusterState(initiator, txnId, leaseTime);
try {
// check the migration status and partition-state stamp again;
// if the partition state has changed, release the lock and fail
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
} catch (IllegalStateException e) {
stateLockRef.set(LockGuard.NOT_LOCKED);
throw e;
}
} finally {
clusterServiceLock.unlock();
}
} | @Test(expected = IllegalStateException.class)
public void test_lockClusterState_forFrozenState_whenHasOnGoingMigration() throws Exception {
when(partitionService.hasOnGoingMigrationLocal()).thenReturn(true);
Address initiator = newAddress();
clusterStateManager.lockClusterState(ClusterStateChange.from(FROZEN), initiator, TXN, 1000, MEMBERLIST_VERSION,
PARTITION_STAMP);
} |
@Override
public synchronized FunctionSource getFunction(final List<SqlType> argTypeList) {
final List<SqlArgument> args = argTypeList.stream()
.map((type) -> type == null ? null : SqlArgument.of(type))
.collect(Collectors.toList());
final UdafFactoryInvoker creator = udfIndex.getFunction(args);
if (creator == null) {
throw new KsqlException("There is no aggregate function with name='" + getName()
+ "' that has arguments of type="
+ argTypeList.stream()
.map(SqlType::baseType)
.map(Objects::toString)
.collect(Collectors.joining(",")));
}
final boolean isFactoryVariadic = creator.literalParams().stream()
.anyMatch(ParameterInfo::isVariadic);
/* There can only be one variadic argument, so we know either the column args are bounded
or the initial args are bounded. */
final int numInitArgs;
final int numSignatureInitArgs = creator.literalParams().size();
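    // A variadic factory has a fixed number of column parameters, so the
    // init-arg count is the supplied arguments minus those column parameters.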
if (isFactoryVariadic) {
numInitArgs = argTypeList.size() - (creator.parameterInfo().size() - numSignatureInitArgs);
} else {
numInitArgs = numSignatureInitArgs;
}
return new FunctionSource(
numInitArgs,
(initArgs) -> creator.createFunction(initArgs, args)
);
} | @Test
public void shouldHandleNullLiteralParams() {
// When:
AggregateFunctionFactory.FunctionSource result = functionFactory.getFunction(
Arrays.asList(SqlTypes.STRING, null, SqlTypes.INTEGER,
SqlTypes.BIGINT, SqlTypes.DOUBLE, SqlTypes.STRING)
);
int initArgs = result.initArgs;
result.source.apply(new AggregateFunctionInitArguments(
Collections.singletonList(0),
ImmutableMap.of(),
Arrays.asList(null, 5, 4L, 2.3d, "s")
));
// Then:
assertEquals(5, initArgs);
verify(functionIndex).getFunction(Arrays.asList(SqlArgument.of(SqlTypes.STRING), null,
SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.BIGINT),
SqlArgument.of(SqlTypes.DOUBLE), SqlArgument.of(SqlTypes.STRING)));
} |
@Override
public void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException {
final List<ClassNameInformation> classNames = collectClassNames(dependency);
final String fileName = dependency.getFileName().toLowerCase();
if ((classNames.isEmpty()
&& (fileName.endsWith("-sources.jar")
|| fileName.endsWith("-javadoc.jar")
|| fileName.endsWith("-src.jar")
|| fileName.endsWith("-doc.jar")
|| isMacOSMetaDataFile(dependency, engine)))
|| !isZipFile(dependency)) {
engine.removeDependency(dependency);
return;
}
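    // Collect evidence from the manifest, the pom, and package names; a failure
    // in one source is recorded and only re-thrown after the others have run.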
Exception exception = null;
boolean hasManifest = false;
try {
hasManifest = parseManifest(dependency, classNames);
} catch (IOException ex) {
LOGGER.debug("Invalid Manifest", ex);
exception = ex;
}
boolean hasPOM = false;
try {
hasPOM = analyzePOM(dependency, classNames, engine);
} catch (AnalysisException ex) {
LOGGER.debug("Error parsing pom.xml", ex);
exception = ex;
}
final boolean addPackagesAsEvidence = !(hasManifest && hasPOM);
analyzePackageNames(classNames, dependency, addPackagesAsEvidence);
dependency.setEcosystem(DEPENDENCY_ECOSYSTEM);
if (exception != null) {
throw new AnalysisException(String.format("An error occurred extracting evidence from "
+ "%s, analysis may be incomplete; please see the log for more details.",
dependency.getDisplayFileName()), exception);
}
} | @Test
public void testAnalyseDependency_SkipsNonZipFile() throws Exception {
JarAnalyzer instance = new JarAnalyzer();
Dependency textFileWithJarExtension = new Dependency();
textFileWithJarExtension
.setActualFilePath(BaseTest.getResourceAsFile(this, "test.properties").getAbsolutePath());
textFileWithJarExtension.setFileName("textFileWithJarExtension.jar");
try (Engine engine = new Engine(getSettings())) {
engine.setDependencies(Collections.singletonList(textFileWithJarExtension));
instance.analyzeDependency(textFileWithJarExtension, engine);
assertEquals(0, engine.getDependencies().length);
}
} |
@Override
public String localize(final String key, final String table) {
final Key lookup = new Key(table, key);
if(!cache.contains(lookup)) {
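      // Load the strings table lazily on the first miss; the table is marked
      // as loaded even on failure so a broken table is not re-read per key.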
if(!tables.contains(table)) {
try {
this.load(table);
}
catch(IOException e) {
log.warn(String.format("Failure loading properties from %s.strings. %s", table, e.getMessage()));
}
finally {
tables.add(table);
}
}
}
if(cache.contains(lookup)) {
return cache.get(lookup);
}
return key;
} | @Test
public void testLocalize() {
final RegexLocale locale = new RegexLocale(new Local(new WorkdirPrefixer().normalize("../i18n/src/main/resources")));
assertEquals("Download failed", locale.localize("Download failed", "Status"));
locale.setDefault("fr");
assertEquals("Échec du téléchargement", locale.localize("Download failed", "Status"));
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void functionInvocationWithKeyword() {
String inputExpression = "date and time( \"2016-07-29T19:47:53\" )";
BaseNode functionBase = parse( inputExpression );
assertThat( functionBase).isInstanceOf(FunctionInvocationNode.class);
assertThat( functionBase.getText()).isEqualTo(inputExpression);
FunctionInvocationNode function = (FunctionInvocationNode) functionBase;
assertThat( function.getName()).isInstanceOf(NameRefNode.class);
assertThat( function.getName().getText()).isEqualTo( "date and time");
assertThat( function.getParams()).isInstanceOf(ListNode.class);
assertThat( function.getParams().getElements()).hasSize(1);
assertThat( function.getParams().getElements().get( 0 )).isInstanceOf(StringNode.class);
} |
@Udf(description = "Converts a number of milliseconds since 1970-01-01 00:00:00 UTC/GMT into the"
+ " string representation of the timestamp in the given format. Single quotes in the"
+ " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'."
+ " The system default time zone is used when no time zone is explicitly provided."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String timestampToString(
@UdfParameter(
description = "Milliseconds since"
+ " January 1, 1970, 00:00:00 UTC/GMT.") final long epochMilli,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
if (formatPattern == null) {
return null;
}
try {
final Timestamp timestamp = new Timestamp(epochMilli);
final DateTimeFormatter formatter = formatters.get(formatPattern);
return timestamp.toInstant()
.atZone(ZoneId.systemDefault())
.format(formatter);
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to format timestamp " + epochMilli
+ " with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void testReturnNullForNullFormat() {
// When:
final String result = udf.timestampToString(1534353043000L,
null);
// Then:
assertThat(result, is(nullValue()));
} |
@Override
public boolean evaluate(Map<String, Object> values) {
boolean toReturn = false;
if (values.containsKey(name)) {
logger.debug("found matching parameter, evaluating... ");
toReturn = evaluation(values.get(name));
}
return toReturn;
} | @Test
void evaluateIntIn() {
ARRAY_TYPE arrayType = ARRAY_TYPE.INT;
List<Object> values = getObjects(arrayType, 4);
KiePMMLSimpleSetPredicate kiePMMLSimpleSetPredicate = getKiePMMLSimpleSetPredicate(values, arrayType,
IN_NOTIN.IN);
Map<String, Object> inputData = new HashMap<>();
inputData.put("FAKE", "234");
assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isFalse();
inputData.put(SIMPLE_SET_PREDICATE_NAME, "432");
assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isFalse();
inputData.put(SIMPLE_SET_PREDICATE_NAME, values.get(0));
assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isTrue();
} |
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
// Set of Visited Schemas
IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
// Stack that contains the Schemas to process and afterVisitNonTerminal
// functions.
// Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
// Using Either<...> has a cost we want to avoid...
Deque<Object> dq = new ArrayDeque<>();
dq.push(start);
Object current;
while ((current = dq.poll()) != null) {
if (current instanceof Supplier) {
// We are executing a non-terminal post visit.
SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
switch (action) {
case CONTINUE:
break;
case SKIP_SIBLINGS:
while (dq.peek() instanceof Schema) {
dq.remove();
}
break;
case TERMINATE:
return visitor.get();
case SKIP_SUBTREE:
default:
throw new UnsupportedOperationException("Invalid action " + action);
}
} else {
Schema schema = (Schema) current;
boolean terminate;
if (visited.containsKey(schema)) {
terminate = visitTerminal(visitor, schema, dq);
} else {
Schema.Type type = schema.getType();
switch (type) {
case ARRAY:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
visited.put(schema, schema);
break;
case RECORD:
terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
.collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
visited.put(schema, schema);
break;
case UNION:
terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
visited.put(schema, schema);
break;
case MAP:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
visited.put(schema, schema);
break;
default:
terminate = visitTerminal(visitor, schema, dq);
break;
}
}
if (terminate) {
return visitor.get();
}
}
}
return visitor.get();
} | @Test
public void testVisit5() {
String s5 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": ["
+ "{\"name\": \"f1\", \"type\": {\"type\": \"record\", \"name\": \"c2\", \"fields\": "
+ "[{\"name\": \"f11\", \"type\": \"int\"}]}}," + "{\"name\": \"f2\", \"type\": \"long\"}" + "]}";
Assert.assertEquals("c1.c2.\"int\"!\"long\"!", Schemas.visit(new Schema.Parser().parse(s5), new TestVisitor()));
} |
@Override
public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
throws IOException, ServletException
{
HttpServletRequest request = (HttpServletRequest)req;
HttpServletResponse response = (HttpServletResponse)res;
// Do not allow framing; OF-997
response.setHeader("X-Frame-Options", JiveGlobals.getProperty("adminConsole.frame-options", "SAMEORIGIN"));
// Reset the defaultLoginPage variable
String loginPage = defaultLoginPage;
if (loginPage == null) {
loginPage = request.getContextPath() + (AuthFactory.isOneTimeAccessTokenEnabled() ? "/loginToken.jsp" : "/login.jsp" );
}
// Get the page we're on:
String url = request.getRequestURI().substring(1);
if (url.startsWith("plugins/")) {
url = url.substring("plugins/".length());
}
// See if it's contained in the exclude list. If so, skip filter execution
boolean doExclude = false;
for (String exclude : excludes) {
if (testURLPassesExclude(url, exclude)) {
doExclude = true;
break;
}
}
if (!doExclude || IP_ACCESS_IGNORE_EXCLUDES.getValue()) {
if (!passesBlocklist(req) || !passesAllowList(req)) {
response.sendError(HttpServletResponse.SC_FORBIDDEN);
return;
}
}
if (!doExclude) {
WebManager manager = new WebManager();
manager.init(request, response, request.getSession(), context);
boolean haveOneTimeToken = manager.getAuthToken() instanceof AuthToken.OneTimeAuthToken;
User loggedUser = manager.getUser();
boolean loggedAdmin = loggedUser == null ? false : adminManager.isUserAdmin(loggedUser.getUsername(), true);
if (!haveOneTimeToken && !loggedAdmin && !authUserFromRequest(request)) {
response.sendRedirect(getRedirectURL(request, loginPage, null));
return;
}
}
chain.doFilter(req, res);
} | @Test
public void nonExcludedUrlWillNotErrorWhenOnAllowlist() throws Exception {
AuthCheckFilter.SERVLET_REQUEST_AUTHENTICATOR.setValue(AdminUserServletAuthenticatorClass.class);
final AuthCheckFilter filter = new AuthCheckFilter(adminManager, loginLimitManager);
AuthCheckFilter.IP_ACCESS_ALLOWLIST.setValue(Collections.singleton(remoteAddr));
filter.doFilter(request, response, filterChain);
verify(response, never()).sendError(anyInt());
verify(filterChain, atLeastOnce()).doFilter(any(), any());
} |
@Override
public MapSettings setProperty(String key, String value) {
return (MapSettings) super.setProperty(key, value);
} | @Test
public void setStringArrayWithEmptyValues() {
Settings settings = new MapSettings(definitions);
settings.setProperty("multi_values", new String[]{"A,B", "", "C,D"});
String[] array = settings.getStringArray("multi_values");
assertThat(array).isEqualTo(new String[]{"A,B", "", "C,D"});
} |
public abstract MySqlSplit toMySqlSplit(); | @Test
public void testFromToSplit() {
final MySqlSnapshotSplit split =
new MySqlSnapshotSplit(
TableId.parse("test_db.test_table"),
"test_db.test_table-1",
new RowType(
Collections.singletonList(
new RowType.RowField("id", new BigIntType()))),
new Object[] {100L},
new Object[] {999L},
BinlogOffset.ofBinlogFilePosition("mysql-bin.000002", 78L),
new HashMap<>());
final MySqlSnapshotSplitState mySqlSplitState = new MySqlSnapshotSplitState(split);
assertEquals(split, mySqlSplitState.toMySqlSplit());
} |
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
OptionParser optParser = new OptionParser();
OptionSpec<Long> offsetOpt = optParser.accepts("offset", "offset for reading input").withRequiredArg()
.ofType(Long.class).defaultsTo(Long.valueOf(0));
OptionSpec<Long> limitOpt = optParser.accepts("limit", "maximum number of records in the outputfile")
.withRequiredArg().ofType(Long.class).defaultsTo(Long.MAX_VALUE);
OptionSpec<Double> fracOpt = optParser.accepts("samplerate", "rate at which records will be collected")
.withRequiredArg().ofType(Double.class).defaultsTo(Double.valueOf(1));
OptionSet opts = optParser.parse(args.toArray(new String[0]));
List<String> nargs = (List<String>) opts.nonOptionArguments();
if (nargs.size() < 2) {
printHelp(out);
return 0;
}
inFiles = Util.getFiles(nargs.subList(0, nargs.size() - 1));
System.out.println("List of input files:");
for (Path p : inFiles) {
System.out.println(p);
}
currentInput = -1;
nextInput();
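    // advance to the first input file; this initializes the reader and schema used below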
OutputStream output = out;
String lastArg = nargs.get(nargs.size() - 1);
if (nargs.size() > 1 && !lastArg.equals("-")) {
output = Util.createFromFS(lastArg);
}
writer = new DataFileWriter<>(new GenericDatumWriter<>());
String codecName = reader.getMetaString(DataFileConstants.CODEC);
CodecFactory codec = (codecName == null) ? CodecFactory.fromString(DataFileConstants.NULL_CODEC)
: CodecFactory.fromString(codecName);
writer.setCodec(codec);
for (String key : reader.getMetaKeys()) {
if (!DataFileWriter.isReservedMeta(key)) {
writer.setMeta(key, reader.getMeta(key));
}
}
writer.create(schema, output);
long offset = opts.valueOf(offsetOpt);
long limit = opts.valueOf(limitOpt);
double samplerate = opts.valueOf(fracOpt);
sampleCounter = 1;
totalCopied = 0;
reuse = null;
if (limit < 0) {
System.out.println("limit has to be non-negative");
this.printHelp(out);
return 1;
}
if (offset < 0) {
System.out.println("offset has to be non-negative");
this.printHelp(out);
return 1;
}
if (samplerate < 0 || samplerate > 1) {
System.out.println("samplerate has to be a number between 0 and 1");
this.printHelp(out);
return 1;
}
skip(offset);
writeRecords(limit, samplerate);
System.out.println(totalCopied + " records written.");
writer.flush();
writer.close();
Util.close(out);
return 0;
} | @Test
void offSetAccuracy() throws Exception {
Map<String, String> metadata = new HashMap<>();
metadata.put("myMetaKey", "myMetaValue");
File input1 = generateData("input1.avro", Type.INT, metadata, DEFLATE);
File output = new File(DIR, name.getMethodName() + ".avro");
output.deleteOnExit();
List<String> args = asList(input1.getAbsolutePath(), "--offset", String.valueOf(OFFSET), "--limit",
String.valueOf(LIMIT_WITHIN_INPUT_BOUNDS), "--samplerate", String.valueOf(SAMPLERATE),
output.getAbsolutePath());
int returnCode = new CatTool().run(System.in, System.out, System.err, args);
assertEquals(0, returnCode);
assertEquals(OFFSET, getFirstIntDatum(output), "output does not start at offset");
} |
public List<String> getPlugins() {
return Collections.unmodifiableList(plugins);
} | @Test
void getPluginsShouldReturnEmptyListWhenNotSet() {
ExtensionInfo info = new ExtensionInfo("org.pf4j.asm.ExtensionInfo");
assertTrue(info.getPlugins().isEmpty());
} |
public static Multimap<String, SourceDescription> fetchSourceDescriptions(
final RemoteHostExecutor remoteHostExecutor
) {
final List<SourceDescription> sourceDescriptions = Maps
.transformValues(
remoteHostExecutor.fetchAllRemoteResults().getLeft(),
SourceDescriptionList.class::cast)
.values()
.stream()
.flatMap((rsl) -> rsl.getSourceDescriptions().stream())
.collect(toImmutableList());
return Multimaps.index(sourceDescriptions, SourceDescription::getName);
} | @SuppressWarnings({"unchecked", "rawtypes"})
@Test
public void itShouldReturnEmptyIfNoRemoteResults() {
// Given
when(augmenter.fetchAllRemoteResults()).thenReturn(new Pair(ImmutableMap.of(), response.keySet()));
Multimap<String, SourceDescription> res = RemoteSourceDescriptionExecutor.fetchSourceDescriptions(augmenter);
response.forEach((key, value) -> value.getSourceDescriptions().forEach(
(sd) -> assertThat(res.get(sd.getName()), hasSize(0))
));
} |
public static String[] getStringArrayBySeparator(String value, String separator) {
String[] strings = StringUtils.splitByWholeSeparator(value, separator);
String[] result = new String[strings.length];
for (int index = 0; index < strings.length; index++) {
result[index] = trim(strings[index]);
}
return result;
} | @Test
public void test_getStringArrayBySeparator_on_input_without_separator() {
String[] result = SettingFormatter.getStringArrayBySeparator(" abc, DeF , ghi", ";");
assertThat(result).containsExactly("abc, DeF , ghi");
} |
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
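              // Literal MAP params are merged recursively, literal STRING_MAP
              // params are merged key-by-key, and all other params are replaced
              // by the merged definition.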
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
} | @Test
public void testMergeDisallowInvalidStartChanged() {
for (ParamMode mode : Arrays.asList(ParamMode.CONSTANT, ParamMode.IMMUTABLE)) {
AssertHelper.assertThrows(
String.format("Should not allow modifying reserved modes, mode [%s]", mode),
MaestroValidationException.class,
String.format("Cannot modify param with mode [%s] for parameter [tomerge]", mode),
new Runnable() {
@SneakyThrows
@Override
public void run() {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
String.format(
"{'tomerge': {'type': 'STRING','value': 'hello', 'mode': '%s'}}", mode));
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap("{'tomerge': {'type': 'STRING', 'value': 'goodbye'}}");
ParamsMergeHelper.mergeParams(
allParams,
paramsToMerge,
new ParamsMergeHelper.MergeContext(ParamSource.LAUNCH, false, false, false));
}
});
}
} |
public void start() {
if (running.get()) {
throw new IllegalStateException(
"Attempting to start a MetricsSystem that is already running");
}
running.set(true);
registerDefaultSources();
registerSinks();
sinks.forEach(Sink::start);
} | @Test
void testMetricsSystemWithTwoSinkConfigurations() {
Properties properties = new Properties();
properties.put("sink.mocksink.class", "org.apache.spark.k8s.operator.metrics.sink.MockSink");
properties.put("sink.mocksink.period", "10");
properties.put("sink.console.class", "org.apache.spark.metrics.sink.ConsoleSink");
MetricsSystem metricsSystem = new MetricsSystem(properties);
metricsSystem.start();
assertEquals(3, metricsSystem.getSinks().size());
} |
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException{
PluginRiskConsent riskConsent = PluginRiskConsent.valueOf(config.get(PLUGINS_RISK_CONSENT).orElse(NOT_ACCEPTED.name()));
if (userSession.hasSession() && userSession.isLoggedIn()
&& userSession.isSystemAdministrator() && riskConsent == REQUIRED) {
redirectTo(response, request.getContextPath() + PLUGINS_RISK_CONSENT_PATH);
}
chain.doFilter(request, response);
} | @Test
public void doFilter_givenNotLoggedInAndConsentAccepted_dontRedirect() throws Exception {
PluginsRiskConsentFilter consentFilter = new PluginsRiskConsentFilter(configuration, userSession);
when(userSession.hasSession()).thenReturn(true);
when(userSession.isLoggedIn()).thenReturn(false);
when(configuration.get(PLUGINS_RISK_CONSENT)).thenReturn(Optional.of(PluginRiskConsent.ACCEPTED.name()));
consentFilter.doFilter(request, response, chain);
verify(response, times(0)).sendRedirect(Mockito.anyString());
} |
public static Optional<Object[]> coerceParams(Class<?> currentIdxActualParameterType, Class<?> expectedParameterType, Object[] actualParams, int i) {
Object actualObject = actualParams[i];
Optional<Object> coercedObject = coerceParam(currentIdxActualParameterType, expectedParameterType,
actualObject);
return coercedObject.map(o -> actualCoerceParams(actualParams, o, i));
} | @Test
void coerceParamsNotConverted() {
Object item = "TESTED_OBJECT";
Object value = Collections.singleton(item);
Object[] actualParams1 = {value, "NOT_DATE"};
Optional<Object[]> retrieved = CoerceUtil.coerceParams(Set.class, BigDecimal.class, actualParams1, 0);
assertNotNull(retrieved);
assertTrue(retrieved.isEmpty());
value = LocalDate.now();
Object[] actualParams2 = {value, "NOT_DATE"};
retrieved = CoerceUtil.coerceParams(LocalDate.class, String.class, actualParams2, 0);
assertNotNull(retrieved);
assertTrue(retrieved.isEmpty());
} |
static void assertNoCucumberAnnotatedMethods(Class<?> clazz) {
for (Method method : clazz.getDeclaredMethods()) {
for (Annotation annotation : method.getAnnotations()) {
if (annotation.annotationType().getName().startsWith("io.cucumber")) {
throw new CucumberException("\n\n" +
"Classes annotated with @RunWith(Cucumber.class) must not define any\n" +
"Step Definition or Hook methods. Their sole purpose is to serve as\n" +
"an entry point for JUnit. Step Definitions and Hooks should be defined\n" +
"in their own classes. This allows them to be reused across features.\n" +
"Offending class: " + clazz + "\n");
}
}
}
} | @Test
void should_throw_cucumber_exception_when_annotated() {
Executable testMethod = () -> Assertions.assertNoCucumberAnnotatedMethods(WithCucumberMethod.class);
CucumberException expectedThrown = assertThrows(CucumberException.class, testMethod);
assertThat(expectedThrown.getMessage(), is(equalTo(
"\n\n" +
"Classes annotated with @RunWith(Cucumber.class) must not define any\n" +
"Step Definition or Hook methods. Their sole purpose is to serve as\n" +
"an entry point for JUnit. Step Definitions and Hooks should be defined\n" +
"in their own classes. This allows them to be reused across features.\n" +
"Offending class: class io.cucumber.junit.AssertionsTest$WithCucumberMethod\n")));
} |
public SmppConfiguration getConfiguration() {
return configuration;
} | @Test
public void constructorSmppConfigurationShouldSetTheConfiguration() {
SmppConfiguration configuration = new SmppConfiguration();
component = new SmppComponent(configuration);
assertSame(configuration, component.getConfiguration());
} |
public float getProtectThreshold() {
return protectThreshold;
} | @Test
void testProtectThresholdDefault() {
final ProtectMode protectMode = new ProtectMode();
assertEquals(0.8f, protectMode.getProtectThreshold(), 0.01f);
} |
public static HeightLock ofBlockHeight(int blockHeight) {
if (blockHeight < 0)
throw new IllegalArgumentException("illegal negative block height: " + blockHeight);
if (blockHeight >= THRESHOLD)
throw new IllegalArgumentException("block height too high: " + blockHeight);
return new HeightLock(blockHeight);
} | @Test
public void ofBlockHeight() {
assertEquals(1, LockTime.ofBlockHeight(1).blockHeight());
assertEquals(499_999_999, LockTime.ofBlockHeight((int) LockTime.THRESHOLD - 1).blockHeight());
} |
@Override
public int hashCode() {
int hash = _hash;
if (hash == 1 && _bytes.length > 0) {
int i = 0;
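      // Unrolled to consume eight bytes per iteration; the multipliers are
      // 31^8 .. 31^1 truncated to int, equivalent to repeated hash = 31 * hash + b.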
for (; i + 7 < _bytes.length; i += 8) {
hash = -1807454463 * hash
+ 1742810335 * _bytes[i]
+ 887503681 * _bytes[i + 1]
+ 28629151 * _bytes[i + 2]
+ 923521 * _bytes[i + 3]
+ 29791 * _bytes[i + 4]
+ 961 * _bytes[i + 5]
+ 31 * _bytes[i + 6]
+ _bytes[i + 7];
}
for (; i < _bytes.length; i++) {
hash = 31 * hash + _bytes[i];
}
_hash = hash;
}
return hash;
} | @Test(description = "hash code may have been used for partitioning so must be stable")
public void testHashCodeWithInterning() {
// ensure to test below 8
byte[] bytes = new byte[ThreadLocalRandom.current().nextInt(8)];
ThreadLocalRandom.current().nextBytes(bytes);
assertEquals(Arrays.hashCode(bytes), new ByteArray(bytes, BYTE_INTERNER).hashCode());
for (int i = 0; i < 10_000; i++) {
bytes = new byte[ThreadLocalRandom.current().nextInt(2048)];
ThreadLocalRandom.current().nextBytes(bytes);
assertEquals(Arrays.hashCode(bytes), new ByteArray(bytes, BYTE_INTERNER).hashCode());
}
} |
public static InetSocketAddress getConnectAddress(ServiceAttributeProvider service,
AlluxioConfiguration conf) {
return InetSocketAddress.createUnresolved(getConnectHost(service, conf),
getPort(service, conf));
} | @Test
public void testGetConnectAddress() throws Exception {
for (ServiceType service : ServiceType.values()) {
if (service == ServiceType.JOB_MASTER_RAFT || service == ServiceType.MASTER_RAFT) {
// Skip the raft services, which don't support separate bind and connect ports.
continue;
}
getConnectAddress(service);
}
} |
@Override
public V load(K key) {
awaitSuccessfulInit();
try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
Iterator<SqlRow> it = queryResult.iterator();
V value = null;
if (it.hasNext()) {
SqlRow sqlRow = it.next();
if (it.hasNext()) {
throw new IllegalStateException("multiple matching rows for a key " + key);
}
// If there is a single column as the value, return that column as the value
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
value = sqlRow.getObject(1);
} else {
//noinspection unchecked
value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
}
}
return value;
}
} | @Test
public void givenRowDoesNotExist_whenLoad_thenReturnNull() {
objectProvider.createObject(mapName, false);
mapLoader = createMapLoader();
GenericRecord genericRecord = mapLoader.load(0);
assertThat(genericRecord).isNull();
} |
public static String buildSplitScanQuery(
Table table, SeaTunnelRowType rowType, boolean isFirstSplit, boolean isLastSplit) {
return buildSplitQuery(table, rowType, isFirstSplit, isLastSplit, -1, true);
} | @Test
public void testSplitScanQuery() {
Table table =
Table.editor()
.tableId(TableId.parse("db1.schema1.table1"))
.addColumn(Column.editor().name("id").type("int8").create())
.create();
String splitScanSQL =
PostgresUtils.buildSplitScanQuery(
table,
new SeaTunnelRowType(
new String[] {"id"}, new SeaTunnelDataType[] {BasicType.LONG_TYPE}),
false,
false);
Assertions.assertEquals(
"SELECT * FROM \"schema1\".\"table1\" WHERE \"id\" >= ? AND NOT (\"id\" = ?) AND \"id\" <= ?",
splitScanSQL);
splitScanSQL =
PostgresUtils.buildSplitScanQuery(
table,
new SeaTunnelRowType(
new String[] {"id"}, new SeaTunnelDataType[] {BasicType.LONG_TYPE}),
true,
true);
Assertions.assertEquals("SELECT * FROM \"schema1\".\"table1\"", splitScanSQL);
splitScanSQL =
PostgresUtils.buildSplitScanQuery(
table,
new SeaTunnelRowType(
new String[] {"id"}, new SeaTunnelDataType[] {BasicType.LONG_TYPE}),
true,
false);
Assertions.assertEquals(
"SELECT * FROM \"schema1\".\"table1\" WHERE \"id\" <= ? AND NOT (\"id\" = ?)",
splitScanSQL);
table =
Table.editor()
.tableId(TableId.parse("db1.schema1.table1"))
.addColumn(Column.editor().name("id").type("uuid").create())
.create();
splitScanSQL =
PostgresUtils.buildSplitScanQuery(
table,
new SeaTunnelRowType(
new String[] {"id"},
new SeaTunnelDataType[] {BasicType.STRING_TYPE}),
false,
true);
Assertions.assertEquals(
"SELECT * FROM \"schema1\".\"table1\" WHERE \"id\"::text >= ?", splitScanSQL);
} |
@Override
public void ignoreView(View view) {
} | @Test
public void ignoreView() {
View view = new View(mApplication);
mSensorsAPI.ignoreView(view);
Object tag = view.getTag(R.id.sensors_analytics_tag_view_ignored);
Assert.assertNull(tag);
} |
@Override
public ResourceReconcileResult tryReconcileClusterResources(
TaskManagerResourceInfoProvider taskManagerResourceInfoProvider) {
ResourceReconcileResult.Builder builder = ResourceReconcileResult.builder();
List<TaskManagerInfo> taskManagersIdleTimeout = new ArrayList<>();
List<TaskManagerInfo> taskManagersNonTimeout = new ArrayList<>();
long currentTime = System.currentTimeMillis();
taskManagerResourceInfoProvider
.getRegisteredTaskManagers()
.forEach(
taskManagerInfo -> {
if (taskManagerInfo.isIdle()
&& currentTime - taskManagerInfo.getIdleSince()
>= taskManagerTimeout.toMilliseconds()) {
taskManagersIdleTimeout.add(taskManagerInfo);
} else {
taskManagersNonTimeout.add(taskManagerInfo);
}
});
List<PendingTaskManager> pendingTaskManagersNonUse = new ArrayList<>();
List<PendingTaskManager> pendingTaskManagersInuse = new ArrayList<>();
taskManagerResourceInfoProvider
.getPendingTaskManagers()
.forEach(
pendingTaskManager -> {
if (pendingTaskManager.getPendingSlotAllocationRecords().isEmpty()) {
pendingTaskManagersNonUse.add(pendingTaskManager);
} else {
pendingTaskManagersInuse.add(pendingTaskManager);
}
});
ResourceProfile resourcesToKeep = ResourceProfile.ZERO;
ResourceProfile resourcesInTotal = ResourceProfile.ZERO;
boolean resourceFulfilled = false;
// check whether the available resources of the in-use (pending) task managers are enough.
ResourceProfile resourcesAvailableOfNonIdle =
getAvailableResourceOfTaskManagers(taskManagersNonTimeout);
ResourceProfile resourcesInTotalOfNonIdle =
getTotalResourceOfTaskManagers(taskManagersNonTimeout);
resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdle);
resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdle);
if (isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
resourceFulfilled = true;
} else {
ResourceProfile resourcesAvailableOfNonIdlePendingTaskManager =
getAvailableResourceOfPendingTaskManagers(pendingTaskManagersInuse);
ResourceProfile resourcesInTotalOfNonIdlePendingTaskManager =
getTotalResourceOfPendingTaskManagers(pendingTaskManagersInuse);
resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdlePendingTaskManager);
resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdlePendingTaskManager);
}
// try to reserve or release unused (pending) task managers
for (TaskManagerInfo taskManagerInfo : taskManagersIdleTimeout) {
if (resourceFulfilled
|| isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
resourceFulfilled = true;
builder.addTaskManagerToRelease(taskManagerInfo);
} else {
resourcesToKeep = resourcesToKeep.merge(taskManagerInfo.getAvailableResource());
resourcesInTotal = resourcesInTotal.merge(taskManagerInfo.getTotalResource());
}
}
for (PendingTaskManager pendingTaskManager : pendingTaskManagersNonUse) {
if (resourceFulfilled
|| isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
resourceFulfilled = true;
builder.addPendingTaskManagerToRelease(pendingTaskManager);
} else {
resourcesToKeep = resourcesToKeep.merge(pendingTaskManager.getUnusedResource());
resourcesInTotal =
resourcesInTotal.merge(pendingTaskManager.getTotalResourceProfile());
}
}
if (!resourceFulfilled) {
// fulfill required resources
tryFulFillRequiredResourcesWithAction(
resourcesToKeep, resourcesInTotal, builder::addPendingTaskManagerToAllocate);
}
return builder.build();
} | @Test
void testRedundantResourceShouldBeFulfilled() {
final TaskManagerInfo taskManagerInUse =
new TestingTaskManagerInfo(
DEFAULT_SLOT_RESOURCE.multiply(5),
DEFAULT_SLOT_RESOURCE.multiply(2),
DEFAULT_SLOT_RESOURCE);
final TestingTaskManagerInfo taskManagerIdle =
new TestingTaskManagerInfo(
DEFAULT_SLOT_RESOURCE.multiply(5),
DEFAULT_SLOT_RESOURCE.multiply(5),
DEFAULT_SLOT_RESOURCE);
taskManagerIdle.setIdleSince(System.currentTimeMillis() - 10);
final PendingTaskManager pendingTaskManagerIdle =
new PendingTaskManager(DEFAULT_SLOT_RESOURCE.multiply(5), NUM_OF_SLOTS);
final TaskManagerResourceInfoProvider taskManagerResourceInfoProvider =
TestingTaskManagerResourceInfoProvider.newBuilder()
.setRegisteredTaskManagersSupplier(
() -> Arrays.asList(taskManagerInUse, taskManagerIdle))
.setPendingTaskManagersSupplier(
() -> Collections.singletonList(pendingTaskManagerIdle))
.build();
DefaultResourceAllocationStrategy strategy = createStrategy(4);
ResourceReconcileResult result =
strategy.tryReconcileClusterResources(taskManagerResourceInfoProvider);
// the pending task manager should be reserved for redundancy
assertThat(result.getPendingTaskManagersToRelease()).isEmpty();
// both the in-use and the idle task manager should be reserved for redundancy
assertThat(result.getTaskManagersToRelease()).isEmpty();
// add two more pending task managers for redundancy since the total
// available resource equals 12 (2 + 5 + 5)
assertThat(result.getPendingTaskManagersToAllocate()).hasSize(2);
} |
public static HttpErrorResponse badRequest(String msg) {
return new HttpErrorResponse(BAD_REQUEST, ErrorCode.BAD_REQUEST.name(), msg);
} | @Test
public void testThatHttpErrorResponseProvidesCorrectErrorMessage() throws IOException {
HttpErrorResponse response = HttpErrorResponse.badRequest("Error doing something");
HandlerTest.assertHttpStatusCodeErrorCodeAndMessage(response, BAD_REQUEST, HttpErrorResponse.ErrorCode.BAD_REQUEST, "Error doing something");
} |
@Override
public EntityExcerpt createExcerpt(InputWithExtractors inputWithExtractors) {
return EntityExcerpt.builder()
.id(ModelId.of(inputWithExtractors.input().getId()))
.type(ModelTypes.INPUT_V1)
.title(inputWithExtractors.input().getTitle())
.build();
} | @Test
public void createExcerpt() {
final ImmutableMap<String, Object> fields = ImmutableMap.of(
"title", "Dashboard Title"
);
final InputImpl input = new InputImpl(fields);
final InputWithExtractors inputWithExtractors = InputWithExtractors.create(input);
final EntityExcerpt excerpt = facade.createExcerpt(inputWithExtractors);
assertThat(excerpt.id()).isEqualTo(ModelId.of(input.getId()));
assertThat(excerpt.type()).isEqualTo(ModelTypes.INPUT_V1);
assertThat(excerpt.title()).isEqualTo(input.getTitle());
} |
SchemaTransformer delegate()
{
return transformer;
} | @Test
void shouldAcceptEmptyStringAsTransformConfiguration()
{
final SchemaTransformerFactory schemaTransformerFactory = new SchemaTransformerFactory("");
assertSame(SchemaTransformer.IDENTITY_TRANSFORMER, schemaTransformerFactory.delegate());
} |
public Span nextSpan(Message message) {
TraceContextOrSamplingFlags extracted =
extractAndClearTraceIdProperties(processorExtractor, message, message);
Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.
// When an upstream context was not present, lookup keys are unlikely to have been added
if (extracted.context() == null && !result.isNoop()) {
// simplify code by re-using an existing MessagingRequest impl
tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
}
return result;
} | @Test void nextSpan_should_clear_propagation_headers() {
Propagation.B3_STRING.injector(SETTER).inject(parent, message);
jmsTracing.nextSpan(message);
assertThat(ITJms.propertiesToMap(message))
.containsOnlyKeys(MessageUtil.JMSXDELIVERYCOUNT); /* always added by getPropertyNames() */
} |
public ConfigKey(String name, String configIdString, String namespace) {
this(name, configIdString, namespace, null);
} | @Test
public void testConfigKey() {
String name = AppConfig.CONFIG_DEF_NAME;
String namespace = AppConfig.CONFIG_DEF_NAMESPACE;
String md5 = AppConfig.CONFIG_DEF_MD5;
String configId = "myId";
ConfigKey<AppConfig> classKey = new ConfigKey<>(AppConfig.class, configId);
assertEquals("Name is set correctly from class", name, classKey.getName());
assertEquals("Namespace is set correctly from class", namespace, classKey.getNamespace());
assertEquals(configId, classKey.getConfigId());
ConfigKey<?> stringKey = new ConfigKey<>(name, configId, namespace);
assertEquals("Key created from class equals key created from strings", stringKey, classKey);
} |
@Override
@VisibleForTesting
public void getLoadBalancedClusterAndUriProperties(String clusterName,
Callback<Pair<ClusterProperties, UriProperties>> callback)
{
boolean waitForUpdatedValue = _timeout > 0;
// if timeout is 0, we must not add the timeout callback, otherwise it would trigger immediately
if (waitForUpdatedValue)
{
Callback<Pair<ClusterProperties, UriProperties>> finalCallback = callback;
try
{
callback =
new TimeoutCallback<>(_executor, _timeout, _unit, new Callback<Pair<ClusterProperties, UriProperties>>()
{
@Override
public void onError(Throwable e)
{
if (e instanceof TimeoutException)
{
handleTimeoutFromGetClusterAndUriProperties(clusterName, finalCallback);
}
else
{
finalCallback.onError(
new ServiceUnavailableException(clusterName, "PEGA_1011. " + e.getMessage(), e));
}
}
@Override
public void onSuccess(Pair<ClusterProperties, UriProperties> result)
{
finalCallback.onSuccess(result);
}
}, "Timeout while fetching cluster");
}
catch (RejectedExecutionException e)
{
_log.debug("Executor rejected new tasks. It has shut down or its queue size has reached max limit");
}
}
getLoadBalancedClusterAndUriProperties(clusterName, waitForUpdatedValue, callback);
} | @Test
@SuppressWarnings("unchecked")
public void testGetLoadBalancedClusterAndUriProperties() throws InterruptedException, ExecutionException
{
MockStore<ServiceProperties> serviceRegistry = new MockStore<>();
MockStore<ClusterProperties> clusterRegistry = new MockStore<>();
MockStore<UriProperties> uriRegistry = new MockStore<>();
SimpleLoadBalancerState state =
spy(new SimpleLoadBalancerState(new SynchronousExecutorService(), uriRegistry, clusterRegistry, serviceRegistry,
new HashMap<>(), new HashMap<>()));
doAnswer(invocation ->
{
Thread.sleep(10);
return null;
}).when(state).listenToCluster(any(), any());
SimpleLoadBalancer loadBalancer = spy(new SimpleLoadBalancer(state, 1, TimeUnit.MILLISECONDS, _d2Executor));
FutureCallback<Pair<ClusterProperties, UriProperties>> callback = spy(new FutureCallback<>());
// case 1: listenToCluster times out, and SimpleLoadBalancer does not hit the cached value
loadBalancer.getLoadBalancedClusterAndUriProperties(CLUSTER1_NAME, callback);
try
{
callback.get();
}
catch (ExecutionException e)
{
Assert.assertTrue(e.getCause() instanceof ServiceUnavailableException);
}
verify(loadBalancer).handleTimeoutFromGetClusterAndUriProperties(eq(CLUSTER1_NAME), eq(callback));
verify(callback).onError(any(ServiceUnavailableException.class));
// case 2: listenToCluster times out, and SimpleLoadBalancer hits the cached value from state
LoadBalancerStateItem<ClusterProperties> clusterItem = new LoadBalancerStateItem<>(CLUSTER_PROPERTIES, 1, 1);
LoadBalancerStateItem<UriProperties> uriItem = new LoadBalancerStateItem<>(URI_PROPERTIES, 1, 1);
when(state.getClusterProperties(CLUSTER1_NAME)).thenReturn(clusterItem);
when(state.getUriProperties(CLUSTER1_NAME)).thenReturn(uriItem);
callback = spy(new FutureCallback<>());
loadBalancer.getLoadBalancedClusterAndUriProperties(CLUSTER1_NAME, callback);
callback.get();
verify(callback).onSuccess(eq(Pair.of(CLUSTER_PROPERTIES, URI_PROPERTIES)));
// case 3: getLoadBalancedClusterAndUriProperties without a timeout
state =
spy(new SimpleLoadBalancerState(new SynchronousExecutorService(), uriRegistry, clusterRegistry, serviceRegistry,
new HashMap<>(), new HashMap<>()));
loadBalancer = spy(new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, _d2Executor));
clusterRegistry.put(CLUSTER1_NAME, CLUSTER_PROPERTIES);
uriRegistry.put(CLUSTER1_NAME, URI_PROPERTIES);
callback = spy(new FutureCallback<>());
loadBalancer.getLoadBalancedClusterAndUriProperties(CLUSTER1_NAME, callback);
callback.get();
verify(loadBalancer, never()).handleTimeoutFromGetClusterAndUriProperties(any(), any());
verify(callback).onSuccess(eq(Pair.of(CLUSTER_PROPERTIES, URI_PROPERTIES)));
} |
@Override
public String toString() {
return toString(false);
} | @Test
public void testToStringWithQuota() {
long fileAndDirCount = 55555;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
QuotaUsage quotaUsage = new QuotaUsage.Builder().
fileAndDirectoryCount(fileAndDirCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected =" 44444 -11111 66665" +
" 11110 ";
assertEquals(expected, quotaUsage.toString());
} |
void processRestartRequests() {
List<RestartRequest> restartRequests;
synchronized (this) {
if (pendingRestartRequests.isEmpty()) {
return;
}
// dequeue into a local list to minimize the work being done within the synchronized block
restartRequests = new ArrayList<>(pendingRestartRequests.values());
pendingRestartRequests.clear();
}
restartRequests.forEach(restartRequest -> {
String stageDescription = "handling restart request for connector " + restartRequest.connectorName();
try (TickThreadStage stage = new TickThreadStage(stageDescription)) {
doRestartConnectorAndTasks(restartRequest);
} catch (Exception e) {
log.warn("Unexpected error while trying to process " + restartRequest + ", the restart request will be skipped.", e);
}
});
} | @Test
public void processRestartRequestsFailureSuppression() {
doNothing().when(member).wakeup();
final String connectorName = "foo";
RestartRequest restartRequest = new RestartRequest(connectorName, false, false);
doThrow(new RuntimeException()).when(herder).buildRestartPlan(restartRequest);
configUpdateListener.onRestartRequest(restartRequest);
assertEquals(1, herder.pendingRestartRequests.size());
herder.processRestartRequests();
assertTrue(herder.pendingRestartRequests.isEmpty());
verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore);
} |
public static <E> BoundedList<E> newArrayBacked(int maxLength) {
return new BoundedList<>(maxLength, new ArrayList<>());
} | @Test
public void testMaxLengthMustNotBeZero() {
assertEquals("Invalid non-positive maxLength of 0",
assertThrows(IllegalArgumentException.class,
() -> BoundedList.newArrayBacked(0)).getMessage());
assertEquals("Invalid non-positive maxLength of 0",
assertThrows(IllegalArgumentException.class,
() -> BoundedList.newArrayBacked(0, 100)).getMessage());
} |
@Override
public Object toKsqlRow(final Schema connectSchema, final Object connectData) {
if (connectData == null) {
return null;
}
return toKsqlValue(schema, connectSchema, connectData, "");
} | @Test
public void shouldThrowIfNestedFieldTypeDoesntMatch() {
// Given:
final Schema structSchema = SchemaBuilder
.struct()
.field("INT", SchemaBuilder.OPTIONAL_INT32_SCHEMA)
.optional()
.build();
final Schema rowSchema = SchemaBuilder
.struct()
.field("STRUCT", structSchema)
.optional()
.build();
final Schema dataStructSchema = SchemaBuilder
.struct()
.field("INT", SchemaBuilder.OPTIONAL_STRING_SCHEMA)
.optional()
.build();
final Schema dataRowSchema = SchemaBuilder
.struct()
.field("STRUCT", dataStructSchema)
.optional()
.build();
final Struct connectStruct = new Struct(dataRowSchema);
final Struct structColumn = new Struct(dataStructSchema);
structColumn.put("INT", "123");
connectStruct.put("STRUCT", structColumn);
final ConnectDataTranslator connectToKsqlTranslator = new ConnectDataTranslator(rowSchema);
// When:
final DataException e = assertThrows(
DataException.class,
() -> connectToKsqlTranslator.toKsqlRow(dataRowSchema, connectStruct)
);
// Then:
assertThat(e.getMessage(), containsString(Schema.Type.INT32.getName()));
assertThat(e.getMessage(), containsString(Schema.Type.STRING.getName()));
assertThat(e.getMessage(), containsString("STRUCT->INT"));
} |
@Override
public boolean syncData(DistroData data, String targetServer) {
if (isNoExistTarget(targetServer)) {
return true;
}
DistroDataRequest request = new DistroDataRequest(data, data.getType());
Member member = memberManager.find(targetServer);
if (checkTargetServerStatusUnhealthy(member)) {
Loggers.DISTRO
.warn("[DISTRO] Cancel distro sync caused by target server {} unhealthy, key: {}", targetServer,
data.getDistroKey());
return false;
}
try {
Response response = clusterRpcClientProxy.sendRequest(member, request);
return checkResponse(response);
} catch (NacosException e) {
Loggers.DISTRO.error("[DISTRO-FAILED] Sync distro data failed! key: {}", data.getDistroKey(), e);
}
return false;
} | @Test
void testSyncDataWithCallbackForMemberNonExist() throws NacosException {
transportAgent.syncData(new DistroData(), member.getAddress(), distroCallback);
verify(distroCallback).onSuccess();
verify(memberManager, never()).find(member.getAddress());
verify(clusterRpcClientProxy, never()).asyncRequest(any(Member.class), any(), any());
} |
public static <InputT, OutputT> DoFnInvoker<InputT, OutputT> invokerFor(
DoFn<InputT, OutputT> fn) {
return ByteBuddyDoFnInvokerFactory.only().newByteBuddyInvoker(fn);
} | @Test
public void testSplittableDoFnWithHasDefaultMethods() throws Exception {
class MockFn extends DoFn<String, String> {
@ProcessElement
public void processElement(
ProcessContext c,
RestrictionTracker<RestrictionWithBoundedDefaultTracker, Void> tracker,
WatermarkEstimator<WatermarkEstimatorStateWithDefaultWatermarkEstimator>
watermarkEstimator) {}
@GetInitialRestriction
public RestrictionWithBoundedDefaultTracker getInitialRestriction(@Element String element) {
return null;
}
@GetInitialWatermarkEstimatorState
public WatermarkEstimatorStateWithDefaultWatermarkEstimator
getInitialWatermarkEstimatorState() {
return null;
}
}
MockFn fn = mock(MockFn.class);
DoFnInvoker<String, String> invoker = DoFnInvokers.invokerFor(fn);
CoderRegistry coderRegistry = CoderRegistry.createDefault();
coderRegistry.registerCoderProvider(
CoderProviders.fromStaticMethods(
RestrictionWithBoundedDefaultTracker.class, CoderForDefaultTracker.class));
coderRegistry.registerCoderForClass(
WatermarkEstimatorStateWithDefaultWatermarkEstimator.class,
new CoderForWatermarkEstimatorStateWithDefaultWatermarkEstimator());
assertThat(
invoker.<RestrictionWithBoundedDefaultTracker>invokeGetRestrictionCoder(coderRegistry),
instanceOf(CoderForDefaultTracker.class));
assertThat(
invoker.invokeGetWatermarkEstimatorStateCoder(coderRegistry),
instanceOf(CoderForWatermarkEstimatorStateWithDefaultWatermarkEstimator.class));
invoker.invokeSplitRestriction(
new FakeArgumentProvider<String, String>() {
@Override
public String element(DoFn<String, String> doFn) {
return "blah";
}
@Override
public Object restriction() {
return "foo";
}
@Override
public OutputReceiver<String> outputReceiver(DoFn<String, String> doFn) {
return new DoFn.OutputReceiver<String>() {
private boolean invoked;
@Override
public void output(String output) {
assertFalse(invoked);
invoked = true;
assertEquals("foo", output);
}
@Override
public void outputWithTimestamp(String output, Instant instant) {
assertFalse(invoked);
invoked = true;
assertEquals("foo", output);
}
@Override
public void outputWindowedValue(
String output,
Instant timestamp,
Collection<? extends BoundedWindow> windows,
PaneInfo paneInfo) {
assertFalse(invoked);
invoked = true;
assertEquals("foo", output);
}
};
}
});
assertEquals(stop(), invoker.invokeProcessElement(mockArgumentProvider));
assertThat(
invoker.invokeNewTracker(
new FakeArgumentProvider<String, String>() {
@Override
public Object restriction() {
return new RestrictionWithBoundedDefaultTracker();
}
}),
instanceOf(BoundedDefaultTracker.class));
assertThat(
invoker.invokeNewWatermarkEstimator(
new FakeArgumentProvider<String, String>() {
@Override
public Object watermarkEstimatorState() {
return new WatermarkEstimatorStateWithDefaultWatermarkEstimator();
}
}),
instanceOf(DefaultWatermarkEstimator.class));
} |
@Override
public ConsumeMessageDirectlyResult consumeMessageDirectly(MessageExt msg, String brokerName) {
ConsumeMessageDirectlyResult result = new ConsumeMessageDirectlyResult();
result.setOrder(true);
List<MessageExt> msgs = new ArrayList<>();
msgs.add(msg);
MessageQueue mq = new MessageQueue();
mq.setBrokerName(brokerName);
mq.setTopic(msg.getTopic());
mq.setQueueId(msg.getQueueId());
ConsumeOrderlyContext context = new ConsumeOrderlyContext(mq);
this.defaultMQPushConsumerImpl.resetRetryAndNamespace(msgs, this.consumerGroup);
final long beginTime = System.currentTimeMillis();
log.info("consumeMessageDirectly receive new message: {}", msg);
try {
ConsumeOrderlyStatus status = this.messageListener.consumeMessage(msgs, context);
if (status != null) {
switch (status) {
case COMMIT:
result.setConsumeResult(CMResult.CR_COMMIT);
break;
case ROLLBACK:
result.setConsumeResult(CMResult.CR_ROLLBACK);
break;
case SUCCESS:
result.setConsumeResult(CMResult.CR_SUCCESS);
break;
case SUSPEND_CURRENT_QUEUE_A_MOMENT:
result.setConsumeResult(CMResult.CR_LATER);
break;
default:
break;
}
} else {
result.setConsumeResult(CMResult.CR_RETURN_NULL);
}
} catch (Throwable e) {
result.setConsumeResult(CMResult.CR_THROW_EXCEPTION);
result.setRemark(UtilAll.exceptionSimpleDesc(e));
log.warn("consumeMessageDirectly exception: {} Group: {} Msgs: {} MQ: {}",
UtilAll.exceptionSimpleDesc(e),
ConsumeMessagePopOrderlyService.this.consumerGroup,
msgs,
mq, e);
}
result.setAutoCommit(context.isAutoCommit());
result.setSpentTimeMills(System.currentTimeMillis() - beginTime);
log.info("consumeMessageDirectly Result: {}", result);
return result;
} | @Test
public void testConsumeMessageDirectly() {
when(messageListener.consumeMessage(any(), any(ConsumeOrderlyContext.class))).thenReturn(ConsumeOrderlyStatus.SUCCESS);
ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker);
assertEquals(CMResult.CR_SUCCESS, actual.getConsumeResult());
assertTrue(actual.isOrder());
} |
public void add(double datum) {
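// Buffer the datum and track the running min/max; once the buffer fills, update() folds it into the quantile summary.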
dbuf[nd++] = datum;
if (datum < q0) {
q0 = datum;
}
if (datum > qm) {
qm = datum;
}
if (nd == nbuf) {
update();
}
} | @Test
public void testAdd() {
System.out.println("IQAgent");
double[] data = new double[100000];
for (int i = 0; i < data.length; i++)
data[i] = i+1;
MathEx.permutate(data);
IQAgent instance = new IQAgent();
for (double datum : data) instance.add(datum);
for (int i = 1; i <= 100; i++) {
System.out.println(i + "%\t" + instance.quantile(i/100.0) + "\t" + Math.abs(1-instance.quantile(i/100.0)/(i*1000)));
assertTrue(Math.abs(1-instance.quantile(i/100.0)/(i*1000)) < 0.01);
}
} |
@Override
public long maxOffset(MessageQueue mq) throws MQClientException {
return defaultMQAdminExtImpl.maxOffset(mq);
} | @Test
@Ignore
public void testMaxOffset() throws Exception {
when(mQClientAPIImpl.getMaxOffset(anyString(), any(MessageQueue.class), anyLong())).thenReturn(100L);
assertThat(defaultMQAdminExt.maxOffset(new MessageQueue(TOPIC1, BROKER1_NAME, 0))).isEqualTo(100L);
} |
public MeasureDto toMeasureDto(Measure measure, Metric metric, Component component) {
MeasureDto out = new MeasureDto();
out.setMetricUuid(metric.getUuid());
out.setComponentUuid(component.getUuid());
out.setAnalysisUuid(analysisMetadataHolder.getUuid());
if (measure.hasQualityGateStatus()) {
setAlert(out, measure.getQualityGateStatus());
}
out.setValue(valueAsDouble(measure));
out.setData(data(measure));
return out;
} | @Test
public void toMeasureDto_maps_to_only_data_for_STRING_metric() {
MeasureDto trueMeasureDto = underTest.toMeasureDto(Measure.newMeasureBuilder().create(SOME_STRING), SOME_STRING_METRIC, SOME_COMPONENT);
assertThat(trueMeasureDto.getValue()).isNull();
assertThat(trueMeasureDto.getData()).isEqualTo(SOME_STRING);
} |
public BinaryRecordData generate(Object[] rowFields) {
checkArgument(
dataTypes.length == rowFields.length,
String.format(
"The types and values must have the same length. But types is %d and values is %d",
dataTypes.length, rowFields.length));
reuseWriter.reset();
for (int i = 0; i < dataTypes.length; i++) {
if (rowFields[i] == null) {
reuseWriter.setNullAt(i);
} else {
BinaryWriter.write(reuseWriter, i, rowFields[i], dataTypes[i], serializers[i]);
}
}
reuseWriter.complete();
return reuseRecordData.copy();
} | @Test
void testOf() {
RowType rowType =
RowType.of(
DataTypes.BOOLEAN(),
DataTypes.BINARY(3),
DataTypes.VARBINARY(10),
DataTypes.BYTES(),
DataTypes.TINYINT(),
DataTypes.SMALLINT(),
DataTypes.INT(),
DataTypes.BIGINT(),
DataTypes.FLOAT(),
DataTypes.DOUBLE(),
DataTypes.DECIMAL(6, 3),
DataTypes.CHAR(5),
DataTypes.VARCHAR(10),
DataTypes.STRING(),
DataTypes.DATE(),
DataTypes.TIME(),
DataTypes.TIME(6),
DataTypes.TIMESTAMP(),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP_LTZ(),
DataTypes.TIMESTAMP_LTZ(3),
DataTypes.TIMESTAMP_TZ(),
DataTypes.TIMESTAMP_TZ(3),
DataTypes.ROW(
DataTypes.FIELD("t1", DataTypes.STRING()),
DataTypes.FIELD("t2", DataTypes.BIGINT())),
DataTypes.STRING());
Object[] testData =
new Object[] {
true,
new byte[] {1, 2},
new byte[] {3, 4},
new byte[] {5, 6, 7},
(byte) 1,
(short) 2,
3,
4L,
5.1f,
6.2,
DecimalData.fromBigDecimal(new BigDecimal(7.123), 6, 3),
BinaryStringData.fromString("test1"),
BinaryStringData.fromString("test2"),
BinaryStringData.fromString("test3"),
100,
200,
300,
TimestampData.fromMillis(100, 1),
TimestampData.fromMillis(200, 0),
LocalZonedTimestampData.fromEpochMillis(300, 1),
LocalZonedTimestampData.fromEpochMillis(400),
ZonedTimestampData.of(500, 1, "UTC"),
ZonedTimestampData.of(600, 0, "UTC"),
new BinaryRecordDataGenerator(
RowType.of(DataTypes.STRING(), DataTypes.BIGINT()))
.generate(new Object[] {BinaryStringData.fromString("test"), 23L}),
null
};
BinaryRecordData actual = new BinaryRecordDataGenerator(rowType).generate(testData);
assertThat(actual.getBoolean(0)).isTrue();
assertThat(actual.getBinary(1)).containsExactly((byte[]) testData[1]);
assertThat(actual.getBinary(2)).containsExactly((byte[]) testData[2]);
assertThat(actual.getBinary(3)).containsExactly((byte[]) testData[3]);
assertThat(actual.getByte(4)).isEqualTo(testData[4]);
assertThat(actual.getShort(5)).isEqualTo(testData[5]);
assertThat(actual.getInt(6)).isEqualTo(testData[6]);
assertThat(actual.getLong(7)).isEqualTo(testData[7]);
assertThat(actual.getFloat(8)).isEqualTo(testData[8]);
assertThat(actual.getDouble(9)).isEqualTo(testData[9]);
assertThat(actual.getDecimal(10, 6, 3)).isEqualTo(testData[10]);
assertThat(actual.getString(11)).isEqualTo(BinaryStringData.fromString("test1"));
assertThat(actual.getString(12)).isEqualTo(BinaryStringData.fromString("test2"));
assertThat(actual.getString(13)).isEqualTo(BinaryStringData.fromString("test3"));
assertThat(actual.getInt(14)).isEqualTo(testData[14]);
assertThat(actual.getInt(15)).isEqualTo(testData[15]);
assertThat(actual.getInt(16)).isEqualTo(testData[16]);
assertThat(actual.getTimestamp(17, TimestampType.DEFAULT_PRECISION))
.isEqualTo(testData[17]);
assertThat(actual.getTimestamp(18, 3)).isEqualTo(testData[18]);
assertThat(actual.getLocalZonedTimestampData(19, LocalZonedTimestampType.DEFAULT_PRECISION))
.isEqualTo(testData[19]);
assertThat(actual.getLocalZonedTimestampData(20, 3)).isEqualTo(testData[20]);
assertThat(actual.getZonedTimestamp(21, ZonedTimestampType.DEFAULT_PRECISION))
.isEqualTo(testData[21]);
assertThat(actual.getZonedTimestamp(22, 3)).isEqualTo(testData[22]);
assertThat(actual.getRow(23, 2).getString(0))
.isEqualTo(BinaryStringData.fromString("test"));
assertThat(actual.getRow(23, 2).getLong(1)).isEqualTo(23L);
assertThat(actual.isNullAt(24)).isTrue();
} |
@Override
public OAuth2AccessTokenDO grantPassword(String username, String password, String clientId, List<String> scopes) {
// Log in with username + password
AdminUserDO user = adminAuthService.authenticate(username, password);
Assert.notNull(user, "用户不能为空!"); // defensive programming
// Create the access token
return oauth2TokenService.createAccessToken(user.getId(), UserTypeEnum.ADMIN.getValue(), clientId, scopes);
} | @Test
public void testGrantPassword() {
// Prepare parameters
String username = randomString();
String password = randomString();
String clientId = randomString();
List<String> scopes = Lists.newArrayList("read", "write");
// Mock the authentication call
AdminUserDO user = randomPojo(AdminUserDO.class);
when(adminAuthService.authenticate(eq(username), eq(password))).thenReturn(user);
// Mock the access-token creation
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class);
when(oauth2TokenService.createAccessToken(eq(user.getId()), eq(UserTypeEnum.ADMIN.getValue()),
eq(clientId), eq(scopes))).thenReturn(accessTokenDO);
// Invoke and assert
assertPojoEquals(accessTokenDO, oauth2GrantService.grantPassword(
username, password, clientId, scopes));
} |
public static Deserializer<NeighborSolicitation> deserializer() {
return (data, offset, length) -> {
checkInput(data, offset, length, HEADER_LENGTH);
NeighborSolicitation neighborSolicitation = new NeighborSolicitation();
ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
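// Skip the 4-byte Reserved field that precedes the target address (RFC 4861).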
bb.getInt();
bb.get(neighborSolicitation.targetAddress, 0, Ip6Address.BYTE_LENGTH);
if (bb.limit() - bb.position() > 0) {
NeighborDiscoveryOptions options = NeighborDiscoveryOptions.deserializer()
.deserialize(data, bb.position(), bb.limit() - bb.position());
for (NeighborDiscoveryOptions.Option option : options.options()) {
neighborSolicitation.addOption(option.type(), option.data());
}
}
return neighborSolicitation;
};
} | @Test
public void testDeserializeTruncated() throws Exception {
// Run the truncation test only on the NeighborSolicitation header
byte[] nsHeader = new byte[NeighborSolicitation.HEADER_LENGTH];
ByteBuffer.wrap(bytePacket).get(nsHeader);
PacketTestUtils.testDeserializeTruncated(NeighborSolicitation.deserializer(), nsHeader);
} |
public SearchResponse search(IssueQuery query, SearchOptions options) {
SearchRequest requestBuilder = EsClient.prepareSearch(TYPE_ISSUE.getMainType());
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
requestBuilder.source(sourceBuilder);
configureSorting(query, sourceBuilder);
configurePagination(options, sourceBuilder);
configureRouting(query, options, requestBuilder);
AllFilters allFilters = createAllFilters(query);
RequestFiltersComputer filterComputer = newFilterComputer(options, allFilters);
configureTopAggregations(query, options, sourceBuilder, allFilters, filterComputer);
configureQuery(sourceBuilder, filterComputer);
configureTopFilters(sourceBuilder, filterComputer);
sourceBuilder.fetchSource(false)
.trackTotalHits(true);
return client.search(requestBuilder);
} | @Test
void search_with_max_limit() {
ComponentDto project = newPrivateProjectDto();
ComponentDto file = newFileDto(project);
List<IssueDoc> issues = new ArrayList<>();
for (int i = 0; i < 500; i++) {
String key = "I" + i;
issues.add(newDoc(key, project.uuid(), file));
}
indexIssues(issues.toArray(new IssueDoc[]{}));
IssueQuery.Builder query = IssueQuery.builder();
SearchResponse result = underTest.search(query.build(), new SearchOptions().setLimit(500));
assertThat(result.getHits().getHits()).hasSize(SearchOptions.MAX_PAGE_SIZE);
} |
public static Db use() {
return use(DSFactory.get());
} | @Test
public void findTest() throws SQLException {
List<Entity> find = Db.use().find(Entity.create("user").set("age", 18));
assertEquals("王五", find.get(0).get("name"));
} |
@GetMapping("/removeVGroup")
public Result<?> removeVGroup(@RequestParam String vGroup) {
Result<?> result = new Result<>();
boolean rst = vGroupMappingStoreManager.removeVGroup(vGroup);
Instance.getInstance().setTerm(System.currentTimeMillis());
if (!rst) {
result.setCode("500");
result.setMessage("remove vGroup failed!");
}
// push the newest mapping relationship
vGroupMappingStoreManager.notifyMapping();
return result;
} | @Test
void removeVGroup() {
namingController.removeVGroup("group1");
} |
public static NetworkPolicyPeer createPeer(Map<String, String> podSelector, LabelSelector namespaceSelector) {
return new NetworkPolicyPeerBuilder()
.withNewPodSelector()
.withMatchLabels(podSelector)
.endPodSelector()
.withNamespaceSelector(namespaceSelector)
.build();
} | @Test
public void testCreatePeerWithPodLabelsAndEmptyNamespaceSelector() {
NetworkPolicyPeer peer = NetworkPolicyUtils.createPeer(Map.of("labelKey", "labelValue"), new LabelSelectorBuilder().withMatchLabels(Map.of()).build());
assertThat(peer.getNamespaceSelector().getMatchLabels(), is(Map.of()));
assertThat(peer.getPodSelector().getMatchLabels(), is(Map.of("labelKey", "labelValue")));
} |
public static <T extends CharSequence> T validateChinese(T value, String errorMsg) throws ValidateException {
if (false == isChinese(value)) {
throw new ValidateException(errorMsg);
}
return value;
} | @Test
public void validateTest() throws ValidateException {
assertThrows(ValidateException.class, () -> {
Validator.validateChinese("我是一段zhongwen", "内容中包含非中文");
});
} |
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
final String name = builder.newProcessorName(TRANSFORM_NAME);
return flatTransform(transformerSupplier, Named.as(name), stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowBadTransformerSupplierOnFlatTransformWithNamed() {
final org.apache.kafka.streams.kstream.Transformer<String, String, Iterable<KeyValue<String, String>>> transformer = flatTransformerSupplier.get();
final IllegalArgumentException exception = assertThrows(
IllegalArgumentException.class,
() -> testStream.flatTransform(() -> transformer, Named.as("flatTransformer"))
);
assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called."));
} |
public SmppMessage createSmppMessage(CamelContext camelContext, AlertNotification alertNotification) {
SmppMessage smppMessage = new SmppMessage(camelContext, alertNotification, configuration);
smppMessage.setHeader(SmppConstants.MESSAGE_TYPE, SmppMessageType.AlertNotification.toString());
smppMessage.setHeader(SmppConstants.SEQUENCE_NUMBER, alertNotification.getSequenceNumber());
smppMessage.setHeader(SmppConstants.COMMAND_ID, alertNotification.getCommandId());
smppMessage.setHeader(SmppConstants.COMMAND_STATUS, alertNotification.getCommandStatus());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR, alertNotification.getSourceAddr());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR_NPI, alertNotification.getSourceAddrNpi());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR_TON, alertNotification.getSourceAddrTon());
smppMessage.setHeader(SmppConstants.ESME_ADDR, alertNotification.getEsmeAddr());
smppMessage.setHeader(SmppConstants.ESME_ADDR_NPI, alertNotification.getEsmeAddrNpi());
smppMessage.setHeader(SmppConstants.ESME_ADDR_TON, alertNotification.getEsmeAddrTon());
return smppMessage;
} | @SuppressWarnings("unchecked")
@Test
public void createSmppMessageFromDeliveryReceiptWithOptionalParametersShouldReturnASmppMessage() throws Exception {
DeliverSm deliverSm = new DeliverSm();
deliverSm.setSmscDeliveryReceipt();
deliverSm.setShortMessage(
"id:2 sub:001 dlvrd:001 submit date:0908312310 done date:0908312311 stat:DELIVRD err:xxx Text:Hello SMPP world!"
.getBytes());
deliverSm.setOptionalParameters(
new OptionalParameter.OctetString(Tag.SOURCE_SUBADDRESS, "OctetString"),
new OptionalParameter.COctetString((short) 0x001D, "COctetString"),
new OptionalParameter.Byte(Tag.DEST_ADDR_SUBUNIT, (byte) 0x01),
new OptionalParameter.Short(Tag.DEST_TELEMATICS_ID, (short) 1),
new OptionalParameter.Int(Tag.QOS_TIME_TO_LIVE, 1),
new OptionalParameter.Null(Tag.ALERT_ON_MESSAGE_DELIVERY));
SmppMessage smppMessage = binding.createSmppMessage(camelContext, deliverSm);
assertEquals("Hello SMPP world!", smppMessage.getBody());
assertEquals(10, smppMessage.getHeaders().size());
assertEquals("2", smppMessage.getHeader(SmppConstants.ID));
assertEquals(1, smppMessage.getHeader(SmppConstants.DELIVERED));
// To avoid the test failure when running in different TimeZone
//assertEquals(new Date(1251753060000L), smppMessage.getHeader(SmppConstants.DONE_DATE));
assertEquals("xxx", smppMessage.getHeader(SmppConstants.ERROR));
//assertEquals(new Date(1251753000000L), smppMessage.getHeader(SmppConstants.SUBMIT_DATE));
assertEquals(1, smppMessage.getHeader(SmppConstants.SUBMITTED));
assertEquals(DeliveryReceiptState.DELIVRD, smppMessage.getHeader(SmppConstants.FINAL_STATUS));
assertEquals(SmppMessageType.DeliveryReceipt.toString(), smppMessage.getHeader(SmppConstants.MESSAGE_TYPE));
Map<String, Object> optionalParameters = smppMessage.getHeader(SmppConstants.OPTIONAL_PARAMETERS, Map.class);
assertEquals(6, optionalParameters.size());
assertEquals("OctetString", optionalParameters.get("SOURCE_SUBADDRESS"));
assertEquals("COctetString", optionalParameters.get("ADDITIONAL_STATUS_INFO_TEXT"));
assertEquals(Byte.valueOf((byte) 0x01), optionalParameters.get("DEST_ADDR_SUBUNIT"));
assertEquals(Short.valueOf((short) 1), optionalParameters.get("DEST_TELEMATICS_ID"));
assertEquals(Integer.valueOf(1), optionalParameters.get("QOS_TIME_TO_LIVE"));
assertNull(optionalParameters.get("ALERT_ON_MESSAGE_DELIVERY"), "0x00");
Map<Short, Object> optionalParameter = smppMessage.getHeader(SmppConstants.OPTIONAL_PARAMETER, Map.class);
assertEquals(6, optionalParameter.size());
assertArrayEquals("OctetString".getBytes("UTF-8"), (byte[]) optionalParameter.get(Short.valueOf((short) 0x0202)));
assertEquals("COctetString", optionalParameter.get(Short.valueOf((short) 0x001D)));
assertEquals(Byte.valueOf((byte) 0x01), optionalParameter.get(Short.valueOf((short) 0x0005)));
assertEquals(Short.valueOf((short) 1), optionalParameter.get(Short.valueOf((short) 0x0008)));
assertEquals(Integer.valueOf(1), optionalParameter.get(Short.valueOf((short) 0x0017)));
assertNull(optionalParameter.get(Short.valueOf((short) 0x130C)), "0x00");
} |
public static void shutdown(final NamesrvController controller) {
controller.shutdown();
} | @Test
public void testShutdown() {
NamesrvStartup.shutdown(namesrvController);
Mockito.verify(namesrvController).shutdown();
} |
@Override
public List<NotifyMessageDO> getUnreadNotifyMessageList(Long userId, Integer userType, Integer size) {
return notifyMessageMapper.selectUnreadListByUserIdAndUserType(userId, userType, size);
} | @Test
public void testGetUnreadNotifyMessageList() {
SqlConstants.init(DbType.MYSQL);
// Mock data
NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // expected to be returned by the query
o.setUserId(1L);
o.setUserType(UserTypeEnum.ADMIN.getValue());
o.setReadStatus(false);
o.setTemplateParams(randomTemplateParams());
});
notifyMessageMapper.insert(dbNotifyMessage);
// Case: userId does not match
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L)));
// Case: userType does not match
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
// Case: readStatus does not match
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(true)));
// Prepare parameters
Long userId = 1L;
Integer userType = UserTypeEnum.ADMIN.getValue();
Integer size = 10;
// Invoke
List<NotifyMessageDO> list = notifyMessageService.getUnreadNotifyMessageList(userId, userType, size);
// Assert
assertEquals(1, list.size());
assertPojoEquals(dbNotifyMessage, list.get(0));
} |
JavaClasses getClassesToAnalyzeFor(Class<?> testClass, ClassAnalysisRequest classAnalysisRequest) {
checkNotNull(testClass);
checkNotNull(classAnalysisRequest);
if (cachedByTest.containsKey(testClass)) {
return cachedByTest.get(testClass);
}
LocationsKey locations = RequestedLocations.by(classAnalysisRequest, testClass).asKey();
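// CacheMode.FOREVER shares imported classes across tests with the same locations; otherwise classes are imported lazily for this test alone.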
JavaClasses classes = classAnalysisRequest.getCacheMode() == FOREVER
? cachedByLocations.getUnchecked(locations).get()
: new LazyJavaClasses(locations.locations, locations.importOptionTypes).get();
cachedByTest.put(testClass, classes);
return classes;
} | @Test
public void distinguishes_import_option_when_caching() {
JavaClasses importingWholeClasspathWithFilter =
cache.getClassesToAnalyzeFor(TestClass.class, new TestAnalysisRequest().withImportOptions(TestFilterForJUnitJars.class));
JavaClasses importingWholeClasspathWithEquivalentButDifferentFilter =
cache.getClassesToAnalyzeFor(EquivalentTestClass.class,
new TestAnalysisRequest().withImportOptions(AnotherTestFilterForJUnitJars.class));
assertThat(importingWholeClasspathWithFilter)
.as("number of classes imported")
.hasSameSizeAs(importingWholeClasspathWithEquivalentButDifferentFilter);
verifyNumberOfImports(2);
} |
public ConvertedTime getConvertedTime(long duration) {
Set<Seconds> keys = RULES.keySet();
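// Assumes RULES iterates its Seconds keys in ascending order (e.g. a sorted map), so the first matching threshold wins.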
for (Seconds seconds : keys) {
if (duration <= seconds.getSeconds()) {
return RULES.get(seconds).getConvertedTime(duration);
}
}
return new TimeConverter.OverTwoYears().getConvertedTime(duration);
} | @Test
public void testShouldReportAbout1HourFor45Minutes() {
assertEquals(TimeConverter.ABOUT_1_HOUR_AGO, timeConverter.getConvertedTime(45 * 60));
} |
@Deprecated
@Override
public void init(final org.apache.kafka.streams.processor.ProcessorContext context, final StateStore root) {
store.init(context, root);
} | @Deprecated
@Test
public void shouldDeprecatedInitVersionedStore() {
givenWrapperWithVersionedStore();
final org.apache.kafka.streams.processor.ProcessorContext mockContext
= mock(org.apache.kafka.streams.processor.ProcessorContext.class);
wrapper.init(mockContext, wrapper);
verify(versionedStore).init(mockContext, wrapper);
} |
public List<String> getDatacentersBySubnet(InetAddress address) throws IllegalArgumentException {
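// Longest-prefix match: descendingMap() visits prefix lengths from most specific to least specific.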
if(address instanceof Inet4Address) {
for(Map.Entry<Integer, Map<Integer, List<String>>> t: this.ipv4Map.descendingMap().entrySet()) {
int maskedIp = CidrBlock.IpV4CidrBlock.maskToSize((Inet4Address) address, t.getKey());
if(t.getValue().containsKey(maskedIp)) {
return t.getValue().get(maskedIp);
}
}
} else if (address instanceof Inet6Address) {
for(Map.Entry<Integer, Map<BigInteger, List<String>>> t: this.ipv6Map.descendingMap().entrySet()) {
BigInteger maskedIp = CidrBlock.IpV6CidrBlock.maskToSize((Inet6Address) address, t.getKey());
if(t.getValue().containsKey(maskedIp)) {
return t.getValue().get(maskedIp);
}
}
} else {
throw new IllegalArgumentException("Expected either an Inet4Address or Inet6Address");
}
return Collections.emptyList();
} | @Test
void testGetFastestDataCentersBySubnetOverlappingTable() throws UnknownHostException {
var v4address = Inet4Address.getByName("1.123.123.1");
var actualV4 = overlappingTable.getDatacentersBySubnet(v4address);
assertThat(actualV4).isEqualTo(List.of("datacenter-4"));
var v6address = Inet6Address.getByName("2001:db8:b0ac:aaaa:aaaa:aaaa:aaaa:0001");
var actualV6 = overlappingTable.getDatacentersBySubnet(v6address);
assertThat(actualV6).isEqualTo(List.of("datacenter-3", "datacenter-1", "datacenter-2"));
} |
protected static Object prepareObjectType( Object o ) {
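// byte[] uses identity equals/hashCode, so compare array values by their content hash instead.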
return ( o instanceof byte[] ) ? Arrays.hashCode( (byte[]) o ) : o;
} | @Test
public void prepareObjectTypeBinaryTest_Equals() throws Exception {
assertEquals( Arrays.hashCode( new byte[] { 1, 2, 3 } ), SwitchCase.prepareObjectType( new byte[] { 1, 2, 3 } ) );
} |
public T valueOf(Class<?> firstNameComponent, String secondNameComponent) {
return valueOf(
checkNotNull(firstNameComponent, "firstNameComponent").getName() +
'#' +
checkNotNull(secondNameComponent, "secondNameComponent"));
} | @Test
public void testIdUniqueness() {
TestConstant one = pool.valueOf("one");
TestConstant two = pool.valueOf("two");
assertThat(one.id(), is(not(two.id())));
} |
public WorkflowInstanceActionResponse kill(
String workflowId, long workflowInstanceId, long workflowRunId, User caller) {
return terminate(
workflowId, workflowInstanceId, workflowRunId, Actions.WorkflowInstanceAction.KILL, caller);
} | @Test
public void testKill() {
when(instanceDao.tryTerminateQueuedInstance(any(), any(), any())).thenReturn(true);
when(instance.getStatus()).thenReturn(WorkflowInstance.Status.CREATED);
boolean res = actionHandler.kill("test-workflow", 1, 1, user).isCompleted();
assertTrue(res);
verify(instanceDao, times(1)).getLatestWorkflowInstanceRun("test-workflow", 1);
verify(instanceDao, times(1))
.tryTerminateQueuedInstance(any(), eq(WorkflowInstance.Status.FAILED), anyString());
when(instance.getStatus()).thenReturn(WorkflowInstance.Status.IN_PROGRESS);
when(instance.getExecutionId()).thenReturn("foo");
res = actionHandler.kill("test-workflow", 1, 1, user).isCompleted();
assertFalse(res);
verify(instanceDao, times(2)).getLatestWorkflowInstanceRun("test-workflow", 1);
verify(instanceDao, times(1))
.tryTerminateQueuedInstance(any(), eq(WorkflowInstance.Status.FAILED), anyString());
verify(actionDao, times(1)).terminate(any(), any(), any(), anyString());
when(instance.getStatus()).thenReturn(WorkflowInstance.Status.PAUSED);
when(instance.getExecutionId()).thenReturn(null);
res = actionHandler.kill("test-workflow", 1, 1, user).isCompleted();
assertTrue(res);
verify(instanceDao, times(3)).getLatestWorkflowInstanceRun("test-workflow", 1);
verify(instanceDao, times(2))
.tryTerminateQueuedInstance(any(), eq(WorkflowInstance.Status.FAILED), anyString());
verify(actionDao, times(1)).terminate(any(), any(), any(), anyString());
} |
public static WebService.NewParam createRootQualifiersParameter(WebService.NewAction action, QualifierParameterContext context) {
return action.createParam(PARAM_QUALIFIERS)
.setDescription("Comma-separated list of component qualifiers. Filter the results with the specified qualifiers. " +
"Possible values are:" + buildRootQualifiersDescription(context))
.setPossibleValues(getRootQualifiers(context.getResourceTypes()));
} | @Test
public void test_createRootQualifiersParameter() {
when(resourceTypes.getRoots()).thenReturn(asList(Q1, Q2));
when(newAction.createParam(PARAM_QUALIFIERS)).thenReturn(newParam);
when(newParam.setDescription(startsWith("Comma-separated list of component qualifiers. Filter the results with the specified qualifiers. " +
"Possible values are:"
+ "<ul><li>Q1 - null</li>"
+ "<li>Q2 - null</li></ul>"))).thenReturn(newParam);
when(newParam.setPossibleValues(any(Collection.class))).thenReturn(newParam);
NewParam newParam = WsParameterBuilder
.createRootQualifiersParameter(newAction, newQualifierParameterContext(i18n, resourceTypes));
assertThat(newParam).isNotNull();
} |
@Override
ListOffsetsRequest.Builder buildBatchedRequest(int brokerId, Set<TopicPartition> keys) {
Map<String, ListOffsetsTopic> topicsByName = CollectionUtils.groupPartitionsByTopic(
keys,
topicName -> new ListOffsetsTopic().setName(topicName),
(listOffsetsTopic, partitionId) -> {
TopicPartition topicPartition = new TopicPartition(listOffsetsTopic.name(), partitionId);
long offsetTimestamp = offsetTimestampsByPartition.get(topicPartition);
listOffsetsTopic.partitions().add(
new ListOffsetsPartition()
.setPartitionIndex(partitionId)
.setTimestamp(offsetTimestamp));
});
boolean supportsMaxTimestamp = keys
.stream()
.anyMatch(key -> offsetTimestampsByPartition.get(key) == ListOffsetsRequest.MAX_TIMESTAMP);
boolean requireTieredStorageTimestamp = keys
.stream()
.anyMatch(key -> offsetTimestampsByPartition.get(key) == ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP || offsetTimestampsByPartition.get(key) == ListOffsetsRequest.LATEST_TIERED_TIMESTAMP);
return ListOffsetsRequest.Builder
.forConsumer(true, options.isolationLevel(), supportsMaxTimestamp, requireTieredStorageTimestamp)
.setTargetTimes(new ArrayList<>(topicsByName.values()));
} | @Test
public void testBuildRequestSimple() {
ListOffsetsHandler handler =
new ListOffsetsHandler(offsetTimestampsByPartition, new ListOffsetsOptions(), logContext);
ListOffsetsRequest request = handler.buildBatchedRequest(node.id(), mkSet(t0p0, t0p1)).build();
List<ListOffsetsTopic> topics = request.topics();
assertEquals(1, topics.size());
ListOffsetsTopic topic = topics.get(0);
assertEquals(2, topic.partitions().size());
for (ListOffsetsPartition partition : topic.partitions()) {
TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
assertExpectedTimestamp(topicPartition, partition.timestamp());
}
assertEquals(IsolationLevel.READ_UNCOMMITTED, request.isolationLevel());
} |
public static HttpServerResponse create(@Nullable HttpServletRequest request,
HttpServletResponse response, @Nullable Throwable caught) {
return new HttpServletResponseWrapper(request, response, caught);
} | @Test void method_isRequestMethod() {
HttpServerResponse wrapper = HttpServletResponseWrapper.create(request, response, null);
when(request.getMethod()).thenReturn("POST");
assertThat(wrapper.method()).isEqualTo("POST");
} |
@Override
protected VertexFlameGraph handleRequest(
HandlerRequest<EmptyRequestBody> request, AccessExecutionJobVertex jobVertex)
throws RestHandlerException {
@Nullable Integer subtaskIndex = getSubtaskIndex(request, jobVertex);
if (isTerminated(jobVertex, subtaskIndex)) {
return VertexFlameGraph.terminated();
}
final Optional<VertexThreadInfoStats> threadInfoSample;
if (subtaskIndex == null) {
threadInfoSample =
threadInfoOperatorTracker.getJobVertexStats(
request.getPathParameter(JobIDPathParameter.class), jobVertex);
} else {
threadInfoSample =
threadInfoOperatorTracker.getExecutionVertexStats(
request.getPathParameter(JobIDPathParameter.class),
jobVertex,
subtaskIndex);
}
final FlameGraphTypeQueryParameter.Type flameGraphType = getFlameGraphType(request);
final Optional<VertexFlameGraph> operatorFlameGraph;
switch (flameGraphType) {
case FULL:
operatorFlameGraph =
threadInfoSample.map(VertexFlameGraphFactory::createFullFlameGraphFrom);
break;
case ON_CPU:
operatorFlameGraph =
threadInfoSample.map(VertexFlameGraphFactory::createOnCpuFlameGraph);
break;
case OFF_CPU:
operatorFlameGraph =
threadInfoSample.map(VertexFlameGraphFactory::createOffCpuFlameGraph);
break;
default:
throw new RestHandlerException(
"Unknown Flame Graph type " + flameGraphType + '.',
HttpResponseStatus.BAD_REQUEST);
}
return operatorFlameGraph.orElse(VertexFlameGraph.waiting());
} | @Test
void testHandleFinishedJobVertex() throws Exception {
final ArchivedExecutionJobVertex archivedExecutionJobVertex =
new ArchivedExecutionJobVertex(
new ArchivedExecutionVertex[] {
generateExecutionVertex(0, ExecutionState.FINISHED),
generateExecutionVertex(1, ExecutionState.FINISHED)
},
JOB_VERTEX_ID,
"test",
2,
2,
new SlotSharingGroup(),
ResourceProfile.UNKNOWN,
new StringifiedAccumulatorResult[0]);
HandlerRequest<EmptyRequestBody> request = generateJobVertexFlameGraphParameters(null);
VertexFlameGraph jobVertexFlameGraph =
handler.handleRequest(request, archivedExecutionJobVertex);
assertThat(jobVertexFlameGraph.getEndTime())
.isEqualTo(VertexFlameGraph.terminated().getEndTime());
} |
public UnionOperator(OpChainExecutionContext opChainExecutionContext, List<MultiStageOperator> inputOperators,
DataSchema dataSchema) {
super(opChainExecutionContext, inputOperators, dataSchema);
} | @Test
public void testUnionOperator() {
DataSchema schema = new DataSchema(new String[]{"int_col", "string_col"}, new DataSchema.ColumnDataType[]{
DataSchema.ColumnDataType.INT, DataSchema.ColumnDataType.STRING
});
Mockito.when(_leftOperator.nextBlock())
.thenReturn(OperatorTestUtil.block(schema, new Object[]{1, "AA"}, new Object[]{2, "BB"}))
.thenReturn(TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));
Mockito.when(_rightOperator.nextBlock()).thenReturn(
OperatorTestUtil.block(schema, new Object[]{3, "aa"}, new Object[]{4, "bb"}, new Object[]{5, "cc"}))
.thenReturn(TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));
UnionOperator unionOperator =
new UnionOperator(OperatorTestUtil.getTracingContext(), ImmutableList.of(_leftOperator, _rightOperator),
schema);
List<Object[]> resultRows = new ArrayList<>();
TransferableBlock result = unionOperator.nextBlock();
while (!result.isEndOfStreamBlock()) {
resultRows.addAll(result.getContainer());
result = unionOperator.nextBlock();
}
List<Object[]> expectedRows =
Arrays.asList(new Object[]{1, "AA"}, new Object[]{2, "BB"}, new Object[]{3, "aa"}, new Object[]{4, "bb"},
new Object[]{5, "cc"});
Assert.assertEquals(resultRows.size(), expectedRows.size());
for (int i = 0; i < resultRows.size(); i++) {
Assert.assertEquals(resultRows.get(i), expectedRows.get(i));
}
} |
public static RestartBackoffTimeStrategy.Factory createRestartBackoffTimeStrategyFactory(
final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration,
final Configuration jobConfiguration,
final Configuration clusterConfiguration,
final boolean isCheckpointingEnabled) {
checkNotNull(jobRestartStrategyConfiguration);
checkNotNull(jobConfiguration);
checkNotNull(clusterConfiguration);
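// Precedence: job-level RestartStrategies configuration, then job Configuration, then cluster Configuration, then the default.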
return getJobRestartStrategyFactory(jobRestartStrategyConfiguration)
.orElse(
getRestartStrategyFactoryFromConfig(jobConfiguration)
.orElse(
(getRestartStrategyFactoryFromConfig(clusterConfiguration)
.orElse(
getDefaultRestartStrategyFactory(
isCheckpointingEnabled)))));
} | @Test
void testExponentialDelayRestartStrategySpecifiedInExecutionConfig() {
final Configuration conf = new Configuration();
conf.set(RestartStrategyOptions.RESTART_STRATEGY, FAILURE_RATE.getMainValue());
final RestartBackoffTimeStrategy.Factory factory =
RestartBackoffTimeStrategyFactoryLoader.createRestartBackoffTimeStrategyFactory(
RestartStrategies.exponentialDelayRestart(
Duration.ofMillis(1),
Duration.ofMillis(1000),
1.1,
Duration.ofMillis(2000),
0),
conf,
conf,
false);
assertThat(factory)
.isInstanceOf(
ExponentialDelayRestartBackoffTimeStrategy
.ExponentialDelayRestartBackoffTimeStrategyFactory.class);
} |
public boolean isSuppressed(Device device) {
if (suppressedDeviceType.contains(device.type())) {
return true;
}
final Annotations annotations = device.annotations();
if (containsSuppressionAnnotation(annotations)) {
return true;
}
return false;
} | @Test
public void testSuppressedPortAnnotation() {
Annotations annotation = DefaultAnnotations.builder()
.set("no-lldp", "random")
.build();
Device device = new DefaultDevice(PID,
NON_SUPPRESSED_DID,
Device.Type.SWITCH,
MFR, HW, SW1, SN, CID);
Port port = new DefaultPort(device, P1, true, annotation);
assertTrue(rules.isSuppressed(port));
} |
@Override
public synchronized int read() throws IOException {
checkNotClosed();
if (finished) {
return -1;
}
file.readLock().lock();
try {
int b = file.read(pos++); // it's ok for pos to go beyond size()
if (b == -1) {
finished = true;
} else {
file.setLastAccessTime(fileSystemState.now());
}
return b;
} finally {
file.readLock().unlock();
}
} | @SuppressWarnings("GuardedByChecker")
@Test
public void testFullyReadInputStream_doesNotChangeStateWhenStoreChanges() throws IOException {
JimfsInputStream in = newInputStream(1, 2, 3, 4, 5);
assertThat(in.read(new byte[5])).isEqualTo(5);
assertEmpty(in);
in.file.write(5, new byte[10], 0, 10); // append more bytes to file
assertEmpty(in);
} |
List<Argument> matchedArguments(Step step) {
return argumentMatcher.argumentsFrom(step, types);
} | @Test
void should_convert_empty_pickle_table_cells_to_null_values() {
Feature feature = TestFeatureParser.parse("" +
"Feature: Test feature\n" +
" Scenario: Test scenario\n" +
" Given I have some step\n" +
" | |\n");
StepDefinition stepDefinition = new StubStepDefinition("I have some step", Object.class);
StepExpression expression = stepExpressionFactory.createExpression(stepDefinition);
CoreStepDefinition coreStepDefinition = new CoreStepDefinition(id, stepDefinition, expression);
List<Argument> arguments = coreStepDefinition.matchedArguments(feature.getPickles().get(0).getSteps().get(0));
assertEquals(DataTable.create(singletonList(singletonList(null))), arguments.get(0).getValue());
} |
public boolean cleanupExpiredOffsets(String groupId, List<CoordinatorRecord> records) {
TimelineHashMap<String, TimelineHashMap<Integer, OffsetAndMetadata>> offsetsByTopic =
offsets.offsetsByGroup.get(groupId);
if (offsetsByTopic == null) {
return true;
}
// We expect the group to exist.
Group group = groupMetadataManager.group(groupId);
Set<String> expiredPartitions = new HashSet<>();
long currentTimestampMs = time.milliseconds();
Optional<OffsetExpirationCondition> offsetExpirationCondition = group.offsetExpirationCondition();
if (!offsetExpirationCondition.isPresent()) {
return false;
}
AtomicBoolean allOffsetsExpired = new AtomicBoolean(true);
OffsetExpirationCondition condition = offsetExpirationCondition.get();
offsetsByTopic.forEach((topic, partitions) -> {
if (!group.isSubscribedToTopic(topic)) {
partitions.forEach((partition, offsetAndMetadata) -> {
// We don't expire the offset yet if there is a pending transactional offset for the partition.
if (condition.isOffsetExpired(offsetAndMetadata, currentTimestampMs, config.offsetsRetentionMs()) &&
!hasPendingTransactionalOffsets(groupId, topic, partition)) {
expiredPartitions.add(appendOffsetCommitTombstone(groupId, topic, partition, records).toString());
log.debug("[GroupId {}] Expired offset for partition={}-{}", groupId, topic, partition);
} else {
allOffsetsExpired.set(false);
}
});
} else {
allOffsetsExpired.set(false);
}
});
metrics.record(OFFSET_EXPIRED_SENSOR_NAME, expiredPartitions.size());
// We don't want to remove the group if there are ongoing transactions.
return allOffsetsExpired.get() && !openTransactionsByGroup.containsKey(groupId);
} | @Test
public void testCleanupExpiredOffsetsGroupHasNoOffsets() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder()
.build();
List<CoordinatorRecord> records = new ArrayList<>();
assertTrue(context.cleanupExpiredOffsets("unknown-group-id", records));
assertEquals(Collections.emptyList(), records);
} |
@Override
@MethodNotAvailable
public Map<K, Object> executeOnEntries(com.hazelcast.map.EntryProcessor entryProcessor) {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testExecuteOnEntriesWithPredicate() {
adapter.executeOnEntries(new IMapReplaceEntryProcessor("value", "newValue"), Predicates.alwaysTrue());
} |
@Override
public DataFrame select(int... cols) {
return new IndexDataFrame(df.select(cols), index);
} | @Test
public void testDataFrameToArray() {
System.out.println("toArray");
double[][] output = df.select("age", "salary", "gender").toArray(false, CategoricalEncoder.ONE_HOT);
assertEquals(4, output.length);
assertEquals(4, output[0].length);
assertEquals(48., output[0][0], 1E-10);
assertEquals(23., output[1][0], 1E-10);
assertEquals(13., output[2][0], 1E-10);
assertEquals(48., output[3][0], 1E-10);
assertEquals(230000., output[0][1], 1E-10);
assertTrue(Double.isNaN(output[1][1]));
assertTrue(Double.isNaN(output[2][1]));
assertEquals(230000., output[3][1], 1E-10);
assertEquals(0, output[0][2], 1E-10);
assertEquals(1, output[1][2], 1E-10);
assertEquals(0, output[2][2], 1E-10);
assertEquals(0, output[3][2], 1E-10);
assertEquals(1, output[0][3], 1E-10);
assertEquals(0, output[1][3], 1E-10);
assertEquals(1, output[2][3], 1E-10);
assertEquals(1, output[3][3], 1E-10);
} |
@Nullable
public static Object getValueFromLiteral(ValueLiteralExpression expr) {
LogicalType logicalType = expr.getOutputDataType().getLogicalType();
switch (logicalType.getTypeRoot()) {
case TIMESTAMP_WITHOUT_TIME_ZONE:
return expr.getValueAs(LocalDateTime.class)
.map(ldt -> ldt.toInstant(ZoneOffset.UTC).toEpochMilli())
.orElse(null);
case TIME_WITHOUT_TIME_ZONE:
return expr.getValueAs(LocalTime.class)
.map(lt -> lt.get(ChronoField.MILLI_OF_DAY))
.orElse(null);
case DATE:
return expr.getValueAs(LocalDate.class)
.map(date -> (int) date.toEpochDay())
.orElse(null);
// NOTE: All integral types of size less than Int are encoded as Ints in MT
case BOOLEAN:
return expr.getValueAs(Boolean.class).orElse(null);
case TINYINT:
return expr.getValueAs(Byte.class).orElse(null);
case SMALLINT:
return expr.getValueAs(Short.class).orElse(null);
case INTEGER:
return expr.getValueAs(Integer.class).orElse(null);
case BIGINT:
return expr.getValueAs(Long.class).orElse(null);
case FLOAT:
return expr.getValueAs(Float.class).orElse(null);
case DOUBLE:
return expr.getValueAs(Double.class).orElse(null);
case BINARY:
case VARBINARY:
return expr.getValueAs(byte[].class).orElse(null);
case CHAR:
case VARCHAR:
return expr.getValueAs(String.class).orElse(null);
case DECIMAL:
return expr.getValueAs(BigDecimal.class).orElse(null);
default:
throw new UnsupportedOperationException("Unsupported type: " + logicalType);
}
} | @Test
void getValueFromLiteralForNull() {
List<RowType.RowField> fields = ((RowType) ROW_DATA_TYPE.getLogicalType()).getFields();
List<DataType> dataTypes = ROW_DATA_TYPE.getChildren();
CallExpression callExpression;
for (int i = 0; i < fields.size(); i++) {
// 1. Build all types
callExpression = new CallExpression(
BuiltInFunctionDefinitions.IS_NOT_NULL,
Arrays.asList(new FieldReferenceExpression(fields.get(i).getName(),
dataTypes.get(i),
2,
2), new ValueLiteralExpression(null, dataTypes.get(i))),
DataTypes.BOOLEAN());
List<Expression> childExprs = callExpression.getChildren();
// 2. Parse each type
boolean hasNullLiteral =
childExprs.stream().anyMatch(e ->
e instanceof ValueLiteralExpression
&& ExpressionUtils.getValueFromLiteral((ValueLiteralExpression) e) == null);
assertTrue(hasNullLiteral);
}
} |
public static Coin parseCoinInexact(final String str) {
try {
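// movePointRight converts to satoshis; longValue() truncates any sub-satoshi fraction, which is what makes this "inexact".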
long satoshis = new BigDecimal(str).movePointRight(SMALLEST_UNIT_EXPONENT).longValue();
return Coin.valueOf(satoshis);
} catch (ArithmeticException e) {
throw new IllegalArgumentException(e); // Repackage exception to honor method contract
}
} | @Test
public void testParseCoinInexact() {
assertEquals(1, parseCoinInexact("0.00000001").value);
assertEquals(1, parseCoinInexact("0.000000011").value);
} |
public void validate(Map<String, NewDocumentType> documentDefinitions) {
List<String> conflictingNames = documentDefinitions.keySet().stream()
.filter(this::isReservedName)
.toList();
if (!conflictingNames.isEmpty()) {
throw new IllegalArgumentException(makeReservedNameMessage(conflictingNames));
}
} | @Test
void exception_thrown_on_reserved_names() {
// Ensure ordering is consistent for testing
Map<String, NewDocumentType> orderedDocTypes = new TreeMap<>(asDocTypeMapping(ReservedDocumentTypeNameValidator.ORDERED_RESERVED_NAMES));
ReservedDocumentTypeNameValidator validator = new ReservedDocumentTypeNameValidator();
try {
validator.validate(orderedDocTypes);
fail();
} catch (IllegalArgumentException e) {
assertEquals("The following document types conflict with reserved keyword names: " +
"'and', 'false', 'id', 'not', 'null', 'or', 'true'. " +
"Reserved keywords are 'and', 'false', 'id', 'not', 'null', 'or', 'true'",
e.getMessage());
}
} |
public double bearingTo(final IGeoPoint other) {
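// Initial great-circle bearing: atan2(sin Δλ · cos φ2, cos φ1 · sin φ2 − sin φ1 · cos φ2 · cos Δλ), normalized to [0, 360).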
final double lat1 = Math.toRadians(this.mLatitude);
final double long1 = Math.toRadians(this.mLongitude);
final double lat2 = Math.toRadians(other.getLatitude());
final double long2 = Math.toRadians(other.getLongitude());
final double delta_long = long2 - long1;
final double a = Math.sin(delta_long) * Math.cos(lat2);
final double b = Math.cos(lat1) * Math.sin(lat2) -
Math.sin(lat1) * Math.cos(lat2) * Math.cos(delta_long);
final double bearing = Math.toDegrees(Math.atan2(a, b));
final double bearing_normalized = (bearing + 360) % 360;
return bearing_normalized;
} | @Test
public void test_bearingTo_south() {
final GeoPoint target = new GeoPoint(0.0, 0.0);
final GeoPoint other = new GeoPoint(-10.0, 0.0);
assertEquals("directly south", 180, Math.round(target.bearingTo(other)));
} |
@Nonnull
public static <T> BatchSource<T> list(@Nonnull String listName) {
return batchFromProcessor("listSource(" + listName + ')', readListP(listName));
} | @Test
public void list_byName() {
// Given
List<Integer> input = sequence(itemCount);
addToSrcList(input);
// When
BatchSource<Integer> source = Sources.list(srcName);
// Then
p.readFrom(source).writeTo(sink);
execute();
assertEquals(input, sinkList);
} |
@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan,
final boolean restoreInProgress) {
try {
final ExecuteResult result = EngineExecutor
.create(primaryContext, serviceContext, plan.getConfig())
.execute(plan.getPlan(), restoreInProgress);
return result;
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
// add the statement text to the KsqlException
throw new KsqlStatementException(
e.getMessage(),
e.getMessage(),
plan.getPlan().getStatementText(),
e.getCause()
);
}
} | @Test
public void shouldCreateSourceTablesQueries() {
// Given:
setupKsqlEngineWithSharedRuntimeEnabled();
givenTopicsExist("t1_topic");
// When:
final List<QueryMetadata> queries = KsqlEngineTestUtil.execute(
serviceContext,
ksqlEngine,
"create source table t1 (f0 bigint primary key, f1 double, f2 boolean) "
+ "with (kafka_topic='t1_topic', value_format='json');",
ksqlConfig,
Collections.emptyMap());
// Then:
assertThat(queries, hasSize(1));
assertThat(queries.get(0), is(instanceOf(PersistentQueryMetadata.class)));
final PersistentQueryMetadata metadata = (PersistentQueryMetadata) queries.get(0);
assertThat(metadata.getPersistentQueryType(),
is(KsqlConstants.PersistentQueryType.CREATE_SOURCE));
assertThat((metadata).getSink(), is(Optional.empty()));
assertThat((metadata).getSinkName(), is(Optional.empty()));
assertThat((metadata).getDataSourceType(), is(Optional.empty()));
assertThat((metadata).getResultTopic(), is(Optional.empty()));
assertThat(metadata.getLogicalSchema().key(), hasItems(hasFullName("F0")));
assertThat(metadata.getLogicalSchema().value(), hasItems(hasFullName("F1"), hasFullName("F2")));
} |
public H3IndexResolution getResolution() {
return _resolution;
} | @Test
public void withDisabledTrue()
throws JsonProcessingException {
String confStr = "{\"disabled\": true}";
H3IndexConfig config = JsonUtils.stringToObject(confStr, H3IndexConfig.class);
assertTrue(config.isDisabled(), "Unexpected disabled");
assertNull(config.getResolution(), "Unexpected resolution");
} |
@NotNull
public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) {
// Prefer fetching from the DB, because the authorization code is single-use.
// During social login, when no User is bound yet, the bind-then-login flow needs the code twice.
SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state);
if (socialUser != null) {
return socialUser;
}
// Otherwise fetch the user from the provider
AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state);
Assert.notNull(authUser, "三方用户不能为空");
// Save to the DB
socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid());
if (socialUser == null) {
socialUser = new SocialUserDO();
}
socialUser.setType(socialType).setCode(code).setState(state) // code + state must be persisted so this record stays queryable
.setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken()).setRawTokenInfo((toJsonString(authUser.getToken())))
.setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar()).setRawUserInfo(toJsonString(authUser.getRawUserInfo()));
if (socialUser.getId() == null) {
socialUserMapper.insert(socialUser);
} else {
socialUserMapper.updateById(socialUser);
}
return socialUser;
} | @Test
public void testAuthSocialUser_notNull() {
// Mock data
SocialUserDO socialUser = randomPojo(SocialUserDO.class,
o -> o.setType(SocialTypeEnum.GITEE.getType()).setCode("tudou").setState("yuanma"));
socialUserMapper.insert(socialUser);
// Prepare parameters
Integer socialType = SocialTypeEnum.GITEE.getType();
Integer userType = randomEle(SocialTypeEnum.values()).getType();
String code = "tudou";
String state = "yuanma";
// Invoke
SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state);
// Assert
assertPojoEquals(socialUser, result);
} |
@SuppressWarnings("ParameterNumber")
TransientQueryMetadata buildTransientQuery(
final String statementText,
final QueryId queryId,
final Set<SourceName> sources,
final ExecutionStep<?> physicalPlan,
final String planSummary,
final LogicalSchema schema,
final OptionalInt limit,
final Optional<WindowInfo> windowInfo,
final boolean excludeTombstones,
final QueryMetadata.Listener listener,
final StreamsBuilder streamsBuilder,
final Optional<ImmutableMap<TopicPartition, Long>> endOffsets,
final MetricCollectors metricCollectors
) {
final KsqlConfig ksqlConfig = config.getConfig(true);
final String applicationId = QueryApplicationId.build(ksqlConfig, false, queryId);
final RuntimeBuildContext runtimeBuildContext = buildContext(
applicationId,
queryId,
streamsBuilder
);
final Map<String, Object> streamsProperties = buildStreamsProperties(
applicationId,
Optional.of(queryId),
metricCollectors,
config.getConfig(true),
processingLogContext
);
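// Transient queries need no standby replicas, so pin the count to 0.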
streamsProperties.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 0);
final Object buildResult = buildQueryImplementation(physicalPlan, runtimeBuildContext);
final TransientQueryQueue queue =
buildTransientQueryQueue(buildResult, limit, excludeTombstones, endOffsets);
final Topology topology = streamsBuilder.build(PropertiesUtil.asProperties(streamsProperties));
final TransientQueryMetadata.ResultType resultType = buildResult instanceof KTableHolder
? windowInfo.isPresent() ? ResultType.WINDOWED_TABLE : ResultType.TABLE
: ResultType.STREAM;
return new TransientQueryMetadata(
statementText,
schema,
sources,
planSummary,
queue,
queryId,
applicationId,
topology,
kafkaStreamsBuilder,
streamsProperties,
config.getOverrides(),
ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG),
ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_ERROR_MAX_QUEUE_SIZE),
resultType,
ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_INITIAL_MS),
ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_MAX_MS),
listener,
processingLogContext.getLoggerFactory()
);
} | @Test
public void shouldBuildTransientQueryCorrectly() {
// Given:
givenTransientQuery();
// When:
final TransientQueryMetadata queryMetadata = queryBuilder.buildTransientQuery(
STATEMENT_TEXT,
QUERY_ID,
SOURCES.stream().map(DataSource::getName).collect(Collectors.toSet()),
physicalPlan,
SUMMARY,
TRANSIENT_SINK_SCHEMA,
LIMIT,
Optional.empty(),
false,
queryListener,
streamsBuilder,
Optional.empty(),
new MetricCollectors()
);
queryMetadata.initialize();
// Then:
assertThat(queryMetadata.getStatementString(), equalTo(STATEMENT_TEXT));
assertThat(queryMetadata.getSourceNames(), equalTo(SOURCES.stream()
.map(DataSource::getName).collect(Collectors.toSet())));
assertThat(queryMetadata.getExecutionPlan(), equalTo(SUMMARY));
assertThat(queryMetadata.getTopology(), is(topology));
assertThat(queryMetadata.getOverriddenProperties(), equalTo(OVERRIDES));
verify(kafkaStreamsBuilder).build(any(), propertyCaptor.capture());
assertThat(queryMetadata.getStreamsProperties(), equalTo(propertyCaptor.getValue()));
assertThat(queryMetadata.getStreamsProperties().get(InternalConfig.TOPIC_PREFIX_ALTERNATIVE), nullValue());
assertThat(queryMetadata.getStreamsProperties().get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG), equalTo(0));
} |
@PATCH
@Path("/{connector}/offsets")
@Operation(summary = "Alter the offsets for the specified connector")
public Response alterConnectorOffsets(final @Parameter(hidden = true) @QueryParam("forward") Boolean forward,
final @Context HttpHeaders headers, final @PathParam("connector") String connector,
final ConnectorOffsets offsets) throws Throwable {
if (offsets.offsets() == null || offsets.offsets().isEmpty()) {
throw new BadRequestException("Partitions / offsets need to be provided for an alter offsets request");
}
FutureCallback<Message> cb = new FutureCallback<>();
herder.alterConnectorOffsets(connector, offsets.toMap(), cb);
Message msg = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/offsets", "PATCH", headers, offsets,
new TypeReference<Message>() { }, new IdentityTranslator<>(), forward);
return Response.ok().entity(msg).build();
} | @Test
public void testAlterOffsetsNotLeader() throws Throwable {
Map<String, ?> partition = new HashMap<>();
Map<String, ?> offset = new HashMap<>();
ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset);
ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset));
final ArgumentCaptor<Callback<Message>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackNotLeaderException(cb).when(herder).alterConnectorOffsets(eq(CONNECTOR_NAME), eq(body.toMap()), cb.capture());
when(restClient.httpRequest(eq(LEADER_URL + "connectors/" + CONNECTOR_NAME + "/offsets?forward=true"), eq("PATCH"), isNull(), eq(body), any()))
.thenReturn(new RestClient.HttpResponse<>(200, new HashMap<>(), new Message("")));
connectorsResource.alterConnectorOffsets(null, NULL_HEADERS, CONNECTOR_NAME, body);
} |
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final List<Path> containers = new ArrayList<>();
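// Delete objects (and pending multipart uploads) first; buckets must be empty before they can be removed below.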
for(Path file : files.keySet()) {
if(containerService.isContainer(file)) {
containers.add(file);
}
else {
callback.delete(file);
final Path bucket = containerService.getContainer(file);
if(file.getType().contains(Path.Type.upload)) {
// In-progress multipart upload
try {
multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(NotfoundException ignored) {
log.warn(String.format("Ignore failure deleting multipart upload %s", file));
}
}
else {
try {
// Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys
session.getClient().deleteVersionedObject(
file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file));
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
}
}
for(Path file : containers) {
callback.delete(file);
try {
final String bucket = containerService.getContainer(file).getName();
session.getClient().deleteBucket(bucket);
session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
} | @Test(expected = NotfoundException.class)
public void testDeleteNotFoundBucket() throws Exception {
final Path container = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 1) {
onInvalidDataReceived(device, data);
return;
}
//noinspection DataFlowIssue
final int sensorLocation = data.getIntValue(Data.FORMAT_UINT8, 0);
onBodySensorLocationReceived(device, sensorLocation);
} | @Test
public void onInvalidDataReceived() {
success = false;
final Data data = new Data();
response.onDataReceived(null, data);
assertFalse(response.isValid());
assertFalse(success);
} |
public void setSortKey(SortKey sortkey) {
if (Objects.equals(this.sortkey, sortkey)) {
return;
}
invalidate();
if (sortkey != null) {
int column = sortkey.getColumn();
if (valueComparators[column] == null) {
throw new IllegalArgumentException(
format("Can't sort column %s, it is mapped to type %s and this one have no natural order. So an explicit one must be specified",
column, model.getColumnClass(column)));
}
}
this.sortkey = sortkey;
this.comparator = null;
} | @Test
public void sortKeyDescending() {
sorter.setSortKey(new SortKey(0, SortOrder.DESCENDING));
assertRowOrderAndIndexes(asList(d4(), c1(), b2(), a3()));
} |
public Iterable<CidrBlock> iterableCidrs() {
return () -> new Iterator<>() {
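// Step through the address space one full block of this prefix length at a time, stopping before wrap-around.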
private final BigInteger increment = BigInteger.ONE.shiftLeft(suffixLength());
private final BigInteger maxValue = BigInteger.ONE.shiftLeft(addressLength).subtract(increment);
private BigInteger current = addressInteger;
public boolean hasNext() {
return current.compareTo(maxValue) < 0;
}
public CidrBlock next() {
if (!hasNext()) throw new NoSuchElementException();
CidrBlock cidrBlock = new CidrBlock(current, prefixLength, addressLength);
current = current.add(increment);
return cidrBlock;
}
};
} | @Test
public void iterableCidrs() {
CidrBlock superBlock = CidrBlock.fromString("10.12.14.0/24");
assertEquals(List.of("10.12.14.200/29", "10.12.14.208/29", "10.12.14.216/29", "10.12.14.224/29", "10.12.14.232/29", "10.12.14.240/29", "10.12.14.248/29"),
StreamSupport.stream(CidrBlock.fromString("10.12.14.200/29").iterableCidrs().spliterator(), false)
.takeWhile(superBlock::overlapsWith)
.map(CidrBlock::asString)
.collect(Collectors.toList()));
assertEquals(StreamSupport.stream(superBlock.iterableIps().spliterator(), false)
.skip(24)
.map(ip -> InetAddressUtil.toString(ip) + "/32")
.collect(Collectors.toList()),
StreamSupport.stream(CidrBlock.fromString("10.12.14.24/32").iterableCidrs().spliterator(), false)
.takeWhile(superBlock::overlapsWith)
.map(CidrBlock::asString)
.collect(Collectors.toList()));
} |
@Override
public String toString() {
StringBuilder builder = new StringBuilder("AfterProcessingTime.pastFirstElementInPane()");
for (TimestampTransform transform : getTimestampTransforms()) {
if (transform instanceof TimestampTransform.Delay) {
TimestampTransform.Delay delay = (TimestampTransform.Delay) transform;
builder
.append(".plusDelayOf(")
.append(DURATION_FORMATTER.print(delay.getDelay().toPeriod()))
.append(")");
} else if (transform instanceof TimestampTransform.AlignTo) {
TimestampTransform.AlignTo alignTo = (TimestampTransform.AlignTo) transform;
builder
.append(".alignedTo(")
.append(DURATION_FORMATTER.print(alignTo.getPeriod().toPeriod()))
.append(", ")
.append(alignTo.getOffset())
.append(")");
}
}
return builder.toString();
} | @Test
public void testToString() {
Trigger trigger = AfterProcessingTime.pastFirstElementInPane();
assertEquals("AfterProcessingTime.pastFirstElementInPane()", trigger.toString());
} |
public T send() throws IOException {
return web3jService.send(this, responseType);
} | @Test
public void testEthNewFilter() throws Exception {
EthFilter ethFilter = new EthFilter().addSingleTopic("0x12341234");
web3j.ethNewFilter(ethFilter).send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"eth_newFilter\","
+ "\"params\":[{\"topics\":[\"0x12341234\"]}],\"id\":1}");
} |
public PropertyPanel addProp(String key, String label, String value) {
properties.add(new Prop(key, label, value));
return this;
} | @Test
public void longValues() {
basic();
pp.addProp(KEY_A, KEY_A, 200L)
.addProp(KEY_B, KEY_B, 2000L)
.addProp(KEY_C, KEY_C, 1234567L)
.addProp(KEY_Z, KEY_Z, Long.MAX_VALUE);
validateProp(KEY_A, "200");
validateProp(KEY_B, "2,000");
validateProp(KEY_C, "1,234,567");
validateProp(KEY_Z, "9,223,372,036,854,775,807");
} |
public static Builder builder() {
return new Builder();
} | @Test(expected = IllegalArgumentException.class)
public void testIllegalMulticastTypeConstruction() {
IpPrefix ip = IpPrefix.valueOf(IP_ADDRESS_1);
MappingAddress address = MappingAddresses.ipv4MappingAddress(ip);
DefaultMappingTreatment.builder()
.withAddress(address)
.setMulticastPriority(10)
.setMulticastWeight(10)
.setMulticastPriority(20)
.build();
} |