focal_method | test_case |
---|---|
@Override
public boolean checkIndexExists( Database database, String schemaName, String tableName, String[] idxFields ) throws KettleDatabaseException {
String tablename = database.getDatabaseMeta().getQuotedSchemaTableCombination( schemaName, tableName );
boolean[] exists = new boolean[ idxFields.length];
for ( int i = 0; i < exists.length; i++ ) {
exists[i] = false;
}
try {
//
// Get the info from the data dictionary...
//
StringBuilder sql = new StringBuilder( 128 );
sql.append( "select i.name table_name, c.name column_name " );
sql.append( "from sysindexes i, sysindexkeys k, syscolumns c " );
sql.append( "where i.name = '" + tablename + "' " );
sql.append( "AND i.id = k.id " );
sql.append( "AND i.id = c.id " );
sql.append( "AND k.colid = c.colid " );
ResultSet res = null;
try {
res = database.openQuery( sql.toString() );
if ( res != null ) {
Object[] row = database.getRow( res );
while ( row != null ) {
String column = database.getReturnRowMeta().getString( row, "column_name", "" );
int idx = Const.indexOfString( column, idxFields );
if ( idx >= 0 ) {
exists[idx] = true;
}
row = database.getRow( res );
}
} else {
return false;
}
} finally {
if ( res != null ) {
database.closeQuery( res );
}
}
// See if all the fields are indexed...
boolean all = true;
for ( int i = 0; i < exists.length && all; i++ ) {
if ( !exists[i] ) {
all = false;
}
}
return all;
} catch ( Exception e ) {
throw new KettleDatabaseException( "Unable to determine if indexes exists on table [" + tablename + "]", e );
}
} | @Test
public void testCheckIndexExists() throws Exception {
String expectedSQL = "select i.name table_name, c.name column_name from sysindexes i, sysindexkeys k, syscolumns c where i.name = 'FOO' AND i.id = k.id AND i.id = c.id AND k.colid = c.colid "; // yes, space at the end like in the dbmeta
Database db = Mockito.mock( Database.class );
RowMetaInterface rm = Mockito.mock( RowMetaInterface.class );
ResultSet rs = Mockito.mock( ResultSet.class );
DatabaseMeta dm = Mockito.mock( DatabaseMeta.class );
Mockito.when( dm.getQuotedSchemaTableCombination( "", "FOO" ) ).thenReturn( "FOO" );
Mockito.when( rs.next() ).thenReturn( rowCnt < 2 );
Mockito.when( db.openQuery( expectedSQL ) ).thenReturn( rs );
Mockito.when( db.getReturnRowMeta() ).thenReturn( rm );
Mockito.when( rm.getString( row1, "column_name", "" ) ).thenReturn( "ROW1COL2" );
Mockito.when( rm.getString( row2, "column_name", "" ) ).thenReturn( "ROW2COL2" );
Mockito.when( db.getRow( rs ) ).thenAnswer( new Answer<Object[]>() {
@Override
public Object[] answer( InvocationOnMock invocation ) throws Throwable {
rowCnt++;
if ( rowCnt == 1 ) {
return row1;
} else if ( rowCnt == 2 ) {
return row2;
} else {
return null;
}
}
} );
Mockito.when( db.getDatabaseMeta() ).thenReturn( dm );
assertTrue( nativeMeta.checkIndexExists( db, "", "FOO", new String[] { "ROW1COL2", "ROW2COL2" } ) );
assertFalse( nativeMeta.checkIndexExists( db, "", "FOO", new String[] { "ROW2COL2", "NOTTHERE" } ) );
assertFalse( nativeMeta.checkIndexExists( db, "", "FOO", new String[] { "NOTTHERE", "ROW1COL2" } ) );
} |
public static void checkState(boolean isValid, String message) throws IllegalStateException {
if (!isValid) {
throw new IllegalStateException(message);
}
} | @Test
public void checkStateMessageOnlySupportsStringTypeTemplate() {
try {
Preconditions.checkState(true, "Test message %d", 12);
} catch (IllegalStateException e) {
Assert.fail("Should not throw exception when isValid is true");
}
try {
Preconditions.checkState(false, "Test message %d", 12);
Assert.fail("Should throw exception when isValid is false");
} catch (IllegalArgumentException e) {
Assert.assertEquals("%d is not a valid format option", "d != java.lang.String", e.getMessage());
}
} |
@Override
protected Collection<Address> getPossibleAddresses() {
Iterable<DiscoveryNode> discoveredNodes = checkNotNull(discoveryService.discoverNodes(),
"Discovered nodes cannot be null!");
MemberImpl localMember = node.nodeEngine.getLocalMember();
Set<Address> localAddresses = node.getLocalAddressRegistry().getLocalAddresses();
Collection<Address> possibleMembers = new ArrayList<>();
for (DiscoveryNode discoveryNode : discoveredNodes) {
Address discoveredAddress = usePublicAddress ? discoveryNode.getPublicAddress() : discoveryNode.getPrivateAddress();
if (localAddresses.contains(discoveredAddress)) {
if (!usePublicAddress && discoveryNode.getPublicAddress() != null) {
// enrich member with client public address
localMember.getAddressMap().put(EndpointQualifier.resolve(ProtocolType.CLIENT, "public"),
publicAddress(localMember, discoveryNode));
}
continue;
}
possibleMembers.add(discoveredAddress);
}
return possibleMembers;
} | @Test
public void test_DiscoveryJoiner_enriches_member_with_public_address_when_advanced_network_used()
throws UnknownHostException {
DiscoveryJoiner joiner = new DiscoveryJoiner(getNode(hz), service, false);
doReturn(discoveryNodes).when(service).discoverNodes();
// the CLIENT protocol server socket listens on port 5703
// but is mapped to public address 127.0.0.2:6701
getNode(hz).getLocalMember().getAddressMap().put(EndpointQualifier.CLIENT, new Address("127.0.0.1", 5703));
Collection<Address> addresses = joiner.getPossibleAddresses();
String expected;
if (discoveryBehaviourFallbackEnabled) {
expected = "[127.0.0.2]:5703";
} else {
expected = "[127.0.0.2]:6701";
}
assertEquals(expected, getNode(hz).getLocalMember().getAddressMap()
.get(CLIENT_PUBLIC_ENDPOINT_QUALIFIER).toString());
} |
@Override
public void validate(PipelineOptions options) {
delegate().validate(options);
} | @Test
public void validateDelegates() {
@SuppressWarnings("unchecked")
PipelineOptions options = mock(PipelineOptions.class);
doThrow(RuntimeException.class).when(delegate).validate(options);
thrown.expect(RuntimeException.class);
forwarding.validate(options);
} |
@Override
public void memberChange(Set<String> addresses) {
for (int i = 0; i < 5; i++) {
if (this.raftServer.peerChange(jRaftMaintainService, addresses)) {
return;
}
ThreadUtils.sleep(100L);
}
Loggers.RAFT.warn("peer removal failed");
} | @Test
void testMemberChange() {
Set<String> addresses = new HashSet<>();
raftProtocol.memberChange(addresses);
verify(serverMock, times(5)).peerChange(jRaftMaintainService, addresses);
} |
public static String jaasConfig(String moduleName, Map<String, String> options) {
StringJoiner joiner = new StringJoiner(" ");
for (Entry<String, String> entry : options.entrySet()) {
String key = Objects.requireNonNull(entry.getKey());
String value = Objects.requireNonNull(entry.getValue());
if (key.contains("=") || key.contains(";")) {
throw new IllegalArgumentException("Keys must not contain '=' or ';'");
}
if (moduleName.isEmpty() || moduleName.contains(";") || moduleName.contains("=")) {
throw new IllegalArgumentException("module name must be not empty and must not contain '=' or ';'");
} else {
joiner.add(key + "=\"" + value + "\"");
}
}
return moduleName + " required " + joiner + ";";
} | @Test
public void testModuleNameContainsEqualSign() {
Map<String, String> options = new HashMap<>();
options.put("key1", "value1");
String moduleName = "Module=";
assertThrows(IllegalArgumentException.class, () -> AuthenticationUtils.jaasConfig(moduleName, options));
} |
@Override
public void deregisterInstance(String serviceName, String ip, int port) throws NacosException {
deregisterInstance(serviceName, ip, port, Constants.DEFAULT_CLUSTER_NAME);
} | @Test
void testDeregisterInstance6() throws NacosException {
//given
String serviceName = "service1";
String groupName = "group1";
Instance instance = new Instance();
//when
client.deregisterInstance(serviceName, groupName, instance);
//then
verify(proxy, times(1)).deregisterService(serviceName, groupName, instance);
} |
@Override
public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
try {
final BrickApiClient client = new BrickApiClient(session);
if(status.isExists()) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s to be replaced with %s", target, file));
}
new BrickDeleteFeature(session).delete(Collections.singletonList(target), callback, new Delete.DisabledCallback());
}
final FileActionEntity entity = new FileActionsApi(client)
.copy(new CopyPathBody().destination(StringUtils.removeStart(target.getAbsolute(), String.valueOf(Path.DELIMITER))),
StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)));
listener.sent(status.getLength());
if(entity.getFileMigrationId() != null) {
this.poll(client, entity);
}
return target.withAttributes(file.attributes());
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Cannot copy {0}", e, file);
}
} | @Test
public void testCopyEmptyFile() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new BrickTouchFeature(session).touch(test, new TransferStatus());
final Path copy = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new BrickCopyFeature(session).copy(test, copy, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
assertTrue(new BrickFindFeature(session).find(test));
assertTrue(new BrickFindFeature(session).find(copy));
new BrickDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
new BrickDeleteFeature(session).delete(Collections.<Path>singletonList(copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public CompletableFuture<InetSocketAddress> resolveAndCheckTargetAddress(String hostAndPort) {
int pos = hostAndPort.lastIndexOf(':');
String host = hostAndPort.substring(0, pos);
int port = Integer.parseInt(hostAndPort.substring(pos + 1));
if (!isPortAllowed(port)) {
return FutureUtil.failedFuture(
new TargetAddressDeniedException("Given port in '" + hostAndPort + "' isn't allowed."));
} else if (!isHostAllowed(host)) {
return FutureUtil.failedFuture(
new TargetAddressDeniedException("Given host in '" + hostAndPort + "' isn't allowed."));
} else {
return NettyFutureUtil.toCompletableFuture(
inetSocketAddressResolver.resolve(InetSocketAddress.createUnresolved(host, port)))
.thenCompose(resolvedAddress -> {
CompletableFuture<InetSocketAddress> result = new CompletableFuture<>();
if (isIPAddressAllowed(resolvedAddress)) {
result.complete(resolvedAddress);
} else {
result.completeExceptionally(new TargetAddressDeniedException(
"The IP address of the given host and port '" + hostAndPort + "' isn't allowed."));
}
return result;
});
}
} | @Test
public void shouldAllowAllWithWildcard() throws Exception {
BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator(
createMockedAddressResolver("1.2.3.4"),
"*"
, "*"
, "6650");
brokerProxyValidator.resolveAndCheckTargetAddress("myhost.mydomain:6650").get();
} |
public PluginData obtainPluginData(final String pluginName) {
return PLUGIN_MAP.get(pluginName);
} | @Test
public void testObtainPluginData() throws NoSuchFieldException, IllegalAccessException {
PluginData pluginData = PluginData.builder().name(mockName1).build();
ConcurrentHashMap<String, PluginData> pluginMap = getFieldByName(pluginMapStr);
pluginMap.put(mockName1, pluginData);
assertNotNull(pluginMap.get(mockName1));
assertEquals(pluginData, BaseDataCache.getInstance().obtainPluginData(mockName1));
} |
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
boolean useJavaCC = "--useJavaCC".equals(getArg(args, 0, null));
if (args.isEmpty() || args.size() > (useJavaCC ? 3 : 2) || isRequestingHelp(args)) {
err.println("Usage: idl2schemata [--useJavaCC] [idl [outdir]]");
err.println();
err.println("If an output directory is not specified, " + "outputs to current directory.");
return -1;
}
String inputName = getArg(args, useJavaCC ? 1 : 0, "-");
File inputFile = "-".equals(inputName) ? null : new File(inputName);
File outputDirectory = getOutputDirectory(getArg(args, useJavaCC ? 2 : 1, ""));
if (useJavaCC) {
try (Idl parser = new Idl(inputFile)) {
final Protocol protocol = parser.CompilationUnit();
final List<String> warnings = parser.getWarningsAfterParsing();
for (String warning : warnings) {
err.println("Warning: " + warning);
}
for (Schema schema : protocol.getTypes()) {
print(schema, outputDirectory);
}
}
} else {
IdlReader parser = new IdlReader();
IdlFile idlFile = inputFile == null ? parser.parse(in) : parser.parse(inputFile.toPath());
for (String warning : idlFile.getWarnings()) {
err.println("Warning: " + warning);
}
for (Schema schema : idlFile.getNamedSchemas().values()) {
print(schema, outputDirectory);
}
}
return 0;
} | @Test
void splitIdlIntoSchemata() throws Exception {
String idl = "src/test/idl/protocol.avdl";
String outdir = "target/test-split";
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
List<String> arglist = Arrays.asList(idl, outdir);
new IdlToSchemataTool().run(null, null, new PrintStream(buffer), arglist);
String[] files = new File(outdir).list();
assertEquals(4, files.length);
String warnings = readPrintStreamBuffer(buffer);
assertEquals("Warning: Line 1, char 1: Ignoring out-of-place documentation comment."
+ "\nDid you mean to use a multiline comment ( /* ... */ ) instead?", warnings);
} |
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
} | @Test
void testForwardedNoArrowOneStringInvalidDelimiter() {
String[] forwardedFields = {"f2,f3,f0"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
assertThatThrownBy(
() ->
SemanticPropUtil.getSemanticPropsSingleFromString(
sp,
forwardedFields,
null,
null,
fiveIntTupleType,
fiveIntTupleType))
.isInstanceOf(InvalidSemanticAnnotationException.class);
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testClusterAuthorizationExceptionInProduceRequest() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
// cluster authorization is a fatal error for the producer
Future<RecordMetadata> future = appendToAccumulator(tp0);
client.prepareResponse(
body -> body instanceof ProduceRequest && RequestTestUtils.hasIdempotentRecords((ProduceRequest) body),
produceResponse(tp0, -1, Errors.CLUSTER_AUTHORIZATION_FAILED, 0));
sender.runOnce();
assertFutureFailure(future, ClusterAuthorizationException.class);
// cluster authorization errors are fatal, so we should continue seeing it on future sends
assertTrue(transactionManager.hasFatalError());
assertSendFailure(ClusterAuthorizationException.class);
} |
@PostMapping("/rule")
public ShenyuAdminResult saveRule(@RequestBody @Valid @NotNull final DataPermissionDTO dataPermissionDTO) {
return ShenyuAdminResult.success(ShenyuResultMessage.SAVE_SUCCESS, dataPermissionService.createRule(dataPermissionDTO));
} | @Test
public void saveRule() throws Exception {
DataPermissionDTO dataPermissionDTO = new DataPermissionDTO();
dataPermissionDTO.setDataId("testDataId");
dataPermissionDTO.setUserId("testUserId");
given(this.dataPermissionService.createRule(dataPermissionDTO)).willReturn(1);
this.mockMvc.perform(MockMvcRequestBuilders.post("/data-permission/rule")
.contentType(MediaType.APPLICATION_JSON)
.content(GsonUtils.getInstance().toJson(dataPermissionDTO)))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.SAVE_SUCCESS)))
.andExpect(jsonPath("$.data", is(1)))
.andReturn();
} |
@Override
public boolean tryFence(HAServiceTarget target, String args) {
ProcessBuilder builder;
String cmd = parseArgs(target.getTransitionTargetHAStatus(), args);
if (!Shell.WINDOWS) {
builder = new ProcessBuilder("bash", "-e", "-c", cmd);
} else {
builder = new ProcessBuilder("cmd.exe", "/c", cmd);
}
setConfAsEnvVars(builder.environment());
addTargetInfoAsEnvVars(target, builder.environment());
Process p;
try {
p = builder.start();
p.getOutputStream().close();
} catch (IOException e) {
LOG.warn("Unable to execute " + cmd, e);
return false;
}
String pid = tryGetPid(p);
LOG.info("Launched fencing command '" + cmd + "' with "
+ ((pid != null) ? ("pid " + pid) : "unknown pid"));
String logPrefix = abbreviate(cmd, ABBREV_LENGTH);
if (pid != null) {
logPrefix = "[PID " + pid + "] " + logPrefix;
}
// Pump logs to stderr
StreamPumper errPumper = new StreamPumper(
LOG, logPrefix, p.getErrorStream(),
StreamPumper.StreamType.STDERR);
errPumper.start();
StreamPumper outPumper = new StreamPumper(
LOG, logPrefix, p.getInputStream(),
StreamPumper.StreamType.STDOUT);
outPumper.start();
int rc;
try {
rc = p.waitFor();
errPumper.join();
outPumper.join();
} catch (InterruptedException ie) {
LOG.warn("Interrupted while waiting for fencing command: " + cmd);
return false;
}
return rc == 0;
} | @Test
public void testTargetAsEnvironment() {
if (!Shell.WINDOWS) {
fencer.tryFence(TEST_TARGET, "echo $target_host $target_port");
Mockito.verify(ShellCommandFencer.LOG).info(
Mockito.endsWith("echo $ta...rget_port: dummyhost 1234"));
} else {
fencer.tryFence(TEST_TARGET, "echo %target_host% %target_port%");
Mockito.verify(ShellCommandFencer.LOG).info(
Mockito.endsWith("echo %ta...get_port%: dummyhost 1234"));
}
} |
@Override
public void setIndex(int readerIndex, int writerIndex) {
if (readerIndex < 0 || readerIndex > writerIndex || writerIndex > capacity()) {
throw new IndexOutOfBoundsException();
}
this.readerIndex = readerIndex;
this.writerIndex = writerIndex;
} | @Test
void setIndexBoundaryCheck3() {
Assertions.assertThrows(IndexOutOfBoundsException.class, () -> buffer.setIndex(0, CAPACITY + 1));
} |
public List<Metadata> recursiveParserWrapperExample() throws IOException, SAXException, TikaException {
Parser p = new AutoDetectParser();
ContentHandlerFactory factory = new BasicContentHandlerFactory(BasicContentHandlerFactory.HANDLER_TYPE.HTML, -1);
RecursiveParserWrapper wrapper = new RecursiveParserWrapper(p);
Metadata metadata = new Metadata();
metadata.set(TikaCoreProperties.RESOURCE_NAME_KEY, "test_recursive_embedded.docx");
ParseContext context = new ParseContext();
RecursiveParserWrapperHandler handler = new RecursiveParserWrapperHandler(factory, -1);
try (InputStream stream = ParsingExample.class.getResourceAsStream("test_recursive_embedded.docx")) {
wrapper.parse(stream, handler, metadata, context);
}
return handler.getMetadataList();
} | @Test
public void testRecursiveParserWrapperExample() throws IOException, SAXException, TikaException {
List<Metadata> metadataList = parsingExample.recursiveParserWrapperExample();
assertEquals(12, metadataList.size(), "Number of embedded documents + 1 for the container document");
Metadata m = metadataList.get(6);
//this is the location the embed3.txt text file within the outer .docx
assertEquals("/embed1.zip/embed2.zip/embed3.zip/embed3.txt", m.get("X-TIKA:embedded_resource_path"));
//it contains some html encoded content
assertContains("When in the Course", m.get("X-TIKA:content"));
} |
@SafeVarargs
public static <T> T[] append(T[] buffer, T... newElements) {
if (isEmpty(buffer)) {
return newElements;
}
return insert(buffer, buffer.length, newElements);
} | @Test
public void appendTest() {
String[] a = {"1", "2", "3", "4"};
String[] b = {"a", "b", "c"};
String[] result = ArrayUtil.append(a, b);
assertArrayEquals(new String[]{"1", "2", "3", "4", "a", "b", "c"}, result);
} |
public void createResource(CreateResourceStmt stmt) throws DdlException {
Resource resource = Resource.fromStmt(stmt);
this.writeLock();
try {
String resourceName = stmt.getResourceName();
String typeName = resource.getType().name().toLowerCase(Locale.ROOT);
if (resource.needMappingCatalog()) {
try {
String catalogName = getResourceMappingCatalogName(resourceName, typeName);
GlobalStateMgr.getCurrentState().getCatalogMgr().createCatalog(
typeName, catalogName, "mapping " + typeName + " catalog", stmt.getProperties());
} catch (Exception e) {
LOG.error("Failed to create mapping {} catalog {} failed", typeName, resource, e);
throw new DdlException("Failed to create mapping catalog " + resourceName + " failed, msg: " +
e.getMessage());
}
}
if (nameToResource.putIfAbsent(resourceName, resource) != null) {
throw new DdlException("Resource(" + resourceName + ") already exist");
}
// log add
GlobalStateMgr.getCurrentState().getEditLog().logCreateResource(resource);
LOG.info("create resource success. resource: {}", resource);
} finally {
this.writeUnLock();
}
} | @Test(expected = DdlException.class)
public void testAddResourceExist(@Injectable BrokerMgr brokerMgr, @Injectable EditLog editLog,
@Mocked GlobalStateMgr globalStateMgr)
throws UserException {
ResourceMgr mgr = new ResourceMgr();
// add
CreateResourceStmt stmt = addSparkResource(mgr, brokerMgr, editLog, globalStateMgr);
// add again
mgr.createResource(stmt);
} |
public static CountMinSketch merge(CountMinSketch... estimators) throws CMSMergeException {
CountMinSketch merged = null;
if (estimators != null && estimators.length > 0) {
int depth = estimators[0].depth;
int width = estimators[0].width;
long[] hashA = Arrays.copyOf(estimators[0].hashA, estimators[0].hashA.length);
long[][] table = new long[depth][width];
long size = 0;
for (CountMinSketch estimator : estimators) {
if (estimator.depth != depth) {
throw new CMSMergeException("Cannot merge estimators of different depth");
}
if (estimator.width != width) {
throw new CMSMergeException("Cannot merge estimators of different width");
}
if (!Arrays.equals(estimator.hashA, hashA)) {
throw new CMSMergeException("Cannot merge estimators of different seed");
}
for (int i = 0; i < table.length; i++) {
for (int j = 0; j < table[i].length; j++) {
table[i][j] += estimator.table[i][j];
}
}
long previousSize = size;
size += estimator.size;
checkSizeAfterOperation(previousSize, "merge(" + estimator + ")", size);
}
merged = new CountMinSketch(depth, width, size, hashA, table);
}
return merged;
} | @Test
public void testMergeEmpty() throws CMSMergeException {
assertNull(CountMinSketch.merge());
} |
public Optional<Throwable> run(String... arguments) {
try {
if (isFlag(HELP, arguments)) {
parser.printHelp(stdOut);
} else if (isFlag(VERSION, arguments)) {
parser.printVersion(stdOut);
} else {
final Namespace namespace = parser.parseArgs(arguments);
final Command command = requireNonNull(commands.get(namespace.getString(COMMAND_NAME_ATTR)),
"Command is not found");
try {
command.run(bootstrap, namespace);
} catch (Throwable e) {
// The command failed to run, and the command knows
// best how to cleanup / debug exception
command.onError(this, namespace, e);
return Optional.of(e);
}
}
return Optional.empty();
} catch (HelpScreenException ignored) {
// This exception is triggered when the user passes in a help flag.
// Return true to signal that the process executed normally.
return Optional.empty();
} catch (ArgumentParserException e) {
stdErr.println(e.getMessage());
e.getParser().printHelp(stdErr);
return Optional.of(e);
}
} | @Test
void handlesShortHelpCommands() throws Exception {
assertThat(cli.run("-h"))
.isEmpty();
assertThat(stdOut)
.hasToString(String.format(
"usage: java -jar dw-thing.jar [-h] [-v] {check,custom} ...%n" +
"%n" +
"positional arguments:%n" +
" {check,custom} available commands%n" +
"%n" +
"named arguments:%n" +
" -h, --help show this help message and exit%n" +
" -v, --version show the application version and exit%n"
));
assertThat(stdErr.toString())
.isEmpty();
} |
@Override
public int run(String[] argv) {
if (argv.length < 1) {
printUsage("");
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = argv[i++];
//
// verify that we have enough command line parameters
//
if ("-safemode".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-report".equals(cmd)) {
if (argv.length > DFS_REPORT_ARGS.length + 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-saveNamespace".equals(cmd)) {
if (argv.length != 1 && argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-rollEdits".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-restoreFailedStorage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNodes".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-finalizeUpgrade".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if (RollingUpgradeCommand.matches(cmd)) {
if (argv.length > 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-upgrade".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-metasave".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshServiceAcl".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refresh".equals(cmd)) {
if (argv.length < 3) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-printTopology".equals(cmd)) {
if(argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNamenodes".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-getVolumeReport".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-reconfig".equals(cmd)) {
if (argv.length != 4) {
printUsage(cmd);
return exitCode;
}
} else if ("-deleteBlockPool".equals(cmd)) {
if ((argv.length != 3) && (argv.length != 4)) {
printUsage(cmd);
return exitCode;
}
} else if ("-setBalancerBandwidth".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-getBalancerBandwidth".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-fetchImage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-shutdownDatanode".equals(cmd)) {
if ((argv.length != 2) && (argv.length != 3)) {
printUsage(cmd);
return exitCode;
}
} else if ("-getDatanodeInfo".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-triggerBlockReport".equals(cmd)) {
if ((argv.length < 2) || (argv.length > 5)) {
printUsage(cmd);
return exitCode;
}
} else if ("-listOpenFiles".equals(cmd)) {
if ((argv.length > 4)) {
printUsage(cmd);
return exitCode;
}
}
// initialize DFSAdmin
init();
Exception debugException = null;
exitCode = 0;
try {
if ("-report".equals(cmd)) {
report(argv, i);
} else if ("-safemode".equals(cmd)) {
setSafeMode(argv, i);
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
allowSnapshot(argv);
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
disallowSnapshot(argv);
} else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
provisionSnapshotTrash(argv);
} else if ("-saveNamespace".equals(cmd)) {
exitCode = saveNamespace(argv);
} else if ("-rollEdits".equals(cmd)) {
exitCode = rollEdits();
} else if ("-restoreFailedStorage".equals(cmd)) {
exitCode = restoreFailedStorage(argv[i]);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = refreshNodes();
} else if ("-finalizeUpgrade".equals(cmd)) {
exitCode = finalizeUpgrade();
} else if (RollingUpgradeCommand.matches(cmd)) {
exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
} else if ("-upgrade".equals(cmd)) {
exitCode = upgrade(argv[i]);
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {
exitCode = new ClearQuotaCommand(argv, i, getConf()).runAll();
} else if (SetQuotaCommand.matches(cmd)) {
exitCode = new SetQuotaCommand(argv, i, getConf()).runAll();
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
exitCode = new ClearSpaceQuotaCommand(argv, i, getConf()).runAll();
} else if (SetSpaceQuotaCommand.matches(cmd)) {
exitCode = new SetSpaceQuotaCommand(argv, i, getConf()).runAll();
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcl();
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings();
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration();
} else if ("-refreshCallQueue".equals(cmd)) {
exitCode = refreshCallQueue();
} else if ("-refresh".equals(cmd)) {
exitCode = genericRefresh(argv, i);
} else if ("-printTopology".equals(cmd)) {
exitCode = printTopology();
} else if ("-refreshNamenodes".equals(cmd)) {
exitCode = refreshNamenodes(argv, i);
} else if ("-getVolumeReport".equals(cmd)) {
exitCode = getVolumeReport(argv, i);
} else if ("-deleteBlockPool".equals(cmd)) {
exitCode = deleteBlockPool(argv, i);
} else if ("-setBalancerBandwidth".equals(cmd)) {
exitCode = setBalancerBandwidth(argv, i);
} else if ("-getBalancerBandwidth".equals(cmd)) {
exitCode = getBalancerBandwidth(argv, i);
} else if ("-fetchImage".equals(cmd)) {
exitCode = fetchImage(argv, i);
} else if ("-shutdownDatanode".equals(cmd)) {
exitCode = shutdownDatanode(argv, i);
} else if ("-evictWriters".equals(cmd)) {
exitCode = evictWriters(argv, i);
} else if ("-getDatanodeInfo".equals(cmd)) {
exitCode = getDatanodeInfo(argv, i);
} else if ("-reconfig".equals(cmd)) {
exitCode = reconfig(argv, i);
} else if ("-triggerBlockReport".equals(cmd)) {
exitCode = triggerBlockReport(argv);
} else if ("-listOpenFiles".equals(cmd)) {
exitCode = listOpenFiles(argv);
} else if ("-help".equals(cmd)) {
if (i < argv.length) {
printHelp(argv[i]);
} else {
printHelp("");
}
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("");
}
} catch (IllegalArgumentException arge) {
debugException = arge;
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd);
} catch (RemoteException e) {
//
// This is an error returned by the hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
debugException = e;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": "
+ content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
debugException = ex;
}
} catch (Exception e) {
exitCode = -1;
debugException = e;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
}
if (LOG.isDebugEnabled() && debugException != null) {
LOG.debug("Exception encountered:", debugException);
}
return exitCode;
} | @Test(timeout = 180000)
public void testReportCommand() throws Exception {
tearDown();
redirectStream();
// init conf
final Configuration dfsConf = new HdfsConfiguration();
ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(
SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
dfsConf.setInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
dfsConf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
final Path baseDir = new Path(
PathUtils.getTestDir(getClass()).getAbsolutePath(),
GenericTestUtils.getMethodName());
dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.toString());
final int numDn =
ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
try(MiniDFSCluster miniCluster = new MiniDFSCluster
.Builder(dfsConf)
.numDataNodes(numDn).build()) {
miniCluster.waitActive();
assertEquals(numDn, miniCluster.getDataNodes().size());
final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
final DFSClient client = miniCluster.getFileSystem().getClient();
// Verify report command for all counts to be zero
resetStream();
assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client, 0L, 0L);
final short replFactor = 1;
final long fileLength = 512L;
final DistributedFileSystem fs = miniCluster.getFileSystem();
final Path file = new Path(baseDir, "/corrupted");
fs.enableErasureCodingPolicy(ecPolicy.getName());
DFSTestUtil.createFile(fs, file, fileLength, replFactor, 12345L);
DFSTestUtil.waitReplication(fs, file, replFactor);
final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
LocatedBlocks lbs = miniCluster.getFileSystem().getClient().
getNamenode().getBlockLocations(
file.toString(), 0, fileLength);
assertTrue("Unexpected block type: " + lbs.get(0),
lbs.get(0) instanceof LocatedBlock);
LocatedBlock locatedBlock = lbs.get(0);
DatanodeInfo locatedDataNode = locatedBlock.getLocations()[0];
LOG.info("Replica block located on: " + locatedDataNode);
Path ecDir = new Path(baseDir, "ec");
fs.mkdirs(ecDir);
fs.getClient().setErasureCodingPolicy(ecDir.toString(),
ecPolicy.getName());
Path ecFile = new Path(ecDir, "ec-file");
int stripesPerBlock = 2;
int cellSize = ecPolicy.getCellSize();
int blockSize = stripesPerBlock * cellSize;
int blockGroupSize = ecPolicy.getNumDataUnits() * blockSize;
int totalBlockGroups = 1;
DFSTestUtil.createStripedFile(miniCluster, ecFile, ecDir,
totalBlockGroups, stripesPerBlock, false, ecPolicy);
// Verify report command for all counts to be zero
resetStream();
assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client, 0L, 0L);
// Choose a DataNode to shutdown
final List<DataNode> datanodes = miniCluster.getDataNodes();
DataNode dataNodeToShutdown = null;
for (DataNode dn : datanodes) {
if (!dn.getDatanodeId().getDatanodeUuid().equals(
locatedDataNode.getDatanodeUuid())) {
dataNodeToShutdown = dn;
break;
}
}
assertTrue("Unable to choose a DataNode to shutdown!",
dataNodeToShutdown != null);
// Shut down the DataNode not hosting the replicated block
LOG.info("Shutting down: " + dataNodeToShutdown);
dataNodeToShutdown.shutdown();
miniCluster.setDataNodeDead(dataNodeToShutdown.getDatanodeId());
// Verify report command to show dead DataNode
assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, 0, client, 0L, 1L);
// Corrupt the replicated block
final int blockFilesCorrupted = miniCluster
.corruptBlockOnDataNodes(block);
assertEquals("Fail to corrupt all replicas for block " + block,
replFactor, blockFilesCorrupted);
try {
IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
conf, true);
fail("Should have failed to read the file with corrupted blocks.");
} catch (ChecksumException ignored) {
// expected exception reading corrupt blocks
}
// Increase replication factor, this should invoke transfer request.
// Receiving datanode fails on checksum and reports it to namenode
fs.setReplication(file, (short) (replFactor + 1));
// get block details and check if the block is corrupt
BlockManagerTestUtil.updateState(
miniCluster.getNameNode().getNamesystem().getBlockManager());
waitForCorruptBlock(miniCluster, client, file);
// verify report command for corrupt replicated block
resetStream();
assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 0, client, 0L, 1L);
lbs = miniCluster.getFileSystem().getClient().
getNamenode().getBlockLocations(
ecFile.toString(), 0, blockGroupSize);
assertTrue("Unexpected block type: " + lbs.get(0),
lbs.get(0) instanceof LocatedStripedBlock);
LocatedStripedBlock bg =
(LocatedStripedBlock)(lbs.get(0));
miniCluster.getNamesystem().writeLock();
try {
BlockManager bm = miniCluster.getNamesystem().getBlockManager();
bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
"STORAGE_ID", "TEST");
BlockManagerTestUtil.updateState(bm);
} finally {
miniCluster.getNamesystem().writeUnlock();
}
waitForCorruptBlock(miniCluster, client, file);
// verify report command for corrupt replicated block
// and EC block group
resetStream();
assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 1, client, 0L, 0L);
// verify report command for list all DN types
resetStream();
String[] reportWithArg = new String[DFSAdmin.DFS_REPORT_ARGS.length + 1];
reportWithArg[0] = "-report";
System.arraycopy(DFSAdmin.DFS_REPORT_ARGS, 0, reportWithArg, 1,
DFSAdmin.DFS_REPORT_ARGS.length);
assertEquals(0, ToolRunner.run(dfsAdmin, reportWithArg));
}
} |
@Override
public void handleRequestWithRestLiResponse(RestRequest request, RequestContext requestContext, Callback<RestLiResponse> callback)
{
if (!isMultipart(request, requestContext, callback))
{
_restRestLiServer.handleRequestWithRestLiResponse(request, requestContext, callback);
}
} | @Test(dataProvider = "restOrStream")
public void testHandleRequestWithRestLiResponseSuccess(final RestOrStream restOrStream) throws Exception
{
Status status = new Status();
status.data().put("test", "this is a test");
final StatusCollectionResource statusResource = getMockResource(StatusCollectionResource.class);
EasyMock.expect(statusResource.get(eq(1L))).andReturn(status).once();
replay(statusResource);
Callback<RestLiResponse> restLiResponseCallback = new Callback<RestLiResponse>()
{
@Override
public void onSuccess(RestLiResponse restLiResponse)
{
assertEquals(restLiResponse.getDataMap(), status.data());
EasyMock.verify(statusResource);
EasyMock.reset(statusResource);
}
@Override
public void onError(Throwable e)
{
fail("We should not get an error here. The server should have returned a 200!");
}
};
if (restOrStream == RestOrStream.REST)
{
RestRequest request = new RestRequestBuilder(new URI("/statuses/1"))
.setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString()).build();
_server.handleRequestWithRestLiResponse(request, new RequestContext(), restLiResponseCallback);
}
else
{
StreamRequest streamRequest = new StreamRequestBuilder(new URI("/statuses/1"))
.setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString())
.build(EntityStreams.emptyStream());
_server.handleRequestWithRestLiResponse(streamRequest, new RequestContext(), restLiResponseCallback);
}
} |
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyWithEmptyString() {
expectFailureWhenTestingThat(asList()).containsExactly("");
assertFailureValue("missing (1)", "");
} |
public static void assertAllResultsEqual(Collection<?> objects)
throws AssertionError {
if (objects.size() == 0 || objects.size() == 1)
return;
Object[] resultsArray = objects.toArray();
for (int i = 1; i < resultsArray.length; i++) {
Object currElement = resultsArray[i];
Object lastElement = resultsArray[i - 1];
if ((currElement == null && currElement != lastElement) ||
(currElement != null && !currElement.equals(lastElement))) {
throw new AssertionError("Not all elements match in results: " +
Arrays.toString(resultsArray));
}
}
} | @Test
public void testAssertAllResultsEqual() {
checkAllResults(new Long[]{}, true);
checkAllResults(new Long[]{1l}, true);
checkAllResults(new Long[]{1l, 1l}, true);
checkAllResults(new Long[]{1l, 1l, 1l}, true);
checkAllResults(new Long[]{new Long(1), new Long(1)}, true);
checkAllResults(new Long[]{null, null, null}, true);
checkAllResults(new Long[]{1l, 2l}, false);
checkAllResults(new Long[]{2l, 1l}, false);
checkAllResults(new Long[]{1l, 2l, 1l}, false);
checkAllResults(new Long[]{2l, 1l, 1l}, false);
checkAllResults(new Long[]{1l, 1l, 2l}, false);
checkAllResults(new Long[]{1l, null}, false);
checkAllResults(new Long[]{null, 1l}, false);
checkAllResults(new Long[]{1l, null, 1l}, false);
} |
public ConnectionDetails getConnectionDetails( IMetaStore metaStore, String key, String name ) {
ConnectionProvider<? extends ConnectionDetails> connectionProvider = getConnectionProvider( key );
if ( connectionProvider != null ) {
Class<? extends ConnectionDetails> clazz = connectionProvider.getClassType();
return loadElement( getMetaStoreFactory( metaStore, clazz ), name );
}
return null;
} | @Test
public void testSaveConnection() {
addOne();
TestConnectionWithBucketsDetails testConnectionDetails1 =
(TestConnectionWithBucketsDetails) connectionManager
.getConnectionDetails( TestConnectionWithBucketsProvider.SCHEME, CONNECTION_NAME );
assertEquals( CONNECTION_NAME, testConnectionDetails1.getName() );
} |
@Description("round down to nearest integer")
@ScalarFunction
@SqlType(StandardTypes.BIGINT)
public static long floor(@SqlType(StandardTypes.BIGINT) long num)
{
return num;
} | @Test
public void testFloor()
{
assertFunction("floor(TINYINT'123')", TINYINT, (byte) 123);
assertFunction("floor(TINYINT'-123')", TINYINT, (byte) -123);
assertFunction("floor(CAST(NULL AS TINYINT))", TINYINT, null);
assertFunction("floor(SMALLINT'123')", SMALLINT, (short) 123);
assertFunction("floor(SMALLINT'-123')", SMALLINT, (short) -123);
assertFunction("floor(CAST(NULL AS SMALLINT))", SMALLINT, null);
assertFunction("floor(123)", INTEGER, 123);
assertFunction("floor(-123)", INTEGER, -123);
assertFunction("floor(CAST(NULL AS INTEGER))", INTEGER, null);
assertFunction("floor(BIGINT '123')", BIGINT, 123L);
assertFunction("floor(BIGINT '-123')", BIGINT, -123L);
assertFunction("floor(12300000000)", BIGINT, 12300000000L);
assertFunction("floor(-12300000000)", BIGINT, -12300000000L);
assertFunction("floor(CAST(NULL as BIGINT))", BIGINT, null);
assertFunction("floor(123.0E0)", DOUBLE, 123.0);
assertFunction("floor(-123.0E0)", DOUBLE, -123.0);
assertFunction("floor(123.45E0)", DOUBLE, 123.0);
assertFunction("floor(-123.45E0)", DOUBLE, -124.0);
assertFunction("floor(REAL '123.0')", REAL, 123.0f);
assertFunction("floor(REAL '-123.0')", REAL, -123.0f);
assertFunction("floor(REAL '123.45')", REAL, 123.0f);
assertFunction("floor(REAL '-123.45')", REAL, -124.0f);
// short DECIMAL -> short DECIMAL
assertFunction("floor(DECIMAL '0')", createDecimalType(1), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '0.00' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '0.00' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '0.01' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-0.01' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("-1"));
assertFunction("floor(CAST(DECIMAL '0.49' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-0.49' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("-1"));
assertFunction("floor(CAST(DECIMAL '0.50' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-0.50' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("-1"));
assertFunction("floor(CAST(DECIMAL '0.99' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-0.99' AS DECIMAL(3,2)))", createDecimalType(2), SqlDecimal.of("-1"));
assertFunction("floor(DECIMAL '123')", createDecimalType(3), SqlDecimal.of("123"));
assertFunction("floor(DECIMAL '-123')", createDecimalType(3), SqlDecimal.of("-123"));
assertFunction("floor(DECIMAL '123.00')", createDecimalType(4), SqlDecimal.of("123"));
assertFunction("floor(DECIMAL '-123.00')", createDecimalType(4), SqlDecimal.of("-123"));
assertFunction("floor(DECIMAL '123.01')", createDecimalType(4), SqlDecimal.of("123"));
assertFunction("floor(DECIMAL '-123.01')", createDecimalType(4), SqlDecimal.of("-124"));
assertFunction("floor(DECIMAL '123.45')", createDecimalType(4), SqlDecimal.of("123"));
assertFunction("floor(DECIMAL '-123.45')", createDecimalType(4), SqlDecimal.of("-124"));
assertFunction("floor(DECIMAL '123.49')", createDecimalType(4), SqlDecimal.of("123"));
assertFunction("floor(DECIMAL '-123.49')", createDecimalType(4), SqlDecimal.of("-124"));
assertFunction("floor(DECIMAL '123.50')", createDecimalType(4), SqlDecimal.of("123"));
assertFunction("floor(DECIMAL '-123.50')", createDecimalType(4), SqlDecimal.of("-124"));
assertFunction("floor(DECIMAL '123.99')", createDecimalType(4), SqlDecimal.of("123"));
assertFunction("floor(DECIMAL '-123.99')", createDecimalType(4), SqlDecimal.of("-124"));
assertFunction("floor(DECIMAL '-999.9')", createDecimalType(4), SqlDecimal.of("-1000"));
// long DECIMAL -> long DECIMAL
assertFunction("floor(CAST(DECIMAL '0000000000000000000' AS DECIMAL(19,0)))", createDecimalType(19), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '000000000000000000.00' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '000000000000000000.01' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-000000000000000000.01' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("-1"));
assertFunction("floor(CAST(DECIMAL '000000000000000000.49' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-000000000000000000.49' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("-1"));
assertFunction("floor(CAST(DECIMAL '000000000000000000.50' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-000000000000000000.50' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("-1"));
assertFunction("floor(CAST(DECIMAL '000000000000000000.99' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("0"));
assertFunction("floor(CAST(DECIMAL '-000000000000000000.99' AS DECIMAL(20,2)))", createDecimalType(19), SqlDecimal.of("-1"));
assertFunction("floor(DECIMAL '123456789012345678')", createDecimalType(18), SqlDecimal.of("123456789012345678"));
assertFunction("floor(DECIMAL '-123456789012345678')", createDecimalType(18), SqlDecimal.of("-123456789012345678"));
assertFunction("floor(DECIMAL '123456789012345678.00')", createDecimalType(19), SqlDecimal.of("123456789012345678"));
assertFunction("floor(DECIMAL '-123456789012345678.00')", createDecimalType(19), SqlDecimal.of("-123456789012345678"));
assertFunction("floor(DECIMAL '123456789012345678.01')", createDecimalType(19), SqlDecimal.of("123456789012345678"));
assertFunction("floor(DECIMAL '-123456789012345678.01')", createDecimalType(19), SqlDecimal.of("-123456789012345679"));
assertFunction("floor(DECIMAL '123456789012345678.99')", createDecimalType(19), SqlDecimal.of("123456789012345678"));
assertFunction("floor(DECIMAL '-123456789012345678.49')", createDecimalType(19), SqlDecimal.of("-123456789012345679"));
assertFunction("floor(DECIMAL '123456789012345678.49')", createDecimalType(19), SqlDecimal.of("123456789012345678"));
assertFunction("floor(DECIMAL '-123456789012345678.50')", createDecimalType(19), SqlDecimal.of("-123456789012345679"));
assertFunction("floor(DECIMAL '123456789012345678.50')", createDecimalType(19), SqlDecimal.of("123456789012345678"));
assertFunction("floor(DECIMAL '-123456789012345678.99')", createDecimalType(19), SqlDecimal.of("-123456789012345679"));
assertFunction("floor(DECIMAL '-999999999999999999.9')", createDecimalType(19), SqlDecimal.of("-1000000000000000000"));
// long DECIMAL -> short DECIMAL
assertFunction("floor(DECIMAL '1234567890123456.78')", createDecimalType(17), SqlDecimal.of("1234567890123456"));
assertFunction("floor(DECIMAL '-1234567890123456.78')", createDecimalType(17), SqlDecimal.of("-1234567890123457"));
assertFunction("floor(CAST(NULL as REAL))", REAL, null);
assertFunction("floor(CAST(NULL as DOUBLE))", DOUBLE, null);
assertFunction("floor(CAST(NULL as DECIMAL(1,0)))", createDecimalType(1), null);
assertFunction("floor(CAST(NULL as DECIMAL(25,5)))", createDecimalType(21), null);
} |
public static Object serialize(Object bean) throws NullPointerException {
return serialize(bean, false);
} | @Test
public void serialize() {
TestJsonBean bean = new TestJsonBean();
boolean error = false;
try {
Map map = (Map) BeanSerializer.serialize(bean);
} catch (Exception e) {
error = true;
}
Assert.assertTrue(error);
error = false;
bean.setName("zzzggg");
try {
Map map = (Map) BeanSerializer.serialize(bean, true);
Assert.assertEquals(map.get("Name"), "zzzggg");
Assert.assertEquals(map.get("Sex"), false);
Assert.assertEquals(map.get("age"), 0);
Assert.assertFalse(map.containsKey("friends"));
Assert.assertTrue(map.containsKey("Remark"));
Assert.assertTrue(map.containsKey(JSON.CLASS_KEY));
map = (Map) BeanSerializer.serialize(bean);
Assert.assertEquals(map.get("Name"), "zzzggg");
Assert.assertEquals(map.get("Sex"), false);
Assert.assertEquals(map.get("age"), 0);
Assert.assertFalse(map.containsKey("friends"));
Assert.assertTrue(map.containsKey("Remark"));
Assert.assertFalse(map.containsKey(JSON.CLASS_KEY));
} catch (Exception e) {
error = true;
}
Assert.assertFalse(error);
bean.setName("zzzgg");
bean.setSex(true);
bean.setAge(111);
bean.setStep(1234567890l);
bean.setFriends(new ArrayList<TestJsonBean>());
bean.setStatus(TestJsonBean.Status.START);
error = false;
try {
Map map = (Map) BeanSerializer.serialize(bean, true);
Assert.assertEquals(map.get("Name"), "zzzgg");
Assert.assertEquals(map.get("Sex"), true);
Assert.assertEquals(map.get("age"), 111);
Assert.assertTrue(map.containsKey("friends"));
Assert.assertTrue(map.containsKey("Remark"));
Assert.assertTrue(map.containsKey(JSON.CLASS_KEY));
map = (Map) BeanSerializer.serialize(bean);
Assert.assertEquals(map.get("Name"), "zzzgg");
Assert.assertEquals(map.get("Sex"), true);
Assert.assertEquals(map.get("age"), 111);
Assert.assertTrue(map.containsKey("friends"));
Assert.assertTrue(map.containsKey("Remark"));
Assert.assertFalse(map.containsKey(JSON.CLASS_KEY));
} catch (Exception e) {
error = true;
}
Assert.assertFalse(error);
} |
@Nonnull
public static <C> SourceBuilder<C>.Batch<Void> batch(
@Nonnull String name,
@Nonnull FunctionEx<? super Processor.Context, ? extends C> createFn
) {
return new SourceBuilder<C>(name, createFn).new Batch<>();
} | @Test
public void batch_fileSource() throws Exception {
// Given
File textFile = createTestFile();
// When
BatchSource<String> fileSource = SourceBuilder
.batch("file-source", x -> fileReader(textFile))
.<String>fillBufferFn((in, buf) -> {
String line = in.readLine();
if (line != null) {
buf.add(line);
} else {
buf.close();
}
})
.destroyFn(BufferedReader::close)
.build();
// Then
Pipeline p = Pipeline.create();
p.readFrom(fileSource)
.writeTo(sinkList());
hz().getJet().newJob(p).join();
assertEquals(
IntStream.range(0, itemCount).mapToObj(i -> "line" + i).collect(toList()),
new ArrayList<>(sinkList)
);
} |
public Group getGroup(JID jid) throws GroupNotFoundException {
JID groupJID = GroupJID.fromJID(jid);
return (groupJID instanceof GroupJID) ? getGroup(((GroupJID)groupJID).getGroupName()) : null;
} | @Test
public void willUseACacheMiss() {
groupCache.put(GROUP_NAME, CacheableOptional.of(null));
try {
groupManager.getGroup(GROUP_NAME, false);
} catch (final GroupNotFoundException ignored) {
verifyNoMoreInteractions(groupProvider);
return;
}
fail();
} |
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType = null;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
StringColumnStatsDataInspector stringColumnStatsData = stringInspectorFromStats(cso);
if (stringColumnStatsData.getNdvEstimator() == null) {
ndvEstimator = null;
break;
} else {
// check if all of the bit vectors can merge
NumDistinctValueEstimator estimator = stringColumnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (ndvEstimator.canMerge(estimator)) {
continue;
} else {
ndvEstimator = null;
break;
}
}
}
}
if (ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null));
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
StringColumnStatsDataInspector newData = stringInspectorFromStats(cso);
if (ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData
.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen()));
aggregateData
.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
// aggregateData already has the ndv of the max of all
}
columnStatisticsData.setStringStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for " + colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
if (ndvEstimator == null) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
StringColumnStatsDataInspector newData =
stringInspectorFromStats(cso);
          // newData.isSetBitVectors() is guaranteed to be true here because it
          // was already checked above.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
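          // Fold this partition into the current pseudo-partition and advance the expected adjacent index.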
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(),
newData.getAvgColLen()));
aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(),
newData.getMaxColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, -1);
}
LOG.debug(
"Ndv estimatation for {} is {} # of partitions requested: {} # of partitions found: {}",
colName, columnStatisticsData.getStringStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateMultiStatsWhenOnlySomeAvailable() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3", "part4");
ColumnStatisticsData data1 = new ColStatsBuilder<>(String.class).numNulls(1).numDVs(3).avgColLen(20.0 / 3).maxColLen(13)
.hll(S_1, S_2, S_3).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(String.class).numNulls(3).numDVs(2).avgColLen(17.5).maxColLen(18)
.hll(S_6, S_7).build();
ColumnStatisticsData data4 = new ColStatsBuilder<>(String.class).numNulls(2).numDVs(3).avgColLen(14).maxColLen(18)
.hll(S_3, S_4, S_5).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)),
createStatsWithInfo(data4, TABLE, COL, partitions.get(3)));
StringColumnStatsAggregator aggregator = new StringColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, false);
// hll in case of missing stats is left as null, only numDVs is updated
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(String.class).numNulls(8).numDVs(6)
.avgColLen(24).maxColLen(24).build();
Assert.assertEquals(expectedStats, computedStatsObj.getStatsData());
} |
public OpenConfigComponentHandler addTransceiver(OpenConfigTransceiverHandler transceiver) {
augmentedOcPlatformComponent.transceiver(transceiver.getModelObject());
modelObject.addAugmentation(augmentedOcPlatformComponent);
return this;
} | @Test
public void testAddTransceiver() {
// test Handler
OpenConfigComponentHandler component = new OpenConfigComponentHandler("name", parent);
// call addTransceiver
OpenConfigTransceiverHandler transceiver = new OpenConfigTransceiverHandler(component);
// expected ModelObject
DefaultComponent modelObject = new DefaultComponent();
modelObject.name("name");
DefaultTransceiver trans = new DefaultTransceiver();
DefaultAugmentedOcPlatformComponent augmentedOcPlatformComponent = new DefaultAugmentedOcPlatformComponent();
augmentedOcPlatformComponent.transceiver(trans);
modelObject.addAugmentation(augmentedOcPlatformComponent);
assertEquals("[NG]addTransceiver:ModelObject(Transceiver added) is not an expected one.\n",
modelObject, component.getModelObject());
} |
@Override
public AuthLoginRespVO refreshToken(String refreshToken) {
OAuth2AccessTokenDO accessTokenDO = oauth2TokenService.refreshAccessToken(refreshToken, OAuth2ClientConstants.CLIENT_ID_DEFAULT);
return AuthConvert.INSTANCE.convert(accessTokenDO);
} | @Test
public void testRefreshToken() {
        // prepare parameters
String refreshToken = randomString();
        // mock the method
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class);
when(oauth2TokenService.refreshAccessToken(eq(refreshToken), eq("default")))
.thenReturn(accessTokenDO);
        // invoke
AuthLoginRespVO loginRespVO = authService.refreshToken(refreshToken);
        // assert
assertPojoEquals(accessTokenDO, loginRespVO);
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
boolean result = false;
boolean containsNull = false;
// Spec. definition: return true if any item is true, else false if all items are false, else null
for ( final Object element : list ) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean"));
} else {
if (element != null) {
result |= (Boolean) element;
} else if (!containsNull) {
containsNull = true;
}
}
}
if (containsNull && !result) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( result );
}
} | @Test
void invokeListParamEmptyList() {
FunctionTestUtil.assertResult(anyFunction.invoke(Collections.emptyList()), false);
} |
public JobBuilder withLabels(String... labels) {
return withLabels(asSet(labels));
} | @Test
void testWithLabels() {
Job job = aJob()
.withLabels(Set.of("TestLabel", "Email"))
.withDetails(() -> testService.doWorkWithUUID(UUID.randomUUID()))
.build(jobDetailsGenerator);
assertThat(job)
.hasLabels(Set.of("TestLabel", "Email"))
.hasState(StateName.ENQUEUED);
} |
protected ObjectName getJMXObjectName() throws MalformedObjectNameException {
if (jmxObjectName == null) {
ObjectName on = buildObjectName();
setJMXObjectName(on);
}
return jmxObjectName;
} | @Test
public void getJMXObjectNameCached() throws Exception {
JMXEndpoint ep = context.getEndpoint("jmx:platform?objectDomain=FooDomain&key.name=theObjectName", JMXEndpoint.class);
ObjectName on = ep.getJMXObjectName();
assertNotNull(on);
assertSame(on, ep.getJMXObjectName());
} |
public ClusterSerdes init(Environment env,
ClustersProperties clustersProperties,
int clusterIndex) {
ClustersProperties.Cluster clusterProperties = clustersProperties.getClusters().get(clusterIndex);
log.debug("Configuring serdes for cluster {}", clusterProperties.getName());
var globalPropertiesResolver = new PropertyResolverImpl(env);
var clusterPropertiesResolver = new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex);
Map<String, SerdeInstance> registeredSerdes = new LinkedHashMap<>();
// initializing serdes from config
if (clusterProperties.getSerde() != null) {
for (int i = 0; i < clusterProperties.getSerde().size(); i++) {
SerdeConfig serdeConfig = clusterProperties.getSerde().get(i);
if (Strings.isNullOrEmpty(serdeConfig.getName())) {
throw new ValidationException("'name' property not set for serde: " + serdeConfig);
}
if (registeredSerdes.containsKey(serdeConfig.getName())) {
throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName());
}
var instance = createSerdeFromConfig(
serdeConfig,
new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"),
clusterPropertiesResolver,
globalPropertiesResolver
);
registeredSerdes.put(serdeConfig.getName(), instance);
}
}
    // initializing remaining built-in serdes with empty selection patterns
builtInSerdeClasses.forEach((name, clazz) -> {
if (!registeredSerdes.containsKey(name)) {
BuiltInSerde serde = createSerdeInstance(clazz);
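        // Only register the built-in serde if it can auto-configure itself from cluster-level or global properties.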
if (autoConfigureSerde(serde, clusterPropertiesResolver, globalPropertiesResolver)) {
registeredSerdes.put(name, new SerdeInstance(name, serde, null, null, null));
}
}
});
registerTopicRelatedSerde(registeredSerdes);
return new ClusterSerdes(
registeredSerdes,
Optional.ofNullable(clusterProperties.getDefaultKeySerde())
.map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default key serde not found"))
.orElse(null),
Optional.ofNullable(clusterProperties.getDefaultValueSerde())
.map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default value serde not found"))
.or(() -> Optional.ofNullable(registeredSerdes.get(SchemaRegistrySerde.name())))
.or(() -> Optional.ofNullable(registeredSerdes.get(ProtobufFileSerde.name())))
.orElse(null),
createFallbackSerde()
);
} | @Test
void pluggedSerdesInitializedByLoader() {
ClustersProperties.SerdeConfig customSerdeConfig = new ClustersProperties.SerdeConfig();
customSerdeConfig.setName("MyPluggedSerde");
customSerdeConfig.setFilePath("/custom.jar");
customSerdeConfig.setClassName("org.test.MyPluggedSerde");
customSerdeConfig.setTopicKeysPattern("keys");
customSerdeConfig.setTopicValuesPattern("values");
when(customSerdeLoaderMock.loadAndConfigure(anyString(), anyString(), any(), any(), any()))
.thenReturn(new CustomSerdeLoader.CustomSerde(new StringSerde(), new URLClassLoader(new URL[]{})));
var serdes = init(customSerdeConfig);
SerdeInstance customSerdeInstance = serdes.serdes.get("MyPluggedSerde");
verifyPatternsMatch(customSerdeConfig, customSerdeInstance);
assertThat(customSerdeInstance.classLoader).isNotNull();
verify(customSerdeLoaderMock).loadAndConfigure(
eq(customSerdeConfig.getClassName()),
eq(customSerdeConfig.getFilePath()),
any(), any(), any()
);
} |
public Distance altitudeDelta() {
double delta = abs(point1.altitude().inFeet() - point2.altitude().inFeet());
return Distance.of(delta, Distance.Unit.FEET);
} | @Test
public void testAltitudeDelta() {
//alt = 2400
Point p1 = NopHit.from("[RH],STARS,A80_B,02/12/2018,18:36:46.667,JIA5545,CRJ9,E,5116,024,157,270,033.63143,-084.33913,1334,5116,22.4031,27.6688,1,O,A,A80,OZZ,OZZ,ATL,1827,ATL,ACT,IFR,,01719,,,,,27L,L,1,,0,{RH}");
//alt = 3400
Point p2 = NopHit.from("[RH],STARS,A80_B,02/12/2018,18:36:46.667,JIA5545,CRJ9,E,5116,034,157,270,033.63143,-084.33913,1334,5116,22.4031,27.6688,1,O,A,A80,OZZ,OZZ,ATL,1827,ATL,ACT,IFR,,01719,,,,,27L,L,1,,0,{RH}");
PointPair pair = new PointPair(p1, p2);
assertTrue(pair.altitudeDelta().equals(Distance.ofFeet(1_000)));
} |
@Override
public boolean supportsPath(String path) {
return path != null && path.startsWith(Constants.HEADER_OSS);
} | @Test
public void supportsPath() {
Assert.assertTrue(mFactory.supportsPath(mOssPath));
assertFalse(mFactory.supportsPath(null));
assertFalse(mFactory.supportsPath("Invalid_Path"));
assertFalse(mFactory.supportsPath("hdfs://test-bucket/path"));
} |
@Override
public void submit(VplsOperation vplsOperation) {
if (isLeader) {
// Only leader can execute operation
addVplsOperation(vplsOperation);
}
} | @Test
@Ignore("Test is brittle - revisit")
public void testDuplicateOperationInQueue() {
VplsData vplsData = VplsData.of(VPLS1);
vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
VplsOperation vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.ADD);
vplsOperationManager.submit(vplsOperation);
vplsOperationManager.submit(vplsOperation);
Deque<VplsOperation> opQueue = vplsOperationManager.pendingVplsOperations.get(VPLS1);
assertEquals(1, opQueue.size());
// Clear operation queue before scheduler process it
opQueue.clear();
} |
@Override
public void define(Context context) {
NewController controller = context.createController("api/user_groups")
.setDescription("Manage user groups.")
.setSince("5.2");
for (UserGroupsWsAction action : actions) {
action.define(controller);
}
controller.done();
} | @Test
public void define_controller() {
WebService.Context context = new WebService.Context();
underTest.define(context);
WebService.Controller controller = context.controller("api/user_groups");
assertThat(controller).isNotNull();
assertThat(controller.description()).isNotEmpty();
assertThat(controller.since()).isEqualTo("5.2");
assertThat(controller.actions()).hasSize(1);
} |
@Override
public Mono<PooledRef<Connection>> acquire() {
return new BorrowerMono(this, Duration.ZERO);
} | @Test
void pendingTimeout() throws Exception {
EmbeddedChannel channel = new EmbeddedChannel();
PoolBuilder<Connection, PoolConfig<Connection>> poolBuilder =
PoolBuilder.from(Mono.just(Connection.from(channel)))
.maxPendingAcquire(10)
.sizeBetween(0, 1);
Http2Pool http2Pool = poolBuilder.build(config -> new Http2Pool(config, null));
CountDownLatch latch = new CountDownLatch(3);
ExecutorService executorService = Executors.newFixedThreadPool(20);
try {
CompletableFuture<?>[] completableFutures = new CompletableFuture<?>[4];
for (int i = 0; i < completableFutures.length; i++) {
completableFutures[i] = CompletableFuture.runAsync(
() -> http2Pool.acquire(Duration.ofMillis(10))
.doOnEach(sig -> channel.runPendingTasks())
.doOnError(t -> latch.countDown())
.onErrorResume(PoolAcquireTimeoutException.class, t -> Mono.empty())
.block(),
executorService);
}
CompletableFuture.allOf(completableFutures).join();
assertThat(latch.await(5, TimeUnit.SECONDS)).isTrue();
}
finally {
channel.finishAndReleaseAll();
Connection.from(channel).dispose();
executorService.shutdown();
}
} |
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
return resolveRequestConfig(propertyName);
} else if (propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
&& !propertyName.startsWith(KSQL_STREAMS_PREFIX)) {
return resolveKsqlConfig(propertyName);
}
return resolveStreamsConfig(propertyName, strict);
} | @Test
public void shouldFindUnknownStreamsPrefixedConsumerPropertyIfNotStrict() {
// Given:
final String configName = StreamsConfig.CONSUMER_PREFIX
+ "custom.interceptor.config";
// Then:
assertThat(
resolver.resolve(KsqlConfig.KSQL_STREAMS_PREFIX + configName, false),
is(unresolvedItem(configName))
);
} |
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
if ( list == null || list.isEmpty() ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
} else {
try {
return FEELFnResult.ofResult(Collections.min(list, new InterceptNotComparableComparator()));
} catch (ClassCastException e) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
}
}
} | @Test
void invokeNullArray() {
FunctionTestUtil.assertResultError(minFunction.invoke((Object[]) null), InvalidParametersEvent.class);
} |
@Override
public Collection<RequestAndKeys<CoordinatorKey>> buildRequest(int brokerId, Set<CoordinatorKey> groupIds) {
validateKeys(groupIds);
// When the OffsetFetchRequest fails with NoBatchedOffsetFetchRequestException, we completely disable
// the batching end-to-end, including the FindCoordinatorRequest.
if (lookupStrategy.batch()) {
return Collections.singletonList(new RequestAndKeys<>(buildBatchedRequest(groupIds), groupIds));
} else {
return groupIds.stream().map(groupId -> {
Set<CoordinatorKey> keys = Collections.singleton(groupId);
return new RequestAndKeys<>(buildBatchedRequest(keys), keys);
}).collect(Collectors.toList());
}
} | @Test
public void testBuildRequest() {
ListConsumerGroupOffsetsHandler handler =
new ListConsumerGroupOffsetsHandler(singleRequestMap, false, logContext);
OffsetFetchRequest request = handler.buildBatchedRequest(coordinatorKeys(groupZero)).build();
assertEquals(groupZero, request.data().groups().get(0).groupId());
assertEquals(2, request.data().groups().get(0).topics().size());
assertEquals(2, request.data().groups().get(0).topics().get(0).partitionIndexes().size());
assertEquals(2, request.data().groups().get(0).topics().get(1).partitionIndexes().size());
} |
public String[] decodeStringArray(final byte[] parameterBytes, final boolean isBinary) {
ShardingSpherePreconditions.checkState(!isBinary, () -> new UnsupportedSQLOperationException("binary mode"));
String parameterValue = new String(parameterBytes, StandardCharsets.UTF_8);
Collection<String> parameterElements = decodeText(parameterValue);
return parameterElements.toArray(EMPTY_STRING_ARRAY);
} | @Test
void assertParseStringArrayWithNullTextMode() {
String[] actual = DECODER.decodeStringArray("{\"a\",\"b\",NULL}".getBytes(), false);
assertThat(actual.length, is(3));
assertThat(actual[0], is("a"));
assertThat(actual[1], is("b"));
assertNull(actual[2]);
} |
@ApiOperation(value = "Delete a deployment", tags = { "Deployment" }, code = 204)
@ApiResponses(value = {
@ApiResponse(code = 204, message = "Indicates the deployment was found and has been deleted. Response-body is intentionally empty."),
@ApiResponse(code = 404, message = "Indicates the requested deployment was not found.")
})
@DeleteMapping(value = "/repository/deployments/{deploymentId}", produces = "application/json")
@ResponseStatus(HttpStatus.NO_CONTENT)
public void deleteDeployment(@ApiParam(name = "deploymentId") @PathVariable String deploymentId, @RequestParam(value = "cascade", required = false, defaultValue = "false") Boolean cascade) {
Deployment deployment = repositoryService.createDeploymentQuery().deploymentId(deploymentId).singleResult();
if (deployment == null) {
throw new FlowableObjectNotFoundException("Could not find a deployment with id '" + deploymentId + "'.", Deployment.class);
}
if (restApiInterceptor != null) {
restApiInterceptor.deleteDeployment(deployment);
}
if (cascade) {
repositoryService.deleteDeployment(deploymentId, true);
} else {
repositoryService.deleteDeployment(deploymentId);
}
} | @Test
public void testPostNewDeploymentBarFileWithTenantId() throws Exception {
try {
// Create zip with bpmn-file and resource
ByteArrayOutputStream zipOutput = new ByteArrayOutputStream();
ZipOutputStream zipStream = new ZipOutputStream(zipOutput);
// Add bpmn-xml
zipStream.putNextEntry(new ZipEntry("oneTaskProcess.bpmn20.xml"));
IOUtils.copy(ReflectUtil.getResourceAsStream("org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml"), zipStream);
zipStream.closeEntry();
// Add text-resource
zipStream.putNextEntry(new ZipEntry("test.txt"));
IOUtils.write("Testing REST-deployment", zipStream, StandardCharsets.UTF_8);
zipStream.closeEntry();
zipStream.close();
// Upload a bar-file using multipart-data
HttpPost httpPost = new HttpPost(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT_COLLECTION));
httpPost.setEntity(
HttpMultipartHelper.getMultiPartEntity("test-deployment.bar", "application/zip", new ByteArrayInputStream(zipOutput.toByteArray()),
Collections.singletonMap("tenantId", "myTenant")));
CloseableHttpResponse response = executeBinaryRequest(httpPost, HttpStatus.SC_CREATED);
// Check deployment
JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThatJson(responseNode)
.when(Option.IGNORING_EXTRA_FIELDS)
.isEqualTo("{"
+ "tenantId: 'myTenant'"
+ "}");
String id = responseNode.get("id").textValue();
Deployment deployment = repositoryService.createDeploymentQuery().deploymentId(id).singleResult();
assertThat(deployment).isNotNull();
assertThat(deployment.getTenantId()).isEqualTo("myTenant");
} finally {
// Always cleanup any created deployments, even if the test failed
List<Deployment> deployments = repositoryService.createDeploymentQuery().list();
for (Deployment deployment : deployments) {
repositoryService.deleteDeployment(deployment.getId(), true);
}
}
} |
public LocalFileSystem() {
this.workingDir = new File(System.getProperty("user.dir")).toURI();
this.homeDir = new File(System.getProperty("user.home")).toURI();
} | @Test
void testLocalFilesystem() throws Exception {
final File tempdir =
new File(TempDirUtils.newFolder(tempFolder), UUID.randomUUID().toString());
final File testfile1 = new File(tempdir, UUID.randomUUID().toString());
final File testfile2 = new File(tempdir, UUID.randomUUID().toString());
final Path pathtotestfile1 = new Path(testfile1.toURI().getPath());
final Path pathtotestfile2 = new Path(testfile2.toURI().getPath());
final LocalFileSystem lfs = new LocalFileSystem();
final Path pathtotmpdir = new Path(tempdir.toURI().getPath());
/*
* check that lfs can see/create/delete/read directories
*/
// check that dir is not existent yet
assertThat(lfs.exists(pathtotmpdir)).isFalse();
assertThat(tempdir.mkdirs()).isTrue();
// check that local file system recognizes file..
assertThat(lfs.exists(pathtotmpdir)).isTrue();
final FileStatus localstatus1 = lfs.getFileStatus(pathtotmpdir);
// check that lfs recognizes directory..
assertThat(localstatus1.isDir()).isTrue();
// get status for files in this (empty) directory..
final FileStatus[] statusforfiles = lfs.listStatus(pathtotmpdir);
// no files in there.. hence, must be zero
assertThat(statusforfiles).isEmpty();
// check that lfs can delete directory..
lfs.delete(pathtotmpdir, true);
// double check that directory is not existent anymore..
assertThat(lfs.exists(pathtotmpdir)).isFalse();
assertThat(tempdir).doesNotExist();
// re-create directory..
lfs.mkdirs(pathtotmpdir);
// creation successful?
assertThat(tempdir).exists();
/*
* check that lfs can create/read/write from/to files properly and read meta information..
*/
        // create files.. one "natively", one using lfs
final FSDataOutputStream lfsoutput1 = lfs.create(pathtotestfile1, WriteMode.NO_OVERWRITE);
assertThat(testfile2.createNewFile()).isTrue();
// does lfs create files? does lfs recognize created files?
assertThat(testfile1).exists();
assertThat(lfs.exists(pathtotestfile2)).isTrue();
// test that lfs can write to files properly
final byte[] testbytes = {1, 2, 3, 4, 5};
lfsoutput1.write(testbytes);
lfsoutput1.close();
assertThat(testfile1).hasSize(5L);
byte[] testbytestest = new byte[5];
try (FileInputStream fisfile1 = new FileInputStream(testfile1)) {
assertThat(fisfile1.read(testbytestest)).isEqualTo(testbytestest.length);
}
assertThat(testbytestest).containsExactly(testbytes);
// does lfs see the correct file length?
assertThat(testfile1).hasSize(lfs.getFileStatus(pathtotestfile1).getLen());
        // the same holds when we call listStatus (which is intended for directories)
assertThat(testfile1).hasSize(lfs.listStatus(pathtotestfile1)[0].getLen());
// test that lfs can read files properly
final FileOutputStream fosfile2 = new FileOutputStream(testfile2);
fosfile2.write(testbytes);
fosfile2.close();
testbytestest = new byte[5];
final FSDataInputStream lfsinput2 = lfs.open(pathtotestfile2);
assertThat(lfsinput2.read(testbytestest)).isEqualTo(5);
lfsinput2.close();
assertThat(testbytestest).containsExactly(testbytes);
// does lfs see two files?
assertThat(lfs.listStatus(pathtotmpdir)).hasSize(2);
// do we get exactly one blocklocation per file? no matter what start and len we provide
assertThat(lfs.getFileBlockLocations(lfs.getFileStatus(pathtotestfile1), 0, 0).length)
.isOne();
/*
* can lfs delete files / directories?
*/
assertThat(lfs.delete(pathtotestfile1, false)).isTrue();
// and can lfs also delete directories recursively?
assertThat(lfs.delete(pathtotmpdir, true)).isTrue();
assertThat(tempdir).doesNotExist();
} |
public PipelineResult run() {
return run(defaultOptions);
} | @Test
@Category(NeedsRunner.class)
public void testEmptyPipeline() throws Exception {
pipeline.run();
} |
public int ceil(int v) {
return Boundary.CEIL.apply(find(v));
} | @Test public void ceil() {
SortedIntList l = new SortedIntList(5);
l.add(1);
l.add(3);
l.add(5);
assertEquals(0, l.ceil(0));
assertEquals(0, l.ceil(1));
assertEquals(1, l.ceil(2));
assertEquals(1, l.ceil(3));
assertEquals(2, l.ceil(4));
assertEquals(2, l.ceil(5));
assertEquals(3, l.ceil(6));
assertTrue(l.isInRange(0));
assertTrue(l.isInRange(1));
assertTrue(l.isInRange(2));
assertFalse(l.isInRange(3));
} |
public Collection<Map.Entry<Point, UntypedMetric>> getValuesForMetric(String metricName) {
List<Map.Entry<Point, UntypedMetric>> singleMetric = new ArrayList<>();
for (Map.Entry<Identifier, UntypedMetric> entry : values.entrySet()) {
if (metricName.equals(entry.getKey().getName())) {
singleMetric.add(locationValuePair(entry));
}
}
return singleMetric;
} | @Test
final void testGetValuesForMetric() {
twoMetricsUniqueDimensions();
Collection<Entry<Point, UntypedMetric>> values = bucket.getValuesForMetric("nalle");
assertEquals(4, values.size());
} |
@Override
public String telnet(Channel channel, String message) {
long size;
File file = LoggerFactory.getFile();
StringBuilder buf = new StringBuilder();
if (message == null || message.trim().length() == 0) {
buf.append("EXAMPLE: log error / log 100");
} else {
String[] str = message.split(" ");
if (!StringUtils.isNumber(str[0])) {
LoggerFactory.setLevel(Level.valueOf(message.toUpperCase()));
} else {
int showLogLength = Integer.parseInt(str[0]);
if (file != null && file.exists()) {
try (FileInputStream fis = new FileInputStream(file)) {
FileChannel filechannel = fis.getChannel();
size = filechannel.size();
ByteBuffer bb;
if (size <= showLogLength) {
bb = ByteBuffer.allocate((int) size);
filechannel.read(bb, 0);
} else {
int pos = (int) (size - showLogLength);
bb = ByteBuffer.allocate(showLogLength);
filechannel.read(bb, pos);
}
bb.flip();
String content = new String(bb.array())
.replace("<", "<")
.replace(">", ">")
.replace("\n", "<br/><br/>");
buf.append("\r\ncontent:").append(content);
buf.append("\r\nmodified:")
.append(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
.format(new Date(file.lastModified())));
buf.append("\r\nsize:").append(size).append("\r\n");
} catch (Exception e) {
buf.append(e.getMessage());
}
} else {
buf.append("\r\nMESSAGE: log file not exists or log appender is console .");
}
}
}
buf.append("\r\nCURRENT LOG LEVEL:")
.append(LoggerFactory.getLevel())
.append("\r\nCURRENT LOG APPENDER:")
.append(file == null ? "console" : file.getAbsolutePath());
return buf.toString();
} | @Test
void testPrintLog() throws RemotingException {
mockChannel = mock(Channel.class);
String result = log.telnet(mockChannel, "100");
assertTrue(result.contains("CURRENT LOG APPENDER"));
} |
@Udf(description = "Returns the cotangent of an INT value")
public Double cot(
@UdfParameter(
value = "value",
description = "The value in radians to get the cotangent of."
) final Integer value
) {
return cot(value == null ? null : value.doubleValue());
} | @Test
public void shouldHandleNegative() {
assertThat(udf.cot(-0.43), closeTo(-2.1804495406685085, 0.000000000000001));
assertThat(udf.cot(-Math.PI), closeTo(8.165619676597685E15, 0.000000000000001));
assertThat(udf.cot(-Math.PI * 2), closeTo(4.0828098382988425E15, 0.000000000000001));
assertThat(udf.cot(-6), closeTo(3.436353004180128, 0.000000000000001));
assertThat(udf.cot(-6L), closeTo(3.436353004180128, 0.000000000000001));
} |
@Override
public boolean onTouchEvent(@NonNull MotionEvent nativeMotionEvent) {
if (mKeyboard == null) {
// I mean, if there isn't any keyboard I'm handling, what's the point?
return false;
}
final int action = nativeMotionEvent.getActionMasked();
final int pointerCount = nativeMotionEvent.getPointerCount();
if (pointerCount > 1) {
mLastTimeHadTwoFingers =
SystemClock.elapsedRealtime(); // marking the time. Read isAtTwoFingersState()
}
if (mTouchesAreDisabledTillLastFingerIsUp) {
if (!areTouchesDisabled(nativeMotionEvent) /*this means it was just reset*/) {
mTouchesAreDisabledTillLastFingerIsUp = false;
// continue with onTouchEvent flow.
if (action != MotionEvent.ACTION_DOWN) {
// swallowing the event.
// in case this is a DOWN event, we do want to pass it
return true;
}
} else {
// swallowing touch event until we reset mTouchesAreDisabledTillLastFingerIsUp
return true;
}
}
final long eventTime = nativeMotionEvent.getEventTime();
final int index = nativeMotionEvent.getActionIndex();
final int id = nativeMotionEvent.getPointerId(index);
final int x = (int) nativeMotionEvent.getX(index);
final int y = (int) nativeMotionEvent.getY(index);
if (mKeyPressTimingHandler.isInKeyRepeat()) {
// It will keep being in the key repeating mode while the key is
// being pressed.
if (action == MotionEvent.ACTION_MOVE) {
return true;
}
final PointerTracker tracker = getPointerTracker(id);
// Key repeating timer will be canceled if 2 or more keys are in
// action, and current
// event (UP or DOWN) is non-modifier key.
if (pointerCount > 1 && !tracker.isModifier()) {
mKeyPressTimingHandler.cancelKeyRepeatTimer();
}
// Up event will pass through.
}
if (action == MotionEvent.ACTION_MOVE) {
for (int i = 0; i < pointerCount; i++) {
PointerTracker tracker = getPointerTracker(nativeMotionEvent.getPointerId(i));
tracker.onMoveEvent(
(int) nativeMotionEvent.getX(i), (int) nativeMotionEvent.getY(i), eventTime);
}
} else {
PointerTracker tracker = getPointerTracker(id);
sendOnXEvent(action, eventTime, x, y, tracker);
}
return true;
} | @Test
public void testWithLongPressOutputRegularPressKeyPressState() {
final AnyKeyboard.AnyKey key = findKey('f');
key.longPressCode = 'z';
KeyDrawableStateProvider provider =
new KeyDrawableStateProvider(
R.attr.key_type_function,
R.attr.key_type_action,
R.attr.action_done,
R.attr.action_search,
R.attr.action_go);
Assert.assertArrayEquals(provider.KEY_STATE_NORMAL, key.getCurrentDrawableState(provider));
Point keyPoint = ViewTestUtils.getKeyCenterPoint(key);
ViewTestUtils.navigateFromTo(mUnderTest, keyPoint, keyPoint, 60, true, false);
Assert.assertArrayEquals(provider.KEY_STATE_PRESSED, key.getCurrentDrawableState(provider));
mUnderTest.onTouchEvent(
MotionEvent.obtain(
SystemClock.uptimeMillis(),
SystemClock.uptimeMillis(),
MotionEvent.ACTION_UP,
keyPoint.x,
keyPoint.y,
0));
Assert.assertArrayEquals(provider.KEY_STATE_NORMAL, key.getCurrentDrawableState(provider));
} |
@Override
public final int compareTo(final SQLToken sqlToken) {
return startIndex - sqlToken.startIndex;
} | @Test
void assertCompareToEqual() {
assertThat(new SQLTokenFixture(0, 10).compareTo(new SQLTokenFixture(0, 10)), is(0));
} |
public double getY() {
return position.y();
} | @Test
public void testGetY() throws Exception {
World world = mock(World.class);
Location location = new Location(world, Vector3.at(0, TEST_VALUE, 0));
assertEquals(TEST_VALUE, location.getY(), EPSILON);
} |
public String generateSignature() {
StringBuilder builder = new StringBuilder();
append(builder, NLS.str("certificate.serialSigType"), x509cert.getSigAlgName());
append(builder, NLS.str("certificate.serialSigOID"), x509cert.getSigAlgOID());
return builder.toString();
} | @Test
public void decodeRSAKeySignature() {
assertThat(certificateManagerRSA.generateSignature())
.contains("SHA256withRSA")
.contains("1.2.840.113549.1.1.11");
} |
@Override
public LogicalSchema getSchema() {
return getSource().getSchema();
} | @Test
public void shouldSupportMultiRangeExpressionsUsingTableScan() {
// Given:
when(source.getSchema()).thenReturn(MULTI_KEY_SCHEMA);
final Expression expression1 = new ComparisonExpression(
Type.GREATER_THAN,
new UnqualifiedColumnReferenceExp(ColumnName.of("K1")),
new IntegerLiteral(1)
);
final Expression expression2 = new ComparisonExpression(
Type.GREATER_THAN,
new UnqualifiedColumnReferenceExp(ColumnName.of("K2")),
new IntegerLiteral(2)
);
final Expression expression = new LogicalBinaryExpression(
LogicalBinaryExpression.Type.AND,
expression1,
expression2
);
expectTableScan(expression, false);
} |
@GET
@Operation(summary = "List all active connectors")
public Response listConnectors(
final @Context UriInfo uriInfo,
final @Context HttpHeaders headers
) {
if (uriInfo.getQueryParameters().containsKey("expand")) {
Map<String, Map<String, Object>> out = new HashMap<>();
for (String connector : herder.connectors()) {
try {
Map<String, Object> connectorExpansions = new HashMap<>();
for (String expansion : uriInfo.getQueryParameters().get("expand")) {
switch (expansion) {
case "status":
connectorExpansions.put("status", herder.connectorStatus(connector));
break;
case "info":
connectorExpansions.put("info", herder.connectorInfo(connector));
break;
default:
log.info("Ignoring unknown expansion type {}", expansion);
}
}
out.put(connector, connectorExpansions);
} catch (NotFoundException e) {
                // this likely means that a connector has been removed while we were looking up its info
// we can just not include this connector in the return entity
log.debug("Unable to get connector info for {} on this worker", connector);
}
}
return Response.ok(out).build();
} else {
return Response.ok(herder.connectors()).build();
}
} | @Test
public void testFullExpandConnectors() {
when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME));
ConnectorInfo connectorInfo = mock(ConnectorInfo.class);
ConnectorInfo connectorInfo2 = mock(ConnectorInfo.class);
when(herder.connectorInfo(CONNECTOR2_NAME)).thenReturn(connectorInfo2);
when(herder.connectorInfo(CONNECTOR_NAME)).thenReturn(connectorInfo);
ConnectorStateInfo connector = mock(ConnectorStateInfo.class);
ConnectorStateInfo connector2 = mock(ConnectorStateInfo.class);
when(herder.connectorStatus(CONNECTOR2_NAME)).thenReturn(connector2);
when(herder.connectorStatus(CONNECTOR_NAME)).thenReturn(connector);
forward = mock(UriInfo.class);
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<>();
queryParams.put("expand", Arrays.asList("info", "status"));
when(forward.getQueryParameters()).thenReturn(queryParams);
Map<String, Map<String, Object>> expanded = (Map<String, Map<String, Object>>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity();
// Ordering isn't guaranteed, compare sets
assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet());
assertEquals(connectorInfo2, expanded.get(CONNECTOR2_NAME).get("info"));
assertEquals(connectorInfo, expanded.get(CONNECTOR_NAME).get("info"));
assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status"));
assertEquals(connector, expanded.get(CONNECTOR_NAME).get("status"));
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
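    // Serialize the provided values into a key/value producer record for the source's backing topic.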
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldHandleNullKeyForSourceWithKeyField() {
// Given:
givenSourceStreamWithSchema(BIG_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of());
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
allAndPseudoColumnNames(BIG_SCHEMA),
ImmutableList.of(
new LongLiteral(1L),
new StringLiteral("str"),
new StringLiteral("str"),
new IntegerLiteral(0),
new LongLiteral(2),
new DoubleLiteral(3.0),
new BooleanLiteral("TRUE"),
new StringLiteral("str"),
new DecimalLiteral(new BigDecimal("1.2")))
);
// When:
executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
// Then:
verify(keySerializer).serialize(TOPIC_NAME, genericKey("str"));
verify(valueSerializer)
.serialize(TOPIC_NAME, genericRow(
"str", 0, 2L, 3.0, true, "str", new BigDecimal("1.2", new MathContext(2)))
);
verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
} |
public void updateSchema( PartitionSchema schema ) {
if ( schema != null && schema.getName() != null ) {
stepMeta.getStepPartitioningMeta().setPartitionSchema( schema );
}
} | @Test
public void metaIsUpdated() {
PartitionSchema schema = new PartitionSchema( "1", Collections.<String>emptyList() );
StepPartitioningMeta meta = mock( StepPartitioningMeta.class );
when( stepMeta.getStepPartitioningMeta() ).thenReturn( meta );
settings.updateSchema( schema );
verify( meta ).setPartitionSchema( schema );
} |
@Override
public int size() {
int size = 0;
for (Map<Data, QueryableEntry> resultSet : resultSets) {
size += resultSet.size();
}
return size;
} | @Test
public void testAddResultSet_notEmpty() {
addEntry(entry(data()));
assertThat(result.size()).isEqualTo(1);
} |
public String getGroup() {
return group;
} | @Test
public void testGetGroup() {
// Test the getGroup method
assertEquals("Group1", event.getGroup());
} |
static Map<Address, List<Shard>> assignShards(Collection<Shard> shards, Collection<Address> addresses) {
Map<String, List<String>> assignment = addresses.stream() // host -> [indexShard...]
.map(Address::getHost).distinct().collect(toMap(identity(), a -> new ArrayList<>()));
Map<String, List<String>> nodeCandidates = shards.stream() // indexShard -> [host...]
.collect(groupingBy(Shard::indexShard, mapping(Shard::getIp, toList())));
// Make the assignment
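        // Greedy assignment: each shard goes to the candidate host that currently holds the fewest shards.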
nodeCandidates.forEach((indexShard, hosts) -> hosts.stream()
.map(assignment::get)
.filter(Objects::nonNull)
.min(comparingInt(List::size))
.orElseThrow(() -> new IllegalStateException("Selected members do not contain shard '" + indexShard + "'"))
.add(indexShard));
// Transform the results
Map<String, List<Address>> addressMap = addresses.stream().collect(groupingBy(Address::getHost, toList()));
Map<String, Shard> shardMap = shards.stream().collect(toMap(s -> s.indexShard() + "@" + s.getIp(), identity()));
return assignment.entrySet().stream()
.flatMap(e -> {
List<Address> a = addressMap.get(e.getKey());
List<Shard> s = e.getValue().stream()
.map(indexShard -> shardMap.get(indexShard + "@" + e.getKey())).toList();
int c = (int) Math.ceil((double) s.size() / a.size());
return IntStream.range(0, a.size())
.mapToObj(i -> entry(a.get(i), List.copyOf(s.subList(i * c, Math.min((i + 1) * c, s.size())))));
}).collect(toMap(Entry::getKey, Entry::getValue));
} | @Test
public void given_multipleNodeAddressesOnLocal_when_assignShards_then_shouldAssignSingleShardToEachAddress()
throws UnknownHostException {
List<Shard> shards = List.of(
new Shard("elastic-index", 0, Prirep.p, 10, "STARTED", "127.0.0.1", "127.0.0.1:9200", "node1"),
new Shard("elastic-index", 1, Prirep.p, 10, "STARTED", "127.0.0.1", "127.0.0.1:9201", "node2"),
new Shard("elastic-index", 2, Prirep.p, 10, "STARTED", "127.0.0.1", "127.0.0.1:9202", "node3")
);
List<Address> addresses = List.of(
new Address("127.0.0.1", 5701),
new Address("127.0.0.1", 5702),
new Address("127.0.0.1", 5703)
);
Map<Address, List<Shard>> assignment = ElasticSourcePMetaSupplier.assignShards(shards, addresses);
assertThat(assignment)
.containsOnlyKeys(addresses)
// shards are distributed evenly
.allSatisfy((address, shardList) -> assertThat(shardList).hasSize(1))
// all shards are assigned
.satisfies(a -> assertThat(a.values().stream().flatMap(List::stream))
.containsExactlyInAnyOrderElementsOf(shards));
} |
public Optional<LDAPUser> searchUserByPrincipal(LDAPConnection connection,
UnboundLDAPConfig config,
String principal) throws LDAPException {
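        // Substitute the LDAP-escaped principal into the configured user search filter template.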
final String filterString = new MessageFormat(config.userSearchPattern(), Locale.ENGLISH)
.format(new Object[]{Filter.encodeValue(principal)});
return searchUser(connection, config, Filter.create(filterString));
} | @Test
public void testUserLookup() throws Exception {
final UnboundLDAPConfig searchConfig = UnboundLDAPConfig.builder()
.userSearchBase("ou=users,dc=example,dc=com")
.userSearchPattern("(&(objectClass=posixAccount)(uid={0}))")
.userUniqueIdAttribute("entryUUID")
.userNameAttribute("uid")
.userFullNameAttribute("cn")
.emailAttributes(new ArrayList<>())
.build();
final LDAPUser entry = connector.searchUserByPrincipal(connection, searchConfig, "john").orElse(null);
assertThat(entry).isNotNull();
assertThat(entry.dn())
.isNotNull()
.isEqualTo("cn=John Doe,ou=users,dc=example,dc=com");
assertThat(new String(Base64.getDecoder().decode(entry.base64UniqueId()), StandardCharsets.UTF_8))
.isNotBlank()
.matches("[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}");
} |
@Override
public String toString() {
return "RollbackRule with pattern [" + this.exceptionName + "]";
} | @Test
public void toStringTest(){
RollbackRule otherRollbackRuleByName = new RollbackRule(Exception.class.getName());
Assertions.assertEquals(otherRollbackRuleByName.toString(), String.format("RollbackRule with pattern [%s]", Exception.class.getName()));
} |
public static UnboundIntFlag defineIntFlag(String flagId, int defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
String modificationEffect, Dimension... dimensions) {
return define(UnboundIntFlag::new, flagId, defaultValue, owners, createdAt, expiresAt, description, modificationEffect, dimensions);
} | @Test
void testInt() {
testGeneric(Flags.defineIntFlag("int-id", 2, List.of("owner"), "1970-01-01", "2100-01-01", "desc", "mod"), 3);
} |
public URL getInterNodeListener(
final Function<URL, Integer> portResolver
) {
return getInterNodeListener(portResolver, LOGGER);
} | @Test
public void shouldThrowIfExplicitInterNodeListenerHasIpv4WildcardAddress() {
// Given:
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.putAll(MIN_VALID_CONFIGS)
.put(ADVERTISED_LISTENER_CONFIG, "https://0.0.0.0:12589")
.build()
);
// When:
final Exception e = assertThrows(
ConfigException.class,
() -> config.getInterNodeListener(portResolver, logger)
);
// Then:
assertThat(e.getMessage(), containsString("Invalid value https://0.0.0.0:12589 for configuration "
+ ADVERTISED_LISTENER_CONFIG
+ ": Can not be wildcard"));
} |
@Override
protected void decode(ChannelHandlerContext ctx, Object object, List out) throws Exception {
try {
if (object instanceof XMLEvent) {
final XMLEvent event = (XMLEvent) object;
if (event.isStartDocument() || event.isEndDocument()) {
return;
}
if (event.isCharacters() && depth <= 1) {
return;
}
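                // Root-level start element: emit the event as-is and start tracking nesting depth.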
if (depth < 1 && event.isStartElement()) {
out.add(object);
depth++;
return;
}
if (depth <= 1 && event.isEndElement()) {
out.add(object);
depth--;
return;
}
writer.add(event);
if (event.isStartElement()) {
depth++;
} else if (event.isEndElement()) {
depth--;
if (depth == 1) {
writer.flush();
org.dom4j.Element xmlElement = transform().getRootElement();
out.add(xmlElement);
writer.close();
resetWriter();
}
}
}
} catch (Exception e) {
logger.info(e.getCause().getMessage());
throw e;
}
} | @Test
public void testMergeStreamClose() throws Exception {
List<Object> list = Lists.newArrayList();
streamCloseXmlEventList.forEach(xmlEvent -> {
try {
xmlMerger.decode(new ChannelHandlerContextAdapter(), xmlEvent, list);
} catch (Exception e) {
fail();
}
});
// StreamClose should not be merged, should be passed as XMLEvent
assertThat(list.size(), Matchers.is(1));
assertThat(list.get(0), Matchers.is(instanceOf(XMLEvent.class)));
assertThat(((XMLEvent) list.get(0)).isEndElement(), Matchers.is(true));
} |
public static byte[] serialize(final Object body) throws IOException {
final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
final ObjectOutputStream outputStream = new ObjectOutputStream(byteArrayOutputStream);
try {
outputStream.writeObject(body);
return byteArrayOutputStream.toByteArray();
} catch (NotSerializableException exception) {
throw new RuntimeCamelException(exception);
} finally {
byteArrayOutputStream.close();
outputStream.close();
}
} | @Test
public void testSerialisationOfSerializableObject() throws Exception {
Object in = new Obj("id", "name");
byte[] expected = PulsarMessageUtils.serialize(in);
assertNotNull(expected);
} |
@Override
public void aroundWriteTo(WriterInterceptorContext context) throws IOException, WebApplicationException {
try {
context.proceed();
} catch (EofException e) {
LOGGER.debug("Client disconnected while processing and sending response", e);
exceptionCounter.inc();
}
} | @SuppressWarnings("NullAway")
@Test
void shouldSwallowEofException() throws IOException {
MetricRegistry metricRegistry = new MetricRegistry();
EofExceptionWriterInterceptor interceptor = new EofExceptionWriterInterceptor(metricRegistry);
WriterInterceptorContext context = mock(WriterInterceptorContext.class);
doThrow(EofException.class).when(context).proceed();
interceptor.aroundWriteTo(context);
verify(context, only()).proceed();
Counter counter = metricRegistry.getCounters().get("io.dropwizard.jersey.errors.EofExceptionWriterInterceptor.eof-exceptions");
assertThat(counter).isNotNull();
assertThat(counter.getCount()).isEqualTo(1L);
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
final IRODSFileSystemAO fs = session.getClient();
final IRODSFile f = fs.getIRODSFileFactory().instanceIRODSFile(file.getAbsolute());
if(!f.exists()) {
throw new NotfoundException(file.getAbsolute());
}
final ObjStat stats = fs.getObjStat(f.getAbsolutePath());
return this.toAttributes(stats);
}
catch(JargonException e) {
throw new IRODSExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
} | @Test(expected = NotfoundException.class)
public void testFindNotFound() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
new IRODSAttributesFinderFeature(session).find(new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)));
} |
@Override
public WebSocketExtensionData newRequestData() {
HashMap<String, String> parameters = new HashMap<String, String>(4);
if (requestedServerNoContext) {
parameters.put(SERVER_NO_CONTEXT, null);
}
if (allowClientNoContext) {
parameters.put(CLIENT_NO_CONTEXT, null);
}
if (requestedServerWindowSize != MAX_WINDOW_SIZE) {
parameters.put(SERVER_MAX_WINDOW, Integer.toString(requestedServerWindowSize));
}
if (allowClientWindowSize) {
parameters.put(CLIENT_MAX_WINDOW, null);
}
return new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, parameters);
} | @Test
public void testNormalData() {
PerMessageDeflateClientExtensionHandshaker handshaker =
new PerMessageDeflateClientExtensionHandshaker();
WebSocketExtensionData data = handshaker.newRequestData();
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, data.name());
assertEquals(ZlibCodecFactory.isSupportingWindowSizeAndMemLevel() ? 1 : 0, data.parameters().size());
} |
@Nullable
@Override
public ResultSubpartition.BufferAndBacklog getNextBuffer() throws IOException {
synchronized (lock) {
cacheBuffer();
if (cachedBuffers.isEmpty()) {
return null;
}
ResultSubpartition.BufferAndBacklog buffer = cachedBuffers.poll().f0;
return new ResultSubpartition.BufferAndBacklog(
buffer.buffer(),
cachedBuffers.size(),
cachedBuffers.isEmpty()
? Buffer.DataType.NONE
: cachedBuffers.peek().f0.buffer().getDataType(),
sequenceNumber++);
}
} | @Test
void testGetNextBuffer() throws IOException {
assertThat(view.peekNextBufferSubpartitionId()).isEqualTo(-1);
assertThat(view.getNextBuffer()).isNull();
view0.notifyDataAvailable();
assertThat(view.peekNextBufferSubpartitionId()).isZero();
ResultSubpartition.BufferAndBacklog bufferAndBacklog = view.getNextBuffer();
assertThat(bufferAndBacklog.buffer()).isEqualTo(buffers0.get(0));
assertThat(bufferAndBacklog.buffersInBacklog()).isEqualTo(buffers0.size() - 1);
view1.notifyDataAvailable();
assertThat(view.peekNextBufferSubpartitionId()).isZero();
assertThat(view.getNextBuffer().buffer()).isEqualTo(buffers0.get(1));
List<Buffer> buffers = new ArrayList<>();
while (view.getAvailabilityAndBacklog(true).isAvailable()) {
buffers.add(view.getNextBuffer().buffer());
}
assertThat(buffers)
.hasSize(buffers0.size() + buffers1.size() - 2)
.containsSubsequence(buffers0.subList(2, buffers0.size()))
.containsSubsequence(buffers1);
} |
public void handle(ServletResponse response, Exception ex) throws IOException {
if (ex instanceof InterceptException) {
if (response instanceof org.eclipse.jetty.server.Response) {
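            // For Jetty responses, write the JSON error body directly through the HTTP channel so the payload and status are preserved.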
String errorData = ObjectMapperFactory
.getMapper().writer().writeValueAsString(new ErrorData(ex.getMessage()));
byte[] errorBytes = errorData.getBytes(StandardCharsets.UTF_8);
int errorCode = ((InterceptException) ex).getErrorCode();
HttpFields httpFields = new HttpFields();
HttpField httpField = new HttpField(HttpHeader.CONTENT_TYPE, "application/json;charset=utf-8");
httpFields.add(httpField);
MetaData.Response info = new MetaData.Response(HttpVersion.HTTP_1_1, errorCode, httpFields);
info.setHttpVersion(HttpVersion.HTTP_1_1);
info.setReason(errorData);
info.setStatus(errorCode);
info.setContentLength(errorBytes.length);
((org.eclipse.jetty.server.Response) response).getHttpChannel().sendResponse(info,
ByteBuffer.wrap(errorBytes),
true);
} else {
((HttpServletResponse) response).sendError(((InterceptException) ex).getErrorCode(),
ex.getMessage());
}
} else {
((HttpServletResponse) response).sendError(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
ex.getMessage());
}
} | @Test
@SneakyThrows
public void testHandle() {
String restriction = "Reach the max tenants [5] restriction";
String internal = "internal exception";
String illegal = "illegal argument exception ";
ExceptionHandler handler = new ExceptionHandler();
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
handler.handle(response, new InterceptException(PRECONDITION_FAILED_412, restriction));
Mockito.verify(response).sendError(PRECONDITION_FAILED_412, restriction);
handler.handle(response, new InterceptException(INTERNAL_SERVER_ERROR_500, internal));
Mockito.verify(response).sendError(INTERNAL_SERVER_ERROR_500, internal);
handler.handle(response, new IllegalArgumentException(illegal));
Mockito.verify(response).sendError(INTERNAL_SERVER_ERROR_500, illegal);
Response response2 = Mockito.mock(Response.class);
HttpChannel httpChannel = Mockito.mock(HttpChannel.class);
Mockito.when(response2.getHttpChannel()).thenReturn(httpChannel);
handler.handle(response2, new InterceptException(PRECONDITION_FAILED_412, restriction));
Mockito.verify(httpChannel).sendResponse(Mockito.any(), Mockito.any(), Mockito.anyBoolean());
} |
public double logp(int[] o, int[] s) {
if (o.length != s.length) {
throw new IllegalArgumentException("The observation sequence and state sequence are not the same length.");
}
int n = s.length;
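        // Accumulate log initial-state, transition, and emission probabilities along the given state path.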
double p = MathEx.log(pi[s[0]]) + MathEx.log(b.get(s[0], o[0]));
for (int i = 1; i < n; i++) {
p += MathEx.log(a.get(s[i - 1], s[i])) + MathEx.log(b.get(s[i], o[i]));
}
return p;
} | @Test
public void testLogp() {
System.out.println("logp");
HMM hmm = new HMM(pi, Matrix.of(a), Matrix.of(b));
int[] o = {0, 0, 1, 1, 0, 1, 1, 0};
double expResult = -5.609373;
double result = hmm.logp(o);
assertEquals(expResult, result, 1E-6);
} |
@VisibleForTesting
public int getSignalToContainerFailedRetrieved() {
return numSignalToContainerFailedRetrieved.value();
} | @Test
public void testSignalToContainerFailed() {
long totalBadBefore = metrics.getSignalToContainerFailedRetrieved();
badSubCluster.getSignalContainer();
Assert.assertEquals(totalBadBefore + 1,
metrics.getSignalToContainerFailedRetrieved());
} |
@Override
public SparkPipelineResult run(Pipeline pipeline) {
boolean isStreaming =
options.isStreaming() || options.as(TestSparkPipelineOptions.class).isForceStreaming();
// Default to using the primitive versions of Read.Bounded and Read.Unbounded.
// TODO(https://github.com/apache/beam/issues/20530): Use SDF read as default when we address
// performance issue.
if (!ExperimentalOptions.hasExperiment(pipeline.getOptions(), "beam_fn_api")) {
SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(pipeline);
}
JavaSparkContext jsc =
SparkContextFactory.getSparkContext(pipeline.getOptions().as(SparkPipelineOptions.class));
JavaStreamingContext jssc =
new JavaStreamingContext(jsc, new org.apache.spark.streaming.Duration(1000));
SparkRunner.initAccumulators(options, jsc);
TransformTranslator.Translator translator = new TransformTranslator.Translator();
SparkNativePipelineVisitor visitor;
if (isStreaming) {
SparkPipelineTranslator streamingTranslator =
new StreamingTransformTranslator.Translator(translator);
EvaluationContext ctxt = new EvaluationContext(jsc, pipeline, options, jssc);
visitor = new SparkNativePipelineVisitor(streamingTranslator, ctxt);
} else {
EvaluationContext ctxt = new EvaluationContext(jsc, pipeline, options, jssc);
visitor = new SparkNativePipelineVisitor(translator, ctxt);
}
pipeline.traverseTopologically(visitor);
SparkContextFactory.stopSparkContext(jsc);
String debugString = visitor.getDebugString();
LOG.info("Translated Native Spark pipeline:\n{}", debugString);
return new DebugSparkPipelineResult(debugString);
} | @Test
public void debugBatchPipeline() {
PipelineOptions options = contextRule.configure(PipelineOptionsFactory.create());
options.setRunner(SparkRunnerDebugger.class);
Pipeline pipeline = Pipeline.create(options);
PCollection<String> lines =
pipeline.apply(Create.of(Collections.<String>emptyList()).withCoder(StringUtf8Coder.of()));
PCollection<KV<String, Long>> wordCounts = lines.apply(new WordCount.CountWords());
wordCounts.apply(GroupByKey.create()).apply(Combine.groupedValues(Sum.ofLongs()));
PCollection<KV<String, Long>> wordCountsPlusOne =
wordCounts.apply(MapElements.via(new PlusOne()));
PCollectionList.of(wordCounts).and(wordCountsPlusOne).apply(Flatten.pCollections());
wordCounts
.apply(MapElements.via(new WordCount.FormatAsTextFn()))
.apply(TextIO.write().to("!!PLACEHOLDER-OUTPUT-DIR!!").withNumShards(3).withSuffix(".txt"));
final String expectedPipeline =
"sparkContext.<impulse>()\n"
+ "_.mapPartitions(new org.apache.beam.sdk.transforms.FlatMapElements$3())\n"
+ "_.mapPartitions("
+ "new org.apache.beam.runners.spark.examples.WordCount$ExtractWordsFn())\n"
+ "_.mapPartitions(new org.apache.beam.sdk.transforms.Count$PerElement$1())\n"
+ "_.combineByKey(..., new org.apache.beam.sdk.transforms.Count$CountFn(), ...)\n"
+ "_.groupByKey()\n"
+ "_.map(new org.apache.beam.sdk.transforms.Sum$SumLongFn())\n"
+ "_.mapPartitions("
+ "new org.apache.beam.runners.spark.SparkRunnerDebuggerTest$PlusOne())\n"
+ "sparkContext.union(...)\n"
+ "_.mapPartitions("
+ "new org.apache.beam.runners.spark.examples.WordCount$FormatAsTextFn())\n"
+ "_.<org.apache.beam.sdk.io.TextIO$Write>";
SparkRunnerDebugger.DebugSparkPipelineResult result =
(SparkRunnerDebugger.DebugSparkPipelineResult) pipeline.run();
assertThat(
"Debug pipeline did not equal expected",
result.getDebugString(),
Matchers.equalTo(expectedPipeline));
} |
@Override
protected DatanodeDescriptor chooseDataNode(final String scope,
final Collection<Node> excludedNode, StorageType type) {
// only the code that uses DFSNetworkTopology should trigger this code path.
Preconditions.checkArgument(clusterMap instanceof DFSNetworkTopology);
DFSNetworkTopology dfsClusterMap = (DFSNetworkTopology)clusterMap;
DatanodeDescriptor a = (DatanodeDescriptor) dfsClusterMap
.chooseRandomWithStorageTypeTwoTrial(scope, excludedNode, type);
DatanodeDescriptor b = (DatanodeDescriptor) dfsClusterMap
.chooseRandomWithStorageTypeTwoTrial(scope, excludedNode, type);
return select(a, b, false);
} | @Test
public void testChooseDataNode() {
Collection<Node> allNodes = new ArrayList<>(dataNodes.length);
Collections.addAll(allNodes, dataNodes);
if (placementPolicy instanceof AvailableSpaceBlockPlacementPolicy) {
// exclude all datanodes when chooseDataNode, no NPE should be thrown
((AvailableSpaceBlockPlacementPolicy) placementPolicy)
.chooseDataNode("~", allNodes);
}
} |
@Override
public long contains(Collection<T> objects) {
return get(containsAsync(objects));
} | @Test
public void testNotInitializedOnContains() {
Assertions.assertThrows(RedisException.class, () -> {
RBloomFilter<String> filter = redisson.getBloomFilter("filter");
filter.contains("32");
});
} |
@Override
public void execute(String mapName, Predicate predicate, Collection<Integer> partitions, Result result) {
RetryableHazelcastException storedException = null;
for (Integer partitionId : partitions) {
try {
partitionScanRunner.run(mapName, predicate, partitionId, result);
} catch (RetryableHazelcastException e) {
      // RetryableHazelcastExceptions are stored and re-thrown later. This ensures all partitions
      // are touched, as was the case when the parallel execution was used.
// see discussion at https://github.com/hazelcast/hazelcast/pull/5049#discussion_r28773099 for details.
if (storedException == null) {
storedException = e;
}
}
}
if (storedException != null) {
throw storedException;
}
} | @Test
public void execute_success() {
PartitionScanRunner runner = mock(PartitionScanRunner.class);
CallerRunsPartitionScanExecutor executor = new CallerRunsPartitionScanExecutor(runner);
Predicate<Object, Object> predicate = Predicates.equal("attribute", 1);
QueryResult queryResult = new QueryResult(IterationType.ENTRY, null, null, Long.MAX_VALUE, false);
executor.execute("Map", predicate, asList(1, 2, 3), queryResult);
Collection<QueryResultRow> result = queryResult.getRows();
assertEquals(0, result.size());
} |
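The inline comment above documents the intent: scan every partition, remember only the first retryable failure, and rethrow it once all partitions have been touched. A generic sketch of that pattern under assumed names (runAll and the Consumer-based task are illustrative helpers, not Hazelcast API):

// Sketch: touch every item, remember the first retryable failure, rethrow at the end.
static <T> void runAll(Iterable<T> items, java.util.function.Consumer<T> task) {
  RuntimeException firstFailure = null;
  for (T item : items) {
    try {
      task.accept(item);
    } catch (RuntimeException e) { // stand-in for RetryableHazelcastException
      if (firstFailure == null) {
        firstFailure = e; // keep only the first failure
      }
    }
  }
  if (firstFailure != null) {
    throw firstFailure; // rethrown only after every item was attempted
  }
}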
public static Autoscaling empty() {
return empty("");
} | @Test
public void autoscaling_with_unspecified_resources_use_defaults_exclusive() {
var min = new ClusterResources( 2, 1, NodeResources.unspecified());
var max = new ClusterResources( 6, 1, NodeResources.unspecified());
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.initialResources(Optional.empty())
.capacity(Capacity.from(min, max))
.build();
NodeResources defaultResources =
fixture.tester().nodeRepository().capacityPoliciesFor(fixture.applicationId)
.specifyFully(NodeResources.unspecified(), fixture.clusterSpec);
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
fixture.nodes().toResources());
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.25, 0.95, 0.95, 0, 0), 120);
fixture.tester().assertResources("Scaling up",
5, 1,
defaultResources.vcpu(), defaultResources.memoryGiB(), defaultResources.diskGb(),
fixture.autoscale());
} |
public Schema find(String name, String namespace) {
Schema.Type type = PRIMITIVES.get(name);
if (type != null) {
return Schema.create(type);
}
String fullName = fullName(name, namespace);
Schema schema = getNamedSchema(fullName);
if (schema == null) {
schema = getNamedSchema(name);
}
return schema != null ? schema : SchemaResolver.unresolvedSchema(fullName);
} | @Test
public void primitivesAreNotCached() {
EnumSet<Schema.Type> primitives = EnumSet.complementOf(EnumSet.of(Schema.Type.RECORD, Schema.Type.ENUM,
Schema.Type.FIXED, Schema.Type.UNION, Schema.Type.ARRAY, Schema.Type.MAP));
ParseContext context = new ParseContext();
for (Schema.Type type : primitives) {
Schema first = context.find(type.getName(), null);
Schema second = context.find(type.getName(), null);
assertEquals(first, second);
assertNotSame(first, second);
first.addProp("logicalType", "brick");
assertNotEquals(first, second);
}
} |
public static void removeDupes(
final List<CharSequence> suggestions, List<CharSequence> stringsPool) {
if (suggestions.size() < 2) return;
int i = 1;
// Don't cache suggestions.size(), since we may be removing items
while (i < suggestions.size()) {
final CharSequence cur = suggestions.get(i);
// Compare each suggestion with each previous suggestion
for (int j = 0; j < i; j++) {
CharSequence previous = suggestions.get(j);
if (TextUtils.equals(cur, previous)) {
removeSuggestion(suggestions, i, stringsPool);
i--;
break;
}
}
i++;
}
} | @Test
public void testRemoveDupesDupeIsNotFirstNoRecycle() throws Exception {
ArrayList<CharSequence> list =
new ArrayList<>(
Arrays.<CharSequence>asList("typed", "something", "duped", "duped", "something"));
Assert.assertEquals(0, mStringPool.size());
IMEUtil.removeDupes(list, mStringPool);
Assert.assertEquals(3, list.size());
Assert.assertEquals("typed", list.get(0));
Assert.assertEquals("something", list.get(1));
Assert.assertEquals("duped", list.get(2));
Assert.assertEquals(0, mStringPool.size());
} |
@Override
public String getScheme()
{
return uri.getScheme();
} | @Test
public void testGetScheme()
throws Exception
{
Configuration config = new Configuration();
try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
fs.initialize(new URI("s3a://test-bucket/table"), config);
assertEquals(fs.getScheme(), "s3a");
}
try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
fs.initialize(new URI("s3://test-bucket/table"), config);
assertEquals(fs.getScheme(), "s3");
}
try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
fs.initialize(new URI("s3n://test-bucket/table"), config);
assertEquals(fs.getScheme(), "s3n");
}
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testSeekBeforeException() {
buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED);
assignFromUser(mkSet(tp0));
subscriptions.seek(tp0, 1);
assertEquals(1, sendFetches());
Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
partitions.put(tidp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(100)
.setRecords(records));
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
assertEquals(2, fetchRecords().get(tp0).size());
subscriptions.assignFromUser(mkSet(tp0, tp1));
subscriptions.seekUnvalidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
assertEquals(1, sendFetches());
partitions = new HashMap<>();
partitions.put(tidp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
.setHighWatermark(100));
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
consumerClient.poll(time.timer(0));
assertEquals(1, fetchRecords().get(tp0).size());
subscriptions.seek(tp1, 10);
// Should not throw OffsetOutOfRangeException after the seek
assertEmptyFetch("Should not return records or advance position after seeking to end of topic partitions");
} |
@Override
protected void runOneIteration() {
monitor.debug(() -> "JobMetadata.isInitialized(): " + JobMetadata.isInitialized());
if (JobMetadata.isInitialized()) {
if (stopwatch.elapsed(TimeUnit.SECONDS) > credsTimeoutSeconds) {
UUID jobId = JobMetadata.getJobId();
markJobTimedOut(jobId);
String message =
format(
"Waited over %d seconds for the creds to be provided on the claimed job: %s",
credsTimeoutSeconds, jobId);
monitor.severe(() -> message, EventCode.WORKER_CREDS_TIMEOUT);
throw new CredsTimeoutException(message, jobId);
}
pollUntilJobIsReady();
} else {
// Poll for an unassigned job to process with this transfer worker instance.
// Once a transfer worker instance is assigned, the client will populate storage with
// auth data encrypted with this instances public key and the copy process can begin
pollForUnassignedJob();
}
} | @Test
public void pollingLifeCycle() throws Exception {
when(asymmetricKeyGenerator.generate()).thenReturn(TEST_KEY_PAIR);
// Initial state
assertThat(JobMetadata.isInitialized()).isFalse();
// Run once with no data in the database
jobPollingService.runOneIteration();
assertThat(JobMetadata.isInitialized()).isFalse();
PortabilityJob job = store.findJob(TEST_ID);
assertThat(job).isNull(); // No existing ready job
// API inserts an job in initial authorization state
job =
PortabilityJob.builder()
.setTransferDataType(DataVertical.PHOTOS)
.setExportService("DummyExportService")
.setImportService("DummyImportService")
.setAndValidateJobAuthorization(
JobAuthorization.builder()
.setEncryptionScheme("cleartext")
.setState(State.INITIAL)
.setSessionSecretKey("fooBar")
.build())
.build();
store.createJob(TEST_ID, job);
// Verify initial authorization state
job = store.findJob(TEST_ID);
assertThat(job.jobAuthorization().state()).isEqualTo(State.INITIAL);
// no auth data should exist yet
assertThat(job.jobAuthorization().encryptedAuthData()).isNull();
// API atomically updates job to from 'initial' to 'creds available'
job =
job.toBuilder()
.setAndValidateJobAuthorization(
job.jobAuthorization().toBuilder().setState(State.CREDS_AVAILABLE).build())
.build();
store.updateJobAuthStateToCredsAvailable(TEST_ID);
// Verify 'creds available' state
job = store.findJob(TEST_ID);
assertThat(job.jobAuthorization().state()).isEqualTo(State.CREDS_AVAILABLE);
// no auth data should exist yet
assertThat(job.jobAuthorization().encryptedAuthData()).isNull();
// Worker initiates the JobPollingService
jobPollingService.runOneIteration();
assertThat(JobMetadata.isInitialized()).isTrue();
assertThat(JobMetadata.getJobId()).isEqualTo(TEST_ID);
// Verify assigned without auth data state
job = store.findJob(TEST_ID);
assertThat(job.jobAuthorization().state())
.isEqualTo(JobAuthorization.State.CREDS_ENCRYPTION_KEY_GENERATED);
assertThat(job.jobAuthorization().authPublicKey()).isNotEmpty();
// Client encrypts data and updates the job
job =
job.toBuilder()
.setAndValidateJobAuthorization(
job.jobAuthorization()
.toBuilder()
.setEncryptedAuthData("dummy export data")
.setState(State.CREDS_STORED)
.build())
.build();
store.updateJobWithCredentials(TEST_ID, job);
// Run another iteration of the polling service
// Worker should pick up encrypted data and update job
jobPollingService.runOneIteration();
job = store.findJob(TEST_ID);
JobAuthorization jobAuthorization = job.jobAuthorization();
assertThat(jobAuthorization.state()).isEqualTo(JobAuthorization.State.CREDS_STORED);
assertThat(jobAuthorization.encryptedAuthData()).isNotEmpty();
store.remove(TEST_ID);
} |
public int allocate(final String label)
{
return allocate(label, DEFAULT_TYPE_ID);
} | @Test
void shouldMapAllocatedCounters()
{
manager.allocate("def");
final int id = manager.allocate("abc");
final ReadablePosition reader = new UnsafeBufferPosition(valuesBuffer, id);
final Position writer = new UnsafeBufferPosition(valuesBuffer, id);
final long expectedValue = 0xF_FFFF_FFFFL;
writer.setOrdered(expectedValue);
assertEquals(expectedValue, reader.getVolatile());
} |
@Override
public String getFirstNodeValue(String value) {
long hash = super.hash(value);
System.out.println("value=" + value + " hash = " + hash);
return sortArrayMap.firstNodeValue(hash);
} | @Test
public void getFirstNodeValue() {
AbstractConsistentHash map = new SortArrayMapConsistentHash() ;
List<String> strings = new ArrayList<String>();
for (int i = 0; i < 10; i++) {
strings.add("127.0.0." + i) ;
}
String process = map.process(strings,"zhangsan");
System.out.println(process);
Assert.assertEquals("127.0.0.2",process);
} |
public void convertQueueHierarchy(FSQueue queue) {
List<FSQueue> children = queue.getChildQueues();
final String queueName = queue.getName();
emitChildQueues(queueName, children);
emitMaxAMShare(queueName, queue);
emitMaxParallelApps(queueName, queue);
emitMaxAllocations(queueName, queue);
emitPreemptionDisabled(queueName, queue);
emitChildCapacity(queue);
emitMaximumCapacity(queueName, queue);
emitSizeBasedWeight(queueName);
emitOrderingPolicy(queueName, queue);
checkMaxChildCapacitySetting(queue);
emitDefaultUserLimitFactor(queueName, children);
for (FSQueue childQueue : children) {
convertQueueHierarchy(childQueue);
}
} | @Test
public void testQueueMaxChildCapacityNotSupported() {
converter = builder.build();
expectedException.expect(UnsupportedPropertyException.class);
expectedException.expectMessage("test");
Mockito.doThrow(new UnsupportedPropertyException("test"))
.when(ruleHandler).handleMaxChildCapacity();
converter.convertQueueHierarchy(rootQueue);
} |
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
if (!namespaceExists(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
return fetch(
row ->
JdbcUtil.stringToTableIdentifier(
row.getString(JdbcUtil.TABLE_NAMESPACE), row.getString(JdbcUtil.TABLE_NAME)),
(schemaVersion == JdbcUtil.SchemaVersion.V1)
? JdbcUtil.V1_LIST_TABLE_SQL
: JdbcUtil.V0_LIST_TABLE_SQL,
catalogName,
JdbcUtil.namespaceToString(namespace));
} | @Test
public void testListTables() {
TableIdentifier tbl1 = TableIdentifier.of("db", "tbl1");
TableIdentifier tbl2 = TableIdentifier.of("db", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "tbl2", "subtbl2");
TableIdentifier tbl4 = TableIdentifier.of("db", "ns1", "tbl3");
TableIdentifier tbl5 = TableIdentifier.of("db", "metadata", "metadata");
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4, tbl5)
.forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
List<TableIdentifier> tbls1 = catalog.listTables(Namespace.of("db"));
Set<String> tblSet = Sets.newHashSet(tbls1.stream().map(TableIdentifier::name).iterator());
assertThat(tblSet).hasSize(2).contains("tbl1", "tbl2");
List<TableIdentifier> tbls2 = catalog.listTables(Namespace.of("db", "ns1"));
assertThat(tbls2).hasSize(1);
assertThat(tbls2.get(0).name()).isEqualTo("tbl3");
assertThatThrownBy(() -> catalog.listTables(Namespace.of("db", "ns1", "ns2")))
.isInstanceOf(NoSuchNamespaceException.class)
.hasMessage("Namespace does not exist: db.ns1.ns2");
} |
public static String humanReadableBytes(Locale locale, long bytes) {
int unit = 1024;
if (bytes < unit) {
return bytes + " B";
}
int exp = (int) (Math.log(bytes) / Math.log(unit));
String pre = String.valueOf("KMGTPE".charAt(exp - 1));
return String.format(locale, "%.1f %sB", bytes / Math.pow(unit, exp), pre);
} | @Test
public void testHumanReadableBytesNullLocale() {
assertEquals("1.3 KB", StringHelper.humanReadableBytes(null, 1280));
} |
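A worked pass through the arithmetic behind that assertion (a sketch of the intermediate values, not additional API):

// humanReadableBytes(null, 1280):
//   1280 >= 1024, so exp = (int) (Math.log(1280) / Math.log(1024)) = 1  -> prefix "KMGTPE".charAt(0) = 'K'
//   1280 / Math.pow(1024, 1) = 1.25, and "%.1f" rounds half-up to "1.3"
//   a null Locale means no localization is applied, so the separator is '.', giving "1.3 KB"

Values below 1024 are returned as plain byte counts, e.g. "512 B".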
public static void updateKeyForBlobStore(Map<String, Object> conf, BlobStore blobStore, CuratorFramework zkClient, String key,
NimbusInfo nimbusDetails) {
try {
      // Most of the Clojure tests currently try to access the blobs using getBlob. Since updateKeyForBlobStore
      // checks for updating the correct version of the blob as a part of nimbus HA before performing any
      // operation on it, several test cases need to be stubbed to ignore this method. It is a valid
      // trade-off to return if nimbusDetails, which holds the current nimbus host/port data, has not been
      // initialized as a part of the test. Moreover, this applies only to the local blobstore when used along
      // with nimbus HA.
if (nimbusDetails == null) {
return;
}
boolean isListContainsCurrentNimbusInfo = false;
List<String> stateInfo;
if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + "/" + key) == null) {
return;
}
stateInfo = zkClient.getChildren().forPath(BLOBSTORE_SUBTREE + "/" + key);
if (stateInfo == null || stateInfo.isEmpty()) {
return;
}
LOG.debug("StateInfo for update {}", stateInfo);
Set<NimbusInfo> nimbusInfoList = getNimbodesWithLatestSequenceNumberOfBlob(zkClient, key);
for (NimbusInfo nimbusInfo : nimbusInfoList) {
if (nimbusInfo.getHost().equals(nimbusDetails.getHost())) {
isListContainsCurrentNimbusInfo = true;
break;
}
}
if (!isListContainsCurrentNimbusInfo && downloadUpdatedBlob(conf, blobStore, key, nimbusInfoList)) {
LOG.debug("Updating state inside zookeeper for an update");
createStateInZookeeper(conf, key, nimbusDetails);
}
} catch (KeeperException.NoNodeException | KeyNotFoundException e) {
//race condition with a delete
return;
} catch (Exception exp) {
throw new RuntimeException(exp);
}
} | @Test
public void testUpdateKeyForBlobStore_noMatch() {
zkClientBuilder.withExists(BLOBSTORE_KEY, true);
zkClientBuilder.withGetChildren(BLOBSTORE_KEY, "localhost:1111-1");
when(nimbusDetails.getHost()).thenReturn("no match");
BlobStoreUtils.updateKeyForBlobStore(conf, blobStore, zkClientBuilder.build(), KEY, nimbusDetails);
zkClientBuilder.verifyExists(true);
zkClientBuilder.verifyGetChildren(2);
verify(nimbusDetails).getHost();
verify(conf, atLeastOnce()).get(anyString());
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ItemCounter that = (ItemCounter) o;
if (!map.equals(that.map)) {
return false;
}
return true;
} | @Test
public void testEquals_returnsTrueOnSameInstance() {
assertTrue(counter.equals(counter));
} |
@Deprecated
public static RowMutationInformation of(MutationType mutationType, long sequenceNumber) {
checkArgument(sequenceNumber >= 0, "sequenceNumber must be non-negative");
return new AutoValue_RowMutationInformation(
mutationType, null, Long.toHexString(sequenceNumber));
} | @Test
public void givenTooManySegments_throws() {
IllegalArgumentException error =
assertThrows(
IllegalArgumentException.class,
() ->
RowMutationInformation.of(RowMutationInformation.MutationType.UPSERT, "0/0/0/0/0"));
assertEquals(
"changeSequenceNumber: 0/0/0/0/0 does not match expected pattern: ^([0-9A-Fa-f]{1,16})(/([0-9A-Fa-f]{1,16})){0,3}$",
error.getMessage());
} |
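Two notes on the pair above, both read directly from the code: the deprecated long overload stores the sequence number as a single hexadecimal segment (for example, of(MutationType.UPSERT, 255) would record "ff"), and the pattern quoted in the test's expected message accepts at most four '/'-separated segments of 1-16 hex digits each, which is why "0/0/0/0/0" is rejected.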
@VisibleForTesting
static void configureSslEngineFactory(
final KsqlConfig config,
final SslEngineFactory sslFactory
) {
sslFactory
.configure(config.valuesWithPrefixOverride(KsqlConfig.KSQL_SCHEMA_REGISTRY_PREFIX));
} | @Test
public void shouldPickUpPrefixedSslConfig() {
// Given:
final KsqlConfig config = config(
"ksql.schema.registry." + SslConfigs.SSL_PROTOCOL_CONFIG, "SSLv3"
);
final Map<String, Object> expectedConfigs = defaultConfigs();
expectedConfigs.put(SslConfigs.SSL_PROTOCOL_CONFIG, "SSLv3");
// When:
KsqlSchemaRegistryClientFactory.configureSslEngineFactory(config, sslEngineFactory);
// Then:
verify(sslEngineFactory).configure(expectedConfigs);
} |
@Override
public RetrievableStateHandle<T> addAndLock(String pathInZooKeeper, T state)
throws PossibleInconsistentStateException, Exception {
checkNotNull(pathInZooKeeper, "Path in ZooKeeper");
checkNotNull(state, "State");
final String path = normalizePath(pathInZooKeeper);
final Optional<Stat> maybeStat = getStat(path);
if (maybeStat.isPresent()) {
if (isNotMarkedForDeletion(maybeStat.get())) {
throw new AlreadyExistException(
String.format("ZooKeeper node %s already exists.", path));
}
Preconditions.checkState(
releaseAndTryRemove(path),
"The state is marked for deletion and, therefore, should be deletable.");
}
final RetrievableStateHandle<T> storeHandle = storage.store(state);
final byte[] serializedStoreHandle = serializeOrDiscard(storeHandle);
try {
writeStoreHandleTransactionally(path, serializedStoreHandle);
return storeHandle;
} catch (KeeperException.NodeExistsException e) {
// Transactions are not idempotent in the curator version we're currently using, so it
// is actually possible that we've re-tried a transaction that has already succeeded.
// We've ensured that the node hasn't been present prior executing the transaction, so
// we can assume that this is a result of the retry mechanism.
return storeHandle;
} catch (Exception e) {
if (indicatesPossiblyInconsistentState(e)) {
throw new PossibleInconsistentStateException(e);
}
// In case of any other failure, discard the state and rethrow the exception.
storeHandle.discardState();
throw e;
}
} | @Test
void testFailingAddWithPossiblyInconsistentState() {
final TestingLongStateHandleHelper stateHandleProvider = new TestingLongStateHandleHelper();
CuratorFramework client = spy(getZooKeeperClient());
when(client.inTransaction()).thenThrow(new RuntimeException("Expected test Exception."));
ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> store =
new ZooKeeperStateHandleStore<>(client, stateHandleProvider);
// Config
final String pathInZooKeeper = "/testAddDiscardStateHandleAfterFailure";
final long state = 81282227L;
assertThatExceptionOfType(PossibleInconsistentStateException.class)
.as("PossibleInconsistentStateException should have been thrown.")
.isThrownBy(
() ->
store.addAndLock(
pathInZooKeeper,
new TestingLongStateHandleHelper.LongStateHandle(state)));
// State handle created and not discarded
assertThat(TestingLongStateHandleHelper.getGlobalStorageSize()).isOne();
assertThat(TestingLongStateHandleHelper.getStateHandleValueByIndex(0)).isEqualTo(state);
assertThat(TestingLongStateHandleHelper.getDiscardCallCountForStateHandleByIndex(0))
.isZero();
} |
@Override
public List<SnowflakeIdentifier> listSchemas(SnowflakeIdentifier scope) {
StringBuilder baseQuery = new StringBuilder("SHOW SCHEMAS");
String[] queryParams = null;
switch (scope.type()) {
case ROOT:
// account-level listing
baseQuery.append(" IN ACCOUNT");
break;
case DATABASE:
// database-level listing
baseQuery.append(" IN DATABASE IDENTIFIER(?)");
queryParams = new String[] {scope.toIdentifierString()};
break;
default:
throw new IllegalArgumentException(
String.format("Unsupported scope type for listSchemas: %s", scope));
}
final String finalQuery = baseQuery.toString();
final String[] finalQueryParams = queryParams;
List<SnowflakeIdentifier> schemas;
try {
schemas =
connectionPool.run(
conn ->
queryHarness.query(
conn, finalQuery, SCHEMA_RESULT_SET_HANDLER, finalQueryParams));
} catch (SQLException e) {
throw snowflakeExceptionToIcebergException(
scope, e, String.format("Failed to list schemas for scope '%s'", scope));
} catch (InterruptedException e) {
throw new UncheckedInterruptedException(
e, "Interrupted while listing schemas for scope '%s'", scope);
}
schemas.forEach(
schema ->
Preconditions.checkState(
schema.type() == SnowflakeIdentifier.Type.SCHEMA,
"Expected SCHEMA, got identifier '%s' for scope '%s'",
schema,
scope));
return schemas;
} | @SuppressWarnings("unchecked")
@Test
public void testListSchemasInAccount() throws SQLException {
when(mockResultSet.next()).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
when(mockResultSet.getString("database_name"))
.thenReturn("DB_1")
.thenReturn("DB_1")
.thenReturn("DB_2");
when(mockResultSet.getString("name"))
.thenReturn("SCHEMA_1")
.thenReturn("SCHEMA_2")
.thenReturn("SCHEMA_3");
List<SnowflakeIdentifier> actualList =
snowflakeClient.listSchemas(SnowflakeIdentifier.ofRoot());
verify(mockQueryHarness)
.query(
eq(mockConnection),
eq("SHOW SCHEMAS IN ACCOUNT"),
any(JdbcSnowflakeClient.ResultSetParser.class),
eq(null));
assertThat(actualList)
.containsExactly(
SnowflakeIdentifier.ofSchema("DB_1", "SCHEMA_1"),
SnowflakeIdentifier.ofSchema("DB_1", "SCHEMA_2"),
SnowflakeIdentifier.ofSchema("DB_2", "SCHEMA_3"));
} |
public Optional<User> login(String nameOrEmail, String password) {
if (nameOrEmail == null || password == null) {
return Optional.empty();
}
User user = userDAO.findByName(nameOrEmail);
if (user == null) {
user = userDAO.findByEmail(nameOrEmail);
}
if (user != null && !user.isDisabled()) {
boolean authenticated = encryptionService.authenticate(password, user.getPassword(), user.getSalt());
if (authenticated) {
performPostLoginActivities(user);
return Optional.of(user);
}
}
return Optional.empty();
} | @Test
void apiLoginShouldPerformPostLoginActivitiesIfUserFoundFromApikeyLookupNotDisabled() {
Mockito.when(userDAO.findByApiKey("apikey")).thenReturn(normalUser);
userService.login("apikey");
Mockito.verify(postLoginActivities).executeFor(normalUser);
} |
static String abbreviate(String cmd, int len) {
if (cmd.length() > len && len >= 5) {
int firstHalf = (len - 3) / 2;
int rem = len - firstHalf - 3;
return cmd.substring(0, firstHalf) +
"..." + cmd.substring(cmd.length() - rem);
} else {
return cmd;
}
} | @Test
public void testCommandAbbreviation() {
assertEquals("a...f", ShellCommandFencer.abbreviate("abcdef", 5));
assertEquals("abcdef", ShellCommandFencer.abbreviate("abcdef", 6));
assertEquals("abcdef", ShellCommandFencer.abbreviate("abcdef", 7));
assertEquals("a...g", ShellCommandFencer.abbreviate("abcdefg", 5));
assertEquals("a...h", ShellCommandFencer.abbreviate("abcdefgh", 5));
assertEquals("a...gh", ShellCommandFencer.abbreviate("abcdefgh", 6));
assertEquals("ab...gh", ShellCommandFencer.abbreviate("abcdefgh", 7));
} |
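A worked pass through the index arithmetic, matching the last assertion above: for cmd = "abcdefgh" and len = 7, firstHalf = (7 - 3) / 2 = 2 and rem = 7 - 2 - 3 = 2, so the result is "ab" + "..." + "gh" = "ab...gh". Strings that already fit, or a limit below 5, are returned unchanged.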