focal_method | test_case
---|---
public static Result<Boolean> ok() {
return new Result<>(true, null, null);
} | @Test
public void testOk() {
// Test the ok method
Result<Boolean> result = Result.ok();
// Verify that the result is true
assertTrue(result.getResult());
assertNull(result.getErrMsg());
assertNull(result.getErrMsgParams());
} |
public static boolean isClusterController(ServiceCluster cluster) {
return ServiceType.CLUSTER_CONTROLLER.equals(cluster.serviceType());
} | @Test
public void verifyNonControllerClusterIsNotRecognized() {
ServiceCluster cluster = createServiceCluster(new ServiceType("foo"));
assertFalse(VespaModelUtil.isClusterController(cluster));
} |
@SuppressWarnings("unchecked")
public <T> T convert(DocString docString, Type targetType) {
if (DocString.class.equals(targetType)) {
return (T) docString;
}
List<DocStringType> docStringTypes = docStringTypeRegistry.lookup(docString.getContentType(), targetType);
if (docStringTypes.isEmpty()) {
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for %s",
targetType.getTypeName()));
}
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for '%s' or %s",
docString.getContentType(),
targetType.getTypeName()));
}
if (docStringTypes.size() > 1) {
List<String> suggestedContentTypes = suggestedContentTypes(docStringTypes);
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, add one of the following content types to your docstring %s",
targetType.getTypeName(),
suggestedContentTypes));
}
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, and the content type '%s' did not match any of the registered types %s. Change the content type of the docstring or register a docstring type for '%s'",
targetType.getTypeName(),
docString.getContentType(),
suggestedContentTypes,
docString.getContentType()));
}
return (T) docStringTypes.get(0).transform(docString.getContent());
} | @Test
void different_docstring_content_types_convert_to_matching_doc_string_types() {
registry.defineDocStringType(stringForJson);
registry.defineDocStringType(stringForXml);
registry.defineDocStringType(stringForYaml);
DocString docStringJson = DocString.create("{\"content\":\"hello world\"}", "json");
DocString docStringXml = DocString.create("<content>hello world</content>", "xml");
DocString docStringYml = DocString.create("content: hello world", "yml");
assertAll(
() -> assertThat(docStringJson.getContent(), equalTo(converter.convert(docStringJson, String.class))),
() -> assertThat(docStringXml.getContent(), equalTo(converter.convert(docStringXml, String.class))),
() -> assertThat(docStringYml.getContent(), equalTo(converter.convert(docStringYml, String.class))));
} |
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
} | @Test
public void shouldParseIntegerAsInt32() {
Integer value = Integer.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT32_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Integer.class, schemaAndValue.value());
assertEquals(value.intValue(), ((Integer) schemaAndValue.value()).intValue());
value = Integer.MIN_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT32_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Integer.class, schemaAndValue.value());
assertEquals(value.intValue(), ((Integer) schemaAndValue.value()).intValue());
} |
void format(NamespaceInfo nsInfo, boolean force) throws IOException {
Preconditions.checkState(nsInfo.getNamespaceID() != 0,
"can't format with uninitialized namespace info: %s",
nsInfo);
LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
nsInfo + " and force: " + force);
storage.format(nsInfo, force);
this.cache = createCache();
refreshCachedData();
} | @Test
public void testFormatNonEmptyStorageDirectories() throws Exception {
try {
// Format again here to exercise formatting of the non-empty directories
// in the journal node.
journal.format(FAKE_NSINFO, false);
fail("Did not fail to format non-empty directories in journal node.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Can't format the storage directory because the current "
+ "directory is not empty.", ioe);
}
} |
public void sendResponse(Response response) {
Payload convert = GrpcUtils.convert(response);
payloadStreamObserver.onNext(convert);
} | @Test
void testSendResponse() {
connection.sendResponse(new HealthCheckResponse());
verify(payloadStreamObserver).onNext(any(Payload.class));
} |
@JsonIgnore
public Object getField(String key) {
return message.getField(key);
} | @Test
public void testGetField() throws Exception {
assertNull(messageSummary.getField("foo"));
message.addField("foo", "bar");
assertEquals("bar", messageSummary.getField("foo"));
} |
public static <T> T decodeFromByteArray(Coder<T> coder, byte[] encodedValue)
throws CoderException {
return decodeFromByteArray(coder, encodedValue, Coder.Context.OUTER);
} | @Test
public void testClosingCoderFailsWhenDecodingByteArrayInContext() throws Exception {
expectedException.expect(UnsupportedOperationException.class);
expectedException.expectMessage("Caller does not own the underlying");
CoderUtils.decodeFromByteArray(new ClosingCoder(), new byte[0], Context.NESTED);
} |
@Override
public StorageObject upload(final Path file, Local local, final BandwidthThrottle throttle, final StreamListener listener,
final TransferStatus status, final ConnectionCallback prompt) throws BackgroundException {
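// Above the configured size threshold, attempt multipart upload and fall back to a single upload on failure.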
if(this.threshold(status)) {
try {
return new S3MultipartUploadService(session, writer, acl).upload(file, local, throttle, listener, status, prompt);
}
catch(NotfoundException | InteroperabilityException e) {
log.warn(String.format("Failure %s using multipart upload. Fallback to single upload.", e));
status.append(false);
try {
return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt);
}
catch(BackgroundException f) {
log.warn(String.format("Failure %s using single upload. Throw original multipart failure %s", e, e));
throw e;
}
}
}
// Use single upload service
return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt);
} | @Test
public void testUploadZeroLength() throws Exception {
final S3ThresholdUploadService service = new S3ThresholdUploadService(session, new S3AccessControlListFeature(session), 5 * 1024L);
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final String name = new AlphanumericRandomStringService().random();
final Path test = new Path(container, name, EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
final byte[] random = RandomUtils.nextBytes(0);
IOUtils.write(random, local.getOutputStream(false));
final TransferStatus status = new TransferStatus();
status.setLength(random.length);
status.setMime("text/plain");
final BytecountStreamListener count = new BytecountStreamListener();
service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
count, status, new DisabledLoginCallback());
assertEquals(random.length, count.getSent());
assertTrue(status.isComplete());
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
final PathAttributes attributes = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test);
assertEquals(random.length, attributes.getSize());
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
} |
@Override
public Map<String, Metric> getMetrics() {
return metricRegistry.getMetrics();
} | @Test
public void shouldReturnTotalNumberOfRequestsAs3ForFail() {
HelloWorldService helloWorldService = mock(HelloWorldService.class);
Retry retry = Retry.of("metrics", RetryConfig.<String>custom()
.retryExceptions(Exception.class)
.maxAttempts(5)
.build());
given(helloWorldService.returnHelloWorld())
.willThrow(new HelloWorldException())
.willThrow(new HelloWorldException())
.willReturn("Success");
String result = Retry.decorateSupplier(retry, helloWorldService::returnHelloWorld).get();
assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(3);
assertThat(result).isEqualTo("Success");
} |
public SchemaMapping fromParquet(MessageType parquetSchema) {
List<Type> fields = parquetSchema.getFields();
List<TypeMapping> mappings = fromParquet(fields);
List<Field> arrowFields = fields(mappings);
return new SchemaMapping(new Schema(arrowFields), parquetSchema, mappings);
} | @Test
public void testParquetInt32TimeMillisToArrow() {
MessageType parquet = Types.buildMessage()
.addField(Types.optional(INT32)
.as(LogicalTypeAnnotation.timeType(false, MILLIS))
.named("a"))
.named("root");
Schema expected = new Schema(asList(field("a", new ArrowType.Time(TimeUnit.MILLISECOND, 32))));
Assert.assertEquals(expected, converter.fromParquet(parquet).getArrowSchema());
} |
@Override
public void onStreamRequest(StreamRequest req,
RequestContext requestContext,
Map<String, String> wireAttrs,
NextFilter<StreamRequest, StreamResponse> nextFilter)
{
disruptRequest(req, requestContext, wireAttrs, nextFilter);
} | @Test
public void testExecutorRejectExecution() throws Exception
{
final AtomicBoolean success = new AtomicBoolean(false);
final CountDownLatch latch = new CountDownLatch(1);
ExecutorService rejectedExecutor = EasyMock.createStrictMock(ExecutorService.class);
rejectedExecutor.execute(EasyMock.anyObject(Runnable.class));
EasyMock.expectLastCall().andAnswer(() -> {
success.set(true);
latch.countDown();
throw new RejectedExecutionException();
});
EasyMock.replay(rejectedExecutor);
final RequestContext requestContext = new RequestContext();
requestContext.putLocalAttr(DISRUPT_CONTEXT_KEY, DisruptContexts.error(REQUEST_LATENCY));
final DisruptFilter filter = new DisruptFilter(_scheduler, rejectedExecutor, REQUEST_TIMEOUT, _clock);
final NextFilter<StreamRequest, StreamResponse> next = new NextFilter<StreamRequest, StreamResponse>()
{
@Override
public void onRequest(StreamRequest restRequest, RequestContext requestContext, Map<String, String> wireAttrs)
{
success.set(false);
latch.countDown();
}
@Override
public void onResponse(StreamResponse restResponse, RequestContext requestContext, Map<String, String> wireAttrs)
{
success.set(false);
latch.countDown();
}
@Override
public void onError(Throwable ex, RequestContext requestContext, Map<String, String> wireAttrs)
{
success.set(false);
latch.countDown();
}
};
filter.onStreamRequest(new StreamRequestBuilder(
new URI(URI)).build(EntityStreams.emptyStream()),
requestContext,
Collections.emptyMap(), next);
Assert.assertTrue(latch.await(TEST_TIMEOUT, TimeUnit.MILLISECONDS), "Missing NextFilter invocation");
Assert.assertTrue(success.get(), "Unexpected method invocation");
EasyMock.verify(rejectedExecutor);
EasyMock.reset(rejectedExecutor);
} |
@Override
public boolean tableExists(String dbName, String tblName) {
ConnectorMetadata metadata = metadataOfDb(dbName);
return metadata.tableExists(dbName, tblName);
} | @Test
void testTableExists() {
MockedJDBCMetadata mockedJDBCMetadata = new MockedJDBCMetadata(new HashMap<>());
assertTrue(mockedJDBCMetadata.tableExists("db1", "tbl1"));
} |
@Override
public Optional<ErrorResponse> filter(DiscFilterRequest request) {
try {
Optional<ResourceNameAndAction> resourceMapping =
requestResourceMapper.getResourceNameAndAction(request);
log.log(Level.FINE, () -> String.format("Resource mapping for '%s': %s", request, resourceMapping));
if (resourceMapping.isEmpty()) {
incrementAcceptedMetrics(request, false, Optional.empty());
return Optional.empty();
}
Result result = checkAccessAllowed(request, resourceMapping.get());
AuthorizationResult.Type resultType = result.zpeResult.type();
setAttribute(request, RESULT_ATTRIBUTE, resultType.name());
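// Only an ALLOW result lets the request continue down the filter chain; any other result is rejected with 403.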
if (resultType == AuthorizationResult.Type.ALLOW) {
populateRequestWithResult(request, result);
incrementAcceptedMetrics(request, true, Optional.of(result));
return Optional.empty();
}
log.log(Level.FINE, () -> String.format("Forbidden (403) for '%s': %s", request, resultType.name()));
incrementRejectedMetrics(request, FORBIDDEN, resultType.name(), Optional.of(result));
return Optional.of(new ErrorResponse(FORBIDDEN, "Access forbidden: " + resultType.getDescription()));
} catch (IllegalArgumentException e) {
log.log(Level.FINE, () -> String.format("Unauthorized (401) for '%s': %s", request, e.getMessage()));
incrementRejectedMetrics(request, UNAUTHORIZED, "Unauthorized", Optional.empty());
return Optional.of(new ErrorResponse(UNAUTHORIZED, e.getMessage()));
}
} | @Test
void accepts_request_with_access_token() {
AthenzAuthorizationFilter filter = createFilter(new AllowingZpe(), List.of());
MockResponseHandler responseHandler = new MockResponseHandler();
DiscFilterRequest request = createRequest(null, ACCESS_TOKEN, USER_IDENTITY_CERTIFICATE);
filter.filter(request, responseHandler);
assertAuthorizationResult(request, Type.ALLOW);
assertRequestNotFiltered(responseHandler);
assertMatchedCredentialType(request, EnabledCredentials.ACCESS_TOKEN);
assertMatchedRole(request, ROLE);
} |
@SuppressWarnings("MethodMayBeStatic")
@Udf(description = "The 2 input points should be specified as (lat, lon) pairs, measured"
+ " in decimal degrees. An optional fifth parameter allows to specify either \"MI\" (miles)"
+ " or \"KM\" (kilometers) as the desired unit for the output measurement. Default is KM.")
public Double geoDistance(
@UdfParameter(description = "The latitude of the first point in decimal degrees.")
final double lat1,
@UdfParameter(description = "The longitude of the first point in decimal degrees.")
final double lon1,
@UdfParameter(description = "The latitude of the second point in decimal degrees.")
final double lat2,
@UdfParameter(description = "The longitude of the second point in decimal degrees.")
final double lon2,
@UdfParameter(description = "The units for the return value. Either MILES or KM.")
final String units
) {
validateLatLonValues(lat1, lon1, lat2, lon2);
final double chosenRadius = selectEarthRadiusToUse(units);
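// Haversine formula: a = hav(deltaLat) + cos(lat1) * cos(lat2) * hav(deltaLon)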
final double deltaLat = Math.toRadians(lat2 - lat1);
final double deltaLon = Math.toRadians(lon2 - lon1);
final double lat1Radians = Math.toRadians(lat1);
final double lat2Radians = Math.toRadians(lat2);
final double a =
haversin(deltaLat) + haversin(deltaLon) * Math.cos(lat1Radians) * Math.cos(lat2Radians);
final double distanceInRadians = 2 * Math.asin(Math.sqrt(a));
return distanceInRadians * chosenRadius;
} | @Test
public void shouldFailInvalidUnitOfMeasure() {
// When:
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> distanceUdf.geoDistance(37.4439, -122.1663, 51.5257, -0.1122, "Parsecs")
);
// Then:
assertThat(e.getMessage(), containsString(
"GeoDistance function units parameter must be one of"));
} |
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return true;
}
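// Probe existence with an HTTP HEAD request; on access-denied or interoperability failures, fall back to searching the parent directory listing.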
try {
try {
try {
final HttpHead request = new HttpHead(new DAVPathEncoder().encode(file));
for(Header header : this.headers()) {
request.addHeader(header);
}
return session.getClient().execute(request, new ExistsResponseHandler());
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map(e, file);
}
}
catch(AccessDeniedException | InteroperabilityException e) {
// 300 Multiple Choices
return new DefaultFindFeature(session).find(file, listener);
}
}
catch(AccessDeniedException e) {
// Parent directory may not be accessible. Issue #5662
return true;
}
catch(LoginFailureException | NotfoundException e) {
return false;
}
} | @Test
public void testFind() throws Exception {
assertTrue(new DAVFindFeature(session).find(new DefaultHomeFinderService(session).find()));
} |
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
for (final Thread.State state : Thread.State.values()) {
gauges.put(name(state.toString().toLowerCase(), "count"),
(Gauge<Object>) () -> getThreadCount(state));
}
gauges.put("count", (Gauge<Integer>) threads::getThreadCount);
gauges.put("daemon.count", (Gauge<Integer>) threads::getDaemonThreadCount);
gauges.put("peak.count", (Gauge<Integer>) threads::getPeakThreadCount);
gauges.put("total_started.count", (Gauge<Long>) threads::getTotalStartedThreadCount);
gauges.put("deadlock.count", (Gauge<Integer>) () -> deadlockDetector.getDeadlockedThreads().size());
gauges.put("deadlocks", (Gauge<Set<String>>) deadlockDetector::getDeadlockedThreads);
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasAGaugeForTotalStartedThreadsCount() {
assertThat(((Gauge<?>) gauges.getMetrics().get("total_started.count")).getValue())
.isEqualTo(42L);
} |
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
super.notifyCheckpointComplete(checkpointId);
sourceReader.notifyCheckpointComplete(checkpointId);
} | @Test
void testNotifyCheckpointComplete() throws Exception {
StateInitializationContext stateContext = context.createStateContext();
operator.initializeState(stateContext);
operator.open();
operator.snapshotState(new StateSnapshotContextSynchronousImpl(100L, 100L));
operator.notifyCheckpointComplete(100L);
assertThat(mockSourceReader.getCompletedCheckpoints().get(0)).isEqualTo(100L);
} |
@Override
protected SSHClient connect(final ProxyFinder proxy, final HostKeyCallback key, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
final DefaultConfig configuration = new DefaultConfig();
if("zlib".equals(preferences.getProperty("ssh.compression"))) {
configuration.setCompressionFactories(Arrays.asList(
new JcraftDelayedZlibCompression.Factory(),
new JcraftZlibCompression.Factory(),
new NoneCompression.Factory()));
}
else {
configuration.setCompressionFactories(Collections.singletonList(new NoneCompression.Factory()));
}
configuration.setVersion(new PreferencesUseragentProvider().get());
final KeepAliveProvider heartbeat;
if(preferences.getProperty("ssh.heartbeat.provider").equals("keep-alive")) {
heartbeat = KeepAliveProvider.KEEP_ALIVE;
}
else {
heartbeat = KeepAliveProvider.HEARTBEAT;
}
configuration.setKeepAliveProvider(heartbeat);
configuration.setCipherFactories(this.lowestPriorityForCBC(configuration.getCipherFactories()));
return this.connect(key, prompt, configuration);
} | @Test(expected = LoginCanceledException.class)
public void testConnectNoValidCredentials() throws Exception {
final Host host = new Host(new SFTPProtocol(), "test.cyberduck.ch", new Credentials("user", "p")) {
@Override
public String getProperty(final String key) {
if("ssh.authentication.agent.enable".equals(key)) {
return String.valueOf(false);
}
return null;
}
};
final Session session = new SFTPSession(host, new DisabledX509TrustManager(), new DefaultX509KeyManager());
final LoginConnectionService login = new LoginConnectionService(new DisabledLoginCallback() {
@Override
public Credentials prompt(final Host bookmark, String username, String title, String reason, LoginOptions options) throws LoginCanceledException {
throw new LoginCanceledException();
}
}, new DisabledHostKeyCallback(), new DisabledPasswordStore(),
new DisabledProgressListener());
login.connect(session, new DisabledCancelCallback());
} |
@Override
public void fromPB(EncryptionKeyPB pb, KeyMgr mgr) {
super.fromPB(pb, mgr);
if (pb.algorithm == null) {
throw new IllegalArgumentException("no algorithm in EncryptionKeyPB for NormalKey id:" + id);
}
algorithm = pb.algorithm;
if (pb.plainKey != null) {
plainKey = pb.plainKey;
} else if (pb.encryptedKey != null) {
encryptedKey = pb.encryptedKey;
} else {
throw new IllegalArgumentException("no encryptedKey in EncryptionKeyPB for NormalKey id:" + id);
}
} | @Test
public void testFromPB() {
EncryptionKeyPB pb = new EncryptionKeyPB();
pb.id = 12345L;
pb.createTime = System.currentTimeMillis();
pb.algorithm = EncryptionAlgorithmPB.AES_128;
pb.encryptedKey = ((NormalKey) normalKey.generateKey()).getEncryptedKey();
NormalKey key = new NormalKey();
KeyMgr mgr = new KeyMgr();
key.fromPB(pb, mgr);
assertEquals(pb.id.longValue(), key.getId());
assertEquals(pb.createTime.longValue(), key.getCreateTime());
assertEquals(pb.algorithm, key.getAlgorithm());
assertArrayEquals(pb.encryptedKey, key.getEncryptedKey());
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
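// prediction[i][j] holds the staged prediction for row j after applying the first i+1 trees.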
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testKin8nmLAD() {
test(Loss.lad(), "kin8nm", Kin8nm.formula, Kin8nm.data, 0.1814);
} |
public synchronized File buildPackage(HeliumPackage pkg,
boolean rebuild,
boolean recopyLocalModule) throws IOException {
if (pkg == null) {
return null;
}
String[] moduleNameVersion = getNpmModuleNameAndVersion(pkg);
if (moduleNameVersion == null) {
LOGGER.warn("Can't get module name and version of package {}", pkg.getName());
return null;
}
String pkgName = pkg.getName();
File bundleDir = getHeliumPackageDirectory(pkgName);
File bundleCache = getHeliumPackageBundleCache(pkgName);
if (!rebuild && bundleCache.exists() && !bundleCache.isDirectory()) {
return bundleCache;
}
// 0. install node, npm (should be called before `downloadPackage`)
try {
installNodeAndNpm();
} catch (TaskRunnerException e) {
throw new IOException(e);
}
// 1. prepare directories
if (!heliumLocalRepoDirectory.exists() || !heliumLocalRepoDirectory.isDirectory()) {
FileUtils.deleteQuietly(heliumLocalRepoDirectory);
FileUtils.forceMkdir(heliumLocalRepoDirectory);
}
FrontendPluginFactory fpf = new FrontendPluginFactory(
bundleDir, nodeInstallationDirectory);
// resources: webpack.js, package.json
String templateWebpackConfig = Resources.toString(
Resources.getResource("helium/webpack.config.js"), StandardCharsets.UTF_8);
String templatePackageJson = Resources.toString(
Resources.getResource("helium/" + PACKAGE_JSON), StandardCharsets.UTF_8);
// 2. download helium package using `npm pack`
String mainFileName = null;
try {
mainFileName = downloadPackage(pkg, moduleNameVersion, bundleDir,
templateWebpackConfig, templatePackageJson, fpf);
} catch (TaskRunnerException e) {
throw new IOException(e);
}
// 3. prepare bundle source
prepareSource(pkg, moduleNameVersion, mainFileName);
// 4. install node and local modules for a bundle
copyFrameworkModulesToInstallPath(recopyLocalModule); // should copy local modules first
installNodeModules(fpf);
// 5. let's bundle and update cache
File heliumBundle = bundleHeliumPackage(fpf, bundleDir);
bundleCache.delete();
FileUtils.moveFile(heliumBundle, bundleCache);
return bundleCache;
} | @Test
void switchVersion() throws IOException, TaskRunnerException {
URL res = Resources.getResource("helium/webpack.config.js");
String resDir = new File(res.getFile()).getParent();
HeliumPackage pkgV1 =
newHeliumPackage(
HeliumType.VISUALIZATION,
"zeppelin-bubblechart",
"zeppelin-bubblechart",
"zeppelin-bubblechart@0.0.3",
"",
null,
"license",
"icon");
HeliumPackage pkgV2 =
newHeliumPackage(
HeliumType.VISUALIZATION,
"zeppelin-bubblechart",
"zeppelin-bubblechart",
"zeppelin-bubblechart@0.0.1",
"",
null,
"license",
"icon");
List<HeliumPackage> pkgsV1 = new LinkedList<>();
pkgsV1.add(pkgV1);
List<HeliumPackage> pkgsV2 = new LinkedList<>();
pkgsV2.add(pkgV2);
File bundle1 = hbf.buildPackage(pkgV1, true, true);
File bundle2 = hbf.buildPackage(pkgV2, true, true);
assertNotEquals(bundle1.lastModified(), bundle2.lastModified());
} |
@Override
public SchemaPath getSchemaPath(TopicPath topicPath) throws IOException {
Topic topic = pubsub.projects().topics().get(topicPath.getPath()).execute();
if (topic.getSchemaSettings() == null) {
return null;
}
String schemaPath = topic.getSchemaSettings().getSchema();
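// A topic whose schema was deleted still carries SchemaSettings pointing at the deleted-schema sentinel path.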
if (schemaPath.equals(SchemaPath.DELETED_SCHEMA_PATH)) {
return null;
}
return PubsubClient.schemaPathFromPath(schemaPath);
} | @Test
public void testGetSchemaPath() throws IOException {
TopicPath topicDoesNotExist =
PubsubClient.topicPathFromPath("projects/testProject/topics/idontexist");
TopicPath topicExistsDeletedSchema =
PubsubClient.topicPathFromPath("projects/testProject/topics/deletedSchema");
TopicPath topicExistsNoSchema =
PubsubClient.topicPathFromPath("projects/testProject/topics/noSchema");
TopicPath topicExistsSchema =
PubsubClient.topicPathFromPath("projects/testProject/topics/topicWithSchema");
when(mockPubsub.projects().topics().get(topicDoesNotExist.getPath()).execute())
.thenThrow(
new IOException(
String.format("topic does not exist: %s", topicDoesNotExist.getPath())));
when(mockPubsub.projects().topics().get(topicExistsDeletedSchema.getPath()).execute())
.thenReturn(
new Topic()
.setName(topicExistsDeletedSchema.getName())
.setSchemaSettings(
new SchemaSettings().setSchema(PubsubClient.SchemaPath.DELETED_SCHEMA_PATH)));
when(mockPubsub.projects().topics().get(topicExistsNoSchema.getPath()).execute())
.thenReturn(new Topic().setName(topicExistsNoSchema.getName()));
when(mockPubsub.projects().topics().get(topicExistsSchema.getPath()).execute())
.thenReturn(
new Topic()
.setName(topicExistsSchema.getName())
.setSchemaSettings(new SchemaSettings().setSchema(SCHEMA.getPath())));
client = new PubsubJsonClient(null, null, mockPubsub);
assertThrows(
"topic does not exist", IOException.class, () -> client.getSchemaPath(topicDoesNotExist));
assertNull("schema for topic is deleted", client.getSchemaPath(topicExistsDeletedSchema));
assertNull("topic has no schema", client.getSchemaPath(topicExistsNoSchema));
assertEquals(SCHEMA.getPath(), client.getSchemaPath(topicExistsSchema).getPath());
} |
@Override
public void subscribe(Collection<String> topics) {
subscribeInternal(topics, Optional.empty());
} | @Test
public void testSubscriptionOnEmptyTopic() {
consumer = newConsumer();
String emptyTopic = " ";
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(emptyTopic)));
} |
@Override
public void accept(ServerWebExchange exchange, CachedResponse cachedResponse) {
ServerHttpResponse response = exchange.getResponse();
response.getHeaders().clear();
response.getHeaders().addAll(cachedResponse.headers());
} | @Test
void headersFromResponseAreDropped() {
SetResponseHeadersAfterCacheExchangeMutator toTest = new SetResponseHeadersAfterCacheExchangeMutator();
inputExchange.getResponse().getHeaders().set("X-Header-1", "Value-original");
CachedResponse cachedResponse = new CachedResponse.Builder(HttpStatus.OK).build();
toTest.accept(inputExchange, cachedResponse);
Assertions.assertThat(inputExchange.getResponse().getHeaders()).doesNotContainKey("X-Header-1");
} |
public static void register(Observer observer) {
register(SubjectType.SPRING_CONTENT_REFRESHED.name(), observer);
} | @Test
public void testSubjectTypeEnumRegister() {
AbstractSubjectCenter.register(AbstractSubjectCenter.SubjectType.THREAD_POOL_DYNAMIC_REFRESH, subjectNotifyListener);
List<Observer> list = OBSERVERS_MAP.get(AbstractSubjectCenter.SubjectType.THREAD_POOL_DYNAMIC_REFRESH.name());
Assert.assertNotNull(list);
Assert.assertEquals(1, list.size());
Assert.assertSame(subjectNotifyListener, list.get(0));
OBSERVERS_MAP.clear();
} |
public static List<String> getPossibleMountPoints(String path) throws InvalidPathException {
String basePath = cleanPath(path);
List<String> paths = new ArrayList<>();
if ((basePath != null) && !basePath.equals(AlluxioURI.SEPARATOR)) {
paths.add(basePath);
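// Walk up the ancestors, prepending each one so the list runs from the root-most path down to the full path.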
String parent = getParent(path);
while (!parent.equals(AlluxioURI.SEPARATOR)) {
paths.add(0, parent);
parent = getParent(parent);
}
}
return paths;
} | @Test
public void getPossibleMountPointsException() throws InvalidPathException {
mException.expect(InvalidPathException.class);
PathUtils.getPossibleMountPoints("");
} |
public int getNumLocks() {
return mNumLocks;
} | @Test
public void testConstruct() {
create(1023, 128);
assertEquals(128, mLocks.getNumLocks());
create(1023, 127);
assertEquals(128, mLocks.getNumLocks());
create(513, 65);
assertEquals(128, mLocks.getNumLocks());
} |
public VersionMatchResult matches(DeploymentInfo info) {
// Skip if no manifest configuration
if(info.getManifest() == null || info.getManifest().size() == 0) {
return VersionMatchResult.SKIPPED;
}
for (ManifestInfo manifest: info.getManifest()) {
VersionMatchResult result = match(manifest);
if(VersionMatchResult.MATCHED.equals(result)){
LOGGER.debug("Matched {} with {}", this, manifest);
return VersionMatchResult.MATCHED;
}
if(VersionMatchResult.REJECTED.equals(result)){
LOGGER.debug("Rejected {} with {}", this, manifest);
return VersionMatchResult.REJECTED;
}
}
// There were no matches (maybe another matcher will pass)
return VersionMatchResult.SKIPPED;
} | @Test
public void testFailedEmptyArtifactInfo() throws IOException {
Set<MavenInfo> maven = new HashSet<MavenInfo>();
ManifestInfo manifest = new ManifestInfo(null);
DeploymentInfo info = new DeploymentInfo(maven, Collections.singleton(manifest));
System.err.println(info);
PluginMatcher p = new PluginMatcher(NotMatchingPlugin.class);
assertEquals("Failed Matching",VersionMatchResult.REJECTED, p.matches(info));
} |
static COSName mapPNGRenderIntent(int renderIntent)
{
COSName value;
switch (renderIntent)
{
case 0:
value = COSName.PERCEPTUAL;
break;
case 1:
value = COSName.RELATIVE_COLORIMETRIC;
break;
case 2:
value = COSName.SATURATION;
break;
case 3:
value = COSName.ABSOLUTE_COLORIMETRIC;
break;
default:
value = null;
break;
}
return value;
} | @Test
void testMapPNGRenderIntent()
{
assertEquals(COSName.PERCEPTUAL, PNGConverter.mapPNGRenderIntent(0));
assertEquals(COSName.RELATIVE_COLORIMETRIC, PNGConverter.mapPNGRenderIntent(1));
assertEquals(COSName.SATURATION, PNGConverter.mapPNGRenderIntent(2));
assertEquals(COSName.ABSOLUTE_COLORIMETRIC, PNGConverter.mapPNGRenderIntent(3));
assertNull(PNGConverter.mapPNGRenderIntent(-1));
assertNull(PNGConverter.mapPNGRenderIntent(4));
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldThrowIfNotEnoughValuesSuppliedWithNoSchema() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
ImmutableList.of(),
ImmutableList.of(
new LongLiteral(1L))
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
);
// Then:
assertThat(e.getCause(), (hasMessage(containsString("Expected a value for each column"))));
} |
@Override
public String toString() {
return toStringHelper(getClass())
.add("version", Byte.toString(version))
.add("headerLength", Byte.toString(headerLength))
.add("diffServ", Byte.toString(diffServ))
.add("totalLength", Short.toString(totalLength))
.add("identification", Short.toString(identification))
.add("flags", Byte.toString(flags))
.add("fragmentOffset", Short.toString(fragmentOffset))
.add("ttl", Byte.toString(ttl))
.add("protocol", Byte.toString(protocol))
.add("checksum", Short.toString(checksum))
.add("sourceAddress", Integer.toString(sourceAddress))
.add("destinationAddress", Integer.toString(destinationAddress))
.add("options", Arrays.toString(options))
.add("isTruncated", Boolean.toString(isTruncated))
.toString();
} | @Test
public void testToStringIPv4() throws Exception {
IPv4 ipv4 = deserializer.deserialize(headerBytes, 0, headerBytes.length);
String str = ipv4.toString();
assertTrue(StringUtils.contains(str, "version=" + VERSION));
assertTrue(StringUtils.contains(str, "headerLength=" + HEADER_LENGTH));
assertTrue(StringUtils.contains(str, "diffServ=" + DIFF_SERV));
assertTrue(StringUtils.contains(str, "totalLength=" + TOTAL_LENGTH));
assertTrue(StringUtils.contains(str, "identification=" + IDENTIFICATION));
assertTrue(StringUtils.contains(str, "flags=" + FLAGS));
assertTrue(StringUtils.contains(str, "fragmentOffset=" + FRAGMENT_OFFSET));
assertTrue(StringUtils.contains(str, "ttl=" + TTL));
assertTrue(StringUtils.contains(str, "protocol=" + PROTOCOL));
assertTrue(StringUtils.contains(str, "checksum=" + CHECKSUM));
assertTrue(StringUtils.contains(str, "sourceAddress=" + SOURCE_ADDRESS));
assertTrue(StringUtils.contains(str, "destinationAddress=" + DESTINATION_ADDRESS));
} |
@Override
public void contextDestroyed(ServletContextEvent event) {
if (!instanceEnabled) {
return;
}
// clean up before the webapp is removed, in case removal alone does not suffice
SESSION_MAP_BY_ID.clear();
SESSION_COUNT.set(0);
// issue 665: in WildFly 10.1.0, the MonitoringFilter may never be initialized neither destroyed.
// For this case, it is needed to stop here the JdbcWrapper initialized in contextInitialized
JdbcWrapper.SINGLETON.stop();
// issue 878: NPE at net.bull.javamelody.JspWrapper.createHttpRequestWrapper
if (event.getServletContext().getClass().getName().startsWith("io.undertow")) {
// issue 848: NPE after SpringBoot hot restart
Parameters.initialize((ServletContext) null);
}
LOG.debug("JavaMelody listener destroy done");
} | @Test
public void testContextDestroyed() {
final ServletContext servletContext = createNiceMock(ServletContext.class);
final ServletContextEvent servletContextEvent = new ServletContextEvent(servletContext);
replay(servletContext);
sessionListener.sessionCreated(createSessionEvent());
sessionListener.contextDestroyed(servletContextEvent);
verify(servletContext);
if (!SessionListener.getAllSessionsInformations().isEmpty()) {
fail("contextDestroyed");
}
if (SessionListener.getSessionCount() != 0) {
fail("contextDestroyed");
}
} |
@Override
public PageResult<DiyPageDO> getDiyPagePage(DiyPagePageReqVO pageReqVO) {
return diyPageMapper.selectPage(pageReqVO);
} | @Test
@Disabled // TODO replace the null values with the values you need, then remove the @Disabled annotation
public void testGetDiyPagePage() {
// mock data
DiyPageDO dbDiyPage = randomPojo(DiyPageDO.class, o -> { // expected to be found by the query
o.setName(null);
o.setCreateTime(null);
});
diyPageMapper.insert(dbDiyPage);
// test: name does not match
diyPageMapper.insert(cloneIgnoreId(dbDiyPage, o -> o.setName(null)));
// test: createTime does not match
diyPageMapper.insert(cloneIgnoreId(dbDiyPage, o -> o.setCreateTime(null)));
// prepare parameters
DiyPagePageReqVO reqVO = new DiyPagePageReqVO();
reqVO.setName(null);
reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
// invoke
PageResult<DiyPageDO> pageResult = diyPageService.getDiyPagePage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbDiyPage, pageResult.getList().get(0));
} |
@SuppressWarnings("unchecked")
public static <K, V> Map<K, V> edit(Map<K, V> map, Editor<Entry<K, V>> editor) {
if (null == map || null == editor) {
return map;
}
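// Try to create the same Map implementation as the input; fall back to HashMap if it cannot be instantiated.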
Map<K, V> map2 = ReflectUtil.newInstanceIfPossible(map.getClass());
if (null == map2) {
map2 = new HashMap<>(map.size(), 1f);
}
if (isEmpty(map)) {
return map2;
}
// issue#3162@Github: a constructor that puts entries would leave the newly created map pre-populated, so clear it here
if (false == map2.isEmpty()) {
map2.clear();
}
Entry<K, V> modified;
for (Entry<K, V> entry : map.entrySet()) {
modified = editor.edit(entry);
if (null != modified) {
map2.put(modified.getKey(), modified.getValue());
}
}
return map2;
} | @Test
public void editTest() {
final Map<String, String> map = MapUtil.newHashMap();
map.put("a", "1");
map.put("b", "2");
map.put("c", "3");
map.put("d", "4");
final Map<String, String> map2 = MapUtil.edit(map, t -> {
// multiply each value by 10 (by appending "0" to the string)
t.setValue(t.getValue() + "0");
return t;
});
assertEquals(4, map2.size());
assertEquals("10", map2.get("a"));
assertEquals("20", map2.get("b"));
assertEquals("30", map2.get("c"));
assertEquals("40", map2.get("d"));
} |
@Override
public void customize(WebServerFactory server) {
// When running in an IDE or with ./mvnw spring-boot:run, set location of the static web assets.
setLocationForStaticAssets(server);
} | @Test
void shouldCustomizeServletContainer() {
env.setActiveProfiles(JHipsterConstants.SPRING_PROFILE_PRODUCTION);
UndertowServletWebServerFactory container = new UndertowServletWebServerFactory();
webConfigurer.customize(container);
assertThat(container.getMimeMappings().get("abs")).isEqualTo("audio/x-mpeg");
assertThat(container.getMimeMappings().get("html")).isEqualTo("text/html");
assertThat(container.getMimeMappings().get("json")).isEqualTo("application/json");
if (container.getDocumentRoot() != null) {
assertThat(container.getDocumentRoot()).isEqualTo(new File("target/classes/static/"));
}
} |
@Override
public boolean equals(Object o) {
if (!(o instanceof OriginName)) {
return false;
}
OriginName that = (OriginName) o;
return Objects.equals(niwsClientName, that.niwsClientName)
&& Objects.equals(target, that.target)
&& Objects.equals(authority, that.authority);
} | @Test
void equals() {
OriginName name1 = OriginName.fromVipAndApp("woodly-doodly", "westerndigital");
OriginName name2 = OriginName.fromVipAndApp("woodly-doodly", "westerndigital", "woodly-doodly");
assertEquals(name1, name2);
assertEquals(name1.hashCode(), name2.hashCode());
} |
public synchronized boolean createCollection(String collectionName)
throws MongoDBResourceManagerException {
LOG.info("Creating collection using collectionName '{}'.", collectionName);
try {
// Check to see if the Collection exists
if (collectionExists(collectionName)) {
return false;
}
// The Collection does not exist in the database, create it and return true.
getDatabase().createCollection(collectionName);
} catch (Exception e) {
throw new MongoDBResourceManagerException("Error creating collection.", e);
}
LOG.info("Successfully created collection {}.{}", databaseName, collectionName);
return true;
} | @Test
public void testCreateCollectionShouldThrowErrorWhenCollectionNameIsInvalid() {
assertThrows(
MongoDBResourceManagerException.class, () -> testManager.createCollection("invalid$name"));
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
ctx.tellNext(msg, config.getMessageTypes().contains(msg.getType()) ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
} | @Test
void givenAttributesUpdated_whenOnMsg_then_False() {
// GIVEN
TbMsg msg = getTbMsg(deviceId, ATTRIBUTES_UPDATED);
// WHEN
node.onMsg(ctx, msg);
// THEN
ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctx, times(1)).tellNext(newMsgCaptor.capture(), eq(TbNodeConnectionType.FALSE));
verify(ctx, never()).tellFailure(any(), any());
TbMsg newMsg = newMsgCaptor.getValue();
assertThat(newMsg).isNotNull();
assertThat(newMsg).isSameAs(msg);
} |
public static void register(final Runnable task)
{
register("INT", task);
} | @Test
void throwsNullPointerExceptionIfRunnableIsNull()
{
assertThrowsExactly(NullPointerException.class, () -> SigInt.register(null));
} |
static BlockStmt getDerivedFieldVariableDeclaration(final String variableName, final DerivedField derivedField) {
final MethodDeclaration methodDeclaration =
DERIVED_FIELD_TEMPLATE.getMethodsByName(GETKIEPMMLDERIVEDFIELD).get(0).clone();
final BlockStmt derivedFieldBody =
methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration)));
final VariableDeclarator variableDeclarator =
getVariableDeclarator(derivedFieldBody, DERIVED_FIELD).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, DERIVED_FIELD, derivedFieldBody)));
variableDeclarator.setName(variableName);
final BlockStmt toReturn = new BlockStmt();
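// Emit the statements that build the nested KiePMMLExpression first, then append the derived-field declaration itself.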
String nestedVariableName = String.format(VARIABLE_NAME_TEMPLATE, variableName, 0);
BlockStmt toAdd = getKiePMMLExpressionBlockStmt(nestedVariableName, derivedField.getExpression());
toAdd.getStatements().forEach(toReturn::addStatement);
final MethodCallExpr initializer = variableDeclarator.getInitializer()
.orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, DERIVED_FIELD, derivedFieldBody)))
.asMethodCallExpr();
final MethodCallExpr builder = getChainedMethodCallExprFrom("builder", initializer);
final Expression dataTypeExpression = getExpressionForDataType(derivedField.getDataType());
final Expression opTypeExpression = getExpressionForOpType(derivedField.getOpType());
builder.setArgument(0, new StringLiteralExpr(derivedField.getName()));
builder.setArgument(2, dataTypeExpression);
builder.setArgument(3, opTypeExpression);
builder.setArgument(4, new NameExpr(nestedVariableName));
getChainedMethodCallExprFrom("withDisplayName", initializer).setArgument(0, getExpressionForObject(derivedField.getDisplayName()));
derivedFieldBody.getStatements().forEach(toReturn::addStatement);
return toReturn;
} | @Test
void getDerivedFieldVariableDeclarationWithApply() throws IOException {
final String variableName = "variableName";
Constant constant = new Constant();
constant.setValue(value1);
FieldRef fieldRef = new FieldRef();
fieldRef.setField("FIELD_REF");
Apply apply = new Apply();
apply.setFunction("/");
apply.addExpressions(constant, fieldRef);
DerivedField derivedField = new DerivedField();
derivedField.setName(PARAM_1);
derivedField.setDataType(DataType.DOUBLE);
derivedField.setOpType(OpType.CONTINUOUS);
derivedField.setExpression(apply);
String dataType = getDATA_TYPEString(derivedField.getDataType());
String opType = getOP_TYPEString(derivedField.getOpType());
BlockStmt retrieved = KiePMMLDerivedFieldFactory.getDerivedFieldVariableDeclaration(variableName, derivedField);
String text = getFileContent(TEST_03_SOURCE);
Statement expected = JavaParserUtils
.parseBlock(String.format(text,
constant.getValue(), fieldRef.getField(),
apply.getFunction(),
apply.getInvalidValueTreatment().value(),
variableName, derivedField.getName(),
dataType,
opType));
assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue();
List<Class<?>> imports = Arrays.asList(KiePMMLConstant.class,
KiePMMLFieldRef.class,
KiePMMLApply.class,
KiePMMLDerivedField.class,
Arrays.class,
Collections.class);
commonValidateCompilationWithImports(retrieved, imports);
} |
@Override
public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
final long now = time.milliseconds();
final Map<AclBindingFilter, KafkaFutureImpl<FilterResults>> futures = new HashMap<>();
final List<AclBindingFilter> aclBindingFiltersSent = new ArrayList<>();
final List<DeleteAclsFilter> deleteAclsFilters = new ArrayList<>();
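// Deduplicate the requested filters so each distinct filter is sent once and shares a single future.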
for (AclBindingFilter filter : filters) {
if (futures.get(filter) == null) {
aclBindingFiltersSent.add(filter);
deleteAclsFilters.add(DeleteAclsRequest.deleteAclsFilter(filter));
futures.put(filter, new KafkaFutureImpl<>());
}
}
final DeleteAclsRequestData data = new DeleteAclsRequestData().setFilters(deleteAclsFilters);
runnable.call(new Call("deleteAcls", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedNodeProvider()) {
@Override
DeleteAclsRequest.Builder createRequest(int timeoutMs) {
return new DeleteAclsRequest.Builder(data);
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
DeleteAclsResponse response = (DeleteAclsResponse) abstractResponse;
List<DeleteAclsResponseData.DeleteAclsFilterResult> results = response.filterResults();
Iterator<DeleteAclsResponseData.DeleteAclsFilterResult> iter = results.iterator();
for (AclBindingFilter bindingFilter : aclBindingFiltersSent) {
KafkaFutureImpl<FilterResults> future = futures.get(bindingFilter);
if (!iter.hasNext()) {
future.completeExceptionally(new UnknownServerException(
"The broker reported no deletion result for the given filter."));
} else {
DeleteAclsFilterResult filterResult = iter.next();
ApiError error = new ApiError(Errors.forCode(filterResult.errorCode()), filterResult.errorMessage());
if (error.isFailure()) {
future.completeExceptionally(error.exception());
} else {
List<FilterResult> filterResults = new ArrayList<>();
for (DeleteAclsMatchingAcl matchingAcl : filterResult.matchingAcls()) {
ApiError aclError = new ApiError(Errors.forCode(matchingAcl.errorCode()),
matchingAcl.errorMessage());
AclBinding aclBinding = DeleteAclsResponse.aclBinding(matchingAcl);
filterResults.add(new FilterResult(aclBinding, aclError.exception()));
}
future.complete(new FilterResults(filterResults));
}
}
}
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(futures.values(), throwable);
}
}, now);
return new DeleteAclsResult(new HashMap<>(futures));
} | @Test
public void testDeleteAcls() throws Exception {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// Test a call where one filter has an error.
env.kafkaClient().prepareResponse(new DeleteAclsResponse(new DeleteAclsResponseData()
.setThrottleTimeMs(0)
.setFilterResults(asList(
new DeleteAclsResponseData.DeleteAclsFilterResult()
.setMatchingAcls(asList(
DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE),
DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))),
new DeleteAclsResponseData.DeleteAclsFilterResult()
.setErrorCode(Errors.SECURITY_DISABLED.code())
.setErrorMessage("No security"))),
ApiKeys.DELETE_ACLS.latestVersion()));
DeleteAclsResult results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
Map<AclBindingFilter, KafkaFuture<FilterResults>> filterResults = results.values();
FilterResults filter1Results = filterResults.get(FILTER1).get();
assertNull(filter1Results.values().get(0).exception());
assertEquals(ACL1, filter1Results.values().get(0).binding());
assertNull(filter1Results.values().get(1).exception());
assertEquals(ACL2, filter1Results.values().get(1).binding());
TestUtils.assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class);
TestUtils.assertFutureError(results.all(), SecurityDisabledException.class);
// Test a call where one deletion result has an error.
env.kafkaClient().prepareResponse(new DeleteAclsResponse(new DeleteAclsResponseData()
.setThrottleTimeMs(0)
.setFilterResults(asList(
new DeleteAclsResponseData.DeleteAclsFilterResult()
.setMatchingAcls(asList(
DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE),
new DeleteAclsResponseData.DeleteAclsMatchingAcl()
.setErrorCode(Errors.SECURITY_DISABLED.code())
.setErrorMessage("No security")
.setPermissionType(AclPermissionType.ALLOW.code())
.setOperation(AclOperation.ALTER.code())
.setResourceType(ResourceType.CLUSTER.code())
.setPatternType(FILTER2.patternFilter().patternType().code()))),
new DeleteAclsResponseData.DeleteAclsFilterResult())),
ApiKeys.DELETE_ACLS.latestVersion()));
results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
assertTrue(results.values().get(FILTER2).get().values().isEmpty());
TestUtils.assertFutureError(results.all(), SecurityDisabledException.class);
// Test a call where there are no errors.
env.kafkaClient().prepareResponse(new DeleteAclsResponse(new DeleteAclsResponseData()
.setThrottleTimeMs(0)
.setFilterResults(asList(
new DeleteAclsResponseData.DeleteAclsFilterResult()
.setMatchingAcls(singletonList(DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE))),
new DeleteAclsResponseData.DeleteAclsFilterResult()
.setMatchingAcls(singletonList(DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))))),
ApiKeys.DELETE_ACLS.latestVersion()));
results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
Collection<AclBinding> deleted = results.all().get();
assertCollectionIs(deleted, ACL1, ACL2);
}
} |
public Flowable<V> takeElements() {
return ElementsStream.takeElements(queue::takeAsync);
} | @Test
public void testTakeElements() throws InterruptedException {
RBlockingQueueRx<Integer> queue = redisson.getBlockingQueue("test");
List<Integer> elements = new ArrayList<>();
queue.takeElements().subscribe(new Subscriber<Integer>() {
@Override
public void onSubscribe(Subscription s) {
s.request(4);
}
@Override
public void onNext(Integer t) {
elements.add(t);
}
@Override
public void onError(Throwable t) {
}
@Override
public void onComplete() {
}
});
for (int i = 0; i < 10; i++) {
sync(queue.add(i));
}
Thread.sleep(500);
assertThat(elements).containsExactly(0, 1, 2, 3);
} |
@Override
public void deleteUser(String username) {
String sql = "DELETE FROM users WHERE username=?";
try {
jt.update(sql, username);
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e.toString(), e);
throw e;
}
} | @Test
void testDeleteUser() {
externalUserPersistService.deleteUser("username");
String sql = "DELETE FROM users WHERE username=?";
Mockito.verify(jdbcTemplate).update(sql, "username");
} |
public static HttpResponseStatus parseLine(CharSequence line) {
return (line instanceof AsciiString) ? parseLine((AsciiString) line) : parseLine(line.toString());
} | @Test
public void parseLineStringJustCode() {
assertSame(HttpResponseStatus.OK, parseLine("200"));
} |
@Override
protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs,
File workingDir)
throws Exception {
int numInputSegments = segmentDirs.size();
_eventObserver.notifyProgress(pinotTaskConfig, "Converting segments: " + numInputSegments);
String taskType = pinotTaskConfig.getTaskType();
Map<String, String> configs = pinotTaskConfig.getConfigs();
LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
long startMillis = System.currentTimeMillis();
String realtimeTableName = configs.get(MinionConstants.TABLE_NAME_KEY);
String rawTableName = TableNameBuilder.extractRawTableName(realtimeTableName);
String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName);
TableConfig tableConfig = getTableConfig(offlineTableName);
Schema schema = getSchema(offlineTableName);
SegmentProcessorConfig.Builder segmentProcessorConfigBuilder =
new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);
// Time handler config
segmentProcessorConfigBuilder
.setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs));
// Partitioner config
segmentProcessorConfigBuilder
.setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs));
// Merge type
MergeType mergeType = MergeTaskUtils.getMergeType(configs);
// Handle legacy key
if (mergeType == null) {
String legacyMergeTypeStr = configs.get(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY);
if (legacyMergeTypeStr != null) {
mergeType = MergeType.valueOf(legacyMergeTypeStr.toUpperCase());
}
}
segmentProcessorConfigBuilder.setMergeType(mergeType);
// Aggregation types
segmentProcessorConfigBuilder.setAggregationTypes(MergeTaskUtils.getAggregationTypes(configs));
// Segment config
segmentProcessorConfigBuilder.setSegmentConfig(MergeTaskUtils.getSegmentConfig(configs));
// Progress observer
segmentProcessorConfigBuilder.setProgressObserver(p -> _eventObserver.notifyProgress(_pinotTaskConfig, p));
SegmentProcessorConfig segmentProcessorConfig = segmentProcessorConfigBuilder.build();
List<RecordReader> recordReaders = new ArrayList<>(numInputSegments);
int count = 1;
for (File segmentDir : segmentDirs) {
_eventObserver.notifyProgress(_pinotTaskConfig,
String.format("Creating RecordReader for: %s (%d out of %d)", segmentDir, count++, numInputSegments));
PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader();
// NOTE: Do not fill null field with default value to be consistent with other record readers
recordReader.init(segmentDir, null, null, true);
recordReaders.add(recordReader);
}
List<File> outputSegmentDirs;
try {
_eventObserver.notifyProgress(_pinotTaskConfig, "Generating segments");
outputSegmentDirs = new SegmentProcessorFramework(recordReaders, segmentProcessorConfig, workingDir).process();
} finally {
for (RecordReader recordReader : recordReaders) {
recordReader.close();
}
}
long endMillis = System.currentTimeMillis();
LOGGER.info("Finished task: {} with configs: {}. Total time: {}ms", taskType, configs, (endMillis - startMillis));
List<SegmentConversionResult> results = new ArrayList<>();
for (File outputSegmentDir : outputSegmentDirs) {
String outputSegmentName = outputSegmentDir.getName();
results.add(new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName)
.setTableNameWithType(offlineTableName).build());
}
return results;
} | @Test
public void testTimeFormatSDF()
throws Exception {
FileUtils.deleteQuietly(WORKING_DIR);
RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
new RealtimeToOfflineSegmentsTaskExecutor(null, null);
realtimeToOfflineSegmentsTaskExecutor.setMinionEventObserver(new MinionProgressObserver());
Map<String, String> configs = new HashMap<>();
configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME_SDF);
configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
PinotTaskConfig pinotTaskConfig =
new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
List<SegmentConversionResult> conversionResults =
realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirListSDF, WORKING_DIR);
assertEquals(conversionResults.size(), 1);
File resultingSegment = conversionResults.get(0).getFile();
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(resultingSegment);
assertEquals(segmentMetadata.getTotalDocs(), 3);
ColumnMetadata columnMetadataForT = segmentMetadata.getColumnMetadataFor(T_TRX);
assertEquals(columnMetadataForT.getCardinality(), 3);
assertTrue((int) columnMetadataForT.getMinValue() >= 2020091900);
assertTrue((int) columnMetadataForT.getMaxValue() < 2020092000);
} |
public static UnifiedDiff parseUnifiedDiff(InputStream stream) throws IOException, UnifiedDiffParserException {
UnifiedDiffReader parser = new UnifiedDiffReader(new BufferedReader(new InputStreamReader(stream)));
return parser.parse();
} | @Test
public void testParseIssue104() throws IOException {
UnifiedDiff diff = UnifiedDiffReader.parseUnifiedDiff(
UnifiedDiffReaderTest.class.getResourceAsStream("problem_diff_parsing_issue104.diff"));
assertThat(diff.getFiles().size()).isEqualTo(6);
final UnifiedDiffFile file = diff.getFiles().get(2);
assertThat(file.getFromFile()).isEqualTo("/dev/null");
assertThat(file.getToFile()).isEqualTo("doc/samba_data_tool_path.xml.in");
assertThat(file.getPatch().toString()).isEqualTo("Patch{deltas=[[ChangeDelta, position: 0, lines: [] to [@SAMBA_DATA_TOOL@]]]}");
assertThat(diff.getTail()).isEqualTo("2.14.4");
} |
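// Usage sketch (not from the source): parseUnifiedDiff only needs an InputStream, so an
// in-memory diff can be parsed the same way as the classpath resource in the test above.
String raw = "--- a/hello.txt\n"
    + "+++ b/hello.txt\n"
    + "@@ -1 +1 @@\n"
    + "-hello\n"
    + "+hello world\n";
UnifiedDiff inMemory = UnifiedDiffReader.parseUnifiedDiff(
    new java.io.ByteArrayInputStream(raw.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
// inMemory.getFiles() holds a single UnifiedDiffFile for hello.txt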
public Entry getChild(int index) {
return childEntries.get(index);
} | @Test
public void getsChildrenWithNoIndices() {
Entry top = entryWithName("top");
assertThat(top.getChild(), equalTo(top));
} |
public static Row toBeamRow(GenericRecord record, Schema schema, ConversionOptions options) {
List<Object> valuesInOrder =
schema.getFields().stream()
.map(
field -> {
try {
org.apache.avro.Schema.Field avroField =
record.getSchema().getField(field.getName());
Object value = avroField != null ? record.get(avroField.pos()) : null;
return convertAvroFormat(field.getType(), value, options);
} catch (Exception cause) {
throw new IllegalArgumentException(
"Error converting field " + field + ": " + cause.getMessage(), cause);
}
})
.collect(toList());
return Row.withSchema(schema).addValues(valuesInOrder).build();
} | @Test
public void testToBeamRow_row() {
Row beamRow = BigQueryUtils.toBeamRow(ROW_TYPE, BQ_ROW_ROW);
assertEquals(ROW_ROW, beamRow);
} |
@Override
public Pixel[] getPixels() {
return pixels;
} | @Test
void testGetPixels() {
try {
var field = FrameBuffer.class.getDeclaredField("pixels");
var pixels = new Pixel[FrameBuffer.HEIGHT * FrameBuffer.WIDTH];
Arrays.fill(pixels, Pixel.WHITE);
pixels[0] = Pixel.BLACK;
var frameBuffer = new FrameBuffer();
field.setAccessible(true);
field.set(frameBuffer, pixels);
assertEquals(pixels, frameBuffer.getPixels());
} catch (NoSuchFieldException | IllegalAccessException e) {
fail("Fail to modify field access.");
}
} |
@Override
public T deserialize(final String topic, final byte[] bytes) {
try {
if (bytes == null) {
return null;
}
// don't use the JsonSchemaConverter to read this data because
// we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
// which is not currently available in the standard converters
final JsonNode value = isJsonSchema
? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
: MAPPER.readTree(bytes);
final Object coerced = enforceFieldType(
"$",
new JsonValueContext(value, schema)
);
if (LOG.isTraceEnabled()) {
LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
}
return SerdeUtils.castToTargetType(coerced, targetType);
} catch (final Exception e) {
// Clear location in order to avoid logging data, for security reasons
if (e instanceof JsonParseException) {
((JsonParseException) e).clearLocation();
}
throw new SerializationException(
"Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
}
} | @Test
public void shouldThrowIfCanNotCoerceToInt() {
// Given:
final KsqlJsonDeserializer<Integer> deserializer =
givenDeserializerForSchema(Schema.OPTIONAL_INT32_SCHEMA, Integer.class);
final byte[] bytes = serializeJson(BooleanNode.valueOf(true));
// When:
final Exception e = assertThrows(
SerializationException.class,
() -> deserializer.deserialize(SOME_TOPIC, bytes)
);
// Then:
assertThat(e.getCause(), (hasMessage(startsWith(
"Can't convert type. sourceType: BooleanNode, requiredType: INTEGER"))));
} |
@SuppressWarnings("JdkObsolete")
void runNonGui(String testFile, String logFile, boolean remoteStart, String remoteHostsString, boolean generateReportDashboard)
throws ConfigurationException {
try {
File f = new File(testFile);
if (!f.exists() || !f.isFile()) {
throw new ConfigurationException("The file " + f.getAbsolutePath() + " doesn't exist or can't be opened");
}
FileServer.getFileServer().setBaseForScript(f);
HashTree tree = SaveService.loadTree(f);
@SuppressWarnings("deprecation") // Deliberate use of deprecated ctor
JMeterTreeModel treeModel = new JMeterTreeModel(new Object());// NOSONAR Create non-GUI version to avoid headless problems
JMeterTreeNode root = (JMeterTreeNode) treeModel.getRoot();
treeModel.addSubTree(tree, root);
// Hack to resolve ModuleControllers in non GUI mode
SearchByClass<ReplaceableController> replaceableControllers =
new SearchByClass<>(ReplaceableController.class);
tree.traverse(replaceableControllers);
Collection<ReplaceableController> replaceableControllersRes = replaceableControllers.getSearchResults();
for (ReplaceableController replaceableController : replaceableControllersRes) {
replaceableController.resolveReplacementSubTree(root);
}
// Ensure tree is interpreted (ReplaceableControllers are replaced)
// For GUI runs this is done in Start.java
HashTree clonedTree = convertSubTree(tree, true);
Summariser summariser = null;
String summariserName = JMeterUtils.getPropDefault("summariser.name", "");//$NON-NLS-1$
if (summariserName.length() > 0) {
log.info("Creating summariser <{}>", summariserName);
println("Creating summariser <" + summariserName + ">");
summariser = new Summariser(summariserName);
}
ResultCollector resultCollector = null;
if (logFile != null) {
resultCollector = new ResultCollector(summariser);
resultCollector.setFilename(logFile);
clonedTree.add(clonedTree.getArray()[0], resultCollector);
}
else {
// only add Summariser if it cannot be shared with the ResultCollector
if (summariser != null) {
clonedTree.add(clonedTree.getArray()[0], summariser);
}
}
if (deleteResultFile) {
SearchByClass<ResultCollector> resultListeners = new SearchByClass<>(ResultCollector.class);
clonedTree.traverse(resultListeners);
for (ResultCollector rc : resultListeners.getSearchResults()) {
File resultFile = new File(rc.getFilename());
if (resultFile.exists() && !resultFile.delete()) {
throw new IllegalStateException("Could not delete results file " + resultFile.getAbsolutePath()
+ "(canRead:" + resultFile.canRead() + ", canWrite:" + resultFile.canWrite() + ")");
}
}
}
ReportGenerator reportGenerator = null;
if (logFile != null && generateReportDashboard) {
reportGenerator = new ReportGenerator(logFile, resultCollector);
}
// Used for remote notification of threads start/stop, see BUG 54152
// Summariser uses this feature to correctly compute the number of threads
// when non-GUI mode is used
clonedTree.add(clonedTree.getArray()[0], new RemoteThreadsListenerTestElement());
List<JMeterEngine> engines = new ArrayList<>();
println("Created the tree successfully using "+testFile);
if (!remoteStart) {
JMeterEngine engine = new StandardJMeterEngine();
clonedTree.add(clonedTree.getArray()[0], new ListenToTest(
org.apache.jmeter.JMeter.ListenToTest.RunMode.LOCAL, false, reportGenerator));
engine.configure(clonedTree);
Instant now = Instant.now();
println("Starting standalone test @ "+ formatLikeDate(now) + " (" + now.toEpochMilli() + ')');
engines.add(engine);
engine.runTest();
} else {
java.util.StringTokenizer st = new java.util.StringTokenizer(remoteHostsString.trim(), ",");//$NON-NLS-1$
List<String> hosts = new ArrayList<>();
while (st.hasMoreElements()) {
hosts.add(((String) st.nextElement()).trim());
}
ListenToTest testListener = new ListenToTest(
org.apache.jmeter.JMeter.ListenToTest.RunMode.REMOTE, remoteStop, reportGenerator);
clonedTree.add(clonedTree.getArray()[0], testListener);
DistributedRunner distributedRunner=new DistributedRunner(this.remoteProps);
distributedRunner.setStdout(System.out); // NOSONAR
distributedRunner.setStdErr(System.err); // NOSONAR
distributedRunner.init(hosts, clonedTree);
engines.addAll(distributedRunner.getEngines());
testListener.setStartedRemoteEngines(engines);
distributedRunner.start();
}
startUdpDdaemon(engines);
} catch (ConfigurationException e) {
throw e;
} catch (Exception e) {
System.out.println("Error in NonGUIDriver " + e.toString());//NOSONAR
log.error("Error in NonGUIDriver", e);
throw new ConfigurationException("Error in NonGUIDriver " + e.getMessage(), e);
}
} | @Test
void testFailureWhenJmxDoesNotExist() {
JMeter jmeter = new JMeter();
try {
jmeter.runNonGui("testPlan.jmx", null, false, null, false);
Assertions.fail("Expected ConfigurationException to be thrown");
} catch (ConfigurationException e) {
Assertions.assertTrue(e.getMessage().contains("doesn't exist or can't be opened"),
"When the file doesn't exist, this method 'runNonGui' should have a detailed message");
}
} |
@Override
public UfsFileStatus getFileStatus(String path, GetStatusOptions options) throws IOException {
String tpath = stripPath(path);
File file = new File(tpath);
try {
PosixFileAttributes attr =
Files.readAttributes(Paths.get(file.getPath()), PosixFileAttributes.class);
if (attr.isDirectory()) {
throw new IOException(String.format("path %s is not a file", path));
}
String contentHash = getContentHash(options.isIncludeRealContentHash(), file);
return new UfsFileStatus(path, contentHash, file.length(), file.lastModified(),
attr.owner().getName(), attr.group().getName(),
FileUtils.translatePosixPermissionToMode(attr.permissions()),
mUfsConf.getBytes(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT));
} catch (FileSystemException e) {
throw new FileNotFoundException(e.getMessage());
}
} | @Test
public void getFileStatus() throws IOException {
String file = PathUtils.concatPath(mLocalUfsRoot, getUniqueFileName());
mLocalUfs.create(file).close();
UfsFileStatus s = mLocalUfs.getFileStatus(file);
assertFalse(s.isDirectory());
assertTrue(s.isFile());
} |
@Override
public Object[] toArray() {
throw new UnsupportedOperationException();
} | @Test(expected = UnsupportedOperationException.class)
public void testToArray_withArray() {
queue.toArray(new Integer[0]);
} |
protected static String toString( FilterType filterType ) {
return filterType == null ? null : filterType.toString();
} | @Test
public void testToString_FilterType() {
assertNull( SelectionAdapterOptions.toString( null ) );
assertEquals( "TXT", SelectionAdapterOptions.toString( FilterType.TXT ) );
} |
public static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor,
Cluster cluster,
boolean skipTopicRackAwarenessCheck,
Map<String, List<Integer>> brokersByRack,
Map<Integer, String> rackByBroker) {
for (Node node : cluster.nodes()) {
String rack = getRackHandleNull(node);
brokersByRack.putIfAbsent(rack, new ArrayList<>());
brokersByRack.get(rack).add(node.id());
rackByBroker.put(node.id(), rack);
}
topicsByReplicationFactor.forEach((replicationFactor, topics) -> {
if (replicationFactor > rackByBroker.size()) {
throw new RuntimeException(String.format("Unable to change replication factor (RF) of topics %s to %d since there are only %d "
+ "alive brokers in the cluster. Requested RF cannot be more than number of alive brokers.",
topics, replicationFactor, rackByBroker.size()));
} else if (replicationFactor > brokersByRack.size()) {
if (skipTopicRackAwarenessCheck) {
LOG.info("Target replication factor for topics {} is {}, which is larger than the number of racks in cluster. Hence, the same rack"
+ " will contain more than one replicas from the same partition.", topics, replicationFactor);
} else {
throw new RuntimeException(String.format("Unable to change replication factor of topics %s to %d since there are only %d "
+ "racks in the cluster, to skip the rack-awareness check, set %s to true in the request.",
topics, replicationFactor, brokersByRack.size(), ParameterUtils.SKIP_RACK_AWARENESS_CHECK_PARAM));
}
}
});
} | @Test
public void testPopulateRackInfoForReplicationFactorChange() {
Map<String, List<Integer>> brokersByRack = new HashMap<>();
Map<Integer, String> rackByBroker = new HashMap<>();
// Expected: RuntimeException if replication factor (RF) is more than the number of brokers.
assertThrows(RuntimeException.class, () -> populateRackInfoForReplicationFactorChange(Collections.singletonMap((short) (NODES.length + 1),
Collections.singleton(TOPIC)),
CLUSTER,
false,
brokersByRack,
rackByBroker));
// Expected: RuntimeException if RF is more than the number of racks and rack-awareness check is not skipped.
assertThrows(RuntimeException.class, () -> populateRackInfoForReplicationFactorChange(Collections.singletonMap((short) NODES.length,
Collections.singleton(TOPIC)),
CLUSTER,
false,
brokersByRack,
rackByBroker));
// Expected: No failures if RF is more than the number of racks -- but less than the total number of brokers, and
// rack-awareness check is skipped.
populateRackInfoForReplicationFactorChange(Collections.singletonMap((short) NODES.length, Collections.singleton(TOPIC)),
CLUSTER,
true,
brokersByRack,
rackByBroker);
assertEquals(2, brokersByRack.size());
assertEquals(NODES.length, rackByBroker.size());
} |
@Override
public void fetchAll(final DiscoveryHandlerDTO discoveryHandlerDTO, final ProxySelectorDTO proxySelectorDTO) {
List<DiscoveryUpstreamDO> discoveryUpstreamDOS = discoveryUpstreamMapper.selectByDiscoveryHandlerId(discoveryHandlerDTO.getId());
DiscoverySyncData discoverySyncData = new DiscoverySyncData();
discoverySyncData.setPluginName(proxySelectorDTO.getPluginName());
discoverySyncData.setSelectorId(proxySelectorDTO.getId());
discoverySyncData.setSelectorName(proxySelectorDTO.getName());
List<DiscoveryUpstreamData> upstreamDataList = discoveryUpstreamDOS.stream().map(DiscoveryTransfer.INSTANCE::mapToData).collect(Collectors.toList());
discoverySyncData.setUpstreamDataList(upstreamDataList);
DataChangedEvent dataChangedEvent = new DataChangedEvent(ConfigGroupEnum.DISCOVER_UPSTREAM, DataEventTypeEnum.UPDATE, Collections.singletonList(discoverySyncData));
eventPublisher.publishEvent(dataChangedEvent);
} | @Test
public void testFetchAll() {
List<DiscoveryUpstreamDO> discoveryUpstreamDOS = new ArrayList<>();
DiscoveryHandlerDTO discoveryHandlerDTO = new DiscoveryHandlerDTO();
ProxySelectorDTO proxySelectorDTO = new ProxySelectorDTO();
when(discoveryUpstreamMapper.selectByDiscoveryHandlerId(anyString())).thenReturn(discoveryUpstreamDOS);
localDiscoveryProcessor.fetchAll(discoveryHandlerDTO, proxySelectorDTO);
verify(eventPublisher).publishEvent(any(DataChangedEvent.class));
} |
@Override
public void write(int b) throws IOException {
dataOut.write(b);
} | @Test
public void testWriteB() throws Exception {
dataOutputStream.write(1);
verify(mockOutputStream).write(1);
} |
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
final Fetch<K, V> fetch = Fetch.empty();
final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
int recordsRemaining = fetchConfig.maxPollRecords;
try {
while (recordsRemaining > 0) {
final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
final CompletedFetch completedFetch = fetchBuffer.peek();
if (completedFetch == null)
break;
if (!completedFetch.isInitialized()) {
try {
fetchBuffer.setNextInLineFetch(initialize(completedFetch));
} catch (Exception e) {
// Remove a completedFetch upon a parse with exception if (1) it contains no records, and
// (2) there are no fetched records with actual content preceding this exception.
// The first condition ensures that the completedFetches queue is not stuck with the same completedFetch
// in cases such as the TopicAuthorizationException, and the second condition ensures that there is no
// potential data loss due to an exception in a following record.
if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
fetchBuffer.poll();
throw e;
}
} else {
fetchBuffer.setNextInLineFetch(completedFetch);
}
fetchBuffer.poll();
} else if (subscriptions.isPaused(nextInLineFetch.partition)) {
// when the partition is paused we add the records back to the completedFetches queue instead of draining
// them so that they can be returned on a subsequent poll if the partition is resumed at that time
log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
pausedCompletedFetches.add(nextInLineFetch);
fetchBuffer.setNextInLineFetch(null);
} else {
final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
recordsRemaining -= nextFetch.numRecords();
fetch.add(nextFetch);
}
}
} catch (KafkaException e) {
if (fetch.isEmpty())
throw e;
} finally {
// add any polled completed fetches for paused partitions back to the completed fetches queue to be
// re-evaluated in the next poll
fetchBuffer.addAll(pausedCompletedFetches);
}
return fetch;
} | @Test
public void testFetchWithUnknownLeaderEpoch() {
buildDependencies();
assignAndSeek(topicAPartition0);
// Try to fetch data and validate that we get an empty Fetch back.
CompletedFetch completedFetch = completedFetchBuilder
.error(Errors.UNKNOWN_LEADER_EPOCH)
.build();
fetchBuffer.add(completedFetch);
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);
assertTrue(fetch.isEmpty());
} |
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
RegisterApplicationMasterRequest request) throws YarnException,
IOException {
this.metrics.incrRequestCount();
long startTime = clock.getTime();
try {
RequestInterceptorChainWrapper pipeline =
authorizeAndGetInterceptorChain();
LOG.info("RegisteringAM Host: {}, Port: {}, Tracking Url: {} for application {}. ",
request.getHost(), request.getRpcPort(), request.getTrackingUrl(),
pipeline.getApplicationAttemptId());
RegisterApplicationMasterResponse response =
pipeline.getRootInterceptor().registerApplicationMaster(request);
long endTime = clock.getTime();
this.metrics.succeededRegisterAMRequests(endTime - startTime);
LOG.info("RegisterAM processing finished in {} ms for application {}.",
endTime - startTime, pipeline.getApplicationAttemptId());
return response;
} catch (Throwable t) {
this.metrics.incrFailedRegisterAMRequests();
throw t;
}
} | @Test
public void testRegisterMultipleApplicationMasters() throws Exception {
for (int testAppId = 0; testAppId < 3; testAppId++) {
RegisterApplicationMasterResponse response = registerApplicationMaster(testAppId);
Assert.assertNotNull(response);
Assert.assertEquals(Integer.toString(testAppId), response.getQueue());
}
} |
@Override
protected TableRecords getUndoRows() {
return super.getUndoRows();
} | @Test
public void getUndoRows() {
Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getBeforeImage());
} |
public static SimpleTransform mul(double operand) {
return new SimpleTransform(Operation.mul,operand);
} | @Test
public void testMul() {
TransformationMap t = new TransformationMap(Collections.singletonList(SimpleTransform.mul(-2)),new HashMap<>());
testSimple(t,(double a) -> a * -2);
} |
public static <T> Optional<T> quietlyEval(String action,
String path,
CallableRaisingIOE<T> operation) {
try {
return Optional.of(once(action, path, operation));
} catch (Exception e) {
LOG.debug("Action {} failed", action, e);
return Optional.empty();
}
} | @Test
public void testQuietlyEvalReturnValueSuccess() {
assertOptionalEquals("quietly", 3,
quietlyEval("", "", () -> 3));
} |
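// Usage sketch: failures inside the operation are logged at debug level and surface only
// as Optional.empty(), never as a thrown exception.
Optional<Integer> ok = quietlyEval("parse", "/tmp/data", () -> 3);        // Optional.of(3)
Optional<Integer> failed = quietlyEval("parse", "/tmp/data", () -> {
    throw new IOException("simulated failure");                           // swallowed
});
// ok.isPresent() == true, failed.isPresent() == false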
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
if (null == source) {
return null;
}
T target = ReflectUtil.newInstanceIfPossible(tClass);
copyProperties(source, target, CopyOptions.create().setIgnoreProperties(ignoreProperties));
return target;
} | @Test
public void beanWithEnumSetTest() {
final Vto v1 = new Vto();
v1.setVersions(EnumSet.allOf(Version.class));
final Vto v2 = BeanUtil.copyProperties(v1, Vto.class);
assertNotNull(v2);
assertNotNull(v2.getVersions());
} |
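// Usage sketch (UserEntity/UserDto are hypothetical bean types): the varargs tail names
// properties to leave unset on the target, and a null source yields null instead of throwing.
UserDto dto = BeanUtil.copyProperties(userEntity, UserDto.class, "password", "secretToken");
assertNull(BeanUtil.copyProperties(null, UserDto.class));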
@Override
public Object adapt(final HttpAction action, final WebContext context) {
if (action != null) {
var code = action.getCode();
val response = ((JEEContext) context).getNativeResponse();
if (code < 400) {
response.setStatus(code);
} else {
try {
response.sendError(code);
} catch (final IOException e) {
throw new TechnicalException(e);
}
}
if (action instanceof WithLocationAction withLocationAction) {
context.setResponseHeader(HttpConstants.LOCATION_HEADER, withLocationAction.getLocation());
} else if (action instanceof WithContentAction withContentAction) {
val content = withContentAction.getContent();
if (content != null) {
try {
response.getWriter().write(content);
} catch (final IOException e) {
throw new TechnicalException(e);
}
}
}
return null;
}
throw new TechnicalException("No action provided");
} | @Test
public void testError500() throws IOException {
JEEHttpActionAdapter.INSTANCE.adapt(new StatusAction(500), context);
verify(response).sendError(500);
} |
public void processOnce() throws IOException {
// set status of query to OK.
ctx.getState().reset();
executor = null;
// reset sequence id of MySQL protocol
final MysqlChannel channel = ctx.getMysqlChannel();
channel.setSequenceId(0);
// read packet from channel
try {
packetBuf = channel.fetchOnePacket();
if (packetBuf == null) {
throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
}
} catch (AsynchronousCloseException e) {
// when this happens, the timeout checker has closed this channel;
// the killed flag in ctx has already been set, so just return
return;
}
// dispatch
dispatch();
// finalize
finalizeCommand();
ctx.setCommand(MysqlCommand.COM_SLEEP);
} | @Test
public void testFieldList() throws Exception {
ConnectContext ctx = initMockContext(mockChannel(fieldListPacket), GlobalStateMgr.getCurrentState());
myContext.setDatabase("testDb1");
ConnectProcessor processor = new ConnectProcessor(ctx);
processor.processOnce();
Assert.assertEquals(MysqlCommand.COM_FIELD_LIST, myContext.getCommand());
Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlEofPacket);
} |
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getGlobalConsumerConfigs(final String clientId) {
final Map<String, Object> baseConsumerProps = getCommonConsumerConfigs();
// Get global consumer override configs
final Map<String, Object> globalConsumerProps = originalsWithPrefix(GLOBAL_CONSUMER_PREFIX);
baseConsumerProps.putAll(globalConsumerProps);
// no need to set group id for a global consumer
baseConsumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG);
// no need to set instance id for a global consumer
baseConsumerProps.remove(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
// add client id with stream client id prefix
baseConsumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-global-consumer");
baseConsumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
return baseConsumerProps;
} | @Test
public void testGetGlobalConsumerConfigs() {
final Map<String, Object> returnedProps = streamsConfig.getGlobalConsumerConfigs(clientId);
assertEquals(returnedProps.get(ConsumerConfig.CLIENT_ID_CONFIG), clientId + "-global-consumer");
assertNull(returnedProps.get(ConsumerConfig.GROUP_ID_CONFIG));
} |
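// Usage sketch (standard StreamsConfig/ConsumerConfig constants): configs written under the
// "global.consumer." prefix override the common consumer configs, while group.id and
// group.instance.id are always stripped.
Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(StreamsConfig.GLOBAL_CONSUMER_PREFIX + ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "50");
Map<String, Object> globalProps = new StreamsConfig(props).getGlobalConsumerConfigs("client-1");
// globalProps: max.poll.records=50, client.id="client-1-global-consumer",
// auto.offset.reset="none", and no group.id entry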
public static <T> Inner<T> create() {
return new Inner<>();
} | @Test
@Category(NeedsRunner.class)
public void addNestedMapField() {
Schema nested = Schema.builder().addStringField("field1").build();
Schema schema =
Schema.builder()
.addMapField("map", Schema.FieldType.STRING, Schema.FieldType.row(nested))
.build();
Row subRow = Row.withSchema(nested).addValue("value").build();
Row row = Row.withSchema(schema).addValue(ImmutableMap.of("key", subRow)).build();
PCollection<Row> added =
pipeline
.apply(Create.of(row).withRowSchema(schema))
.apply(
AddFields.<Row>create()
.field("map.field2", Schema.FieldType.INT32)
.field("map.field3", Schema.FieldType.array(Schema.FieldType.STRING))
.field("map.field4", Schema.FieldType.iterable(Schema.FieldType.STRING)));
Schema expectedNestedSchema =
Schema.builder()
.addStringField("field1")
.addNullableField("field2", Schema.FieldType.INT32)
.addNullableField("field3", Schema.FieldType.array(Schema.FieldType.STRING))
.addNullableField("field4", Schema.FieldType.iterable(Schema.FieldType.STRING))
.build();
Schema expectedSchema =
Schema.builder()
.addMapField("map", Schema.FieldType.STRING, Schema.FieldType.row(expectedNestedSchema))
.build();
assertEquals(expectedSchema, added.getSchema());
Row expectedNested =
Row.withSchema(expectedNestedSchema).addValues("value", null, null, null).build();
Row expected =
Row.withSchema(expectedSchema).addValue(ImmutableMap.of("key", expectedNested)).build();
PAssert.that(added).containsInAnyOrder(expected);
pipeline.run();
} |
@Override
public void emit(String emitKey, List<Metadata> metadataList, ParseContext parseContext)
throws IOException, TikaEmitterException {
if (metadataList == null || metadataList.size() < 1) {
return;
}
List<EmitData> emitDataList = new ArrayList<>();
emitDataList.add(new EmitData(new EmitKey("", emitKey), metadataList));
emit(emitDataList);
} | @Test
public void testMultiValuedFields(@TempDir Path tmpDir) throws Exception {
Files.createDirectories(tmpDir.resolve("db"));
Path dbDir = tmpDir.resolve("db/h2");
Path config = tmpDir.resolve("tika-config.xml");
String connectionString = "jdbc:h2:file:" + dbDir.toAbsolutePath();
writeConfig("/configs/tika-config-jdbc-emitter-multivalued.xml",
connectionString, config);
EmitterManager emitterManager = EmitterManager.load(config);
Emitter emitter = emitterManager.getEmitter();
List<Metadata> data = new ArrayList<>();
Metadata m = new Metadata();
m.add("k1", "first");
m.add("k1", "second");
m.add("k1", "third");
m.add("k1", "fourth");
data.add(m);
emitter.emit("id0", data, new ParseContext());
String expected = "first, second, third, fourth";
int rows = 0;
try (Connection connection = DriverManager.getConnection(connectionString)) {
try (Statement st = connection.createStatement()) {
try (ResultSet rs = st.executeQuery("select * from test")) {
assertEquals("path", rs.getMetaData().getColumnName(1).toLowerCase(Locale.US));
while (rs.next()) {
assertEquals("id0", rs.getString(1));
assertEquals(expected, rs.getString(2));
rows++;
}
}
}
}
assertEquals(1, rows);
} |
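// Usage sketch (emitter loaded as in the test above): the single-document overload wraps
// its arguments into one EmitData with an empty container key, then delegates to
// emit(List<EmitData>).
Metadata meta = new Metadata();
meta.set("k1", "v1");
emitter.emit("id1", Collections.singletonList(meta), new ParseContext());
// equivalent to emit(List.of(new EmitData(new EmitKey("", "id1"), List.of(meta))))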
public void setViaTable(String name, String path, List<Map<String, String>> list) {
name = StringUtils.trimToEmpty(name);
path = StringUtils.trimToNull(path);
if (path == null) {
StringUtils.Pair nameAndPath = parseVariableAndPath(name);
name = nameAndPath.left;
path = nameAndPath.right;
}
for (Map<String, String> map : list) {
String append = map.get(PATH);
if (append == null) {
continue;
}
List<String> keys = new ArrayList<>(map.keySet());
keys.remove(PATH);
int columnCount = keys.size();
for (int i = 0; i < columnCount; i++) {
String key = keys.get(i);
String expression = StringUtils.trimToNull(map.get(key));
if (expression == null) { // cucumber cell was left blank
continue; // skip
// default behavior is to skip nulls when the expression evaluates
// this is driven by the routine in setValueByPath
// and users can override this by simply enclosing the expression in parentheses
}
String suffix;
try {
int arrayIndex = Integer.parseInt(key);
suffix = "[" + arrayIndex + "]";
} catch (NumberFormatException e) { // default to the column position as the index
suffix = columnCount > 1 ? "[" + i + "]" : "";
}
String finalPath;
if (append.startsWith("/") || (path != null && path.startsWith("/"))) { // XML
if (path == null) {
finalPath = append + suffix;
} else {
finalPath = path + suffix + '/' + append;
}
} else {
if (path == null) {
path = "$";
}
finalPath = path + suffix + '.' + append;
}
set(name, finalPath, expression, false, true);
}
}
} | @Test
void testSetViaTable() {
Json json = Json.of("[{path: 'bar', value: \"'baz'\" }]");
engine.setViaTable("foo", null, json.asList());
matchEquals("foo", "{ bar: 'baz' }");
json = Json.of("[{path: 'bar', value: 'null' }]"); // has no effect
engine.setViaTable("foo", null, json.asList());
matchEquals("foo", "{ bar: 'baz' }");
json = Json.of("[{path: 'bar', value: '(null)' }]"); // has effect
engine.setViaTable("foo", null, json.asList());
matchEquals("foo", "{ bar: null }");
} |
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
GuardedByExpression expr = BINDER.visit(exp, context);
checkGuardedBy(expr != null, String.valueOf(exp));
checkGuardedBy(expr.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
return expr;
} | @Test
public void explicitThisSameClass() {
assertThat(
bind(
"Test",
"Test.this",
forSourceLines(
"threadsafety/Test.java", "package threadsafety;", "class Test {", "}")))
.isEqualTo("(THIS)");
} |
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
// Partition the requested config resources based on which broker they must be sent to with the
// null broker being used for config resources which can be obtained from any broker
final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> nodeFutures = new HashMap<>(configResources.size());
for (ConfigResource resource : configResources) {
Integer broker = nodeFor(resource);
nodeFutures.compute(broker, (key, value) -> {
if (value == null) {
value = new HashMap<>();
}
value.put(resource, new KafkaFutureImpl<>());
return value;
});
}
final long now = time.milliseconds();
for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : nodeFutures.entrySet()) {
final Integer node = entry.getKey();
Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();
runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
node != null ? new ConstantNodeIdProvider(node, true) : new LeastLoadedBrokerOrActiveKController()) {
@Override
DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
.setResources(unified.keySet().stream()
.map(config ->
new DescribeConfigsRequestData.DescribeConfigsResource()
.setResourceName(config.name())
.setResourceType(config.type().id())
.setConfigurationKeys(null))
.collect(Collectors.toList()))
.setIncludeSynonyms(options.includeSynonyms())
.setIncludeDocumentation(options.includeDocumentation()));
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
ConfigResource configResource = entry.getKey();
DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
KafkaFutureImpl<Config> future = unified.get(configResource);
if (future == null) {
if (node != null) {
log.warn("The config {} in the response from node {} is not in the request",
configResource, node);
} else {
log.warn("The config {} in the response from the least loaded broker is not in the request",
configResource);
}
} else {
if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode())
.exception(describeConfigsResult.errorMessage()));
} else {
future.complete(describeConfigResult(describeConfigsResult));
}
}
}
completeUnrealizedFutures(
unified.entrySet().stream(),
configResource -> "The node response did not contain a result for config resource " + configResource);
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(unified.values(), throwable);
}
}, now);
}
return new DescribeConfigsResult(
nodeFutures.entrySet()
.stream()
.flatMap(x -> x.getValue().entrySet().stream())
.collect(Collectors.toMap(
Map.Entry::getKey,
Map.Entry::getValue,
(oldValue, newValue) -> {
// Duplicate keys should not occur, throw an exception to signal this issue
throw new IllegalStateException(String.format("Duplicate key for values: %s and %s", oldValue, newValue));
},
HashMap::new
))
);
} | @Test
public void testDescribeConsumerGroupConfigs() throws Exception {
ConfigResource resource1 = new ConfigResource(ConfigResource.Type.GROUP, "group1");
ConfigResource resource2 = new ConfigResource(ConfigResource.Type.GROUP, "group2");
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(new DescribeConfigsResponse(
new DescribeConfigsResponseData().setResults(asList(
new DescribeConfigsResponseData.DescribeConfigsResult()
.setResourceName(resource1.name())
.setResourceType(resource1.type().id())
.setErrorCode(Errors.NONE.code())
.setConfigs(emptyList()),
new DescribeConfigsResponseData.DescribeConfigsResult()
.setResourceName(resource2.name())
.setResourceType(resource2.type().id())
.setErrorCode(Errors.NONE.code())
.setConfigs(emptyList())))));
Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(
resource1,
resource2)).values();
assertEquals(new HashSet<>(asList(resource1, resource2)), result.keySet());
assertNotNull(result.get(resource1).get());
assertNotNull(result.get(resource2).get());
}
} |
@Override
public Page getNextPage()
{
if (closed) {
return null;
}
if (serverResponseIterator == null) {
serverResponseIterator = queryPinot(split);
}
ByteBuffer byteBuffer = null;
try {
// Pinot gRPC server response iterator returns:
// - n data blocks based on inbound message size;
// - 1 metadata of the query results.
// So we need to check ResponseType of each ServerResponse.
if (serverResponseIterator.hasNext()) {
long startTimeNanos = System.nanoTime();
Server.ServerResponse serverResponse = serverResponseIterator.next();
readTimeNanos += System.nanoTime() - startTimeNanos;
final String responseType = serverResponse.getMetadataOrThrow("responseType");
switch (responseType) {
case CommonConstants.Query.Response.ResponseType.DATA:
estimatedMemoryUsageInBytes = serverResponse.getSerializedSize();
// Store each dataTable which will later be constructed into Pages.
try {
byteBuffer = serverResponse.getPayload().asReadOnlyByteBuffer();
DataTable dataTable = DataTableFactory.getDataTable(byteBuffer);
checkExceptions(dataTable, split, PinotSessionProperties.isMarkDataFetchExceptionsAsRetriable(session));
currentDataTable = new PinotSegmentPageSource.PinotDataTableWithSize(dataTable, serverResponse.getSerializedSize());
}
catch (IOException e) {
throw new PinotException(
PINOT_DATA_FETCH_EXCEPTION,
split.getSegmentPinotQuery(),
String.format("Encountered Pinot exceptions when fetching data table from Split: < %s >", split),
e);
}
break;
case CommonConstants.Query.Response.ResponseType.METADATA:
// The last part of the response is Metadata
currentDataTable = null;
serverResponseIterator = null;
close();
return null;
default:
throw new PinotException(
PINOT_UNEXPECTED_RESPONSE,
split.getSegmentPinotQuery(),
String.format("Encountered Pinot exceptions, unknown response type - %s", responseType));
}
}
Page page = fillNextPage();
completedPositions += currentDataTable.getDataTable().getNumberOfRows();
return page;
}
finally {
if (byteBuffer != null) {
((Buffer) byteBuffer).clear();
}
}
} | @Test
public void testPrunedColumns()
{
PinotSessionProperties pinotSessionProperties = new PinotSessionProperties(pinotConfig);
ConnectorSession session = new TestingConnectorSession(pinotSessionProperties.getSessionProperties());
List<DataTable> dataTables = IntStream.range(0, 3).mapToObj(i -> createDataTableWithAllTypes()).collect(toImmutableList());
List<PinotColumnHandle> expectedColumnHandles = createPinotColumnHandlesWithAllTypes();
PinotSplit mockPinotSplit = new PinotSplit(pinotConnectorId.toString(), PinotSplit.SplitType.SEGMENT, expectedColumnHandles, Optional.empty(), Optional.of("blah"), ImmutableList.of("seg"), Optional.of("host"), getGrpcPort());
ImmutableList.Builder<Integer> columnsSurvivingBuilder = ImmutableList.builder();
for (int i = expectedColumnHandles.size() - 1; i >= 0; i--) {
if (i % 2 == 0) {
columnsSurvivingBuilder.add(i);
}
}
List<Integer> columnsSurviving = columnsSurvivingBuilder.build();
List<PinotColumnHandle> handlesSurviving = columnsSurviving.stream().map(expectedColumnHandles::get).collect(toImmutableList());
PinotSegmentPageSource pinotSegmentPageSource = getPinotSegmentPageSource(session, dataTables, mockPinotSplit, handlesSurviving);
for (int i = 0; i < dataTables.size(); ++i) {
Page page = requireNonNull(pinotSegmentPageSource.getNextPage(), "Expected a valid page");
Assert.assertEquals(page.getChannelCount(), columnsSurviving.size());
for (int j = 0; j < columnsSurviving.size(); ++j) {
Block block = page.getBlock(j);
int originalColumnIndex = columnsSurviving.get(j);
Type type = PinotColumnUtils.getPrestoTypeFromPinotType(getFieldSpec("dontcare", ALL_TYPES.get(originalColumnIndex)), false, false);
long maxHashCode = Long.MIN_VALUE;
for (int k = 0; k < NUM_ROWS; k++) {
maxHashCode = Math.max(type.hash(block, k), maxHashCode);
}
Assert.assertTrue(maxHashCode != 0, "Not all column values can have hash code 0");
}
}
} |
@Override
public COMMIT3Response commit(XDR xdr, RpcInfo info) {
SecurityHandler securityHandler = getSecurityHandler(info);
RpcCall rpcCall = (RpcCall) info.header();
int xid = rpcCall.getXid();
SocketAddress remoteAddress = info.remoteAddress();
return commit(xdr, info.channel(), xid, securityHandler, remoteAddress);
} | @Test(timeout = 60000)
public void testCommit() throws Exception {
HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
long dirId = status.getFileId();
int namenodeId = Nfs3Utils.getNamenodeId(config);
FileHandle handle = new FileHandle(dirId, namenodeId);
XDR xdr_req = new XDR();
COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
req.serialize(xdr_req);
Channel ch = Mockito.mock(Channel.class);
// Attempt by an unprivileged user should fail.
COMMIT3Response response1 = nfsd.commit(xdr_req.asReadOnlyWrap(),
ch, 1, securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a privileged user should pass.
COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(),
ch, 1, securityHandler,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect COMMIT3Response:", null, response2);
} |
static void validateDependencies(Set<Artifact> dependencies, Set<String> allowedRules, boolean failOnUnmatched)
throws EnforcerRuleException {
SortedSet<Artifact> unmatchedArtifacts = new TreeSet<>();
Set<String> matchedRules = new HashSet<>();
for (Artifact dependency : dependencies) {
boolean matches = false;
for (String rule : allowedRules) {
if (matches(dependency, rule)) {
matchedRules.add(rule);
matches = true;
break;
}
}
if (!matches) {
unmatchedArtifacts.add(dependency);
}
}
SortedSet<String> unmatchedRules = new TreeSet<>(allowedRules);
unmatchedRules.removeAll(matchedRules);
if (!unmatchedArtifacts.isEmpty() || (failOnUnmatched && !unmatchedRules.isEmpty())) {
StringBuilder errorMessage = new StringBuilder("Vespa dependency enforcer failed:\n");
if (!unmatchedArtifacts.isEmpty()) {
errorMessage.append("Dependencies not matching any rule:\n");
unmatchedArtifacts.forEach(a -> errorMessage.append(" - ").append(a.toString()).append('\n'));
}
if (failOnUnmatched && !unmatchedRules.isEmpty()) {
errorMessage.append("Rules not matching any dependency:\n");
unmatchedRules.forEach(p -> errorMessage.append(" - ").append(p).append('\n'));
}
throw new EnforcerRuleException(errorMessage.toString());
}
} | @Test
void fails_on_unmatched_rule() {
Set<Artifact> dependencies = Set.of(
artifact("com.yahoo.vespa", "testutils", "8.0.0", "test"));
Set<String> rules = Set.of(
"com.yahoo.vespa:container-core:jar:*:provided",
"com.yahoo.vespa:*:jar:*:test");
EnforcerRuleException exception = assertThrows(
EnforcerRuleException.class,
() -> EnforceDependencies.validateDependencies(dependencies, rules, true));
String expectedErrorMessage =
"""
Vespa dependency enforcer failed:
Rules not matching any dependency:
- com.yahoo.vespa:container-core:jar:*:provided
""";
assertEquals(expectedErrorMessage, exception.getMessage());
} |
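// Usage sketch (reusing the artifact(...) test helper above): when every dependency
// matches a rule and every rule matches a dependency, validation passes silently.
Set<Artifact> deps = Set.of(artifact("com.yahoo.vespa", "testutils", "8.0.0", "test"));
Set<String> okRules = Set.of("com.yahoo.vespa:*:jar:*:test");
EnforceDependencies.validateDependencies(deps, okRules, true); // no exception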
@Override
public ConfigOperateResult insertOrUpdateCas(String srcIp, String srcUser, ConfigInfo configInfo,
Map<String, Object> configAdvanceInfo) {
if (Objects.isNull(
findConfigInfoState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant()))) {
return addConfigInfo(srcIp, srcUser, configInfo, configAdvanceInfo);
} else {
return updateConfigInfoCas(configInfo, srcIp, srcUser, configAdvanceInfo);
}
} | @Test
void testInsertOrUpdateCasOfInsertConfigSuccess() {
Map<String, Object> configAdvanceInfo = new HashMap<>();
String desc = "testdesc";
String use = "testuse";
String effect = "testeffect";
String type = "testtype";
String schema = "testschema";
configAdvanceInfo.put("config_tags", "tag1,tag2");
configAdvanceInfo.put("desc", desc);
configAdvanceInfo.put("use", use);
configAdvanceInfo.put("effect", effect);
configAdvanceInfo.put("type", type);
configAdvanceInfo.put("schema", schema);
String dataId = "dataId";
String group = "group";
String tenant = "tenant";
String appName = "appName";
String content = "content132456";
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
String encryptedDatakey = "key456";
configInfo.setEncryptedDataKey(encryptedDatakey);
long insertConfigInfoId = 12345678765L;
ConfigInfoStateWrapper configInfoStateWrapperFinalSelect = new ConfigInfoStateWrapper();
configInfoStateWrapperFinalSelect.setId(insertConfigInfoId);
configInfoStateWrapperFinalSelect.setLastModified(System.currentTimeMillis());
//mock get config state
Mockito.when(
databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER)))
.thenReturn(null, configInfoStateWrapperFinalSelect);
String srcIp = "iptest";
String srcUser = "users";
ConfigOperateResult configOperateResult = embeddedConfigInfoPersistService.insertOrUpdateCas(srcIp, srcUser, configInfo,
configAdvanceInfo);
assertEquals(configInfoStateWrapperFinalSelect.getId(), configOperateResult.getId());
assertEquals(configInfoStateWrapperFinalSelect.getLastModified(), configOperateResult.getLastModified());
//expect insert config info invoked.
embeddedStorageContextHolderMockedStatic.verify(
() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), anyLong(), eq(dataId), eq(group), eq(tenant), eq(appName),
eq(content), eq(MD5Utils.md5Hex(content, Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser),
eq(desc), eq(use), eq(effect), eq(type), eq(schema), eq(encryptedDatakey)), times(1));
//expect insert config tags
embeddedStorageContextHolderMockedStatic.verify(
() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), anyLong(), eq("tag1"), eq(StringUtils.EMPTY), eq(dataId),
eq(group), eq(tenant)), times(1));
embeddedStorageContextHolderMockedStatic.verify(
() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), anyLong(), eq("tag2"), eq(StringUtils.EMPTY), eq(dataId),
eq(group), eq(tenant)), times(1));
//expect insert history info
Mockito.verify(historyConfigInfoPersistService, times(1))
.insertConfigHistoryAtomic(eq(0L), eq(configInfo), eq(srcIp), eq(srcUser), any(Timestamp.class), eq("I"));
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MutableLong that = (MutableLong) o;
return value == that.value;
} | @Test
public void testEquals() {
assertEquals(MutableLong.valueOf(0), MutableLong.valueOf(0));
assertEquals(MutableLong.valueOf(10), MutableLong.valueOf(10));
assertNotEquals(MutableLong.valueOf(0), MutableLong.valueOf(10));
assertNotEquals(null, MutableLong.valueOf(0));
assertNotEquals("foo", MutableLong.valueOf(0));
MutableLong self = MutableLong.valueOf(0);
assertEquals(self, self);
} |
public CreateTableCommand createTableCommand(
final KsqlStructuredDataOutputNode outputNode,
final Optional<RefinementInfo> emitStrategy
) {
Optional<WindowInfo> windowInfo =
outputNode.getKsqlTopic().getKeyFormat().getWindowInfo();
if (windowInfo.isPresent() && emitStrategy.isPresent()) {
final WindowInfo info = windowInfo.get();
windowInfo = Optional.of(WindowInfo.of(
info.getType(),
info.getSize(),
Optional.of(emitStrategy.get().getOutputRefinement())
));
}
return new CreateTableCommand(
outputNode.getSinkName().get(),
outputNode.getSchema(),
outputNode.getTimestampColumn(),
outputNode.getKsqlTopic().getKafkaTopicName(),
Formats.from(outputNode.getKsqlTopic()),
windowInfo,
Optional.of(outputNode.getOrReplace()),
Optional.of(false)
);
} | @Test
public void shouldCreateTableCommandWithSingleValueWrappingFromConfig() {
// Given:
ksqlConfig = new KsqlConfig(ImmutableMap.of(
KsqlConfig.KSQL_WRAP_SINGLE_VALUES, false
));
final CreateTable statement =
new CreateTable(SOME_NAME, TABLE_ELEMENTS_1_VALUE,
false, true, withProperties, false);
// When:
final CreateTableCommand cmd = createSourceFactory
.createTableCommand(statement, ksqlConfig);
// Then:
assertThat(cmd.getFormats().getValueFeatures(),
is(SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES)));
} |
Session retrieve(String clientID) {
return pool.get(clientID);
} | @Test
public void connectWithCleanSessionUpdateClientSession() throws ExecutionException, InterruptedException {
LOG.info("connectWithCleanSessionUpdateClientSession");
// first connect with clean session true
MqttConnectMessage msg = connMsg.clientId(FAKE_CLIENT_ID).cleanSession(true).build();
connection.processConnect(msg).completableFuture().get();
assertEqualsConnAck(CONNECTION_ACCEPTED, channel.readOutbound());
connection.processDisconnect(null).completableFuture().get();
assertFalse(channel.isOpen());
// second connect with clean session false
EmbeddedChannel anotherChannel = new EmbeddedChannel();
MQTTConnection anotherConnection = createMQTTConnection(ALLOW_ANONYMOUS_AND_ZEROBYTE_CLIENT_ID,
anotherChannel);
MqttConnectMessage secondConnMsg = MqttMessageBuilders.connect()
.clientId(FAKE_CLIENT_ID)
.protocolVersion(MqttVersion.MQTT_3_1)
.build();
anotherConnection.processConnect(secondConnMsg).completableFuture().get();
assertEqualsConnAck(CONNECTION_ACCEPTED, anotherChannel.readOutbound());
// Verify client session is clean false
Session session = sut.retrieve(FAKE_CLIENT_ID);
assertFalse(session.isClean());
} |
public abstract VoiceInstructionValue getConfigForDistance(
double distance,
String turnDescription,
String thenVoiceInstruction); | @Test
public void initialVICImperialTest() {
InitialVoiceInstructionConfig configImperial = new InitialVoiceInstructionConfig(FOR_HIGHER_DISTANCE_PLURAL.imperial, trMap,
locale, 4250, 250, DistanceUtils.Unit.IMPERIAL);
compareVoiceInstructionValues(
3219,
"Continue for 2 miles",
configImperial.getConfigForDistance(5000, "turn", " then")
);
compareVoiceInstructionValues(
3219,
"Continue for 2 miles",
configImperial.getConfigForDistance(4500, "turn", " then")
);
} |
public static Sensor getInvocationSensor(
final Metrics metrics,
final String sensorName,
final String groupName,
final String functionDescription
) {
final Sensor sensor = metrics.sensor(sensorName);
if (sensor.hasMetrics()) {
return sensor;
}
final BiFunction<String, String, MetricName> metricNamer = (suffix, descPattern) -> {
final String description = String.format(descPattern, functionDescription);
return metrics.metricName(sensorName + "-" + suffix, groupName, description);
};
sensor.add(
metricNamer.apply("avg", AVG_DESC),
new Avg()
);
sensor.add(
metricNamer.apply("max", MAX_DESC),
new Max()
);
sensor.add(
metricNamer.apply("count", COUNT_DESC),
new WindowedCount()
);
sensor.add(
metricNamer.apply("rate", RATE_DESC),
new Rate(TimeUnit.SECONDS, new WindowedCount())
);
return sensor;
} | @Test
public void shouldRegisterAvgMetric() {
// Given:
when(metrics.metricName(SENSOR_NAME + "-avg", GROUP_NAME, description(AVG_DESC)))
.thenReturn(specificMetricName);
// When:
FunctionMetrics
.getInvocationSensor(metrics, SENSOR_NAME, GROUP_NAME, FUNC_NAME);
// Then:
verify(sensor).add(eq(specificMetricName), isA(Avg.class));
} |
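// Usage sketch: the hasMetrics() guard makes the lookup idempotent, so repeated calls with
// the same sensor name return the already-populated sensor without re-adding the
// avg/max/count/rate metrics.
Metrics kafkaMetrics = new Metrics();
Sensor first = FunctionMetrics.getInvocationSensor(kafkaMetrics, "my-udf", "ksql-udf", "my udf");
Sensor second = FunctionMetrics.getInvocationSensor(kafkaMetrics, "my-udf", "ksql-udf", "my udf");
// first == second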
public static long size2Long(String size) {
if (null == size || size.length() <= 1) {
throw new IllegalArgumentException("could not convert '" + size + "' to byte length");
}
String size2Lower = size.toLowerCase();
char unit = size2Lower.charAt(size.length() - 1);
long number;
try {
number = NumberUtils.toLong(size2Lower.substring(0, size.length() - 1));
} catch (NumberFormatException | NullPointerException ex) {
throw new IllegalArgumentException("could not convert '" + size + "' to byte length");
}
switch (unit) {
case 'k':
return number * RADIX;
case 'm':
return number * RADIX * RADIX;
case 'g':
return number * RADIX * RADIX * RADIX;
case 't':
return number * RADIX * RADIX * RADIX * RADIX;
default:
throw new IllegalArgumentException("could not convert '" + size + "' to byte length");
}
} | @Test
void size2Long() {
assertThatThrownBy(() -> SizeUtil.size2Long(null)).isInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> SizeUtil.size2Long("")).isInstanceOf(IllegalArgumentException.class);
// wrong format
assertThatThrownBy(() -> SizeUtil.size2Long("2kk")).isInstanceOf(IllegalArgumentException.class);
// wrong unit
assertThatThrownBy(() -> SizeUtil.size2Long("2x")).isInstanceOf(IllegalArgumentException.class);
assertThat(SizeUtil.size2Long("2k")).isEqualTo(2L * 1024);
assertThat(SizeUtil.size2Long("2m")).isEqualTo(2L * 1024 * 1024);
assertThat(SizeUtil.size2Long("2G")).isEqualTo(2L * 1024 * 1024 * 1024);
assertThat(SizeUtil.size2Long("2t")).isEqualTo(2L * 1024 * 1024 * 1024 * 1024);
} |
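// Usage sketch: units are case-insensitive single letters (k/m/g/t) expanding by a factor
// of 1024 per step.
long bytes = SizeUtil.size2Long("512M"); // 512L * 1024 * 1024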
public final void isPositiveInfinity() {
isEqualTo(Float.POSITIVE_INFINITY);
} | @Test
public void isPositiveInfinity() {
assertThat(Float.POSITIVE_INFINITY).isPositiveInfinity();
assertThatIsPositiveInfinityFails(1.23f);
assertThatIsPositiveInfinityFails(Float.NEGATIVE_INFINITY);
assertThatIsPositiveInfinityFails(Float.NaN);
assertThatIsPositiveInfinityFails(null);
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
final EueApiClient client = new EueApiClient(session);
final UiFsModel response;
final String resourceId = fileid.getFileId(file);
switch(resourceId) {
case EueResourceIdProvider.ROOT:
case EueResourceIdProvider.TRASH:
response = new ListResourceAliasApi(client).resourceAliasAliasGet(resourceId,
null, file.attributes().getETag(), null, null, null, null,
Collections.singletonList(OPTION_WIN_32_PROPS), null);
break;
default:
response = new ListResourceApi(client).resourceResourceIdGet(resourceId,
null, file.attributes().getETag(), null, null, null, null,
Collections.singletonList(OPTION_WIN_32_PROPS), null);
break;
}
switch(response.getUifs().getResourceType()) {
case "aliascontainer":
case "container":
if(file.isFile()) {
throw new NotfoundException(file.getAbsolute());
}
break;
default:
if(file.isDirectory()) {
throw new NotfoundException(file.getAbsolute());
}
break;
}
final PathAttributes attr = this.toAttributes(response.getUifs(), response.getUiwin32(),
EueShareFeature.findShareForResource(session.userShares(), resourceId));
if(client.getResponseHeaders().containsKey(HttpHeaders.ETAG)) {
attr.setETag(StringUtils.remove(client.getResponseHeaders().get(HttpHeaders.ETAG).stream().findFirst().orElse(null), '"'));
}
return attr;
}
catch(ApiException e) {
switch(e.getCode()) {
case HttpStatus.SC_NOT_MODIFIED:
if(log.isDebugEnabled()) {
log.debug(String.format("No changes for file %s with ETag %s", file, file.attributes().getETag()));
}
return file.attributes();
}
throw new EueExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
} | @Test
public void testChangeETagPropagatingToRoot() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final EueAttributesFinderFeature feature = new EueAttributesFinderFeature(session, fileid);
final String rootEtag = feature.find(new Path("/", EnumSet.of(Path.Type.directory))).getETag();
assertNotNull(rootEtag);
final long rootModificationDate = feature.find(new Path("/", EnumSet.of(Path.Type.directory))).getModificationDate();
final Path firstlevel = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final String firstLevelEtag = feature.find(firstlevel).getETag();
final Long firstLevelRevision = feature.find(firstlevel).getRevision();
assertNull(firstLevelRevision);
final long firstLevelModificationDate = feature.find(firstlevel).getModificationDate();
assertNotNull(firstLevelEtag);
final Path secondlevel = new EueDirectoryFeature(session, fileid).mkdir(new Path(firstlevel, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final String secondLevelEtag = feature.find(secondlevel).getETag();
assertNotNull(secondLevelEtag);
final Path secondlevelSibling = new EueDirectoryFeature(session, fileid).mkdir(new Path(firstlevel, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertNotNull(secondlevelSibling);
final Path file = new EueTouchFeature(session, fileid).touch(new Path(secondlevel, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final String secondLevelSiblingEtag = feature.find(secondlevelSibling).getETag();
assertNotEquals(secondLevelEtag, feature.find(secondlevel).getETag());
assertNotEquals(firstLevelEtag, feature.find(firstlevel).getETag());
assertNull(feature.find(firstlevel).getRevision());
assertEquals(firstLevelModificationDate, feature.find(firstlevel).getModificationDate(), 0L);
assertNotEquals(rootEtag, feature.find(new Path("/", EnumSet.of(Path.Type.directory))).getETag());
assertNotEquals(rootModificationDate, feature.find(new Path("/", EnumSet.of(Path.Type.directory))).getModificationDate());
new EueDeleteFeature(session, fileid).delete(Arrays.asList(firstlevel, secondlevel), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public void goOnlineFromConsuming(SegmentZKMetadata segmentZKMetadata)
throws InterruptedException {
_serverMetrics.setValueOfTableGauge(_clientId, ServerGauge.LLC_PARTITION_CONSUMING, 0);
try {
// Remove the segment file before we do anything else.
removeSegmentFile();
_leaseExtender.removeSegment(_segmentNameStr);
StreamPartitionMsgOffset endOffset = _streamPartitionMsgOffsetFactory.create(segmentZKMetadata.getEndOffset());
_segmentLogger.info("State: {}, transitioning from CONSUMING to ONLINE (startOffset: {}, endOffset: {})", _state,
_startOffset, endOffset);
stop();
_segmentLogger.info("Consumer thread stopped in state {}", _state);
switch (_state) {
case COMMITTED:
case RETAINED:
// Nothing to do. we already built local segment and swapped it with in-memory data.
_segmentLogger.info("State {}. Nothing to do", _state.toString());
break;
case DISCARDED:
case ERROR:
_segmentLogger.info("State {}. Downloading to replace", _state.toString());
downloadSegmentAndReplace(segmentZKMetadata);
break;
case CATCHING_UP:
case HOLDING:
case INITIAL_CONSUMING:
switch (_segmentCompletionMode) {
case DOWNLOAD:
_segmentLogger.info("State {}. CompletionMode {}. Downloading to replace", _state.toString(),
_segmentCompletionMode);
downloadSegmentAndReplace(segmentZKMetadata);
break;
case DEFAULT:
// Allow catching up to the final offset, and then replace.
if (_currentOffset.compareTo(endOffset) > 0) {
// We moved ahead of the offset that is committed in ZK.
_segmentLogger
.warn("Current offset {} ahead of the offset in zk {}. Downloading to replace", _currentOffset,
endOffset);
downloadSegmentAndReplace(segmentZKMetadata);
} else if (_currentOffset.compareTo(endOffset) == 0) {
_segmentLogger
.info("Current offset {} matches offset in zk {}. Replacing segment", _currentOffset, endOffset);
buildSegmentAndReplace();
} else {
_segmentLogger.info("Attempting to catch up from offset {} to {} ", _currentOffset, endOffset);
boolean success = catchupToFinalOffset(endOffset,
TimeUnit.MILLISECONDS.convert(MAX_TIME_FOR_CONSUMING_TO_ONLINE_IN_SECONDS, TimeUnit.SECONDS));
if (success) {
_segmentLogger.info("Caught up to offset {}", _currentOffset);
buildSegmentAndReplace();
} else {
_segmentLogger
.info("Could not catch up to offset (current = {}). Downloading to replace", _currentOffset);
downloadSegmentAndReplace(segmentZKMetadata);
}
}
break;
default:
break;
}
break;
default:
_segmentLogger.info("Downloading to replace segment while in state {}", _state.toString());
downloadSegmentAndReplace(segmentZKMetadata);
break;
}
} catch (Exception e) {
Utils.rethrowException(e);
} finally {
_serverMetrics.setValueOfTableGauge(_clientId, ServerGauge.LLC_PARTITION_CONSUMING, 0);
}
} | @Test
public void testOnlineTransitionAfterStop()
throws Exception {
SegmentZKMetadata metadata = new SegmentZKMetadata(SEGMENT_NAME_STR);
final long finalOffsetValue = START_OFFSET_VALUE + 600;
final LongMsgOffset finalOffset = new LongMsgOffset(finalOffsetValue);
metadata.setEndOffset(finalOffset.toString());
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.COMMITTED);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertFalse(segmentDataManager._downloadAndReplaceCalled);
Assert.assertFalse(segmentDataManager._buildAndReplaceCalled);
}
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.RETAINED);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertFalse(segmentDataManager._downloadAndReplaceCalled);
Assert.assertFalse(segmentDataManager._buildAndReplaceCalled);
}
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.DISCARDED);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertTrue(segmentDataManager._downloadAndReplaceCalled);
Assert.assertFalse(segmentDataManager._buildAndReplaceCalled);
}
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.ERROR);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertTrue(segmentDataManager._downloadAndReplaceCalled);
Assert.assertFalse(segmentDataManager._buildAndReplaceCalled);
}
// If holding, but we have overshot the expected final offset, then download and replace
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.HOLDING);
segmentDataManager.setCurrentOffset(finalOffsetValue + 1);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertTrue(segmentDataManager._downloadAndReplaceCalled);
Assert.assertFalse(segmentDataManager._buildAndReplaceCalled);
}
// If catching up, but we have overshot the expected final offset, then download and replace
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.CATCHING_UP);
segmentDataManager.setCurrentOffset(finalOffsetValue + 1);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertTrue(segmentDataManager._downloadAndReplaceCalled);
Assert.assertFalse(segmentDataManager._buildAndReplaceCalled);
}
// If catching up, but we did not get to the final offset, then download and replace
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.CATCHING_UP);
segmentDataManager._consumeOffsets.add(new LongMsgOffset(finalOffsetValue - 1));
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertTrue(segmentDataManager._downloadAndReplaceCalled);
Assert.assertFalse(segmentDataManager._buildAndReplaceCalled);
}
// But then if we get to the exact offset, we get to build and replace, not download
try (FakeRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager()) {
segmentDataManager._stopWaitTimeMs = 0;
segmentDataManager._state.set(segmentDataManager, RealtimeSegmentDataManager.State.CATCHING_UP);
segmentDataManager._consumeOffsets.add(finalOffset);
segmentDataManager.goOnlineFromConsuming(metadata);
Assert.assertFalse(segmentDataManager._downloadAndReplaceCalled);
Assert.assertTrue(segmentDataManager._buildAndReplaceCalled);
}
} |
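The CONSUMING-to-ONLINE transition above boils down to a three-way offset comparison in the DEFAULT completion mode. A minimal standalone sketch (plain longs instead of Pinot's StreamPartitionMsgOffset; not Pinot's actual API):
public class CatchupDecisionSketch {
    enum Action { DOWNLOAD_AND_REPLACE, BUILD_AND_REPLACE, CATCH_UP_THEN_DECIDE }

    static Action decide(long currentOffset, long committedEndOffset) {
        if (currentOffset > committedEndOffset) {
            // Overshot the offset committed in ZK: local data cannot match the
            // committed segment, so download the committed copy.
            return Action.DOWNLOAD_AND_REPLACE;
        } else if (currentOffset == committedEndOffset) {
            // Exact match: the in-memory data equals the committed segment.
            return Action.BUILD_AND_REPLACE;
        }
        // Behind: try to catch up; on success build, otherwise download.
        return Action.CATCH_UP_THEN_DECIDE;
    }

    public static void main(String[] args) {
        System.out.println(decide(601, 600)); // DOWNLOAD_AND_REPLACE
        System.out.println(decide(600, 600)); // BUILD_AND_REPLACE
        System.out.println(decide(599, 600)); // CATCH_UP_THEN_DECIDE
    }
}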
static List<BigtableResourceManagerCluster> generateDefaultClusters(
String baseString, String zone, int numNodes, StorageType storageType) {
String clusterId =
generateResourceId(
baseString.toLowerCase(),
ILLEGAL_CLUSTER_CHARS,
REPLACE_CLUSTER_CHAR,
MAX_CLUSTER_ID_LENGTH,
TIME_FORMAT);
BigtableResourceManagerCluster cluster =
BigtableResourceManagerCluster.create(clusterId, zone, numNodes, storageType);
return ImmutableList.of(cluster);
} | @Test
public void testGenerateDefaultClustersShouldThrowErrorWhenTestIdIsEmpty() {
assertThrows(
IllegalArgumentException.class,
() -> generateDefaultClusters("", ZONE, NUM_NODES, STORAGE_TYPE));
} |
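generateResourceId itself is not shown in this row; the following is a hypothetical sketch of what such a sanitizer typically does, including the empty-input check the test exercises. All names here are illustrative, not the real helper:
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class ResourceIdSketch {
    static String generateId(String base, int maxLength) {
        if (base == null || base.isEmpty()) {
            // hypothetical mirror of the IllegalArgumentException the test expects
            throw new IllegalArgumentException("base string must not be empty");
        }
        String stamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd-HHmmss"));
        // lower-case, replace illegal characters, append a timestamp, cap the length
        String id = base.toLowerCase().replaceAll("[^a-z0-9-]", "-") + "-" + stamp;
        return id.length() <= maxLength ? id : id.substring(0, maxLength);
    }

    public static void main(String[] args) {
        System.out.println(generateId("My Cluster", 30)); // e.g. my-cluster-20240101-120000
    }
}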
@LiteralParameters("x")
@ScalarOperator(LESS_THAN_OR_EQUAL)
@SqlType(StandardTypes.BOOLEAN)
public static boolean lessThanOrEqual(@SqlType("varchar(x)") Slice left, @SqlType("varchar(x)") Slice right)
{
return left.compareTo(right) <= 0;
} | @Test
public void testLessThanOrEqual()
{
assertFunction("'foo' <= 'foo'", BOOLEAN, true);
assertFunction("'foo' <= 'bar'", BOOLEAN, false);
assertFunction("'bar' <= 'foo'", BOOLEAN, true);
assertFunction("'bar' <= 'bar'", BOOLEAN, true);
} |
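The operator delegates to Slice.compareTo, a byte-wise lexicographic comparison; for ASCII inputs that agrees with String.compareTo, as this self-contained check shows:
public class LexCompareSketch {
    public static void main(String[] args) {
        System.out.println("foo".compareTo("foo") <= 0); // true
        System.out.println("foo".compareTo("bar") <= 0); // false ('f' > 'b')
        System.out.println("bar".compareTo("foo") <= 0); // true
    }
}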
public ProtocolBuilder heartbeat(Integer heartbeat) {
this.heartbeat = heartbeat;
return getThis();
} | @Test
void heartbeat() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.heartbeat(1000);
Assertions.assertEquals(1000, builder.build().getHeartbeat());
} |
public int getClusterNodeAttributesFailedRetrieved() {
return numGetClusterNodeAttributesFailedRetrieved.value();
} | @Test
public void testGetClusterNodeAttributesRetrievedFailed() {
long totalBadBefore = metrics.getClusterNodeAttributesFailedRetrieved();
badSubCluster.getClusterNodeAttributesFailed();
Assert.assertEquals(totalBadBefore + 1,
metrics.getClusterNodeAttributesFailedRetrieved());
} |
static String trimFieldsAndRemoveEmptyFields(String str) {
char[] chars = str.toCharArray();
char[] res = new char[chars.length];
/*
* set when reading the first non-trimmable char after a separator char (or the beginning of the string)
* unset when reading a separator
*/
boolean inField = false;
boolean inQuotes = false;
int i = 0;
int resI = 0;
for (; i < chars.length; i++) {
boolean isSeparator = chars[i] == ',';
if (!inQuotes && isSeparator) {
// exiting field (may already be unset)
inField = false;
if (resI > 0) {
resI = retroTrim(res, resI);
}
} else {
boolean isTrimmed = !inQuotes && istrimmable(chars[i]);
if (isTrimmed && !inField) {
// we haven't met any non-trimmable char since the last separator yet
continue;
}
boolean isEscape = isEscapeChar(chars[i]);
if (isEscape) {
inQuotes = !inQuotes;
}
// add a separator since we already emitted at least one field
if (!inField && resI > 0) {
res[resI] = ',';
resI++;
}
// mark that we are inside a field (may already be set)
inField = true;
// copy current char
res[resI] = chars[i];
resI++;
}
}
// inQuotes can only be true at this point if quotes are unbalanced
if (!inQuotes) {
// trim end of str
resI = retroTrim(res, resI);
}
return new String(res, 0, resI);
} | @Test
@UseDataProvider("emptys")
public void trimFieldsAndRemoveEmptyFields_quotes_allow_to_preserve_fields(String empty) {
String quotedEmpty = '"' + empty + '"';
assertThat(trimFieldsAndRemoveEmptyFields(quotedEmpty)).isEqualTo(quotedEmpty);
assertThat(trimFieldsAndRemoveEmptyFields(',' + quotedEmpty)).isEqualTo(quotedEmpty);
assertThat(trimFieldsAndRemoveEmptyFields(quotedEmpty + ',')).isEqualTo(quotedEmpty);
assertThat(trimFieldsAndRemoveEmptyFields(',' + quotedEmpty + ',')).isEqualTo(quotedEmpty);
assertThat(trimFieldsAndRemoveEmptyFields(quotedEmpty + ',' + quotedEmpty)).isEqualTo(quotedEmpty + ',' + quotedEmpty);
assertThat(trimFieldsAndRemoveEmptyFields(quotedEmpty + ",," + quotedEmpty)).isEqualTo(quotedEmpty + ',' + quotedEmpty);
assertThat(trimFieldsAndRemoveEmptyFields(quotedEmpty + ',' + quotedEmpty + ',' + quotedEmpty)).isEqualTo(quotedEmpty + ',' + quotedEmpty + ',' + quotedEmpty);
} |
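A usage sketch of trimFieldsAndRemoveEmptyFields (assuming static access; the enclosing class is not shown in this row). The expected outputs follow from the trimming and quoting logic above:
trimFieldsAndRemoveEmptyFields(" a , , b "); // -> "a,b"       (fields trimmed, empty one dropped)
trimFieldsAndRemoveEmptyFields(",,");        // -> ""          (nothing but empty fields)
trimFieldsAndRemoveEmptyFields("\" \",x");   // -> "\" \",x"   (quoted blank is a real field)
trimFieldsAndRemoveEmptyFields("\"a,b\"");   // -> "\"a,b\""   (comma inside quotes is kept)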
public T getOrDefault(final T defaultValue) {
return _delegate.getOrDefault(defaultValue);
} | @Test
public void testGetOrDefaultWithError() {
final Promise<String> delegate = Promises.error(new Exception());
final Promise<String> promise = new DelegatingPromise<String>(delegate);
assertEquals(delegate.getOrDefault("defaultValue"), promise.getOrDefault("defaultValue"));
} |
@Override
public byte[] echo(byte[] message) {
return read(null, ByteArrayCodec.INSTANCE, ECHO, message);
} | @Test
public void testEcho() {
assertThat(connection.echo("test".getBytes())).isEqualTo("test".getBytes());
} |
protected void ensurePath() throws Exception {
ensureContainers.ensure();
} | @Test
public void testEnsurePath() throws Exception {
Timing timing = new Timing();
CuratorFramework client = CuratorFrameworkFactory.newClient(
server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
client.start();
try {
try (PathChildrenCache cache = new PathChildrenCache(client, "/one/two/three", false)) {
cache.start();
timing.sleepABit();
try {
client.create().forPath("/one/two/three/four");
} catch (KeeperException.NoNodeException e) {
fail("Path should exist", e);
}
}
timing.sleepABit();
} finally {
TestCleanState.closeAndTestClean(client);
}
} |
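ensurePath() relies on Curator's EnsureContainers, which creates the whole container path on first use and is a no-op afterwards. A minimal sketch, with a placeholder connection string and assuming the usual Curator imports:
CuratorFramework client = CuratorFrameworkFactory.newClient("localhost:2181", new RetryOneTime(1));
client.start();
EnsureContainers ensure = new EnsureContainers(client, "/one/two/three");
ensure.ensure(); // creates /one, /one/two, /one/two/three as containers if absent
ensure.ensure(); // second call is a cheap no-op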
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
} | @Test
public void shouldChooseSpecificOverOnlyVarArgs() {
// Given:
givenFunctions(
function(EXPECTED, -1, STRING),
function(OTHER, 0, STRING_VARARGS)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.STRING)));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
} |
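The specific-beats-varargs preference the test exercises lives inside findMatchingCandidate (not shown here). Java's own overload resolution applies the same rule, which this self-contained analogue demonstrates:
public class OverloadSketch {
    static String f(String s) { return "specific"; }
    static String f(String... s) { return "varargs"; }

    public static void main(String[] args) {
        // An exact single-argument signature is preferred over the varargs one,
        // matching what the UDF index is expected to resolve above.
        System.out.println(f("x")); // prints "specific"
    }
}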
@VisibleForTesting
static Object convertAvroField(Object avroValue, Schema schema) {
if (avroValue == null) {
return null;
}
switch (schema.getType()) {
case NULL:
case INT:
case LONG:
case DOUBLE:
case FLOAT:
case BOOLEAN:
return avroValue;
case ENUM:
case STRING:
return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8
case UNION:
for (Schema s : schema.getTypes()) {
if (s.getType() == Schema.Type.NULL) {
continue;
}
return convertAvroField(avroValue, s);
}
throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type");
case ARRAY:
case BYTES:
case FIXED:
case RECORD:
case MAP:
default:
throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType()
+ " for value field schema " + schema.getName());
}
} | @Test
public void testConvertAvroLong() {
Object converted = BaseJdbcAutoSchemaSink.convertAvroField(Long.MIN_VALUE, createFieldAndGetSchema((builder) ->
builder.name("field").type().longType().noDefault()));
Assert.assertEquals(converted, Long.MIN_VALUE);
} |
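A sketch of the UNION branch above, assuming the same Avro imports as the surrounding test: the converter skips the null member and converts against the first non-null type.
Schema nullableLong = SchemaBuilder.unionOf().nullType().and().longType().endUnion();
Object v = BaseJdbcAutoSchemaSink.convertAvroField(42L, nullableLong);  // -> 42L
Object n = BaseJdbcAutoSchemaSink.convertAvroField(null, nullableLong); // -> null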
@Override
public void writeMetrics(MetricQueryResults metricQueryResults) throws Exception {
URL url = new URL(urlString);
String metrics = serializeMetrics(metricQueryResults);
byte[] postData = metrics.getBytes(StandardCharsets.UTF_8);
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setDoOutput(true);
connection.setInstanceFollowRedirects(false);
connection.setRequestMethod("POST");
connection.setRequestProperty("Content-Type", "application/json");
connection.setRequestProperty("charset", "utf-8");
connection.setRequestProperty("Content-Length", Integer.toString(postData.length));
connection.setUseCaches(false);
try (DataOutputStream connectionOutputStream =
new DataOutputStream(connection.getOutputStream())) {
connectionOutputStream.write(postData);
}
int responseCode = connection.getResponseCode();
if (responseCode != 200) {
throw new IOException(
"Expected HTTP 200 OK response while writing metrics to MetricsHttpSink but received "
+ responseCode);
}
} | @Test
public void testWriteMetricsWithCommittedUnSupported() throws Exception {
MetricQueryResults metricQueryResults = new CustomMetricQueryResults(false);
MetricsOptions pipelineOptions = PipelineOptionsFactory.create().as(MetricsOptions.class);
pipelineOptions.setMetricsHttpSinkUrl(String.format("http://localhost:%s", port));
MetricsHttpSink metricsHttpSink = new MetricsHttpSink(pipelineOptions);
countDownLatch = new CountDownLatch(1);
metricsHttpSink.writeMetrics(metricQueryResults);
countDownLatch.await();
String expected =
"{\"counters\":[{\"attempted\":20,\"name\":{\"name\":\"n1\","
+ "\"namespace\":\"ns1\"},\"step\":\"s1\"}],\"distributions\":[{\"attempted\":"
+ "{\"count\":4,\"max\":9,\"mean\":6.25,\"min\":3,\"sum\":25},\"name\":{\"name\":\"n2\""
+ ",\"namespace\":\"ns1\"},\"step\":\"s2\"}],\"gauges\":[{\"attempted\":{\"timestamp\":"
+ "\"1970-01-05T00:04:22.800Z\",\"value\":120},\"name\":{\"name\":\"n3\",\"namespace\":"
+ "\"ns1\"},\"step\":\"s3\"}],\"stringSets\":[{\"attempted\":{\"stringSet\":[\"cd\"]},"
+ "\"name\":{\"name\":\"n3\",\"namespace\":\"ns1\"},\"step\":\"s3\"}]}";
assertEquals("Wrong number of messages sent to HTTP server", 1, messages.size());
assertEquals("Wrong messages sent to HTTP server", expected, messages.get(0));
} |
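One way the test's local endpoint could be wired (a sketch of a plausible fixture, not necessarily the actual one): the JDK's built-in com.sun.net.httpserver.HttpServer records each POST body into the test's messages list and counts down the latch.
HttpServer server = HttpServer.create(new InetSocketAddress(0), 0);
server.createContext("/", exchange -> {
    byte[] body = exchange.getRequestBody().readAllBytes();
    messages.add(new String(body, StandardCharsets.UTF_8));
    exchange.sendResponseHeaders(200, -1); // MetricsHttpSink requires HTTP 200
    exchange.close();
    countDownLatch.countDown();
});
server.start();
int port = server.getAddress().getPort();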
public String getNamespaceId() {
return namespaceId;
} | @Test
void testGetNamespaceId() {
String namespaceId = "aaa";
final NacosClientProperties nacosClientProperties = NacosClientProperties.PROTOTYPE.derive(props);
NamingHttpClientProxy clientProxy = new NamingHttpClientProxy(namespaceId, proxy, mgr, nacosClientProperties);
String actualNamespaceId = clientProxy.getNamespaceId();
assertEquals(namespaceId, actualNamespaceId);
} |
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
switch (outputFormat) {
case JSON:
printAsJson(entityList);
break;
case TABULAR:
final boolean showStatements = entityList.size() > 1;
for (final KsqlEntity ksqlEntity : entityList) {
writer().println();
if (showStatements) {
writer().println(ksqlEntity.getStatementText());
}
printAsTable(ksqlEntity);
}
break;
default:
throw new RuntimeException(String.format(
"Unexpected output format: '%s'",
outputFormat.name()
));
}
} | @Test
public void testPrintPropertyList() {
// Given:
final List<Property> properties = new ArrayList<>();
properties.add(new Property("k1", "KSQL", "1"));
properties.add(new Property("k2", "KSQL", "v2"));
properties.add(new Property("k3", "KSQL", "true"));
final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of(
new PropertiesList("e", properties, Collections.emptyList(), Collections.emptyList())
));
// When:
console.printKsqlEntityList(entityList);
// Then:
final String output = terminal.getOutputString();
Approvals.verify(output, approvalOptions);
} |
public static boolean isLimit(String accessKeyID) {
RateLimiter rateLimiter = null;
try {
rateLimiter = CACHE.get(accessKeyID, () -> RateLimiter.create(limit));
} catch (Exception e) {
LOGGER.error("create limit fail", e);
}
if (rateLimiter != null && !rateLimiter.tryAcquire(LIMIT_TIME, TimeUnit.MILLISECONDS)) {
LOGGER.error("access_key_id:{} limited", accessKeyID);
return true;
}
return false;
} | @Test
void testIsLimit() {
String keyId = "a";
// For initializing: the first call creates the limiter and always succeeds.
assertFalse(Limiter.isLimit(keyId));
long start = System.currentTimeMillis();
for (int j = 0; j < 5; j++) {
assertFalse(Limiter.isLimit(keyId));
}
long elapse = System.currentTimeMillis() - start;
// with a 5 qps limit, five more permits must take roughly one second
assertTrue(elapse > 980);
} |
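The Guava RateLimiter semantics the code above depends on, in a minimal sketch: a 5-permits-per-second limiter hands out the first permit immediately and spaces later ones about 200 ms apart, which is why five calls take roughly a second in the test.
RateLimiter limiter = RateLimiter.create(5.0);
limiter.tryAcquire();                                          // true, immediate
boolean ok = limiter.tryAcquire(1000, TimeUnit.MILLISECONDS);  // true, after a short wait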
@Override
public double calcDist3D(double fromLat, double fromLon, double fromHeight,
double toLat, double toLon, double toHeight) {
double eleDelta = hasElevationDiff(fromHeight, toHeight) ? (toHeight - fromHeight) : 0;
double len = calcDist(fromLat, fromLon, toLat, toLon);
return Math.sqrt(eleDelta * eleDelta + len * len);
} | @Test
public void testDistance3dEarth() {
DistanceCalc distCalc = new DistanceCalcEarth();
assertEquals(1, distCalc.calcDist3D(
0, 0, 0,
0, 0, 1
), 1e-6);
} |
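A worked check of the Pythagorean combination above: a 3 m horizontal distance with a 4 m elevation change gives sqrt(3*3 + 4*4) = 5 m.
public class Dist3DSketch {
    public static void main(String[] args) {
        double len = 3.0, eleDelta = 4.0;
        System.out.println(Math.sqrt(eleDelta * eleDelta + len * len)); // 5.0
    }
}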