focal_method (string, length 13-60.9k) | test_case (string, length 25-109k) |
---|---|
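// Returns true if t itself, or any throwable in its cause chain, is assignable to classToFind.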
public static boolean isOrHasCause(Throwable t, Class<?> classToFind) {
while (t != null && t.getCause() != t && !classToFind.isAssignableFrom(t.getClass())) {
t = t.getCause();
}
return t != null && classToFind.isAssignableFrom(t.getClass());
} | @Test
public void test_isOrHasCause_when_exceptionHasExpectedType() {
RuntimeException e = new RuntimeException("foo");
assertTrue(isOrHasCause(e, RuntimeException.class));
} |
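// Double-checked locking: return the cached value if present; otherwise synchronize on the mutex, re-check, and create the value via the constructor function.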
public static <K, V> V getOrPutSynchronized(ConcurrentMap<K, V> map, K key, final Object mutex,
ConstructorFunction<K, V> func) {
if (mutex == null) {
throw new NullPointerException();
}
V value = map.get(key);
if (value == null) {
synchronized (mutex) {
value = map.get(key);
if (value == null) {
value = func.createNew(key);
map.put(key, value);
}
}
}
return value;
} | @Test
public void testGetOrPutSynchronized_withMutexFactory() {
int result = ConcurrencyUtil.getOrPutSynchronized(map, 5, mutexFactory, constructorFunction);
assertEquals(1005, result);
assertEquals(1, constructorFunction.getConstructions());
} |
public OptimizerParserContext getParserContext(final String databaseName) {
return parserContexts.get(databaseName);
} | @Test
void assertGetParserContext() {
OptimizerContext actual = OptimizerContextFactory.create(Collections.singletonMap(DefaultDatabase.LOGIC_NAME, createShardingSphereDatabase()));
assertThat(actual.getParserContext(DefaultDatabase.LOGIC_NAME), instanceOf(OptimizerParserContext.class));
} |
public static JavaToSqlTypeConverter javaToSqlConverter() {
return JAVA_TO_SQL_CONVERTER;
} | @Test
public void shouldThrowOnUnknownJavaType() {
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> javaToSqlConverter().toSqlType(double.class)
);
// Then:
assertThat(e.getMessage(), containsString("Unexpected java type: " + double.class));
} |
public static Map<String, String> setLoggerLevel(String loggerName, String logLevel) {
Level level;
try {
level = Level.valueOf(logLevel);
} catch (Exception e) {
throw new RuntimeException("Unrecognized logger level - " + logLevel, e);
}
LoggerContext context = (LoggerContext) LogManager.getContext(false);
Configuration config = context.getConfiguration();
LoggerConfig loggerConfig;
if (getAllConfiguredLoggers().contains(loggerName)) {
loggerConfig = getLoggerConfig(config, loggerName);
loggerConfig.setLevel(level);
} else {
// Check if the loggerName exists by comparing it to all known loggers in the context
if (getAllLoggers().stream().noneMatch(logger -> {
if (!logger.startsWith(loggerName)) {
return false;
}
if (logger.equals(loggerName)) {
return true;
}
// Check if loggerName is a valid parent / descendant logger for any known logger
return logger.substring(loggerName.length()).startsWith(".");
})) {
throw new RuntimeException("Logger - " + loggerName + " not found");
}
loggerConfig = new LoggerConfig(loggerName, level, true);
config.addLogger(loggerName, loggerConfig);
}
// This causes all Loggers to re-fetch information from their LoggerConfig.
context.updateLoggers();
return getLoggerResponse(loggerConfig);
} | @Test
public void testChangeLoggerLevelWithExceptions() {
try {
LoggerUtils.setLoggerLevel("notExistLogger", "INFO");
fail("Shouldn't reach here");
} catch (RuntimeException e) {
assertEquals(e.getMessage(), "Logger - notExistLogger not found");
}
try {
LoggerUtils.setLoggerLevel(ROOT, "NotALevel");
fail("Shouldn't reach here");
} catch (RuntimeException e) {
assertEquals(e.getMessage(), "Unrecognized logger level - NotALevel");
}
} |
@Override
public ExecuteContext after(ExecuteContext context) {
ThreadLocalUtils.removeRequestData();
LogUtils.printHttpRequestAfterPoint(context);
return context;
} | @Test
public void testAfter() {
ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
interceptor.after(context);
Assert.assertNull(ThreadLocalUtils.getRequestData());
} |
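// Converts a Java type name (e.g. "int[][][]") to its JVM type descriptor (e.g. "[[[I").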
public static String name2desc(String name) {
StringBuilder sb = new StringBuilder();
int c = 0, index = name.indexOf('[');
if (index > 0) {
c = (name.length() - index) / 2;
name = name.substring(0, index);
}
while (c-- > 0) {
sb.append('[');
}
if ("void".equals(name)) {
sb.append(JVM_VOID);
} else if ("boolean".equals(name)) {
sb.append(JVM_BOOLEAN);
} else if ("byte".equals(name)) {
sb.append(JVM_BYTE);
} else if ("char".equals(name)) {
sb.append(JVM_CHAR);
} else if ("double".equals(name)) {
sb.append(JVM_DOUBLE);
} else if ("float".equals(name)) {
sb.append(JVM_FLOAT);
} else if ("int".equals(name)) {
sb.append(JVM_INT);
} else if ("long".equals(name)) {
sb.append(JVM_LONG);
} else if ("short".equals(name)) {
sb.append(JVM_SHORT);
} else {
sb.append('L').append(name.replace('.', '/')).append(';');
}
return sb.toString();
} | @Test
void testName2desc() {
// name2desc
assertEquals("Z", ReflectUtils.name2desc(ReflectUtils.getName(boolean.class)));
assertEquals("[[[I", ReflectUtils.name2desc(ReflectUtils.getName(int[][][].class)));
assertEquals("[[Ljava/lang/Object;", ReflectUtils.name2desc(ReflectUtils.getName(Object[][].class)));
} |
public static boolean isDockerInstalled(Path dockerExecutable) {
return Files.exists(dockerExecutable);
} | @Test
public void testIsDockerInstalled_fail() {
Assert.assertFalse(CliDockerClient.isDockerInstalled(Paths.get("path/to/nonexistent/file")));
} |
@Override
public void write(T record) {
recordConsumer.startMessage();
try {
messageWriter.writeTopLevelMessage(record);
} catch (RuntimeException e) {
Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
LOG.error("Cannot write message {}: {}", e.getMessage(), m);
throw e;
}
recordConsumer.endMessage();
} | @Test
public void testProto3RepeatedIntMessage() throws Exception {
RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
ProtoWriteSupport<TestProto3.RepeatedIntMessage> instance =
createReadConsumerInstance(TestProto3.RepeatedIntMessage.class, readConsumerMock);
TestProto3.RepeatedIntMessage.Builder msg = TestProto3.RepeatedIntMessage.newBuilder();
msg.addRepeatedInt(1323);
msg.addRepeatedInt(54469);
instance.write(msg.build());
InOrder inOrder = Mockito.inOrder(readConsumerMock);
inOrder.verify(readConsumerMock).startMessage();
inOrder.verify(readConsumerMock).startField("repeatedInt", 0);
inOrder.verify(readConsumerMock).addInteger(1323);
inOrder.verify(readConsumerMock).addInteger(54469);
inOrder.verify(readConsumerMock).endField("repeatedInt", 0);
inOrder.verify(readConsumerMock).endMessage();
Mockito.verifyNoMoreInteractions(readConsumerMock);
} |
@Override
public boolean isDetected() {
return environmentVariableIsTrue("CI") && environmentVariableIsTrue("BITRISE_IO");
} | @Test
public void isDetected() {
assertThat(underTest.isDetected()).isFalse();
setEnvVariable("CI", "true");
assertThat(underTest.isDetected()).isFalse();
setEnvVariable("CI", "true");
setEnvVariable("BITRISE_IO", "false");
assertThat(underTest.isDetected()).isFalse();
setEnvVariable("CI", "true");
setEnvVariable("BITRISE_IO", "true");
assertThat(underTest.isDetected()).isTrue();
} |
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
} | @Test
public void testMissingGetterThrows() throws Exception {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(
"Expected getter for property [object] of type [java.lang.Object] on "
+ "[org.apache.beam.sdk.options.PipelineOptionsFactoryTest$MissingGetter].");
PipelineOptionsFactory.as(MissingGetter.class);
} |
public CompletableFuture<VertexThreadInfoStats> triggerThreadInfoRequest(
Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
executionsWithGateways,
int numSamples,
Duration delayBetweenSamples,
int maxStackTraceDepth) {
checkNotNull(executionsWithGateways, "Tasks to sample");
checkArgument(executionsWithGateways.size() > 0, "No tasks to sample");
checkArgument(numSamples >= 1, "No number of samples");
checkArgument(maxStackTraceDepth >= 0, "Negative maximum stack trace depth");
// Execution IDs of running tasks grouped by the task manager
Collection<ImmutableSet<ExecutionAttemptID>> runningSubtasksIds =
executionsWithGateways.keySet();
synchronized (lock) {
if (isShutDown) {
return FutureUtils.completedExceptionally(new IllegalStateException("Shut down"));
}
final int requestId = requestIdCounter++;
log.debug("Triggering thread info request {}", requestId);
final PendingThreadInfoRequest pending =
new PendingThreadInfoRequest(requestId, runningSubtasksIds);
// requestTimeout is treated as the time on top of the expected sampling duration.
// Discard the request if it takes too long. We don't send cancel
// messages to the task managers, but only wait for the responses
// and then ignore them.
long expectedDuration = numSamples * delayBetweenSamples.toMillis();
Time timeout = Time.milliseconds(expectedDuration + requestTimeout.toMillis());
// Add the pending request before scheduling the discard task to
// prevent races with removing it again.
pendingRequests.put(requestId, pending);
ThreadInfoSamplesRequest requestParams =
new ThreadInfoSamplesRequest(
requestId, numSamples, delayBetweenSamples, maxStackTraceDepth);
requestThreadInfo(executionsWithGateways, requestParams, timeout);
return pending.getStatsFuture();
}
} | @Test
void testThreadInfoRequestWithException() throws Exception {
Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
executionWithGateways =
createMockSubtaskWithGateways(
CompletionType.SUCCESSFULLY, CompletionType.EXCEPTIONALLY);
CompletableFuture<VertexThreadInfoStats> requestFuture =
coordinator.triggerThreadInfoRequest(
executionWithGateways,
DEFAULT_NUMBER_OF_SAMPLES,
DEFAULT_DELAY_BETWEEN_SAMPLES,
DEFAULT_MAX_STACK_TRACE_DEPTH);
assertThatThrownBy(requestFuture::get, "The request must be failed.")
.isInstanceOf(ExecutionException.class)
.hasCauseInstanceOf(RuntimeException.class);
} |
public static AccessTokenValidator create(Map<String, ?> configs) {
return create(configs, (String) null);
} | @Test
public void testConfigureThrowsExceptionOnAccessTokenValidatorClose() {
OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler();
AccessTokenRetriever accessTokenRetriever = new AccessTokenRetriever() {
@Override
public void close() throws IOException {
throw new IOException("My close had an error!");
}
@Override
public String retrieve() {
return "dummy";
}
};
Map<String, ?> configs = getSaslConfigs();
AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs);
handler.init(accessTokenRetriever, accessTokenValidator);
// Basically asserting this doesn't throw an exception :(
handler.close();
} |
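// Normalizes a list of path components: "." entries are dropped and ".." removes the preceding component.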
public static List<String> stripDotPathComponents(List<String> input) {
List<String> output = new ArrayList<>();
for (String string : input) {
if (string.equals("..")) {
if (!output.isEmpty()) {
output.remove(output.size() - 1);
}
} else if (!string.equals(".")) {
output.add(string);
}
}
return output;
} | @Test
public void testStripDotPathComponents() {
//double dots
assertEquals(Arrays.asList("keep", "keep2"), CommandUtils.stripDotPathComponents(Arrays.asList("..", "keep", "keep2")));
//single dots
assertEquals(Arrays.asList("keep", "keep2"), CommandUtils.stripDotPathComponents(Arrays.asList(".", "keep", "keep2")));
assertEquals(Arrays.asList(".keep", "keep2"), CommandUtils.stripDotPathComponents(Arrays.asList(".", ".keep", "keep2")));
assertEquals(Arrays.asList(".keep", "keep2"), CommandUtils.stripDotPathComponents(Arrays.asList("..", ".keep", "keep2")));
} |
public static UNewClass create(
UExpression enclosingExpression,
List<? extends UExpression> typeArguments,
UExpression identifier,
List<UExpression> arguments,
@Nullable UClassDecl classBody) {
return new AutoValue_UNewClass(
enclosingExpression,
ImmutableList.copyOf(typeArguments),
identifier,
ImmutableList.copyOf(arguments),
classBody);
} | @Test
public void serialization() {
SerializableTester.reserializeAndAssert(
UNewClass.create(UClassIdent.create("java.lang.String"), ULiteral.stringLit("123")));
} |
@Override
public void updateSmsReceiveResult(Long id, Boolean success, LocalDateTime receiveTime,
String apiReceiveCode, String apiReceiveMsg) {
SmsReceiveStatusEnum receiveStatus = Objects.equals(success, true) ?
SmsReceiveStatusEnum.SUCCESS : SmsReceiveStatusEnum.FAILURE;
smsLogMapper.updateById(SmsLogDO.builder().id(id).receiveStatus(receiveStatus.getStatus())
.receiveTime(receiveTime).apiReceiveCode(apiReceiveCode).apiReceiveMsg(apiReceiveMsg).build());
} | @Test
public void testUpdateSmsReceiveResult() {
// mock data
SmsLogDO dbSmsLog = randomSmsLogDO(
o -> o.setReceiveStatus(SmsReceiveStatusEnum.INIT.getStatus()));
smsLogMapper.insert(dbSmsLog);
// prepare parameters
Long id = dbSmsLog.getId();
Boolean success = randomBoolean();
LocalDateTime receiveTime = randomLocalDateTime();
String apiReceiveCode = randomString();
String apiReceiveMsg = randomString();
// invoke
smsLogService.updateSmsReceiveResult(id, success, receiveTime, apiReceiveCode, apiReceiveMsg);
// assert
dbSmsLog = smsLogMapper.selectById(id);
assertEquals(success ? SmsReceiveStatusEnum.SUCCESS.getStatus()
: SmsReceiveStatusEnum.FAILURE.getStatus(), dbSmsLog.getReceiveStatus());
assertEquals(receiveTime, dbSmsLog.getReceiveTime());
assertEquals(apiReceiveCode, dbSmsLog.getApiReceiveCode());
assertEquals(apiReceiveMsg, dbSmsLog.getApiReceiveMsg());
} |
public static Duration between(LocalDateTime startTimeInclude, LocalDateTime endTimeExclude) {
return TemporalUtil.between(startTimeInclude, endTimeExclude);
} | @Test
public void between() {
final Duration between = LocalDateTimeUtil.between(
LocalDateTimeUtil.parse("2019-02-02T00:00:00"),
LocalDateTimeUtil.parse("2020-02-02T00:00:00"));
assertEquals(365, between.toDays());
} |
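// Copy-on-write update: replaces the cache map with a copy containing the new entry, guarded by a lock on cacheMap.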
private void putCache(String key, CacheData cache) {
synchronized (cacheMap) {
Map<String, CacheData> copy = new HashMap<>(this.cacheMap.get());
copy.put(key, cache);
cacheMap.set(copy);
}
} | @Test
void testPutCache() throws Exception {
// reflectively invoke the private putCache method
Method putCacheMethod = ClientWorker.class.getDeclaredMethod("putCache", String.class, CacheData.class);
putCacheMethod.setAccessible(true);
Properties prop = new Properties();
ConfigFilterChainManager filter = new ConfigFilterChainManager(new Properties());
ServerListManager agent = Mockito.mock(ServerListManager.class);
final NacosClientProperties nacosClientProperties = NacosClientProperties.PROTOTYPE.derive(prop);
ClientWorker clientWorker = new ClientWorker(filter, agent, nacosClientProperties);
String key = "testKey";
CacheData cacheData = new CacheData(filter, "env", "dataId", "group");
putCacheMethod.invoke(clientWorker, key, cacheData);
Field cacheMapField = ClientWorker.class.getDeclaredField("cacheMap");
cacheMapField.setAccessible(true);
AtomicReference<Map<String, CacheData>> cacheMapRef = (AtomicReference<Map<String, CacheData>>) cacheMapField.get(
clientWorker);
// verify that cacheMap contains the specific key
assertNotNull(cacheMapRef.get().get(key));
assertEquals(cacheData, cacheMapRef.get().get(key));
// inserting the same key again should overwrite the original value
CacheData newCacheData = new CacheData(filter, "newEnv", "newDataId", "newGroup");
putCacheMethod.invoke(clientWorker, key, newCacheData);
// verify that the value for the key has changed to newCacheData
assertEquals(newCacheData, cacheMapRef.get().get(key));
} |
@Override
public boolean test(Pair<Point, Point> pair) {
if (timeDeltaIsSmall(pair.first().time(), pair.second().time())) {
return distIsSmall(pair);
} else {
/*
* reject points with large time deltas because we don't want to rely on a numerically
* unstable process
*/
return false;
}
} | @Test
public void testCase2() {
DistanceFilter filter = newTestFilter();
LatLong position1 = new LatLong(0.0, 0.0);
double notTooFarInNm = MAX_DISTANCE_IN_FEET * 0.5 / Spherical.feetPerNM();
Point p1 = new PointBuilder()
.latLong(position1)
.time(Instant.EPOCH)
.altitude(Distance.ofFeet(500.0))
.build();
Point p2 = new PointBuilder()
.latLong(position1.projectOut(90.0, notTooFarInNm)) //move the position
.time(Instant.EPOCH.plusMillis(MAX_TIME_DELTA_IN_MILLISEC * 2))
.altitude(Distance.ofFeet(500.0))
.build();
assertFalse(filter.test(Pair.of(p1, p2)));
assertFalse(filter.test(Pair.of(p2, p1)));
} |
@Override
public Mono<RemoveDeviceResponse> removeDevice(final RemoveDeviceRequest request) {
if (request.getId() == Device.PRIMARY_ID) {
throw Status.INVALID_ARGUMENT.withDescription("Cannot remove primary device").asRuntimeException();
}
final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();
if (authenticatedDevice.deviceId() != Device.PRIMARY_ID && request.getId() != authenticatedDevice.deviceId()) {
throw Status.PERMISSION_DENIED
.withDescription("Linked devices cannot remove devices other than themselves")
.asRuntimeException();
}
final byte deviceId = DeviceIdUtil.validate(request.getId());
return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier()))
.map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
.flatMap(account -> Mono.fromFuture(accountsManager.removeDevice(account, deviceId)))
.thenReturn(RemoveDeviceResponse.newBuilder().build());
} | @Test
void removeDeviceNonPrimaryMismatchAuthenticated() {
mockAuthenticationInterceptor().setAuthenticatedDevice(AUTHENTICATED_ACI, (byte) (Device.PRIMARY_ID + 1));
assertStatusException(Status.PERMISSION_DENIED, () -> authenticatedServiceStub().removeDevice(RemoveDeviceRequest.newBuilder()
.setId(17)
.build()));
verify(accountsManager, never()).removeDevice(any(), anyByte());
} |
@Override
public void close() {
if (metrics != null) {
metrics.removeSensor(errorSensor.name());
}
logger.close();
} | @Test
public void shouldRemoveSensorOnClose() {
// When:
meteredProcessingLogger.close();
// Then:
verify(metrics).removeSensor(sensorName);
verify(processingLogger).close();
} |
@VisibleForTesting
static CPUResource getDefaultCpus(Configuration configuration) {
double fallback = configuration.get(KubernetesConfigOptions.TASK_MANAGER_CPU);
return TaskExecutorProcessUtils.getCpuCoresWithFallback(configuration, fallback);
} | @Test
void testGetCpuCoresKubernetesOption() {
final Configuration configuration = new Configuration();
configuration.set(KubernetesConfigOptions.TASK_MANAGER_CPU, 2.0);
configuration.set(KubernetesConfigOptions.TASK_MANAGER_CPU_LIMIT_FACTOR, 1.5);
configuration.set(TaskManagerOptions.NUM_TASK_SLOTS, 3);
assertThat(KubernetesWorkerResourceSpecFactory.getDefaultCpus(configuration))
.isEqualTo(new CPUResource(2.0));
} |
public final Sensor threadLevelSensor(final String threadId,
final String sensorSuffix,
final RecordingLevel recordingLevel,
final Sensor... parents) {
final String sensorPrefix = threadSensorPrefix(threadId);
synchronized (threadLevelSensors) {
return getSensors(threadLevelSensors, sensorSuffix, sensorPrefix, recordingLevel, parents);
}
} | @Test
public void shouldGetExistingThreadLevelSensor() {
final Metrics metrics = mock(Metrics.class);
final RecordingLevel recordingLevel = RecordingLevel.INFO;
setupGetExistingSensorTest(metrics);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);
final Sensor actualSensor = streamsMetrics.threadLevelSensor(THREAD_ID1, SENSOR_NAME_1, recordingLevel);
assertThat(actualSensor, is(equalToObject(sensor)));
} |
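// Serializes a delegation token into a single-entry JSON map keyed by "urlString", or returns null for a null token.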
private static Map<String, Object> toJsonMap(
final Token<? extends TokenIdentifier> token) throws IOException {
if (token == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("urlString", token.encodeToUrlString());
return m;
} | @Test
public void testToDatanodeInfoWithName() throws Exception {
Map<String, Object> response = new HashMap<String, Object>();
// Older servers (1.x, 0.23, etc.) send 'name' instead of ipAddr
// and xferPort.
String name = "127.0.0.1:1004";
response.put("name", name);
response.put("hostName", "localhost");
response.put("storageID", "fake-id");
response.put("infoPort", 1338l);
response.put("ipcPort", 1339l);
response.put("capacity", 1024l);
response.put("dfsUsed", 512l);
response.put("remaining", 512l);
response.put("blockPoolUsed", 512l);
response.put("lastUpdate", 0l);
response.put("xceiverCount", 4096l);
response.put("networkLocation", "foo.bar.baz");
response.put("adminState", "NORMAL");
response.put("cacheCapacity", 123l);
response.put("cacheUsed", 321l);
DatanodeInfo di = JsonUtilClient.toDatanodeInfo(response);
Assert.assertEquals(name, di.getXferAddr());
// The encoded result should contain name, ipAddr and xferPort.
Map<String, Object> r = JsonUtil.toJsonMap(di);
Assert.assertEquals(name, r.get("name"));
Assert.assertEquals("127.0.0.1", r.get("ipAddr"));
// In this test, it is Integer instead of Long since json was not actually
// involved in constructing the map.
Assert.assertEquals(1004, (int)(Integer)r.get("xferPort"));
// Invalid names
String[] badNames = {"127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet", ":123"};
for (String badName : badNames) {
response.put("name", badName);
checkDecodeFailure(response);
}
// Missing both name and ipAddr
response.remove("name");
checkDecodeFailure(response);
// Only missing xferPort
response.put("ipAddr", "127.0.0.1");
checkDecodeFailure(response);
} |
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
if(file.isPlaceholder()) {
final DescriptiveUrl link = new DriveUrlProvider().toUrl(file).find(DescriptiveUrl.Type.http);
if(DescriptiveUrl.EMPTY.equals(link)) {
log.warn(String.format("Missing web link for file %s", file));
return new NullInputStream(file.attributes().getSize());
}
// Write web link file
return IOUtils.toInputStream(UrlFileWriterFactory.get().write(link), Charset.defaultCharset());
}
else {
final HttpHeaders headers = new HttpHeaders();
headers.setContentType(MEDIA_TYPE);
if(status.isAppend()) {
final HttpRange range = HttpRange.withStatus(status);
final String header;
if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
header = String.format("bytes=%d-", range.getStart());
}
else {
header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
}
if(log.isDebugEnabled()) {
log.debug(String.format("Add range header %s for file %s", header, file));
}
headers.setRange(header);
// Disable compression
headers.setAcceptEncoding("identity");
}
if(file.attributes().isDuplicate()) {
// Read previous version
try {
final Drive.Revisions.Get request = session.getClient().revisions().get(fileid.getFileId(file), file.attributes().getVersionId());
request.setRequestHeaders(headers);
return request.executeMediaAsInputStream();
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
}
}
else {
try {
try {
final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
request.setRequestHeaders(headers);
request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
return request.executeMediaAsInputStream();
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
}
}
catch(RetriableAccessDeniedException e) {
throw e;
}
catch(AccessDeniedException e) {
if(!PreferencesFactory.get().getBoolean(String.format("connection.unsecure.download.%s", session.getHost().getHostname()))) {
// Not previously dismissed
callback.warn(session.getHost(),
MessageFormat.format(LocaleFactory.localizedString("Download {0} failed", "Error"), file.getName()),
"Acknowledge the risk of downloading known malware or other abusive file.",
LocaleFactory.localizedString("Continue", "Credentials"), LocaleFactory.localizedString("Cancel", "Localizable"),
String.format("connection.unsecure.download.%s", session.getHost().getHostname()));
}
try {
final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
request.setAcknowledgeAbuse(true);
request.setRequestHeaders(headers);
request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
return request.executeMediaAsInputStream();
}
catch(IOException f) {
throw new DriveExceptionMappingService(fileid).map("Download {0} failed", f, file);
}
}
}
}
} | @Test
public void testReadRevision() throws Exception {
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path directory = new DriveDirectoryFeature(session, fileid).mkdir(
new Path(MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path test = new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(1645);
{
final TransferStatus status = new TransferStatus().withLength(content.length);
final DriveWriteFeature writer = new DriveWriteFeature(session, fileid);
final HttpResponseOutputStream<File> out = writer.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
}
final Path versioned = new DriveVersioningFeature(session, fileid).list(test, new DisabledListProgressListener()).find(new SimplePathPredicate(test));
assertNotNull(versioned.attributes().getVersionId());
assertTrue(versioned.attributes().isDuplicate());
assertEquals(content.length, versioned.attributes().getSize());
assertArrayEquals(content, IOUtils.readFully(new DriveReadFeature(session, fileid).read(versioned, new TransferStatus(), new DisabledConnectionCallback()), content.length));
// New version
{
final byte[] newcontent = RandomUtils.nextBytes(1045);
final TransferStatus status = new TransferStatus().withLength(newcontent.length);
final DriveWriteFeature writer = new DriveWriteFeature(session, fileid);
final HttpResponseOutputStream<File> out = writer.write(test, status.exists(true), new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(newcontent), out);
}
assertEquals(2, new DriveVersioningFeature(session, fileid).list(test, new DisabledListProgressListener()).size());
// Permanently delete revision
//new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(versioned), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertTrue(new DriveFindFeature(session, fileid).find(test));
new DriveDeleteFeature(session, fileid).delete(Arrays.asList(test, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public void close() {
client.close();
EVENT_LISTENER_EXECUTOR.shutdown();
} | @Test
void assertClose() {
repository.close();
verify(client).close();
} |
public static Host parse(final String url) throws HostParserException {
final Host parsed = new HostParser().get(url);
if(log.isDebugEnabled()) {
log.debug(String.format("Parsed %s as %s", url, parsed));
}
return parsed;
} | @Test
public void parse() throws HostParserException {
final Host host = new HostParser(new ProtocolFactory(Collections.singleton(new TestProtocol(Scheme.https))))
.get("https://t%40u@host:443/key");
assertEquals("host", host.getHostname());
assertEquals(443, host.getPort());
assertEquals("t@u", host.getCredentials().getUsername());
assertEquals("/key", host.getDefaultPath());
} |
public boolean isPathValid(String path) {
return path == null || XmlUtils.matchUsingRegex(PATH_PATTERN_REGEX, path);
} | @Test
public void shouldEnsurePathIsRelative() {
assertThat(filePathTypeValidator.isPathValid(".."), is(false));
assertThat(filePathTypeValidator.isPathValid("../a"), is(false));
assertThat(filePathTypeValidator.isPathValid(" "), is(false));
assertThat(filePathTypeValidator.isPathValid("./a"), is(true));
assertThat(filePathTypeValidator.isPathValid(". "), is(false));
assertThat(filePathTypeValidator.isPathValid(" ."), is(false));
assertThat(filePathTypeValidator.isPathValid("abc"), is(true));
} |
@Override
public void onNotificationClearRequest(int id) {
final NotificationManager notificationManager = (NotificationManager) mContext.getSystemService(Context.NOTIFICATION_SERVICE);
notificationManager.cancel(id);
} | @Test
public void onNotificationClearRequest_clearSpecificNotification() throws Exception {
createUUT().onNotificationClearRequest(666);
verify(mNotificationManager).cancel(eq(666));
verify(mNotificationManager, never()).cancelAll();
} |
public static String jsonFromMap(Map<String, Object> jsonData) {
try {
JsonDocument json = new JsonDocument();
json.startGroup();
for (String key : jsonData.keySet()) {
Object data = jsonData.get(key);
if (data instanceof Map) {
/* it's a nested map, so we'll recursively add the JSON of this map to the current JSON */
json.addValue(key, jsonFromMap((Map<String, Object>) data));
} else if (data instanceof Object[]) {
/* it's an object array, so we'll iterate the elements and put them all in here */
json.addValue(key, "[" + stringArrayFromObjectArray((Object[]) data) + "]");
} else if (data instanceof Collection) {
/* it's a collection, so we'll iterate the elements and put them all in here */
json.addValue(key, "[" + stringArrayFromObjectArray(((Collection) data).toArray()) + "]");
} else if (data instanceof int[]) {
/* it's an int array, so we'll get the string representation */
String intArray = Arrays.toString((int[]) data);
/* remove whitespace */
intArray = intArray.replaceAll(" ", "");
json.addValue(key, intArray);
} else if (data instanceof JsonCapableObject) {
json.addValue(key, jsonFromMap(((JsonCapableObject) data).jsonMap()));
} else {
/* all other objects we assume we are to just put the string value in */
json.addValue(key, String.valueOf(data));
}
}
json.endGroup();
logger.debug("created json from map => {}", json);
return json.toString();
} catch (Exception e) {
logger.error("Could not create JSON from Map. ", e);
return "{}";
}
} | @Test
void testSimpleTwo() {
Map<String, Object> jsonData = new LinkedHashMap<String, Object>();
jsonData.put("myKey", "myValue");
jsonData.put("myKey2", "myValue2");
String json = JsonUtility.jsonFromMap(jsonData);
String expected = "{\"myKey\":\"myValue\",\"myKey2\":\"myValue2\"}";
assertEquals(expected, json);
} |
@Override
public void registerService(String serviceName, String groupName, Instance instance) throws NacosException {
getExecuteClientProxy(instance).registerService(serviceName, groupName, instance);
} | @Test
void testRegisterEphemeralServiceByGrpc() throws NacosException {
String serviceName = "service1";
String groupName = "group1";
Instance instance = new Instance();
instance.setServiceName(serviceName);
instance.setClusterName(groupName);
instance.setIp("1.1.1.1");
instance.setPort(1);
instance.setEphemeral(true);
delegate.registerService(serviceName, groupName, instance);
verify(mockGrpcClient, times(1)).registerService(serviceName, groupName, instance);
} |
@VisibleForTesting
Map<String, List<Operation>> computeOperations(SegmentDirectory.Reader segmentReader)
throws Exception {
Map<String, List<Operation>> columnOperationsMap = new HashMap<>();
// Does not work for segment versions < V3.
if (_segmentDirectory.getSegmentMetadata().getVersion().compareTo(SegmentVersion.v3) < 0) {
return columnOperationsMap;
}
Set<String> existingAllColumns = _segmentDirectory.getSegmentMetadata().getAllColumns();
Set<String> existingDictColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.dictionary());
Set<String> existingForwardIndexColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.forward());
for (String column : existingAllColumns) {
if (_schema != null && !_schema.hasColumn(column)) {
// _schema will be null only in tests
LOGGER.info("Column {} is not in schema, skipping updating forward index", column);
continue;
}
boolean existingHasDict = existingDictColumns.contains(column);
boolean existingHasFwd = existingForwardIndexColumns.contains(column);
FieldIndexConfigs newConf = _fieldIndexConfigs.get(column);
boolean newIsFwd = newConf.getConfig(StandardIndexes.forward()).isEnabled();
boolean newIsDict = newConf.getConfig(StandardIndexes.dictionary()).isEnabled();
boolean newIsRange = newConf.getConfig(StandardIndexes.range()).isEnabled();
if (existingHasFwd && !newIsFwd) {
// Existing column has a forward index. New column config disables the forward index
ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
if (columnMetadata.isSorted()) {
// Check if the column is sorted. If sorted, disabling forward index should be a no-op. Do not return an
// operation for this column related to disabling forward index.
LOGGER.warn("Trying to disable the forward index for a sorted column {}, ignoring", column);
continue;
}
if (existingHasDict) {
if (!newIsDict) {
// Dictionary was also disabled. Just disable the dictionary and remove it along with the forward index
// If range index exists, don't try to regenerate it on toggling the dictionary, throw an error instead
Preconditions.checkState(!newIsRange, String.format(
"Must disable range (enabled) index to disable the dictionary and forward index for column: %s or "
+ "refresh / back-fill the forward index", column));
columnOperationsMap.put(column,
Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.DISABLE_DICTIONARY));
} else {
// Dictionary is still enabled, keep it but remove the forward index
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
}
} else {
if (!newIsDict) {
// Dictionary remains disabled and we should not reconstruct temporary forward index as dictionary based
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
} else {
// Dictionary is enabled, creation of dictionary and conversion to dictionary based forward index is needed
columnOperationsMap.put(column,
Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.ENABLE_DICTIONARY));
}
}
} else if (!existingHasFwd && newIsFwd) {
// Existing column does not have a forward index. New column config enables the forward index
ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
if (columnMetadata != null && columnMetadata.isSorted()) {
// Check if the column is sorted. If sorted, disabling forward index should be a no-op and forward index
// should already exist. Do not return an operation for this column related to enabling forward index.
LOGGER.warn("Trying to enable the forward index for a sorted column {}, ignoring", column);
continue;
}
// Get list of columns with inverted index
Set<String> existingInvertedIndexColumns =
segmentReader.toSegmentDirectory().getColumnsWithIndex(StandardIndexes.inverted());
if (!existingHasDict || !existingInvertedIndexColumns.contains(column)) {
// If either dictionary or inverted index is missing on the column there is no way to re-generate the forward
// index. Treat this as a no-op and log a warning.
LOGGER.warn("Trying to enable the forward index for a column {} missing either the dictionary ({}) and / or "
+ "the inverted index ({}) is not possible. Either a refresh or back-fill is required to get the "
+ "forward index, ignoring", column, existingHasDict ? "enabled" : "disabled",
existingInvertedIndexColumns.contains(column) ? "enabled" : "disabled");
continue;
}
columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_FORWARD_INDEX));
} else if (!existingHasFwd) {
// Forward index is disabled for the existing column and should remain disabled based on the latest config
// Need some checks to see whether the dictionary is being enabled or disabled here and take appropriate actions
// If the dictionary is not enabled on the existing column it must be on the new noDictionary column list.
// Cannot enable the dictionary for a column with forward index disabled.
Preconditions.checkState(existingHasDict || !newIsDict,
String.format("Cannot regenerate the dictionary for column %s with forward index disabled. Please "
+ "refresh or back-fill the data to add back the forward index", column));
if (existingHasDict && !newIsDict) {
// Dictionary is currently enabled on this column but is supposed to be disabled. Remove the dictionary
// and update the segment metadata If the range index exists then throw an error since we are not
// regenerating the range index on toggling the dictionary
Preconditions.checkState(!newIsRange, String.format(
"Must disable range (enabled) index to disable the dictionary for a forwardIndexDisabled column: %s or "
+ "refresh / back-fill the forward index", column));
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
}
} else if (!existingHasDict && newIsDict) {
// Existing column is RAW. New column is dictionary enabled.
if (_schema == null || _tableConfig == null) {
// This can only happen in tests.
LOGGER.warn("Cannot enable dictionary for column={} as schema or tableConfig is null.", column);
continue;
}
ColumnMetadata existingColumnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
if (DictionaryIndexType.ignoreDictionaryOverride(_tableConfig.getIndexingConfig().isOptimizeDictionary(),
_tableConfig.getIndexingConfig().isOptimizeDictionaryForMetrics(),
_tableConfig.getIndexingConfig().getNoDictionarySizeRatioThreshold(), existingColumnMetadata.getFieldSpec(),
_fieldIndexConfigs.get(column), existingColumnMetadata.getCardinality(),
existingColumnMetadata.getTotalNumberOfEntries())) {
columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_DICTIONARY));
}
} else if (existingHasDict && !newIsDict) {
// Existing column has dictionary. New config for the column is RAW.
if (shouldDisableDictionary(column, _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column))) {
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
}
} else if (!existingHasDict) {
// Both existing and new column is RAW forward index encoded. Check if compression needs to be changed.
// TODO: Also check if raw index version needs to be changed
if (shouldChangeRawCompressionType(column, segmentReader)) {
columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
}
} else {
// Both existing and new column is dictionary encoded. Check if compression needs to be changed.
if (shouldChangeDictIdCompressionType(column, segmentReader)) {
columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
}
}
}
return columnOperationsMap;
} | @Test
public void testComputeOperationEnableDictionary()
throws Exception {
// Setup
SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
SegmentDirectory segmentLocalFSDirectory =
new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
// TEST1: Enable dictionary for a RAW_ZSTANDARD_INDEX_COLUMN.
IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeNoDictionaryColumns(DIM_ZSTANDARD_STRING);
ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
Map<String, List<ForwardIndexHandler.Operation>> operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.get(DIM_ZSTANDARD_STRING),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_DICTIONARY));
// TEST2: Enable dictionary for an MV column.
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeNoDictionaryColumns(DIM_MV_PASS_THROUGH_STRING);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.get(DIM_MV_PASS_THROUGH_STRING),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_DICTIONARY));
// TEST3: Enable dictionary for a dict column. Should be a No-op.
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap, Collections.EMPTY_MAP);
// TEST4: Add an additional text index. ForwardIndexHandler should be a No-Op.
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.addTextIndexColumns(DIM_DICT_INTEGER);
indexLoadingConfig.addTextIndexColumns(DIM_LZ4_INTEGER);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap, Collections.EMPTY_MAP);
// TEST5: Add range index and enable dictionary.
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.addRangeIndexColumns(METRIC_LZ4_INTEGER);
indexLoadingConfig.removeNoDictionaryColumns(METRIC_LZ4_INTEGER);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.get(METRIC_LZ4_INTEGER),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_DICTIONARY));
// TEST6: Enable Dictionary for sorted column.
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeNoDictionaryColumns(DIM_RAW_SORTED_INTEGER);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.get(DIM_RAW_SORTED_INTEGER),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_DICTIONARY));
// Tear down
segmentLocalFSDirectory.close();
} |
public LeadControllerManager(String helixControllerInstanceId, HelixManager helixManager,
ControllerMetrics controllerMetrics) {
_helixControllerInstanceId = helixControllerInstanceId;
_helixManager = helixManager;
_controllerMetrics = controllerMetrics;
_leadForPartitions = ConcurrentHashMap.newKeySet();
// Create a thread to periodically fetch controller leadership as a work-around of Helix callback delay
_controllerLeadershipFetchingThread = new Thread("ControllerLeadershipFetchingThread") {
@Override
public void run() {
while (true) {
try {
synchronized (LeadControllerManager.this) {
if (_isShuttingDown) {
return;
}
if (isHelixLeader()) {
if (!_amIHelixLeader) {
_amIHelixLeader = true;
LOGGER.warn("Becoming leader without getting Helix change callback");
_controllerMetrics
.addMeteredGlobalValue(ControllerMeter.CONTROLLER_LEADERSHIP_CHANGE_WITHOUT_CALLBACK, 1L);
}
_controllerMetrics.setValueOfGlobalGauge(ControllerGauge.PINOT_CONTROLLER_LEADER, 1L);
} else {
if (_amIHelixLeader) {
_amIHelixLeader = false;
LOGGER.warn("Losing leadership without getting Helix change callback");
_controllerMetrics
.addMeteredGlobalValue(ControllerMeter.CONTROLLER_LEADERSHIP_CHANGE_WITHOUT_CALLBACK, 1L);
}
_controllerMetrics.setValueOfGlobalGauge(ControllerGauge.PINOT_CONTROLLER_LEADER, 0L);
}
LeadControllerManager.this.wait(CONTROLLER_LEADERSHIP_FETCH_INTERVAL_MS);
}
} catch (Exception e) {
// Ignore all exceptions. The thread keeps running until LeadControllerManager.stop() is invoked.
LOGGER.error("Caught exception within controller leadership fetching thread", e);
}
}
}
};
} | @Test
public void testLeadControllerManager() {
LeadControllerManager leadControllerManager =
new LeadControllerManager(HELIX_CONTROLLER_INSTANCE_ID, _helixManager, _controllerMetrics);
String tableName = "leadControllerTestTable";
int expectedPartitionIndex = LeadControllerUtils.getPartitionIdForTable(tableName);
String partitionName = LeadControllerUtils.generatePartitionName(expectedPartitionIndex);
becomeHelixLeader(false);
leadControllerManager.onHelixControllerChange();
// When there's no resource config change nor helix controller change, leadControllerManager should return false.
Assert.assertFalse(leadControllerManager.isLeaderForTable(tableName));
enableResourceConfig(true);
leadControllerManager.onResourceConfigChange();
// Even though resource config is enabled, leadControllerManager should return false because no index is cached yet.
Assert.assertFalse(leadControllerManager.isLeaderForTable(tableName));
Assert.assertTrue(LeadControllerUtils.isLeadControllerResourceEnabled(_helixManager));
// After the target partition index is cached, leadControllerManager should return true.
leadControllerManager.addPartitionLeader(partitionName);
Assert.assertTrue(leadControllerManager.isLeaderForTable(tableName));
// When the target partition index is removed, leadControllerManager should return false.
leadControllerManager.removePartitionLeader(partitionName);
Assert.assertFalse(leadControllerManager.isLeaderForTable(tableName));
// When resource config is set to false, the cache should be disabled, even if the target partition index is in
// the cache.
// The leader depends on whether the current controller is helix leader.
enableResourceConfig(false);
leadControllerManager.onResourceConfigChange();
Assert.assertFalse(LeadControllerUtils.isLeadControllerResourceEnabled(_helixManager));
Assert.assertFalse(leadControllerManager.isLeaderForTable(tableName));
leadControllerManager.addPartitionLeader(partitionName);
Assert.assertFalse(leadControllerManager.isLeaderForTable(tableName));
// When the current controller becomes helix leader and resource is disabled, leadControllerManager should return
// true.
becomeHelixLeader(true);
leadControllerManager.onHelixControllerChange();
Assert.assertTrue(leadControllerManager.isLeaderForTable(tableName));
} |
public static void main(String[] args) throws Exception {
System.setProperty("bookkeeper.metadata.bookie.drivers", PulsarMetadataBookieDriver.class.getName());
System.setProperty("bookkeeper.metadata.client.drivers", PulsarMetadataClientDriver.class.getName());
Arguments arguments = new Arguments();
CommandLine commander = new CommandLine(arguments);
try {
commander.parseArgs(args);
if (arguments.help) {
commander.usage(commander.getOut());
return;
}
if (arguments.generateDocs) {
CmdGenerateDocs cmd = new CmdGenerateDocs("pulsar");
cmd.addCommand("initialize-cluster-metadata", commander);
cmd.run(null);
return;
}
} catch (Exception e) {
commander.getErr().println(e);
throw e;
}
if (arguments.metadataStoreUrl == null && arguments.zookeeper == null) {
System.err.println("Metadata store address argument is required (--metadata-store)");
commander.usage(commander.getOut());
System.exit(1);
}
if (arguments.configurationMetadataStore == null && arguments.configurationStore == null
&& arguments.globalZookeeper == null) {
System.err.println(
"Configuration metadata store address argument is required (--configuration-metadata-store)");
commander.usage(commander.getOut());
System.exit(1);
}
if (arguments.configurationMetadataStore != null && (arguments.configurationStore != null
|| arguments.globalZookeeper != null)) {
System.err.println("Configuration metadata store argument (--configuration-metadata-store) "
+ "supersedes the deprecated (--global-zookeeper and --configuration-store) argument");
commander.usage(commander.getOut());
System.exit(1);
}
if (arguments.configurationMetadataStore == null) {
arguments.configurationMetadataStore = arguments.configurationStore == null ? arguments.globalZookeeper :
arguments.configurationStore;
}
if (arguments.metadataStoreUrl == null) {
arguments.metadataStoreUrl = ZKMetadataStore.ZK_SCHEME_IDENTIFIER + arguments.zookeeper;
}
if (arguments.numTransactionCoordinators <= 0) {
System.err.println("Number of transaction coordinators must greater than 0");
System.exit(1);
}
int bundleNumberForDefaultNamespace =
arguments.numberOfDefaultNamespaceBundles > 0 ? arguments.numberOfDefaultNamespaceBundles
: DEFAULT_BUNDLE_NUMBER;
try {
initializeCluster(arguments, bundleNumberForDefaultNamespace);
} catch (Exception e) {
System.err.println("Unexpected error occured.");
e.printStackTrace(System.err);
System.err.println("Terminating JVM...");
ShutdownUtil.triggerImmediateForcefulShutdown();
}
} | @Test
public void testMainGenerateDocs() throws Exception {
PrintStream oldStream = System.out;
try {
ByteArrayOutputStream baoStream = new ByteArrayOutputStream();
System.setOut(new PrintStream(baoStream));
Class argumentsClass =
Class.forName("org.apache.pulsar.PulsarClusterMetadataSetup$Arguments");
PulsarClusterMetadataSetup.main(new String[]{"-cs", "cs", "-uw", "uw", "-zk", "zk", "-c", "c", "-g"});
String message = baoStream.toString();
Field[] fields = argumentsClass.getDeclaredFields();
for (Field field : fields) {
boolean fieldHasAnno = field.isAnnotationPresent(Option.class);
if (fieldHasAnno) {
Option fieldAnno = field.getAnnotation(Option.class);
String[] names = fieldAnno.names();
if (names.length == 0 || fieldAnno.hidden()) {
continue;
}
String nameStr = Arrays.asList(names).toString();
nameStr = nameStr.substring(1, nameStr.length() - 1);
assertTrue(message.indexOf(nameStr) > 0, nameStr);
}
}
} finally {
System.setOut(oldStream);
}
} |
@Override
public int getLineHashesVersion(Component component) {
if (significantCodeRepository.getRangesPerLine(component).isPresent()) {
return LineHashVersion.WITH_SIGNIFICANT_CODE.getDbValue();
} else {
return LineHashVersion.WITHOUT_SIGNIFICANT_CODE.getDbValue();
}
} | @Test
public void should_return_version_of_line_hashes_with_significant_code_in_the_report() {
LineRange[] lineRanges = {new LineRange(0, 1), null, new LineRange(1, 5)};
when(significantCodeRepository.getRangesPerLine(file)).thenReturn(Optional.of(lineRanges));
assertThat(underTest.getLineHashesVersion(file)).isEqualTo(LineHashVersion.WITH_SIGNIFICANT_CODE.getDbValue());
verify(significantCodeRepository).getRangesPerLine(file);
verifyNoMoreInteractions(significantCodeRepository);
verifyNoInteractions(dbLineHashVersion);
} |
public <T> void execute(final AsyncTask<T> task) {
try {
// some small tasks such as validation can be performed here.
task.onPreCall();
} catch (Exception e) {
task.onError(e);
return;
}
service.submit(new FutureTask<>(task) {
@Override
protected void done() {
super.done();
try {
/*
* Called in the context of the background thread. Another possible variant is to post the
* result back so that it sits in a queue of the caller thread, which then picks it up for
* processing. An example of such a system is Android, where UI elements can only be
* updated from the UI thread, so the result must be posted back on the UI thread.
*/
task.onPostCall(get());
} catch (InterruptedException e) {
// should not occur
} catch (ExecutionException e) {
task.onError(e.getCause());
}
}
});
} | @Test
void testPreCallException() {
final var exception = new IllegalStateException();
doThrow(exception).when(task).onPreCall();
service.execute(task);
verify(task, timeout(2000)).onError(eq(exception));
final var inOrder = inOrder(task);
inOrder.verify(task, times(1)).onPreCall();
inOrder.verify(task, times(1)).onError(exception);
verifyNoMoreInteractions(task);
} |
@Override
public List<QualityProfile> load(String projectKey) {
StringBuilder url = new StringBuilder(WS_URL + "?project=").append(encodeForUrl(projectKey));
return handleErrors(url, () -> String.format("Failed to load the quality profiles of project '%s'", projectKey), true);
} | @Test
public void load_throws_MessageException_if_no_profiles_are_available_for_specified_project() throws IOException {
prepareCallWithEmptyResults();
assertThatThrownBy(() -> underTest.load("project"))
.isInstanceOf(MessageException.class)
.hasMessageContaining("No quality profiles");
} |
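// Skips up to n bytes by reading them through read() in chunks; returns -1 only when nothing could be skipped at end of stream.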
@Override
public long skip(long n) throws IOException {
int bufSize = (int) Math.min(n, SKIP_SIZE);
byte[] buf = new byte[bufSize];
long bytesSkipped = 0;
int bytesRead = 0;
while (bytesSkipped < n && bytesRead != -1) {
int len = (int) Math.min(bufSize, n - bytesSkipped);
bytesRead = read(buf, 0, len);
if (bytesRead != -1) {
bytesSkipped += bytesRead;
}
}
return (bytesRead < 0 && bytesSkipped == 0) ? -1 : bytesSkipped;
} | @Test
public void testSkip() throws IOException {
final int tailSize = 128;
final int count = 1024;
final int skipCount = 512;
TailStream stream = new TailStream(generateStream(0, count), tailSize);
assertEquals(skipCount, stream.skip(skipCount), "Wrong skip result");
assertEquals(generateText(skipCount - tailSize, tailSize),
new String(stream.getTail(), UTF_8), "Wrong buffer");
stream.close();
} |
public static List<TargetInfo> parseOptTarget(CommandLine cmd, AlluxioConfiguration conf)
throws IOException {
String[] targets;
if (cmd.hasOption(TARGET_OPTION_NAME)) {
String argTarget = cmd.getOptionValue(TARGET_OPTION_NAME);
if (StringUtils.isBlank(argTarget)) {
throw new IOException("Option " + TARGET_OPTION_NAME + " can not be blank.");
} else if (argTarget.contains(TARGET_SEPARATOR)) {
targets = argTarget.split(TARGET_SEPARATOR);
} else {
targets = new String[]{argTarget};
}
} else {
// By default we set on all targets (master/workers/job_master/job_workers)
targets = new String[]{ROLE_MASTER, ROLE_JOB_MASTER, ROLE_WORKERS, ROLE_JOB_WORKERS};
}
return getTargetInfos(targets, conf);
} | @Test
public void parseEmbeddedHAJobMasterTarget() throws Exception {
mConf.set(PropertyKey.JOB_MASTER_EMBEDDED_JOURNAL_ADDRESSES, "masters-1:19200,masters-2:19200");
CommandLine mockCommandLine = mock(CommandLine.class);
String[] mockArgs = new String[]{"--target", "job_master"};
when(mockCommandLine.getArgs()).thenReturn(mockArgs);
when(mockCommandLine.hasOption(LogLevel.TARGET_OPTION_NAME)).thenReturn(true);
when(mockCommandLine.getOptionValue(LogLevel.TARGET_OPTION_NAME)).thenReturn(mockArgs[1]);
try (MockedStatic<JobMasterClient.Factory> mockFactory =
mockStatic(JobMasterClient.Factory.class)) {
JobMasterClient mockJobClient = mock(JobMasterClient.class);
when(mockJobClient.getRemoteSockAddress()).thenReturn(new InetSocketAddress("masters-2",
mConf.getInt(PropertyKey.JOB_MASTER_RPC_PORT)));
when(mockJobClient.getRemoteHostName()).thenReturn("masters-2");
mockFactory.when(() -> JobMasterClient.Factory.create(any())).thenReturn(mockJobClient);
List<LogLevel.TargetInfo> targets = LogLevel.parseOptTarget(mockCommandLine, mConf);
assertEquals(1, targets.size());
assertEquals(new LogLevel.TargetInfo("masters-2", JOB_MASTER_WEB_PORT, "job_master"),
targets.get(0));
}
} |
public static IpPrefix valueOf(int address, int prefixLength) {
return new IpPrefix(IpAddress.valueOf(address), prefixLength);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfStringTooLongPrefixLengthIPv4() {
IpPrefix ipPrefix;
ipPrefix = IpPrefix.valueOf("1.2.3.4/33");
} |
public static void validateConfig(Object config, Class annotationClass) {
for (Field field : config.getClass().getDeclaredFields()) {
Object value = null;
field.setAccessible(true);
try {
value = field.get(config);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
validateField(field, value, annotationClass);
}
validateClass(config, annotationClass);
} | @Test
public void testMapEntry() {
TestConfig testConfig = createGoodConfig();
testConfig.stringIntegerMap = testStringStringMap;
Exception e = expectThrows(IllegalArgumentException.class, () -> ConfigValidation.validateConfig(testConfig));
assertTrue(e.getMessage().contains("stringIntegerMap"));
} |
public static void notNullOrEmpty(String string) {
notNullOrEmpty(string, String.format("string [%s] is null or empty", string));
} | @Test
public void testNotNull1NotEmpty5() {
assertThrows(IllegalArgumentException.class, () -> Precondition.notNullOrEmpty("\t\r\n"));
} |
@Override
public boolean isSimilar(PiMeterCellConfig onosMeter, PiMeterCellConfig deviceMeter) {
final PiMeterBand onosCommittedBand = onosMeter.committedBand();
final PiMeterBand onosPeakBand = onosMeter.peakBand();
final PiMeterBand deviceCommittedBand = deviceMeter.committedBand();
final PiMeterBand devicePeakBand = deviceMeter.peakBand();
// Fail fast, this can easily happen if we send a write very
// close to a read, read can still return the default config
if (deviceCommittedBand == null || devicePeakBand == null) {
return false;
}
final long onosCir = onosCommittedBand.rate();
final long onosCburst = onosCommittedBand.burst();
final long onosPir = onosPeakBand.rate();
final long onosPburst = onosPeakBand.burst();
final long deviceCir = deviceCommittedBand.rate();
final long deviceCburst = deviceCommittedBand.burst();
final long devicePir = devicePeakBand.rate();
final long devicePburst = devicePeakBand.burst();
return isRateSimilar(onosCir, deviceCir) && isRateSimilar(onosPir, devicePir) &&
isBurstSimilar(onosCburst, deviceCburst) && isBurstSimilar(onosPburst, devicePburst);
} | @Test
public void testWrongIsBurstSimilar() {
PiMeterBand onosMeterBand;
PiMeterBand deviceMeterBand;
PiMeterCellConfig onosMeter;
PiMeterCellConfig deviceMeter;
for (Map.Entry<Long, Long> entry : WRONG_BURSTS.entrySet()) {
onosMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, 0, entry.getKey());
deviceMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, 0, entry.getValue());
onosMeter = PiMeterCellConfig.builder()
.withMeterCellId(meterCellId)
.withMeterBand(onosMeterBand)
.withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
.build();
deviceMeter = PiMeterCellConfig.builder()
.withMeterCellId(meterCellId)
.withMeterBand(deviceMeterBand)
.withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
.build();
assertFalse(meterProgrammable.isSimilar(onosMeter, deviceMeter));
}
} |
protected void saveAndRunJobFilters(List<Job> jobs) {
if (jobs.isEmpty()) return;
try {
jobFilterUtils.runOnStateElectionFilter(jobs);
storageProvider.save(jobs);
jobFilterUtils.runOnStateAppliedFilters(jobs);
} catch (ConcurrentJobModificationException concurrentJobModificationException) {
try {
backgroundJobServer.getConcurrentJobModificationResolver().resolve(concurrentJobModificationException);
} catch (UnresolvableConcurrentJobModificationException unresolvableConcurrentJobModificationException) {
throw new SevereJobRunrException("Could not resolve ConcurrentJobModificationException", unresolvableConcurrentJobModificationException);
}
}
} | @Test
void onConcurrentJobModificationExceptionTaskTriesToResolveAndThrowsExceptionIfNotResolved() {
Job jobInProgress = aJobInProgress().build();
Job enqueuedJob = aCopyOf(jobInProgress).withEnqueuedState(now()).build();
when(storageProvider.save(anyList())).thenThrow(new ConcurrentJobModificationException(jobInProgress));
when(storageProvider.getJobById(jobInProgress.getId())).thenReturn(enqueuedJob);
assertThatCode(() -> task.saveAndRunJobFilters(singletonList(jobInProgress)))
.isInstanceOf(SevereJobRunrException.class)
.hasCauseInstanceOf(UnresolvableConcurrentJobModificationException.class);
} |
public static BoundingBox getBoundingBox(Tile upperLeft, Tile lowerRight) {
BoundingBox ul = upperLeft.getBoundingBox();
BoundingBox lr = lowerRight.getBoundingBox();
return ul.extendBoundingBox(lr);
} | @Test
public void getBoundingBoxTest() {
for (byte zoom = (byte) 0; zoom < 25; zoom++) {
Tile tile1 = new Tile(0, 0, zoom, TILE_SIZE);
if (zoom == 0) {
Assert.assertTrue(tile1.getBoundingBox().equals(new BoundingBox(MercatorProjection.LATITUDE_MIN,
-180, MercatorProjection.LATITUDE_MAX, 180)));
}
Tile tile2 = new Tile(0, 0, zoom, TILE_SIZE);
Assert.assertEquals(tile1.getBoundingBox().maxLatitude, tile2.getBoundingBox().maxLatitude, 0.0001);
Assert.assertEquals(tile1.getBoundingBox().minLongitude, tile2.getBoundingBox().minLongitude, 0.0001);
if (zoom >= 1) {
Tile tile3 = new Tile(1, 1, zoom, TILE_SIZE);
Assert.assertEquals(tile1.getBelow().getBoundingBox().minLatitude, tile3.getBoundingBox().minLatitude, 0.0001);
Assert.assertEquals(tile1.getRight().getBoundingBox().minLongitude, tile3.getBoundingBox().minLongitude, 0.0001);
if (zoom == 1) {
Assert.assertEquals(tile3.getBoundingBox().minLongitude, 0, 0.0001);
Assert.assertEquals(tile3.getBoundingBox().maxLongitude, 180, 0.0001);
}
Assert.assertEquals(tile3.getBoundingBox(), Tile.getBoundingBox(tile3, tile3));
}
Tile tile4 = new Tile(0, 0, zoom, TILE_SIZE);
Assert.assertEquals(tile1.getBoundingBox().maxLatitude, tile4.getBoundingBox().maxLatitude, 0.0001);
Assert.assertEquals(tile1.getBoundingBox().minLongitude, tile4.getBoundingBox().minLongitude, 0.0001);
Tile tile5 = new Tile(0, 0, zoom, TILE_SIZE);
Assert.assertEquals(tile1.getBoundingBox().maxLatitude, tile5.getBoundingBox().maxLatitude, 0.0001);
Assert.assertEquals(tile1.getBoundingBox().minLongitude, tile5.getBoundingBox().minLongitude, 0.0001);
Assert.assertEquals(tile1.getBoundingBox(), Tile.getBoundingBox(tile1, tile1));
Assert.assertEquals(tile2.getBoundingBox(), Tile.getBoundingBox(tile2, tile2));
Assert.assertEquals(tile4.getBoundingBox(), Tile.getBoundingBox(tile4, tile4));
Assert.assertEquals(tile4.getBoundingBox(), Tile.getBoundingBox(tile5, tile5));
}
} |
@Override
public ExecuteContext before(ExecuteContext context) throws Exception {
final Object rawEvent = context.getArguments()[0];
if (rawEvent instanceof ContextClosedEvent) {
tryShutdown((ContextClosedEvent) rawEvent);
}
return context;
} | @Test
public void before() throws Exception {
final SpringCloseEventInterceptor springCloseEventInterceptor = new SpringCloseEventInterceptor();
springCloseEventInterceptor.before(buildContext(new Object[]{"test"}));
Mockito.verify(registryService, Mockito.times(0)).shutdown();
springCloseEventInterceptor.before(buildContext(new Object[]{new ContextClosedEvent(Mockito.mock(
AnnotationConfigApplicationContext.class))}));
Mockito.verify(registryService, Mockito.times(0)).shutdown();
springCloseEventInterceptor.before(buildContext(new Object[]{new ContextClosedEvent(Mockito.mock(
ClassPathXmlApplicationContext.class))}));
Mockito.verify(registryService, Mockito.times(1)).shutdown();
springCloseEventInterceptor.after(buildContext(new Object[]{"test"}));
} |
@Override
public Position position(int major, int minor) {
return new Pos(major, minor);
} | @Test
public void testNegativeOffsetWithBackwardBias() {
Position pos = navigator.position(4, 10);
pos = pos.offsetBy(-10, Backward);
assertEquals(3, pos.getMajor());
assertEquals(10, pos.getMinor());
} |
public static Builder builder() {
return new Builder();
} | @Test
public void testLogicalType() {
Schema schema1 =
Schema.builder().addLogicalTypeField("logical", new TestType("id", "arg")).build();
Schema schema2 =
Schema.builder().addLogicalTypeField("logical", new TestType("id", "arg")).build();
assertEquals(schema1, schema2); // Logical types are the same.
Schema schema3 =
Schema.builder()
.addNullableField("logical", Schema.FieldType.logicalType(new TestType("id", "arg")))
.build();
assertNotEquals(schema1, schema3); // schema1 and schema3 differ in Nullability
Schema schema4 =
Schema.builder().addLogicalTypeField("logical", new TestType("id2", "arg")).build();
assertNotEquals(schema1, schema4); // Logical type id is different.
Schema schema5 =
Schema.builder().addLogicalTypeField("logical", new TestType("id", "arg2")).build();
assertNotEquals(schema1, schema5); // Logical type arg is different.
} |
@Override
protected CloudBlobClient connect(final ProxyFinder proxyfinder, final HostKeyCallback callback, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
try {
// Client configured with no credentials
final URI uri = new URI(String.format("%s://%s", Scheme.https, host.getHostname()));
final CloudBlobClient client = new CloudBlobClient(uri,
new StorageCredentialsAccountAndKey(host.getCredentials().getUsername(), "null"));
client.setDirectoryDelimiter(String.valueOf(Path.DELIMITER));
context.setLoggingEnabled(true);
context.setLogger(LoggerFactory.getLogger(log.getName()));
context.setUserHeaders(new HashMap<>(Collections.singletonMap(
HttpHeaders.USER_AGENT, new PreferencesUseragentProvider().get()))
);
context.getSendingRequestEventHandler().addListener(listener = new StorageEvent<SendingRequestEvent>() {
@Override
public void eventOccurred(final SendingRequestEvent event) {
if(event.getConnectionObject() instanceof HttpsURLConnection) {
final HttpsURLConnection connection = (HttpsURLConnection) event.getConnectionObject();
connection.setSSLSocketFactory(new CustomTrustSSLProtocolSocketFactory(trust, key));
connection.setHostnameVerifier(new DisabledX509HostnameVerifier());
}
}
});
final Proxy proxy = proxyfinder.find(new ProxyHostUrlProvider().get(host));
switch(proxy.getType()) {
case SOCKS: {
if(log.isInfoEnabled()) {
log.info(String.format("Configured to use SOCKS proxy %s", proxyfinder));
}
final java.net.Proxy socksProxy = new java.net.Proxy(
java.net.Proxy.Type.SOCKS, new InetSocketAddress(proxy.getHostname(), proxy.getPort()));
context.setProxy(socksProxy);
break;
}
case HTTP:
case HTTPS: {
if(log.isInfoEnabled()) {
log.info(String.format("Configured to use HTTP proxy %s", proxyfinder));
}
final java.net.Proxy httpProxy = new java.net.Proxy(
java.net.Proxy.Type.HTTP, new InetSocketAddress(proxy.getHostname(), proxy.getPort()));
context.setProxy(httpProxy);
break;
}
}
return client;
}
catch(URISyntaxException e) {
throw new LoginFailureException(e.getMessage(), e);
}
} | @Test(expected = LoginCanceledException.class)
public void testConnectInvalidKey() throws Exception {
final Host host = new Host(new AzureProtocol(), "kahy9boj3eib.blob.core.windows.net", new Credentials(
PROPERTIES.get("azure.user"), "6h9BmTcabGajIE/AVGzgu9JcC15JjrzkjdAIe+2daRK8XlyVdYT6zHtFMwXOtrlCw74jX9R0w4GN56vKQjOpVA=="
));
final AzureSession session = new AzureSession(host);
new LoginConnectionService(new DisabledLoginCallback() {
@Override
public Credentials prompt(final Host bookmark, String username, String title, String reason, LoginOptions options) throws LoginCanceledException {
assertEquals("Login kahy9boj3eib.blob.core.windows.net", title);
assertEquals("Server failed to authenticate the request. Make sure the value of Authorization header is formed correctly including the signature. Please contact your web hosting service provider for assistance.", reason);
return super.prompt(bookmark, username, title, reason, options);
}
}, new DisabledHostKeyCallback(),
new DisabledPasswordStore(), new DisabledProgressListener()).connect(session, new DisabledCancelCallback());
} |
static Collection<String> getIssueKeys(ChangeLogSet<?> changelog, Pattern issuePattern) {
Set<String> issueKeys = new HashSet<>();
for (ChangeLogSet.Entry entry : changelog) {
issueKeys.addAll(BlueJiraIssue.findIssueKeys(entry.getMsg(), issuePattern));
}
return issueKeys;
} | @Test
public void uniqueIssueKeys() throws Exception {
ChangeLogSet<ChangeLogSet.Entry> entries = build( "TST-123", "TST-123", "TST-123", "TST-124",
"TST-123", "TST-124", "TST-125");
Collection<String> keys = JiraSCMListener.getIssueKeys( entries, JiraSite.DEFAULT_ISSUE_PATTERN );
Assert.assertEquals(3, keys.size());
} |
@Override
public ObjectNode encode(OpenstackNode node, CodecContext context) {
checkNotNull(node, "Openstack node cannot be null");
ObjectNode result = context.mapper().createObjectNode()
.put(HOST_NAME, node.hostname())
.put(TYPE, node.type().name())
.put(STATE, node.state().name())
.put(MANAGEMENT_IP, node.managementIp().toString());
OpenstackNode.NodeType type = node.type();
// serialize uplink port only for gateway node
if (type == OpenstackNode.NodeType.GATEWAY) {
result.put(UPLINK_PORT, node.uplinkPort());
}
// serialize keystone config for controller node
if (type == OpenstackNode.NodeType.CONTROLLER) {
ObjectNode keystoneConfigJson = context.codec(KeystoneConfig.class)
.encode(node.keystoneConfig(), context);
result.set(KEYSTONE_CONFIG, keystoneConfigJson);
// serialize neutron config for controller node
if (node.neutronConfig() != null) {
ObjectNode neutronConfigJson = context.codec(NeutronConfig.class)
.encode(node.neutronConfig(), context);
result.set(NEUTRON_CONFIG, neutronConfigJson);
}
}
// serialize integration bridge config
if (node.intgBridge() != null) {
result.put(INTEGRATION_BRIDGE, node.intgBridge().toString());
}
// serialize VLAN interface, it is valid only if any VLAN interface presents
if (node.vlanIntf() != null) {
result.put(VLAN_INTF_NAME, node.vlanIntf());
}
// serialize data IP only if it presents
if (node.dataIp() != null) {
result.put(DATA_IP, node.dataIp().toString());
}
// serialize physical interfaces, it is valid only if any of physical interface presents
if (node.phyIntfs() != null && !node.phyIntfs().isEmpty()) {
ArrayNode phyIntfs = context.mapper().createArrayNode();
node.phyIntfs().forEach(phyIntf -> {
ObjectNode phyIntfJson =
context.codec(OpenstackPhyInterface.class).encode(phyIntf, context);
phyIntfs.add(phyIntfJson);
});
result.set(PHYSICAL_INTERFACES, phyIntfs);
}
// serialize controllers, it is valid only if any of controller presents
if (node.controllers() != null && !node.controllers().isEmpty()) {
ArrayNode controllers = context.mapper().createArrayNode();
node.controllers().forEach(controller -> {
ObjectNode controllerJson =
context.codec(ControllerInfo.class).encode(controller, context);
controllers.add(controllerJson);
});
result.set(CONTROLLERS, controllers);
}
// serialize SSH authentication info, it is valid only if auth info presents
if (node.sshAuthInfo() != null) {
ObjectNode sshAuthJson = context.codec(OpenstackSshAuth.class)
.encode(node.sshAuthInfo(), context);
result.set(SSH_AUTH, sshAuthJson);
}
// serialize DPDK config, it is valid only if dpdk config presents
if (node.dpdkConfig() != null) {
ObjectNode dpdkConfigJson = context.codec(DpdkConfig.class)
.encode(node.dpdkConfig(), context);
result.set(DPDK_CONFIG, dpdkConfigJson);
}
return result;
} | @Test
public void testOpenstackComputeNodeEncode() {
OpenstackPhyInterface phyIntf1 = DefaultOpenstackPhyInterface.builder()
.network("mgmtnetwork")
.intf("eth3")
.build();
OpenstackPhyInterface phyIntf2 = DefaultOpenstackPhyInterface.builder()
.network("oamnetwork")
.intf("eth4")
.build();
OpenstackSshAuth sshAuth = DefaultOpenstackSshAuth.builder()
.id("sdn")
.password("sdn")
.build();
ControllerInfo controller1 =
new ControllerInfo(IpAddress.valueOf("10.10.10.2"), 6653, "tcp");
ControllerInfo controller2 =
new ControllerInfo(IpAddress.valueOf("10.10.10.3"), 6663, "tcp");
OpenstackNode node = DefaultOpenstackNode.builder()
.hostname("compute")
.type(OpenstackNode.NodeType.COMPUTE)
.state(NodeState.INIT)
.managementIp(IpAddress.valueOf("10.10.10.1"))
.intgBridge(DeviceId.deviceId("br-int"))
.vlanIntf("vlan")
.dataIp(IpAddress.valueOf("20.20.20.2"))
.phyIntfs(ImmutableList.of(phyIntf1, phyIntf2))
.controllers(ImmutableList.of(controller1, controller2))
.sshAuthInfo(sshAuth)
.build();
ObjectNode nodeJson = openstackNodeCodec.encode(node, context);
assertThat(nodeJson, matchesOpenstackNode(node));
} |
public static Color fromString(String string)
{
try
{
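		// Integer.decode accepts decimal, hex (0x, #) and octal prefixes; the int is interpreted as an ARGB value (hasAlpha = true)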
int i = Integer.decode(string);
return new Color(i, true);
}
catch (NumberFormatException e)
{
return null;
}
} | @Test
public void fromString()
{
String WHITE_MAX_ALPHA = "-1";
String WHITE_ZERO_ALPHA = "0xffffff";
String TOO_LARGE = "0xffffffff";
String INVALID_FORMAT = "ffffff";
assertEquals(Color.WHITE, ColorUtil.fromString(WHITE_MAX_ALPHA));
assertEquals(ColorUtil.colorWithAlpha(Color.WHITE, 0), ColorUtil.fromString(WHITE_ZERO_ALPHA));
assertNotEquals(Color.WHITE, ColorUtil.fromString(WHITE_ZERO_ALPHA));
assertNull(ColorUtil.fromString(TOO_LARGE));
assertNull(ColorUtil.fromString(INVALID_FORMAT));
} |
private Gamma() {
} | @Test
public void testGamma() {
System.out.println("gamma");
assertTrue(Double.isInfinite(Gamma.gamma(0)));
assertEquals(1.0, Gamma.gamma(1), 1E-7);
assertEquals(1.0, Gamma.gamma(2), 1E-7);
assertEquals(2.0, Gamma.gamma(3), 1E-7);
assertEquals(6.0, Gamma.gamma(4), 1E-7);
assertEquals(0.886227, Gamma.gamma(1.5), 1E-6);
assertEquals(1.329340, Gamma.gamma(2.5), 1E-6);
assertEquals(3.323351, Gamma.gamma(3.5), 1E-6);
assertEquals(11.63173, Gamma.gamma(4.5), 1E-5);
} |
@Override
public String topic() {
throw new UnsupportedOperationException("StateStores can't access topic.");
} | @Test
public void shouldThrowOnTopic() {
assertThrows(UnsupportedOperationException.class, () -> context.topic());
} |
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
return fromXmlPartial(toInputStream(partial, UTF_8), o);
} | @Test
void shouldLoadGetFromSvnPartialForDir() throws Exception {
String buildXmlPartial =
"""
<jobs>
<job name="functional">
<tasks>
<fetchartifact artifactOrigin='gocd' stage='dev' job='unit' srcdir='dist' dest='lib' />
</tasks>
</job>
</jobs>""";
JobConfigs jobs = xmlLoader.fromXmlPartial(buildXmlPartial, JobConfigs.class);
JobConfig job = jobs.first();
Tasks fetch = job.tasks();
assertThat(fetch.size()).isEqualTo(1);
FetchTask task = (FetchTask) fetch.first();
assertThat(task.getStage()).isEqualTo(new CaseInsensitiveString("dev"));
assertThat(task.getJob().toString()).isEqualTo("unit");
assertThat(task.getSrc()).isEqualTo("dist");
assertThat(task.getDest()).isEqualTo("lib");
} |
@Override
public TableSchema parse(ReadonlyConfig readonlyConfig) {
ReadonlyConfig schemaConfig =
readonlyConfig
.getOptional(TableSchemaOptions.SCHEMA)
.map(ReadonlyConfig::fromMap)
.orElseThrow(
() -> new IllegalArgumentException("Schema config can't be null"));
if (readonlyConfig.getOptional(TableSchemaOptions.FieldOptions.FIELDS).isPresent()
&& schemaConfig.getOptional(TableSchemaOptions.ColumnOptions.COLUMNS).isPresent()) {
throw new IllegalArgumentException(
"Schema config can't contains both [fields] and [columns], please correct your config first");
}
TableSchema.Builder tableSchemaBuilder = TableSchema.builder();
if (readonlyConfig.getOptional(TableSchemaOptions.FieldOptions.FIELDS).isPresent()) {
// we use readonlyConfig here to avoid flatten, this is used to solve the t.x.x as field
// key
tableSchemaBuilder.columns(fieldParser.parse(readonlyConfig));
}
if (schemaConfig.getOptional(TableSchemaOptions.ColumnOptions.COLUMNS).isPresent()) {
tableSchemaBuilder.columns(columnParser.parse(schemaConfig));
}
if (schemaConfig
.getOptional(TableSchemaOptions.PrimaryKeyOptions.PRIMARY_KEY)
.isPresent()) {
tableSchemaBuilder.primaryKey(primaryKeyParser.parse(schemaConfig));
}
if (schemaConfig
.getOptional(TableSchemaOptions.ConstraintKeyOptions.CONSTRAINT_KEYS)
.isPresent()) {
tableSchemaBuilder.constraintKey(constraintKeyParser.parse(schemaConfig));
}
// todo: validate schema
return tableSchemaBuilder.build();
} | @Test
void parseField() throws FileNotFoundException, URISyntaxException {
ReadonlyConfig config = getReadonlyConfig(FIELD_CONFIG);
ReadonlyConfigParser readonlyConfigParser = new ReadonlyConfigParser();
TableSchema tableSchema = readonlyConfigParser.parse(config);
assertPrimaryKey(tableSchema);
assertConstraintKey(tableSchema);
assertColumn(tableSchema, false);
} |
@Override
public void persistInstance(final InstanceEntity instance) {
String instanceNodeName = buildInstanceNodeName(instance);
String instancePath = InstancePathConstants.buildInstanceParentPath(instance.getAppName());
String realNode = InstancePathConstants.buildRealNode(instancePath, instanceNodeName);
String nodeData = GsonUtils.getInstance().toJson(instance);
client.putEphemeral(realNode, nodeData);
LOGGER.info("etcd client register success: {}", nodeData);
} | @Test
public void testPersistInstance() {
InstanceEntity data = InstanceEntity.builder()
.appName("shenyu-test")
.host("shenyu-host")
.port(9195)
.build();
final String realNode = "/shenyu/register/instance/shenyu-test/shenyu-host:9195";
repository.persistInstance(data);
assertTrue(etcdBroker.containsKey(realNode));
assertEquals(GsonUtils.getInstance().toJson(data), etcdBroker.get(realNode));
repository.close();
} |
public Optional<Projection> createProjection(final ProjectionSegment projectionSegment) {
if (projectionSegment instanceof ShorthandProjectionSegment) {
return Optional.of(createProjection((ShorthandProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof ColumnProjectionSegment) {
return Optional.of(createProjection((ColumnProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof ExpressionProjectionSegment) {
return Optional.of(createProjection((ExpressionProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof AggregationDistinctProjectionSegment) {
return Optional.of(createProjection((AggregationDistinctProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof AggregationProjectionSegment) {
return Optional.of(createProjection((AggregationProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof SubqueryProjectionSegment) {
return Optional.of(createProjection((SubqueryProjectionSegment) projectionSegment));
}
if (projectionSegment instanceof ParameterMarkerExpressionSegment) {
return Optional.of(createProjection((ParameterMarkerExpressionSegment) projectionSegment));
}
return Optional.empty();
} | @Test
void assertCreateProjectionWhenProjectionSegmentInstanceOfColumnProjectionSegment() {
ColumnProjectionSegment columnProjectionSegment = new ColumnProjectionSegment(new ColumnSegment(0, 10, new IdentifierValue("name")));
columnProjectionSegment.setAlias(new AliasSegment(0, 0, new IdentifierValue("alias")));
Optional<Projection> actual = new ProjectionEngine(databaseType).createProjection(columnProjectionSegment);
assertTrue(actual.isPresent());
assertThat(actual.get(), instanceOf(ColumnProjection.class));
} |
public BackgroundException map(final IOException failure, final Path directory) {
return super.map("Connection failed", failure, directory);
} | @Test
public void testSSLHandshakeCertificateDismissed() {
final SSLHandshakeException c = new SSLHandshakeException("f");
c.initCause(new CertificateException("c"));
assertEquals(ConnectionCanceledException.class,
new DefaultIOExceptionMappingService().map(c).getClass());
} |
public String getStringData(final String path) throws Exception {
byte[] bytes = getData(path);
if (bytes != null) {
return new String(bytes, StandardCharsets.UTF_8);
}
return null;
} | @Test
public void testGetStringData() throws Exception {
String node1 = "/node1";
String node2 = "/node2";
assertFalse(curator.exists(node1));
curator.create(node1);
assertNull(curator.getStringData(node1));
byte[] setData = "setData".getBytes(StandardCharsets.UTF_8);
curator.setData(node1, setData, -1);
assertEquals("setData", curator.getStringData(node1));
Stat stat = new Stat();
assertFalse(curator.exists(node2));
curator.create(node2);
assertNull(curator.getStringData(node2, stat));
curator.setData(node2, setData, -1);
assertEquals("setData", curator.getStringData(node2, stat));
} |
@SuppressWarnings("unchecked")
void openDB(final Map<String, Object> configs, final File stateDir) {
// initialize the default rocksdb options
final DBOptions dbOptions = new DBOptions();
final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
cache = new LRUCache(BLOCK_CACHE_SIZE);
tableConfig.setBlockCache(cache);
tableConfig.setBlockSize(BLOCK_SIZE);
filter = new BloomFilter();
tableConfig.setFilterPolicy(filter);
userSpecifiedOptions.optimizeFiltersForHits();
userSpecifiedOptions.setTableFormatConfig(tableConfig);
userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
userSpecifiedOptions.setCreateIfMissing(true);
userSpecifiedOptions.setErrorIfExists(false);
userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
// this is the recommended way to increase parallelism in RocksDb
// note that the current implementation of setIncreaseParallelism affects the number
// of compaction threads but not flush threads (the latter remains one). Also,
// the parallelism value needs to be at least two because of the code in
// https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
// subtracts one from the value passed to determine the number of compaction threads
// (this could be a bug in the RocksDB code and their devs have been contacted).
userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
wOptions = new WriteOptions();
wOptions.setDisableWAL(true);
fOptions = new FlushOptions();
fOptions.setWaitForFlush(true);
final Class<RocksDBConfigSetter> configSetterClass =
(Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
if (configSetterClass != null) {
configSetter = Utils.newInstance(configSetterClass);
configSetter.setConfig(name, userSpecifiedOptions, configs);
}
dbDir = new File(new File(stateDir, parentDir), name);
try {
Files.createDirectories(dbDir.getParentFile().toPath());
Files.createDirectories(dbDir.getAbsoluteFile().toPath());
} catch (final IOException fatal) {
throw new ProcessorStateException(fatal);
}
// Setup statistics before the database is opened, otherwise the statistics are not updated
// with the measurements from Rocks DB
setupStatistics(configs, dbOptions);
openRocksDB(dbOptions, columnFamilyOptions);
dbAccessor = new DirectDBAccessor(db, fOptions, wOptions);
open = true;
addValueProvidersToMetricsRecorder();
} | @Test
public void shouldNotSetStatisticsInValueProvidersWhenUserProvidesStatistics() {
rocksDBStore = getRocksDBStoreWithRocksDBMetricsRecorder();
context = getProcessorContext(RecordingLevel.DEBUG, RocksDBConfigSetterWithUserProvidedStatistics.class);
rocksDBStore.openDB(context.appConfigs(), context.stateDir());
verify(metricsRecorder).addValueProviders(eq(DB_NAME), notNull(), notNull(), isNull());
} |
public Port port() {
return port;
} | @Override
@Test
public void withTime() {
Device device = createDevice();
Port port = new DefaultPort(device, PortNumber.portNumber(123), true);
DeviceEvent event = new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED,
device, port, 123L);
validateEvent(event, DeviceEvent.Type.DEVICE_ADDED, device, 123L);
assertEquals("incorrect port", port, event.port());
} |
@Override
public List<String> getPrimaryBrokers() {
return this.primary;
} | @Test
public void testGetPrimaryBrokers() throws Exception {
List<String> primaryBrokers = this.getDefaultPolicy().getPrimaryBrokers();
assertEquals(primaryBrokers.size(), 1);
assertEquals(primaryBrokers.get(0), "prod1-broker[1-3].messaging.use.example.com");
} |
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) {
Objects.requireNonNull(metric);
if (batchMeasure == null) {
return Optional.empty();
}
Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder();
switch (metric.getType().getValueType()) {
case INT:
return toIntegerMeasure(builder, batchMeasure);
case LONG:
return toLongMeasure(builder, batchMeasure);
case DOUBLE:
return toDoubleMeasure(builder, batchMeasure);
case BOOLEAN:
return toBooleanMeasure(builder, batchMeasure);
case STRING:
return toStringMeasure(builder, batchMeasure);
case LEVEL:
return toLevelMeasure(builder, batchMeasure);
case NO_VALUE:
return toNoValueMeasure(builder);
default:
throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
}
} | @Test
public void toMeasure_returns_no_value_if_dto_has_invalid_string_value_for_LEVEL_Metric() {
Optional<Measure> measure = underTest.toMeasure(ScannerReport.Measure.newBuilder().setStringValue(StringValue.newBuilder().setValue("trololo")).build(), SOME_LEVEL_METRIC);
assertThat(measure).isPresent();
assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.NO_VALUE);
} |
@Override
public void execute(ComputationStep.Context context) {
taskResultHolder.setResult(new CeTaskResultImpl(analysisMetadataHolder.getUuid()));
} | @Test
public void execute_populate_TaskResultHolder_with_a_TaskResult_with_snapshot_id_of_the_root_taken_from_DbIdsRepository() {
analysisMetadataHolder.setUuid(AN_ANALYSIS_UUID);
underTest.execute(new TestComputationStepContext());
assertThat(taskResultHolder.getResult().getAnalysisUuid()).contains(AN_ANALYSIS_UUID);
} |
@Override
protected Map<String, Object> toJsonMap(IAccessEvent event) {
return new MapBuilder(timestampFormatter, customFieldNames, additionalFields, includes.size())
.addNumber("port", isIncluded(AccessAttribute.LOCAL_PORT), event::getLocalPort)
.addNumber("contentLength", isIncluded(AccessAttribute.CONTENT_LENGTH), event::getContentLength)
.addTimestamp("timestamp", isIncluded(AccessAttribute.TIMESTAMP), event.getTimeStamp())
.add("method", isIncluded(AccessAttribute.METHOD), event::getMethod)
.add("protocol", isIncluded(AccessAttribute.PROTOCOL), event::getProtocol)
.add("requestContent", isIncluded(AccessAttribute.REQUEST_CONTENT), event::getRequestContent)
.add("remoteAddress", isIncluded(AccessAttribute.REMOTE_ADDRESS), event::getRemoteAddr)
.add("remoteUser", isIncluded(AccessAttribute.REMOTE_USER), event::getRemoteUser)
.addMap("headers", !requestHeaders.isEmpty(),
() -> filterHeaders(event.getRequestHeaderMap(), requestHeaders))
.addMap("params", isIncluded(AccessAttribute.REQUEST_PARAMETERS), event::getRequestParameterMap)
.addNumber("requestTime", isIncluded(AccessAttribute.REQUEST_TIME), event::getElapsedTime)
.add("uri", isIncluded(AccessAttribute.REQUEST_URI), event::getRequestURI)
.add("url", isIncluded(AccessAttribute.REQUEST_URL), event::getRequestURL)
.add("pathQuery", isIncluded(AccessAttribute.PATH_QUERY), () -> event.getRequestURI() + event.getQueryString())
.add("remoteHost", isIncluded(AccessAttribute.REMOTE_HOST), event::getRemoteHost)
.add("responseContent", isIncluded(AccessAttribute.RESPONSE_CONTENT), event::getResponseContent)
.addMap("responseHeaders", !responseHeaders.isEmpty(),
() -> filterHeaders(event.getResponseHeaderMap(), responseHeaders))
.add("serverName", isIncluded(AccessAttribute.SERVER_NAME), event::getServerName)
.addNumber("status", isIncluded(AccessAttribute.STATUS_CODE), event::getStatusCode)
.add("userAgent", isIncluded(AccessAttribute.USER_AGENT), () -> event.getRequestHeader(USER_AGENT))
.add("version", jsonProtocolVersion != null, jsonProtocolVersion)
.addMap("requestAttributes", !requestAttributes.isEmpty(),
() -> filterRequestAttributes(requestAttributes, event))
.build();
} | @Test
void testAddAdditionalFields() {
final Map<String, Object> additionalFields = Map.of(
"serviceName", "user-service",
"serviceVersion", "1.2.3");
accessJsonLayout = new AccessJsonLayout(jsonFormatter, timestampFormatter, includes, Collections.emptyMap(),
additionalFields);
assertThat(accessJsonLayout.toJsonMap(event)).containsOnly(
entry("timestamp", timestamp), entry("remoteUser", "john"),
entry("method", "GET"), entry("uri", uri),
entry("protocol", "HTTP/1.1"), entry("status", 200),
entry("requestTime", 100L), entry("contentLength", 78L),
entry("userAgent", userAgent), entry("remoteAddress", remoteAddress),
entry("serviceName", "user-service"), entry("serviceVersion", "1.2.3"));
} |
@Override
public double getValue(double quantile) {
if (quantile < 0.0 || quantile > 1.0 || Double.isNaN( quantile )) {
throw new IllegalArgumentException(quantile + " is not in [0..1]");
}
if (values.length == 0) {
return 0.0;
}
int posx = Arrays.binarySearch(quantiles, quantile);
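        // binarySearch returns (-(insertion point) - 1) when the quantile is not an exact match;
        // convert that to the index of the largest stored quantile below the requested one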
if (posx < 0)
posx = ((-posx) - 1) - 1;
if (posx < 1) {
return values[0];
}
if (posx >= values.length) {
return values[values.length - 1];
}
return values[posx];
} | @Test
public void bigQuantilesAreTheLastValue() throws Exception {
assertThat(snapshot.getValue(1.0))
.isEqualTo(5.0, offset(0.1));
} |
@Udf(description = "Returns the inverse (arc) sine of an INT value")
public Double asin(
@UdfParameter(
value = "value",
description = "The value to get the inverse sine of."
) final Integer value
) {
return asin(value == null ? null : value.doubleValue());
} | @Test
public void shouldHandlePositive() {
assertThat(udf.asin(0.43), closeTo(0.444492776935819, 0.000000000000001));
assertThat(udf.asin(0.5), closeTo(0.5235987755982989, 0.000000000000001));
assertThat(udf.asin(1.0), closeTo(1.5707963267948966, 0.000000000000001));
assertThat(udf.asin(1), closeTo(1.5707963267948966, 0.000000000000001));
assertThat(udf.asin(1L), closeTo(1.5707963267948966, 0.000000000000001));
} |
public static <T> T[] clone(final T[] array) {
if (array == null) {
return null;
}
return array.clone();
} | @Test
public void assertClone() {
Assert.isNull(ArrayUtil.clone(null));
String[] array = new String[0];
Assert.isTrue(array != ArrayUtil.clone(array));
Assert.isTrue(array.length == ArrayUtil.clone(array).length);
} |
public void setJobLauncher(JobLauncher jobLauncher) {
this.jobLauncher = jobLauncher;
} | @Test
public void shouldUseJobLauncherFromComponent() throws Exception {
// Given
SpringBatchComponent batchComponent = new SpringBatchComponent();
batchComponent.setJobLauncher(alternativeJobLauncher);
context.addComponent("customBatchComponent", batchComponent);
// When
context().addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:startCustom").to("customBatchComponent:mockJob");
}
});
// Then
SpringBatchEndpoint batchEndpoint = context().getEndpoint("customBatchComponent:mockJob", SpringBatchEndpoint.class);
JobLauncher batchEndpointJobLauncher = (JobLauncher) FieldUtils.readField(batchEndpoint, "jobLauncher", true);
assertSame(alternativeJobLauncher, batchEndpointJobLauncher);
} |
public Long asLong(Map<String, ValueReference> parameters) {
switch (valueType()) {
case LONG:
if (value() instanceof Number) {
return ((Number) value()).longValue();
}
throw new IllegalStateException("Expected value reference of type LONG but got " + value().getClass());
case PARAMETER:
return asType(parameters, Long.class);
default:
throw new IllegalStateException("Expected value reference of type LONG but got " + valueType());
}
} | @Test
public void asLong() {
assertThat(ValueReference.of(42L).asLong(Collections.emptyMap())).isEqualTo(42L);
assertThatThrownBy(() -> ValueReference.of("Test").asLong(Collections.emptyMap()))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Expected value reference of type LONG but got STRING");
} |
@Override
public void visit(Entry target) {
final EntryAccessor entryAccessor = new EntryAccessor();
final Component component = (Component) entryAccessor.removeComponent(target);
if (component != null) {
if(component instanceof AbstractButton)
((AbstractButton)component).setAction(null);
removeMenuComponent(component);
ActionEnabler actionEnabler = target.removeAttribute(ActionEnabler.class);
if(actionEnabler != null){
final AFreeplaneAction action = entryAccessor.getAction(target);
action.removePropertyChangeListener(actionEnabler);
}
}
} | @Test
public void ignoresEntriesWithoutComponents() throws Exception {
final JComponentRemover componentRemover = JComponentRemover.INSTANCE;
final Entry entry = new Entry();
componentRemover.visit(entry);
} |
public URI flowUrl(Execution execution) {
return this.build("/ui/" +
(execution.getTenantId() != null ? execution.getTenantId() + "/" : "") +
"flows/" +
execution.getNamespace() + "/" +
execution.getFlowId());
} | @Test
void flowUrl() {
Flow flow = TestsUtils.mockFlow();
Execution execution = TestsUtils.mockExecution(flow, ImmutableMap.of());
assertThat(uriProvider.executionUrl(execution).toString(), containsString("mysuperhost.com/subpath/ui"));
assertThat(uriProvider.flowUrl(execution).toString(), containsString(flow.getNamespace() + "/" + flow.getId()));
assertThat(uriProvider.executionUrl(execution).toString(), containsString("mysuperhost.com/subpath/ui"));
assertThat(uriProvider.flowUrl(flow).toString(), containsString(flow.getNamespace() + "/" + flow.getId()));
} |
public boolean isEmpty() {
return messagesProcessed == 0 && errorsOccurred == 0;
} | @Test
void testIsEmpty() {
StatsPersistMsg emptyStats = new StatsPersistMsg(0, 0, TenantId.SYS_TENANT_ID, TenantId.SYS_TENANT_ID);
assertThat(emptyStats.isEmpty()).isTrue();
} |
@Override
public PollResult poll(long currentTimeMs) {
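        // Delegate to pollInternal with freshly prepared fetch requests and the success/failure handlers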
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testFetchResponseMetricsWithOnePartitionError() {
buildFetcher();
assignFromUser(mkSet(tp0, tp1));
subscriptions.seek(tp0, 0);
subscriptions.seek(tp1, 0);
Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE,
TimestampType.CREATE_TIME, 0L);
for (int v = 0; v < 3; v++)
builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
MemoryRecords records = builder.build();
Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
partitions.put(tidp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(100)
.setLogStartOffset(0)
.setRecords(records));
partitions.put(tidp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
.setHighWatermark(100)
.setLogStartOffset(0));
assertEquals(1, sendFetches());
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
networkClientDelegate.poll(time.timer(0));
collectFetch();
int expectedBytes = 0;
for (Record record : records.records())
expectedBytes += record.sizeInBytes();
assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
} |
@Override
public boolean isSupported() {
try {
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.P) {
return false;
}
return OAIDRom.sysProperty("persist.sys.identifierid.supported", "0").equals("1");
} catch (Throwable throwable) {
SALog.i(TAG, throwable);
return false;
}
} | @Test
public void isSupported() {
VivoImpl vivo = new VivoImpl(mApplication);
Assert.assertFalse(vivo.isSupported());
} |
protected static List<LastOpenedDTO> filterForExistingIdAndCapAtMaximum(final LastOpenedForUserDTO loi, final GRN grn, final long max) {
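        // Remove any entry matching the given GRN and cap the result at (max - 1) items, so one new entry can still fit under the cap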
return loi.items().stream().filter(i -> !i.grn().equals(grn)).limit(max - 1).toList();
} | @Test
public void testRemoveIfExistsInList() {
var _1 = grnRegistry.newGRN(GRNTypes.DASHBOARD, "1");
LastOpenedForUserDTO dto = new LastOpenedForUserDTO("userId", List.of(new LastOpenedDTO(_1, DateTime.now(DateTimeZone.UTC))));
var result = StartPageService.filterForExistingIdAndCapAtMaximum(dto, _1, MAX);
assertThat(result.isEmpty()).isTrue();
} |
@SuppressWarnings("unchecked")
public static <S, F> S visit(final SqlType type, final SqlTypeWalker.Visitor<S, F> visitor) {
final BiFunction<SqlTypeWalker.Visitor<?, ?>, SqlType, Object> handler = HANDLER
.get(type.baseType());
if (handler == null) {
throw new UnsupportedOperationException("Unsupported schema type: " + type.baseType());
}
return (S) handler.apply(visitor, type);
} | @Test
public void shouldThrowByDefaultFromNonStructured() {
// Given:
visitor = new Visitor<String, Integer>() {
};
nonStructuredTypes().forEach(type -> {
try {
// When:
SqlTypeWalker.visit(type, visitor);
fail();
} catch (final UnsupportedOperationException e) {
// Then:
assertThat(e.getMessage(), is("Unsupported sql type: " + type));
}
});
} |
public HashRange partition(int index, int count) {
if (count <= 0) {
throw new IllegalArgumentException("Count must be a strictly positive value");
}
if (index < 0 || index >= count) {
throw new IllegalArgumentException("Index must be between 0 and " + count);
}
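        // Partition i of count covers [min + size*i/count, min + size*(i+1)/count), computed with BigInteger math to avoid overflow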
BigInteger partitionStart = minInclusive.add(size().multiply(valueOf(index)).divide(valueOf(count)));
BigInteger partitionEnd = minInclusive.add(size().multiply(valueOf(index + 1)).divide(valueOf(count)));
return new HashRange(partitionStart, partitionEnd);
} | @Test
public void partition() {
HashRange range = HashRange.range(1000, 2000);
assertEquals(HashRange.range(1000, 1500), range.partition(0, 2));
assertEquals(HashRange.range(1500, 2000), range.partition(1, 2));
range = range("0", "170141183460469231731687303715884105728");
assertEquals(range("0", "14178431955039102644307275309657008810"),
range.partition(0, 12));
assertEquals(range("14178431955039102644307275309657008810", "28356863910078205288614550619314017621"),
range.partition(1, 12));
assertEquals(range("28356863910078205288614550619314017621", "42535295865117307932921825928971026432"),
range.partition(2, 12));
// ...
assertEquals(range("141784319550391026443072753096570088106", "155962751505430129087380028406227096917"),
range.partition(10, 12));
assertEquals(range("155962751505430129087380028406227096917", "170141183460469231731687303715884105728"),
range.partition(11, 12));
} |
@Override
public int compareTo(ColumnDescriptor o) {
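        // Compare path components lexicographically; if one path is a prefix of the other, the shorter path sorts first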
int length = path.length < o.path.length ? path.length : o.path.length;
for (int i = 0; i < length; i++) {
int compareTo = path[i].compareTo(o.path[i]);
if (compareTo != 0) {
return compareTo;
}
}
return path.length - o.path.length;
} | @Test
public void testComparesTo() throws Exception {
assertEquals(column("a").compareTo(column("a")), 0);
assertEquals(column("a", "b").compareTo(column("a", "b")), 0);
assertEquals(column("a").compareTo(column("b")), -1);
assertEquals(column("b").compareTo(column("a")), 1);
assertEquals(column("a", "a").compareTo(column("a", "b")), -1);
assertEquals(column("b", "a").compareTo(column("a", "a")), 1);
assertEquals(column("a").compareTo(column("a", "b")), -1);
assertEquals(column("b").compareTo(column("a", "b")), 1);
assertEquals(column("a", "b").compareTo(column("a")), 1);
assertEquals(column("a", "b").compareTo(column("b")), -1);
assertEquals(column("").compareTo(column("")), 0);
assertEquals(column("").compareTo(column("a")), -1);
assertEquals(column("a").compareTo(column("")), 1);
} |
public static OpenstackNode getGwByInstancePort(Set<OpenstackNode> gateways,
InstancePort instPort) {
OpenstackNode gw = null;
if (instPort != null && instPort.deviceId() != null) {
gw = getGwByComputeDevId(gateways, instPort.deviceId());
}
return gw;
} | @Test
public void testGetGwByInstancePort() {
Set<OpenstackNode> gws = Sets.newConcurrentHashSet();
gws.add(genGateway(1));
gws.add(genGateway(2));
gws.add(genGateway(3));
int expectedGwIndex = 2;
OpenstackNode gw = getGwByInstancePort(gws, instancePort1);
assertEquals(genGateway(expectedGwIndex), gw);
assertNull(getGwByInstancePort(gws, null));
} |
public static RandomForest fit(Formula formula, DataFrame data) {
return fit(formula, data, new Properties());
} | @Test
public void testBreastCancer() {
System.out.println("Breast Cancer");
MathEx.setSeed(19650218); // to get repeatable results for cross validation.
ClassificationValidations<RandomForest> result = CrossValidation.classification(10, BreastCancer.formula, BreastCancer.data,
(f, x) -> RandomForest.fit(f, x, 100, 5, SplitRule.GINI, 20, 100, 5, 1.0, null, Arrays.stream(seeds)));
System.out.println(result);
assertEquals(0.9550, result.avg.accuracy, 1E-4);
} |
@Override
public byte getByte(int index) {
checkIndex(index);
return _getByte(index);
} | @Test
public void testGetByteAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().getByte(0);
}
});
} |
@Override
public UfsFileStatus copy() {
return new UfsFileStatus(this);
} | @Test
public void copy() {
Random random = new Random();
String contentHash = CommonUtils.randomAlphaNumString(10);
long contentLength = random.nextLong();
long lastModifiedTimeMs = random.nextLong();
short mode = 077;
long blockSize = random.nextLong();
UfsFileStatus statusToCopy =
new UfsFileStatus("name", contentHash, contentLength, lastModifiedTimeMs, "owner", "group",
mode, blockSize);
UfsFileStatus status = new UfsFileStatus(statusToCopy);
assertEquals(statusToCopy, status);
} |
public Optional<DateTime> nextTime(JobTriggerDto trigger) {
return nextTime(trigger, trigger.nextTime());
} | @Test
public void intervalNextTimeAfter() {
final JobTriggerDto trigger = JobTriggerDto.builderWithClock(clock)
.jobDefinitionId("abc-123")
.jobDefinitionType("event-processor-execution-v1")
.schedule(IntervalJobSchedule.builder()
.interval(1)
.unit(TimeUnit.SECONDS)
.build())
.build();
DateTime date = DateTime.parse("2024-01-01T0:00:00.000Z");
DateTime nextTime = strategies.nextTime(trigger, date).orElse(null);
assertThat(nextTime)
.isNotNull()
.satisfies(dateTime -> {
assertThat(dateTime.getZone()).isEqualTo(DateTimeZone.forID("UTC"));
assertThat(dateTime.toString(DATE_FORMAT)).isEqualTo("01/01/2024 00:00:01");
});
date = DateTime.parse("2024-02-01T0:00:00.000Z");
nextTime = strategies.nextTime(trigger, date).orElse(null);
assertThat(nextTime)
.isNotNull()
.satisfies(dateTime -> {
assertThat(dateTime.getZone()).isEqualTo(DateTimeZone.forID("UTC"));
assertThat(dateTime.toString(DATE_FORMAT)).isEqualTo("01/02/2024 00:00:01");
});
} |
@Override
public OutputT expand(InputT input) {
OutputT res = delegate().expand(input);
if (res instanceof PCollection) {
PCollection pc = (PCollection) res;
try {
pc.setCoder(delegate().getDefaultOutputCoder(input, pc));
} catch (CannotProvideCoderException e) {
// Let coder inference happen later.
}
}
return res;
} | @Test
public void getDefaultOutputCoderDelegates() throws Exception {
@SuppressWarnings("unchecked")
PCollection<Integer> input =
PCollection.createPrimitiveOutputInternal(
null /* pipeline */,
WindowingStrategy.globalDefault(),
PCollection.IsBounded.BOUNDED,
null /* coder */);
@SuppressWarnings("unchecked")
PCollection<String> output =
PCollection.createPrimitiveOutputInternal(
null /* pipeline */,
WindowingStrategy.globalDefault(),
PCollection.IsBounded.BOUNDED,
null /* coder */);
@SuppressWarnings("unchecked")
Coder<String> outputCoder = mock(Coder.class);
when(delegate.expand(input)).thenReturn(output);
when(delegate.getDefaultOutputCoder(input, output)).thenReturn(outputCoder);
assertThat(forwarding.expand(input).getCoder(), equalTo(outputCoder));
} |
public static String toString(RedisCommand<?> command, Object... params) {
if (RedisCommands.AUTH.equals(command)) {
return "command: " + command + ", params: (password masked)";
}
return "command: " + command + ", params: " + LogHelper.toString(params);
} | @Test
public void toStringWithNestedBigCollections() {
List<String> strings = Collections.nCopies(15, "0");
List<Integer> ints = Collections.nCopies(15, 1);
List<Long> longs = Collections.nCopies(15, 2L);
List<Double> doubles = Collections.nCopies(15, 3.1D);
List<Float> floats = Collections.nCopies(15, 4.2F);
List<Byte> bytes = Collections.nCopies(15, (byte)5);
List<Character> chars = Collections.nCopies(15, '6');
Object[] input = new Object[] { strings, ints, longs, doubles, floats, bytes, chars };
StringBuilder sb = new StringBuilder();
sb.append("[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], ");
sb.append("[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...], ");
sb.append("[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, ...], ");
sb.append("[3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, ...], ");
sb.append("[4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, ...], ");
sb.append("[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, ...], ");
sb.append("[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...]]");
assertThat(LogHelper.toString(input)).isEqualTo(sb.toString());
} |
public static double of(double[] truth, double[] prediction) {
if (truth.length != prediction.length) {
throw new IllegalArgumentException(String.format("The vector sizes don't match: %d != %d.", truth.length, prediction.length));
}
int n = truth.length;
double rss = 0.0;
for (int i = 0; i < n; i++) {
double r = truth[i] - prediction[i];
rss += r * r;
}
return rss;
} | @Test
public void test() {
System.out.println("RSS");
double[] truth = {
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2,
104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9
};
double[] prediction = {
83.60082, 86.94973, 88.09677, 90.73065, 96.53551, 97.83067,
98.12232, 99.87776, 103.20861, 105.08598, 107.33369, 109.57251,
112.98358, 113.92898, 115.50214, 117.54028,
};
double expResult = 12.844;
double result = RSS.of(truth, prediction);
assertEquals(expResult, result, 1E-3);
} |
public Optional<User> login(String nameOrEmail, String password) {
if (nameOrEmail == null || password == null) {
return Optional.empty();
}
User user = userDAO.findByName(nameOrEmail);
if (user == null) {
user = userDAO.findByEmail(nameOrEmail);
}
if (user != null && !user.isDisabled()) {
boolean authenticated = encryptionService.authenticate(password, user.getPassword(), user.getSalt());
if (authenticated) {
performPostLoginActivities(user);
return Optional.of(user);
}
}
return Optional.empty();
} | @Test
void apiLoginShouldReturnUserIfUserFoundFromApikeyLookupNotDisabled() {
Mockito.when(userDAO.findByApiKey("apikey")).thenReturn(normalUser);
Optional<User> returnedUser = userService.login("apikey");
Assertions.assertEquals(normalUser, returnedUser.get());
} |
String getSafeModeTip() {
StringBuilder msg = new StringBuilder();
boolean isBlockThresholdMet = false;
synchronized (this) {
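      // Read blockSafe and blockThreshold under the lock so the comparison sees a consistent snapshot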
isBlockThresholdMet = (blockSafe >= blockThreshold);
if (!isBlockThresholdMet) {
msg.append(String.format(
"The reported blocks %d needs additional %d"
+ " blocks to reach the threshold %.4f of total blocks %d.%n",
blockSafe, (blockThreshold - blockSafe), threshold, blockTotal));
} else {
msg.append(String.format(
"The reported blocks %d has reached the threshold %.4f of total"
+ " blocks %d. ", blockSafe, threshold, blockTotal));
}
}
if (datanodeThreshold > 0) {
if (isBlockThresholdMet) {
int numLive = blockManager.getDatanodeManager().getNumLiveDataNodes();
if (numLive < datanodeThreshold) {
msg.append(String.format(
"The number of live datanodes %d needs an additional %d live "
+ "datanodes to reach the minimum number %d.%n",
numLive, (datanodeThreshold - numLive), datanodeThreshold));
} else {
msg.append(String.format(
"The number of live datanodes %d has reached the minimum number"
+ " %d. ", numLive, datanodeThreshold));
}
} else {
msg.append("The number of live datanodes is not calculated ")
.append("since reported blocks hasn't reached the threshold. ");
}
} else {
msg.append("The minimum number of live datanodes is not required. ");
}
if (getBytesInFuture() > 0) {
msg.append("Name node detected blocks with generation stamps in future. ")
.append("This means that Name node metadata is inconsistent. This ")
.append("can happen if Name node metadata files have been manually ")
.append("replaced. Exiting safe mode will cause loss of ")
.append(getBytesInFuture())
.append(" byte(s). Please restart name node with right metadata ")
.append("or use \"hdfs dfsadmin -safemode forceExit\" if you ")
.append("are certain that the NameNode was started with the correct ")
.append("FsImage and edit logs. If you encountered this during ")
.append("a rollback, it is safe to exit with -safemode forceExit.");
return msg.toString();
}
final String turnOffTip = "Safe mode will be turned off automatically ";
switch(status) {
case PENDING_THRESHOLD:
msg.append(turnOffTip).append("once the thresholds have been reached.");
break;
case EXTENSION:
msg.append("In safe mode extension. ").append(turnOffTip).append("in ")
.append(timeToLeaveExtension() / 1000).append(" seconds.");
break;
case OFF:
msg.append(turnOffTip).append("soon.");
break;
default:
assert false : "Non-recognized block manager safe mode status: " + status;
}
return msg.toString();
} | @Test(timeout = 30000)
public void testGetSafeModeTip() throws Exception {
bmSafeMode.activate(BLOCK_TOTAL);
String tip = bmSafeMode.getSafeModeTip();
assertTrue(tip.contains(
String.format(
"The reported blocks %d needs additional %d blocks to reach the " +
"threshold %.4f of total blocks %d.%n",
0, BLOCK_THRESHOLD, THRESHOLD, BLOCK_TOTAL)));
assertTrue(tip.contains(
"The number of live datanodes is not calculated " +
"since reported blocks hasn't reached the threshold."));
assertTrue(tip.contains("Safe mode will be turned off automatically once " +
"the thresholds have been reached."));
// safe blocks are enough
setBlockSafe(BLOCK_THRESHOLD);
bmSafeMode.checkSafeMode();
tip = bmSafeMode.getSafeModeTip();
assertTrue(tip.contains(
String.format("The reported blocks %d has reached the threshold"
+ " %.4f of total blocks %d. ",
getblockSafe(), THRESHOLD, BLOCK_TOTAL)));
assertTrue(tip.contains(
String.format("The number of live datanodes %d has reached the " +
"minimum number %d. ", dn.getNumLiveDataNodes(), DATANODE_NUM)));
assertTrue(tip.contains("In safe mode extension. Safe mode will be turned" +
" off automatically in"));
waitForExtensionPeriod();
tip = bmSafeMode.getSafeModeTip();
assertTrue(tip.contains(
String.format("The reported blocks %d has reached the threshold"
+ " %.4f of total blocks %d. ",
getblockSafe(), THRESHOLD, BLOCK_TOTAL)));
assertTrue(tip.contains(
String.format("The number of live datanodes %d has reached the " +
"minimum number %d. ", dn.getNumLiveDataNodes(), DATANODE_NUM)));
assertTrue(tip.contains("Safe mode will be turned off automatically soon"));
} |
@Override
public Map<String, Object> processCsvFile(String encodedCsvData, boolean dryRun) throws JsonProcessingException {
services = new HashMap<>();
serviceParentChildren = new HashMap<>();
Map<String, Object> result = super.processCsvFile(encodedCsvData, dryRun);
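        // Only resolve legacy service IDs, persist, and process parent/child relations when the CSV produced at least one service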
if (!services.isEmpty()) {
retrieveLegacyServiceIds();
saveAll(dryRun);
processServiceParentChildren(serviceParentChildren, dryRun);
}
return result;
} | @Test
void processCsvFileFailUniqueEntityIdAndNameTest() throws IOException {
String csvData = """SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS""";
Service service = new Service();
Optional<Service> optService = Optional.of(service);
when(serviceRepositoryMock.findFirstByServiceUuid("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")).thenReturn(optService);
when(serviceRepositoryMock.findServicesByUuidAndEntityId("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")).thenReturn(List.of(new Service(), new Service()));
Map<String, Object> resultMap = csvService.processCsvFile(encodeCsv(csvData), false);
assertEquals("Bestand verwerkt", resultMap.get("result"));
String expectedValue = "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS";
assertTrue(((ArrayList) resultMap.get("failed")).size() == 1);
assertTrue(((ArrayList) resultMap.get("failed")).contains(expectedValue));
assertTrue(((ArrayList) resultMap.get("succeeded")).isEmpty());
} |
public MessageListener messageListener(MessageListener messageListener, boolean addConsumerSpan) {
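        // Return the listener unchanged if it is already wrapped, to avoid double tracing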
if (messageListener instanceof TracingMessageListener) return messageListener;
return new TracingMessageListener(messageListener, this, addConsumerSpan);
} | @Test void messageListener_wrapsInput() {
assertThat(jmsTracing.messageListener(mock(MessageListener.class), false))
.isInstanceOf(TracingMessageListener.class);
} |
@Override
public void execute() {
boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
if (debugMode) {
log.info("Load balancer enabled: {}, Split enabled: {}.",
conf.isLoadBalancerEnabled(), conf.isLoadBalancerAutoBundleSplitEnabled());
}
if (!isLoadBalancerAutoBundleSplitEnabled()) {
if (debugMode) {
log.info("The load balancer or load balancer split already disabled. Skipping.");
}
return;
}
synchronized (bundleSplitStrategy) {
final Set<SplitDecision> decisions = bundleSplitStrategy.findBundlesToSplit(context, pulsar);
if (debugMode) {
log.info("Split Decisions:", decisions);
}
if (!decisions.isEmpty()) {
// currently following the unloading timeout
var asyncOpTimeoutMs = conf.getNamespaceBundleUnloadingTimeoutMs();
List<CompletableFuture<Void>> futures = new ArrayList<>();
for (SplitDecision decision : decisions) {
if (decision.getLabel() == Success) {
var split = decision.getSplit();
futures.add(
splitManager.waitAsync(
serviceUnitStateChannel.publishSplitEventAsync(split),
split.serviceUnit(),
decision,
asyncOpTimeoutMs, TimeUnit.MILLISECONDS)
);
}
}
try {
FutureUtil.waitForAll(futures)
.get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS);
} catch (Throwable e) {
log.error("Failed to wait for split events to persist.", e);
}
} else {
if (debugMode) {
log.info("BundleSplitStrategy returned no bundles to split.");
}
}
}
if (counter.updatedAt() > counterLastUpdatedAt) {
splitMetrics.set(counter.toMetrics(pulsar.getAdvertisedAddress()));
counterLastUpdatedAt = counter.updatedAt();
}
} | @Test(timeOut = 30 * 1000)
public void testExecuteSuccess() {
AtomicReference<List<Metrics>> reference = new AtomicReference();
SplitCounter counter = new SplitCounter();
SplitManager manager = mock(SplitManager.class);
SplitScheduler scheduler = new SplitScheduler(pulsar, channel, manager, counter, reference, context, strategy);
doAnswer((invocation)->{
var decision = invocation.getArgument(2, SplitDecision.class);
counter.update(decision);
return CompletableFuture.completedFuture(null);
}).when(manager).waitAsync(any(), any(), any(), anyLong(), any());
scheduler.execute();
var counterExpected = new SplitCounter();
counterExpected.update(decision1);
counterExpected.update(decision2);
verify(channel, times(1)).publishSplitEventAsync(eq(decision1.getSplit()));
verify(channel, times(1)).publishSplitEventAsync(eq(decision2.getSplit()));
assertEquals(reference.get().toString(), counterExpected.toMetrics(pulsar.getAdvertisedAddress()).toString());
// Test empty splits.
Set<SplitDecision> emptyUnload = Set.of();
doReturn(emptyUnload).when(strategy).findBundlesToSplit(any(), any());
scheduler.execute();
verify(channel, times(2)).publishSplitEventAsync(any());
assertEquals(reference.get().toString(), counterExpected.toMetrics(pulsar.getAdvertisedAddress()).toString());
} |
@Override
public Iterator<T> iterator() {
return new LinkedSetIterator();
} | @Test
public void testRemoveAll() {
LOG.info("Test remove all");
for (Integer i : list) {
assertTrue(set.add(i));
}
for (int i = 0; i < NUM; i++) {
assertTrue(set.remove(list.get(i)));
}
// the deleted elements should not be there
for (int i = 0; i < NUM; i++) {
assertFalse(set.contains(list.get(i)));
}
// iterator should not have next
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
assertTrue(set.isEmpty());
LOG.info("Test remove all - DONE");
} |
@Override
public Publisher<Exchange> to(String uri, Object data) {
String streamName = requestedUriToStream.computeIfAbsent(uri, camelUri -> {
try {
String uuid = context.getUuidGenerator().generateUuid();
RouteBuilder.addRoutes(context, rb -> rb.from("reactive-streams:" + uuid).to(camelUri));
return uuid;
} catch (Exception e) {
throw new IllegalStateException("Unable to create requested reactive stream from direct URI: " + uri, e);
}
});
return toStream(streamName, data);
} | @Test
public void testTo() throws Exception {
context.start();
AtomicInteger value = new AtomicInteger();
CountDownLatch latch = new CountDownLatch(1);
Flowable.just(1, 2, 3).flatMap(e -> crs.to("bean:hello", e, String.class))
.doOnNext(res -> assertEquals("Hello " + value.incrementAndGet(), res))
.doOnNext(res -> latch.countDown()).subscribe();
assertTrue(latch.await(2, TimeUnit.SECONDS));
} |
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
return criterionValue1.isLessThan(criterionValue2);
} | @Test
public void betterThan() {
AnalysisCriterion criterion = getCriterion();
assertTrue(criterion.betterThan(numOf(3), numOf(6)));
assertFalse(criterion.betterThan(numOf(7), numOf(4)));
} |
@Override
public int getTcpReceiveBufferSize() {
return clientConfig.getPropertyAsInteger(IClientConfigKey.Keys.ReceiveBufferSize, DEFAULT_BUFFER_SIZE);
} | @Test
void testGetTcpReceiveBufferSize() {
assertEquals(ConnectionPoolConfigImpl.DEFAULT_BUFFER_SIZE, connectionPoolConfig.getTcpReceiveBufferSize());
} |
public static File copy(String srcPath, String destPath, boolean isOverride) throws IORuntimeException {
return copy(file(srcPath), file(destPath), isOverride);
} | @Test
@Disabled
public void copyTest2(){
final File copy = FileUtil.copy("d:/test/qrcodeCustom.png", "d:/test/pic", false);
		// When copying a file into the target directory, the copied target file is returned, not the directory
Console.log(copy);
} |
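The test above is disabled because it points at absolute Windows paths. A self-contained variant of the same copy call, assuming JUnit 5's @TempDir plus Hutool's FileUtil.writeUtf8String and FileUtil.mkdir helpers, might look roughly like this:
@Test
public void copyToDirTest(@TempDir Path tempDir) {
    final File src = tempDir.resolve("qrcodeCustom.png").toFile();
    FileUtil.writeUtf8String("placeholder content", src);            // create a small source file to copy
    final File destDir = FileUtil.mkdir(tempDir.resolve("pic").toFile());
    // Copying a file into an existing directory returns the copied target file, not the directory.
    final File copy = FileUtil.copy(src.getAbsolutePath(), destDir.getAbsolutePath(), false);
    Console.log(copy);
}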
public static List<DataType> getFieldDataTypes(DataType dataType) {
final LogicalType type = dataType.getLogicalType();
if (type.is(LogicalTypeRoot.DISTINCT_TYPE)) {
return getFieldDataTypes(dataType.getChildren().get(0));
} else if (isCompositeType(type)) {
return dataType.getChildren();
}
return Collections.emptyList();
} | @Test
void testGetFieldDataTypes() {
assertThat(
DataType.getFieldDataTypes(
ROW(
FIELD("c0", BOOLEAN()),
FIELD("c1", DOUBLE()),
FIELD("c2", INT()))))
.containsExactly(BOOLEAN(), DOUBLE(), INT());
assertThat(
DataType.getFieldDataTypes(
STRUCTURED(
DataTypesTest.SimplePojo.class,
FIELD("name", STRING()),
FIELD("count", INT().notNull().bridgedTo(int.class)))))
.containsExactly(STRING(), INT().notNull().bridgedTo(int.class));
assertThat(DataType.getFieldDataTypes(ARRAY(INT()))).isEmpty();
assertThat(DataType.getFieldDataTypes(INT())).isEmpty();
} |
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
} | @Test
public void optifineIsNotCompatibleWithForge3() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/logs/optifine_is_not_compatible_with_forge4.txt")),
CrashReportAnalyzer.Rule.OPTIFINE_IS_NOT_COMPATIBLE_WITH_FORGE);
} |