focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k) |
---|---|
@Override
public Optional<Object> invoke(Function<InvokerContext, Object> invokeFunc, Function<Throwable, Object> exFunc,
String serviceName) {
return invoke(invokeFunc, exFunc, serviceName, getRetry(null));
} | @Test
public void testInvoke() {
mockInstances();
// Normal call
testNormalInvoke(buildRetryConfig("normal"));
// Exception calls
testErrorInvoke(buildRetryConfig("error"));
} |
public static Setting get(String name) {
return SETTING_MAP.computeIfAbsent(name, (filePath)->{
final String extName = FileNameUtil.extName(filePath);
if (StrUtil.isEmpty(extName)) {
filePath = filePath + "." + Setting.EXT_NAME;
}
return new Setting(filePath, true);
});
} | @Test
public void getTest() {
String driver = SettingUtil.get("test").get("demo", "driver");
assertEquals("com.mysql.jdbc.Driver", driver);
} |
public static boolean equalsIgnoreCase(final CharSequence str1, final CharSequence str2) {
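// Two nulls compare equal; exactly one null compares unequal.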
if (str1 == null || str2 == null) {
return str1 == str2;
} else if (str1 == str2) {
return true;
} else if (str1.length() != str2.length()) {
return false;
} else {
return regionMatches(str1, true, 0, str2, 0, str1.length());
}
} | @Test
public void testEqualsIgnoreCase() {
Assert.assertFalse(StringUtil.equalsIgnoreCase("", "BCCC"));
Assert.assertFalse(StringUtil.equalsIgnoreCase(null, ""));
Assert.assertTrue(StringUtil.equalsIgnoreCase("", ""));
Assert.assertTrue(StringUtil.equalsIgnoreCase("BcCc", "BCCC"));
Assert.assertTrue(StringUtil.equalsIgnoreCase(null, null));
} |
@Override
public EncryptRuleConfiguration buildToBeDroppedRuleConfiguration(final DropEncryptRuleStatement sqlStatement) {
Collection<EncryptTableRuleConfiguration> toBeDroppedTables = new LinkedList<>();
Map<String, AlgorithmConfiguration> toBeDroppedEncryptors = new HashMap<>();
for (String each : sqlStatement.getTables()) {
toBeDroppedTables.add(new EncryptTableRuleConfiguration(each, Collections.emptyList()));
dropRule(each);
}
UnusedAlgorithmFinder.findUnusedEncryptor(rule.getConfiguration()).forEach(each -> toBeDroppedEncryptors.put(each, rule.getConfiguration().getEncryptors().get(each)));
return new EncryptRuleConfiguration(toBeDroppedTables, toBeDroppedEncryptors);
} | @Test
void assertUpdateCurrentRuleConfiguration() {
EncryptRuleConfiguration ruleConfig = createCurrentRuleConfiguration();
EncryptRule rule = mock(EncryptRule.class);
when(rule.getConfiguration()).thenReturn(ruleConfig);
executor.setRule(rule);
EncryptRuleConfiguration toBeDroppedRuleConfig = executor.buildToBeDroppedRuleConfiguration(createSQLStatement("T_ENCRYPT"));
assertThat(toBeDroppedRuleConfig.getTables().size(), is(1));
assertThat(toBeDroppedRuleConfig.getEncryptors().size(), is(3));
} |
@Override
public void createService(String serviceName) throws NacosException {
createService(serviceName, Constants.DEFAULT_GROUP);
} | @Test
void testCreateService3() throws NacosException {
//given
String serviceName = "service1";
String groupName = "groupName";
float protectThreshold = 0.1f;
//when
nacosNamingMaintainService.createService(serviceName, groupName, protectThreshold);
//then
verify(serverProxy, times(1)).createService(argThat(new ArgumentMatcher<Service>() {
@Override
public boolean matches(Service service) {
return service.getName().equals(serviceName) && service.getGroupName().equals(groupName)
&& Math.abs(service.getProtectThreshold() - protectThreshold) < 0.1f
&& service.getMetadata().size() == 0;
}
}), argThat(o -> o instanceof NoneSelector));
} |
public static void copyConfigurationToJob(Properties props, Map<String, String> jobProps)
throws HiveException, IOException {
checkRequiredPropertiesAreDefined(props);
resolveMetadata(props);
for (Entry<Object, Object> entry : props.entrySet()) {
String key = String.valueOf(entry.getKey());
if (!key.equals(CONFIG_PWD) &&
!key.equals(CONFIG_PWD_KEYSTORE) &&
!key.equals(CONFIG_PWD_KEY) &&
!key.equals(CONFIG_PWD_URI)) {
jobProps.put(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
}
}
} | @Ignore @Test(expected = IllegalArgumentException.class)
public void testWithJdbcUrlMissing() throws Exception {
Properties props = new Properties();
props.put(JdbcStorageConfig.DATABASE_TYPE.getPropertyName(), DatabaseType.MYSQL.toString());
props.put(JdbcStorageConfig.QUERY.getPropertyName(), "SELECT col1,col2,col3 FROM sometable");
Map<String, String> jobMap = new HashMap<>();
JdbcStorageConfigManager.copyConfigurationToJob(props, jobMap);
} |
Main(Logger console) {
this.console = console;
this.jc = new JCommander(this);
this.help = new Help(jc, console);
jc.setProgramName(DEFAULT_PROGRAM_NAME);
jc.addCommand("help", help, "-h", "-help", "--help");
jc.addCommand("meta", new ParquetMetadataCommand(console));
jc.addCommand("pages", new ShowPagesCommand(console));
jc.addCommand("dictionary", new ShowDictionaryCommand(console));
jc.addCommand("check-stats", new CheckParquet251Command(console));
jc.addCommand("schema", new SchemaCommand(console));
jc.addCommand("csv-schema", new CSVSchemaCommand(console));
jc.addCommand("convert-csv", new ConvertCSVCommand(console));
jc.addCommand("convert", new ConvertCommand(console));
jc.addCommand("to-avro", new ToAvroCommand(console));
jc.addCommand("cat", new CatCommand(console, 0));
jc.addCommand("head", new CatCommand(console, 10));
jc.addCommand("column-index", new ShowColumnIndexCommand(console));
jc.addCommand("column-size", new ColumnSizeCommand(console));
jc.addCommand("prune", new PruneColumnsCommand(console));
jc.addCommand("trans-compression", new TransCompressionCommand(console));
jc.addCommand("masking", new ColumnMaskingCommand(console));
jc.addCommand("footer", new ShowFooterCommand(console));
jc.addCommand("bloom-filter", new ShowBloomFilterCommand(console));
jc.addCommand("scan", new ScanCommand(console));
jc.addCommand("rewrite", new RewriteCommand(console));
} | @Test
public void mainTest() throws Exception {
ToolRunner.run(new Configuration(), new Main(LoggerFactory.getLogger(MainTest.class)), new String[] {});
Assert.assertTrue("we simply verify there are no errors here", true);
} |
@Override public boolean remove(long key1, long key2) {
return super.remove0(key1, key2);
} | @Test
public void testSize() {
final long key1 = randomKey();
final long key2 = randomKey();
insert(key1, key2);
assertEquals(1, hsa.size());
assertTrue(hsa.remove(key1, key2));
assertEquals(0, hsa.size());
} |
@Override
public Optional<ShardingConditionValue> generate(final BinaryOperationExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) {
String operator = predicate.getOperator().toUpperCase();
if (!isSupportedOperator(operator)) {
return Optional.empty();
}
ExpressionSegment valueExpression = predicate.getLeft() instanceof ColumnSegment ? predicate.getRight() : predicate.getLeft();
ConditionValue conditionValue = new ConditionValue(valueExpression, params);
if (conditionValue.isNull()) {
return generate(null, column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
Optional<Comparable<?>> value = conditionValue.getValue();
if (value.isPresent()) {
return generate(value.get(), column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
if (ExpressionConditionUtils.isNowExpression(valueExpression)) {
return generate(timestampServiceRule.getTimestamp(), column, operator, -1);
}
return Optional.empty();
} | @SuppressWarnings("unchecked")
@Test
void assertGenerateConditionValueWithNowExpression() {
BinaryOperationExpression rightValue = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, "now()"), "=", null);
Optional<ShardingConditionValue> shardingConditionValue = generator.generate(rightValue, column, new LinkedList<>(), mock(TimestampServiceRule.class));
assertTrue(shardingConditionValue.isPresent());
assertFalse(((ListShardingConditionValue<Integer>) shardingConditionValue.get()).getValues().isEmpty());
assertTrue(shardingConditionValue.get().getParameterMarkerIndexes().isEmpty());
} |
static String getLocationFrom( HttpPost method ) {
Header locationHeader = method.getFirstHeader( "Location" );
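// Assumes a Location header is present; getValue() would throw an NPE otherwise.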
return locationHeader.getValue();
} | @Test
public void getLocationFrom() {
HttpPost postMethod = mock( HttpPost.class );
Header locationHeader = new BasicHeader( LOCATION_HEADER, TEST_URL );
doReturn( locationHeader ).when( postMethod ).getFirstHeader( LOCATION_HEADER );
assertEquals( TEST_URL, WebService.getLocationFrom( postMethod ) );
} |
@Override
public <UK, UV> MapState<UK, UV> getMapState(MapStateDescriptor<UK, UV> stateProperties) {
KeyedStateStore keyedStateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties);
stateProperties.initializeSerializerUnlessSet(this::createSerializer);
return keyedStateStore.getMapState(stateProperties);
} | @Test
void testMapStateInstantiation() throws Exception {
final ExecutionConfig config = new ExecutionConfig();
config.getSerializerConfig().registerKryoType(Path.class);
final AtomicReference<Object> descriptorCapture = new AtomicReference<>();
StreamingRuntimeContext context = createRuntimeContext(descriptorCapture, config);
MapStateDescriptor<String, TaskInfo> descr =
new MapStateDescriptor<>("name", String.class, TaskInfo.class);
context.getMapState(descr);
MapStateDescriptor<?, ?> descrIntercepted =
(MapStateDescriptor<?, ?>) descriptorCapture.get();
TypeSerializer<?> valueSerializer = descrIntercepted.getValueSerializer();
// check that the Path class is really registered, i.e., the execution config was applied
assertThat(valueSerializer).isInstanceOf(KryoSerializer.class);
assertThat(
((KryoSerializer<?>) valueSerializer)
.getKryo()
.getRegistration(Path.class)
.getId())
.isPositive();
} |
public static Optional<? extends Schema> getSchema(io.swagger.v3.oas.annotations.media.Content annotationContent, Components components, JsonView jsonViewAnnotation) {
return getSchema(annotationContent, components, jsonViewAnnotation, false);
} | @Test(dataProvider = "expectedSchemaFromTypeAndFormat")
public void getSchema(String methodName, Map<String, Object> expected) throws NoSuchMethodException {
final Method method = getClass().getDeclaredMethod(methodName);
Content annotationContent = method.getAnnotation(ApiResponse.class).content()[0];
Optional<? extends Schema> schema = AnnotationsUtils.getSchema(annotationContent, new Components(), null, false);
Assert.assertTrue(schema.isPresent());
Assert.assertEquals(schema.get().getType(), expected.get("type"));
Assert.assertEquals(schema.get().getFormat(), expected.get("format"));
Assert.assertEquals(schema.get().get$ref(), expected.get("$ref"));
} |
@VisibleForTesting
public DU(File path, long interval, long jitter, long initialUsed)
throws IOException {
super(path, interval, jitter, initialUsed);
this.duShell = new DUShell();
} | @Test
public void testDU() throws IOException, InterruptedException {
final int writtenSize = 32*1024; // writing 32K
// Allow for extra 4K on-disk slack for local file systems
// that may store additional file metadata (eg ext attrs).
final int slack = 4*1024;
File file = new File(DU_DIR, "data");
createFile(file, writtenSize);
Thread.sleep(5000); // let the metadata updater catch up
DU du = new DU(file, 10000, 0, -1);
du.init();
long duSize = du.getUsed();
du.close();
assertTrue("Invalid on-disk size",
duSize >= writtenSize &&
writtenSize <= (duSize + slack));
//test with 0 interval, will not launch thread
du = new DU(file, 0, 1, -1);
du.init();
duSize = du.getUsed();
du.close();
assertTrue("Invalid on-disk size",
duSize >= writtenSize &&
writtenSize <= (duSize + slack));
//test without launching thread
du = new DU(file, 10000, 0, -1);
du.init();
duSize = du.getUsed();
assertTrue("Invalid on-disk size",
duSize >= writtenSize &&
writtenSize <= (duSize + slack));
} |
@PUT
@Path("enable/{name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response enableConfig(@PathParam("name") String configName) {
log.trace(String.format(MESSAGE_CONFIG, UPDATE));
TelemetryConfig config = configService.getConfig(
nullIsIllegal(configName, CONFIG_NAME + NOT_NULL_MESSAGE));
if (config == null) {
log.warn("There is no config found to enable for {}", configName);
return Response.notModified().build();
} else {
TelemetryConfig updatedConfig = config.updateStatus(ENABLED);
configService.updateTelemetryConfig(updatedConfig);
return Response.ok().build();
}
} | @Test
public void testEnableConfig() {
expect(mockConfigAdminService.getConfig(anyString()))
.andReturn(telemetryConfig).once();
mockConfigAdminService.updateTelemetryConfig(telemetryConfig);
replay(mockConfigAdminService);
final WebTarget wt = target();
Response response = wt.path(PATH + "/enable/test1")
.request(MediaType.APPLICATION_JSON_TYPE)
.put(Entity.json(""));
final int status = response.getStatus();
assertEquals(200, status);
verify(mockConfigAdminService);
} |
public Double getProcessCpuLoad() {
return getMXBeanValueAsDouble("ProcessCpuLoad");
} | @Test
void ifOperatingSystemMXBeanReturnsNaNForProcessCpuLoadOnLaterCalls_CachedValueIsReturned() throws JMException {
when(mBeanServer.getAttribute(objectName, "ProcessCpuLoad")).thenReturn(0.7, Double.NaN, 0.5);
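// The NaN from the second call is discarded and the cached 0.7 is returned instead.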
assertThat(jobServerStats.getProcessCpuLoad()).isEqualTo(0.7);
assertThat(jobServerStats.getProcessCpuLoad()).isEqualTo(0.7);
assertThat(jobServerStats.getProcessCpuLoad()).isEqualTo(0.5);
} |
public Command create(
final ConfiguredStatement<? extends Statement> statement,
final KsqlExecutionContext context) {
return create(statement, context.getServiceContext(), context);
} | @Test
public void shouldFailValidationForTerminateUnknownQuery() {
// Given:
configuredStatement = configuredStatement("TERMINATE X", terminateQuery);
when(terminateQuery.getQueryId()).thenReturn(Optional.of(QUERY_ID));
when(executionContext.getPersistentQuery(QUERY_ID)).thenReturn(Optional.empty());
// When:
final Exception e = assertThrows(
KsqlStatementException.class,
() -> commandFactory.create(configuredStatement, executionContext)
);
// Then:
assertThat(e.getMessage(), containsString("Unknown queryId"));
} |
public ConfigTransformerResult transform(Map<String, String> configs) {
Map<String, Map<String, Set<String>>> keysByProvider = new HashMap<>();
Map<String, Map<String, Map<String, String>>> lookupsByProvider = new HashMap<>();
// Collect the variables from the given configs that need transformation
for (Map.Entry<String, String> config : configs.entrySet()) {
if (config.getValue() != null) {
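// Config values may reference provider variables of the form ${provider:[path:]key}.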
List<ConfigVariable> configVars = getVars(config.getValue(), DEFAULT_PATTERN);
for (ConfigVariable configVar : configVars) {
Map<String, Set<String>> keysByPath = keysByProvider.computeIfAbsent(configVar.providerName, k -> new HashMap<>());
Set<String> keys = keysByPath.computeIfAbsent(configVar.path, k -> new HashSet<>());
keys.add(configVar.variable);
}
}
}
// Retrieve requested variables from the ConfigProviders
Map<String, Long> ttls = new HashMap<>();
for (Map.Entry<String, Map<String, Set<String>>> entry : keysByProvider.entrySet()) {
String providerName = entry.getKey();
ConfigProvider provider = configProviders.get(providerName);
Map<String, Set<String>> keysByPath = entry.getValue();
if (provider != null && keysByPath != null) {
for (Map.Entry<String, Set<String>> pathWithKeys : keysByPath.entrySet()) {
String path = pathWithKeys.getKey();
Set<String> keys = new HashSet<>(pathWithKeys.getValue());
ConfigData configData = provider.get(path, keys);
Map<String, String> data = configData.data();
Long ttl = configData.ttl();
if (ttl != null && ttl >= 0) {
ttls.put(path, ttl);
}
Map<String, Map<String, String>> keyValuesByPath =
lookupsByProvider.computeIfAbsent(providerName, k -> new HashMap<>());
keyValuesByPath.put(path, data);
}
}
}
// Perform the transformations by performing variable replacements
Map<String, String> data = new HashMap<>(configs);
for (Map.Entry<String, String> config : configs.entrySet()) {
data.put(config.getKey(), replace(lookupsByProvider, config.getValue(), DEFAULT_PATTERN));
}
return new ConfigTransformerResult(data, ttls);
} | @Test
public void testNoReplacement() {
ConfigTransformerResult result = configTransformer.transform(Collections.singletonMap(MY_KEY, "${test:testPath:missingKey}"));
Map<String, String> data = result.data();
assertEquals("${test:testPath:missingKey}", data.get(MY_KEY));
} |
@Override
public int configInfoBetaCount() {
ConfigInfoBetaMapper configInfoBetaMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_BETA);
String sql = configInfoBetaMapper.count(null);
Integer result = jt.queryForObject(sql, Integer.class);
return result.intValue();
} | @Test
void testConfigInfoBetaCount() {
when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class))).thenReturn(101);
int returnCount = externalConfigInfoBetaPersistService.configInfoBetaCount();
assertEquals(101, returnCount);
} |
@Override
public WindowStoreIterator<V> backwardFetch(final K key,
final Instant timeFrom,
final Instant timeTo) throws IllegalArgumentException {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
try {
final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
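// Skip stores that returned no data, closing their iterators to release resources.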
if (!result.hasNext()) {
result.close();
} else {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException(
"State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.");
}
}
return KeyValueIterators.emptyWindowStoreIterator();
} | @Test
public void shouldThrowInvalidStateStoreExceptionIfBackwardFetchThrows() {
underlyingWindowStore.setOpen(false);
final CompositeReadOnlyWindowStore<Object, Object> store =
new CompositeReadOnlyWindowStore<>(
new WrappingStoreProvider(singletonList(stubProviderOne), StoreQueryParameters.fromNameAndType("window-store", QueryableStoreTypes.windowStore())),
QueryableStoreTypes.windowStore(),
"window-store"
);
try {
store.backwardFetch("key", ofEpochMilli(1), ofEpochMilli(10));
fail("InvalidStateStoreException was expected");
} catch (final InvalidStateStoreException e) {
assertEquals("State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.", e.getMessage());
}
} |
@Description("bitwise XOR in 2's complement arithmetic")
@ScalarFunction
@SqlType(StandardTypes.BIGINT)
public static long bitwiseXor(@SqlType(StandardTypes.BIGINT) long left, @SqlType(StandardTypes.BIGINT) long right)
{
return left ^ right;
} | @Test
public void testBitwiseXor()
{
assertFunction("bitwise_xor(0, -1)", BIGINT, -1L);
assertFunction("bitwise_xor(3, 8)", BIGINT, 3L ^ 8L);
assertFunction("bitwise_xor(-4, 12)", BIGINT, -4L ^ 12L);
assertFunction("bitwise_xor(60, 21)", BIGINT, 60L ^ 21L);
} |
@Override
public void open() throws InterpreterException {
try {
SparkConf conf = new SparkConf();
for (Map.Entry<Object, Object> entry : getProperties().entrySet()) {
if (!StringUtils.isBlank(entry.getValue().toString())) {
conf.set(entry.getKey().toString(), entry.getValue().toString());
}
// zeppelin.spark.useHiveContext & zeppelin.spark.concurrentSQL are legacy zeppelin
// properties, convert them to spark properties here.
if (entry.getKey().toString().equals("zeppelin.spark.useHiveContext")) {
conf.set("spark.useHiveContext", entry.getValue().toString());
}
if (entry.getKey().toString().equals("zeppelin.spark.concurrentSQL")
&& entry.getValue().toString().equals("true")) {
conf.set(SparkStringConstants.SCHEDULER_MODE_PROP_NAME, "FAIR");
}
}
// use local mode for embedded spark mode when spark.master is not found
if (!conf.contains(SparkStringConstants.MASTER_PROP_NAME)) {
if (conf.contains("master")) {
conf.set(SparkStringConstants.MASTER_PROP_NAME, conf.get("master"));
} else {
String masterEnv = System.getenv(SparkStringConstants.MASTER_ENV_NAME);
conf.set(SparkStringConstants.MASTER_PROP_NAME,
masterEnv == null ? SparkStringConstants.DEFAULT_MASTER_VALUE : masterEnv);
}
}
this.innerInterpreter = loadSparkScalaInterpreter(conf);
this.innerInterpreter.open();
sc = this.innerInterpreter.getSparkContext();
jsc = JavaSparkContext.fromSparkContext(sc);
sparkVersion = SparkVersion.fromVersionString(sc.version());
if (enableSupportedVersionCheck && sparkVersion.isUnsupportedVersion()) {
throw new Exception("This is not officially supported spark version: " + sparkVersion
+ "\nYou can set zeppelin.spark.enableSupportedVersionCheck to false if you really" +
" want to try this version of spark.");
}
sqlContext = this.innerInterpreter.getSqlContext();
sparkSession = this.innerInterpreter.getSparkSession();
SESSION_NUM.incrementAndGet();
} catch (Exception e) {
LOGGER.error("Fail to open SparkInterpreter", e);
throw new InterpreterException("Fail to open SparkInterpreter", e);
}
} | @Test
void testDisableReplOutputForParagraph() throws InterpreterException {
Properties properties = new Properties();
properties.setProperty("spark.master", "local");
properties.setProperty("spark.app.name", "test");
properties.setProperty("zeppelin.spark.maxResult", "100");
properties.setProperty("zeppelin.spark.printREPLOutput", "true");
// disable color output for easy testing
properties.setProperty("zeppelin.spark.scala.color", "false");
properties.setProperty("zeppelin.spark.deprecatedMsg.show", "false");
InterpreterContext.set(getInterpreterContext());
interpreter = new SparkInterpreter(properties);
interpreter.setInterpreterGroup(mock(InterpreterGroup.class));
interpreter.open();
InterpreterResult result = interpreter.interpret("val a=\"hello world\"", getInterpreterContext());
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
// Use contains instead of equals, because behavior differs between Scala versions
assertTrue(output.contains("a: String = hello world\n"), output);
result = interpreter.interpret("print(a)", getInterpreterContext());
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
// output from print statement will still be displayed
assertEquals("hello world", output);
// disable REPL output
InterpreterContext context = getInterpreterContext();
context.getLocalProperties().put("printREPLOutput", "false");
result = interpreter.interpret("print(a)", context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
// output from print statement will disappear
assertEquals("", output);
// REPL output comes back if we don't set printREPLOutput in paragraph local properties
result = interpreter.interpret("val a=\"hello world\"", getInterpreterContext());
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
assertTrue(output.contains("a: String = hello world\n"), output);
result = interpreter.interpret("print(a)", getInterpreterContext());
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
// output from print statement will still be displayed
assertEquals("hello world", output);
} |
@Override
public List<DataSourceConfigDO> getDataSourceConfigList() {
List<DataSourceConfigDO> result = dataSourceConfigMapper.selectList();
// Prepend the master data source
result.add(0, buildMasterDataSourceConfig());
return result;
} | @Test
public void testGetDataSourceConfigList() {
// Mock data
DataSourceConfigDO dbDataSourceConfig = randomPojo(DataSourceConfigDO.class);
dataSourceConfigMapper.insert(dbDataSourceConfig);// @Sql: insert an existing row first
// Prepare parameters
// Invoke
List<DataSourceConfigDO> dataSourceConfigList = dataSourceConfigService.getDataSourceConfigList();
// Assert
assertEquals(2, dataSourceConfigList.size());
// master
assertEquals(0L, dataSourceConfigList.get(0).getId());
assertEquals("primary", dataSourceConfigList.get(0).getName());
assertEquals("http://localhost:3306", dataSourceConfigList.get(0).getUrl());
assertEquals("yunai", dataSourceConfigList.get(0).getUsername());
assertEquals("tudou", dataSourceConfigList.get(0).getPassword());
// normal
assertPojoEquals(dbDataSourceConfig, dataSourceConfigList.get(1));
} |
public OkHttpClientBuilder setConnectTimeoutMs(long l) {
if (l < 0) {
throw new IllegalArgumentException("Connect timeout must be positive. Got " + l);
}
this.connectTimeoutMs = l;
return this;
} | @Test
public void build_throws_IAE_if_connect_timeout_is_negative() {
assertThatThrownBy(() -> underTest.setConnectTimeoutMs(-10))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Connect timeout must be positive. Got -10");
} |
@Override
public String pluginNamed() {
return PluginEnum.MODIFY_RESPONSE.getName();
} | @Test
public void pluginNamedTest() {
assertEquals(modifyResponsePluginDataHandler.pluginNamed(), PluginEnum.MODIFY_RESPONSE.getName());
} |
@Deprecated
static void updateBlockHandlerFor(Class<?> clazz, String name, Method method) {
if (clazz == null || StringUtil.isBlank(name)) {
throw new IllegalArgumentException("Bad argument");
}
BLOCK_HANDLER_MAP.put(getKey(clazz, name), MethodWrapper.wrap(method));
} | @Test(expected = IllegalArgumentException.class)
public void testUpdateBlockHandlerBadArgument() {
ResourceMetadataRegistry.updateBlockHandlerFor(null, "sxs", new Class[0], String.class.getMethods()[0]);
} |
@Override
public void upgrade() {
if (hasBeenRunSuccessfully()) {
LOG.debug("Migration already completed.");
return;
}
final Set<String> dashboardIdToViewId = new HashSet<>();
final Consumer<String> recordMigratedDashboardIds = dashboardIdToViewId::add;
final Map<String, Set<String>> widgetIdMigrationMapping = new HashMap<>();
final Consumer<Map<String, Set<String>>> recordMigratedWidgetIds = widgetIdMigrationMapping::putAll;
final Map<View, Search> newViews = this.dashboardsService.streamAll()
.sorted(Comparator.comparing(Dashboard::id))
.map(dashboard -> migrateDashboard(dashboard, recordMigratedDashboardIds, recordMigratedWidgetIds))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
writeViews(newViews);
final MigrationCompleted migrationCompleted = MigrationCompleted.create(dashboardIdToViewId, widgetIdMigrationMapping);
writeMigrationCompleted(migrationCompleted);
} | @Test
@MongoDBFixtures("dashboard_with_superfluous_widget_attributes.json")
public void migratesADashboardWithSuperfluousWidgetAttributes() {
this.migration.upgrade();
final MigrationCompleted migrationCompleted = captureMigrationCompleted();
assertThat(migrationCompleted.migratedDashboardIds()).containsExactly("5ddf8ed5b2d44b2e04472992");
assertThat(migrationCompleted.widgetMigrationIds()).hasSize(19);
verify(viewService, times(1)).save(any());
verify(searchService, times(1)).save(any());
} |
@Override
public KTable<K, VOut> aggregate(final Initializer<VOut> initializer,
final Materialized<K, VOut, KeyValueStore<Bytes, byte[]>> materialized) {
return aggregate(initializer, NamedInternal.empty(), materialized);
} | @Test
public void shouldNotHaveNullMaterializedOnAggregateWithNames() {
assertThrows(NullPointerException.class, () -> cogroupedStream.aggregate(STRING_INITIALIZER, Named.as("name"), null));
} |
public abstract Optional<Long> maxPartitionRefreshTs(); | @Test
public void testMaxPartitionRefreshTs() {
Map<String, PartitionInfo> fakePartitionInfo = new HashMap<>();
Partition p1 = new Partition("p1", 100);
Partition p2 = new Partition("p2", 200);
fakePartitionInfo.put("p1", p1);
fakePartitionInfo.put("p2", p2);
new MockUp<DefaultTraits>() {
@Mock
public Map<String, PartitionInfo> getPartitionNameWithPartitionInfo() {
return fakePartitionInfo;
}
};
Optional<Long> result = new PaimonPartitionTraits().maxPartitionRefreshTs();
Assert.assertTrue(result.isPresent());
Assert.assertEquals(200L, result.get().longValue());
} |
private JobDetails() {
this(null, null, null, null);
// used for deserialization
} | @Test
void testJobDetails() {
JobDetails jobDetails = jobDetails()
.withClassName(TestService.class)
.withMethodName("doWork")
.withJobParameter(5)
.build();
assertThat(jobDetails)
.hasClass(TestService.class)
.hasStaticFieldName(null)
.hasMethodName("doWork")
.hasArgs(5)
.isNotCacheable();
assertThat(jobDetails.getJobParameterTypes()).isEqualTo(new Class[]{Integer.class});
assertThat(jobDetails.getJobParameterValues()).isEqualTo(new Object[]{5});
} |
public static CodecFactory fromHadoopString(String hadoopCodecClass) {
CodecFactory o = null;
try {
String avroCodec = HADOOP_AVRO_NAME_MAP.get(hadoopCodecClass);
if (avroCodec != null) {
o = CodecFactory.fromString(avroCodec);
}
} catch (Exception e) {
throw new AvroRuntimeException("Unrecognized hadoop codec: " + hadoopCodecClass, e);
}
return o;
} | @Test
void hadoopCodecFactoryBZip2() {
CodecFactory hadoopBZip2Codec = HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.BZip2Codec");
CodecFactory avroBZip2Codec = CodecFactory.fromString("bzip2");
assertEquals(hadoopBZip2Codec.getClass(), avroBZip2Codec.getClass());
} |
public static Object parse(String element) throws PathSegment.PathSegmentSyntaxException
{
Queue<Token> tokens = tokenizeElement(element);
Object result = parseElement(tokens);
if (!tokens.isEmpty())
{
throw new PathSegment.PathSegmentSyntaxException("tokens left over after parsing; first excess token: " + tokens.peek().toErrorString() );
}
return result;
} | @Test(dataProvider = "undecodables")
public void testUndecodable(String undecodable, String expectedErrorMessage)
{
try
{
URIElementParser.parse(undecodable);
Assert.fail();
}
catch (PathSegment.PathSegmentSyntaxException e)
{
Assert.assertEquals(e.getMessage(), expectedErrorMessage);
}
} |
public static List<PortDescription> parseJuniperPorts(HierarchicalConfiguration cfg) {
//This methods ignores some internal ports
List<PortDescription> portDescriptions = new ArrayList<>();
List<HierarchicalConfiguration> subtrees =
cfg.configurationsAt(IF_INFO);
for (HierarchicalConfiguration interfInfo : subtrees) {
List<HierarchicalConfiguration> interfaceTree =
interfInfo.configurationsAt(IF_PHY);
for (HierarchicalConfiguration phyIntf : interfaceTree) {
if (phyIntf == null) {
continue;
}
// parse physical Interface
parsePhysicalInterface(portDescriptions, phyIntf);
}
}
return portDescriptions;
} | @Test
public void testInterfacesParsedFromJunos18() {
HierarchicalConfiguration reply = XmlConfigParser.loadXml(
getClass().getResourceAsStream("/Junos_get-interface-information_response_18.4.xml"));
final Collection<PortDescription> expected = new ArrayList<>();
expected.add(DefaultPortDescription.builder()
.withPortNumber(PortNumber.portNumber(513L)).isRemoved(false)
.type(Port.Type.COPPER).portSpeed(JuniperUtils.DEFAULT_PORT_SPEED)
.annotations(DefaultAnnotations.builder()
.set(JuniperUtils.AK_OPER_STATUS, "up")
.set(AnnotationKeys.PORT_NAME, "jsrv")
.set(JuniperUtils.AK_IF_TYPE, "Ethernet")
.set(AnnotationKeys.PORT_MAC, "2c:6b:f5:03:ff:c0")
.set(JuniperUtils.AK_ADMIN_STATUS, "up")
.build()).build());
expected.add(DefaultPortDescription.builder()
.withPortNumber(PortNumber.portNumber(514L))
.isRemoved(false).type(Port.Type.COPPER)
.portSpeed(JuniperUtils.DEFAULT_PORT_SPEED)
.annotations(
DefaultAnnotations.builder()
.set(JuniperUtils.AK_ENCAPSULATION, "unknown")
.set("portName", "jsrv.1")
.set(JuniperUtils.AK_PHYSICAL_PORT_NAME, "jsrv")
.set("inet", "128.0.0.127")
.set("ip", "128.0.0.127")
.build()).build());
assertEquals(expected, JuniperUtils.parseJuniperPorts(reply));
} |
public static Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> filterAndGenerateCheckpointBasedOnSourceLimit(Dataset<Row> sourceData,
long sourceLimit, QueryInfo queryInfo,
CloudObjectIncrCheckpoint cloudObjectIncrCheckpoint) {
if (sourceData.isEmpty()) {
// There is no file matching the prefix.
CloudObjectIncrCheckpoint updatedCheckpoint =
queryInfo.getEndInstant().equals(cloudObjectIncrCheckpoint.getCommit())
? cloudObjectIncrCheckpoint
: new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), null);
return Pair.of(updatedCheckpoint, Option.empty());
}
// Let's persist the dataset to avoid triggering the dag repeatedly
sourceData.persist(StorageLevel.MEMORY_AND_DISK());
// Set ordering in query to enable batching
Dataset<Row> orderedDf = QueryRunner.applyOrdering(sourceData, queryInfo.getOrderByColumns());
Option<String> lastCheckpoint = Option.of(cloudObjectIncrCheckpoint.getCommit());
Option<String> lastCheckpointKey = Option.ofNullable(cloudObjectIncrCheckpoint.getKey());
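// Concatenate commit and key into a composite checkpoint token, comparable with the commit_key column built below.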
Option<String> concatenatedKey = lastCheckpoint.flatMap(checkpoint -> lastCheckpointKey.map(key -> checkpoint + key));
// Filter until last checkpoint key
if (concatenatedKey.isPresent()) {
orderedDf = orderedDf.withColumn("commit_key",
functions.concat(functions.col(queryInfo.getOrderColumn()), functions.col(queryInfo.getKeyColumn())));
// Apply incremental filter
orderedDf = orderedDf.filter(functions.col("commit_key").gt(concatenatedKey.get())).drop("commit_key");
// If there are no more files where commit_key is greater than lastCheckpointCommit#lastCheckpointKey
if (orderedDf.isEmpty()) {
LOG.info("Empty ordered source, returning endpoint:" + queryInfo.getEndInstant());
sourceData.unpersist();
// queryInfo.getEndInstant() represents source table's last completed instant
// If current checkpoint is c1#abc and queryInfo.getEndInstant() is c1, return c1#abc.
// If current checkpoint is c1#abc and queryInfo.getEndInstant() is c2, return c2.
CloudObjectIncrCheckpoint updatedCheckpoint =
queryInfo.getEndInstant().equals(cloudObjectIncrCheckpoint.getCommit())
? cloudObjectIncrCheckpoint
: new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), null);
return Pair.of(updatedCheckpoint, Option.empty());
}
}
// Limit based on sourceLimit
WindowSpec windowSpec = Window.orderBy(col(queryInfo.getOrderColumn()), col(queryInfo.getKeyColumn()));
// Add the 'cumulativeSize' column with running sum of 'limitColumn'
Dataset<Row> aggregatedData = orderedDf.withColumn(CUMULATIVE_COLUMN_NAME,
sum(col(queryInfo.getLimitColumn())).over(windowSpec));
Dataset<Row> collectedRows = aggregatedData.filter(col(CUMULATIVE_COLUMN_NAME).leq(sourceLimit));
Row row = null;
if (collectedRows.isEmpty()) {
// If the first element itself exceeds limits then return first element
LOG.info("First object exceeding source limit: " + sourceLimit + " bytes");
row = aggregatedData.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).first();
collectedRows = aggregatedData.limit(1);
} else {
// Get the last row and form composite key
row = collectedRows.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).orderBy(
col(queryInfo.getOrderColumn()).desc(), col(queryInfo.getKeyColumn()).desc()).first();
}
LOG.info("Processed batch size: " + row.get(row.fieldIndex(CUMULATIVE_COLUMN_NAME)) + " bytes");
sourceData.unpersist();
return Pair.of(new CloudObjectIncrCheckpoint(row.getString(0), row.getString(1)), Option.of(collectedRows));
} | @Test
void testSingleObjectExceedingSourceLimit() {
List<Triple<String, Long, String>> filePathSizeAndCommitTime = new ArrayList<>();
// Add file paths and sizes to the list
filePathSizeAndCommitTime.add(Triple.of("path/to/file1.json", 100L, "commit1"));
filePathSizeAndCommitTime.add(Triple.of("path/to/file3.json", 200L, "commit1"));
filePathSizeAndCommitTime.add(Triple.of("path/to/file2.json", 150L, "commit1"));
filePathSizeAndCommitTime.add(Triple.of("path/to/file4.json", 50L, "commit2"));
filePathSizeAndCommitTime.add(Triple.of("path/to/file5.json", 150L, "commit2"));
Dataset<Row> inputDs = generateDataset(filePathSizeAndCommitTime);
QueryInfo queryInfo = new QueryInfo(
QUERY_TYPE_INCREMENTAL_OPT_VAL(), "commit1", "commit1",
"commit2", "_hoodie_commit_time",
"s3.object.key", "s3.object.size");
Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> result = IncrSourceHelper.filterAndGenerateCheckpointBasedOnSourceLimit(
inputDs, 50L, queryInfo, new CloudObjectIncrCheckpoint("commit1", null));
Row row = result.getRight().get().select("cumulativeSize").collectAsList().get((int) result.getRight().get().count() - 1);
assertEquals("commit1#path/to/file1.json", result.getKey().toString());
List<Row> rows = result.getRight().get().collectAsList();
assertEquals(1, rows.size());
assertEquals("[[commit1,[[bucket-1],[path/to/file1.json,100]],100]]", rows.toString());
assertEquals(100L, row.get(0));
} |
@VisibleForTesting
void collectPackages() throws UploaderException {
parseLists();
String[] list = StringUtils.split(input, File.pathSeparatorChar);
for (String item : list) {
LOG.info("Original source " + item);
String expanded = expandEnvironmentVariables(item, System.getenv());
LOG.info("Expanded source " + expanded);
if (expanded.endsWith("*")) {
File path = new File(expanded.substring(0, expanded.length() - 1));
if (path.isDirectory()) {
File[] files = path.listFiles();
if (files != null) {
for (File jar : files) {
if (!jar.isDirectory()) {
addJar(jar);
} else {
LOG.info("Ignored " + jar + " because it is a directory");
}
}
} else {
LOG.warn("Could not list directory " + path);
}
} else {
LOG.warn("Ignored " + expanded + ". It is not a directory");
}
} else if (expanded.endsWith(".jar")) {
File jarFile = new File(expanded);
addJar(jarFile);
} else if (!expanded.isEmpty()) {
LOG.warn("Ignored " + expanded + " only jars are supported");
}
}
} | @Test
void testCollectPackages() throws IOException, UploaderException {
File parent = new File(testDir);
try {
parent.deleteOnExit();
assertTrue(parent.mkdirs(), "Directory creation failed");
File dirA = new File(parent, "A");
assertTrue(dirA.mkdirs());
File dirB = new File(parent, "B");
assertTrue(dirB.mkdirs());
File jarA = new File(dirA, "a.jar");
assertTrue(jarA.createNewFile());
File jarB = new File(dirA, "b.jar");
assertTrue(jarB.createNewFile());
File jarC = new File(dirA, "c.jar");
assertTrue(jarC.createNewFile());
File txtD = new File(dirA, "d.txt");
assertTrue(txtD.createNewFile());
File jarD = new File(dirB, "d.jar");
assertTrue(jarD.createNewFile());
File txtE = new File(dirB, "e.txt");
assertTrue(txtE.createNewFile());
FrameworkUploader uploader = new FrameworkUploader();
uploader.whitelist = ".*a\\.jar,.*b\\.jar,.*d\\.jar";
uploader.blacklist = ".*b\\.jar";
uploader.input = dirA.getAbsolutePath() + File.separatorChar + "*" +
File.pathSeparatorChar +
dirB.getAbsolutePath() + File.separatorChar + "*";
uploader.collectPackages();
assertEquals(3,
uploader.whitelistedFiles.size(),
"Whitelist count error");
assertEquals(1,
uploader.blacklistedFiles.size(),
"Blacklist count error");
assertTrue(uploader.filteredInputFiles.contains(jarA.getAbsolutePath()),
"File not collected");
assertFalse(uploader.filteredInputFiles.contains(jarB.getAbsolutePath()),
"File collected");
assertTrue(uploader.filteredInputFiles.contains(jarD.getAbsolutePath()),
"File not collected");
assertEquals(2,
uploader.filteredInputFiles.size(),
"Too many whitelists");
} finally {
FileUtils.deleteDirectory(parent);
}
} |
@Override
public String convertTo(Duration value) {
// Durations will always be converted to ISO8601 formatted Strings
// There is no meaningful way to convert them back to a simple jadconfig 'number+unit' format.
return value.toString();
} | @Test
public void convertTo() {
assertThat(converter.convertTo(Duration.ofMillis(10))).isEqualTo("PT0.01S");
assertThat(converter.convertTo(Duration.ofSeconds(10))).isEqualTo("PT10S");
assertThat(converter.convertTo(Duration.ofSeconds(70))).isEqualTo("PT1M10S");
} |
public B export(Boolean export) {
this.export = export;
return getThis();
} | @Test
void export() {
ServiceBuilder builder = new ServiceBuilder();
builder.export(true);
Assertions.assertTrue(builder.build().getExport());
builder.export(false);
Assertions.assertFalse(builder.build().getExport());
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SimpleSubscriptionData that = (SimpleSubscriptionData) o;
return Objects.equals(topic, that.topic) && Objects.equals(expressionType, that.expressionType) && Objects.equals(expression, that.expression);
} | @Test
public void testSetNotEqual() {
String topic = "test-topic";
String expressionType = "TAG";
String expression1 = "test-expression-1";
String expression2 = "test-expression-2";
Set<SimpleSubscriptionData> set1 = Sets.newHashSet(new SimpleSubscriptionData(topic, expressionType, expression1, 1));
Set<SimpleSubscriptionData> set2 = Sets.newHashSet(new SimpleSubscriptionData(topic, expressionType, expression2, 1));
assertThat(set1.equals(set2)).isFalse();
} |
static Optional<Model> fromXml(DeployState ds, Element parent, String name, Set<String> requiredTags) {
return XmlHelper.getOptionalChild(parent, name).map(e -> fromXml(ds, e, requiredTags));
} | @Test
void invalid_url(){
var xml = """
<component id="bert-embedder" type="bert-embedder">
<transformer-model url="models/e5-base-v2.onnx" />
<tokenizer-vocab path="models/vocab.txt"/>
</component>
""";
var state = new DeployState.Builder().build();
var element = XML.getDocument(xml).getDocumentElement();
var exception = assertThrows(IllegalArgumentException.class,
() -> Model.fromXml(state, element, "transformer-model", Set.of()));
org.junit.jupiter.api.Assertions.assertEquals("Invalid url 'models/e5-base-v2.onnx': url has no 'scheme' component", exception.getMessage());
} |
public static <K, V, S extends StateStore> Materialized<K, V, S> as(final DslStoreSuppliers storeSuppliers) {
Objects.requireNonNull(storeSuppliers, "store type can't be null");
return new Materialized<>(storeSuppliers);
} | @Test
public void shouldThrowNullPointerIfStoreTypeIsNull() {
final NullPointerException e = assertThrows(NullPointerException.class,
() -> Materialized.as((Materialized.StoreType) null));
assertEquals(e.getMessage(), "store type can't be null");
} |
@Override
public Optional<ConstraintMetaData> revise(final String tableName, final ConstraintMetaData originalMetaData, final ShardingRule rule) {
for (DataNode each : shardingTable.getActualDataNodes()) {
String referencedTableName = originalMetaData.getReferencedTableName();
Optional<String> logicIndexName = getLogicIndex(originalMetaData.getName(), each.getTableName());
if (logicIndexName.isPresent()) {
return Optional.of(new ConstraintMetaData(
logicIndexName.get(), rule.getAttributes().getAttribute(DataNodeRuleAttribute.class).findLogicTableByActualTable(referencedTableName).orElse(referencedTableName)));
}
}
return Optional.empty();
} | @Test
void assertReviseWhenTableDoesNotMatch() {
assertFalse(reviser.revise("table_name_1", new ConstraintMetaData("test_table_name_2", "referenced_table_name"), shardingRule).isPresent());
} |
public static VerificationMode times(final int count) {
checkArgument(count >= 0, "Times count must not be less than zero");
return new TimesVerification(count);
} | @Test
public void should_verify_expected_request() throws Exception {
final HttpServer server = httpServer(port(), hit);
server.get(by(uri("/foo"))).response("bar");
running(server, () -> assertThat(helper.get(remoteUrl("/foo")), is("bar")));
hit.verify(by(uri("/foo")), times(1));
} |
@Override
public List<CodegenTableDO> getCodegenTableList(Long dataSourceConfigId) {
return codegenTableMapper.selectListByDataSourceConfigId(dataSourceConfigId);
} | @Test
public void testGetCodegenTableList() {
// Mock data
CodegenTableDO table01 = randomPojo(CodegenTableDO.class,
o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()));
codegenTableMapper.insert(table01);
CodegenTableDO table02 = randomPojo(CodegenTableDO.class,
o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()));
codegenTableMapper.insert(table02);
// Prepare parameters
Long dataSourceConfigId = table01.getDataSourceConfigId();
// Invoke
List<CodegenTableDO> result = codegenService.getCodegenTableList(dataSourceConfigId);
// Assert
assertEquals(1, result.size());
assertPojoEquals(table01, result.get(0));
} |
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
if (boss != renamedBoss)
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
} | @Test
public void testBarrows()
{
ChatMessage chatMessageEvent = new ChatMessage(null, GAMEMESSAGE, "", "Your Barrows chest count is: <col=ff0000>277</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessageEvent);
verify(configManager).setRSProfileConfiguration("killcount", "barrows chests", 277);
} |
static int writeHeaderBuffer(final MapWriterConfiguration configuration,
final TileBasedDataProcessor dataProcessor, final ByteBuffer containerHeaderBuffer) {
LOGGER.fine("writing header");
LOGGER.fine("Bounding box for file: " + dataProcessor.getBoundingBox().toString());
// write file header
// MAGIC BYTE
byte[] magicBytes = MAGIC_BYTE.getBytes(UTF8_CHARSET);
containerHeaderBuffer.put(magicBytes);
// HEADER SIZE: Write dummy pattern as header size; it will be replaced later.
int headerSizePosition = containerHeaderBuffer.position();
containerHeaderBuffer.putInt(DUMMY_INT);
// FILE VERSION
containerHeaderBuffer.putInt(configuration.getFileSpecificationVersion());
// FILE SIZE: Write dummy pattern as file size; it will be replaced later.
containerHeaderBuffer.putLong(DUMMY_LONG);
// DATE OF CREATION
containerHeaderBuffer.putLong(System.currentTimeMillis());
// BOUNDING BOX
containerHeaderBuffer.putInt(LatLongUtils.degreesToMicrodegrees(dataProcessor.getBoundingBox().minLatitude));
containerHeaderBuffer.putInt(LatLongUtils.degreesToMicrodegrees(dataProcessor.getBoundingBox().minLongitude));
containerHeaderBuffer.putInt(LatLongUtils.degreesToMicrodegrees(dataProcessor.getBoundingBox().maxLatitude));
containerHeaderBuffer.putInt(LatLongUtils.degreesToMicrodegrees(dataProcessor.getBoundingBox().maxLongitude));
// TILE SIZE
containerHeaderBuffer.putShort((short) Constants.DEFAULT_TILE_SIZE);
// PROJECTION
writeUTF8(PROJECTION, containerHeaderBuffer);
// check whether zoom start is a valid zoom level
// FLAGS
containerHeaderBuffer.put(infoByteOptmizationParams(configuration));
// MAP START POSITION
LatLong mapStartPosition = configuration.getMapStartPosition();
if (mapStartPosition != null) {
containerHeaderBuffer.putInt(LatLongUtils.degreesToMicrodegrees(mapStartPosition.latitude));
containerHeaderBuffer.putInt(LatLongUtils.degreesToMicrodegrees(mapStartPosition.longitude));
}
// MAP START ZOOM
if (configuration.hasMapStartZoomLevel()) {
containerHeaderBuffer.put((byte) configuration.getMapStartZoomLevel());
}
// PREFERRED LANGUAGE
if (configuration.getPreferredLanguages() != null && !configuration.getPreferredLanguages().isEmpty()) {
String langStr = "";
for (String preferredLanguage : configuration.getPreferredLanguages()) {
langStr += (langStr.length() > 0 ? "," : "") + preferredLanguage;
}
writeUTF8(langStr, containerHeaderBuffer);
}
// COMMENT
if (configuration.getComment() != null) {
writeUTF8(configuration.getComment(), containerHeaderBuffer);
}
// CREATED WITH
writeUTF8(configuration.getWriterVersion(), containerHeaderBuffer);
OSMTagMapping mapping = configuration.getTagMapping();
// AMOUNT POI TAGS
containerHeaderBuffer.putShort((short) mapping.getOptimizedPoiIds().size());
// POI TAGS
// retrieves tag ids in order of frequency, most frequent come first
for (short tagId : mapping.getOptimizedPoiIds().keySet()) {
OSMTag tag = mapping.getPoiTag(tagId);
writeUTF8(tag.tagKey(), containerHeaderBuffer);
}
// AMOUNT OF WAY TAGS
containerHeaderBuffer.putShort((short) mapping.getOptimizedWayIds().size());
// WAY TAGS
for (short tagId : mapping.getOptimizedWayIds().keySet()) {
OSMTag tag = mapping.getWayTag(tagId);
writeUTF8(tag.tagKey(), containerHeaderBuffer);
}
// AMOUNT OF ZOOM INTERVALS
int numberOfZoomIntervals = dataProcessor.getZoomIntervalConfiguration().getNumberOfZoomIntervals();
containerHeaderBuffer.put((byte) numberOfZoomIntervals);
// SET MARK OF THIS BUFFER AT POSITION FOR WRITING ZOOM INTERVAL CONFIG
containerHeaderBuffer.mark();
// ZOOM INTERVAL CONFIGURATION: SKIP COMPUTED AMOUNT OF BYTES
containerHeaderBuffer.position(containerHeaderBuffer.position() + SIZE_ZOOMINTERVAL_CONFIGURATION
* numberOfZoomIntervals);
// now write header size
// -4 bytes of header size variable itself
int headerSize = containerHeaderBuffer.position() - headerSizePosition - BYTES_INT;
containerHeaderBuffer.putInt(headerSizePosition, headerSize);
return containerHeaderBuffer.position();
} | @Test
public void testWriteHeaderBuffer() {
ByteBuffer headerBuffer = ByteBuffer.allocate(MapFileWriter.HEADER_BUFFER_SIZE);
int headerLength = MapFileWriter.writeHeaderBuffer(this.configuration, this.dataProcessor, headerBuffer);
// expected header length
// 20 + 4 + 4 + 8 + 8 + 16 + 2
// + 9 ("Mercator")
// + 1 + 8 + 1
// + 6 ("en,de")
// + 17 ("i love mapsforge")
// + 5("test")
// + 2 + 19 ("amenity=university")
// + 2 + 14 + 18 ("natural=beach", natural=coastline")
// + 1
// + 3 * (3 + 8 + 8)
// == 222
Assert.assertEquals(222, headerLength);
} |
public static boolean compareRawTaggedFields(List<RawTaggedField> first,
List<RawTaggedField> second) {
if (first == null) {
return second == null || second.isEmpty();
} else if (second == null) {
return first.isEmpty();
} else {
return first.equals(second);
}
} | @Test
public void testCompareRawTaggedFields() {
assertTrue(MessageUtil.compareRawTaggedFields(null, null));
assertTrue(MessageUtil.compareRawTaggedFields(null, Collections.emptyList()));
assertTrue(MessageUtil.compareRawTaggedFields(Collections.emptyList(), null));
assertFalse(MessageUtil.compareRawTaggedFields(Collections.emptyList(),
Collections.singletonList(new RawTaggedField(1, new byte[] {1}))));
assertFalse(MessageUtil.compareRawTaggedFields(null,
Collections.singletonList(new RawTaggedField(1, new byte[] {1}))));
assertFalse(MessageUtil.compareRawTaggedFields(
Collections.singletonList(new RawTaggedField(1, new byte[] {1})),
Collections.emptyList()));
assertTrue(MessageUtil.compareRawTaggedFields(
Arrays.asList(new RawTaggedField(1, new byte[] {1}),
new RawTaggedField(2, new byte[] {})),
Arrays.asList(new RawTaggedField(1, new byte[] {1}),
new RawTaggedField(2, new byte[] {}))));
} |
@Override
public void updateUserLogin(Long id, String loginIp) {
userMapper.updateById(new AdminUserDO().setId(id).setLoginIp(loginIp).setLoginDate(LocalDateTime.now()));
} | @Test
public void testUpdateUserLogin() {
// Mock data
AdminUserDO user = randomAdminUserDO(o -> o.setLoginDate(null));
userMapper.insert(user);
// Prepare parameters
Long id = user.getId();
String loginIp = randomString();
// Invoke
userService.updateUserLogin(id, loginIp);
// Assert
AdminUserDO dbUser = userMapper.selectById(id);
assertEquals(loginIp, dbUser.getLoginIp());
assertNotNull(dbUser.getLoginDate());
} |
@Override
public void inc() {
counter.inc(1D);
} | @Test
void assertCreate() throws ReflectiveOperationException {
PrometheusMetricsCounterCollector collector = new PrometheusMetricsCounterCollector(new MetricConfiguration("foo_counter",
MetricCollectorType.COUNTER, "foo_help", Collections.emptyList(), Collections.emptyMap()));
collector.inc();
Counter counter = (Counter) Plugins.getMemberAccessor().get(PrometheusMetricsCounterCollector.class.getDeclaredField("counter"), collector);
assertThat(counter.get(), is(1D));
} |
@Override
public ParsedLine parse(final String line, final int cursor, final ParseContext context) {
final String trimmed = line.trim();
final int adjCursor = adjustCursor(line, trimmed, cursor);
return delegate.parse(trimmed, adjCursor, context);
} | @Test
public void shouldTrimWhiteSpaceAndReturnLine() {
expect(delegate.parse("line \t containing \t space", 0, UNSPECIFIED))
.andReturn(parsedLine);
replay(delegate);
final ParsedLine line = parser.parse(" \t line \t containing \t space \t ", 0, UNSPECIFIED);
assertThat(line, is(parsedLine));
} |
public SnapshotDto setProjectVersion(@Nullable String projectVersion) {
checkLength(MAX_VERSION_LENGTH, projectVersion, "projectVersion");
this.projectVersion = projectVersion;
return this;
} | @Test
void fail_if_projectVersion_is_longer_than_100_characters() {
SnapshotDto snapshotDto = new SnapshotDto();
snapshotDto.setProjectVersion(null);
snapshotDto.setProjectVersion("1.0");
snapshotDto.setProjectVersion(repeat("a", 100));
assertThatThrownBy(() -> snapshotDto.setProjectVersion(repeat("a", 101)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("projectVersion" +
" length (101) is longer than the maximum authorized (100). " +
"'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' was provided.");
} |
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
String groupId,
String memberId,
int memberEpoch,
String instanceId,
String rackId,
int rebalanceTimeoutMs,
String clientId,
String clientHost,
List<String> subscribedTopicNames,
String assignorName,
List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
final long currentTimeMs = time.milliseconds();
final List<CoordinatorRecord> records = new ArrayList<>();
// Get or create the consumer group.
boolean createIfNotExists = memberEpoch == 0;
final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
throwIfConsumerGroupIsFull(group, memberId);
// Get or create the member.
if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
final ConsumerGroupMember member;
if (instanceId == null) {
member = getOrMaybeSubscribeDynamicConsumerGroupMember(
group,
memberId,
memberEpoch,
ownedTopicPartitions,
createIfNotExists,
false
);
} else {
member = getOrMaybeSubscribeStaticConsumerGroupMember(
group,
memberId,
memberEpoch,
instanceId,
ownedTopicPartitions,
createIfNotExists,
false,
records
);
}
// 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
// record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
// changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
// record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
// changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
.maybeUpdateInstanceId(Optional.ofNullable(instanceId))
.maybeUpdateRackId(Optional.ofNullable(rackId))
.maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
.maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
.maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
.setClientId(clientId)
.setClientHost(clientHost)
.setClassicMemberMetadata(null)
.build();
boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
groupId,
member,
updatedMember,
records
);
int groupEpoch = group.groupEpoch();
Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
SubscriptionType subscriptionType = group.subscriptionType();
if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
// The subscription metadata is updated in two cases:
// 1) The member has updated its subscriptions;
// 2) The refresh deadline has been reached.
subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
subscriptionMetadata = group.computeSubscriptionMetadata(
subscribedTopicNamesMap,
metadataImage.topics(),
metadataImage.cluster()
);
int numMembers = group.numMembers();
if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
numMembers++;
}
subscriptionType = ModernGroup.subscriptionType(
subscribedTopicNamesMap,
numMembers
);
if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
log.info("[GroupId {}] Computed new subscription metadata: {}.",
groupId, subscriptionMetadata);
bumpGroupEpoch = true;
records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
}
if (bumpGroupEpoch) {
groupEpoch += 1;
records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
}
group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
}
// 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
// the existing and the new target assignment is persisted to the partition.
final int targetAssignmentEpoch;
final Assignment targetAssignment;
if (groupEpoch > group.assignmentEpoch()) {
targetAssignment = updateTargetAssignment(
group,
groupEpoch,
member,
updatedMember,
subscriptionMetadata,
subscriptionType,
records
);
targetAssignmentEpoch = groupEpoch;
} else {
targetAssignmentEpoch = group.assignmentEpoch();
targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
}
// 3. Reconcile the member's assignment with the target assignment if the member is not
// fully reconciled yet.
updatedMember = maybeReconcile(
groupId,
updatedMember,
group::currentPartitionEpoch,
targetAssignmentEpoch,
targetAssignment,
ownedTopicPartitions,
records
);
scheduleConsumerGroupSessionTimeout(groupId, memberId);
// Prepare the response.
ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
.setMemberId(updatedMember.memberId())
.setMemberEpoch(updatedMember.memberEpoch())
.setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));
// The assignment is only provided in the following cases:
// 1. The member sent a full request. It does so when joining or rejoining the group with zero
// as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
// (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
// as those must be set in a full request.
// 2. The member's assignment has been updated.
boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
}
return new CoordinatorResult<>(records, response);
} | @Test
public void testNewJoiningMemberTriggersNewTargetAssignment() {
String groupId = "fooup";
// Generate the member ids up front so they can be referenced throughout the test.
String memberId1 = Uuid.randomUuid().toString();
String memberId2 = Uuid.randomUuid().toString();
String memberId3 = Uuid.randomUuid().toString();
Uuid fooTopicId = Uuid.randomUuid();
String fooTopicName = "foo";
Uuid barTopicId = Uuid.randomUuid();
String barTopicName = "bar";
MockPartitionAssignor assignor = new MockPartitionAssignor("range");
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(assignor))
.withMetadataImage(new MetadataImageBuilder()
.addTopic(fooTopicId, fooTopicName, 6)
.addTopic(barTopicId, barTopicName, 3)
.addRacks()
.build())
.withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
.withMember(new ConsumerGroupMember.Builder(memberId1)
.setState(MemberState.STABLE)
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setServerAssignorName("range")
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1, 2),
mkTopicAssignment(barTopicId, 0, 1)))
.build())
.withMember(new ConsumerGroupMember.Builder(memberId2)
.setState(MemberState.STABLE)
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setServerAssignorName("range")
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4, 5),
mkTopicAssignment(barTopicId, 2)))
.build())
.withAssignment(memberId1, mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1, 2),
mkTopicAssignment(barTopicId, 0, 1)))
.withAssignment(memberId2, mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4, 5),
mkTopicAssignment(barTopicId, 2)))
.withAssignmentEpoch(10))
.build();
assignor.prepareGroupAssignment(new GroupAssignment(
new HashMap<String, MemberAssignment>() {
{
put(memberId1, new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1),
mkTopicAssignment(barTopicId, 0)
)));
put(memberId2, new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 2, 3),
mkTopicAssignment(barTopicId, 1)
)));
put(memberId3, new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 4, 5),
mkTopicAssignment(barTopicId, 2)
)));
}
}
));
// Member 3 joins the consumer group.
CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId3)
.setMemberEpoch(0)
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setServerAssignor("range")
.setTopicPartitions(Collections.emptyList()));
assertResponseEquals(
new ConsumerGroupHeartbeatResponseData()
.setMemberId(memberId3)
.setMemberEpoch(11)
.setHeartbeatIntervalMs(5000)
.setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()),
result.response()
);
ConsumerGroupMember expectedMember3 = new ConsumerGroupMember.Builder(memberId3)
.setState(MemberState.UNRELEASED_PARTITIONS)
.setMemberEpoch(11)
.setPreviousMemberEpoch(0)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setServerAssignorName("range")
.build();
List<CoordinatorRecord> expectedRecords = Arrays.asList(
GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3),
GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {
{
put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
}
}),
GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1),
mkTopicAssignment(barTopicId, 0)
)),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment(
mkTopicAssignment(fooTopicId, 2, 3),
mkTopicAssignment(barTopicId, 1)
)),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId3, mkAssignment(
mkTopicAssignment(fooTopicId, 4, 5),
mkTopicAssignment(barTopicId, 2)
)),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11),
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember3)
);
assertRecordsEquals(expectedRecords.subList(0, 3), result.records().subList(0, 3));
assertUnorderedListEquals(expectedRecords.subList(3, 6), result.records().subList(3, 6));
assertRecordsEquals(expectedRecords.subList(6, 8), result.records().subList(6, 8));
} |
@Override
public String[] requiredModules() {
return new String[] {
CoreModule.NAME,
ConfigurationModule.NAME
};
} | @Test
public void requiredModules() {
String[] modules = moduleProvider.requiredModules();
assertArrayEquals(new String[] {
CoreModule.NAME,
ConfigurationModule.NAME
}, modules);
} |
public static GeneratorResult run(String resolverPath,
String defaultPackage,
final boolean generateImported,
final boolean generateDataTemplates,
RestliVersion version,
RestliVersion deprecatedByVersion,
String targetDirectoryPath,
String[] sources)
throws IOException
{
return run(resolverPath,
defaultPackage,
null,
generateImported,
generateDataTemplates,
version,
deprecatedByVersion,
targetDirectoryPath,
sources);
} | @Test(dataProvider = "restliVersionsDataProvider")
public void testDeterministicMethodOrder(RestliVersion version) throws Exception
{
final String pegasusDir = moduleDir + FS + RESOURCES_DIR + FS + "pegasus";
final String outPath = outdir.getPath();
RestRequestBuilderGenerator.run(pegasusDir,
null,
moduleDir,
true,
false,
version,
null,
outPath,
new String[] { moduleDir + FS + RESOURCES_DIR + FS + "idls" + FS + "testSimple.restspec.json" });
final File builderFile = new File(outPath + FS + "com" + FS + "linkedin" + FS + "restli" + FS + "swift" + FS + "integration" + FS + "TestSimpleBuilders.java");
Assert.assertTrue(builderFile.exists());
final String builderFileContent = IOUtils.toString(new FileInputStream(builderFile));
Assert.assertTrue(builderFileContent.contains("Generated from " + RESOURCES_DIR + FS + "idls" + FS + "testSimple.restspec.json"));
List<String> actualMethodNames = StaticJavaParser.parse(builderFileContent)
.findAll(MethodDeclaration.class).stream()
.map(MethodDeclaration::getNameAsString)
.collect(Collectors.toList());
List<String> expectedMethodNames = Lists.newArrayList(
"getBaseUriTemplate",
"getRequestOptions",
"getPathComponents",
"assignRequestOptions",
"getPrimaryResource",
"options",
"get",
"update",
"delete");
Assert.assertEquals(actualMethodNames, expectedMethodNames, "Expected method names to be generated in explicit order.");
} |
@Cacheable(value = CACHE_LATEST_EXTENSION_VERSION, keyGenerator = GENERATOR_LATEST_EXTENSION_VERSION)
public ExtensionVersion getLatest(List<ExtensionVersion> versions, boolean groupedByTargetPlatform) {
return getLatest(versions, groupedByTargetPlatform, false);
} | @Test
public void testGetLatestNoPreRelease() {
var release = new ExtensionVersion();
release.setTargetPlatform(TargetPlatform.NAME_UNIVERSAL);
release.setPreRelease(false);
release.setVersion("1.0.0");
var minor = new ExtensionVersion();
minor.setTargetPlatform(TargetPlatform.NAME_LINUX_ARM64);
minor.setPreRelease(true);
minor.setVersion("1.0.0-next.1fd3e8c");
var major = new ExtensionVersion();
major.setTargetPlatform(TargetPlatform.NAME_LINUX_ARM64);
major.setPreRelease(true);
major.setVersion("0.3.0");
var latest = versions.getLatest(List.of(major, minor, release), false, false);
assertEquals(release, latest);
} |
public static void setMetadata(
Context context, NotificationCompat.Builder notification, int type) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
switch (type) {
case TYPE_NORMAL:
createNormalChannel(context);
break;
case TYPE_FTP:
createFtpChannel(context);
break;
default:
throw new IllegalArgumentException("Unrecognized type:" + type);
}
} else {
switch (type) {
case TYPE_NORMAL:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
notification.setCategory(Notification.CATEGORY_SERVICE);
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
notification.setPriority(Notification.PRIORITY_MIN);
}
break;
case TYPE_FTP:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
notification.setCategory(Notification.CATEGORY_SERVICE);
notification.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
notification.setPriority(Notification.PRIORITY_MAX);
}
break;
default:
throw new IllegalArgumentException("Unrecognized type:" + type);
}
}
} | @Test
@Config(sdk = {KITKAT}) // max sdk is N
public void testNormalNotification() {
NotificationCompat.Builder builder =
new NotificationCompat.Builder(context, CHANNEL_NORMAL_ID)
.setContentTitle(context.getString(R.string.waiting_title))
.setContentText(context.getString(R.string.waiting_content))
.setAutoCancel(false)
.setSmallIcon(R.drawable.ic_all_inclusive_white_36dp)
.setProgress(0, 0, true);
NotificationConstants.setMetadata(context, builder, TYPE_NORMAL);
Notification result = builder.build();
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
assertEquals(Notification.CATEGORY_SERVICE, result.category);
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
assertEquals(Notification.PRIORITY_MIN, result.priority);
} else {
assertEquals(Notification.PRIORITY_DEFAULT, result.priority);
}
} |
String getSignature(URL url, Invocation invocation, String secretKey, String time) {
String requestString = String.format(
Constants.SIGNATURE_STRING_FORMAT,
url.getColonSeparatedKey(),
RpcUtils.getMethodName(invocation),
secretKey,
time);
boolean parameterEncrypt = url.getParameter(Constants.PARAMETER_SIGNATURE_ENABLE_KEY, false);
if (parameterEncrypt) {
return SignatureUtils.sign(invocation.getArguments(), requestString, secretKey);
}
return SignatureUtils.sign(requestString, secretKey);
} | @Test
void testGetSignatureNoParameter() {
URL url = mock(URL.class);
Invocation invocation = mock(Invocation.class);
String secretKey = "123456";
AccessKeyAuthenticator helper = new AccessKeyAuthenticator(ApplicationModel.defaultModel());
String signature = helper.getSignature(url, invocation, secretKey, String.valueOf(System.currentTimeMillis()));
assertNotNull(signature);
} |
@Override
public List<Long> getTenantIdList() {
List<TenantDO> tenants = tenantMapper.selectList();
return CollectionUtils.convertList(tenants, TenantDO::getId);
} | @Test
public void testGetTenantIdList() {
// mock data
TenantDO tenant = randomPojo(TenantDO.class, o -> o.setId(1L));
tenantMapper.insert(tenant);
// invoke and assert the result
List<Long> result = tenantService.getTenantIdList();
assertEquals(Collections.singletonList(1L), result);
} |
public Organization getOrganizationByName(String name) {
Optional<Organization> conf = organizationRepository.findByName(name);
if (!conf.isPresent()) {
throw new NotFoundException("Could not find organization with name: " + name);
}
return conf.get();
} | @Test
public void organizationNotFound() {
Optional<Organization> organizationOptional = Optional.empty();
when(repositoryMock.findByName(anyString())).thenReturn(organizationOptional);
assertThrows(NotFoundException.class, () -> {
organizationServiceMock.getOrganizationByName("organization");
});
} |
public int compute(final RectL pInputRect, final PointL pInputPoint, final double pInputRadius,
final PointL pOutputIntersection1, final PointL pOutputIntersection2) {
mRect = pInputRect;
mPoint = pInputPoint;
if (pInputRect.contains(mPoint.x, mPoint.y)) {
return CORNER_INSIDE;
}
final double angle = MyMath.computeAngle(mRect.centerX(), mRect.centerY(), mPoint.x, mPoint.y);
computeCirclePoint(mTrianglePoint, pInputRadius, angle, false);
final int corner1 = checkIntersection(pOutputIntersection1);
computeCirclePoint(mTrianglePoint, pInputRadius, angle, true);
final int corner2 = checkIntersection(pOutputIntersection2);
if (corner1 == corner2) {
return CORNER_NONE;
}
return corner1 | corner2;
} | @Test
public void testCompute() {
final SpeechBalloonHelper helper = new SpeechBalloonHelper();
final long radius = 10;
final RectL inputRect = new RectL();
final PointL inputPoint = new PointL();
final PointL intersection1 = new PointL();
final PointL intersection2 = new PointL();
inputRect.set(0, 0, 100, 100);
inputPoint.set(1, 1);
Assert.assertEquals(SpeechBalloonHelper.CORNER_INSIDE,
helper.compute(inputRect, inputPoint, radius, intersection1, intersection2));
inputPoint.set(50, 200);
Assert.assertEquals(SpeechBalloonHelper.CORNER_NONE,
helper.compute(inputRect, inputPoint, radius, intersection1, intersection2));
Assert.assertEquals(100, intersection1.y);
Assert.assertEquals(100, intersection2.y);
inputPoint.set(50, -200);
Assert.assertEquals(SpeechBalloonHelper.CORNER_NONE,
helper.compute(inputRect, inputPoint, radius, intersection1, intersection2));
Assert.assertEquals(0, intersection1.y);
Assert.assertEquals(0, intersection2.y);
inputPoint.set(110, 110);
Assert.assertEquals(SpeechBalloonHelper.CORNER_BOTTOM | SpeechBalloonHelper.CORNER_RIGHT,
helper.compute(inputRect, inputPoint, radius, intersection1, intersection2));
Assert.assertEquals(100, intersection1.x);
Assert.assertEquals(100, intersection2.y);
inputPoint.set(-10, -10);
Assert.assertEquals(SpeechBalloonHelper.CORNER_TOP | SpeechBalloonHelper.CORNER_LEFT,
helper.compute(inputRect, inputPoint, radius, intersection1, intersection2));
Assert.assertEquals(0, intersection1.x);
Assert.assertEquals(0, intersection2.y);
inputPoint.set(-10, 110);
Assert.assertEquals(SpeechBalloonHelper.CORNER_BOTTOM | SpeechBalloonHelper.CORNER_LEFT,
helper.compute(inputRect, inputPoint, radius, intersection1, intersection2));
Assert.assertEquals(0, intersection2.x);
Assert.assertEquals(100, intersection1.y);
inputPoint.set(110, -10);
Assert.assertEquals(SpeechBalloonHelper.CORNER_TOP | SpeechBalloonHelper.CORNER_RIGHT,
helper.compute(inputRect, inputPoint, radius, intersection1, intersection2));
Assert.assertEquals(100, intersection2.x);
Assert.assertEquals(0, intersection1.y);
} |
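A small usage sketch of the bit-mask return value, inferred from the assertions in testCompute above; that the corner constants are disjoint bit flags is an assumption, since their numeric values are not shown here.
// combine two corner flags the same way compute() does on its return path
int corners = SpeechBalloonHelper.CORNER_BOTTOM | SpeechBalloonHelper.CORNER_RIGHT;
// individual flags can then be tested back out of the combined mask
boolean touchesBottom = (corners & SpeechBalloonHelper.CORNER_BOTTOM) != 0; // true
boolean touchesRight = (corners & SpeechBalloonHelper.CORNER_RIGHT) != 0; // true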
static List<Integer> deltaEncode(List<Integer> list) {
if (list == null) {
return null;
}
ArrayList<Integer> result = new ArrayList<>();
if (list.isEmpty()) {
return result;
}
Iterator<Integer> it = list.iterator();
// add the first way node to the result list
Integer prevLat = it.next();
Integer prevLon = it.next();
result.add(prevLat);
result.add(prevLon);
while (it.hasNext()) {
Integer currentLat = it.next();
Integer currentLon = it.next();
result.add(Integer.valueOf(currentLat.intValue() - prevLat.intValue()));
result.add(Integer.valueOf(currentLon.intValue() - prevLon.intValue()));
prevLat = currentLat;
prevLon = currentLon;
}
return result;
} | @Test
public void testDeltaEncode() {
List<Integer> deltaEncoded = DeltaEncoder.deltaEncode(this.mockCoordinates);
Assert.assertEquals(Integer.valueOf(52000000), deltaEncoded.get(0));
Assert.assertEquals(Integer.valueOf(13000000), deltaEncoded.get(1));
Assert.assertEquals(Integer.valueOf(100), deltaEncoded.get(2));
Assert.assertEquals(Integer.valueOf(100), deltaEncoded.get(3));
Assert.assertEquals(Integer.valueOf(400), deltaEncoded.get(4));
Assert.assertEquals(Integer.valueOf(400), deltaEncoded.get(5));
Assert.assertEquals(Integer.valueOf(-100), deltaEncoded.get(6));
Assert.assertEquals(Integer.valueOf(-100), deltaEncoded.get(7));
Assert.assertEquals(Integer.valueOf(400), deltaEncoded.get(8));
Assert.assertEquals(Integer.valueOf(400), deltaEncoded.get(9));
Assert.assertEquals(Integer.valueOf(200), deltaEncoded.get(10));
Assert.assertEquals(Integer.valueOf(200), deltaEncoded.get(11));
} |
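A minimal sketch of the inverse operation, showing why the first coordinate pair is stored verbatim; deltaDecode is a hypothetical helper (not part of DeltaEncoder as shown) and assumes the same imports as the class above.
static List<Integer> deltaDecode(List<Integer> list) {
    if (list == null) {
        return null;
    }
    ArrayList<Integer> result = new ArrayList<>();
    if (list.isEmpty()) {
        return result;
    }
    Iterator<Integer> it = list.iterator();
    // the first way node was stored verbatim, so copy it as the running base
    int prevLat = it.next();
    int prevLon = it.next();
    result.add(prevLat);
    result.add(prevLon);
    while (it.hasNext()) {
        // every later pair holds offsets, so accumulate them back into absolute values
        prevLat += it.next();
        prevLon += it.next();
        result.add(prevLat);
        result.add(prevLon);
    }
    return result;
}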
@Override
public void execute(Exchange exchange) throws SmppException {
CancelSm cancelSm = createCancelSm(exchange);
if (log.isDebugEnabled()) {
log.debug("Canceling a short message for exchange id '{}' and message id '{}'",
exchange.getExchangeId(), cancelSm.getMessageId());
}
try {
session.cancelShortMessage(
cancelSm.getServiceType(),
cancelSm.getMessageId(),
TypeOfNumber.valueOf(cancelSm.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(cancelSm.getSourceAddrNpi()),
cancelSm.getSourceAddr(),
TypeOfNumber.valueOf(cancelSm.getDestAddrTon()),
NumberingPlanIndicator.valueOf(cancelSm.getDestAddrNpi()),
cancelSm.getDestinationAddress());
} catch (Exception e) {
throw new SmppException(e);
}
if (log.isDebugEnabled()) {
log.debug("Cancel a short message for exchange id '{}' and message id '{}'",
exchange.getExchangeId(), cancelSm.getMessageId());
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, cancelSm.getMessageId());
} | @Test
public void execute() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "CancelSm");
exchange.getIn().setHeader(SmppConstants.ID, "1");
exchange.getIn().setHeader(SmppConstants.SERVICE_TYPE, "XXX");
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
exchange.getIn().setHeader(SmppConstants.DEST_ADDR_TON, TypeOfNumber.INTERNATIONAL.value());
exchange.getIn().setHeader(SmppConstants.DEST_ADDR_NPI, NumberingPlanIndicator.INTERNET.value());
exchange.getIn().setHeader(SmppConstants.DEST_ADDR, "1919");
command.execute(exchange);
verify(session).cancelShortMessage("XXX", "1", TypeOfNumber.NATIONAL, NumberingPlanIndicator.NATIONAL, "1818",
TypeOfNumber.INTERNATIONAL, NumberingPlanIndicator.INTERNET, "1919");
assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
} |
private ConfigModelContext(ApplicationType applicationType,
DeployState deployState,
VespaModel vespaModel,
ConfigModelRepoAdder configModelRepoAdder,
TreeConfigProducer<AnyConfigProducer> parent,
String producerId) {
this.applicationType = applicationType;
this.deployState = deployState;
this.vespaModel = vespaModel;
this.configModelRepoAdder = configModelRepoAdder;
this.parent = parent;
this.producerId = producerId;
} | @Test
void testConfigModelContext() {
MockRoot root = new MockRoot();
String id = "foobar";
ApplicationPackage pkg = new MockApplicationPackage.Builder()
.withServices("<services version=\"1.0\"><admin version=\"2.0\" /></services>")
.build();
DeployState deployState = DeployState.createTestState(pkg);
DeployLogger logger = deployState.getDeployLogger();
ConfigModelContext ctx = ConfigModelContext.create(deployState, null, null, root, id);
assertEquals(pkg, ctx.getApplicationPackage());
assertEquals(id, ctx.getProducerId());
assertEquals(root, ctx.getParentProducer());
assertEquals(logger, ctx.getDeployLogger());
ctx = ConfigModelContext.create(root.getDeployState(), null, null, root, id);
assertEquals(id, ctx.getProducerId());
assertEquals(root, ctx.getParentProducer());
TreeConfigProducer newRoot = new MockRoot("bar");
ctx = ctx.withParent(newRoot);
assertEquals(id, ctx.getProducerId());
assertNotEquals(root, ctx.getParentProducer());
assertEquals(newRoot, ctx.getParentProducer());
} |
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
String body = exchange.getAttribute(Constants.PARAM_TRANSFORM);
ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT);
assert shenyuContext != null;
MetaData metaData = exchange.getAttribute(Constants.META_DATA);
if (!checkMetaData(metaData)) {
assert metaData != null;
LOG.error(" path is :{}, meta data have error.... {}", shenyuContext.getPath(), metaData);
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.META_DATA_ERROR);
return WebFluxResultUtils.result(exchange, error);
}
if (StringUtils.isNoneBlank(metaData.getParameterTypes()) && StringUtils.isBlank(body)) {
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.TARS_HAVE_BODY_PARAM);
return WebFluxResultUtils.result(exchange, error);
}
TarsInvokePrxList tarsInvokePrxList = ApplicationConfigCache.getInstance().get(metaData.getPath());
int index = ThreadLocalRandom.current().nextInt(tarsInvokePrxList.getTarsInvokePrxList().size());
Object prx = tarsInvokePrxList.getTarsInvokePrxList().get(index).getInvokePrx();
Method method = tarsInvokePrxList.getMethod();
CompletableFuture future;
try {
future = (CompletableFuture) method
.invoke(prx, PrxInfoUtil.getParamArray(tarsInvokePrxList.getParamTypes(), tarsInvokePrxList.getParamNames(), body));
} catch (Exception e) {
LOG.error("Invoke tars error", e);
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.TARS_INVOKE);
return WebFluxResultUtils.result(exchange, error);
}
return Mono.fromFuture(future.thenApply(ret -> {
if (Objects.isNull(ret)) {
ret = Constants.TARS_RPC_RESULT_EMPTY;
}
exchange.getAttributes().put(Constants.RPC_RESULT, ret);
exchange.getAttributes().put(Constants.CLIENT_RESPONSE_RESULT_TYPE, ResultEnum.SUCCESS.getName());
return ret;
})).onErrorMap(m -> new ShenyuException("failed to invoke tars")).then(chain.execute(exchange));
} | @Test
public void testTarsPluginWithEmptyBody() {
ShenyuContext context = mock(ShenyuContext.class);
exchange.getAttributes().put(Constants.CONTEXT, context);
exchange.getAttributes().put(Constants.META_DATA, metaData);
when(chain.execute(exchange)).thenReturn(Mono.empty());
RuleData data = mock(RuleData.class);
SelectorData selectorData = mock(SelectorData.class);
StepVerifier.create(tarsPluginUnderTest.doExecute(exchange, chain, selectorData, data)).expectSubscription().verifyComplete();
} |
public SearchOptions setPage(int page, int pageSize) {
checkArgument(page >= 1, "Page must be greater or equal to 1 (got " + page + ")");
setLimit(pageSize);
int lastResultIndex = page * pageSize;
checkArgument(lastResultIndex <= MAX_RETURNABLE_RESULTS, "Can return only the first %s results. %sth result asked.", MAX_RETURNABLE_RESULTS, lastResultIndex);
setOffset(lastResultIndex - pageSize);
return this;
} | @Test
public void fail_if_page_is_not_strictly_positive() {
assertThatThrownBy(() -> new SearchOptions().setPage(0, 10))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Page must be greater or equal to 1 (got 0)");
} |
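A worked example of the offset arithmetic above, assuming plain getOffset() and getLimit() accessors that are not shown in this excerpt: page 3 with pageSize 10 selects results 20..29.
SearchOptions options = new SearchOptions().setPage(3, 10);
// lastResultIndex = 3 * 10 = 30, which is checked against MAX_RETURNABLE_RESULTS
// offset = lastResultIndex - pageSize = 30 - 10 = 20
assert options.getOffset() == 20;
assert options.getLimit() == 10;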
public static FieldScope fromSetFields(Message message) {
return fromSetFields(
message, AnyUtils.defaultTypeRegistry(), AnyUtils.defaultExtensionRegistry());
} | @Test
public void testFromSetFields_iterables_errorForDifferentMessageTypes() {
// Don't run this test twice.
if (!testIsRunOnce()) {
return;
}
try {
FieldScopes.fromSetFields(
TestMessage2.newBuilder().setOInt(2).build(),
TestMessage3.newBuilder().setOInt(2).build());
fail("Expected failure.");
} catch (RuntimeException expected) {
expect
.that(expected)
.hasMessageThat()
.contains("Cannot create scope from messages with different descriptors");
expect.that(expected).hasMessageThat().contains(TestMessage2.getDescriptor().getFullName());
expect.that(expected).hasMessageThat().contains(TestMessage3.getDescriptor().getFullName());
}
} |
@Override
public Map<K, V> loadAll(Collection<K> keys) {
throw new UnsupportedOperationException();
} | @Test(expected = UnsupportedOperationException.class)
public void loadAll() {
cacheStore.loadAll(asList("1", "2"));
} |
public static Sensor closeTaskSensor(final String threadId,
final StreamsMetricsImpl streamsMetrics) {
return invocationRateAndCountSensor(
threadId,
CLOSE_TASK,
CLOSE_TASK_RATE_DESCRIPTION,
CLOSE_TASK_TOTAL_DESCRIPTION,
RecordingLevel.INFO,
streamsMetrics
);
} | @Test
public void shouldGetCloseTaskSensor() {
final String operation = "task-closed";
final String totalDescription = "The total number of closed tasks";
final String rateDescription = "The average per-second number of closed tasks";
when(streamsMetrics.threadLevelSensor(THREAD_ID, operation, RecordingLevel.INFO)).thenReturn(expectedSensor);
when(streamsMetrics.threadLevelTagMap(THREAD_ID)).thenReturn(tagMap);
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor = ThreadMetrics.closeTaskSensor(THREAD_ID, streamsMetrics);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addInvocationRateAndCountToSensor(
expectedSensor,
THREAD_LEVEL_GROUP,
tagMap,
operation,
rateDescription,
totalDescription
)
);
assertThat(sensor, is(expectedSensor));
}
} |
public static void main(String[] args) {
var root = new NodeImpl("1",
new NodeImpl("11",
new NodeImpl("111", NullNode.getInstance(), NullNode.getInstance()),
NullNode.getInstance()
),
new NodeImpl("12",
NullNode.getInstance(),
new NodeImpl("122", NullNode.getInstance(), NullNode.getInstance())
)
);
root.walk();
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
public static Comparator<StructLike> forType(Types.StructType struct) {
return new StructLikeComparator(struct);
} | @Test
public void testFixed() {
assertComparesCorrectly(
Comparators.forType(Types.FixedType.ofLength(3)),
ByteBuffer.wrap(new byte[] {1, 1, 3}),
ByteBuffer.wrap(new byte[] {1, 2, 1}));
} |
public static void main(String[] args) throws Exception {
// Create data source and create the customers, products and purchases tables
final var dataSource = createDataSource();
deleteSchema(dataSource);
createSchema(dataSource);
// create customer
var customerDao = new CustomerDaoImpl(dataSource);
var tom =
Customer.builder()
.name("Tom")
.money(Money.of(USD, 30))
.customerDao(customerDao)
.build();
tom.save();
// create products
var productDao = new ProductDaoImpl(dataSource);
var eggs =
Product.builder()
.name("Eggs")
.price(Money.of(USD, 10.0))
.expirationDate(LocalDate.now().plusDays(7))
.productDao(productDao)
.build();
var butter =
Product.builder()
.name("Butter")
.price(Money.of(USD, 20.00))
.expirationDate(LocalDate.now().plusDays(9))
.productDao(productDao)
.build();
var cheese =
Product.builder()
.name("Cheese")
.price(Money.of(USD, 25.0))
.expirationDate(LocalDate.now().plusDays(2))
.productDao(productDao)
.build();
eggs.save();
butter.save();
cheese.save();
// show money balance of customer after each purchase
tom.showBalance();
tom.showPurchases();
// buy eggs
tom.buyProduct(eggs);
tom.showBalance();
// buy butter
tom.buyProduct(butter);
tom.showBalance();
// Tom tries to buy cheese, but is refused
// because he doesn't have enough money
tom.buyProduct(cheese);
tom.showBalance();
// return butter and get money back
tom.returnProduct(butter);
tom.showBalance();
// Tom can buy cheese now because he has enough money
// and there is a discount on cheese because it expires in 2 days
tom.buyProduct(cheese);
tom.save();
// show money balance and purchases after shopping
tom.showBalance();
tom.showPurchases();
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[] {}));
} |
public static String getCanonicalName(Class cls) {
Objects.requireNonNull(cls, "cls");
return cls.getCanonicalName();
} | @Test
public void getCanonicalNameTest() {
final String canonicalName = ClassUtil.getCanonicalName(TestClass.class);
Assert.isTrue(Objects.equals("cn.hippo4j.config.toolkit.ClassUtilTest.TestClass", canonicalName));
} |
public abstract void run(T configuration, Environment environment) throws Exception; | @Test
void exitWithFatalErrorWhenCommandFails() throws Exception {
final File configFile = File.createTempFile("dropwizard-invalid-config", ".yml");
try {
final FakeApplication application = new FakeApplication();
application.run("server", configFile.getAbsolutePath());
assertThat(application.fatalError).isTrue();
} finally {
configFile.delete();
}
} |
public final void isNotNaN() {
if (actual == null) {
failWithActual(simpleFact("expected a double other than NaN"));
} else {
isNotEqualTo(NaN);
}
} | @Test
public void isNotNaNIsNaN() {
expectFailureWhenTestingThat(Double.NaN).isNotNaN();
} |
@Nonnull
public static <T> Traverser<T> traverseIterable(@Nonnull Iterable<? extends T> iterable) {
return traverseIterator(iterable.iterator());
} | @Test
public void when_traverseIterable_then_seeAllItems() {
validateTraversal(traverseIterable(asList(1, 2)));
} |
public boolean isSetByUser(PropertyKey key) {
if (mUserProps.containsKey(key)) {
Optional<Object> val = mUserProps.get(key);
// Sources larger than Source.CLUSTER_DEFAULT are considered to be set by the user
return val.isPresent() && (getSource(key).compareTo(Source.CLUSTER_DEFAULT) > 0);
}
return false;
} | @Test
public void isSetByUser() {
assertFalse(mProperties.isSetByUser(mKeyWithValue));
assertFalse(mProperties.isSetByUser(mKeyWithoutValue));
mProperties.put(mKeyWithValue, "value", Source.CLUSTER_DEFAULT);
mProperties.put(mKeyWithoutValue, "value", Source.CLUSTER_DEFAULT);
assertFalse(mProperties.isSetByUser(mKeyWithValue));
assertFalse(mProperties.isSetByUser(mKeyWithoutValue));
// Sources larger than Source.CLUSTER_DEFAULT are considered to be set by the user
mProperties.put(mKeyWithValue, "value", Source.SYSTEM_PROPERTY);
mProperties.put(mKeyWithoutValue, "value", Source.SYSTEM_PROPERTY);
assertTrue(mProperties.isSetByUser(mKeyWithValue));
assertTrue(mProperties.isSetByUser(mKeyWithoutValue));
mProperties.remove(mKeyWithValue);
assertFalse(mProperties.isSetByUser(mKeyWithValue));
} |
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
try {
return new SchemaAndValue(Schema.OPTIONAL_BOOLEAN_SCHEMA,
deserializer.deserialize(topic, value));
} catch (SerializationException e) {
throw new DataException("Failed to deserialize boolean: ", e);
}
} | @Test
public void testToConnectNullValue() {
assertEquals(Schema.OPTIONAL_BOOLEAN_SCHEMA, converter.toConnectData(TOPIC, null).schema());
assertNull(converter.toConnectData(TOPIC, null).value());
} |
@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf in, final List<Object> out) {
while (in.readableBytes() >= 1 + MySQLBinlogEventHeader.MYSQL_BINLOG_EVENT_HEADER_LENGTH) {
in.markReaderIndex();
MySQLPacketPayload payload = new MySQLPacketPayload(in, ctx.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get());
checkPayload(payload);
MySQLBinlogEventHeader binlogEventHeader = new MySQLBinlogEventHeader(payload, binlogContext.getChecksumLength());
if (!checkEventIntegrity(in, binlogEventHeader)) {
return;
}
Optional<MySQLBaseBinlogEvent> binlogEvent = decodeEvent(binlogEventHeader, payload);
if (!binlogEvent.isPresent()) {
skipChecksum(binlogEventHeader.getEventType(), in);
return;
}
if (binlogEvent.get() instanceof PlaceholderBinlogEvent) {
out.add(binlogEvent.get());
skipChecksum(binlogEventHeader.getEventType(), in);
return;
}
if (decodeWithTX) {
processEventWithTX(binlogEvent.get(), out);
} else {
processEventIgnoreTX(binlogEvent.get(), out);
}
skipChecksum(binlogEventHeader.getEventType(), in);
}
} | @Test
void assertDecodeTableMapEvent() {
ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer();
// the hex data comes from real binlog data; the table map event is the first event used in row-based replication
byteBuf.writeBytes(StringUtil.decodeHexDump("00cb38a962130100000041000000be7d000000007b000000000001000464735f310009745f6f726465725f31000408030ff604c8000a020c0101000201e0ff0a9b3a"));
binlogContext.getTableMap().put(123L, tableMapEventPacket);
List<Object> decodedEvents = new LinkedList<>();
binlogEventPacketDecoder.decode(channelHandlerContext, byteBuf, decodedEvents);
assertThat(binlogContext.getTableMap().size(), is(1));
assertThat(binlogContext.getTableMap().get(123L), instanceOf(MySQLBinlogTableMapEventPacket.class));
} |
public ModelLocalUriId asModelLocalUriId() {
return this.getClass().equals(ModelLocalUriId.class) ? this : new ModelLocalUriId(this.asLocalUri());
} | @Test
void asModelLocalUriIdWithModelLocalUriId() {
String path = "/example/some-id/instances/some-instance-id";
LocalUri parsed = LocalUri.parse(path);
ModelLocalUriId retrieved = new ModelLocalUriId(parsed);
ModelLocalUriId modelLocalUriId = retrieved.asModelLocalUriId();
assertThat(modelLocalUriId).isEqualTo(retrieved);
assertThat(modelLocalUriId == retrieved).isTrue();
} |
@Override
public ImportResult importItem(
UUID jobId,
IdempotentImportExecutor idempotentImportExecutor,
TokensAndUrlAuthData authData,
MusicContainerResource data)
throws Exception {
if (data == null) {
// Nothing to do
return new ImportResult(new AppleContentException("Null MusicContainerResource received on AppleMusicImporter::importItem"));
}
int playlistsCount = 0;
int playlistItemsCount = 0;
AppleMusicInterface musicInterface = factory
.getOrCreateMusicInterface(jobId, authData, appCredentials, exportingService, monitor);
if (!data.getPlaylists().isEmpty()) {
playlistsCount = musicInterface.importPlaylists(jobId, idempotentImportExecutor, data.getPlaylists());
}
if (!data.getPlaylistItems().isEmpty()) {
playlistItemsCount = musicInterface.importMusicPlaylistItems(jobId, idempotentImportExecutor, data.getPlaylistItems());
}
final Map<String, Integer> counts =
new ImmutableMap.Builder<String, Integer>()
.put(AppleMusicConstants.PLAYLISTS_COUNT_DATA_NAME, playlistsCount)
.put(AppleMusicConstants.PLAYLIST_ITEMS_COUNT_DATA_NAME, playlistItemsCount)
.build();
return ImportResult.OK
.copyWithCounts(counts);
} | @Test
public void testImportPlaylistTracks() throws Exception {
List<MusicPlaylistItem> musicPlaylistItems = createTestPlaylistItems(randomString());
setUpImportPlaylistTracksBatchResponse(musicPlaylistItems.stream().collect(
Collectors.toMap(item -> item.getTrack().getIsrcCode(), item -> SC_OK)));
MusicContainerResource playlistItemsResource = new MusicContainerResource(null, musicPlaylistItems, null, null);
final ImportResult importResult = appleMusicImporter.importItem(uuid, executor, authData, playlistItemsResource);
verify(appleMusicInterface)
.importMusicPlaylistItemsBatch(uuid.toString(), musicPlaylistItems);
assertThat(importResult.getCounts().isPresent()).isTrue();
assertThat(importResult.getCounts().get().get(AppleMusicConstants.PLAYLIST_ITEMS_COUNT_DATA_NAME) == playlistItemsResource.getPlaylistItems().size()).isTrue();
assertThat(importResult.getCounts().get().get(AppleMusicConstants.PLAYLISTS_COUNT_DATA_NAME) == 0).isTrue();
} |
@Deprecated
@InlineMe(replacement = "JsonParser.parseString(json)", imports = "com.google.gson.JsonParser")
public JsonElement parse(String json) throws JsonSyntaxException {
return parseString(json);
} | @Test
public void testReadWriteTwoObjects() throws Exception {
Gson gson = new Gson();
CharArrayWriter writer = new CharArrayWriter();
BagOfPrimitives expectedOne = new BagOfPrimitives(1, 1, true, "one");
writer.write(gson.toJson(expectedOne).toCharArray());
BagOfPrimitives expectedTwo = new BagOfPrimitives(2, 2, false, "two");
writer.write(gson.toJson(expectedTwo).toCharArray());
CharArrayReader reader = new CharArrayReader(writer.toCharArray());
JsonReader parser = new JsonReader(reader);
parser.setStrictness(Strictness.LENIENT);
JsonElement element1 = Streams.parse(parser);
JsonElement element2 = Streams.parse(parser);
BagOfPrimitives actualOne = gson.fromJson(element1, BagOfPrimitives.class);
assertThat(actualOne.stringValue).isEqualTo("one");
BagOfPrimitives actualTwo = gson.fromJson(element2, BagOfPrimitives.class);
assertThat(actualTwo.stringValue).isEqualTo("two");
} |
public static void main(String[] args) {
// Getting the bar series
BarSeries series = CsvTradesLoader.loadBitstampSeries();
// Building the trading strategy
Strategy strategy = buildStrategy(series);
// Running the strategy
BarSeriesManager seriesManager = new BarSeriesManager(series);
TradingRecord tradingRecord = seriesManager.run(strategy);
System.out.println("Number of positions for the strategy: " + tradingRecord.getPositionCount());
// Analysis
System.out.println("Total return for the strategy: " + new ReturnCriterion().calculate(series, tradingRecord));
} | @Test
public void test() {
GlobalExtremaStrategy.main(null);
} |
static Hashtable<String, String> generateJmxTable(Map<String, String> variables) {
Hashtable<String, String> ht = new Hashtable<>(variables.size());
for (Map.Entry<String, String> variable : variables.entrySet()) {
ht.put(
replaceInvalidChars(variable.getKey()),
replaceInvalidChars(variable.getValue()));
}
return ht;
} | @Test
void testGenerateTable() {
Map<String, String> vars = new HashMap<>();
vars.put("key0", "value0");
vars.put("key1", "value1");
vars.put("\"key2,=;:?'", "\"value2 (test),=;:?'");
Hashtable<String, String> jmxTable = JMXReporter.generateJmxTable(vars);
assertThat(jmxTable).containsEntry("key0", "value0");
assertThat(jmxTable).containsEntry("key0", "value0");
assertThat(jmxTable).containsEntry("key1", "value1");
assertThat(jmxTable).containsEntry("key2------", "value2_(test)------");
} |
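A minimal sketch of what replaceInvalidChars could look like, reverse-engineered from the expected entries in the test above; the real implementation in JMXReporter may differ.
static String replaceInvalidChars(String str) {
    // quotes are dropped, spaces become underscores, and the remaining
    // JMX-reserved characters are each replaced with a dash
    return str.replace("\"", "")
            .replace(" ", "_")
            .replaceAll("[,=;:?']", "-");
}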
@Override
public String getDocumentationLink(@Nullable String suffix) {
return documentationBaseUrl + Optional.ofNullable(suffix).orElse("");
} | @Test
public void getDocumentationLink_suffixNotProvided_withPropertyOverride() {
String propertyValue = "https://new-url.sonarqube.org/";
when(configuration.get(DOCUMENTATION_BASE_URL)).thenReturn(Optional.of(propertyValue));
documentationLinkGenerator = new DefaultDocumentationLinkGenerator(sonarQubeVersion, configuration);
String generatedLink = documentationLinkGenerator.getDocumentationLink(null);
assertThat(generatedLink).isEqualTo(propertyValue + "100.1000");
} |
public String summarize(final ExecutionStep<?> step) {
return summarize(step, "").summary;
} | @Test
public void shouldSummarizeSource() {
// When:
final String summary = planSummaryBuilder.summarize(sourceStep);
// Then:
assertThat(summary, is(
" > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0 INTEGER | Logger: QID.src\n"
));
} |
public static SimpleAclRuleResource fromKafkaResourcePattern(ResourcePattern kafkaResourcePattern) {
String resourceName;
SimpleAclRuleResourceType resourceType;
AclResourcePatternType resourcePattern = null;
switch (kafkaResourcePattern.resourceType()) {
case TOPIC:
resourceName = kafkaResourcePattern.name();
resourceType = SimpleAclRuleResourceType.TOPIC;
switch (kafkaResourcePattern.patternType()) {
case LITERAL:
resourcePattern = AclResourcePatternType.LITERAL;
break;
case PREFIXED:
resourcePattern = AclResourcePatternType.PREFIX;
break;
default:
throw new IllegalArgumentException("Invalid Resource type: " + kafkaResourcePattern.resourceType());
}
break;
case GROUP:
resourceType = SimpleAclRuleResourceType.GROUP;
resourceName = kafkaResourcePattern.name();
switch (kafkaResourcePattern.patternType()) {
case LITERAL:
resourcePattern = AclResourcePatternType.LITERAL;
break;
case PREFIXED:
resourcePattern = AclResourcePatternType.PREFIX;
break;
default:
throw new IllegalArgumentException("Invalid Resource type: " + kafkaResourcePattern.resourceType());
}
break;
case CLUSTER:
resourceType = SimpleAclRuleResourceType.CLUSTER;
resourceName = "kafka-cluster";
resourcePattern = AclResourcePatternType.LITERAL;
break;
case TRANSACTIONAL_ID:
resourceType = SimpleAclRuleResourceType.TRANSACTIONAL_ID;
resourceName = kafkaResourcePattern.name();
switch (kafkaResourcePattern.patternType()) {
case LITERAL:
resourcePattern = AclResourcePatternType.LITERAL;
break;
case PREFIXED:
resourcePattern = AclResourcePatternType.PREFIX;
break;
default:
throw new IllegalArgumentException("Invalid Resource type: " + kafkaResourcePattern.resourceType());
}
break;
default:
throw new IllegalArgumentException("Invalid Resource type: " + kafkaResourcePattern.resourceType());
}
return new SimpleAclRuleResource(resourceName, resourceType, resourcePattern);
} | @Test
public void testFromKafkaResourcePatternWithClusterResource() {
// Regular cluster
ResourcePattern kafkaClusterResourcePattern = new ResourcePattern(ResourceType.CLUSTER, "kafka-cluster", PatternType.LITERAL);
SimpleAclRuleResource expectedClusterResourceRules = new SimpleAclRuleResource("kafka-cluster", SimpleAclRuleResourceType.CLUSTER, AclResourcePatternType.LITERAL);
assertThat(SimpleAclRuleResource.fromKafkaResourcePattern(kafkaClusterResourcePattern), is(expectedClusterResourceRules));
} |
public boolean isPasswordReminderNeeded() {
return isPasswordReminderNeeded(new Date().getTime());
} | @Test
public void testIsPasswordReminderNeeded() {
long currTime = new Date().getTime();
Context context = ApplicationProvider.getApplicationContext();
Preferences prefs = new Preferences(context);
SharedPreferences sharedPrefs = PreferenceManager.getDefaultSharedPreferences(context);
// make sure that the password reminder is enabled by default
assertNotEquals(prefs.getPasswordReminderFrequency(), PassReminderFreq.NEVER);
// if the old preference is set to false, the frequency should be NEVER
sharedPrefs.edit().putBoolean("pref_password_reminder", false).apply();
assertEquals(prefs.getPasswordReminderFrequency(), PassReminderFreq.NEVER);
assertFalse(prefs.isPasswordReminderNeeded());
// password reminders are never needed when the frequency is set to NEVER
PassReminderFreq freq = PassReminderFreq.NEVER;
prefs.setPasswordReminderFrequency(freq);
assertFalse(prefs.isPasswordReminderNeeded());
// test correct behavior when the frequency is set to something other than NEVER
freq = PassReminderFreq.WEEKLY;
prefs.setPasswordReminderFrequency(freq);
assertFalse(prefs.isPasswordReminderNeeded(currTime));
prefs.setPasswordReminderTimestamp(currTime - freq.getDurationMillis() + 1);
assertFalse(prefs.isPasswordReminderNeeded(currTime));
prefs.setPasswordReminderTimestamp(currTime - freq.getDurationMillis());
assertTrue(prefs.isPasswordReminderNeeded(currTime));
prefs.setPasswordReminderTimestamp(currTime - freq.getDurationMillis() - 1);
assertTrue(prefs.isPasswordReminderNeeded(currTime));
// a password reminder should no longer be needed if it's configured to be less frequent than before
freq = PassReminderFreq.BIWEEKLY;
prefs.setPasswordReminderFrequency(freq);
assertFalse(prefs.isPasswordReminderNeeded(currTime));
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<ListProperties> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final KsqlConfigResolver resolver = new KsqlConfigResolver();
final Map<String, String> engineProperties = statement
.getSessionConfig()
.getConfig(false)
.getAllConfigPropsWithSecretsObfuscated();
final List<Property> mergedProperties = mergedProperties(statement);
final List<String> overwritten = mergedProperties
.stream()
.filter(property -> !Objects.equals(
engineProperties.get(property.getName()), property.getValue()))
.map(Property::getName)
.collect(Collectors.toList());
final List<String> defaultProps = mergedProperties.stream()
.filter(property -> resolver.resolve(property.getName(), false)
.map(resolved -> resolved.isDefaultValue(property.getValue()))
.orElse(false))
.map(Property::getName)
.collect(Collectors.toList());
return StatementExecutorResponse.handled(Optional.of(new PropertiesList(
statement.getMaskedStatementText(), mergedProperties, overwritten, defaultProps)));
} | @Test
public void shouldListUnresolvedStreamsTopicProperties() {
// When:
final PropertiesList properties = (PropertiesList) CustomExecutors.LIST_PROPERTIES.execute(
engine.configure("LIST PROPERTIES;")
.withConfig(new KsqlConfig(ImmutableMap.of(
"ksql.streams.topic.min.insync.replicas", "2"))),
mock(SessionProperties.class),
engine.getEngine(),
engine.getServiceContext()
).getEntity().orElseThrow(IllegalStateException::new);
// Then:
assertThat(
properties.getProperties(),
hasItem(new Property("ksql.streams.topic.min.insync.replicas", "KSQL", "2")));
} |
private static int getErrorCode(final int kernelCode, final int errorCode) {
Preconditions.checkArgument(kernelCode >= 0 && kernelCode < 10, "The value range of kernel code should be [0, 10).");
Preconditions.checkArgument(errorCode >= 0 && errorCode < 1000, "The value range of error code should be [0, 1000).");
return kernelCode * 1000 + errorCode;
} | @Test
void assertToSQLException() {
SQLException actual = new KernelSQLException(XOpenSQLState.GENERAL_ERROR, 1, 1, "reason") {
}.toSQLException();
assertThat(actual.getSQLState(), is(XOpenSQLState.GENERAL_ERROR.getValue()));
assertThat(actual.getErrorCode(), is(11001));
assertThat(actual.getMessage(), is("reason"));
assertNull(actual.getCause());
} |
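A short worked illustration of the composition, using hypothetical inputs:
// getErrorCode(2, 34) -> 2 * 1000 + 34 = 2034
// getErrorCode(1, 1)  -> 1 * 1000 + 1  = 1001
The 11001 asserted in the test therefore implies an additional type-code prefix applied outside this helper; that prefix is an assumption here, since the surrounding code is not shown.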
@VisibleForTesting
@SuppressWarnings("deprecation")
public static boolean isOnlyDictionaryEncodingPages(ColumnChunkMetaData columnMetaData)
{
// Files written with newer versions of Parquet libraries (e.g. parquet-mr 1.9.0) will have EncodingStats available
// Otherwise, fallback to v1 logic
EncodingStats stats = columnMetaData.getEncodingStats();
if (stats != null) {
return stats.hasDictionaryPages() && !stats.hasNonDictionaryEncodedPages();
}
Set<Encoding> encodings = columnMetaData.getEncodings();
if (encodings.contains(PLAIN_DICTIONARY)) {
// PLAIN_DICTIONARY was present, which means at least one page was
// dictionary-encoded and 1.0 encodings are used
// The only other allowed encodings are RLE and BIT_PACKED which are used for repetition or definition levels
return Sets.difference(encodings, ImmutableSet.of(PLAIN_DICTIONARY, RLE, BIT_PACKED)).isEmpty();
}
return false;
} | @Test
@SuppressWarnings("deprecation")
public void testDictionaryEncodingV1()
{
Set<Encoding> required = ImmutableSet.of(BIT_PACKED);
Set<Encoding> optional = ImmutableSet.of(BIT_PACKED, RLE);
Set<Encoding> repeated = ImmutableSet.of(RLE);
Set<Encoding> notDictionary = ImmutableSet.of(PLAIN);
Set<Encoding> mixedDictionary = ImmutableSet.of(PLAIN_DICTIONARY, PLAIN);
Set<Encoding> dictionary = ImmutableSet.of(PLAIN_DICTIONARY);
assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(required, notDictionary))), "required notDictionary");
assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(optional, notDictionary))), "optional notDictionary");
assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(repeated, notDictionary))), "repeated notDictionary");
assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(required, mixedDictionary))), "required mixedDictionary");
assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(optional, mixedDictionary))), "optional mixedDictionary");
assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(repeated, mixedDictionary))), "repeated mixedDictionary");
assertTrue(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(required, dictionary))), "required dictionary");
assertTrue(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(optional, dictionary))), "optional dictionary");
assertTrue(isOnlyDictionaryEncodingPages(createColumnMetaDataV1(union(repeated, dictionary))), "repeated dictionary");
} |
public void tryLock() {
try {
if (!lock.tryLock()) {
failAlreadyInProgress(null);
}
} catch (OverlappingFileLockException e) {
failAlreadyInProgress(e);
}
} | @Test
public void tryLockConcurrently() {
lock.tryLock();
assertThatThrownBy(() -> lock.tryLock())
.isInstanceOf(IllegalStateException.class)
.hasMessage("Another SonarQube analysis is already in progress for this project");
} |
@Override
public void clearMine() {
Map<UUID, TimestampedNodeHealth> sqHealthState = readReplicatedMap();
UUID clientUUID = hzMember.getUuid();
if (LOG.isTraceEnabled()) {
LOG.trace("Reading {} and clearing for {}", new HashMap<>(sqHealthState), clientUUID);
}
sqHealthState.remove(clientUUID);
} | @Test
public void clearMine_clears_entry_into_map_sq_health_state_under_current_client_uuid() {
Map<UUID, TimestampedNodeHealth> map = mock(Map.class);
doReturn(map).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
UUID uuid = UUID.randomUUID();
when(hazelcastMember.getUuid()).thenReturn(uuid);
underTest.clearMine();
verify(map).remove(uuid);
verifyNoMoreInteractions(map);
assertThat(logging.getLogs()).isEmpty();
} |
@Override
public Result invoke(Invocation invocation) throws RpcException {
Result result;
String value = getUrl().getMethodParameter(
RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString())
.trim();
if (ConfigUtils.isEmpty(value)) {
// no mock
result = this.invoker.invoke(invocation);
} else if (value.startsWith(FORCE_KEY)) {
if (logger.isWarnEnabled()) {
logger.warn(
CLUSTER_FAILED_MOCK_REQUEST,
"force mock",
"",
"force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : "
+ getUrl());
}
// force:direct mock
result = doMockInvoke(invocation, null);
} else {
// fail-mock
try {
result = this.invoker.invoke(invocation);
// fix:#4585
if (result.getException() != null && result.getException() instanceof RpcException) {
RpcException rpcException = (RpcException) result.getException();
if (rpcException.isBiz()) {
throw rpcException;
} else {
result = doMockInvoke(invocation, rpcException);
}
}
} catch (RpcException e) {
if (e.isBiz()) {
throw e;
}
if (logger.isWarnEnabled()) {
logger.warn(
CLUSTER_FAILED_MOCK_REQUEST,
"failed to mock invoke",
"",
"fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : "
+ getUrl(),
e);
}
result = doMockInvoke(invocation, e);
}
}
return result;
} | @Test
void testMockInvokerFromOverride_Invoke_force_throwCustomExceptionNotFound() {
URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName())
.addParameter(
REFER_KEY,
URL.encode(PATH_KEY + "=" + IHelloService.class.getName() + "&"
+ "getBoolean2.mock=force:throw java.lang.RuntimeException2"))
.addParameter("invoke_return_error", "true");
Invoker<IHelloService> cluster = getClusterInvoker(url);
// Configured with mock
RpcInvocation invocation = new RpcInvocation();
invocation.setMethodName("getBoolean2");
try {
cluster.invoke(invocation);
Assertions.fail();
} catch (Exception e) {
Assertions.assertTrue(e.getCause() instanceof IllegalStateException);
}
} |
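As a usage note (a sketch, not taken from the test suite): the branching above is selected per method through the mock URL parameter, so a consumer URL can opt into each mode like this:
// Hypothetical consumer-side configuration of the three mock modes:
URL base = URL.valueOf("dubbo://10.20.30.40/com.foo.BarService");
URL noMock = base; // empty value: real invoker only
URL forceMock = base.addParameter("sayHello.mock", "force:return null"); // mock only, real invoker skipped
URL failMock = base.addParameter("sayHello.mock", "fail:return null"); // real invoker, mock on non-biz RpcException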
@Override
public Option<HoodieBaseFile> getBaseFileOn(String partitionPath, String instantTime, String fileId) {
return execute(partitionPath, instantTime, fileId, preferredView::getBaseFileOn, (path, instant, id) -> getSecondaryView().getBaseFileOn(path, instant, id));
} | @Test
public void testGetBaseFileOn() {
Option<HoodieBaseFile> actual;
Option<HoodieBaseFile> expected = Option.of(new HoodieBaseFile("test.file"));
String partitionPath = "/table2";
String instantTime = "2020-01-01";
String fileID = "file.123";
when(primary.getBaseFileOn(partitionPath, instantTime, fileID)).thenReturn(expected);
actual = fsView.getBaseFileOn(partitionPath, instantTime, fileID);
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getBaseFileOn(partitionPath, instantTime, fileID))
.thenThrow(new RuntimeException());
when(secondary.getBaseFileOn(partitionPath, instantTime, fileID)).thenReturn(expected);
actual = fsView.getBaseFileOn(partitionPath, instantTime, fileID);
assertEquals(expected, actual);
resetMocks();
when(secondary.getBaseFileOn(partitionPath, instantTime, fileID)).thenReturn(expected);
actual = fsView.getBaseFileOn(partitionPath, instantTime, fileID);
assertEquals(expected, actual);
resetMocks();
when(secondary.getBaseFileOn(partitionPath, instantTime, fileID))
.thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getBaseFileOn(partitionPath, instantTime, fileID);
});
} |
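A sketch of the failover helper getBaseFileOn delegates to, including the sticky failover the test's third block implies (field and type names are assumptions):
// Hypothetical: call the preferred view first; on any error, fall over to the
// lazily created secondary view and stay there. Errors from the secondary
// view propagate to the caller, matching the last branch of the test.
@FunctionalInterface
interface Function3<A, B, C, R> {
    R apply(A a, B b, C c);
}

private volatile boolean usingSecondary = false;

private <R> R execute(String partitionPath, String instantTime, String fileId,
                      Function3<String, String, String, R> preferredCall,
                      Function3<String, String, String, R> secondaryCall) {
    if (!usingSecondary) {
        try {
            return preferredCall.apply(partitionPath, instantTime, fileId);
        } catch (RuntimeException e) {
            usingSecondary = true; // permanent failover, never retry the preferred view
        }
    }
    return secondaryCall.apply(partitionPath, instantTime, fileId);
}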
public Map<String, Object> getClusterMetrics(Map<String, Object> previousMetrics) {
Request clusterStatRequest = new Request("GET", "_stats");
try {
Response response = client.getLowLevelClient().performRequest(clusterStatRequest);
JsonNode responseNode = objectMapper.readValue(response.getEntity().getContent(), JsonNode.class);
if (responseNode != null) {
DocumentContext statContext = JsonPath.parse(responseNode.toString());
Map<String, Object> metrics = new HashMap<>();
Arrays.stream(ClusterStatMetrics.values())
.filter(m -> Objects.nonNull(m.getClusterStat()))
.forEach(metric -> {
String fieldName = metric.getFieldName();
try {
Object value = statContext.read(metric.getClusterStat());
if (value instanceof Number current && metric.isRateMetric() && previousMetrics.containsKey(fieldName)) {
Number previous = (Number) previousMetrics.get(fieldName);
long rate = current.longValue() - previous.longValue();
if (rate > 0) {
metrics.put(metric.getRateFieldName(), rate);
}
}
metrics.put(fieldName, value);
} catch (Exception e) {
log.error("Could not retrieve cluster metric {}", fieldName);
}
});
return metrics;
}
throw new IOException("No cluster stats returned");
} catch (IOException e) {
log.error("Error retrieving cluster stats", e);
}
return Map.of();
} | @Test
public void getClusterMetrics() {
final Map<String, Object> previousMetrics = Map.of("search_ops", 5);
Map<String, Object> clusterMetrics = collector.getClusterMetrics(previousMetrics);
assertThat(clusterMetrics.get("doc_count")).isEqualTo(6206956);
assertThat(clusterMetrics.get("search_ops")).isEqualTo(13);
assertThat(clusterMetrics.get("search_ops_rate")).isEqualTo(8L);
String[] allMetrics = Arrays.stream(ClusterStatMetrics.values()).map(ClusterStatMetrics::getFieldName).toArray(String[]::new);
assertThat(clusterMetrics).containsKeys(allMetrics);
} |
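To make the rate bookkeeping concrete, a self-contained illustration of the delta logic in getClusterMetrics() using the test's numbers:
import java.util.HashMap;
import java.util.Map;

// search_ops is a monotonically increasing counter; the collector derives a
// per-interval rate by subtracting the previous scrape's value.
class RateDemo {
    public static void main(String[] args) {
        Map<String, Object> previous = Map.of("search_ops", 5); // last scrape
        long total = 13L; // counter value read from _stats this scrape
        Map<String, Object> current = new HashMap<>();
        current.put("search_ops", total);
        long rate = total - ((Number) previous.get("search_ops")).longValue();
        if (rate > 0) { // negative deltas (e.g. after a counter reset) are dropped
            current.put("search_ops_rate", rate);
        }
        // holds search_ops=13 and search_ops_rate=8 (iteration order may vary)
        System.out.println(current);
    }
}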
public AtomicLong clientCaCertificateExpiration(String clusterName, String namespace) {
return getGaugeLong(new CertificateMetricKey(kind, namespace, clusterName, CertificateMetricKey.Type.CLIENT_CA),
METRICS_CERTIFICATE_EXPIRATION_MS, "Time in milliseconds when the certificate expires",
Optional.empty(), certificateExpirationMap,
Tag.of("cluster", clusterName),
Tag.of("type", CertificateMetricKey.Type.CLIENT_CA.getDisplayName()),
Tag.of("resource-namespace", namespace));
} | @Test
@DisplayName("Should return correct expiration time for client CA certificate")
void shouldReturnCorrectExpirationTimeForClientCaCertificate() {
AtomicLong expirationTime = metricsHolder.clientCaCertificateExpiration("TestCluster", "TestNamespace");
assertEquals(1000L, expirationTime.get(), "Expiration time should match the initial value");
} |
static boolean isValidIfPresent(@Nullable String email) {
return isEmpty(email) || isValidEmail(email);
} | @Test
public void various_examples_of_invalid_emails() {
assertThat(isValidIfPresent("infosonarsource.com")).isFalse();
assertThat(isValidIfPresent("info@.sonarsource.com")).isFalse();
assertThat(isValidIfPresent("info\"@.sonarsource.com")).isFalse();
} |
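A minimal sketch of the two helpers this guard composes — the regex here is an assumption chosen only to reject the shapes in the test; production validators are more involved:
import java.util.regex.Pattern;

// Hypothetical helper implementations behind isValidIfPresent().
final class Emails {
    // One non-blank, non-quote local part, then labels separated by single dots.
    private static final Pattern SIMPLE_EMAIL =
        Pattern.compile("^[^@\\s\"]+@[^@\\s.]+(\\.[^@\\s.]+)+$");

    static boolean isEmpty(String s) {
        return s == null || s.isEmpty();
    }

    static boolean isValidEmail(String s) {
        return SIMPLE_EMAIL.matcher(s).matches();
    }
}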
public AggregateAnalysisResult analyze(
final ImmutableAnalysis analysis,
final List<SelectExpression> finalProjection
) {
if (!analysis.getGroupBy().isPresent()) {
throw new IllegalArgumentException("Not an aggregate query");
}
final AggAnalyzer aggAnalyzer = new AggAnalyzer(analysis, functionRegistry);
aggAnalyzer.process(finalProjection);
return aggAnalyzer.result();
} | @Test
public void shouldNotCaptureNonAggregateFunction() {
// Given:
givenSelectExpression(FUNCTION_CALL);
givenHavingExpression(FUNCTION_CALL);
// When:
final AggregateAnalysisResult result = analyzer.analyze(analysis, selects);
// Then:
assertThat(result.getAggregateFunctions(), contains(REQUIRED_AGG_FUNC_CALL));
} |
@Override
public synchronized void unexport() {
if (!exported) {
return;
}
if (unexported) {
return;
}
if (!exporters.isEmpty()) {
for (List<Exporter<?>> es : exporters.values()) {
for (Exporter<?> exporter : es) {
try {
exporter.unregister();
} catch (Throwable t) {
logger.warn(
CONFIG_UNEXPORT_ERROR,
"",
"",
"Unexpected error occurred when unexport " + exporter,
t);
}
}
}
waitForIdle();
for (List<Exporter<?>> es : exporters.values()) {
for (Exporter<?> exporter : es) {
try {
exporter.unexport();
} catch (Throwable t) {
logger.warn(
CONFIG_UNEXPORT_ERROR,
"",
"",
"Unexpected error occurred when unexport " + exporter,
t);
}
}
}
exporters.clear();
}
unexported = true;
onUnexpoted();
ModuleServiceRepository repository = getScopeModel().getServiceRepository();
repository.unregisterProvider(providerModel);
} | @Test
void testUnexport() throws Exception {
System.setProperty(SHUTDOWN_WAIT_KEY, "0");
try {
service.export();
service.unexport();
// Thread.sleep(1000);
Mockito.verify(exporter, Mockito.atLeastOnce()).unexport();
} finally {
System.clearProperty(SHUTDOWN_WAIT_KEY);
}
} |
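The waitForIdle() step inside unexport() is what the test's SHUTDOWN_WAIT_KEY=0 short-circuits; one plausible shape, purely as an assumption, is a configurable grace period for in-flight requests:
// Hypothetical sketch: wait a configurable grace period between unregistering
// and destroying exporters. The test sets the property to "0" so that
// unexport() completes synchronously.
private void waitForIdle() {
    int waitMillis = Integer.parseInt(System.getProperty(SHUTDOWN_WAIT_KEY, "10000"));
    if (waitMillis > 0) {
        try {
            Thread.sleep(waitMillis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}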
@Override
public void executeSystemTask(WorkflowSystemTask systemTask, String taskId, int callbackTime) {
try {
Task task = executionDAOFacade.getTaskById(taskId);
if (task == null) {
LOG.error("TaskId: {} could not be found while executing SystemTask", taskId);
return;
}
LOG.debug("Task: {} fetched from execution DAO for taskId: {}", task, taskId);
String queueName = QueueUtils.getQueueName(task);
if (task.getStatus().isTerminal()) {
// This can happen when the queue backs up; tune the
// SystemTaskWorkerCoordinator's queues if it occurs frequently.
LOG.info("Task {}/{} was already completed.", task.getTaskType(), task.getTaskId());
queueDAO.remove(queueName, task.getTaskId());
return;
}
String workflowId = task.getWorkflowInstanceId();
Workflow workflow = executionDAOFacade.getWorkflowById(workflowId, true);
if (task.getStartTime() == 0) {
task.setStartTime(System.currentTimeMillis());
executionDAOFacade.updateTask(task);
Monitors.recordQueueWaitTime(task.getTaskDefName(), task.getQueueWaitTime());
}
if (workflow.getStatus().isTerminal()) {
LOG.info(
"Workflow {} has been completed for {}/{}",
workflow.getWorkflowId(),
systemTask.getName(),
task.getTaskId());
if (!task.getStatus().isTerminal()) {
task.setStatus(CANCELED);
}
executionDAOFacade.updateTask(task);
queueDAO.remove(queueName, task.getTaskId());
return;
}
LOG.debug("Executing {}/{}-{}", task.getTaskType(), task.getTaskId(), task.getStatus());
if (task.getStatus() == SCHEDULED || !systemTask.isAsyncComplete(task)) {
task.setPollCount(task.getPollCount() + 1);
// removed poll count DB update here
}
deciderService.populateTaskData(task);
// Stop polling for asyncComplete system tasks that are not in SCHEDULED state
if (systemTask.isAsyncComplete(task) && task.getStatus() != SCHEDULED) {
queueDAO.remove(QueueUtils.getQueueName(task), task.getTaskId());
return;
}
taskRunner.runMaestroTask(this, workflow, task, systemTask);
if (!task.getStatus().isTerminal()) {
task.setCallbackAfterSeconds(callbackTime);
try {
configureCallbackInterval(task); // overwrite if needed
} catch (Exception e) {
LOG.error(
"Error configuring callback interval for task [{}]. Please investigate it",
task.getTaskId(),
e);
}
}
updateTask(new TaskResult(task));
LOG.debug(
"Done Executing {}/{}-{} output={}",
task.getTaskType(),
task.getTaskId(),
task.getStatus(),
task.getOutputData());
} catch (Exception e) {
Monitors.error("MaestroWorkflowExecutor", "executeSystemTask");
LOG.error("Error executing system task - {}, with id: {}", systemTask, taskId, e);
}
} | @Test
public void testExecuteSystemTaskWithoutUpdatingPollingCount() {
String workflowId = "workflow-id";
String taskId = "task-id-1";
Task maestroTask = new Task();
maestroTask.setTaskType(Constants.MAESTRO_TASK_NAME);
maestroTask.setReferenceTaskName("maestroTask");
maestroTask.setWorkflowInstanceId(workflowId);
maestroTask.setScheduledTime(System.currentTimeMillis());
maestroTask.setTaskId(taskId);
maestroTask.setStatus(Task.Status.IN_PROGRESS);
maestroTask.setStartTime(123456L);
Workflow workflow = new Workflow();
workflow.setWorkflowId(workflowId);
workflow.setStatus(Workflow.WorkflowStatus.RUNNING);
when(executionDAOFacade.getTaskById(anyString())).thenReturn(maestroTask);
when(executionDAOFacade.getWorkflowById(anyString(), anyBoolean())).thenReturn(workflow);
maestroWorkflowExecutor.executeSystemTask(task2, taskId, 30);
assertEquals(Task.Status.IN_PROGRESS, maestroTask.getStatus());
assertEquals(1, maestroTask.getPollCount());
verify(executionDAOFacade, times(0)).updateTask(any());
assertEquals(123456, maestroTask.getStartTime());
} |
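The try/catch around configureCallbackInterval() above only fixes its contract — it may overwrite the generic callback and may throw. One plausible implementation, purely as an assumption (callbackOverrides is a hypothetical field):
// Hypothetical sketch: replace the generic callback interval with a
// task-type-specific override when one is configured.
private void configureCallbackInterval(Task task) {
    Long overrideSeconds = callbackOverrides.get(task.getTaskType());
    if (overrideSeconds != null) {
        task.setCallbackAfterSeconds(overrideSeconds);
    }
}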
public Invocable getInvocable() {
return invocable;
} | @Test
void testAccessors() {
final Set<? extends ConstraintViolation<?>> violations = Collections.emptySet();
@SuppressWarnings("unchecked")
final Inflector<Request, ?> inf = mock(Inflector.class);
final Invocable inv = Invocable.create(inf);
final JerseyViolationException test = new JerseyViolationException(violations, inv);
assertSame(inv, test.getInvocable());
} |
public static Read read() {
return Read.create();
} | @Test
public void testReadValidationFailsMissingTable() {
BigtableIO.Read read = BigtableIO.read().withBigtableOptions(BIGTABLE_OPTIONS);
thrown.expect(IllegalArgumentException.class);
read.expand(null);
} |
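For contrast with the failing validation above, a read configuration that supplies the missing table id — the project, instance, and table names are placeholders:
// Supplying withTableId(...) is what the expand() validation above checks for.
BigtableIO.Read read = BigtableIO.read()
    .withProjectId("my-project")
    .withInstanceId("my-instance")
    .withTableId("my-table");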