focal_method | test_case
---|---|
@Override
public HttpAction restore(final CallContext ctx, final String defaultUrl) {
val webContext = ctx.webContext();
val sessionStore = ctx.sessionStore();
val optRequestedUrl = sessionStore.get(webContext, Pac4jConstants.REQUESTED_URL);
HttpAction requestedAction = null;
if (optRequestedUrl.isPresent()) {
sessionStore.set(webContext, Pac4jConstants.REQUESTED_URL, null);
val requestedUrl = optRequestedUrl.get();
if (requestedUrl instanceof String) {
requestedAction = new FoundAction((String) requestedUrl);
} else if (requestedUrl instanceof RedirectionAction) {
requestedAction = (RedirectionAction) requestedUrl;
}
}
if (requestedAction == null) {
requestedAction = new FoundAction(defaultUrl);
}
LOGGER.debug("requestedAction: {}", requestedAction.getMessage());
if (requestedAction instanceof FoundAction) {
return HttpActionHelper.buildRedirectUrlAction(webContext, ((FoundAction) requestedAction).getLocation());
} else {
return HttpActionHelper.buildFormPostContentAction(webContext, ((OkAction) requestedAction).getContent());
}
} | @Test
public void testRestoreOkActionAfterPost() {
val context = MockWebContext.create().setFullRequestURL(PAC4J_URL).addRequestParameter(KEY, VALUE);
val formPost = HttpActionHelper.buildFormPostContent(context);
context.setRequestMethod("POST");
val sessionStore = new MockSessionStore();
sessionStore.set(context, Pac4jConstants.REQUESTED_URL, new OkAction(formPost));
val action = handler.restore(new CallContext(context, sessionStore), LOGIN_URL);
assertTrue(action instanceof OkAction);
assertEquals(FORM_DATA, ((OkAction) action).getContent());
assertFalse(sessionStore.get(context, Pac4jConstants.REQUESTED_URL).isPresent());
} |
@Override
public PageResult<DiscountActivityDO> getDiscountActivityPage(DiscountActivityPageReqVO pageReqVO) {
return discountActivityMapper.selectPage(pageReqVO);
} | @Test
public void testGetDiscountActivityPage() {
// mock data
DiscountActivityDO dbDiscountActivity = randomPojo(DiscountActivityDO.class, o -> { // to be matched by the query below
o.setName("芋艿");
o.setStatus(PromotionActivityStatusEnum.WAIT.getStatus());
o.setCreateTime(buildTime(2021, 1, 15));
});
discountActivityMapper.insert(dbDiscountActivity);
// insert a record whose name does not match
discountActivityMapper.insert(cloneIgnoreId(dbDiscountActivity, o -> o.setName("土豆")));
// insert a record whose status does not match
discountActivityMapper.insert(cloneIgnoreId(dbDiscountActivity, o -> o.setStatus(PromotionActivityStatusEnum.END.getStatus())));
// insert a record whose createTime does not match
discountActivityMapper.insert(cloneIgnoreId(dbDiscountActivity, o -> o.setCreateTime(buildTime(2021, 2, 10))));
// prepare the request parameters
DiscountActivityPageReqVO reqVO = new DiscountActivityPageReqVO();
reqVO.setName("芋艿");
reqVO.setStatus(PromotionActivityStatusEnum.WAIT.getStatus());
reqVO.setCreateTime((new LocalDateTime[]{buildTime(2021, 1, 1), buildTime(2021, 1, 31)}));
// invoke
PageResult<DiscountActivityDO> pageResult = discountActivityService.getDiscountActivityPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbDiscountActivity, pageResult.getList().get(0));
} |
@GET
@Path("{netId}")
@Produces(MediaType.APPLICATION_JSON)
public Response allocateIp(@PathParam("netId") String netId) {
log.trace("Received IP allocation request of network " + netId);
K8sNetwork network =
nullIsNotFound(networkService.network(netId), NETWORK_ID_NOT_FOUND);
IpAddress ip =
nullIsNotFound(ipamService.allocateIp(network.networkId()), IP_NOT_ALLOCATED);
ObjectNode root = mapper().createObjectNode();
String ipamId = network.networkId() + "-" + ip.toString();
K8sIpam ipam = new DefaultK8sIpam(ipamId, ip, network.networkId());
root.set(IPAM, codec(K8sIpam.class).encode(ipam, this));
return ok(root).build();
} | @Test
public void testAllocateIpWithNullIp() {
expect(mockNetworkService.network(anyObject())).andReturn(k8sNetwork);
expect(mockIpamService.allocateIp(anyObject())).andReturn(null);
replay(mockNetworkService);
replay(mockIpamService);
final WebTarget wt = target();
Response response = wt.path(IPAM + "/sona-network").request().get();
final int status = response.getStatus();
assertEquals(404, status);
verify(mockNetworkService);
verify(mockIpamService);
} |
public synchronized QueryId createNextQueryId()
{
// only generate 100,000 ids per second
if (counter > 99_999) {
// wait for the second to rollover
while (MILLISECONDS.toSeconds(nowInMillis()) == lastTimeInSeconds) {
Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
}
counter = 0;
}
// if it has been a second since the last id was generated, generate a new timestamp
long now = nowInMillis();
if (MILLISECONDS.toSeconds(now) != lastTimeInSeconds) {
// generate new timestamp
lastTimeInSeconds = MILLISECONDS.toSeconds(now);
lastTimestamp = TIMESTAMP_FORMAT.print(now);
// if the day has rolled over, restart the counter
if (MILLISECONDS.toDays(now) != lastTimeInDays) {
lastTimeInDays = MILLISECONDS.toDays(now);
counter = 0;
}
}
return new QueryId(String.format("%s_%05d_%s", lastTimestamp, counter++, coordinatorId));
} | @Test
public void testCreateNextQueryId()
{
TestIdGenerator idGenerator = new TestIdGenerator();
long millis = new DateTime(2001, 7, 14, 1, 2, 3, 4, DateTimeZone.UTC).getMillis();
idGenerator.setNow(millis);
// generate ids to 99,999
for (int i = 0; i < 100_000; i++) {
assertEquals(idGenerator.createNextQueryId(), new QueryId(String.format("20010714_010203_%05d_%s", i, idGenerator.getCoordinatorId())));
}
// next id will cause the counter to roll, but we need to add a second to the time or the code will block forever
millis += 1000;
idGenerator.setNow(millis);
for (int i = 0; i < 100_000; i++) {
assertEquals(idGenerator.createNextQueryId(), new QueryId(String.format("20010714_010204_%05d_%s", i, idGenerator.getCoordinatorId())));
}
// move forward one more second and generate 100 ids
millis += 1000;
idGenerator.setNow(millis);
for (int i = 0; i < 100; i++) {
assertEquals(idGenerator.createNextQueryId(), new QueryId(String.format("20010714_010205_%05d_%s", i, idGenerator.getCoordinatorId())));
}
// now we move to the start of the next day, and the counter should reset
millis = new DateTime(2001, 7, 15, 0, 0, 0, 0, DateTimeZone.UTC).getMillis();
idGenerator.setNow(millis);
for (int i = 0; i < 100_000; i++) {
assertEquals(idGenerator.createNextQueryId(), new QueryId(String.format("20010715_000000_%05d_%s", i, idGenerator.getCoordinatorId())));
}
} |
public PublicKey convertPublicKey(final String publicPemKey) {
final StringReader keyReader = new StringReader(publicPemKey);
try {
SubjectPublicKeyInfo publicKeyInfo = SubjectPublicKeyInfo
.getInstance(new PEMParser(keyReader).readObject());
return new JcaPEMKeyConverter().getPublicKey(publicKeyInfo);
} catch (IOException exception) {
throw new RuntimeException(exception);
}
} | @Test
void givenEmptyPublicKey_whenConvertPublicKey_thenThrowRuntimeException() {
// Given
String emptyPublicPemKey = "";
// When & Then
assertThatThrownBy(() -> KeyConverter.convertPublicKey(emptyPublicPemKey))
.isInstanceOf(RuntimeException.class)
.hasCauseInstanceOf(PEMException.class)
.hasMessageContaining("PEMException");
} |
public static CompletableFuture<Channel> toCompletableFuture(ChannelFuture channelFuture) {
Objects.requireNonNull(channelFuture, "channelFuture cannot be null");
CompletableFuture<Channel> adapter = new CompletableFuture<>();
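// if the Netty future is already done, complete the adapter immediately; otherwise bridge the result through a listener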
if (channelFuture.isDone()) {
if (channelFuture.isSuccess()) {
adapter.complete(channelFuture.channel());
} else {
adapter.completeExceptionally(channelFuture.cause());
}
} else {
channelFuture.addListener((ChannelFuture cf) -> {
if (cf.isSuccess()) {
adapter.complete(cf.channel());
} else {
adapter.completeExceptionally(cf.cause());
}
});
}
return adapter;
} | @Test(expectedExceptions = NullPointerException.class)
public void toCompletableFuture_shouldRequireNonNullArgument() {
ChannelFutures.toCompletableFuture(null);
} |
public Properties getProperties()
{
return properties;
} | @Test
public void testEmptyPassword()
throws SQLException
{
PrestoDriverUri parameters = createDriverUri("presto://localhost:8080?password=");
assertEquals(parameters.getProperties().getProperty("password"), "");
} |
public static void analyze(CreateTableStmt statement, ConnectContext context) {
final TableName tableNameObject = statement.getDbTbl();
MetaUtils.normalizationTableName(context, tableNameObject);
final String catalogName = tableNameObject.getCatalog();
MetaUtils.checkCatalogExistAndReport(catalogName);
final String tableName = tableNameObject.getTbl();
FeNameFormat.checkTableName(tableName);
Database db = MetaUtils.getDatabase(catalogName, tableNameObject.getDb());
if (statement instanceof CreateTemporaryTableStmt) {
analyzeTemporaryTable(statement, context, catalogName, db, tableName);
} else {
if (db.getTable(tableName) != null && !statement.isSetIfNotExists()) {
ErrorReport.reportSemanticException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
}
}
analyzeEngineName(statement, catalogName);
analyzeCharsetName(statement);
preCheckColumnRef(statement);
analyzeKeysDesc(statement);
analyzeSortKeys(statement);
analyzePartitionDesc(statement);
analyzeDistributionDesc(statement);
analyzeColumnRef(statement, catalogName);
if (statement.isHasGeneratedColumn()) {
analyzeGeneratedColumn(statement, context);
}
analyzeIndexDefs(statement);
} | @Test
public void testAnalyzeMaxBucket() throws Exception {
Config.max_column_number_per_table = 10000;
String sql = "CREATE TABLE test_create_table_db.starrocks_test_table\n" +
"(\n" +
" `tag_id` bigint not null,\n" +
" `tag_name` string\n" +
") DUPLICATE KEY(`tag_id`)\n" +
"PARTITION BY (`tag_id`)\n" +
"DISTRIBUTED BY HASH(`tag_id`) BUCKETS 1025\n" +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\"\n" +
")\n";
expectedEx.expect(SemanticException.class);
expectedEx.expectMessage("max_bucket_number_per_partition");
CreateTableStmt createTableStmt = (CreateTableStmt) com.starrocks.sql.parser.SqlParser
.parse(sql, connectContext.getSessionVariable().getSqlMode()).get(0);
CreateTableAnalyzer.analyze(createTableStmt, connectContext);
} |
@Override
public List<RegisteredMigrationStep> readFrom(long migrationNumber) {
validate(migrationNumber);
int startingIndex = lookupIndexOfClosestTo(migrationNumber);
if (startingIndex < 0) {
return Collections.emptyList();
}
return steps.subList(startingIndex, steps.size());
} | @Test
public void readFrom_returns_an_empty_stream_if_argument_is_greater_than_biggest_migration_number() {
verifyContainsNumbers(underTest.readFrom(9));
verifyContainsNumbers(unorderedSteps.readFrom(9));
} |
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testInFlightFetchOnPausedPartition() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
subscriptions.pause(tp0);
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
networkClientDelegate.poll(time.timer(0));
assertNull(fetchRecords().get(tp0));
} |
@GET
@Path("/{entityType}/{entityId}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8
/* , MediaType.APPLICATION_XML */})
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@PathParam("entityId") String entityId,
@QueryParam("fields") String fields) {
init(res);
TimelineEntity entity = null;
try {
entity = timelineDataManager.getEntity(
parseStr(entityType),
parseStr(entityId),
parseFieldsStr(fields, ","),
getUser(req));
} catch (YarnException e) {
// The user doesn't have access to the requested entity.
LOG.info(e.getMessage(), e);
throw new ForbiddenException(e);
} catch (IllegalArgumentException e) {
throw new BadRequestException(e);
} catch (Exception e) {
LOG.error("Error getting entity", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
if (entity == null) {
throw new NotFoundException("Timeline entity "
+ new EntityIdentifier(parseStr(entityId), parseStr(entityType))
+ " is not found");
}
return entity;
} | @Test
void testPostEntitiesWithPrimaryFilter() throws Exception {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
Map<String, Set<Object>> filters = new HashMap<String, Set<Object>>();
filters.put(TimelineStore.SystemFilter.ENTITY_OWNER.toString(),
new HashSet<Object>());
entity.setPrimaryFilters(filters);
entity.setEntityId("test id 6");
entity.setEntityType("test type 6");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
TimelinePutResponse putResponse =
response.getEntity(TimelinePutResponse.class);
assertEquals(0, putResponse.getErrors().size());
} |
public int remap(int var, int size) {
if ((var & REMAP_FLAG) != 0) {
return unmask(var);
}
int offset = var - argsSize;
if (offset < 0) {
// self projection for method arguments
return var;
}
if (offset >= mapping.length) {
mapping = Arrays.copyOf(mapping, Math.max(mapping.length * 2, offset + 1));
}
int mappedVar = mapping[offset];
int unmasked = unmask(mappedVar);
boolean isRemapped = ((mappedVar & REMAP_FLAG) != 0);
if (size == 2) {
if ((mappedVar & DOUBLE_SLOT_FLAG) == 0) {
// no double slot mapping over an int slot;
// must re-map unless the int slot is the last used one or there is a free double-ext slot
isRemapped = false;
}
} else {
// size == 1
if ((mappedVar & DOUBLE_SLOT_FLAG_2) != 0) {
// no mapping over a previously 2-slot value
isRemapped = false;
} else if ((mappedVar & DOUBLE_SLOT_FLAG) != 0) {
// the previously second part of the double slot is free to reuse
mapping[unmasked + 1] = (unmasked + 1) | REMAP_FLAG;
}
}
if (!isRemapped) {
mappedVar = remapVar(newVarIdxInternal(size), size);
setMapping(offset, mappedVar, size);
}
unmasked = unmask(mappedVar);
// adjust the mapping pointer if remapping with variable occupying 2 slots
nextMappedVar = Math.max(unmasked + size, nextMappedVar);
return unmasked;
} | @Test
public void remapSame() {
int var = instance.remap(2, 1);
assertEquals(var, instance.remap(2, 1));
} |
public ImmutableList<GlobalSetting> parse(final InputStream is) {
return Jsons.toObjects(is, GlobalSetting.class);
} | @Test
public void should_parse_settings_file_with_context() {
InputStream stream = getResourceAsStream("settings/context-settings.json");
ImmutableList<GlobalSetting> globalSettings = parser.parse(stream);
assertThat(globalSettings.get(0).includes().get(0), is(join("src", "test", "resources", "settings", "details", "foo.json")));
assertThat(globalSettings.get(0).getContext(), is("/foo"));
assertThat(globalSettings.get(1).includes().get(0), is(join("src", "test", "resources", "settings", "details", "bar.json")));
assertThat(globalSettings.get(1).getContext(), is("/bar"));
} |
protected int calculateDegree(Graph graph, Node n) {
return graph.getDegree(n);
} | @Test
public void testOneNodeDegree() {
GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(1);
Graph graph = graphModel.getGraph();
Node n = graph.getNode("0");
Degree d = new Degree();
int degree = d.calculateDegree(graph, n);
assertEquals(degree, 0);
} |
public Optional<Violation> validate(IndexSetConfig newConfig) {
// Don't validate prefix conflicts in case of an update
if (Strings.isNullOrEmpty(newConfig.id())) {
final Violation prefixViolation = validatePrefix(newConfig);
if (prefixViolation != null) {
return Optional.of(prefixViolation);
}
}
final Violation fieldMappingViolation = validateMappingChangesAreLegal(newConfig);
if (fieldMappingViolation != null) {
return Optional.of(fieldMappingViolation);
}
Violation refreshIntervalViolation = validateSimpleIndexSetConfig(newConfig);
if (refreshIntervalViolation != null){
return Optional.of(refreshIntervalViolation);
}
return Optional.empty();
} | @Test
public void testDataTieringByDefaultDisabledInCloud() {
final IndexSet indexSet = mock(IndexSet.class);
when(indexSet.getIndexPrefix()).thenReturn("foo");
when(indexSetRegistry.iterator()).thenReturn(Collections.singleton(indexSet).iterator());
this.validator = new IndexSetValidator(indexSetRegistry, elasticsearchConfiguration, dataTieringOrchestrator, dataTieringChecker);
IndexSetConfig config = testIndexSetConfig().toBuilder().dataTieringConfig(mock(DataTieringConfig.class)).build();
assertThat(validator.validate(config)).hasValueSatisfying(v ->
assertThat(v.message()).isEqualTo("data tiering feature is disabled!"));
when(dataTieringChecker.isEnabled()).thenReturn(true);
assertThat(validator.validate(config)).isEmpty();
} |
@Override
public int read() {
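// return the next byte as an unsigned value (0-255) via the 0xff mask, or -1 at end of stream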
return (mPosition < mLimit) ? (mData[mPosition++] & 0xff) : -1;
} | @Test
void testReadEmptyByteArray() {
Assertions.assertThrows(NullPointerException.class, () -> {
UnsafeByteArrayInputStream stream = new UnsafeByteArrayInputStream("abc".getBytes());
stream.read(null, 0, 1);
});
} |
public static Optional<PrimaryKey> getPrimaryKey(DatabaseMetaData metaData, TablePath tablePath)
throws SQLException {
// According to the Javadoc of java.sql.DatabaseMetaData#getPrimaryKeys,
// the returned primary key columns are ordered by COLUMN_NAME, not by KEY_SEQ.
// We need to sort them based on the KEY_SEQ value.
// seq -> column name
List<Pair<Integer, String>> primaryKeyColumns = new ArrayList<>();
String pkName = null;
try (ResultSet rs =
metaData.getPrimaryKeys(
tablePath.getDatabaseName(),
tablePath.getSchemaName(),
tablePath.getTableName())) {
while (rs.next()) {
String columnName = rs.getString("COLUMN_NAME");
// all the PK_NAME should be the same
pkName = cleanKeyName(rs.getString("PK_NAME"));
int keySeq = rs.getInt("KEY_SEQ");
// KEY_SEQ is 1-based index
primaryKeyColumns.add(Pair.of(keySeq, columnName));
}
}
// sort by KEY_SEQ and keep the distinct column names in order
List<String> pkFields =
primaryKeyColumns.stream()
.sorted(Comparator.comparingInt(Pair::getKey))
.map(Pair::getValue)
.distinct()
.collect(Collectors.toList());
if (CollectionUtils.isEmpty(pkFields)) {
return Optional.empty();
}
return Optional.of(PrimaryKey.of(pkName, pkFields));
} | @Test
void testPrimaryKeysNameWithOutSpecialChar() throws SQLException {
Optional<PrimaryKey> primaryKey =
CatalogUtils.getPrimaryKey(new TestDatabaseMetaData(), TablePath.of("test.test"));
Assertions.assertEquals("testfdawe_", primaryKey.get().getPrimaryKey());
} |
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
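// collapse each run of adjacent delimiters into a single delimiter, then strip leading/trailing ones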
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) {// Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
} | @Test
public void testSplitSSSSameDelimiterAsDefaultValue() {
assertThat(JOrphanUtils.split("a,bc,,", ",", ","), CoreMatchers.equalTo(new String[]{"a", "bc", ",", ","}));
} |
public static String getCityCodeByIdCard(String idcard) {
int len = idcard.length();
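// a valid ID card number is 15 or 18 digits; its first 4 digits encode the province and city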
if (len == CHINA_ID_MIN_LENGTH || len == CHINA_ID_MAX_LENGTH) {
return idcard.substring(0, 4);
}
return null;
} | @Test
public void getCityCodeByIdCardTest() {
String codeByIdCard = IdcardUtil.getCityCodeByIdCard(ID_18);
assertEquals("3210", codeByIdCard);
} |
@Override
protected void setProperties(Map<String, String> properties) throws DdlException {
Preconditions.checkState(properties != null);
for (String key : properties.keySet()) {
if (!DRIVER_URL.equals(key) && !URI.equals(key) && !USER.equals(key) && !PASSWORD.equals(key)
&& !TYPE.equals(key) && !NAME.equals(key) && !DRIVER_CLASS.equals(key)) {
throw new DdlException("Property " + key + " is unknown");
}
}
configs = properties;
checkProperties(DRIVER_URL);
checkProperties(DRIVER_CLASS);
checkProperties(URI);
checkProperties(USER);
checkProperties(PASSWORD);
computeDriverChecksum();
} | @Test(expected = DdlException.class)
public void testWithUnknownProperty() throws Exception {
Map<String, String> configs = getMockConfigs();
configs.put("xxx", "xxx");
JDBCResource resource = new JDBCResource("jdbc_resource_test");
resource.setProperties(configs);
} |
@Override
public void cycle() {
if (!getConfig().isWritable()) {
LOG.debug("Not cycling non-writable index set <{}> ({})", getConfig().id(), getConfig().title());
return;
}
int oldTargetNumber;
try {
oldTargetNumber = getNewestIndexNumber();
} catch (NoTargetIndexException ex) {
oldTargetNumber = -1;
}
final int newTargetNumber = oldTargetNumber + 1;
final String newTarget = buildIndexName(newTargetNumber);
final String oldTarget = buildIndexName(oldTargetNumber);
if (oldTargetNumber == -1) {
LOG.info("Cycling from <none> to <{}>.", newTarget);
} else {
LOG.info("Cycling from <{}> to <{}>.", oldTarget, newTarget);
}
// Create new index.
LOG.info("Creating target index <{}>.", newTarget);
if (!indices.create(newTarget, this)) {
throw new RuntimeException("Could not create new target index <" + newTarget + ">.");
}
LOG.info("Waiting for allocation of index <{}>.", newTarget);
final HealthStatus healthStatus = indices.waitForRecovery(newTarget);
checkIfHealthy(healthStatus, (status) -> new RuntimeException("New target index did not become healthy (target index: <" + newTarget + ">)"));
LOG.debug("Health status of index <{}>: {}", newTarget, healthStatus);
addDeflectorIndexRange(newTarget);
LOG.info("Index <{}> has been successfully allocated.", newTarget);
// Point deflector to new index.
final String indexAlias = getWriteIndexAlias();
LOG.info("Pointing index alias <{}> to new index <{}>.", indexAlias, newTarget);
final Activity activity = new Activity(IndexSet.class);
if (oldTargetNumber == -1) {
// Only pointing, not cycling.
pointTo(newTarget);
activity.setMessage("Cycled index alias <" + indexAlias + "> from <none> to <" + newTarget + ">.");
} else {
// Re-pointing from existing old index to the new one.
LOG.debug("Switching over index alias <{}>.", indexAlias);
pointTo(newTarget, oldTarget);
setIndexReadOnlyAndCalculateRange(oldTarget);
activity.setMessage("Cycled index alias <" + indexAlias + "> from <" + oldTarget + "> to <" + newTarget + ">.");
}
LOG.info("Successfully pointed index alias <{}> to index <{}>.", indexAlias, newTarget);
activityWriter.write(activity);
auditEventSender.success(AuditActor.system(nodeId), ES_WRITE_INDEX_UPDATE, ImmutableMap.of("indexName", newTarget));
} | @Test
public void cycleSwitchesIndexAliasToNewTarget() {
final String oldIndexName = config.indexPrefix() + "_0";
final String newIndexName = config.indexPrefix() + "_1";
final String deflector = "graylog_deflector";
final Map<String, Set<String>> indexNameAliases = ImmutableMap.of(
oldIndexName, Collections.singleton(deflector));
when(indices.getIndexNamesAndAliases(anyString())).thenReturn(indexNameAliases);
when(indices.create(newIndexName, mongoIndexSet)).thenReturn(true);
when(indices.waitForRecovery(newIndexName)).thenReturn(HealthStatus.Green);
final MongoIndexSet mongoIndexSet = createIndexSet(config);
mongoIndexSet.cycle();
verify(indices, times(1)).cycleAlias(deflector, newIndexName, oldIndexName);
} |
static void handleJvmOptions(String[] args, String lsJavaOpts) {
final JvmOptionsParser parser = new JvmOptionsParser(args[0]);
final String jvmOpts = args.length == 2 ? args[1] : null;
try {
Optional<Path> jvmOptions = parser.lookupJvmOptionsFile(jvmOpts);
parser.handleJvmOptions(jvmOptions, lsJavaOpts);
} catch (JvmOptionsFileParserException pex) {
System.err.printf(Locale.ROOT,
"encountered [%d] error%s parsing [%s]",
pex.invalidLines().size(),
pex.invalidLines().size() == 1 ? "" : "s",
pex.jvmOptionsFile());
int errorCounter = 0;
for (final Map.Entry<Integer, String> entry : pex.invalidLines().entrySet()) {
errorCounter++;
System.err.printf(Locale.ROOT,
"[%d]: encountered improperly formatted JVM option in [%s] on line number [%d]: [%s]",
errorCounter,
pex.jvmOptionsFile(),
entry.getKey(),
entry.getValue());
}
} catch (IOException ex) {
System.err.println("Error accessing jvm.options file");
System.exit(1);
}
} | @Test
public void givenLS_JAVA_OPTS_containingMultipleDefinitionsWithAlsoMaxOrderThenNoDuplicationOfMaxOrderOptionShouldHappen() throws IOException {
JvmOptionsParser.handleJvmOptions(new String[] {temp.toString()}, "-Xblabla -Dio.netty.allocator.maxOrder=13");
// Verify
final String output = outputStreamCaptor.toString();
int firstMatch = output.indexOf("-Dio.netty.allocator.maxOrder");
int lastMatch = output.lastIndexOf("-Dio.netty.allocator.maxOrder");
assertEquals("No duplication of options (io.netty.allocator.maxOrder) are admitted \n raw data[" + output + "]", firstMatch, lastMatch);
} |
@Override
public OAuth2AccessTokenDO grantRefreshToken(String refreshToken, String clientId) {
return oauth2TokenService.refreshAccessToken(refreshToken, clientId);
} | @Test
public void testGrantRefreshToken() {
// prepare parameters
String refreshToken = randomString();
String clientId = randomString();
// mock the method
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class);
when(oauth2TokenService.refreshAccessToken(eq(refreshToken), eq(clientId)))
.thenReturn(accessTokenDO);
// invoke and assert
assertPojoEquals(accessTokenDO, oauth2GrantService.grantRefreshToken(
refreshToken, clientId));
} |
@Override
public void add(long item, long count) {
if (count < 0) {
// Actually for negative increments we'll need to use the median
// instead of minimum, and accuracy will suffer somewhat.
// Probably makes sense to add an "allow negative increments"
// parameter to constructor.
throw new IllegalArgumentException("Negative increments not implemented");
}
for (int i = 0; i < depth; ++i) {
table[i][hash(item, i)] += count;
}
checkSizeAfterAdd(String.valueOf(item), count);
} | @Test(expected = IllegalStateException.class)
public void sizeOverflow() {
CountMinSketch sketch = new CountMinSketch(0.0001, 0.99999, 1);
sketch.add(3, Long.MAX_VALUE);
sketch.add(4, 1);
} |
public ImmutableSetMultimap<String, Pipeline> resolveStreamConnections(Map<String, Pipeline> currentPipelines) {
// Read all stream connections of those pipelines to allow processing messages through them
final HashMultimap<String, Pipeline> connections = HashMultimap.create();
try (final var pipelineConnectionsStream = pipelineConnectionsSupplier.get()) {
pipelineConnectionsStream.forEach(streamConnection -> {
streamConnection.pipelineIds().stream()
.map(currentPipelines::get)
.filter(Objects::nonNull)
.forEach(pipeline -> connections.put(streamConnection.streamId(), pipeline));
});
}
return ImmutableSetMultimap.copyOf(connections);
} | @Test
void resolveStreamConnections() {
final var registry = PipelineMetricRegistry.create(metricRegistry, Pipeline.class.getName(), Rule.class.getName());
final var resolver = new PipelineResolver(
new PipelineRuleParser(new FunctionRegistry(Map.of())),
PipelineResolverConfig.of(
() -> Stream.of(rule1),
() -> Stream.of(pipeline1),
() -> Stream.of(connections1, connections2)
)
);
final var pipelines = resolver.resolvePipelines(registry);
final var streamConnections = resolver.resolveStreamConnections(pipelines);
assertThat(streamConnections.size()).isEqualTo(2);
assertThat(streamConnections.get("stream-1").stream().toList()).satisfies(connections -> {
assertThat(connections).hasSize(1);
assertThat(connections.get(0).id()).isEqualTo("pipeline-1");
});
assertThat(streamConnections.get("stream-2").stream().toList()).satisfies(connections -> {
assertThat(connections).hasSize(1);
assertThat(connections.get(0).id()).isEqualTo("pipeline-1");
});
} |
@Override
public JSONObject getPresetProperties() {
return new JSONObject();
} | @Test
public void getPresetProperties() {
JSONObject jsonObject = mSensorsAPI.getPresetProperties();
Assert.assertEquals(0, jsonObject.length());
} |
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
} | @Test
public void shouldFormatCreateTableStatementWithExplicitTimestamp() {
// Given:
final CreateSourceProperties props = CreateSourceProperties.from(
new ImmutableMap.Builder<String, Literal>()
.putAll(SOME_WITH_PROPS.copyOfOriginalLiterals())
.put(CommonCreateConfigs.TIMESTAMP_NAME_PROPERTY, new StringLiteral("Foo"))
.put(CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY, new StringLiteral("%s"))
.build()
);
final CreateTable createTable = new CreateTable(
TEST,
ELEMENTS_WITH_PRIMARY_KEY,
false,
false,
props,
false);
// When:
final String sql = SqlFormatter.formatSql(createTable);
// Then:
assertThat(sql, is("CREATE TABLE TEST (`k3` STRING PRIMARY KEY, `Foo` STRING) "
+ "WITH (KAFKA_TOPIC='topic_test', "
+ "TIMESTAMP='Foo', TIMESTAMP_FORMAT='%s', VALUE_FORMAT='JSON');"));
} |
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) {
if ( val == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
}
try {
TemporalAccessor parsed = FEEL_TIME.parse(val);
if (parsed.query(TemporalQueries.offset()) != null) {
// it is an offset-zoned time, so I can know for certain an OffsetTime
OffsetTime asOffSetTime = parsed.query(OffsetTime::from);
return FEELFnResult.ofResult(asOffSetTime);
} else if (parsed.query(TemporalQueries.zone()) == null) {
// if it does not contain any zone information at all, then I know for certain is a local time.
LocalTime asLocalTime = parsed.query(LocalTime::from);
return FEELFnResult.ofResult(asLocalTime);
} else if (parsed.query(TemporalQueries.zone()) != null) {
boolean hasSeconds = timeStringWithSeconds(val);
LocalTime asLocalTime = parsed.query(LocalTime::from);
ZoneId zoneId = parsed.query(TemporalQueries.zone());
ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds);
return FEELFnResult.ofResult(zoneTime);
}
return FEELFnResult.ofResult(parsed);
} catch (DateTimeException e) {
return manageDateTimeException(e, val);
}
} | @Test
void invokeStringParamNotDateOrTime() {
FunctionTestUtil.assertResultError(timeFunction.invoke("test"), InvalidParametersEvent.class);
} |
@Override
public PageResult<CombinationActivityDO> getCombinationActivityPage(CombinationActivityPageReqVO pageReqVO) {
return combinationActivityMapper.selectPage(pageReqVO);
} | @Test
@Disabled // TODO Replace the null values with the values you need, then remove the @Disabled annotation
public void testGetCombinationActivityPage() {
// mock data
CombinationActivityDO dbCombinationActivity = randomPojo(CombinationActivityDO.class, o -> { // to be matched by the query below
o.setName(null);
//o.setSpuId(null);
o.setTotalLimitCount(null);
o.setSingleLimitCount(null);
o.setStartTime(null);
o.setEndTime(null);
o.setUserSize(null);
o.setVirtualGroup(null);
o.setStatus(null);
o.setLimitDuration(null);
o.setCreateTime(null);
});
combinationActivityMapper.insert(dbCombinationActivity);
// insert a record whose name does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setName(null)));
// insert a record whose spuId does not match
//combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setSpuId(null)));
// insert a record whose totalLimitCount does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setTotalLimitCount(null)));
// insert a record whose singleLimitCount does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setSingleLimitCount(null)));
// insert a record whose startTime does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setStartTime(null)));
// insert a record whose endTime does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setEndTime(null)));
// insert a record whose userSize does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setUserSize(null)));
// insert a record whose virtualGroup does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setVirtualGroup(null)));
// insert a record whose status does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setStatus(null)));
// insert a record whose limitDuration does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setLimitDuration(null)));
// insert a record whose createTime does not match
combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setCreateTime(null)));
// prepare the request parameters
CombinationActivityPageReqVO reqVO = new CombinationActivityPageReqVO();
reqVO.setName(null);
reqVO.setStatus(null);
// invoke
PageResult<CombinationActivityDO> pageResult = combinationActivityService.getCombinationActivityPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbCombinationActivity, pageResult.getList().get(0));
} |
static public int convert(ILoggingEvent event) {
Level level = event.getLevel();
switch (level.levelInt) {
case Level.ERROR_INT:
return SyslogConstants.ERROR_SEVERITY;
case Level.WARN_INT:
return SyslogConstants.WARNING_SEVERITY;
case Level.INFO_INT:
return SyslogConstants.INFO_SEVERITY;
case Level.DEBUG_INT:
case Level.TRACE_INT:
return SyslogConstants.DEBUG_SEVERITY;
default:
throw new IllegalArgumentException("Level " + level + " is not a valid level for a printing method");
}
} | @Test
public void smoke() {
assertEquals(SyslogConstants.DEBUG_SEVERITY, LevelToSyslogSeverity.convert(createEventOfLevel(Level.TRACE)));
assertEquals(SyslogConstants.DEBUG_SEVERITY, LevelToSyslogSeverity.convert(createEventOfLevel(Level.DEBUG)));
assertEquals(SyslogConstants.INFO_SEVERITY, LevelToSyslogSeverity.convert(createEventOfLevel(Level.INFO)));
assertEquals(SyslogConstants.WARNING_SEVERITY, LevelToSyslogSeverity.convert(createEventOfLevel(Level.WARN)));
assertEquals(SyslogConstants.ERROR_SEVERITY, LevelToSyslogSeverity.convert(createEventOfLevel(Level.ERROR)));
} |
public static boolean instanceOfBottomNavigationItemView(Object view) {
return ReflectUtil.isInstance(view, "com.google.android.material.bottomnavigation.BottomNavigationItemView", "android.support.design.internal.NavigationMenuItemView");
} | @Test
public void instanceOfBottomNavigationItemView() {
CheckBox textView1 = new CheckBox(mApplication);
textView1.setText("child1");
Assert.assertFalse(SAViewUtils.instanceOfActionMenuItem(textView1));
} |
public AutoDetectParserConfig getAutoDetectParserConfig() {
return autoDetectParserConfig;
} | @Test
public void testAutoDetectParserConfig() throws Exception {
TikaConfig tikaConfig =
new TikaConfig(TikaConfigTest.class.getResourceAsStream("TIKA-3594.xml"));
AutoDetectParserConfig config = tikaConfig.getAutoDetectParserConfig();
assertEquals(12345, config.getSpoolToDisk());
assertEquals(6789, config.getOutputThreshold());
assertNull(config.getMaximumCompressionRatio());
assertNull(config.getMaximumDepth());
assertNull(config.getMaximumPackageEntryDepth());
} |
public HtmlCreator title(String title) {
html.append("<title>").append(title).append("</title>");
return this;
} | @Test
public void testTitle() {
htmlCreator.title("Blade");
Assert.assertEquals(true, htmlCreator.html().contains("<title>Blade</title>"));
} |
public static Timestamp previous(Timestamp timestamp) {
if (timestamp.equals(Timestamp.MIN_VALUE)) {
return timestamp;
}
final int nanos = timestamp.getNanos();
final long seconds = timestamp.getSeconds();
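// decrement the nanos, borrowing one second when the nanos would underflow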
if (nanos - 1 >= 0) {
return Timestamp.ofTimeSecondsAndNanos(seconds, nanos - 1);
} else {
return Timestamp.ofTimeSecondsAndNanos(seconds - 1, NANOS_PER_SECOND - 1);
}
} | @Test
public void testPreviousDecrementsSecondsWhenNanosUnderflow() {
assertEquals(
Timestamp.ofTimeSecondsAndNanos(9L, 999999999),
TimestampUtils.previous(Timestamp.ofTimeSecondsAndNanos(10L, 0)));
} |
@Restricted(NoExternalUse.class)
public static String extractPluginNameFromIconSrc(String iconSrc) {
if (iconSrc == null) {
return "";
}
if (!iconSrc.contains("plugin-")) {
return "";
}
String[] arr = iconSrc.split(" ");
for (String element : arr) {
if (element.startsWith("plugin-")) {
return element.replaceFirst("plugin-", "");
}
}
return "";
} | @Test
public void extractPluginNameFromIconSrcHandlesNull() {
String result = Functions.extractPluginNameFromIconSrc(null);
assertThat(result, is(emptyString()));
} |
@Override
public Path move(final Path file, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
try {
final EueApiClient client = new EueApiClient(session);
if(status.isExists()) {
if(!new CaseInsensitivePathPredicate(file).test(target)) {
if(log.isWarnEnabled()) {
log.warn(String.format("Trash file %s to be replaced with %s", target, file));
}
new EueTrashFeature(session, fileid).delete(Collections.singletonMap(target, status), callback, delete);
}
}
final String resourceId = fileid.getFileId(file);
if(!new SimplePathPredicate(file.getParent()).test(target.getParent())) {
final ResourceMoveResponseEntries resourceMoveResponseEntries;
final String parentResourceId = fileid.getFileId(target.getParent());
switch(parentResourceId) {
case EueResourceIdProvider.ROOT:
case EueResourceIdProvider.TRASH:
resourceMoveResponseEntries = new MoveChildrenForAliasApiApi(client)
.resourceAliasAliasChildrenMovePost(parentResourceId,
Collections.singletonList(String.format("%s/resource/%s",
session.getBasePath(), resourceId)), null, null, null,
"rename", null);
break;
default:
resourceMoveResponseEntries = new MoveChildrenApi(client)
.resourceResourceIdChildrenMovePost(parentResourceId,
Collections.singletonList(String.format("%s/resource/%s",
session.getBasePath(), resourceId)), null, null, null,
"rename", null);
}
if(null == resourceMoveResponseEntries) {
// Move of single file will return 200 status code with empty response body
}
else {
for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
switch(resourceMoveResponseEntry.getStatusCode()) {
case HttpStatus.SC_OK:
break;
default:
log.warn(String.format("Failure %s moving file %s", resourceMoveResponseEntries, file));
final ResourceCreationResponseEntryEntity entity = resourceMoveResponseEntry.getEntity();
if(null == entity) {
throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getEntity().getError(),
null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
}
}
}
if(!StringUtils.equals(file.getName(), target.getName())) {
final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel();
final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate();
final Uifs uifs = new Uifs();
uifs.setName(target.getName());
resourceUpdateModelUpdate.setUifs(uifs);
resourceUpdateModel.setUpdate(resourceUpdateModelUpdate);
final ResourceMoveResponseEntries resourceMoveResponseEntries = new UpdateResourceApi(client).resourceResourceIdPatch(resourceId,
resourceUpdateModel, null, null, null);
if(null == resourceMoveResponseEntries) {
// Move of single file will return 200 status code with empty response body
}
else {
for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
switch(resourceMoveResponseEntry.getStatusCode()) {
case HttpStatus.SC_CREATED:
break;
default:
log.warn(String.format("Failure %s renaming file %s", resourceMoveResponseEntry, file));
throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
}
}
}
fileid.cache(file, null);
return target;
}
catch(ApiException e) {
throw new EueExceptionMappingService().map("Cannot rename {0}", e, file);
}
} | @Test
public void testMoveFileOverride() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final Path folder = new EueDirectoryFeature(session, fileid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path sourceFile = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
createFile(fileid, sourceFile, RandomUtils.nextBytes(48));
final Path targetFile = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
createFile(fileid, targetFile, RandomUtils.nextBytes(541));
new EueMoveFeature(session, fileid).move(sourceFile, targetFile, new TransferStatus().exists(true), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(new EueFindFeature(session, fileid).find(sourceFile));
assertTrue(new EueFindFeature(session, fileid).find(targetFile));
new EueDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@ConstantFunction(name = "bitShiftRightLogical", argTypes = {TINYINT, BIGINT}, returnType = TINYINT)
public static ConstantOperator bitShiftRightLogicalTinyInt(ConstantOperator first, ConstantOperator second) {
byte b = first.getTinyInt();
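// widen the signed byte to its unsigned 0-255 value so the logical right shift operates on the raw bit pattern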
int i = b >= 0 ? b : (((int) b) + 256);
return ConstantOperator.createTinyInt((byte) (i >>> second.getBigint()));
} | @Test
public void bitShiftRightLogicalTinyInt() {
assertEquals(1, ScalarOperatorFunctions.bitShiftRightLogicalTinyInt(O_TI_10, O_BI_3).getTinyInt());
} |
public static Field getField(Class<?> beanClass, String name) throws SecurityException {
final Field[] fields = getFields(beanClass);
return ArrayUtil.firstMatch(field -> name.equals(getFieldName(field)), fields);
} | @Test
public void getFieldTest() {
Field privateField = ReflectUtil.getField(TestSubClass.class, "privateField");
Assert.assertNotNull(privateField);
Field field = ReflectUtil.getField(TestSubClass.class, "field");
Assert.assertNotNull(field);
} |
@VisibleForTesting
static Object convertAvroField(Object avroValue, Schema schema) {
if (avroValue == null) {
return null;
}
switch (schema.getType()) {
case NULL:
case INT:
case LONG:
case DOUBLE:
case FLOAT:
case BOOLEAN:
return avroValue;
case ENUM:
case STRING:
return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8
case UNION:
for (Schema s : schema.getTypes()) {
if (s.getType() == Schema.Type.NULL) {
continue;
}
return convertAvroField(avroValue, s);
}
throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type");
case ARRAY:
case BYTES:
case FIXED:
case RECORD:
case MAP:
default:
throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType()
+ " for value field schema " + schema.getName());
}
} | @Test(expectedExceptions = UnsupportedOperationException.class,
expectedExceptionsMessageRegExp = "Unsupported avro schema type.*")
public void testNotSupportedAvroTypesRecord() {
BaseJdbcAutoSchemaSink.convertAvroField(new Object(), createFieldAndGetSchema((builder) ->
builder.name("field").type()
.record("myrecord").fields()
.name("f1").type().intType().noDefault()
.endRecord().noDefault()));
} |
protected static boolean isDoubleQuoted(String input) {
if (input == null || input.isBlank()) {
return false;
}
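// match only when the input is wrapped in exactly two quote characters on each side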
return input.matches("(^" + QUOTE_CHAR + "{2}([^" + QUOTE_CHAR + "]+)" + QUOTE_CHAR + "{2})");
} | @Test
public void testDoubleQuoted() {
assertTrue(isDoubleQuoted("\"\"c:\\program files\\test\\\"\""));
} |
public void insert(String data) {
this.insert(this.root, data);
} | @Test
public void insert() throws Exception {
TrieTree trieTree = new TrieTree();
trieTree.insert("abc");
trieTree.insert("abcd");
} |
public static Object del(final ConvertedMap data, final FieldReference field) {
final Object target = findParent(data, field);
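// the parent container is either a map (remove by key) or a list (remove by index)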
if (target instanceof ConvertedMap) {
return ((ConvertedMap) target).remove(field.getKey());
} else {
return target == null ? null : delFromList((ConvertedList) target, field.getKey());
}
} | @Test
public void testDel() throws Exception {
final ConvertedMap data = new ConvertedMap(1);
List<Object> inner = new ConvertedList(1);
data.put("foo", inner);
inner.add("bar");
data.put("bar", "baz");
assertEquals("bar", del(data, "[foo][0]"));
assertNull(del(data, "[foo][0]"));
assertEquals(new ConvertedList(0), get(data,"[foo]"));
assertEquals("baz", del(data, "[bar]"));
assertNull(get(data, "[bar]"));
} |
@Override
public void validateInputFilePatternSupported(String filepattern) {
getGcsPath(filepattern);
verifyPath(filepattern);
verifyPathIsAccessible(filepattern, "Could not find file %s");
} | @Test
public void testValidFilePattern() {
validator.validateInputFilePatternSupported("gs://bucket/path");
} |
@Override
public int capacity() {
return capacity;
} | @Test
public void testCapacity() {
assertEquals(CAPACITY, queue.capacity());
} |
@Override
public ContentHandler getNewContentHandler() {
if (type == HANDLER_TYPE.BODY) {
return new BodyContentHandler(
new WriteOutContentHandler(new ToTextContentHandler(), writeLimit,
throwOnWriteLimitReached, parseContext));
} else if (type == HANDLER_TYPE.IGNORE) {
return new DefaultHandler();
}
ContentHandler formatHandler = getFormatHandler();
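// a negative write limit means unlimited output, so the handler is returned unwrapped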
if (writeLimit < 0) {
return formatHandler;
}
return new WriteOutContentHandler(formatHandler, writeLimit, throwOnWriteLimitReached,
parseContext);
} | @Test
public void testBody() throws Exception {
Parser p = new MockParser(OVER_DEFAULT);
BasicContentHandlerFactory.HANDLER_TYPE type = BasicContentHandlerFactory.HANDLER_TYPE.BODY;
ContentHandler handler = new BasicContentHandlerFactory(type, -1).getNewContentHandler();
assertTrue(handler instanceof BodyContentHandler);
p.parse(null, handler, null, null);
String extracted = handler.toString();
assertNotContains("title", extracted);
assertContains("aaaaaaaaaa", extracted);
assertTrue(extracted.length() > 110000);
//now test write limit
p = new MockParser(10);
handler = new BasicContentHandlerFactory(type, 5).getNewContentHandler();
assertTrue(handler instanceof BodyContentHandler);
assertWriteLimitReached(p, (BodyContentHandler) handler);
extracted = handler.toString();
assertNotContains("This ", extracted);
assertContains("aaaa", extracted);
//now test outputstream call
p = new MockParser(OVER_DEFAULT);
ByteArrayOutputStream os = new ByteArrayOutputStream();
handler = new BasicContentHandlerFactory(type, -1).getNewContentHandler(os, UTF_8);
assertTrue(handler instanceof BodyContentHandler);
p.parse(null, handler, null, null);
assertNotContains("title", os.toByteArray());
assertContains("aaaaaaaaaa", os.toByteArray());
assertNotContains("<body", os.toByteArray());
assertNotContains("<html", os.toByteArray());
assertTrue(os.toByteArray().length > 110000);
p = new MockParser(10);
os = new ByteArrayOutputStream();
handler = new BasicContentHandlerFactory(type, 5).getNewContentHandler(os, UTF_8);
assertTrue(handler instanceof WriteOutContentHandler);
assertWriteLimitReached(p, (WriteOutContentHandler) handler);
assertEquals(0, os.toByteArray().length);
} |
static void closeSilently(
final ServerWebSocket webSocket,
final int code,
final String message) {
try {
final ImmutableMap<String, String> finalMessage = ImmutableMap.of(
"error",
message != null ? message : ""
);
final String json = ApiJsonMapper.INSTANCE.get().writeValueAsString(finalMessage);
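// send the error as a final text frame before closing with the (truncated) reason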
webSocket
.writeFinalTextFrame(json, r -> { })
.close((short) code, truncate(message));
} catch (final Exception e) {
LOG.info("Exception caught closing websocket", e);
}
} | @Test
public void shouldCloseQuietly() throws Exception {
// Given:
doThrow(new RuntimeException("Boom")).when(websocket)
.close(any(Short.class), any(String.class));
// When:
SessionUtil.closeSilently(websocket, INVALID_MESSAGE_TYPE.code(), "reason");
// Then:
verify(websocket).close(any(Short.class), any(String.class));
// And exception swallowed.
} |
@Override
public void check(final DataSource dataSource) {
try (
Connection connection = dataSource.getConnection();
PreparedStatement preparedStatement = connection.prepareStatement(SHOW_VARIABLES_SQL)) {
int parameterIndex = 1;
for (Entry<String, String> entry : REQUIRED_VARIABLES.entrySet()) {
preparedStatement.setString(parameterIndex++, entry.getKey());
}
try (ResultSet resultSet = preparedStatement.executeQuery()) {
while (resultSet.next()) {
String variableName = resultSet.getString(1).toUpperCase();
String expectedValue = REQUIRED_VARIABLES.get(variableName);
String actualValue = resultSet.getString(2);
ShardingSpherePreconditions.checkState(expectedValue.equalsIgnoreCase(actualValue), () -> new UnexpectedVariableValueException(variableName, expectedValue, actualValue));
}
}
} catch (final SQLException ex) {
throw new CheckDatabaseEnvironmentFailedException(ex);
}
} | @Test
void assertCheckVariableWithWrong() throws SQLException {
when(preparedStatement.executeQuery()).thenReturn(resultSet);
when(resultSet.next()).thenReturn(true, true, false);
when(resultSet.getString(1)).thenReturn("BINLOG_FORMAT", "LOG_BIN");
when(resultSet.getString(2)).thenReturn("ROW", "OFF");
assertThrows(UnexpectedVariableValueException.class, () -> variableChecker.check(dataSource));
} |
@Override
public void removeEdge(E edge) {
checkArgument(edge.src().equals(edge.dst()) ||
edges.indexOf(edge) == 0 ||
edges.lastIndexOf(edge) == edges.size() - 1,
"Edge must be at start or end of path, or it must be a cyclic edge");
edges.remove(edge);
} | @Test
public void removeEdge() {
MutablePath<TestVertex, TestEdge> p = new DefaultMutablePath<>();
p.appendEdge(new TestEdge(A, B));
p.appendEdge(new TestEdge(B, C));
p.appendEdge(new TestEdge(C, C));
p.appendEdge(new TestEdge(C, D));
validatePath(p, A, D, 4);
p.removeEdge(new TestEdge(A, B));
validatePath(p, B, D, 3);
p.removeEdge(new TestEdge(C, C));
validatePath(p, B, D, 2);
p.removeEdge(new TestEdge(C, D));
validatePath(p, B, C, 1);
} |
String getServiceDns() {
return serviceDns;
} | @Test
public void propertyServiceNameIsEmpty() {
// given
Map<String, Comparable> properties = createProperties();
properties.put(SERVICE_NAME.key(), " ");
String serviceDns = "service-dns";
properties.put(SERVICE_DNS.key(), serviceDns);
//when
KubernetesConfig config = new KubernetesConfig(properties);
//then
assertEquals(serviceDns, config.getServiceDns());
} |
public PluginDescriptor getDescriptor() {
Iterator<PluginInfo> iterator = this.iterator();
if (!iterator.hasNext()) {
throw new RuntimeException("Cannot get descriptor. Could not find any plugin information.");
}
return iterator.next().getDescriptor();
} | @Test
public void shouldGetDescriptorOfPluginUsingAnyPluginInfo() {
PluginDescriptor descriptor = mock(PluginDescriptor.class);
NotificationPluginInfo notificationPluginInfo = new NotificationPluginInfo(descriptor, null);
PluggableTaskPluginInfo pluggableTaskPluginInfo = new PluggableTaskPluginInfo(descriptor, null, null);
CombinedPluginInfo pluginInfo = new CombinedPluginInfo(List.of(pluggableTaskPluginInfo, notificationPluginInfo));
assertThat(pluginInfo.getDescriptor(), is(descriptor));
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void sendDocument() {
Message message = bot.execute(new SendDocument(chatId, docFileId)).message();
MessageTest.checkMessage(message);
DocumentTest.check(message.document());
message = bot.execute(
new SendDocument(chatId, docBytes).thumb(thumbBytes).contentType("application/pdf")
.caption("caption").captionEntities(new MessageEntity(MessageEntity.Type.italic, 0, 7)))
.message();
MessageTest.checkMessage(message);
DocumentTest.check(message.document());
assertEquals(thumbSize, message.document().thumb().fileSize());
MessageEntity captionEntity = message.captionEntities()[0];
assertEquals(MessageEntity.Type.italic, captionEntity.type());
assertEquals((Integer) 0, captionEntity.offset());
assertEquals((Integer) 7, captionEntity.length());
String caption = "caption <b>bold</b>", fileName = "my doc.pdf";
ParseMode parseMode = ParseMode.HTML;
message = bot.execute(
new SendDocument(chatId, docFile).fileName(fileName).thumb(thumbFile).caption(caption).parseMode(parseMode)
.disableContentTypeDetection(true))
.message();
MessageTest.checkMessage(message);
DocumentTest.check(message.document());
assertEquals(caption.replace("<b>", "").replace("</b>", ""), message.caption());
assertEquals(fileName, message.document().fileName());
assertEquals(thumbSize, message.document().thumb().fileSize());
captionEntity = message.captionEntities()[0];
assertEquals(MessageEntity.Type.bold, captionEntity.type());
assertEquals((Integer) 8, captionEntity.offset());
assertEquals((Integer) 4, captionEntity.length());
} |
public String anonymize(final ParseTree tree) {
return build(tree);
} | @Test
public void shouldAnonymizeDropStatementsCorrectly() {
Assert.assertEquals("DROP STREAM IF EXISTS stream1 DELETE TOPIC;",
anon.anonymize("DROP STREAM IF EXISTS my_stream DELETE TOPIC;"));
Assert.assertEquals("DROP TABLE IF EXISTS table1 DELETE TOPIC;",
anon.anonymize("DROP TABLE IF EXISTS my_table DELETE TOPIC;"));
Assert.assertEquals("DROP CONNECTOR IF EXISTS connector;",
anon.anonymize("DROP CONNECTOR IF EXISTS my_connector;"));
Assert.assertEquals("DROP TYPE IF EXISTS type;",
anon.anonymize("DROP TYPE IF EXISTS my_type;"));
} |
@Override
public String name() {
return store.name();
} | @Test
public void shouldGetNameForVersionedStore() {
givenWrapperWithVersionedStore();
when(versionedStore.name()).thenReturn(STORE_NAME);
assertThat(wrapper.name(), equalTo(STORE_NAME));
} |
public static void checkNullOrNonNullNonEmptyEntries(
@Nullable Collection<String> values, String propertyName) {
if (values == null) {
// pass
return;
}
for (String value : values) {
Preconditions.checkNotNull(
value, "Property '" + propertyName + "' cannot contain null entries");
Preconditions.checkArgument(
!value.trim().isEmpty(), "Property '" + propertyName + "' cannot contain empty strings");
}
} | @Test
public void testCheckNullOrNonNullNonEmptyEntries_mapEmptyValueFail() {
try {
Validator.checkNullOrNonNullNonEmptyEntries(Collections.singletonMap("key1", " "), "test");
Assert.fail();
} catch (IllegalArgumentException iae) {
Assert.assertEquals("Property 'test' cannot contain empty string values", iae.getMessage());
}
} |
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
TypeSerializerSnapshot<T> oldSerializerSnapshot) {
if (!(oldSerializerSnapshot instanceof PojoSerializerSnapshot)) {
return TypeSerializerSchemaCompatibility.incompatible();
}
PojoSerializerSnapshot<T> previousPojoSerializerSnapshot =
(PojoSerializerSnapshot<T>) oldSerializerSnapshot;
final Class<T> previousPojoClass =
previousPojoSerializerSnapshot.snapshotData.getPojoClass();
final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
previousPojoSerializerSnapshot.snapshotData.getFieldSerializerSnapshots();
final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
registeredSubclassSerializerSnapshots =
previousPojoSerializerSnapshot.snapshotData
.getRegisteredSubclassSerializerSnapshots();
final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
nonRegisteredSubclassSerializerSnapshots =
previousPojoSerializerSnapshot.snapshotData
.getNonRegisteredSubclassSerializerSnapshots();
if (previousPojoClass != snapshotData.getPojoClass()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
if (registeredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
if (nonRegisteredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
final IntermediateCompatibilityResult<T> preExistingFieldSerializersCompatibility =
getCompatibilityOfPreExistingFields(fieldSerializerSnapshots);
if (preExistingFieldSerializersCompatibility.isIncompatible()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
final IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility =
getCompatibilityOfPreExistingRegisteredSubclasses(
registeredSubclassSerializerSnapshots);
if (preExistingRegistrationsCompatibility.isIncompatible()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
if (newPojoSerializerIsCompatibleAfterMigration(
preExistingFieldSerializersCompatibility,
preExistingRegistrationsCompatibility,
fieldSerializerSnapshots)) {
return TypeSerializerSchemaCompatibility.compatibleAfterMigration();
}
if (newPojoSerializerIsCompatibleWithReconfiguredSerializer(
preExistingFieldSerializersCompatibility,
preExistingRegistrationsCompatibility,
registeredSubclassSerializerSnapshots,
nonRegisteredSubclassSerializerSnapshots)) {
return TypeSerializerSchemaCompatibility.compatibleWithReconfiguredSerializer(
constructReconfiguredPojoSerializer(
preExistingFieldSerializersCompatibility,
registeredSubclassSerializerSnapshots,
preExistingRegistrationsCompatibility,
nonRegisteredSubclassSerializerSnapshots));
}
return TypeSerializerSchemaCompatibility.compatibleAsIs();
} | @Test
void testResolveSchemaCompatibilityWithNewFields() {
final PojoSerializerSnapshot<TestPojo> oldSnapshot =
buildTestSnapshot(Collections.singletonList(HEIGHT_FIELD));
final PojoSerializerSnapshot<TestPojo> newSnapshot =
buildTestSnapshot(Arrays.asList(ID_FIELD, NAME_FIELD, HEIGHT_FIELD));
final TypeSerializerSchemaCompatibility<TestPojo> resultCompatibility =
newSnapshot.resolveSchemaCompatibility(oldSnapshot);
assertThat(resultCompatibility.isCompatibleAfterMigration()).isTrue();
} |
@VisibleForTesting
public int getFailApplicationAttemptFailedRetrieved() {
return numFailAppAttemptFailedRetrieved.value();
} | @Test
public void testFailApplicationAttemptFailed() {
long totalBadBefore = metrics.getFailApplicationAttemptFailedRetrieved();
badSubCluster.getFailApplicationAttempt();
Assert.assertEquals(totalBadBefore + 1, metrics.getFailApplicationAttemptFailedRetrieved());
} |
@Bean
public TimeLimiterRegistry timeLimiterRegistry(
TimeLimiterConfigurationProperties timeLimiterConfigurationProperties,
EventConsumerRegistry<TimeLimiterEvent> timeLimiterEventConsumerRegistry,
RegistryEventConsumer<TimeLimiter> timeLimiterRegistryEventConsumer,
@Qualifier("compositeTimeLimiterCustomizer") CompositeCustomizer<TimeLimiterConfigCustomizer> compositeTimeLimiterCustomizer) {
TimeLimiterRegistry timeLimiterRegistry =
createTimeLimiterRegistry(timeLimiterConfigurationProperties, timeLimiterRegistryEventConsumer,
compositeTimeLimiterCustomizer);
registerEventConsumer(timeLimiterRegistry, timeLimiterEventConsumerRegistry, timeLimiterConfigurationProperties);
initTimeLimiterRegistry(timeLimiterRegistry, timeLimiterConfigurationProperties, compositeTimeLimiterCustomizer);
return timeLimiterRegistry;
} | @Test
public void testCreateTimeLimiterRegistryWithUnknownConfig() {
TimeLimiterConfigurationProperties timeLimiterConfigurationProperties = new TimeLimiterConfigurationProperties();
io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties instanceProperties = new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
instanceProperties.setBaseConfig("unknownConfig");
timeLimiterConfigurationProperties.getInstances().put("backend", instanceProperties);
TimeLimiterConfiguration timeLimiterConfiguration = new TimeLimiterConfiguration();
DefaultEventConsumerRegistry<TimeLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
//When
assertThatThrownBy(() -> timeLimiterConfiguration.timeLimiterRegistry(timeLimiterConfigurationProperties, eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), compositeTimeLimiterCustomizerTestInstance()))
.isInstanceOf(ConfigurationNotFoundException.class)
.hasMessage("Configuration with name 'unknownConfig' does not exist");
} |
public static boolean isBasicInfoChanged(Member actual, Member expected) {
if (null == expected) {
return null != actual;
}
if (!expected.getIp().equals(actual.getIp())) {
return true;
}
if (expected.getPort() != actual.getPort()) {
return true;
}
if (!expected.getAddress().equals(actual.getAddress())) {
return true;
}
if (!expected.getState().equals(actual.getState())) {
return true;
}
// whether the gRPC report ability changed
if (expected.isGrpcReportEnabled() != actual.isGrpcReportEnabled()) {
return true;
}
return isBasicInfoChangedInExtendInfo(expected, actual);
} | @Test
void testIsBasicInfoChangedForMoreBasicExtendInfo() {
Member newMember = buildMember();
newMember.setExtendVal(MemberMetaDataConstants.VERSION, "TEST");
assertTrue(MemberUtil.isBasicInfoChanged(newMember, originalMember));
} |
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds)
{
List<Integer> intermediateGlyphsFromGsub = originalGlyphIds;
for (String feature : FEATURES_IN_ORDER)
{
if (!gsubData.isFeatureSupported(feature))
{
LOG.debug("the feature {} was not found", feature);
continue;
}
LOG.debug("applying the feature {}", feature);
ScriptFeature scriptFeature = gsubData.getFeature(feature);
intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature,
intermediateGlyphsFromGsub);
}
return Collections.unmodifiableList(intermediateGlyphsFromGsub);
} | @Test
void testApplyLigaturesCalibri() throws IOException
{
File file = new File("c:/windows/fonts/calibri.ttf");
Assumptions.assumeTrue(file.exists(), "calibri ligature test skipped");
CmapLookup cmapLookup;
GsubWorker gsubWorkerForLatin;
try (TrueTypeFont ttf = new TTFParser().parse(new RandomAccessReadBufferedFile(file)))
{
cmapLookup = ttf.getUnicodeCmapLookup();
gsubWorkerForLatin = new GsubWorkerFactory().getGsubWorker(cmapLookup, ttf.getGsubData());
}
assertEquals(Arrays.asList(286, 299, 286, 272, 415, 448, 286),
gsubWorkerForLatin.applyTransforms(getGlyphIds("effective", cmapLookup)));
assertEquals(Arrays.asList(258, 427, 410, 437, 282, 286),
gsubWorkerForLatin.applyTransforms(getGlyphIds("attitude", cmapLookup)));
assertEquals(Arrays.asList(258, 312, 367, 349, 258, 410, 286),
gsubWorkerForLatin.applyTransforms(getGlyphIds("affiliate", cmapLookup)));
assertEquals(Arrays.asList(302, 367, 373),
gsubWorkerForLatin.applyTransforms(getGlyphIds("film", cmapLookup)));
assertEquals(Arrays.asList(327, 381, 258, 410),
gsubWorkerForLatin.applyTransforms(getGlyphIds("float", cmapLookup)));
assertEquals(Arrays.asList(393, 367, 258, 414, 381, 396, 373),
gsubWorkerForLatin.applyTransforms(getGlyphIds("platform", cmapLookup)));
} |
@Override
public boolean isInputConsumable(
SchedulingExecutionVertex executionVertex,
Set<ExecutionVertexID> verticesToDeploy,
Map<ConsumedPartitionGroup, Boolean> consumableStatusCache) {
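// Every consumed partition group must be consumable; per-group results are memoized in the cache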
for (ConsumedPartitionGroup consumedPartitionGroup :
executionVertex.getConsumedPartitionGroups()) {
if (!consumableStatusCache.computeIfAbsent(
consumedPartitionGroup, this::isConsumableBasedOnFinishedProducers)) {
return false;
}
}
return true;
} | @Test
void testAllFinishedBlockingInput() {
final TestingSchedulingTopology topology = new TestingSchedulingTopology();
final List<TestingSchedulingExecutionVertex> producers =
topology.addExecutionVertices().withParallelism(2).finish();
final List<TestingSchedulingExecutionVertex> consumer =
topology.addExecutionVertices().withParallelism(2).finish();
topology.connectAllToAll(producers, consumer)
.withResultPartitionState(ResultPartitionState.ALL_DATA_PRODUCED)
.withResultPartitionType(ResultPartitionType.BLOCKING)
.finish();
PartialFinishedInputConsumableDecider inputConsumableDecider =
createPartialFinishedInputConsumableDecider();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(0), Collections.emptySet(), new HashMap<>()))
.isTrue();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(1), Collections.emptySet(), new HashMap<>()))
.isTrue();
} |
public void pruneColumns(Configuration conf, Path inputFile, Path outputFile, List<String> cols)
throws IOException {
RewriteOptions options = new RewriteOptions.Builder(conf, inputFile, outputFile)
.prune(cols)
.build();
ParquetRewriter rewriter = new ParquetRewriter(options);
rewriter.processBlocks();
rewriter.close();
} | @Test
public void testPruneNestedParentColumn() throws Exception {
// Create Parquet file
String inputFile = createParquetFile("input");
String outputFile = createTempFile("output");
// Remove the parent column. All of its children will be removed.
List<String> cols = Arrays.asList("Links");
columnPruner.pruneColumns(conf, new Path(inputFile), new Path(outputFile), cols);
// Verify the schema is not changed for the columns not pruned
ParquetMetadata pmd =
ParquetFileReader.readFooter(conf, new Path(outputFile), ParquetMetadataConverter.NO_FILTER);
MessageType schema = pmd.getFileMetaData().getSchema();
List<Type> fields = schema.getFields();
assertEquals(fields.size(), 3);
assertEquals(fields.get(0).getName(), "DocId");
assertEquals(fields.get(1).getName(), "Name");
assertEquals(fields.get(2).getName(), "Gender");
// Verify the data are not changed for the columns not pruned
List<String> prunePaths = Arrays.asList("Links");
validateColumns(inputFile, prunePaths);
} |
@Override
public boolean hasNext() {
try {
if (iteratorClosed) {
return false;
}
getNextItem();
if (!hasNext) {
close();
}
hasNextMethodCalled = true;
return hasNext;
} catch (SQLException sqlException) {
close();
throw ExceptionUtil.sneakyThrow(sqlException);
}
} | @Test
void testHasNext() throws SQLException {
try (Connection connection = DriverManager.getConnection(dbConnectionUrl)) {
JoinPredicateScanResultSetIterator<String> iterator = createIterator(connection);
ArrayList<String> tableNameList = new ArrayList<>();
while (iterator.hasNext()) {
tableNameList.add(iterator.next());
}
assertThat(tableNameList).containsAll(EXPECTED_TABLES);
// Call hasNext() and next() methods one more time after the loop to test their behavior
assertThat(iterator.hasNext()).isFalse();
assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class);
}
} |
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
int seconds = payload.getByteBuf().readInt();
if (0 == seconds) {
return MySQLTimeValueUtils.DATETIME_OF_ZERO;
}
int nanos = columnDef.getColumnMeta() > 0 ? new MySQLFractionalSeconds(columnDef.getColumnMeta(), payload).getNanos() : 0;
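// Convert the stored seconds to epoch millis, then attach the fractional-second nanos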
Timestamp result = new Timestamp(seconds * 1000L);
result.setNanos(nanos);
return result;
} | @Test
void assertReadNullTime() {
when(byteBuf.readInt()).thenReturn(0);
assertThat(new MySQLTimestamp2BinlogProtocolValue().read(columnDef, payload), is(MySQLTimeValueUtils.DATETIME_OF_ZERO));
} |
@Override // The mappedStatementId parameter is unused for now; later, results could be cached by mappedStatementId + DataPermission
public List<DataPermissionRule> getDataPermissionRule(String mappedStatementId) {
// 1. No data permission rules configured
if (CollUtil.isEmpty(rules)) {
return Collections.emptyList();
}
// 2. Not annotated: enabled by default, return all rules
DataPermission dataPermission = DataPermissionContextHolder.get();
if (dataPermission == null) {
return rules;
}
// 3. Annotated but disabled
if (!dataPermission.enable()) {
return Collections.emptyList();
}
// 4. Annotated: include only the listed rules
if (ArrayUtil.isNotEmpty(dataPermission.includeRules())) {
return rules.stream().filter(rule -> ArrayUtil.contains(dataPermission.includeRules(), rule.getClass()))
.collect(Collectors.toList()); // rule lists are usually small, so a HashSet lookup is unnecessary
}
// 5. Annotated: exclude the listed rules
if (ArrayUtil.isNotEmpty(dataPermission.excludeRules())) {
return rules.stream().filter(rule -> !ArrayUtil.contains(dataPermission.excludeRules(), rule.getClass()))
.collect(Collectors.toList()); // rule lists are usually small, so a HashSet lookup is unnecessary
}
// 6. Annotated: all rules apply
return rules;
} | @Test
public void testGetDataPermissionRule_05() {
// Prepare parameters
String mappedStatementId = randomString();
// Mock methods
DataPermissionContextHolder.add(AnnotationUtils.findAnnotation(TestClass05.class, DataPermission.class));
// Invoke
List<DataPermissionRule> result = dataPermissionRuleFactory.getDataPermissionRule(mappedStatementId);
// Assert
assertEquals(1, result.size());
assertEquals(DataPermissionRule02.class, result.get(0).getClass());
} |
@Override
public Map<T, Double> getWeightedSubset(Map<T, Double> weightMap, DeterministicSubsettingMetadata metadata)
{
if (metadata != null)
{
List<T> points = new ArrayList<>(weightMap.keySet());
Collections.sort(points);
Collections.shuffle(points, new Random(_randomSeed));
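// Sorting then shuffling with a fixed seed gives every client using the same seed an identical host order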
List<Double> weights = points.stream().map(weightMap::get).collect(Collectors.toList());
double totalWeight = weights.stream().mapToDouble(Double::doubleValue).sum();
if (totalWeight == 0)
{
return null;
}
Ring ring = new Ring(weights, totalWeight);
double offset = metadata.getInstanceId() / (double) metadata.getTotalInstanceCount();
double subsetSliceWidth = getSubsetSliceWidth(metadata.getTotalInstanceCount(), points.size());
List<Integer> indices = ring.getIndices(offset, subsetSliceWidth);
return indices.stream().collect(
Collectors.toMap(points::get, i -> round(ring.getWeight(i, offset, subsetSliceWidth), WEIGHT_DECIMAL_PLACE)));
}
else
{
_log.warn("Cannot retrieve metadata required for D2 subsetting. Revert to use all available hosts.");
return null;
}
} | @Test(dataProvider = "uniformWeightData")
public void testDistributionWithUniformWeight(int clientNum, int hostNum, int minSubsetSize)
{
double[] weights = new double[hostNum];
Arrays.fill(weights, 1D);
Map<String, Double> pointsMap = constructPointsMap(weights);
Map<String, Double> distributionMap = new HashMap<>();
for (int i = 0; i < clientNum; i++)
{
_deterministicSubsettingStrategy = new DeterministicSubsettingStrategy<>("test", minSubsetSize);
Map<String, Double> weightedSubset = _deterministicSubsettingStrategy.getWeightedSubset(pointsMap,
new DeterministicSubsettingMetadata(i, clientNum, 0));
assertTrue(weightedSubset.size() >= Math.min(minSubsetSize, hostNum));
for (Map.Entry<String, Double> entry: weightedSubset.entrySet())
{
distributionMap.put(entry.getKey(), distributionMap.getOrDefault(entry.getKey(), 0D) + entry.getValue());
}
}
double host0WeightSum = distributionMap.getOrDefault("host0", 0D);
for (double weightSum: distributionMap.values())
{
assertEquals(weightSum, host0WeightSum, DELTA_DIFF);
}
} |
public void replaceChars(int startPosition, int endPosition, String replacement) {
try {
sourceBuilder.replace(startPosition, endPosition, replacement);
} catch (StringIndexOutOfBoundsException e) {
throw new IndexOutOfBoundsException(
String.format(
"Replacement cannot be made. Source file %s has length %d, requested start "
+ "position %d, requested end position %d, replacement %s",
path, sourceBuilder.length(), startPosition, endPosition, replacement));
}
} | @Test
public void replaceChars() {
sourceFile.replaceChars(3, 8, "Sasquatch");
assertThat(sourceFile.getSourceText()).isEqualTo(SOURCE_TEXT.replace("Lorem", "Sasquatch"));
assertThat(sourceFile.getLines().get(0))
.isEqualTo("// Sasquatch ipsum dolor sit amet, consectetur adipisicing elit, sed do");
} |
@Override
public Object removeVariable(String name) {
if (variables.containsKey(name)) {
return variables.remove(name);
}
if (parent != null) {
return parent.removeVariable(name);
}
return null;
} | @Test
public void testRemoveVariable() {
ProcessContextImpl context = new ProcessContextImpl();
ProcessContextImpl parentContext = new ProcessContextImpl();
parentContext.setVariable("key", "value");
context.setParent(parentContext);
context.setVariable("key1", "value1");
context.removeVariable("key");
context.removeVariable("key1");
Assertions.assertEquals(0, context.getVariables().size());
} |
public void bootstrap(String device, int rootBandwidthMbit, int
yarnBandwidthMbit)
throws ResourceHandlerException {
if (device == null) {
throw new ResourceHandlerException("device cannot be null!");
}
String tmpDirBase = conf.get("hadoop.tmp.dir");
if (tmpDirBase == null) {
throw new ResourceHandlerException("hadoop.tmp.dir not set!");
}
tmpDirPath = tmpDirBase + "/nm-tc-rules";
File tmpDir = new File(tmpDirPath);
if (!(tmpDir.exists() || tmpDir.mkdirs())) {
LOG.warn("Unable to create directory: " + tmpDirPath);
throw new ResourceHandlerException("Unable to create directory: " +
tmpDirPath);
}
this.device = device;
this.rootBandwidthMbit = rootBandwidthMbit;
this.yarnBandwidthMbit = yarnBandwidthMbit;
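// The default traffic class gets the bandwidth left after YARN's share, or the full root bandwidth if YARN's share consumes it all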
defaultClassBandwidthMbit = (rootBandwidthMbit - yarnBandwidthMbit) <= 0
? rootBandwidthMbit : (rootBandwidthMbit - yarnBandwidthMbit);
boolean recoveryEnabled = conf.getBoolean(YarnConfiguration
.NM_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED);
String state = null;
if (!recoveryEnabled) {
LOG.info("NM recovery is not enabled. We'll wipe tc state before proceeding.");
} else {
//NM recovery enabled - run a state check
state = readState();
if (checkIfAlreadyBootstrapped(state)) {
LOG.info("TC configuration is already in place. Not wiping state.");
//We already have the list of existing container classes, if any
//that were created after bootstrapping
reacquireContainerClasses(state);
return;
} else {
LOG.info("TC configuration is incomplete. Wiping tc state before proceeding");
}
}
wipeState(); //start over in case previous bootstrap was incomplete
initializeState();
} | @Test
public void testBootstrapRecoveryEnabled() {
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
TrafficController trafficController = new TrafficController(conf,
privilegedOperationExecutorMock);
try {
//Return a default tc state when attempting to read state
when(privilegedOperationExecutorMock.executePrivilegedOperation(
any(PrivilegedOperation.class), eq(true)))
.thenReturn(DEFAULT_TC_STATE_EXAMPLE);
trafficController
.bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT);
ArgumentCaptor<PrivilegedOperation> readOpCaptor = ArgumentCaptor.forClass
(PrivilegedOperation.class);
//NM_RECOVERY_ENABLED - so we expect three privileged operation executions
//1) read tc state 2) wipe tc state 3) init tc state
//of the two write ops, one wipes tc state and the second initializes it
//First, verify read op
verify(privilegedOperationExecutorMock, times(1))
.executePrivilegedOperation(readOpCaptor.capture(), eq(true));
List<PrivilegedOperation> readOps = readOpCaptor.getAllValues();
verifyTrafficControlOperation(readOps.get(0),
PrivilegedOperation.OperationType.TC_READ_STATE,
Arrays.asList(READ_QDISC_CMD, READ_FILTER_CMD, READ_CLASS_CMD));
ArgumentCaptor<PrivilegedOperation> writeOpCaptor = ArgumentCaptor
.forClass(PrivilegedOperation.class);
verify(privilegedOperationExecutorMock, times(2))
.executePrivilegedOperation(writeOpCaptor.capture(), eq(false));
//Now verify that the two write operations were correct
List<PrivilegedOperation> writeOps = writeOpCaptor.getAllValues();
verifyTrafficControlOperation(writeOps.get(0),
PrivilegedOperation.OperationType.TC_MODIFY_STATE,
Arrays.asList(WIPE_STATE_CMD));
verifyTrafficControlOperation(writeOps.get(1),
PrivilegedOperation.OperationType.TC_MODIFY_STATE,
Arrays.asList(ADD_ROOT_QDISC_CMD, ADD_CGROUP_FILTER_CMD,
ADD_ROOT_CLASS_CMD, ADD_DEFAULT_CLASS_CMD, ADD_YARN_CLASS_CMD));
} catch (ResourceHandlerException | PrivilegedOperationException |
IOException e) {
LOG.error("Unexpected exception: " + e);
Assert.fail("Caught unexpected exception: "
+ e.getClass().getSimpleName());
}
} |
@Override
public Collection<RejectedAwarePlugin> getRejectedAwarePluginList() {
return mainLock.applyWithReadLock(rejectedAwarePluginList::getPlugins);
} | @Test
public void testGetRejectedAwarePluginList() {
manager.register(new TestRejectedAwarePlugin());
Assert.assertEquals(1, manager.getRejectedAwarePluginList().size());
} |
@Override
public Optional<SimpleAddress> selectAddress(Optional<String> addressSelectionContext)
{
if (addressSelectionContext.isPresent()) {
return addressSelectionContext
.map(HostAndPort::fromString)
.map(SimpleAddress::new);
}
List<HostAndPort> resourceManagers = internalNodeManager.getResourceManagers().stream()
.filter(node -> node.getThriftPort().isPresent())
.map(resourceManagerNode -> {
HostAddress hostAndPort = resourceManagerNode.getHostAndPort();
return HostAndPort.fromParts(hostAndPort.getHostText(), resourceManagerNode.getThriftPort().getAsInt());
})
.collect(toImmutableList());
return hostSelector.apply(resourceManagers).map(SimpleAddress::new);
} | @Test
public void testAddressSelectionNoContext()
{
InMemoryNodeManager internalNodeManager = new InMemoryNodeManager();
RandomResourceManagerAddressSelector selector = new RandomResourceManagerAddressSelector(internalNodeManager, hostAndPorts -> Optional.of(hostAndPorts.get(0)));
internalNodeManager.addNode(
CONNECTOR_ID,
new InternalNode(
"1",
URI.create("local://localhost:123/1"),
OptionalInt.empty(),
"1",
false,
true,
false,
false));
internalNodeManager.addNode(
CONNECTOR_ID,
new InternalNode(
"2",
URI.create("local://localhost:456/1"),
OptionalInt.of(2),
"1",
false,
true,
false,
false));
internalNodeManager.addNode(
CONNECTOR_ID,
new InternalNode(
"3",
URI.create("local://localhost:789/2"),
OptionalInt.of(3),
"1",
false,
true,
false,
false));
Optional<SimpleAddressSelector.SimpleAddress> address = selector.selectAddress(Optional.empty());
assertTrue(address.isPresent());
assertEquals(address.get().getHostAndPort(), HostAndPort.fromParts("localhost", 2));
} |
public CompletableFuture<Acknowledge> triggerSavepoint(
AsynchronousJobOperationKey operationKey,
String targetDirectory,
SavepointFormatType formatType,
TriggerSavepointMode savepointMode,
Time timeout) {
return registerOperationIdempotently(
operationKey,
() ->
triggerSavepointFunction.apply(
operationKey.getJobId(),
targetDirectory,
formatType,
savepointMode,
timeout));
} | @Test
public void triggerSavepointRepeatedly() throws ExecutionException, InterruptedException {
CompletableFuture<Acknowledge> firstAcknowledge =
handler.triggerSavepoint(
operationKey,
targetDirectory,
SavepointFormatType.CANONICAL,
TriggerSavepointMode.SAVEPOINT,
TIMEOUT);
CompletableFuture<Acknowledge> secondAcknowledge =
handler.triggerSavepoint(
operationKey,
targetDirectory,
SavepointFormatType.CANONICAL,
TriggerSavepointMode.SAVEPOINT,
TIMEOUT);
assertThat(triggerSavepointFunction.getNumberOfInvocations(), is(1));
assertThat(
triggerSavepointFunction.getInvocationParameters().get(0),
is(
new Tuple4<>(
jobID,
targetDirectory,
SavepointFormatType.CANONICAL,
TriggerSavepointMode.SAVEPOINT)));
assertThat(firstAcknowledge.get(), is(Acknowledge.get()));
assertThat(secondAcknowledge.get(), is(Acknowledge.get()));
} |
void shuffle()
{
close();
clearExclusion();
final Random random = ThreadLocalRandom.current();
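// Fisher-Yates shuffle: swap each element with a randomly chosen element at or below its index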
for (int i = endpoints.length; --i > -1;)
{
final int j = random.nextInt(i + 1);
final String tmp = endpoints[i];
endpoints[i] = endpoints[j];
endpoints[j] = tmp;
}
} | @Test
void shouldEventuallyGetADifferentOrderAfterShuffle()
{
final String[] originalOrder = Arrays.copyOf(endpoints, endpoints.length);
int differenceCount = 0;
for (int i = 0; i < 100; i++)
{
publicationGroup.shuffle();
differenceCount += !Arrays.equals(originalOrder, endpoints) ? 1 : 0;
}
assertNotEquals(0, differenceCount);
} |
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.GET;
} | @Test
void testHttpMethod() {
assertThat(metricsHandlerHeaders.getHttpMethod()).isEqualTo(HttpMethodWrapper.GET);
} |
public <E extends Enum<E>> void logUntetheredSubscriptionStateChange(
final E oldState, final E newState, final long subscriptionId, final int streamId, final int sessionId)
{
final int length = untetheredSubscriptionStateChangeLength(oldState, newState);
final int captureLength = captureLength(length);
final int encodedLength = encodedLength(captureLength);
final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
final int index = ringBuffer.tryClaim(toEventCodeId(UNTETHERED_SUBSCRIPTION_STATE_CHANGE), encodedLength);
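// tryClaim returns a positive index when buffer space was claimed; otherwise the event is dropped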
if (index > 0)
{
try
{
encodeUntetheredSubscriptionStateChange(
(UnsafeBuffer)ringBuffer.buffer(),
index,
captureLength,
length,
oldState,
newState,
subscriptionId,
streamId,
sessionId);
}
finally
{
ringBuffer.commit(index);
}
}
} | @Test
void logUntetheredSubscriptionStateChange()
{
final int recordOffset = align(192, ALIGNMENT);
logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, recordOffset);
final TimeUnit from = TimeUnit.DAYS;
final TimeUnit to = TimeUnit.NANOSECONDS;
final long subscriptionId = Long.MIN_VALUE;
final int streamId = 61;
final int sessionId = 8;
final int captureLength = captureLength(untetheredSubscriptionStateChangeLength(from, to));
logger.logUntetheredSubscriptionStateChange(from, to, subscriptionId, streamId, sessionId);
verifyLogHeader(
logBuffer, recordOffset, toEventCodeId(UNTETHERED_SUBSCRIPTION_STATE_CHANGE), captureLength, captureLength);
assertEquals(subscriptionId,
logBuffer.getLong(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH), LITTLE_ENDIAN));
assertEquals(streamId,
logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_LONG), LITTLE_ENDIAN));
assertEquals(sessionId,
logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_LONG + SIZE_OF_INT),
LITTLE_ENDIAN));
assertEquals(from.name() + STATE_SEPARATOR + to.name(), logBuffer.getStringAscii(
encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_INT * 2 + SIZE_OF_LONG), LITTLE_ENDIAN));
} |
@JsonCreator
public static DataSize parse(CharSequence size) {
return parse(size, DataSizeUnit.BYTES);
} | @Test
void unableParseWrongDataSizeCount() {
assertThatIllegalArgumentException()
.isThrownBy(() -> DataSize.parse("three bytes"))
.withMessage("Invalid size: three bytes");
} |
@Override
public void execute(ComputationStep.Context context) {
new PathAwareCrawler<>(
FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
.buildFor(List.of(duplicationFormula)))
.visit(treeRootHolder.getRoot());
} | @Test
public void compute_duplicated_blocks_one_for_original_one_for_each_InnerDuplicate() {
TextBlock original = new TextBlock(1, 1);
duplicationRepository.addDuplication(FILE_1_REF, original, new TextBlock(2, 2), new TextBlock(4, 4), new TextBlock(3, 4));
setNewLines(FILE_1);
underTest.execute(new TestComputationStepContext());
assertRawMeasureValue(FILE_1_REF, NEW_BLOCKS_DUPLICATED_KEY, 4);
} |
@Override
public TradePriceCalculateRespBO calculatePrice(TradePriceCalculateReqBO calculateReqBO) {
// 1.1 Fetch the product SKU list
List<ProductSkuRespDTO> skuList = checkSkuList(calculateReqBO);
// 1.2 Fetch the product SPU list
List<ProductSpuRespDTO> spuList = checkSpuList(skuList);
// 2.1 Calculate the price
TradePriceCalculateRespBO calculateRespBO = TradePriceCalculatorHelper
.buildCalculateResp(calculateReqBO, spuList, skuList);
priceCalculators.forEach(calculator -> calculator.calculate(calculateReqBO, calculateRespBO));
// 2.2 If the final payable amount is less than or equal to 0, throw a business exception
if (calculateRespBO.getPrice().getPayPrice() <= 0) {
log.error("[calculatePrice][价格计算不正确,请求 calculateReqDTO({}),结果 priceCalculate({})]",
calculateReqBO, calculateRespBO);
throw exception(PRICE_CALCULATE_PAY_PRICE_ILLEGAL);
}
return calculateRespBO;
} | @Test
public void testCalculatePrice() {
// Prepare parameters
TradePriceCalculateReqBO calculateReqBO = new TradePriceCalculateReqBO()
.setUserId(10L)
.setCouponId(20L).setAddressId(30L)
.setItems(Arrays.asList(
new TradePriceCalculateReqBO.Item().setSkuId(100L).setCount(1).setSelected(true),
new TradePriceCalculateReqBO.Item().setSkuId(200L).setCount(3).setSelected(true),
new TradePriceCalculateReqBO.Item().setSkuId(300L).setCount(6).setCartId(233L).setSelected(false)
));
// Mock methods
List<ProductSkuRespDTO> skuList = Arrays.asList(
new ProductSkuRespDTO().setId(100L).setStock(500).setPrice(1000).setPicUrl("https://t.cn/1.png").setSpuId(1001L)
.setProperties(singletonList(new ProductPropertyValueDetailRespDTO().setPropertyId(1L).setPropertyName("颜色")
.setValueId(2L).setValueName("红色"))),
new ProductSkuRespDTO().setId(200L).setStock(400).setPrice(2000).setPicUrl("https://t.cn/2.png").setSpuId(1001L)
.setProperties(singletonList(new ProductPropertyValueDetailRespDTO().setPropertyId(1L).setPropertyName("颜色")
.setValueId(3L).setValueName("黄色"))),
new ProductSkuRespDTO().setId(300L).setStock(600).setPrice(3000).setPicUrl("https://t.cn/3.png").setSpuId(1001L)
.setProperties(singletonList(new ProductPropertyValueDetailRespDTO().setPropertyId(1L).setPropertyName("颜色")
.setValueId(4L).setValueName("黑色")))
);
when(productSkuApi.getSkuList(Mockito.eq(asSet(100L, 200L, 300L)))).thenReturn(skuList);
when(productSpuApi.getSpuList(Mockito.eq(asSet(1001L))))
.thenReturn(singletonList(new ProductSpuRespDTO().setId(1001L).setName("小菜").setCategoryId(666L)
.setStatus(ProductSpuStatusEnum.ENABLE.getStatus())));
// Invoke
TradePriceCalculateRespBO calculateRespBO = tradePriceService.calculatePrice(calculateReqBO);
// Assert
assertEquals(TradeOrderTypeEnum.NORMAL.getType(), calculateRespBO.getType());
assertEquals(0, calculateRespBO.getPromotions().size());
assertNull(calculateRespBO.getCouponId());
// Assert: order price
assertEquals(7000, calculateRespBO.getPrice().getTotalPrice());
assertEquals(0, calculateRespBO.getPrice().getDiscountPrice());
assertEquals(0, calculateRespBO.getPrice().getDeliveryPrice());
assertEquals(0, calculateRespBO.getPrice().getCouponPrice());
assertEquals(0, calculateRespBO.getPrice().getPointPrice());
assertEquals(7000, calculateRespBO.getPrice().getPayPrice());
// Assert: SKU 1
assertEquals(1001L, calculateRespBO.getItems().get(0).getSpuId());
assertEquals(100L, calculateRespBO.getItems().get(0).getSkuId());
assertEquals(1, calculateRespBO.getItems().get(0).getCount());
assertNull(calculateRespBO.getItems().get(0).getCartId());
assertTrue(calculateRespBO.getItems().get(0).getSelected());
assertEquals(1000, calculateRespBO.getItems().get(0).getPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getDiscountPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getDeliveryPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getCouponPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getPointPrice());
assertEquals(1000, calculateRespBO.getItems().get(0).getPayPrice());
assertEquals("小菜", calculateRespBO.getItems().get(0).getSpuName());
assertEquals("https://t.cn/1.png", calculateRespBO.getItems().get(0).getPicUrl());
assertEquals(666L, calculateRespBO.getItems().get(0).getCategoryId());
assertEquals(skuList.get(0).getProperties(), calculateRespBO.getItems().get(0).getProperties());
// Assert: SKU 2
assertEquals(1001L, calculateRespBO.getItems().get(1).getSpuId());
assertEquals(200L, calculateRespBO.getItems().get(1).getSkuId());
assertEquals(3, calculateRespBO.getItems().get(1).getCount());
assertNull(calculateRespBO.getItems().get(1).getCartId());
assertTrue(calculateRespBO.getItems().get(1).getSelected());
assertEquals(2000, calculateRespBO.getItems().get(1).getPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getDiscountPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getDeliveryPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getCouponPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getPointPrice());
assertEquals(6000, calculateRespBO.getItems().get(1).getPayPrice());
assertEquals("小菜", calculateRespBO.getItems().get(1).getSpuName());
assertEquals("https://t.cn/2.png", calculateRespBO.getItems().get(1).getPicUrl());
assertEquals(666L, calculateRespBO.getItems().get(1).getCategoryId());
assertEquals(skuList.get(1).getProperties(), calculateRespBO.getItems().get(1).getProperties());
// Assert: SKU 3
assertEquals(1001L, calculateRespBO.getItems().get(2).getSpuId());
assertEquals(300L, calculateRespBO.getItems().get(2).getSkuId());
assertEquals(6, calculateRespBO.getItems().get(2).getCount());
assertEquals(233L, calculateRespBO.getItems().get(2).getCartId());
assertFalse(calculateRespBO.getItems().get(2).getSelected());
assertEquals(3000, calculateRespBO.getItems().get(2).getPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getDiscountPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getDeliveryPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getCouponPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getPointPrice());
assertEquals(18000, calculateRespBO.getItems().get(2).getPayPrice());
assertEquals("小菜", calculateRespBO.getItems().get(2).getSpuName());
assertEquals("https://t.cn/3.png", calculateRespBO.getItems().get(2).getPicUrl());
assertEquals(666L, calculateRespBO.getItems().get(2).getCategoryId());
assertEquals(skuList.get(2).getProperties(), calculateRespBO.getItems().get(2).getProperties());
} |
public void register(RegisterRequest request) {
Optional<User> userOptional = userRepository.findByIdentificationNumber(request.getIdentificationNumber());
if (userOptional.isPresent()) {
throw GenericException.builder()
.httpStatus(HttpStatus.BAD_REQUEST)
.logMessage(this.getClass().getName() + ".register user already exists with identification number {0}", request.getIdentificationNumber())
.message(ErrorCode.USER_ALREADY_EXISTS)
.build();
}
User user = User.builder().identificationNumber(request.getIdentificationNumber()).firstname(request.getFirstName()).lastname(request.getLastName()).password(passwordEncoder.encode(request.getPassword())).build();
userRepository.save(user);
} | @Test
void register_successfulRegistration() {
// Arrange
RegisterRequest request = new RegisterRequest("1234567890", "John", "Doe", "password");
when(userRepository.findByIdentificationNumber(request.getIdentificationNumber())).thenReturn(Optional.empty());
// Act
userService.register(request);
// Assert
verify(userRepository, times(1)).save(any(User.class));
} |
@NonNull
public static Permutor<FeedItem> getPermutor(@NonNull SortOrder sortOrder) {
Comparator<FeedItem> comparator = null;
Permutor<FeedItem> permutor = null;
switch (sortOrder) {
case EPISODE_TITLE_A_Z:
comparator = (f1, f2) -> itemTitle(f1).compareTo(itemTitle(f2));
break;
case EPISODE_TITLE_Z_A:
comparator = (f1, f2) -> itemTitle(f2).compareTo(itemTitle(f1));
break;
case DATE_OLD_NEW:
comparator = (f1, f2) -> pubDate(f1).compareTo(pubDate(f2));
break;
case DATE_NEW_OLD:
comparator = (f1, f2) -> pubDate(f2).compareTo(pubDate(f1));
break;
case DURATION_SHORT_LONG:
comparator = (f1, f2) -> Integer.compare(duration(f1), duration(f2));
break;
case DURATION_LONG_SHORT:
comparator = (f1, f2) -> Integer.compare(duration(f2), duration(f1));
break;
case EPISODE_FILENAME_A_Z:
comparator = (f1, f2) -> itemLink(f1).compareTo(itemLink(f2));
break;
case EPISODE_FILENAME_Z_A:
comparator = (f1, f2) -> itemLink(f2).compareTo(itemLink(f1));
break;
case FEED_TITLE_A_Z:
comparator = (f1, f2) -> feedTitle(f1).compareTo(feedTitle(f2));
break;
case FEED_TITLE_Z_A:
comparator = (f1, f2) -> feedTitle(f2).compareTo(feedTitle(f1));
break;
case RANDOM:
permutor = Collections::shuffle;
break;
case SMART_SHUFFLE_OLD_NEW:
permutor = (queue) -> smartShuffle(queue, true);
break;
case SMART_SHUFFLE_NEW_OLD:
permutor = (queue) -> smartShuffle(queue, false);
break;
case SIZE_SMALL_LARGE:
comparator = (f1, f2) -> Long.compare(size(f1), size(f2));
break;
case SIZE_LARGE_SMALL:
comparator = (f1, f2) -> Long.compare(size(f2), size(f1));
break;
case COMPLETION_DATE_NEW_OLD:
comparator = (f1, f2) -> f2.getMedia().getPlaybackCompletionDate()
.compareTo(f1.getMedia().getPlaybackCompletionDate());
break;
default:
throw new IllegalArgumentException("Permutor not implemented");
}
if (comparator != null) {
final Comparator<FeedItem> comparator2 = comparator;
permutor = (queue) -> Collections.sort(queue, comparator2);
}
return permutor;
} | @Test
public void testPermutorForRule_DATE_ASC() {
Permutor<FeedItem> permutor = FeedItemPermutors.getPermutor(SortOrder.DATE_OLD_NEW);
List<FeedItem> itemList = getTestList();
assertTrue(checkIdOrder(itemList, 1, 3, 2)); // before sorting
permutor.reorder(itemList);
assertTrue(checkIdOrder(itemList, 1, 2, 3)); // after sorting
} |
@Override
public void onNewResourcesAvailable() {
checkDesiredOrSufficientResourcesAvailable();
} | @Test
void testSchedulingWithSufficientResourcesAndNoStabilizationTimeout() {
Duration noStabilizationTimeout = Duration.ofMillis(0);
WaitingForResources wfr =
new WaitingForResources(ctx, LOG, Duration.ofSeconds(1000), noStabilizationTimeout);
ctx.setHasDesiredResources(() -> false);
ctx.setHasSufficientResources(() -> true);
ctx.setExpectCreatingExecutionGraph();
wfr.onNewResourcesAvailable();
} |
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
if ( list == null || list.isEmpty() ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
} else {
try {
return FEELFnResult.ofResult(Collections.max(list, new InterceptNotComparableComparator()));
} catch (ClassCastException e) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
}
}
} | @Test
void invokeEmptyArray() {
FunctionTestUtil.assertResultError(maxFunction.invoke(new Object[]{}), InvalidParametersEvent.class);
} |
@Override
public boolean isNeedRewrite(final SQLStatementContext sqlStatementContext) {
return sqlStatementContext instanceof InsertStatementContext
&& ((InsertStatementContext) sqlStatementContext).getGeneratedKeyContext().isPresent()
&& ((InsertStatementContext) sqlStatementContext).getGeneratedKeyContext().get().isGenerated()
&& !((InsertStatementContext) sqlStatementContext).getGeneratedKeyContext().get().getGeneratedValues().isEmpty();
} | @Test
void assertIsNeedRewrite() {
GeneratedKeyInsertValueParameterRewriter paramRewriter = new GeneratedKeyInsertValueParameterRewriter();
SelectStatementContext selectStatementContext = mock(SelectStatementContext.class);
assertFalse(paramRewriter.isNeedRewrite(selectStatementContext));
InsertStatementContext insertStatementContext = mock(InsertStatementContext.class, RETURNS_DEEP_STUBS);
assertFalse(paramRewriter.isNeedRewrite(insertStatementContext));
when(insertStatementContext.getGeneratedKeyContext().isPresent()).thenReturn(Boolean.TRUE);
assertFalse(paramRewriter.isNeedRewrite(insertStatementContext));
when(insertStatementContext.getGeneratedKeyContext().get().isGenerated()).thenReturn(Boolean.TRUE);
when(insertStatementContext.getGeneratedKeyContext().get().getGeneratedValues().isEmpty()).thenReturn(Boolean.TRUE);
assertFalse(paramRewriter.isNeedRewrite(insertStatementContext));
when(insertStatementContext.getGeneratedKeyContext().get().getGeneratedValues().isEmpty()).thenReturn(Boolean.FALSE);
assertTrue(paramRewriter.isNeedRewrite(insertStatementContext));
} |
public void notifyOfError(Throwable error) {
if (error != null && this.error == null) {
this.error = error;
// this should wake up any blocking calls
try {
connectedSocket.close();
} catch (Throwable ignored) {
}
try {
socket.close();
} catch (Throwable ignored) {
}
}
} | @Test
void testIteratorWithException() throws Exception {
final SocketStreamIterator<Long> iterator =
new SocketStreamIterator<>(LongSerializer.INSTANCE);
// asynchronously set an error
new Thread() {
@Override
public void run() {
try {
Thread.sleep(100);
} catch (InterruptedException ignored) {
}
iterator.notifyOfError(new Exception("test"));
}
}.start();
assertThatThrownBy(iterator::hasNext)
.isInstanceOf(RuntimeException.class)
.hasMessageContaining("test");
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
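// Success and failure handlers re-synchronize on the Fetcher so responses are applied one at a time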
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testFetchPositionAfterException() {
// verify the advancement in the next fetch offset equals the number of fetched records when
// some fetched partitions cause Exception. This ensures that the consumer won't lose records upon exception
buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
assignFromUser(mkSet(tp0, tp1));
subscriptions.seek(tp0, 1);
subscriptions.seek(tp1, 1);
assertEquals(1, sendFetches());
Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
partitions.put(tidp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setHighWatermark(100)
.setRecords(records));
partitions.put(tidp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
.setHighWatermark(100));
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
consumerClient.poll(time.timer(0));
List<ConsumerRecord<byte[], byte[]>> allFetchedRecords = new ArrayList<>();
fetchRecordsInto(allFetchedRecords);
assertEquals(1, subscriptions.position(tp0).offset);
assertEquals(4, subscriptions.position(tp1).offset);
assertEquals(3, allFetchedRecords.size());
OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () ->
fetchRecordsInto(allFetchedRecords));
assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet());
assertEquals(1L, e.offsetOutOfRangePartitions().get(tp0).longValue());
assertEquals(1, subscriptions.position(tp0).offset);
assertEquals(4, subscriptions.position(tp1).offset);
assertEquals(3, allFetchedRecords.size());
} |
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
if (statement.getStatement() instanceof CreateSource) {
return handleCreateSource((ConfiguredStatement<CreateSource>) statement);
}
return statement;
} | @Test
public void shouldInjectMissingValueFormat() {
// Given
givenConfig(ImmutableMap.of(
KsqlConfig.KSQL_DEFAULT_VALUE_FORMAT_CONFIG, "JSON"
));
givenSourceProps(ImmutableMap.of(
"KEY_FORMAT", new StringLiteral("KAFKA")
));
// When
final ConfiguredStatement<?> result = injector.inject(csStatement);
// Then
assertThat(result.getMaskedStatementText(), containsString("VALUE_FORMAT='JSON'"));
} |
@PublicAPI(usage = ACCESS)
public JavaClasses importClasses(Class<?>... classes) {
return importClasses(Arrays.asList(classes));
} | @Test
public void imports_enclosing_constructor_of_local_class() throws ClassNotFoundException {
@SuppressWarnings("unused")
class ClassCreatingLocalClassInConstructor {
ClassCreatingLocalClassInConstructor() {
class SomeLocalClass {
}
}
}
String localClassName = ClassCreatingLocalClassInConstructor.class.getName() + "$1SomeLocalClass";
JavaClasses classes = new ClassFileImporter().importClasses(
ClassCreatingLocalClassInConstructor.class, Class.forName(localClassName)
);
JavaClass enclosingClass = classes.get(ClassCreatingLocalClassInConstructor.class);
JavaClass localClass = classes.get(localClassName);
assertThat(localClass.getEnclosingCodeUnit()).contains(enclosingClass.getConstructor(getClass()));
assertThat(localClass.getEnclosingClass()).contains(enclosingClass);
} |
private List<Configserver> getConfigServers(DeployState deployState, TreeConfigProducer<AnyConfigProducer> parent, Element adminE) {
Element configserversE = XML.getChild(adminE, "configservers");
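// No <configservers> element: derive a single config server from <adminserver>, or create a default one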
if (configserversE == null) {
Element adminserver = XML.getChild(adminE, "adminserver");
if (adminserver == null) {
return createSingleConfigServer(deployState, parent);
} else {
SimpleConfigProducer<AnyConfigProducer> configServers = new SimpleConfigProducer<>(parent, "configservers");
return List.of(new ConfigserverBuilder(0, configServerSpecs).build(deployState, configServers, adminserver));
}
}
else {
SimpleConfigProducer<AnyConfigProducer> configServers = new SimpleConfigProducer<>(parent, "configservers");
List<Configserver> configservers = new ArrayList<>();
int i = 0;
for (Element configserverE : XML.getChildren(configserversE, "configserver"))
configservers.add(new ConfigserverBuilder(i++, configServerSpecs).build(deployState, configServers, configserverE));
return configservers;
}
} | @Test
void noAdminServerOrConfigServer() {
Admin admin = buildAdmin(servicesAdminNoAdminServerOrConfigServer());
assertEquals(1, admin.getConfigservers().size());
} |
public static Map<String, PluginConfiguration> load(final File agentRootPath) throws IOException {
return YamlPluginConfigurationLoader.load(new File(agentRootPath, Paths.get("conf", "agent.yaml").toString())).map(YamlPluginsConfigurationSwapper::swap).orElse(Collections.emptyMap());
} | @Test
void assertLoad() throws IOException {
Map<String, PluginConfiguration> actual = PluginConfigurationLoader.load(new File(getResourceURL()));
assertThat(actual.size(), is(3));
assertLoggingPluginConfiguration(actual.get("log_fixture"));
assertMetricsPluginConfiguration(actual.get("metrics_fixture"));
assertTracingPluginConfiguration(actual.get("tracing_fixture"));
} |
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
cachedNanoClock.update(nowNs);
dutyCycleTracker.measureAndUpdate(nowNs);
final int workCount = commandQueue.drain(CommandProxy.RUN_TASK, Configuration.COMMAND_DRAIN_LIMIT);
final long shortSendsBefore = shortSends.get();
final int bytesSent = doSend(nowNs);
int bytesReceived = 0;
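// Poll the control transport only when nothing was sent, the duty-cycle ratio is reached, the status-message deadline has passed, or a short send occurred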
if (0 == bytesSent ||
++dutyCycleCounter >= dutyCycleRatio ||
(controlPollDeadlineNs - nowNs < 0) ||
shortSendsBefore < shortSends.get())
{
bytesReceived = controlTransportPoller.pollTransports();
dutyCycleCounter = 0;
controlPollDeadlineNs = nowNs + statusMessageReadTimeoutNs;
}
if (reResolutionCheckIntervalNs > 0 && (reResolutionDeadlineNs - nowNs) < 0)
{
reResolutionDeadlineNs = nowNs + reResolutionCheckIntervalNs;
controlTransportPoller.checkForReResolutions(nowNs, conductorProxy);
}
return workCount + bytesSent + bytesReceived;
} | @Test
void shouldSendLastDataFrameAsHeartbeatWhenIdle()
{
final StatusMessageFlyweight msg = mock(StatusMessageFlyweight.class);
when(msg.consumptionTermId()).thenReturn(INITIAL_TERM_ID);
when(msg.consumptionTermOffset()).thenReturn(0);
when(msg.receiverWindowLength()).thenReturn(ALIGNED_FRAME_LENGTH);
publication.onStatusMessage(msg, rcvAddress, mockDriverConductorProxy);
final UnsafeBuffer buffer = new UnsafeBuffer(ByteBuffer.allocateDirect(PAYLOAD.length));
buffer.putBytes(0, PAYLOAD);
appendUnfragmentedMessage(rawLog, 0, INITIAL_TERM_ID, 0, headerWriter, buffer, 0, PAYLOAD.length);
sender.doWork();
assertThat(receivedFrames.size(), is(1)); // should send data frame
receivedFrames.remove(); // skip data frame
nanoClock.advance(Configuration.PUBLICATION_HEARTBEAT_TIMEOUT_NS - 1);
sender.doWork();
assertThat(receivedFrames.size(), is(0)); // should not send yet
nanoClock.advance(10);
sender.doWork();
assertThat(receivedFrames.size(), greaterThanOrEqualTo(1)); // should send ticks
dataHeader.wrap(receivedFrames.remove());
assertThat(dataHeader.frameLength(), is(0));
assertThat(dataHeader.termOffset(), is(offsetOfMessage(2)));
} |
@Override
protected String[] getTypeNames() {
return new String[] { TYPE_NAME };
} | @Test
public void getTypeNames() {
assertArrayEquals( new String[] { ElementTransfer.TYPE_NAME }, elementTransfer.getTypeNames() );
} |
Map<String, Object> sourceAdminConfig(String role) {
Map<String, Object> props = new HashMap<>();
props.putAll(originalsWithPrefix(SOURCE_CLUSTER_PREFIX));
props.keySet().retainAll(MirrorClientConfig.CLIENT_CONFIG_DEF.names());
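// Generic and source-specific admin overrides are applied after the cluster-level client configs, so they take precedence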
props.putAll(originalsWithPrefix(ADMIN_CLIENT_PREFIX));
props.putAll(originalsWithPrefix(SOURCE_PREFIX + ADMIN_CLIENT_PREFIX));
addClientId(props, role);
return props;
} | @Test
public void testSourceAdminConfigWithSourcePrefix() {
String prefix = MirrorConnectorConfig.SOURCE_PREFIX + MirrorConnectorConfig.ADMIN_CLIENT_PREFIX;
Map<String, String> connectorProps = makeProps(prefix + "connections.max.idle.ms", "10000");
MirrorConnectorConfig config = new TestMirrorConnectorConfig(connectorProps);
Map<String, Object> connectorAdminProps = config.sourceAdminConfig("test");
Map<String, Object> expectedAdminProps = new HashMap<>();
expectedAdminProps.put("connections.max.idle.ms", "10000");
expectedAdminProps.put("client.id", "source1->target2|ConnectorName|test");
assertEquals(expectedAdminProps, connectorAdminProps, prefix + " source connector admin props not matching");
} |
public Optional<Object> retrieveSingleValue(final Object jsonObject, final String valueName) {
final Map<String, Object> map = objectMapper.convertValue(jsonObject, new TypeReference<>() {});
return Optional.ofNullable(map.get(valueName));
} | @Test
void testRetrievesSingleValue() {
Optional<Object> value = toTest.retrieveSingleValue(new TestJson(42, "ho!"), "test_string");
assertEquals(Optional.of("ho!"), value);
value = toTest.retrieveSingleValue(new TestJson(42, "ho!"), "test_int");
assertEquals(Optional.of(42), value);
} |
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
map.put(
MetricCollectors.RESOURCE_LABEL_PREFIX
+ StreamsConfig.APPLICATION_ID_CONFIG,
applicationId
);
// Streams client metrics aren't used in Confluent deployment
possiblyConfigureConfluentTelemetry(map);
return Collections.unmodifiableMap(map);
} | @Test
public void shouldOverrideStreamsConfigProperties() {
Map<String, Object> originals = new HashMap<>();
originals.put(KsqlConfig.KSQL_STREAMS_PREFIX + SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
"kafka.jks");
originals.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
"https.jks");
final KsqlConfig ksqlConfig = new KsqlConfig(originals);
assertThat(ksqlConfig.getKsqlStreamConfigProps().
get(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), equalTo("kafka.jks"));
} |
public AgentConfigurationsTable readAgentConfigurationsTable() {
AgentConfigurationsTable agentConfigurationsTable = new AgentConfigurationsTable();
try {
if (Objects.nonNull(yamlData)) {
Map configurationsData = (Map) yamlData.get("configurations");
if (configurationsData != null) {
configurationsData.forEach((k, v) -> {
Map map = (Map) v;
StringBuilder serviceConfigStr = new StringBuilder();
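// Accumulate key:value pairs into a string that is hashed below as the configuration's UUID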
Map<String, String> config = new HashMap<>(map.size());
map.forEach((key, value) -> {
config.put(key.toString(), value.toString());
serviceConfigStr.append(key).append(":").append(value);
});
// noinspection UnstableApiUsage
AgentConfigurations agentConfigurations = new AgentConfigurations(
k.toString(), config,
Hashing.sha512().hashString(
serviceConfigStr.toString(), StandardCharsets.UTF_8).toString()
);
agentConfigurationsTable.getAgentConfigurationsCache()
.put(agentConfigurations.getService(), agentConfigurations);
});
}
}
} catch (Exception e) {
log.error("Read ConfigurationDiscovery configurations error.", e);
}
return agentConfigurationsTable;
} | @Test
public void testReadAgentConfigurations() {
AgentConfigurationsReader reader = new AgentConfigurationsReader(
this.getClass().getClassLoader().getResourceAsStream("agent-dynamic-configuration.yml"));
Map<String, AgentConfigurations> configurationCache = reader.readAgentConfigurationsTable()
.getAgentConfigurationsCache();
Assertions.assertEquals(2, configurationCache.size());
AgentConfigurations agentConfigurations0 = configurationCache.get("serviceA");
Assertions.assertEquals("serviceA", agentConfigurations0.getService());
Assertions.assertEquals(2, agentConfigurations0.getConfiguration().size());
Assertions.assertEquals("1000", agentConfigurations0.getConfiguration().get("trace.sample_rate"));
Assertions.assertEquals(
"/api/seller/seller/*", agentConfigurations0.getConfiguration().get("trace.ignore_path"));
Assertions.assertEquals(
"92670f1ccbdee60e14ffc054d70a5cf3f93f6b5fb1adb83b10bea4fec79b96e7bc5e7b188e231428853721ded42ec756663947316065617f3cfdf51d6dfc8da6",
agentConfigurations0.getUuid()
);
AgentConfigurations agentConfigurations1 = configurationCache.get("serviceB");
Assertions.assertEquals("serviceB", agentConfigurations1.getService());
Assertions.assertEquals(2, agentConfigurations1.getConfiguration().size());
Assertions.assertEquals("1000", agentConfigurations1.getConfiguration().get("trace.sample_rate"));
Assertions.assertEquals(
"/api/seller/seller/*", agentConfigurations1.getConfiguration().get("trace.ignore_path"));
Assertions.assertEquals(
"92670f1ccbdee60e14ffc054d70a5cf3f93f6b5fb1adb83b10bea4fec79b96e7bc5e7b188e231428853721ded42ec756663947316065617f3cfdf51d6dfc8da6",
agentConfigurations1.getUuid()
);
} |
@Override
public void process(Exchange exchange) throws Exception {
String operation = getOperation(exchange);
switch (operation) {
case GlanceConstants.RESERVE:
doReserve(exchange);
break;
case OpenstackConstants.CREATE:
doCreate(exchange);
break;
case OpenstackConstants.UPDATE:
doUpdate(exchange);
break;
case GlanceConstants.UPLOAD:
doUpload(exchange);
break;
case OpenstackConstants.GET:
doGet(exchange);
break;
case OpenstackConstants.GET_ALL:
doGetAll(exchange);
break;
case OpenstackConstants.DELETE:
doDelete(exchange);
break;
default:
throw new IllegalArgumentException("Unsupported operation " + operation);
}
} | @Test
public void uploadWithUpdatingTest() throws Exception {
final String newName = "newName";
dummyImage.setName(newName);
when(osImage.getName()).thenReturn(newName);
msg.setHeader(OpenstackConstants.OPERATION, GlanceConstants.UPLOAD);
final String id = "id";
msg.setHeader(OpenstackConstants.ID, id);
msg.setHeader(OpenstackConstants.NAME, dummyImage.getName());
msg.setHeader(GlanceConstants.OWNER, dummyImage.getOwner());
msg.setHeader(GlanceConstants.MIN_DISK, dummyImage.getMinDisk());
msg.setHeader(GlanceConstants.MIN_RAM, dummyImage.getMinRam());
msg.setHeader(GlanceConstants.CHECKSUM, dummyImage.getChecksum());
msg.setHeader(GlanceConstants.DISK_FORMAT, dummyImage.getDiskFormat());
msg.setHeader(GlanceConstants.CONTAINER_FORMAT, dummyImage.getContainerFormat());
final File file = File.createTempFile("image", ".iso");
msg.setBody(file);
producer.process(exchange);
verify(imageService).upload(imageIdCaptor.capture(), payloadCaptor.capture(), imageCaptor.capture());
assertEquals(id, imageIdCaptor.getValue());
assertEquals(file, payloadCaptor.getValue().getRaw());
assertEquals(newName, imageCaptor.getValue().getName());
final Image result = msg.getBody(Image.class);
assertNotNull(result.getId());
assertEqualsImages(dummyImage, result);
} |
public void processAg31(Ag31 ag31, Afnemersbericht afnemersbericht){
String aNummer = CategorieUtil.findANummer(ag31.getCategorie());
String bsn = CategorieUtil.findBsn(ag31.getCategorie());
digidXClient.setANummer(bsn, aNummer);
if(ag31.getStatus() != null && ag31.getDatum() != null){
digidXClient.setOpschortingsStatus(aNummer, ag31.getStatus());
}
// Only possible if original message (Ag01) was not sent
if(afnemersbericht != null) {
afnemersberichtRepository.delete(afnemersbericht);
}
logger.info("Finished processing Ag31 message");
} | @Test
public void testProcessAg31StatusA(){
String testBsn = "SSSSSSSSS";
Ag31 testAg31 = TestDglMessagesUtil.createTestAg31(testBsn, "A", "SSSSSSSS");
classUnderTest.processAg31(testAg31, afnemersbericht);
verify(digidXClient, times(1)).setANummer(testBsn,"A" + testBsn);
verify(digidXClient, times(1)).setOpschortingsStatus("A" + testBsn, "A");
} |
public void validate(ExternalIssueReport report, Path reportPath) {
if (report.rules != null && report.issues != null) {
Set<String> ruleIds = validateRules(report.rules, reportPath);
validateIssuesCctFormat(report.issues, ruleIds, reportPath);
} else if (report.rules == null && report.issues != null) {
String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX);
LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. " +
"Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink);
validateIssuesDeprecatedFormat(report.issues, reportPath);
} else {
throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath));
}
} | @Test
public void validate_whenMissingMandatoryCleanCodeAttributeField_shouldThrowException() throws IOException {
ExternalIssueReport report = read(REPORTS_LOCATION);
report.rules[0].cleanCodeAttribute = null;
assertThatThrownBy(() -> validator.validate(report, reportPath))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Failed to parse report 'report-path': missing mandatory field 'cleanCodeAttribute'.");
} |
@Override
public String generateSqlType(Dialect dialect) {
return switch (dialect.getId()) {
case MsSql.ID -> "VARBINARY(MAX)";
case Oracle.ID, H2.ID -> "BLOB";
case PostgreSql.ID -> "BYTEA";
default -> throw new IllegalArgumentException("Unsupported dialect id " + dialect.getId());
};
} | @Test
public void generateSqlType_for_Oracle() {
assertThat(underTest.generateSqlType(new Oracle())).isEqualTo("BLOB");
} |
public Response<AiMessage> execute( String input, List<IntermediateStep> intermediateSteps ) {
var userMessageTemplate = PromptTemplate.from( "{{input}}" )
.apply( mapOf( "input", input));
var messages = new ArrayList<ChatMessage>();
messages.add(new SystemMessage("You are a helpful assistant"));
messages.add(new UserMessage(userMessageTemplate.text()));
if (!intermediateSteps.isEmpty()) {
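// Replay earlier tool requests and their observations so the model sees the full tool-call history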
var toolRequests = intermediateSteps.stream()
.map(IntermediateStep::action)
.map(AgentAction::toolExecutionRequest)
.collect(Collectors.toList());
messages.add(new AiMessage(toolRequests)); // reply with tool requests
for (IntermediateStep step : intermediateSteps) {
var toolRequest = step.action().toolExecutionRequest();
messages.add(new ToolExecutionResultMessage(toolRequest.id(), toolRequest.name(), step.observation()));
}
}
return chatLanguageModel.generate( messages, tools );
} | @Test
public void runAgentTest() throws Exception {
assertTrue(DotEnvConfig.valueOf("OPENAI_API_KEY").isPresent());
var chatLanguageModel = OpenAiChatModel.builder()
.apiKey( DotEnvConfig.valueOf("OPENAI_API_KEY").get() )
.modelName( "gpt-3.5-turbo-0613" )
.logResponses(true)
.maxRetries(2)
.temperature(0.0)
.maxTokens(2000)
.build();
var tool = new TestTool();
var agent = Agent.builder()
.chatLanguageModel(chatLanguageModel)
.tools( ToolInfo.of(tool).stream().map(ToolInfo::specification).collect(Collectors.toList()) )
.build();
var msg = "hello world";
var response = agent.execute( format("this is an AI test with message: '%s'", msg), emptyList() );
assertNotNull(response);
assertEquals(FinishReason.TOOL_EXECUTION, response.finishReason());
var content = response.content();
assertNotNull(content);
assertNull( content.text());
assertTrue(content.hasToolExecutionRequests());
var toolExecutionRequests = content.toolExecutionRequests();
assertEquals(1, toolExecutionRequests.size());
var toolExecutionRequest = toolExecutionRequests.get(0);
assertEquals("execTest", toolExecutionRequest.name());
assertEquals("{ \"arg0\": \"hello world\"}", toolExecutionRequest.arguments().replaceAll("\n",""));
} |
@SuppressWarnings("unchecked")
@Override
public void handle(LogHandlerEvent event) {
switch (event.getType()) {
case APPLICATION_STARTED:
LogHandlerAppStartedEvent appStartedEvent =
(LogHandlerAppStartedEvent) event;
this.appOwners.put(appStartedEvent.getApplicationId(),
appStartedEvent.getUser());
this.dispatcher.getEventHandler().handle(
new ApplicationEvent(appStartedEvent.getApplicationId(),
ApplicationEventType.APPLICATION_LOG_HANDLING_INITED));
break;
case CONTAINER_FINISHED:
// Ignore
break;
case APPLICATION_FINISHED:
LogHandlerAppFinishedEvent appFinishedEvent =
(LogHandlerAppFinishedEvent) event;
ApplicationId appId = appFinishedEvent.getApplicationId();
String user = appOwners.remove(appId);
if (user == null) {
LOG.error("Unable to locate user for {}", appId);
// send LOG_HANDLING_FAILED out
NonAggregatingLogHandler.this.dispatcher.getEventHandler().handle(
new ApplicationEvent(appId,
ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED));
break;
}
LogDeleterRunnable logDeleter = new LogDeleterRunnable(user, appId);
long deletionTimestamp = System.currentTimeMillis()
+ this.deleteDelaySeconds * 1000;
LogDeleterProto deleterProto = LogDeleterProto.newBuilder()
.setUser(user)
.setDeletionTime(deletionTimestamp)
.build();
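// Persist the deleter state so a pending deletion can be recovered after a restart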
try {
stateStore.storeLogDeleter(appId, deleterProto);
} catch (IOException e) {
LOG.error("Unable to record log deleter state", e);
}
try {
boolean logDeleterStarted = false;
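// Delete immediately when size-based triggering is enabled and the app's logs exceed the threshold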
if (enableTriggerDeleteBySize) {
final long appLogSize = calculateSizeOfAppLogs(user, appId);
if (appLogSize >= deleteThreshold) {
LOG.info("Log Deletion for application: {}, with no delay, size={}", appId, appLogSize);
sched.schedule(logDeleter, 0, TimeUnit.SECONDS);
logDeleterStarted = true;
}
}
if (!logDeleterStarted) {
LOG.info("Scheduling Log Deletion for application: {}, with delay of {} seconds",
appId, this.deleteDelaySeconds);
sched.schedule(logDeleter, this.deleteDelaySeconds, TimeUnit.SECONDS);
}
} catch (RejectedExecutionException e) {
// Handling this event in local thread before starting threads
// or after calling sched.shutdownNow().
logDeleter.run();
}
break;
default:
}
} | @Test
public void testDelayedDelete() throws IOException {
File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 2);
String localLogDirsString =
localLogDirs[0].getAbsolutePath() + ","
+ localLogDirs[1].getAbsolutePath();
conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS,
YarnConfiguration.DEFAULT_NM_LOG_RETAIN_SECONDS);
dirsHandler.init(conf);
NonAggregatingLogHandler logHandler =
new NonAggregatingLogHandlerWithMockExecutor(dispatcher, mockDelService,
dirsHandler);
logHandler.init(conf);
logHandler.start();
logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
ContainerType.APPLICATION_MASTER, 0));
logHandler.handle(new LogHandlerAppFinishedEvent(appId));
Path[] localAppLogDirs = new Path[2];
localAppLogDirs[0] =
new Path(localLogDirs[0].getAbsolutePath(), appId.toString());
localAppLogDirs[1] =
new Path(localLogDirs[1].getAbsolutePath(), appId.toString());
ScheduledThreadPoolExecutor mockSched =
((NonAggregatingLogHandlerWithMockExecutor) logHandler).mockSched;
verify(mockSched).schedule(any(Runnable.class), eq(10800L),
eq(TimeUnit.SECONDS));
logHandler.close();
for (int i = 0; i < localLogDirs.length; i++) {
FileUtils.deleteDirectory(localLogDirs[i]);
}
} |