focal_method | test_case |
---|---|
public void begin() throws SQLException {
ConnectionTransaction connectionTransaction = getConnectionTransaction();
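// For distributed transactions, release the currently held connections before delegating begin to the transaction manager.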
if (TransactionType.isDistributedTransaction(connectionTransaction.getTransactionType())) {
close();
connectionTransaction.begin();
}
connectionContext.getTransactionContext().beginTransaction(String.valueOf(connectionTransaction.getTransactionType()));
} | @Test
void assertBeginTransaction() throws SQLException {
databaseConnectionManager.begin();
assertTrue(databaseConnectionManager.getConnectionContext().getTransactionContext().isInTransaction());
} |
public static PrivateKey readPemPrivateKey(InputStream pemStream) {
return (PrivateKey) readPemKey(pemStream);
} | @Test
public void readECPrivateKeyTest() {
final PrivateKey privateKey = PemUtil.readPemPrivateKey(ResourceUtil.getStream("test_ec_sec1_private_key.pem"));
final SM2 sm2 = new SM2(privateKey, null);
sm2.usePlainEncoding();
// Plaintext to be signed, converted to its corresponding byte array
final byte[] dataBytes = "我是一段测试aaaa".getBytes(StandardCharsets.UTF_8);
final byte[] sign = sm2.sign(dataBytes, null);
// 64-byte signature
assertEquals(64, sign.length);
} |
public static Collection<SubquerySegment> getSubquerySegments(final SelectStatement selectStatement) {
List<SubquerySegment> result = new LinkedList<>();
extractSubquerySegments(result, selectStatement);
return result;
} | @Test
void assertGetSubquerySegmentsInFrom1() {
SelectStatement subquery = mock(SelectStatement.class);
ColumnSegment left = new ColumnSegment(59, 66, new IdentifierValue("order_id"));
LiteralExpressionSegment right = new LiteralExpressionSegment(70, 70, 1);
when(subquery.getWhere()).thenReturn(Optional.of(new WhereSegment(53, 70, new BinaryOperationExpression(59, 70, left, right, "=", "order_id = 1"))));
when(subquery.getFrom()).thenReturn(Optional.of(new SimpleTableSegment(new TableNameSegment(45, 51, new IdentifierValue("t_order")))));
ProjectionsSegment subqueryProjections = new ProjectionsSegment(31, 38);
when(subquery.getProjections()).thenReturn(subqueryProjections);
subqueryProjections.getProjections().add(new ColumnProjectionSegment(new ColumnSegment(31, 38, new IdentifierValue("order_id"))));
SelectStatement selectStatement = mock(SelectStatement.class);
ProjectionsSegment projections = new ProjectionsSegment(7, 16);
when(selectStatement.getProjections()).thenReturn(projections);
projections.getProjections().add(new ColumnProjectionSegment(new ColumnSegment(7, 16, new IdentifierValue("order_id"))));
SubqueryTableSegment subqueryTableSegment = new SubqueryTableSegment(0, 0, new SubquerySegment(23, 71, subquery, ""));
when(selectStatement.getFrom()).thenReturn(Optional.of(subqueryTableSegment));
Collection<SubquerySegment> actual = SubqueryExtractUtils.getSubquerySegments(selectStatement);
assertThat(actual.size(), is(1));
assertThat(actual.iterator().next(), is(subqueryTableSegment.getSubquery()));
} |
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
return substVars(val, pc1, null);
} | @Disabled
@Test
public void defaultExpansionForEmptyVariables() throws JoranException, ScanException {
String varName = "var"+diff;
context.putProperty(varName, "");
String r = OptionHelper.substVars("x ${"+varName+":-def} b", context);
assertEquals("x def b", r);
} |
@VisibleForTesting
void validateDictTypeUnique(Long id, String type) {
if (StrUtil.isEmpty(type)) {
return;
}
DictTypeDO dictType = dictTypeMapper.selectByType(type);
if (dictType == null) {
return;
}
// If id is null, there is no need to check whether the found dict type has the same id
if (id == null) {
throw exception(DICT_TYPE_TYPE_DUPLICATE);
}
if (!dictType.getId().equals(id)) {
throw exception(DICT_TYPE_TYPE_DUPLICATE);
}
} | @Test
public void testValidateDictTypeUnique_valueDuplicateForUpdate() {
// Prepare parameters
Long id = randomLongId();
String type = randomString();
// Mock data
dictTypeMapper.insert(randomDictTypeDO(o -> o.setType(type)));
// Invoke and verify the exception
assertServiceException(() -> dictTypeService.validateDictTypeUnique(id, type),
DICT_TYPE_TYPE_DUPLICATE);
} |
public T get(final int index) {
if (index < 0 || index >= values.size()) {
throw new IndexOutOfBoundsException(
String.format(
"Attempted to access variadic argument at index %s when only %s "
+ "arguments are available",
index,
values.size()
)
);
}
return values.get(index);
} | @Test
public void shouldThrowWhenIndexTooLarge() {
final VariadicArgs<Integer> varArgs = new VariadicArgs<>(ImmutableList.of(1, 2, 3));
final Exception e = assertThrows(
IndexOutOfBoundsException.class,
() -> varArgs.get(3)
);
assertThat(e.getMessage(), is("Attempted to access variadic argument at index 3 when only 3 "
+ "arguments are available"));
} |
@Override
public Optional<ShardingConditionValue> generate(final BinaryOperationExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) {
String operator = predicate.getOperator().toUpperCase();
if (!isSupportedOperator(operator)) {
return Optional.empty();
}
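// The column may sit on either side of the predicate; take the opposite side as the value expression.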
ExpressionSegment valueExpression = predicate.getLeft() instanceof ColumnSegment ? predicate.getRight() : predicate.getLeft();
ConditionValue conditionValue = new ConditionValue(valueExpression, params);
if (conditionValue.isNull()) {
return generate(null, column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
Optional<Comparable<?>> value = conditionValue.getValue();
if (value.isPresent()) {
return generate(value.get(), column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
if (ExpressionConditionUtils.isNowExpression(valueExpression)) {
return generate(timestampServiceRule.getTimestamp(), column, operator, -1);
}
return Optional.empty();
} | @Test
void assertGenerateConditionValueWithoutNowExpression() {
BinaryOperationExpression rightValue = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new CommonExpressionSegment(0, 0, "value"), "=", null);
assertFalse(generator.generate(rightValue, column, new LinkedList<>(), mock(TimestampServiceRule.class)).isPresent());
} |
@VisibleForTesting
long getDelayMs() {
return delayMs;
} | @Test
public void create_instance_with_default_delay() {
AbstractStopRequestWatcher underTest = new AbstractStopRequestWatcher(threadName, booleanSupplier, stopAction) {
};
assertThat(underTest.getDelayMs()).isEqualTo(500L);
} |
static void readFullyHeapBuffer(InputStream f, ByteBuffer buf) throws IOException {
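// Copy into the buffer's backing array at the current position, then mark the buffer fully consumed.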
readFully(f, buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
buf.position(buf.limit());
} | @Test
public void testHeapReadFullySmallBuffer() throws Exception {
ByteBuffer readBuffer = ByteBuffer.allocate(8);
MockInputStream stream = new MockInputStream();
DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer);
Assert.assertEquals(8, readBuffer.position());
Assert.assertEquals(8, readBuffer.limit());
DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer);
Assert.assertEquals(8, readBuffer.position());
Assert.assertEquals(8, readBuffer.limit());
readBuffer.flip();
Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 8), readBuffer);
} |
static Map<String, Object> of(final Task task) {
return Map.of(
"id", task.getId(),
"type", task.getType()
);
} | @Test
void shouldGetVariablesGivenFlowWithNoTenant() {
Map<String, Object> variables = new RunVariables.DefaultBuilder()
.withFlow(Flow
.builder()
.id("id-value")
.namespace("namespace-value")
.revision(42)
.build()
)
.build(new RunContextLogger());
Assertions.assertEquals(Map.of(
"id", "id-value",
"namespace", "namespace-value",
"revision", 42
), variables.get("flow"));
} |
public static Map<AbilityKey, Boolean> getStaticAbilities() {
return INSTANCE.getSupportedAbilities();
} | @Test
void testSupportPersistentInstanceByGrpcAbilities() {
assertTrue(ServerAbilities.getStaticAbilities().get(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC));
} |
public Iterator<Entry<String, Optional<MetaProperties>>> nonFailedDirectoryProps() {
return new Iterator<Entry<String, Optional<MetaProperties>>>() {
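// Empty log dirs are yielded first with Optional.empty(), followed by dirs with loaded meta properties.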
private final Iterator<String> emptyLogDirsIterator = emptyLogDirs.iterator();
private final Iterator<Entry<String, MetaProperties>> logDirsIterator =
logDirProps.entrySet().iterator();
@Override
public boolean hasNext() {
return emptyLogDirsIterator.hasNext() || logDirsIterator.hasNext();
}
@Override
public Entry<String, Optional<MetaProperties>> next() {
if (emptyLogDirsIterator.hasNext()) {
return new SimpleImmutableEntry<>(emptyLogDirsIterator.next(), Optional.empty());
}
Entry<String, MetaProperties> entry = logDirsIterator.next();
return new SimpleImmutableEntry<>(entry.getKey(), Optional.of(entry.getValue()));
}
};
} | @Test
public void testNonFailedDirectoryPropsForEmpty() {
assertFalse(EMPTY.nonFailedDirectoryProps().hasNext());
} |
@Override
public List<RemoteFileInfo> getRemoteFiles(Table table, GetRemoteFilesParams params) {
List<Partition> partitions = buildGetRemoteFilesPartitions(table, params);
boolean useCache = true;
if (table instanceof HiveTable) {
useCache = ((HiveTable) table).isUseMetadataCache();
}
// if we disable cache explicitly
if (!params.isUseCache()) {
useCache = false;
}
GetRemoteFilesParams updatedParams = params.copy();
updatedParams.setUseCache(useCache);
return fileOps.getRemoteFiles(table, partitions, updatedParams);
} | @Test
public void testGetRemoteFiles(
@Mocked HiveTable table,
@Mocked HiveMetastoreOperations hmsOps) {
List<String> partitionNames = Lists.newArrayList("dt=20200101", "dt=20200102", "dt=20200103");
Map<String, Partition> partitionMap = Maps.newHashMap();
for (String name : partitionNames) {
Map<String, String> parameters = Maps.newHashMap();
TextFileFormatDesc formatDesc = new TextFileFormatDesc("a", "b", "c", "d");
String fullPath = HDFS_HIVE_TABLE;
Partition partition = new Partition(parameters, RemoteFileInputFormat.PARQUET, formatDesc, fullPath, true);
partitionMap.put(name, partition);
}
new Expectations() {
{
hmsOps.getPartitionByNames((Table) any, (List<String>) any);
result = partitionMap;
minTimes = 1;
}
};
GetRemoteFilesParams params = GetRemoteFilesParams.newBuilder().setPartitionNames(partitionNames).build();
List<RemoteFileInfo> remoteFileInfos = hiveMetadata.getRemoteFiles(table, params);
Assert.assertEquals(3, remoteFileInfos.size());
} |
public static void main(String[] args) {
// Getting the bar series
BarSeries series = CsvTradesLoader.loadBitstampSeries();
// Building the trading strategy
Strategy strategy = buildStrategy(series);
// Running the strategy
BarSeriesManager seriesManager = new BarSeriesManager(series);
TradingRecord tradingRecord = seriesManager.run(strategy);
System.out.println("Number of positions for the strategy: " + tradingRecord.getPositionCount());
// Analysis
System.out.println("Total profit for the strategy: " + new ReturnCriterion().calculate(series, tradingRecord));
} | @Test
public void test() {
MovingMomentumStrategy.main(null);
} |
static boolean unprotectedSetOwner(
FSDirectory fsd, INodesInPath iip, String username, String groupname)
throws FileNotFoundException, UnresolvedLinkException,
QuotaExceededException, SnapshotAccessControlException {
assert fsd.hasWriteLock();
final INode inode = FSDirectory.resolveLastINode(iip);
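// The permission long packs user, group, and mode, so comparing it before and after detects any ownership change.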
long oldPerm = inode.getPermissionLong();
if (username != null) {
inode.setUser(username, iip.getLatestSnapshotId());
}
if (groupname != null) {
inode.setGroup(groupname, iip.getLatestSnapshotId());
}
return oldPerm != inode.getPermissionLong();
} | @Test
public void testUnprotectedSetOwner() throws Exception {
assertTrue("SetOwner should return true for a new user",
unprotectedSetAttributes((short) 0777, (short) 0777, "user1",
"user2", true));
assertFalse("SetOwner should return false for same user",
unprotectedSetAttributes((short) 0777, (short) 0777, "user1",
"user1", true));
} |
public static String detectWithCustomConfig(String name) throws Exception {
String config = "/org/apache/tika/mime/tika-mimetypes.xml";
Tika tika = new Tika(MimeTypesFactory.create(config));
return tika.detect(name);
} | @Test
public void testDetectWithCustomConfig() throws Exception {
assertEquals("application/xml", AdvancedTypeDetector.detectWithCustomConfig("pom.xml"));
} |
@Override
public T deserialize(final String topic, final byte[] bytes) {
try {
if (bytes == null) {
return null;
}
// don't use the JsonSchemaConverter to read this data because
// we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
// which is not currently available in the standard converters
final JsonNode value = isJsonSchema
? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
: MAPPER.readTree(bytes);
final Object coerced = enforceFieldType(
"$",
new JsonValueContext(value, schema)
);
if (LOG.isTraceEnabled()) {
LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
}
return SerdeUtils.castToTargetType(coerced, targetType);
} catch (final Exception e) {
// Clear location in order to avoid logging data, for security reasons
if (e instanceof JsonParseException) {
((JsonParseException) e).clearLocation();
}
throw new SerializationException(
"Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
}
} | @Test
public void shouldFixScaleWhenDeserializingDecimalsWithTooSmallAScale() {
// Given:
final KsqlJsonDeserializer<BigDecimal> deserializer =
givenDeserializerForSchema(DecimalUtil.builder(4, 3).build(), BigDecimal.class);
final byte[] bytes = addMagic("1.1".getBytes(UTF_8));
// When:
final Object result = deserializer.deserialize(SOME_TOPIC, bytes);
// Then:
assertThat(result, is(new BigDecimal("1.100")));
} |
@Description("test if value is finite")
@ScalarFunction
@SqlType(StandardTypes.BOOLEAN)
public static boolean isFinite(@SqlType(StandardTypes.DOUBLE) double num)
{
return Doubles.isFinite(num);
} | @Test
public void testIsFinite()
{
assertFunction("is_finite(100000)", BOOLEAN, true);
assertFunction("is_finite(rand() / 0.0E0)", BOOLEAN, false);
assertFunction("is_finite(REAL '754.2008E0')", BOOLEAN, true);
assertFunction("is_finite(rand() / REAL '0.0E0')", BOOLEAN, false);
assertFunction("is_finite(NULL)", BOOLEAN, null);
} |
@Override
public Connection getConnection() {
return new CircuitBreakerConnection();
} | @Test
void assertGetConnection() {
assertThat(dataSource.getConnection(), instanceOf(CircuitBreakerConnection.class));
assertThat(dataSource.getConnection("", ""), instanceOf(CircuitBreakerConnection.class));
} |
@Override
public void authenticate(
final JsonObject authInfo,
final Handler<AsyncResult<User>> resultHandler
) {
final String username = authInfo.getString("username");
if (username == null) {
resultHandler.handle(Future.failedFuture("authInfo missing 'username' field"));
return;
}
final String password = authInfo.getString("password");
if (password == null) {
resultHandler.handle(Future.failedFuture("authInfo missing 'password' field"));
return;
}
server.getWorkerExecutor().executeBlocking(
promisedUser -> getUser(contextName, username, password, promisedUser),
false,
resultHandler
);
} | @Test
public void shouldFailToAuthenticateWithNonAllowedRole() throws Exception {
// Given:
givenAllowedRoles("user");
givenUserRoles("other");
// When:
authProvider.authenticate(authInfo, userHandler);
// Then:
verifyUnauthorizedSuccessfulLogin();
} |
@Override
public byte[] encode(ILoggingEvent event) {
var baos = new ByteArrayOutputStream();
try (var generator = jsonFactory.createGenerator(baos)) {
generator.writeStartObject();
// https://cloud.google.com/logging/docs/structured-logging#structured_logging_special_fields
// https://github.com/googleapis/java-logging-logback/blob/main/src/main/java/com/google/cloud/logging/logback/LoggingAppender.java
writeTimestamp(generator, event);
writeSeverity(generator, event);
writeLogger(generator, event);
writeMessage(generator, event);
writeThread(generator, event);
writeServiceContext(generator);
writeTraceContext(generator);
var mdc = event.getMDCPropertyMap();
writeMdc(generator, mdc);
writeKeyValue(generator, event);
if ("ERROR".equals(event.getLevel().toString())) {
writeError(generator, event, mdc);
}
writeStackTrace(generator, event);
generator.writeEndObject();
generator.writeRaw('\n');
generator.flush();
} catch (NullPointerException | IOException e) {
return logFallbackError(event, e);
}
return baos.toByteArray();
} | @Test
void encode_fallback() {
var e = mockEvent();
doThrow(NullPointerException.class).when(e).getKeyValuePairs();
var msg = encoder.encode(e);
assertMatchesJson(
"""
{"message":"error serializing log record: null","serviceContext":{"service":"","version":""},"severity":"ERROR","stack_trace":"java.lang.NullPointerException\\n","time":"2024-08-09T14:13:33Z"}
""",
msg);
} |
public static Element getUniqueDirectChild(Element parent, String namespace, String tag)
throws MalformedXmlException {
NodeList children = parent.getElementsByTagNameNS(namespace, tag);
if (children.getLength() == 0) {
throw new MalformedXmlException("Element " + tag + " is missing under " + parent.getTagName());
}
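// getElementsByTagNameNS matches all descendants, so keep only a node whose direct parent is this element.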
for (int i = 0; i < children.getLength(); i++) {
Node child = children.item(i);
if (child.getNodeType() == Node.ELEMENT_NODE && child.getParentNode() == parent) {
return (Element) child;
}
}
throw new MalformedXmlException("Element " + tag + " was expected directly under " + parent.getTagName());
} | @Test
void getUniqueDirectChild() {
assertThrows(MalformedXmlException.class, () -> {
XmlUtil.getUniqueDirectChild(parent, "http://example.com", "child1");
});
Element uniqueChild = document.createElementNS("http://example.com", "uniqueChild");
parent.appendChild(uniqueChild);
try {
Element retrievedChild = XmlUtil.getUniqueDirectChild(parent, "http://example.com", "uniqueChild");
assertEquals("uniqueChild", retrievedChild.getLocalName());
} catch (MalformedXmlException e) {
fail("Exception should not have been thrown");
}
} |
public static Timer getRaftApplyLogTimer() {
return RAFT_APPLY_LOG_TIMER;
} | @Test
void testRaftApplyLogTimer() {
Timer raftApplyTimerLog = MetricsMonitor.getRaftApplyLogTimer();
raftApplyTimerLog.record(10, TimeUnit.SECONDS);
raftApplyTimerLog.record(20, TimeUnit.SECONDS);
assertEquals(0.5D, raftApplyTimerLog.totalTime(TimeUnit.MINUTES), 0.01);
assertEquals(30D, raftApplyTimerLog.totalTime(TimeUnit.SECONDS), 0.01);
} |
public String readStringEOF() {
byte[] result = new byte[byteBuf.readableBytes()];
byteBuf.readBytes(result);
return new String(result, charset);
} | @Test
void assertReadStringEOF() {
when(byteBuf.readableBytes()).thenReturn(0);
assertThat(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).readStringEOF(), is(""));
} |
public static String decimalFormat(String pattern, double value) {
Assert.isTrue(isValid(value), "value is NaN or Infinite!");
return new DecimalFormat(pattern).format(value);
} | @Test
public void decimalFormatDoubleTest() {
final Double c = 467.8101;
final String format = NumberUtil.decimalFormat("0.00", c);
assertEquals("467.81", format);
} |
@Override
public String save(AccessToken accessToken) throws ValidationException {
return super.save(encrypt(accessToken));
} | @Test
public void testSave() throws Exception {
final String username = "admin";
final String tokenname = "web";
final String tokenString = "foobar";
assertNull(accessTokenService.load(tokenString));
assertEquals(0, accessTokenService.loadAll(username).size());
final AccessToken token = accessTokenService.create(username, tokenname);
token.setToken(tokenString);
accessTokenService.save(token);
assertEquals(1, accessTokenService.loadAll(username).size());
final AccessToken newToken = accessTokenService.load(tokenString);
assertNotNull(newToken);
assertEquals(token.getUserName(), newToken.getUserName());
assertEquals(token.getName(), newToken.getName());
assertEquals(token.getToken(), newToken.getToken());
} |
@Override
public void notify(Metrics metrics) {
WithMetadata withMetadata = (WithMetadata) metrics;
MetricsMetaInfo meta = withMetadata.getMeta();
int scope = meta.getScope();
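// Only service, instance, endpoint, and their relation scopes can raise alarms; all other metrics are ignored.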
if (!DefaultScopeDefine.inServiceCatalog(scope) && !DefaultScopeDefine.inServiceInstanceCatalog(scope)
&& !DefaultScopeDefine.inEndpointCatalog(scope) && !DefaultScopeDefine.inServiceRelationCatalog(scope)
&& !DefaultScopeDefine.inServiceInstanceRelationCatalog(scope) && !DefaultScopeDefine.inEndpointRelationCatalog(scope)) {
return;
}
MetaInAlarm metaInAlarm;
if (DefaultScopeDefine.inServiceCatalog(scope)) {
final String serviceId = meta.getId();
final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
serviceId);
ServiceMetaInAlarm serviceMetaInAlarm = new ServiceMetaInAlarm();
serviceMetaInAlarm.setMetricsName(meta.getMetricsName());
serviceMetaInAlarm.setId(serviceId);
serviceMetaInAlarm.setName(serviceIDDefinition.getName());
metaInAlarm = serviceMetaInAlarm;
} else if (DefaultScopeDefine.inServiceInstanceCatalog(scope)) {
final String instanceId = meta.getId();
final IDManager.ServiceInstanceID.InstanceIDDefinition instanceIDDefinition = IDManager.ServiceInstanceID.analysisId(
instanceId);
final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
instanceIDDefinition.getServiceId());
ServiceInstanceMetaInAlarm instanceMetaInAlarm = new ServiceInstanceMetaInAlarm();
instanceMetaInAlarm.setMetricsName(meta.getMetricsName());
instanceMetaInAlarm.setId(instanceId);
instanceMetaInAlarm.setName(instanceIDDefinition.getName() + " of " + serviceIDDefinition.getName());
metaInAlarm = instanceMetaInAlarm;
} else if (DefaultScopeDefine.inEndpointCatalog(scope)) {
final String endpointId = meta.getId();
final IDManager.EndpointID.EndpointIDDefinition endpointIDDefinition = IDManager.EndpointID.analysisId(
endpointId);
final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
endpointIDDefinition.getServiceId());
EndpointMetaInAlarm endpointMetaInAlarm = new EndpointMetaInAlarm();
endpointMetaInAlarm.setMetricsName(meta.getMetricsName());
endpointMetaInAlarm.setId(meta.getId());
endpointMetaInAlarm.setName(
endpointIDDefinition.getEndpointName() + " in " + serviceIDDefinition.getName());
metaInAlarm = endpointMetaInAlarm;
} else if (DefaultScopeDefine.inServiceRelationCatalog(scope)) {
final String serviceRelationId = meta.getId();
final IDManager.ServiceID.ServiceRelationDefine serviceRelationDefine = IDManager.ServiceID.analysisRelationId(
serviceRelationId);
final IDManager.ServiceID.ServiceIDDefinition sourceIdDefinition = IDManager.ServiceID.analysisId(
serviceRelationDefine.getSourceId());
final IDManager.ServiceID.ServiceIDDefinition destIdDefinition = IDManager.ServiceID.analysisId(
serviceRelationDefine.getDestId());
ServiceRelationMetaInAlarm serviceRelationMetaInAlarm = new ServiceRelationMetaInAlarm();
serviceRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
serviceRelationMetaInAlarm.setId(serviceRelationId);
serviceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " to " + destIdDefinition.getName());
metaInAlarm = serviceRelationMetaInAlarm;
} else if (DefaultScopeDefine.inServiceInstanceRelationCatalog(scope)) {
final String instanceRelationId = meta.getId();
final IDManager.ServiceInstanceID.ServiceInstanceRelationDefine serviceRelationDefine = IDManager.ServiceInstanceID.analysisRelationId(
instanceRelationId);
final IDManager.ServiceInstanceID.InstanceIDDefinition sourceIdDefinition = IDManager.ServiceInstanceID.analysisId(
serviceRelationDefine.getSourceId());
final IDManager.ServiceID.ServiceIDDefinition sourceServiceId = IDManager.ServiceID.analysisId(
sourceIdDefinition.getServiceId());
final IDManager.ServiceInstanceID.InstanceIDDefinition destIdDefinition = IDManager.ServiceInstanceID.analysisId(
serviceRelationDefine.getDestId());
final IDManager.ServiceID.ServiceIDDefinition destServiceId = IDManager.ServiceID.analysisId(
destIdDefinition.getServiceId());
ServiceInstanceRelationMetaInAlarm instanceRelationMetaInAlarm = new ServiceInstanceRelationMetaInAlarm();
instanceRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
instanceRelationMetaInAlarm.setId(instanceRelationId);
instanceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " of " + sourceServiceId.getName()
+ " to " + destIdDefinition.getName() + " of " + destServiceId.getName());
metaInAlarm = instanceRelationMetaInAlarm;
} else if (DefaultScopeDefine.inEndpointRelationCatalog(scope)) {
final String endpointRelationId = meta.getId();
final IDManager.EndpointID.EndpointRelationDefine endpointRelationDefine = IDManager.EndpointID.analysisRelationId(
endpointRelationId);
final IDManager.ServiceID.ServiceIDDefinition sourceService = IDManager.ServiceID.analysisId(
endpointRelationDefine.getSourceServiceId());
final IDManager.ServiceID.ServiceIDDefinition destService = IDManager.ServiceID.analysisId(
endpointRelationDefine.getDestServiceId());
EndpointRelationMetaInAlarm endpointRelationMetaInAlarm = new EndpointRelationMetaInAlarm();
endpointRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
endpointRelationMetaInAlarm.setId(endpointRelationId);
endpointRelationMetaInAlarm.setName(endpointRelationDefine.getSource() + " in " + sourceService.getName()
+ " to " + endpointRelationDefine.getDest() + " in " + destService.getName());
metaInAlarm = endpointRelationMetaInAlarm;
} else {
return;
}
List<RunningRule> runningRules = core.findRunningRule(meta.getMetricsName());
if (runningRules == null) {
return;
}
runningRules.forEach(rule -> rule.in(metaInAlarm, metrics));
} | @Test
public void testNotifyWithServiceCatalog() {
String metricsName = "service-metrics";
when(metadata.getMetricsName()).thenReturn(metricsName);
when(DefaultScopeDefine.inServiceCatalog(0)).thenReturn(true);
final String serviceId = IDManager.ServiceID.buildId("service", true);
when(metadata.getId()).thenReturn(serviceId);
ArgumentCaptor<MetaInAlarm> metaCaptor = ArgumentCaptor.forClass(MetaInAlarm.class);
notifyHandler.notify(metrics);
verify(rule).in(metaCaptor.capture(), any());
MetaInAlarm metaInAlarm = metaCaptor.getValue();
assertTrue(metaInAlarm instanceof ServiceMetaInAlarm);
assertEquals("c2VydmljZQ==.1", metaInAlarm.getId0());
assertEquals(DefaultScopeDefine.SERVICE_CATALOG_NAME, metaInAlarm.getScope());
assertEquals("service", metaInAlarm.getName());
assertEquals(DefaultScopeDefine.SERVICE, metaInAlarm.getScopeId());
} |
public void removeData(Service service) {
serviceDataIndexes.remove(service);
serviceClusterIndex.remove(service);
} | @Test
void testRemoveData() throws NoSuchFieldException, IllegalAccessException {
serviceStorage.removeData(SERVICE);
Field serviceClusterIndex = ServiceStorage.class.getDeclaredField("serviceClusterIndex");
serviceClusterIndex.setAccessible(true);
ConcurrentMap<Service, Set<String>> serviceSetConcurrentMap = (ConcurrentMap<Service, Set<String>>) serviceClusterIndex.get(
serviceStorage);
Field serviceDataIndexes = ServiceStorage.class.getDeclaredField("serviceDataIndexes");
serviceDataIndexes.setAccessible(true);
ConcurrentMap<Service, ServiceInfo> infoConcurrentMap = (ConcurrentMap<Service, ServiceInfo>) serviceDataIndexes.get(
serviceStorage);
assertEquals(0, serviceSetConcurrentMap.size());
assertEquals(0, infoConcurrentMap.size());
} |
public boolean supportsFileAttributeView(Class<? extends FileAttributeView> type) {
return providersByViewType.containsKey(type);
} | @Test
public void testSupportsFileAttributeView() {
assertThat(service.supportsFileAttributeView(BasicFileAttributeView.class)).isTrue();
assertThat(service.supportsFileAttributeView(TestAttributeView.class)).isTrue();
assertThat(service.supportsFileAttributeView(PosixFileAttributeView.class)).isFalse();
} |
@Override
public AdminUserDO authenticate(String username, String password) {
final LoginLogTypeEnum logTypeEnum = LoginLogTypeEnum.LOGIN_USERNAME;
// Check whether the account exists
AdminUserDO user = userService.getUserByUsername(username);
if (user == null) {
createLoginLog(null, username, logTypeEnum, LoginResultEnum.BAD_CREDENTIALS);
throw exception(AUTH_LOGIN_BAD_CREDENTIALS);
}
if (!userService.isPasswordMatch(password, user.getPassword())) {
createLoginLog(user.getId(), username, logTypeEnum, LoginResultEnum.BAD_CREDENTIALS);
throw exception(AUTH_LOGIN_BAD_CREDENTIALS);
}
// Check whether the user is disabled
if (CommonStatusEnum.isDisable(user.getStatus())) {
createLoginLog(user.getId(), username, logTypeEnum, LoginResultEnum.USER_DISABLED);
throw exception(AUTH_LOGIN_USER_DISABLED);
}
return user;
} | @Test
public void testAuthenticate_success() {
// Prepare parameters
String username = randomString();
String password = randomString();
// Mock user data
AdminUserDO user = randomPojo(AdminUserDO.class, o -> o.setUsername(username)
.setPassword(password).setStatus(CommonStatusEnum.ENABLE.getStatus()));
when(userService.getUserByUsername(eq(username))).thenReturn(user);
// Mock password matching
when(userService.isPasswordMatch(eq(password), eq(user.getPassword()))).thenReturn(true);
// Invoke
AdminUserDO loginUser = authService.authenticate(username, password);
// Verify
assertPojoEquals(user, loginUser);
} |
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
lock.lock();
try {
checkArgument(!req.completed, () ->
"given SendRequest has already been completed");
log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(),
req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());
// Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
// with the actual outputs that'll be used to gather the required amount of value. In this way, users
// can customize coin selection policies. The call below will ignore immature coinbases and outputs
// we don't have the keys for.
List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);
// Connect (add a value amount) unconnected inputs
List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
req.tx.clearInputs();
inputs.forEach(req.tx::addInput);
// Warn if there are remaining unconnected inputs whose value we do not know
// TODO: Consider throwing if there are inputs that we don't have a value for
if (req.tx.getInputs().stream()
.map(TransactionInput::getValue)
.anyMatch(Objects::isNull))
log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");
// If any inputs have already been added, we don't need to get their value from wallet
Coin totalInput = req.tx.getInputSum();
// Calculate the amount of value we need to import.
Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);
// Enforce the OP_RETURN limit
if (req.tx.getOutputs().stream()
.filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
.count() > 1) // Only 1 OP_RETURN per transaction allowed.
throw new MultipleOpReturnRequested();
// Check for dusty sends
if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
throw new DustySendRequested();
}
// Filter out candidates that are already included in the transaction inputs
List<TransactionOutput> candidates = prelimCandidates.stream()
.filter(output -> alreadyIncluded(req.tx.getInputs(), output))
.collect(StreamUtils.toUnmodifiableList());
CoinSelection bestCoinSelection;
TransactionOutput bestChangeOutput = null;
List<Coin> updatedOutputValues = null;
if (!req.emptyWallet) {
// This can throw InsufficientMoneyException.
FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
bestCoinSelection = feeCalculation.bestCoinSelection;
bestChangeOutput = feeCalculation.bestChangeOutput;
updatedOutputValues = feeCalculation.updatedOutputValues;
} else {
// We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
// of the total value we can currently spend as determined by the selector, and then subtracting the fee.
checkState(req.tx.getOutputs().size() == 1, () ->
"empty wallet TX must have a single output only");
CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
candidates = null; // Selector took ownership and might have changed candidates. Don't access again.
req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
log.info(" emptying {}", bestCoinSelection.totalValue().toFriendlyString());
}
bestCoinSelection.outputs()
.forEach(req.tx::addInput);
if (req.emptyWallet) {
if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
throw new CouldNotAdjustDownwards();
}
if (updatedOutputValues != null) {
for (int i = 0; i < updatedOutputValues.size(); i++) {
req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
}
}
if (bestChangeOutput != null) {
req.tx.addOutput(bestChangeOutput);
log.info(" with {} change", bestChangeOutput.getValue().toFriendlyString());
}
// Now shuffle the outputs to obfuscate which is the change.
if (req.shuffleOutputs)
req.tx.shuffleOutputs();
// Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
if (req.signInputs)
signTransaction(req);
// Check size.
final int size = req.tx.messageSize();
if (size > Transaction.MAX_STANDARD_TX_SIZE)
throw new ExceededMaxTransactionSize();
// Label the transaction as being self created. We can use this later to spend its change output even before
// the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
// point - the user isn't interested in a confidence transition they made themselves.
getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
// Label the transaction as being a user requested payment. This can be used to render GUI wallet
// transaction lists more appropriately, especially when the wallet starts to generate transactions itself
// for internal purposes.
req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
// Record the exchange rate that was valid when the transaction was completed.
req.tx.setExchangeRate(req.exchangeRate);
req.tx.setMemo(req.memo);
req.completed = true;
log.info(" completed: {}", req.tx);
} finally {
lock.unlock();
}
} | @Test(expected = Wallet.ExceededMaxTransactionSize.class)
public void respectMaxStandardSize() throws Exception {
// Check that we won't create txns > 100kb. Average tx size is ~220 bytes so this would have to be enormous.
sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, valueOf(100, 0));
Transaction tx = new Transaction();
byte[] bits = new byte[20];
new Random().nextBytes(bits);
Coin v = CENT;
// 3100 outputs to a random address.
for (int i = 0; i < 3100; i++) {
tx.addOutput(v, LegacyAddress.fromPubKeyHash(TESTNET, bits));
}
SendRequest req = SendRequest.forTx(tx);
wallet.completeTx(req);
} |
@UdafFactory(description = "Build a value-to-count histogram of input Strings")
public static TableUdaf<String, Map<String, Long>, Map<String, Long>> histogramString() {
return histogram();
} | @Test
public void shouldUndoCountedValues() {
final TableUdaf<String, Map<String, Long>, Map<String, Long>> udaf = HistogramUdaf.histogramString();
Map<String, Long> agg = udaf.initialize();
final Boolean[] values = new Boolean[] {true, true, false, null, true};
for (final Boolean thisValue : values) {
agg = udaf.aggregate(String.valueOf(thisValue), agg);
}
assertThat(agg.entrySet(), hasSize(3));
assertThat(agg, hasEntry("true", 3L));
assertThat(agg, hasEntry("false", 1L));
assertThat(agg, hasEntry("null", 1L));
agg = udaf.undo("true", agg);
assertThat(agg.entrySet(), hasSize(3));
assertThat(agg, hasEntry("true", 2L));
assertThat(agg, hasEntry("false", 1L));
assertThat(agg, hasEntry("null", 1L));
} |
public static Color colorLerp(final Color a, final Color b, final double t)
{
final double r1 = a.getRed();
final double r2 = b.getRed();
final double g1 = a.getGreen();
final double g2 = b.getGreen();
final double b1 = a.getBlue();
final double b2 = b.getBlue();
final double a1 = a.getAlpha();
final double a2 = b.getAlpha();
return new Color(
(int) Math.round(r1 + (t * (r2 - r1))),
(int) Math.round(g1 + (t * (g2 - g1))),
(int) Math.round(b1 + (t * (b2 - b1))),
(int) Math.round(a1 + (t * (a2 - a1)))
);
} | @Test
public void colorLerp()
{
assertEquals(Color.WHITE, ColorUtil.colorLerp(Color.WHITE, Color.WHITE, 0.9));
assertEquals(new Color(128, 128, 128), ColorUtil.colorLerp(Color.BLACK, Color.WHITE, 0.5));
assertEquals(Color.BLACK, ColorUtil.colorLerp(Color.BLACK, Color.CYAN, 0));
assertEquals(Color.CYAN, ColorUtil.colorLerp(Color.BLACK, Color.CYAN, 1));
assertEquals(new Color(0x80800080, true), ColorUtil.colorLerp(new Color(0xff0000ff, true), new Color(0x00ff0000, true), 0.5));
} |
public static String buildURIFromPattern(String pattern, List<Parameter> parameters) {
if (parameters != null) {
// Browse parameters and choose between template or query one.
for (Parameter parameter : parameters) {
String wadlTemplate = "{" + parameter.getName() + "}";
String swaggerTemplate = "/:" + parameter.getName();
if (pattern.contains(wadlTemplate)) {
// It's a template parameter.
pattern = pattern.replace(wadlTemplate, encodePath(parameter.getValue()));
} else if (pattern.contains(swaggerTemplate)) {
// It's a template parameter.
pattern = pattern.replace(":" + parameter.getName(), encodePath(parameter.getValue()));
} else {
// It's a query parameter, ensure we have started delimiting them.
if (!pattern.contains("?")) {
pattern += "?";
}
if (pattern.contains("=")) {
pattern += "&";
}
pattern += parameter.getName() + "=" + encodeValue(parameter.getValue());
}
}
}
return pattern;
} | @Test
void testBuildURIFromPatternWithMap() {
// Prepare a bunch of parameters.
Map<String, String> parameters = new HashMap<>();
parameters.put("year", "2018");
parameters.put("month", "05");
parameters.put("status", "published");
parameters.put("page", "0");
// Test with old wadl like template format.
String pattern = "http://localhost:8080/blog/{year}/{month}";
String uri = URIBuilder.buildURIFromPattern(pattern, parameters);
assertTrue("http://localhost:8080/blog/2018/05?page=0&status=published".equals(uri)
|| "http://localhost:8080/blog/2018/05?status=published&page=0".equals(uri));
// Test with new swagger like template format.
pattern = "http://localhost:8080/blog/:year/:month";
uri = URIBuilder.buildURIFromPattern(pattern, parameters);
assertTrue("http://localhost:8080/blog/2018/05?page=0&status=published".equals(uri)
|| "http://localhost:8080/blog/2018/05?status=published&page=0".equals(uri));
} |
public static DataSourceProvider tryGetDataSourceProviderOrNull(Configuration hdpConfig) {
final String configuredPoolingType = MetastoreConf.getVar(hdpConfig,
MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE);
return Iterables.tryFind(FACTORIES, factory -> {
String poolingType = factory.getPoolingType();
return poolingType != null && poolingType.equalsIgnoreCase(configuredPoolingType);
}).orNull();
} | @Test
public void testSetHikariCpLeakDetectionThresholdProperty() throws SQLException {
MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
conf.set(HikariCPDataSourceProvider.HIKARI + ".leakDetectionThreshold", "3600");
conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1");
DataSourceProvider dsp = DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
Assert.assertNotNull(dsp);
DataSource ds = dsp.create(conf);
Assert.assertTrue(ds instanceof HikariDataSource);
Assert.assertEquals(3600L, ((HikariDataSource)ds).getLeakDetectionThreshold());
} |
DataTableType lookupTableTypeByType(Type type) {
return lookupTableTypeByType(type, Function.identity());
} | @Test
void null_long_transformed_to_null() {
DataTableTypeRegistry registry = new DataTableTypeRegistry(Locale.ENGLISH);
DataTableType dataTableType = registry.lookupTableTypeByType(LIST_OF_LIST_OF_LONG);
assertEquals(
singletonList(singletonList(null)),
dataTableType.transform(singletonList(singletonList(null))));
} |
@GetMapping(
path = "/api/{namespace}/{extension}",
produces = MediaType.APPLICATION_JSON_VALUE
)
@CrossOrigin
@Operation(summary = "Provides metadata of the latest version of an extension")
@ApiResponses({
@ApiResponse(
responseCode = "200",
description = "The extension metadata are returned in JSON format"
),
@ApiResponse(
responseCode = "404",
description = "The specified extension could not be found",
content = @Content()
),
@ApiResponse(
responseCode = "429",
description = "A client has sent too many requests in a given amount of time",
content = @Content(),
headers = {
@Header(
name = "X-Rate-Limit-Retry-After-Seconds",
description = "Number of seconds to wait after receiving a 429 response",
schema = @Schema(type = "integer", format = "int32")
),
@Header(
name = "X-Rate-Limit-Remaining",
description = "Remaining number of requests left",
schema = @Schema(type = "integer", format = "int32")
)
}
)
})
public ResponseEntity<ExtensionJson> getExtension(
@PathVariable @Parameter(description = "Extension namespace", example = "redhat")
String namespace,
@PathVariable @Parameter(description = "Extension name", example = "java")
String extension
) {
for (var registry : getRegistries()) {
try {
return ResponseEntity.ok()
.cacheControl(CacheControl.noCache().cachePublic())
.body(registry.getExtension(namespace, extension, null));
} catch (NotFoundException exc) {
// Try the next registry
}
}
var json = ExtensionJson.error("Extension not found: " + NamingUtil.toExtensionId(namespace, extension));
return new ResponseEntity<>(json, HttpStatus.NOT_FOUND);
} | @Test
public void testPreReleaseExtensionVersionWebTarget() throws Exception {
var extVersion = mockExtension("web");
extVersion.setPreRelease(true);
extVersion.setDisplayName("Foo Bar (web)");
Mockito.when(repositories.findExtensionVersion("foo", "bar", "web", VersionAlias.PRE_RELEASE)).thenReturn(extVersion);
Mockito.when(repositories.findLatestVersionForAllUrls(extVersion.getExtension(), "web", false, true)).thenReturn(extVersion);
Mockito.when(repositories.findLatestVersionForAllUrls(extVersion.getExtension(), "web", true, true)).thenReturn(extVersion);
mockMvc.perform(get("/api/{namespace}/{extension}/{target}/{version}", "foo", "bar", "web", "pre-release"))
.andExpect(status().isOk())
.andExpect(content().json(extensionJson(e -> {
e.namespace = "foo";
e.name = "bar";
e.version = "1.0.0";
e.verified = false;
e.timestamp = "2000-01-01T10:00Z";
e.displayName = "Foo Bar (web)";
e.versionAlias = List.of("pre-release", "latest");
e.preRelease = true;
})));
} |
@Override
public Boolean run(final Session<?> session) throws BackgroundException {
final Metadata feature = session.getFeature(Metadata.class);
if(log.isDebugEnabled()) {
log.debug(String.format("Run with feature %s", feature));
}
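// Write metadata for each file, aborting if the worker has been canceled in between.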
for(Path file : files) {
if(this.isCanceled()) {
throw new ConnectionCanceledException();
}
this.write(session, feature, file);
}
return true;
} | @Test
public void testRunEmpty() throws Exception {
final List<Path> files = new ArrayList<>();
WriteMetadataWorker worker = new WriteMetadataWorker(files, Collections.emptyMap(), false, new DisabledProgressListener()) {
@Override
public void cleanup(final Boolean result) {
fail();
}
};
worker.run(new NullSession(new Host(new TestProtocol())) {
@Override
@SuppressWarnings("unchecked")
public <T> T _getFeature(final Class<T> type) {
if(type == Metadata.class) {
return (T) new Metadata() {
@Override
public Map<String, String> getDefault(final Local local) {
return Collections.emptyMap();
}
@Override
public Map<String, String> getMetadata(final Path file) {
fail();
return null;
}
@Override
public void setMetadata(final Path file, final TransferStatus status) {
fail();
}
};
}
return super._getFeature(type);
}
});
} |
@Override
public Map<String, Object> toElasticSearchObject(ObjectMapper objectMapper, @Nonnull final Meter invalidTimestampMeter) {
final Map<String, Object> obj = Maps.newHashMapWithExpectedSize(REQUIRED_FIELDS.size() + fields.size());
for (Map.Entry<String, Object> entry : fields.entrySet()) {
final String key = entry.getKey();
if (key.equals(FIELD_ID)) {
continue;
}
final Object value = entry.getValue();
// Elasticsearch does not allow "." characters in keys since version 2.0.
// See: https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking_20_mapping_changes.html#_field_names_may_not_contain_dots
if (key.contains(".")) {
final String newKey = key.replace('.', KEY_REPLACEMENT_CHAR);
// If the message already contains the transformed key, we skip the field and emit a warning.
// This is still not optimal but better than implementing expensive logic with multiple replacement
// character options. Conflicts should be rare...
if (!obj.containsKey(newKey)) {
obj.put(newKey, value);
} else {
LOG.warn("Keys must not contain a \".\" character! Ignoring field \"{}\"=\"{}\" in message [{}] - Unable to replace \".\" with a \"{}\" because of key conflict: \"{}\"=\"{}\"",
key, value, getId(), KEY_REPLACEMENT_CHAR, newKey, obj.get(newKey));
LOG.debug("Full message with \".\" in message key: {}", this);
}
} else {
if (obj.containsKey(key)) {
final String newKey = key.replace(KEY_REPLACEMENT_CHAR, '.');
// Deliberate warning duplicates because the key with the "." might be transformed before reaching
// the duplicate original key with a "_". Otherwise we would silently overwrite the transformed key.
LOG.warn("Keys must not contain a \".\" character! Ignoring field \"{}\"=\"{}\" in message [{}] - Unable to replace \".\" with a \"{}\" because of key conflict: \"{}\"=\"{}\"",
newKey, fields.get(newKey), getId(), KEY_REPLACEMENT_CHAR, key, value);
LOG.debug("Full message with \".\" in message key: {}", this);
}
obj.put(key, value);
}
}
obj.put(FIELD_MESSAGE, getMessage());
obj.put(FIELD_SOURCE, getSource());
obj.put(FIELD_STREAMS, getStreamIds());
obj.put(FIELD_GL2_ACCOUNTED_MESSAGE_SIZE, getSize());
final Object timestampValue = getField(FIELD_TIMESTAMP);
DateTime dateTime = timestampValue == null ? fallbackForNullTimestamp() : convertToDateTime(timestampValue);
obj.put(FIELD_TIMESTAMP, buildElasticSearchTimeFormat(dateTime.withZone(UTC)));
if (processingErrors != null && !processingErrors.isEmpty()) {
if (processingErrors.stream().anyMatch(processingError -> processingError.getCause().equals(ProcessingFailureCause.InvalidTimestampException))) {
invalidTimestampMeter.mark();
}
obj.put(FIELD_GL2_PROCESSING_ERROR,
processingErrors.stream()
.map(pe -> pe.getMessage() + " - " + pe.getDetails())
.collect(Collectors.joining(", ")));
}
return obj;
} | @Test
public void testToElasticsearchObjectAddsAccountedMessageSize() {
final Message message = new Message("message", "source", Tools.nowUTC());
assertThat(message.toElasticSearchObject(objectMapper, invalidTimestampMeter).get("gl2_accounted_message_size"))
.isEqualTo(43L);
} |
public static ElasticAgentRuntimeInfo fromAgent(AgentIdentifier identifier, AgentRuntimeStatus runtimeStatus,
String workingDir, String elasticAgentId, String pluginId,
String agentBootstrapperVersion, String agentVersion, Supplier<String> operatingSystemNameSupplier) {
ElasticAgentRuntimeInfo runtimeInfo = new ElasticAgentRuntimeInfo(identifier, runtimeStatus, workingDir, null, elasticAgentId, pluginId);
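// Chain the refresh/update calls so the returned runtime info carries OS, usable disk space, and version details.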
return (ElasticAgentRuntimeInfo) runtimeInfo
.refreshOperatingSystem(operatingSystemNameSupplier)
.refreshUsableSpace()
.updateBootstrapperVersion(agentBootstrapperVersion)
.updateAgentVersion(agentVersion);
} | @Test
public void shouldRefreshOperatingSystemOfAgent() {
AgentIdentifier identifier = new AgentIdentifier("local.in", "127.0.0.1", "uuid-1");
AgentRuntimeInfo runtimeInfo = ElasticAgentRuntimeInfo.fromAgent(identifier, AgentRuntimeStatus.Idle, "/tmp/foo", "20.3.0-1234", "20.5.0-2345", TEST_OS_SUPPLIER);
assertThat(runtimeInfo.getOperatingSystem(), is("My OS 10.1"));
} |
public void executeInNonInteractiveMode(String content) {
try {
terminal = terminalFactory.get();
executeFile(content, terminal.output(), ExecutionMode.NON_INTERACTIVE_EXECUTION);
} finally {
closeTerminal();
}
} | @Test
void testCancelExecutionInNonInteractiveMode() throws Exception {
// append "\n" to QUIT to trigger committing the line
final List<String> statements =
Arrays.asList(
"HELP;",
"CREATE TABLE tbl( -- comment\n"
+ "-- comment with ;\n"
+ "id INT,\n"
+ "name STRING\n"
+ ") WITH (\n"
+ " 'connector' = 'values'\n"
+ ");\n",
"INSERT INTO \n" + "MyOtherTable VALUES (1, 101), (2, 102);",
"DESC MyOtherTable;",
"SHOW TABLES;",
"QUIT;\n");
// use table.dml-sync to keep running
// therefore in non-interactive mode, the last executed command is INSERT INTO
final int hookIndex = 2;
String content = String.join("\n", statements);
final MockExecutor mockExecutor = new MockExecutor(new SqlParserHelper(), true);
Path historyFilePath = historyTempFile();
OutputStream outputStream = new ByteArrayOutputStream(256);
try (CliClient client =
new CliClient(
() -> TerminalUtils.createDumbTerminal(outputStream),
mockExecutor,
historyFilePath,
null)) {
Thread thread = new Thread(() -> client.executeInNonInteractiveMode(content));
thread.start();
while (!mockExecutor.isAwait) {
Thread.sleep(10);
}
thread.interrupt();
while (thread.isAlive()) {
Thread.sleep(10);
}
assertThat(outputStream.toString())
.contains("java.lang.InterruptedException: sleep interrupted");
}
// read the last executed statement
assertThat(statements.get(hookIndex)).isEqualTo(mockExecutor.receivedStatement.trim());
} |
@Deprecated
public DomainNameMapping<V> add(String hostname, V output) {
map.put(normalizeHostname(checkNotNull(hostname, "hostname")), checkNotNull(output, "output"));
return this;
} | @Test
public void testNullValuesAreForbiddenInDeprecatedApi() {
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() {
new DomainNameMapping<String>("NotFound").add("Some key", null);
}
});
} |
@Override
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
return Logger.getLogger(Logger.GLOBAL_LOGGER_NAME);
} | @Test
public void testGetParentLogger() throws SQLFeatureNotSupportedException {
assertNotNull("getParentLogger", driver.getParentLogger());
} |
public static RelativeUnixPath get(String relativePath) {
if (relativePath.startsWith("/")) {
throw new IllegalArgumentException("Path starts with forward slash (/): " + relativePath);
}
return new RelativeUnixPath(UnixPathParser.parse(relativePath));
} | @Test
public void testGet_absolute() {
try {
RelativeUnixPath.get("/absolute");
Assert.fail();
} catch (IllegalArgumentException ex) {
Assert.assertEquals("Path starts with forward slash (/): /absolute", ex.getMessage());
}
} |
@PUT
@Path("{noteId}/revision/{revisionId}")
@ZeppelinApi
public Response setNoteRevision(@PathParam("noteId") String noteId,
@PathParam("revisionId") String revisionId) throws IOException {
LOGGER.info("Revert note {} to the revision {}", noteId, revisionId);
notebookService.setNoteRevision(noteId, revisionId, getServiceContext(), new RestServiceCallback<>());
return new JsonResponse<>(Status.OK).build();
} | @Test
void testSetNoteRevision() throws IOException {
LOG.info("Running testSetNoteRevision");
String note1Id = null;
try {
String notePath = "note1";
note1Id = notebook.createNote(notePath, anonymous);
//Add a paragraph and commit
NotebookRepoWithVersionControl.Revision first_commit =
notebook.processNote(note1Id, note -> {
Paragraph p1 = note.addNewParagraph(anonymous);
p1.setText("text1");
notebook.saveNote(note, AuthenticationInfo.ANONYMOUS);
return notebook.checkpointNote(note.getId(), note.getPath(), "first commit", anonymous);
});
//Add a paragraph again
notebook.processNote(note1Id, note -> {
Paragraph p2 = note.addNewParagraph(anonymous);
p2.setText("text2");
notebook.saveNote(note, AuthenticationInfo.ANONYMOUS);
return null;
});
// Call restful api to revert note to first revision and verify
CloseableHttpResponse put = httpPut("/notebook/" + note1Id + "/revision/" + first_commit.id, "");
assertThat(put, isAllowed());
Map<String, Object> resp = gson.fromJson(EntityUtils.toString(put.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() {
}.getType());
assertEquals("OK", resp.get("status"));
notebook.processNote(note1Id, note -> {
assertEquals(1, note.getParagraphs().size());
assertEquals("text1", note.getParagraph(0).getText());
return null;
});
put.close();
} finally {
// cleanup
if (null != note1Id) {
notebook.removeNote(note1Id, anonymous);
}
}
} |
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
} | @Test
public void loneColonShouldReadLikeAnyOtherCharacter() throws ScanException {
String input = "java:comp/env/jdbc/datasource";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
assertEquals(input, nodeToStringTransformer.transform());
} |
public static <T> NavigableSet<Point<T>> slowKNearestPoints(Collection<? extends Point<T>> points, Instant time, int k) {
checkNotNull(points, "The input collection of Points cannot be null");
checkNotNull(time, "The input time cannot be null");
checkArgument(k >= 0, "k (" + k + ") must be non-negative");
if (k >= points.size()) {
return newTreeSet(points);
}
if (k == 0) {
return newTreeSet();
}
TreeSet<Point<T>> bestSoFar = new TreeSet<>();
Iterator<? extends Point<T>> iter = points.iterator();
//seed with k pieces of data
while (bestSoFar.size() < k) {
bestSoFar.add(iter.next());
}
//the "next point" must be closer than this to go into the working solution
Duration upperDelta = durationBtw(time, bestSoFar.first().time());
Duration lowerDelta = durationBtw(time, bestSoFar.last().time());
Duration addThreshold = max(upperDelta, lowerDelta);
while (iter.hasNext()) {
Point<T> next = iter.next();
Duration delta = durationBtw(time, next.time());
if (theDuration(delta).isLessThan(addThreshold)) {
/* This element improves the working result. So Add it */
bestSoFar.add(next);
/* Recompute the upper and lower thresholds so we know which Point gets removed. */
upperDelta = durationBtw(time, bestSoFar.first().time());
lowerDelta = durationBtw(time, bestSoFar.last().time());
//remove the k+1 element
if (theDuration(upperDelta).isGreaterThanOrEqualTo(lowerDelta)) {
bestSoFar.pollFirst();
upperDelta = durationBtw(time, bestSoFar.first().time());
} else {
bestSoFar.pollLast();
lowerDelta = durationBtw(time, bestSoFar.last().time());
}
addThreshold = max(upperDelta, lowerDelta);
}
}
return newTreeSet(bestSoFar);
} | @Test
public void testKNearestPointsWithGenericInputs() {
/*
* This test confirms that the signature of kNearestPoints will accept any Collection<T>
* where T is some arbitrary class that implements Point
*/
List<Point<NopHit>> listOfPoints = newArrayList();
NavigableSet<Point<NopHit>> neighbors = slowKNearestPoints(listOfPoints, Instant.EPOCH, 2);
assertTrue(
neighbors.isEmpty(),
"There should be no neighbors because the input list was empty"
);
} |
void start(Iterable<ShardCheckpoint> checkpoints) {
LOG.info(
"Pool {} - starting for stream {} consumer {}. Checkpoints = {}",
poolId,
read.getStreamName(),
consumerArn,
checkpoints);
for (ShardCheckpoint shardCheckpoint : checkpoints) {
checkState(
!state.containsKey(shardCheckpoint.getShardId()),
"Duplicate shard id %s",
shardCheckpoint.getShardId());
ShardState shardState =
new ShardState(
initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory);
state.put(shardCheckpoint.getShardId(), shardState);
}
} | @Test
public void poolReSubscribesAndReadsRecordsAfterCheckPointWithPositiveSubSeqNumber()
throws Exception {
kinesis = new EFOStubbedKinesisAsyncClient(10);
kinesis.stubSubscribeToShard("shard-000", eventWithRecords(3));
kinesis.stubSubscribeToShard("shard-001", eventWithRecords(11, 3));
KinesisReaderCheckpoint initialCheckpoint =
new KinesisReaderCheckpoint(
ImmutableList.of(
afterCheckpoint("shard-000", "0", 1000L),
afterCheckpoint("shard-001", "11", Long.MAX_VALUE)));
pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis);
pool.start(initialCheckpoint);
PoolAssertion.assertPool(pool)
.givesCheckPointedRecords(
ShardAssertion.shard("shard-000")
.gives(KinesisRecordView.generate("shard-000", 1, 2))
.withLastCheckpointSequenceNumber(2),
ShardAssertion.shard("shard-001")
.gives(KinesisRecordView.generate("shard-001", 12, 2))
.withLastCheckpointSequenceNumber(13));
assertThat(kinesis.subscribeRequestsSeen())
.containsExactlyInAnyOrder(
subscribeAtSeqNumber("shard-000", "0"),
subscribeAtSeqNumber("shard-001", "11"),
subscribeAfterSeqNumber("shard-000", "2"),
subscribeAfterSeqNumber("shard-001", "13"));
} |
public ApolloAuditTracer tracer() {
RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
if (requestAttributes != null) {
Object tracer = requestAttributes.getAttribute(ApolloAuditConstants.TRACER,
RequestAttributes.SCOPE_REQUEST);
if (tracer != null) {
return ((ApolloAuditTracer) tracer);
} else {
ApolloAuditTracer newTracer = new ApolloAuditTracer(new ApolloAuditScopeManager(), operatorSupplier);
setTracer(newTracer);
return newTracer;
}
}
return null;
} | @Test
public void testGetTracerNotInRequestThread() {
ApolloAuditTracer get = traceContext.tracer();
assertNull(get);
} |
@Override
public void route(final RouteContext routeContext, final SingleRule singleRule) {
if (routeContext.getRouteUnits().isEmpty() || sqlStatement instanceof SelectStatement) {
routeStatement(routeContext, singleRule);
} else {
RouteContext newRouteContext = new RouteContext();
routeStatement(newRouteContext, singleRule);
combineRouteContext(routeContext, newRouteContext);
}
} | @Test
void assertRouteIfNotExistsDuplicateSingleTable() {
SingleStandardRouteEngine engine = new SingleStandardRouteEngine(Collections.singleton(new QualifiedTable(DefaultDatabase.LOGIC_NAME, "t_order")), mockStatement(true));
assertDoesNotThrow(() -> engine.route(new RouteContext(), mockSingleRule()));
} |
public static FromMatchesFilter createFull(Jid address) {
return new FromMatchesFilter(address, false);
} | @Test
public void fullCompareMatchingEntityFullJid() {
FromMatchesFilter filter = FromMatchesFilter.createFull(FULL_JID1_R1);
Stanza packet = StanzaBuilder.buildMessage().build();
packet.setFrom(FULL_JID1_R1);
assertTrue(filter.accept(packet));
packet.setFrom(BASE_JID1);
assertFalse(filter.accept(packet));
packet.setFrom(FULL_JID1_R2);
assertFalse(filter.accept(packet));
packet.setFrom(BASE_JID2);
assertFalse(filter.accept(packet));
packet.setFrom(FULL_JID2);
assertFalse(filter.accept(packet));
packet.setFrom(BASE_JID3);
assertFalse(filter.accept(packet));
} |
public LoggerContextListener enableJulChangePropagation(LoggerContext loggerContext) {
LogManager.getLogManager().reset();
SLF4JBridgeHandler.removeHandlersForRootLogger();
SLF4JBridgeHandler.install();
LevelChangePropagator propagator = new LevelChangePropagator();
propagator.setContext(loggerContext);
propagator.setResetJUL(true);
propagator.start();
loggerContext.addListener(propagator);
return propagator;
} | @Test
public void enableJulChangePropagation() {
LoggerContext ctx = underTest.getRootContext();
int countListeners = ctx.getCopyOfListenerList().size();
LoggerContextListener propagator = underTest.enableJulChangePropagation(ctx);
assertThat(ctx.getCopyOfListenerList()).hasSize(countListeners + 1);
ctx.removeListener(propagator);
} |
static String[] getListFromProperty(Map<String, String> properties, String key) {
String propValue = properties.get(key);
if (propValue != null) {
return parseAsCsv(key, propValue);
}
return new String[0];
} | @Test
public void shouldGetEmptyList() {
Map<String, String> props = new HashMap<>();
props.put("prop", "");
assertThat(ProjectReactorBuilder.getListFromProperty(props, "prop")).isEmpty();
} |
@Override
public boolean supportsMinimumSQLGrammar() {
return false;
} | @Test
void assertSupportsMinimumSQLGrammar() {
assertFalse(metaData.supportsMinimumSQLGrammar());
} |
protected String decrypt(String encryptedStr) throws Exception {
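// Expected input format: base64(salt):iterations:base64(ciphertext).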
String[] split = encryptedStr.split(":");
checkTrue(split.length == 3, "Wrong format of the encrypted variable (" + encryptedStr + ")");
byte[] salt = Base64.getDecoder().decode(split[0].getBytes(StandardCharsets.UTF_8));
checkTrue(salt.length == saltLengthBytes, "Salt length doesn't match.");
int iterations = Integer.parseInt(split[1]);
byte[] encryptedVal = Base64.getDecoder().decode(split[2].getBytes(StandardCharsets.UTF_8));
return new String(transform(Cipher.DECRYPT_MODE, encryptedVal, salt, iterations), StandardCharsets.UTF_8);
} | @Test(expected = IllegalArgumentException.class)
public void testDecryptionFailWithEmptyPassword() throws Exception {
assumeDefaultAlgorithmsSupported();
AbstractPbeReplacer replacer = createAndInitReplacer("", new Properties());
replacer.decrypt("aSalt1xx:1:test");
} |
@Override
public State getState() {
return State.DONE;
} | @Test
public void testPipelineResultReturnsDone() {
FlinkRunnerResult result = new FlinkRunnerResult(Collections.emptyMap(), 100);
assertThat(result.getState(), is(PipelineResult.State.DONE));
} |
@Override
public ByteBuf writeMedium(int value) {
ensureWritable0(3);
_setMedium(writerIndex, value);
writerIndex += 3;
return this;
} | @Test
public void testWriteMediumAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().writeMedium(1);
}
});
} |
public static Date getDateNowPlusDays(int days) throws ParseException {
Calendar calendar = Calendar.getInstance();
calendar.add(Calendar.DATE, days);
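// Formatting to yyyy-MM-dd and parsing back truncates the time-of-day, yielding midnight of the target date.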
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
String dateStr = dateFormat.format(calendar.getTime());
return dateFormat.parse(dateStr);
} | @Test
public void testGetDateNowPlusDays() throws ParseException {
Assertions.assertNotNull(DateUtil.getDateNowPlusDays(2));
} |
public MappedFile createFile(final long physicalOffset) throws IOException {
// cache max offset
return mappedFileQueue.tryCreateMappedFile(physicalOffset);
} | @Test
public void testCreateFile() throws IOException {
scq = new SparseConsumeQueue(topic, queueId, path, BatchConsumeQueue.CQ_STORE_UNIT_SIZE, defaultMessageStore);
long physicalOffset = Math.abs(ThreadLocalRandom.current().nextLong());
String formatName = UtilAll.offset2FileName(physicalOffset);
scq.createFile(physicalOffset);
assertTrue(Files.exists(Paths.get(path, topic, String.valueOf(queueId), formatName)));
scq.putBatchMessagePositionInfo(5, 4, 3, 2, 1, (short) 1);
assertEquals(4, scq.get(1).getSize());
} |
public static void main(String[] args) {
// Getting a bar series (from any provider: CSV, web service, etc.)
BarSeries series = CsvTradesLoader.loadBitstampSeries();
// Getting the close price of the bars
Num firstClosePrice = series.getBar(0).getClosePrice();
System.out.println("First close price: " + firstClosePrice.doubleValue());
// Or within an indicator:
ClosePriceIndicator closePrice = new ClosePriceIndicator(series);
// Here is the same close price:
System.out.println(firstClosePrice.isEqual(closePrice.getValue(0))); // equal to firstClosePrice
// Getting the simple moving average (SMA) of the close price over the last 5
// bars
SMAIndicator shortSma = new SMAIndicator(closePrice, 5);
// Here is the 5-bars-SMA value at the 42nd index
System.out.println("5-bars-SMA value at the 42nd index: " + shortSma.getValue(42).doubleValue());
// Getting a longer SMA (e.g. over the 30 last bars)
SMAIndicator longSma = new SMAIndicator(closePrice, 30);
// Ok, now let's build our trading rules!
// Buying rules
// We want to buy:
// - if the 5-bars SMA crosses over 30-bars SMA
// - or if the price goes below a defined price (e.g $800.00)
Rule buyingRule = new CrossedUpIndicatorRule(shortSma, longSma)
.or(new CrossedDownIndicatorRule(closePrice, 800));
// Selling rules
// We want to sell:
// - if the 5-bars SMA crosses under 30-bars SMA
// - or if the price loses more than 3%
// - or if the price earns more than 2%
Rule sellingRule = new CrossedDownIndicatorRule(shortSma, longSma)
.or(new StopLossRule(closePrice, series.numOf(3)))
.or(new StopGainRule(closePrice, series.numOf(2)));
// Running our juicy trading strategy...
BarSeriesManager seriesManager = new BarSeriesManager(series);
TradingRecord tradingRecord = seriesManager.run(new BaseStrategy(buyingRule, sellingRule));
System.out.println("Number of positions for our strategy: " + tradingRecord.getPositionCount());
// Analysis
// Getting the winning positions ratio
AnalysisCriterion winningPositionsRatio = new PositionsRatioCriterion(PositionFilter.PROFIT);
System.out.println("Winning positions ratio: " + winningPositionsRatio.calculate(series, tradingRecord));
// Getting a risk-reward ratio
AnalysisCriterion romad = new ReturnOverMaxDrawdownCriterion();
System.out.println("Return over Max Drawdown: " + romad.calculate(series, tradingRecord));
// Total return of our strategy vs total return of a buy-and-hold strategy
AnalysisCriterion vsBuyAndHold = new VersusEnterAndHoldCriterion(new ReturnCriterion());
System.out.println("Our return vs buy-and-hold return: " + vsBuyAndHold.calculate(series, tradingRecord));
// Your turn!
} | @Test
public void test() {
Quickstart.main(null);
} |
public static Schema schemaFromPojoClass(
TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) {
return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier);
} | @Test
public void testNestedCollection() {
Schema schema =
POJOUtils.schemaFromPojoClass(
new TypeDescriptor<NestedCollectionPOJO>() {}, JavaFieldTypeSupplier.INSTANCE);
SchemaTestUtils.assertSchemaEquivalent(NESTED_COLLECTION_POJO_SCHEMA, schema);
} |
@Override
public <T extends ComponentRoot> T get(Class<T> providerId) {
try {
return providerId.getConstructor().newInstance();
} catch (ReflectiveOperationException e) {
throw new IllegalArgumentException(e);
}
} | @Test
void getPrivateConstructorImplementation() {
IllegalArgumentException thrown = assertThrows(
IllegalArgumentException.class,
() -> appRoot.get(ComponentRootPrivateConstructor.class),
"Expected constructor to throw, but it didn't"
);
String expectedMessage = "java.lang.NoSuchMethodException";
assertThat(thrown.getMessage()).startsWith(expectedMessage);
} |
public static WebService.NewParam createQualifiersParameter(WebService.NewAction action, QualifierParameterContext context) {
return createQualifiersParameter(action, context, getAllQualifiers(context.getResourceTypes()));
} | @Test
public void createQualifiersParameter_whenIgnoreIsSetToTrue_shouldNotReturnQualifier() {
when(resourceTypes.getAll()).thenReturn(asList(Q1, Q2, ResourceType.builder("Q3").setProperty("ignored", true).build()));
when(newAction.createParam(PARAM_QUALIFIERS)).thenReturn(newParam);
when(newParam.setPossibleValues(any(Collection.class))).thenReturn(newParam);
when(newParam.setDescription(any())).thenReturn(newParam);
NewParam newParam = WsParameterBuilder
.createQualifiersParameter(newAction, newQualifierParameterContext(i18n, resourceTypes));
verify(newParam).setPossibleValues(Sets.newHashSet(Q1.getQualifier(), Q2.getQualifier()));
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldThrowIfColumnDoesNotExistInSchema() {
// Given:
givenSourceStreamWithSchema(SINGLE_VALUE_COLUMN_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of());
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
ImmutableList.of(
K0,
ColumnName.of("NONEXISTENT")),
ImmutableList.of(
new StringLiteral("foo"),
new StringLiteral("bar"))
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
);
// Then:
assertThat(e.getCause(), (hasMessage(containsString("Column name `NONEXISTENT` does not exist."))));
} |
public void contains(@Nullable CharSequence string) {
checkNotNull(string);
if (actual == null) {
failWithActual("expected a string that contains", string);
} else if (!actual.contains(string)) {
failWithActual("expected to contain", string);
}
} | @Test
public void stringContainsCharSeq() {
CharSequence charSeq = new StringBuilder("c");
assertThat("abc").contains(charSeq);
} |
@Override
public @Nullable V replace(K key, V value) {
requireNonNull(key);
requireNonNull(value);
int[] oldWeight = new int[1];
@SuppressWarnings("unchecked")
K[] nodeKey = (K[]) new Object[1];
@SuppressWarnings("unchecked")
V[] oldValue = (V[]) new Object[1];
long[] now = new long[1];
int weight = weigher.weigh(key, value);
Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
synchronized (n) {
requireIsAlive(key, n);
nodeKey[0] = n.getKey();
oldValue[0] = n.getValue();
oldWeight[0] = n.getWeight();
if ((nodeKey[0] == null) || (oldValue[0] == null)
|| hasExpired(n, now[0] = expirationTicker().read())) {
oldValue[0] = null;
return n;
}
long varTime = expireAfterUpdate(n, key, value, expiry(), now[0]);
n.setValue(value, valueReferenceQueue());
n.setWeight(weight);
setVariableTime(n, varTime);
setAccessTime(n, now[0]);
setWriteTime(n, now[0]);
discardRefresh(k);
return n;
}
});
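// oldValue stays null when the entry was absent, expired, or its key/value had been collected; in that case nothing was replaced.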
if (oldValue[0] == null) {
return null;
}
int weightedDifference = (weight - oldWeight[0]);
if (expiresAfterWrite() || (weightedDifference != 0)) {
afterWrite(new UpdateTask(node, weightedDifference));
} else {
afterRead(node, now[0], /* recordHit */ false);
}
notifyOnReplace(nodeKey[0], oldValue[0], value);
return oldValue[0];
} | @CheckMaxLogLevel(ERROR)
@Test(dataProvider = "caches")
@CacheSpec(population = Population.EMPTY, keys = ReferenceType.STRONG)
public void brokenEquality_replace(
BoundedLocalCache<MutableInt, Int> cache, CacheContext context) {
testForBrokenEquality(cache, context, key -> {
var value = cache.replace(key, context.absentValue());
assertThat(value).isEqualTo(context.absentValue());
});
} |
public <T> Future<Iterable<TimestampedValue<T>>> orderedListFuture(
Range<Long> range, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
// First request has no continuation position.
StateTag<ByteString> stateTag =
StateTag.<ByteString>of(StateTag.Kind.ORDERED_LIST, encodedTag, stateFamily)
.toBuilder()
.setSortedListRange(Preconditions.checkNotNull(range))
.build();
return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
} | @Test
public void testReadSortedList() throws Exception {
long beginning = SortedListRange.getDefaultInstance().getStart();
long end = SortedListRange.getDefaultInstance().getLimit();
Future<Iterable<TimestampedValue<Integer>>> future =
underTest.orderedListFuture(
Range.closedOpen(beginning, end), STATE_KEY_1, STATE_FAMILY, INT_CODER);
Mockito.verifyNoMoreInteractions(mockWindmill);
// Fetch the entire list.
Windmill.KeyedGetDataRequest.Builder expectedRequest =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end))
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES));
Windmill.KeyedGetDataResponse.Builder response =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
SortedListEntry.newBuilder().setValue(intData(5)).setSortKey(5000).setId(5))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(6)).setSortKey(6000).setId(5))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(7)).setSortKey(7000).setId(7))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(8)).setSortKey(8000).setId(8))
.addFetchRanges(
SortedListRange.newBuilder().setStart(beginning).setLimit(end)));
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
.thenReturn(response.build());
Iterable<TimestampedValue<Integer>> results = future.get();
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest.build());
for (TimestampedValue<Integer> unused : results) {
// Iterate over the results to force loading all the pages.
}
Mockito.verifyNoMoreInteractions(mockWindmill);
assertThat(
results,
Matchers.contains(
TimestampedValue.of(5, Instant.ofEpochMilli(5)),
TimestampedValue.of(6, Instant.ofEpochMilli(6)),
TimestampedValue.of(7, Instant.ofEpochMilli(7)),
TimestampedValue.of(8, Instant.ofEpochMilli(8))));
assertNoReader(future);
} |
@SuppressWarnings("MethodLength")
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header)
{
messageHeaderDecoder.wrap(buffer, offset);
final int templateId = messageHeaderDecoder.templateId();
final int schemaId = messageHeaderDecoder.schemaId();
if (schemaId != MessageHeaderDecoder.SCHEMA_ID)
{
if (listenerExtension != null)
{
listenerExtension.onExtensionMessage(
messageHeaderDecoder.blockLength(),
templateId,
schemaId,
messageHeaderDecoder.version(),
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
length - MessageHeaderDecoder.ENCODED_LENGTH);
return;
}
throw new ClusterException("expected schemaId=" +
MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId);
}
switch (templateId)
{
case SessionMessageHeaderDecoder.TEMPLATE_ID:
{
sessionMessageHeaderDecoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
messageHeaderDecoder.blockLength(),
messageHeaderDecoder.version());
final long sessionId = sessionMessageHeaderDecoder.clusterSessionId();
if (sessionId == clusterSessionId)
{
listener.onMessage(
sessionId,
sessionMessageHeaderDecoder.timestamp(),
buffer,
offset + SESSION_HEADER_LENGTH,
length - SESSION_HEADER_LENGTH,
header);
}
break;
}
case SessionEventDecoder.TEMPLATE_ID:
{
sessionEventDecoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
messageHeaderDecoder.blockLength(),
messageHeaderDecoder.version());
final long sessionId = sessionEventDecoder.clusterSessionId();
if (sessionId == clusterSessionId)
{
listener.onSessionEvent(
sessionEventDecoder.correlationId(),
sessionId,
sessionEventDecoder.leadershipTermId(),
sessionEventDecoder.leaderMemberId(),
sessionEventDecoder.code(),
sessionEventDecoder.detail());
}
break;
}
case NewLeaderEventDecoder.TEMPLATE_ID:
{
newLeaderEventDecoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
messageHeaderDecoder.blockLength(),
messageHeaderDecoder.version());
final long sessionId = newLeaderEventDecoder.clusterSessionId();
if (sessionId == clusterSessionId)
{
listener.onNewLeader(
sessionId,
newLeaderEventDecoder.leadershipTermId(),
newLeaderEventDecoder.leaderMemberId(),
newLeaderEventDecoder.ingressEndpoints());
}
break;
}
case AdminResponseDecoder.TEMPLATE_ID:
{
adminResponseDecoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
messageHeaderDecoder.blockLength(),
messageHeaderDecoder.version());
final long sessionId = adminResponseDecoder.clusterSessionId();
if (sessionId == clusterSessionId)
{
final long correlationId = adminResponseDecoder.correlationId();
final AdminRequestType requestType = adminResponseDecoder.requestType();
final AdminResponseCode responseCode = adminResponseDecoder.responseCode();
final String message = adminResponseDecoder.message();
final int payloadOffset = adminResponseDecoder.offset() +
AdminResponseDecoder.BLOCK_LENGTH +
AdminResponseDecoder.messageHeaderLength() +
message.length() +
AdminResponseDecoder.payloadHeaderLength();
final int payloadLength = adminResponseDecoder.payloadLength();
listener.onAdminResponse(
sessionId,
correlationId,
requestType,
responseCode,
message,
buffer,
payloadOffset,
payloadLength);
}
break;
}
default:
break;
}
} | @Test
void onFragmentShouldInvokeOnNewLeaderCallbackIfSessionIdMatches()
{
final int offset = 0;
final long clusterSessionId = 0;
final long leadershipTermId = 6;
final int leaderMemberId = 9999;
final String ingressEndpoints = "ingress endpoints ...";
newLeaderEventEncoder
.wrapAndApplyHeader(buffer, offset, messageHeaderEncoder)
.leadershipTermId(leadershipTermId)
.clusterSessionId(clusterSessionId)
.leaderMemberId(leaderMemberId)
.ingressEndpoints(ingressEndpoints);
final EgressListener egressListener = mock(EgressListener.class);
final Header header = new Header(1, 3);
final EgressAdapter adapter = new EgressAdapter(egressListener, clusterSessionId, mock(Subscription.class), 10);
adapter.onFragment(buffer, offset, newLeaderEventEncoder.encodedLength(), header);
verify(egressListener).onNewLeader(clusterSessionId, leadershipTermId, leaderMemberId, ingressEndpoints);
verifyNoMoreInteractions(egressListener);
} |
public static List<Method> getAllMethods(Class clazz) {
List<Method> all = new ArrayList<Method>();
for (Class<?> c = clazz; c != Object.class && c != null; c = c.getSuperclass()) {
Method[] methods = c.getDeclaredMethods(); // all methods declared on this class, excluding inherited ones
for (Method method : methods) {
int mod = method.getModifiers();
// skip native methods
if (Modifier.isNative(mod)) {
continue;
}
method.setAccessible(true); // works for both private and protected methods
all.add(method);
}
}
return all;
} | @Test
public void getAllMethods() throws Exception {
List<Method> methods = ClassUtils.getAllMethods(TestBean.class);
Assert.assertTrue(methods.size() >= 8);
} |
void writeLogs(OutputStream out, Instant from, Instant to, long maxLines, Optional<String> hostname) {
double fromSeconds = from.getEpochSecond() + from.getNano() / 1e9;
double toSeconds = to.getEpochSecond() + to.getNano() / 1e9;
long linesWritten = 0;
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
for (List<Path> logs : getMatchingFiles(from, to)) {
List<LogLineIterator> logLineIterators = new ArrayList<>();
try {
// Logs in each sub-list contain entries covering the same time interval, so do a merge sort while reading
for (Path log : logs)
logLineIterators.add(new LogLineIterator(log, fromSeconds, toSeconds, hostname));
Iterator<LineWithTimestamp> lines = Iterators.mergeSorted(logLineIterators,
Comparator.comparingDouble(LineWithTimestamp::timestamp));
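// A bounded 1000-line lookahead heap re-sorts lines that are locally out of order before they are written.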
PriorityQueue<LineWithTimestamp> heap = new PriorityQueue<>(Comparator.comparingDouble(LineWithTimestamp::timestamp));
while (lines.hasNext()) {
heap.offer(lines.next());
if (heap.size() > 1000) {
if (linesWritten++ >= maxLines) return;
writer.write(heap.poll().line);
writer.newLine();
}
}
while ( ! heap.isEmpty()) {
if (linesWritten++ >= maxLines) return;
writer.write(heap.poll().line);
writer.newLine();
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
for (LogLineIterator ll : logLineIterators) {
try { ll.close(); } catch (IOException ignored) { }
}
Exceptions.uncheck(writer::flush);
}
}
} | @EnabledIf("hasZstdcat")
@Test
void testThatLogsOutsideRangeAreExcluded() {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
LogReader logReader = new LogReader(logDirectory, Pattern.compile(".*"));
logReader.writeLogs(baos, Instant.ofEpochMilli(150), Instant.ofEpochMilli(3601050), 100, Optional.empty());
assertEquals(log100b + log100a + logv11 + log110, baos.toString(UTF_8));
} |
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
return decoder.decodeFunctionResult(rawInput, outputParameters);
} | @Test
@SuppressWarnings("unchecked")
public void testDecodeStaticStructDynamicArray() {
String rawInput =
"0x0000000000000000000000000000000000000000000000000000000000000020"
+ "0000000000000000000000000000000000000000000000000000000000000002"
+ "000000000000000000000000000000000000000000000000000000000000007b"
+ "000000000000000000000000000000000000000000000000000000000000007b"
+ "000000000000000000000000000000000000000000000000000000000000007b"
+ "000000000000000000000000000000000000000000000000000000000000007b";
assertEquals(
FunctionReturnDecoder.decode(
rawInput,
AbiV2TestFixture.getBarDynamicArrayFunction.getOutputParameters()),
Arrays.asList(
new DynamicArray(
AbiV2TestFixture.Bar.class,
new AbiV2TestFixture.Bar(
BigInteger.valueOf(123), BigInteger.valueOf(123)),
new AbiV2TestFixture.Bar(
BigInteger.valueOf(123), BigInteger.valueOf(123)))));
} |
@Override
public ProxyInvocationHandler parserInterfaceToProxy(Object target, String objectName) throws Exception {
Class<?> serviceInterface = DefaultTargetClassParser.get().findTargetClass(target);
Class<?>[] interfacesIfJdk = DefaultTargetClassParser.get().findInterfaces(target);
if (existsAnnotation(serviceInterface) || existsAnnotation(interfacesIfJdk)) {
ProxyInvocationHandler proxyInvocationHandler = createProxyInvocationHandler();
ConfigurationFactory.getInstance().addConfigListener(ConfigurationKeys.DISABLE_GLOBAL_TRANSACTION, (CachedConfigurationChangeListener) proxyInvocationHandler);
return proxyInvocationHandler;
}
return null;
} | @Test
void parserInterfaceToProxy() throws Exception {
//given
BusinessImpl business = new BusinessImpl();
GlobalTransactionalInterceptorParser globalTransactionalInterceptorParser = new GlobalTransactionalInterceptorParser();
//when
ProxyInvocationHandler proxyInvocationHandler = globalTransactionalInterceptorParser.parserInterfaceToProxy(business, business.getClass().getName());
//then
Assertions.assertNotNull(proxyInvocationHandler);
} |
@Override
public PageResult<ProductSpuDO> getSpuPage(ProductSpuPageReqVO pageReqVO) {
return productSpuMapper.selectPage(pageReqVO);
} | @Test
void getSpuPage_alarmStock_empty() {
// Prepare parameters
ArrayList<ProductSpuDO> createReqVOs = Lists.newArrayList(randomPojo(ProductSpuDO.class, o -> {
o.setCategoryId(generateId());
o.setBrandId(generateId());
o.setDeliveryTemplateId(generateId());
o.setSort(RandomUtil.randomInt(1, 100)); // constrain the sort range
o.setGiveIntegral(generaInt()); // constrain the value to a positive integer
o.setVirtualSalesCount(generaInt()); // constrain the value to a positive integer
o.setPrice(generaInt()); // constrain the value to a positive integer
o.setMarketPrice(generaInt()); // constrain the value to a positive integer
o.setCostPrice(generaInt()); // constrain the value to a positive integer
o.setStock(11); // constrain the value to a positive integer
o.setGiveIntegral(generaInt()); // constrain the value to a positive integer
o.setSalesCount(generaInt()); // constrain the value to a positive integer
o.setBrowseCount(generaInt()); // constrain the value to a positive integer
}), randomPojo(ProductSpuDO.class, o -> {
o.setCategoryId(generateId());
o.setBrandId(generateId());
o.setDeliveryTemplateId(generateId());
o.setSort(RandomUtil.randomInt(1, 100)); // constrain the sort range
o.setGiveIntegral(generaInt()); // constrain the value to a positive integer
o.setVirtualSalesCount(generaInt()); // constrain the value to a positive integer
o.setPrice(generaInt()); // constrain the value to a positive integer
o.setMarketPrice(generaInt()); // constrain the value to a positive integer
o.setCostPrice(generaInt()); // constrain the value to a positive integer
o.setStock(11); // constrain the value to a positive integer
o.setGiveIntegral(generaInt()); // constrain the value to a positive integer
o.setSalesCount(generaInt()); // constrain the value to a positive integer
o.setBrowseCount(generaInt()); // constrain the value to a positive integer
}));
productSpuMapper.insertBatch(createReqVOs);
// Invoke the method under test
ProductSpuPageReqVO productSpuPageReqVO = new ProductSpuPageReqVO();
productSpuPageReqVO.setTabType(ProductSpuPageReqVO.ALERT_STOCK);
PageResult<ProductSpuDO> spuPage = productSpuService.getSpuPage(productSpuPageReqVO);
PageResult<Object> result = PageResult.empty();
Assertions.assertIterableEquals(result.getList(), spuPage.getList());
assertEquals(spuPage.getTotal(), result.getTotal());
} |
@Override
public void doRun() {
if (versionOverride.isPresent()) {
LOG.debug("Elasticsearch version is set manually. Not running check.");
return;
}
final Optional<SearchVersion> probedVersion = this.versionProbe.probe(this.elasticsearchHosts);
probedVersion.ifPresent(version -> {
if (compatible(this.initialElasticsearchVersion, version)) {
notificationService.fixed(Notification.Type.ES_VERSION_MISMATCH);
} else {
LOG.warn("Elasticsearch version currently running ({}) is incompatible with the one Graylog was started " +
"with ({}) - a restart is required!", version, initialElasticsearchVersion);
final Notification notification = notificationService.buildNow()
.addType(Notification.Type.ES_VERSION_MISMATCH)
.addSeverity(Notification.Severity.URGENT)
.addDetail("initial_version", initialElasticsearchVersion.toString())
.addDetail("current_version", version.toString());
notificationService.publishIfFirst(notification);
}
});
} | @Test
void fixesNotificationIfCurrentVersionIsCompatibleWithInitialOne() {
returnProbedVersion(Version.of(8, 2, 3));
createPeriodical(SearchVersion.elasticsearch(8, 1, 2)).doRun();
assertNotificationWasFixed();
} |
@Nonnull
public HazelcastInstance getClient() {
if (getConfig().isShared()) {
retain();
return proxy.get();
} else {
return HazelcastClient.newHazelcastClient(clientConfig);
}
} | @Test
public void shared_client_from_file_should_return_same_instance() {
DataConnectionConfig dataConnectionConfig = sharedDataConnectionConfigFromFile(clusterName);
hazelcastDataConnection = new HazelcastDataConnection(dataConnectionConfig);
HazelcastInstance c1 = hazelcastDataConnection.getClient();
HazelcastInstance c2 = hazelcastDataConnection.getClient();
try {
assertThat(c1).isSameAs(c2);
} finally {
c1.shutdown();
c2.shutdown();
// Delete the file at the end of the test
try {
String filePath = dataConnectionConfig.getProperty(HazelcastDataConnection.CLIENT_XML_PATH);
assert filePath != null;
Files.delete(Paths.get(filePath));
} catch (IOException ignored) {
}
}
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof AlluxioWorkerInfo)) {
return false;
}
AlluxioWorkerInfo that = (AlluxioWorkerInfo) o;
return Objects.equal(mCapacity, that.mCapacity)
&& Objects.equal(mConfiguration, that.mConfiguration)
&& Objects.equal(mMetrics, that.mMetrics)
&& Objects.equal(mRpcAddress, that.mRpcAddress)
&& mStartTimeMs == that.mStartTimeMs
&& Objects.equal(mTierCapacity, that.mTierCapacity)
&& Objects.equal(mTierPaths, that.mTierPaths)
&& mUptimeMs == that.mUptimeMs
&& Objects.equal(mVersion, that.mVersion)
&& Objects.equal(mRevision, that.mRevision);
} | @Test
public void equals() {
alluxio.test.util.CommonUtils.testEquals(AlluxioWorkerInfo.class);
} |
@Override
public void unlinkNode(K parentKey, K childKey) {
final TreeEntryNode<K, V> childNode = nodes.get(childKey);
if (ObjectUtil.isNull(childNode)) {
return;
}
if (childNode.hasParent()) {
childNode.getDeclaredParent().removeDeclaredChild(childNode.getKey());
}
} | @Test
public void unlinkNodeTest() {
final ForestMap<String, String> map = new LinkedForestMap<>(false);
map.linkNodes("a", "b");
final TreeEntry<String, String> parent = map.get("a");
final TreeEntry<String, String> child = map.get("b");
map.unlinkNode("a", "b");
assertFalse(child.hasParent());
assertFalse(parent.hasChildren());
} |
public static ValidateTopicResult validateTopic(String topic) {
if (UtilAll.isBlank(topic)) {
return new ValidateTopicResult(false, "The specified topic is blank.");
}
if (isTopicOrGroupIllegal(topic)) {
return new ValidateTopicResult(false, "The specified topic contains illegal characters, allowing only ^[%|a-zA-Z0-9_-]+$");
}
if (topic.length() > TOPIC_MAX_LENGTH) {
return new ValidateTopicResult(false, "The specified topic is longer than topic max length.");
}
return new ValidateTopicResult(true, "");
} | @Test
public void testTopicValidator_NotPass() {
TopicValidator.ValidateTopicResult res = TopicValidator.validateTopic("");
assertThat(res.isValid()).isFalse();
assertThat(res.getRemark()).contains("The specified topic is blank");
res = TopicValidator.validateTopic("../TopicTest");
assertThat(res.isValid()).isFalse();
assertThat(res.getRemark()).contains("The specified topic contains illegal characters");
res = TopicValidator.validateTopic(generateString(128));
assertThat(res.isValid()).isFalse();
assertThat(res.getRemark()).contains("The specified topic is longer than topic max length.");
} |
int getDictionaryId(String word) throws LongWordException {
requireNonNull(word);
if (word.length() > MAX_WORD_LENGTH) {
throw new LongWordException("Too long value in the metric descriptor found, maximum is "
+ MAX_WORD_LENGTH + ": " + word);
}
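// A new word gets the next sequential id; an existing word keeps the id it was first assigned.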
int nextWordId = orderedDictionary.size();
return orderedDictionary
.computeIfAbsent(word, key -> new Word(word, nextWordId))
.id;
} | @Test
public void when_tooLongWord_then_fails() {
String longWord = Stream.generate(() -> "a")
.limit(MetricsDictionary.MAX_WORD_LENGTH + 1)
.collect(Collectors.joining());
assertThrows(LongWordException.class, () -> dictionary.getDictionaryId(longWord));
} |
public static FuryBuilder builder() {
return new FuryBuilder();
} | @Test
public void testPrintReadObjectsWhenFailed() {
Fury fury =
Fury.builder()
.withRefTracking(true)
.withCodegen(false)
.requireClassRegistration(false)
.build();
PrintReadObject o = new PrintReadObject(true);
try {
serDe(fury, ImmutableList.of(ImmutableList.of("a", "b"), o));
Assert.fail();
} catch (FuryException e) {
Assert.assertTrue(e.getMessage().contains("[a, b]"));
}
} |
@VisibleForTesting
public static <OUT>
RecordWriterDelegate<SerializationDelegate<StreamRecord<OUT>>>
createRecordWriterDelegate(
StreamConfig configuration, Environment environment) {
List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> recordWrites =
createRecordWriters(configuration, environment);
if (recordWrites.size() == 1) {
return new SingleRecordWriter<>(recordWrites.get(0));
} else if (recordWrites.size() == 0) {
return new NonRecordWriter<>();
} else {
return new MultipleRecordWriters<>(recordWrites);
}
} | @Test
void testForwardPartitionerIsConvertedToRebalanceOnParallelismChanges() throws Exception {
StreamTaskMailboxTestHarnessBuilder<Integer> builder =
new StreamTaskMailboxTestHarnessBuilder<>(
OneInputStreamTask::new, BasicTypeInfo.INT_TYPE_INFO)
.addInput(BasicTypeInfo.INT_TYPE_INFO)
.setOutputPartitioner(new ForwardPartitioner<>())
.setupOutputForSingletonOperatorChain(
new TestBoundedOneInputStreamOperator());
try (StreamTaskMailboxTestHarness<Integer> harness = builder.build()) {
RecordWriterDelegate<SerializationDelegate<StreamRecord<Object>>> recordWriterDelegate =
harness.streamTask.createRecordWriterDelegate(
harness.streamTask.configuration, harness.streamMockEnvironment);
// Prerequisite: We are using the ForwardPartitioner
assertThat(
((ChannelSelectorRecordWriter)
((SingleRecordWriter) recordWriterDelegate)
.getRecordWriter(0))
.getChannelSelector())
.isInstanceOf(ForwardPartitioner.class);
// Simulate changed downstream task parallelism (1->2)
List<ResultPartitionWriter> newOutputs = new ArrayList<>();
newOutputs.add(
new MockResultPartitionWriter() {
@Override
public int getNumberOfSubpartitions() {
return 2;
}
});
harness.streamMockEnvironment.setOutputs(newOutputs);
// Re-create outputs
recordWriterDelegate =
harness.streamTask.createRecordWriterDelegate(
harness.streamTask.configuration, harness.streamMockEnvironment);
// We should now have a RebalancePartitioner to distribute the load
// for the non-matching downstream parallelism
assertThat(
((ChannelSelectorRecordWriter)
((SingleRecordWriter) recordWriterDelegate)
.getRecordWriter(0))
.getChannelSelector())
.isInstanceOf(RebalancePartitioner.class);
}
} |
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
} | @SuppressWarnings("unchecked")
@Test
public void shouldBuildWithClauseWithTopicProperties() {
// Given:
givenStatement("CREATE STREAM x WITH (kafka_topic='topic') AS SELECT * FROM SOURCE;");
when(builder.build()).thenReturn(new TopicProperties("expectedName", 10, (short) 10, (long) 5000));
// When:
final ConfiguredStatement<CreateAsSelect> result =
(ConfiguredStatement<CreateAsSelect>) injector.inject(statement, builder);
// Then:
final CreateSourceAsProperties props = result.getStatement().getProperties();
assertThat(props.getKafkaTopic(), is(Optional.of("expectedName")));
assertThat(props.getPartitions(), is(Optional.of(10)));
assertThat(props.getReplicas(), is(Optional.of((short) 10)));
assertThat(props.getRetentionInMillis(), is(Optional.of((long) 5000)));
} |
public static Comparator<BaseOptionModel> createGroupAndLabelComparator() {
return new EndpointOptionGroupAndLabelComparator();
} | @Test
public void testSort2() throws IOException {
final String json = PackageHelper.loadText(new File(
Objects.requireNonNull(getClass().getClassLoader().getResource("json/test_component4.json")).getFile()));
final ComponentModel componentModel = JsonMapper.generateComponentModel(json);
componentModel.getComponentOptions().sort(EndpointHelper.createGroupAndLabelComparator());
assertEquals("baseUri,clearHeaders,cryptoContextProperties,disallowDoctypeDecl,"
+ "keySelector,omitXmlDeclaration,lazyStartProducer,outputNodeSearch,outputNodeSearchType,"
+ "outputXmlEncoding,removeSignatureElements,schemaResourceUri,secureValidation,"
+ "validationFailedHandler,xmlSignature2Message,xmlSignatureChecker,basicPropertyBinding,"
+ "uriDereferencer,verifierConfiguration",
componentModel.getComponentOptions().stream()
.map(ComponentOptionModel::getName).collect(Collectors.joining(",")));
} |
@ExecuteOn(TaskExecutors.IO)
@Post(uri = "validate", consumes = MediaType.APPLICATION_YAML)
@Operation(tags = {"Flows"}, summary = "Validate a list of flows")
public List<ValidateConstraintViolation> validateFlows(
@Parameter(description = "A list of flows") @Body String flows
) {
AtomicInteger index = new AtomicInteger(0);
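// Split the multi-document YAML payload on "---" separators and validate each document independently.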
return Stream
.of(flows.split("\\n+---\\n*?"))
.map(flow -> {
ValidateConstraintViolation.ValidateConstraintViolationBuilder<?, ?> validateConstraintViolationBuilder = ValidateConstraintViolation.builder();
validateConstraintViolationBuilder.index(index.getAndIncrement());
try {
Flow flowParse = yamlFlowParser.parse(flow, Flow.class);
Integer sentRevision = flowParse.getRevision();
if (sentRevision != null) {
Integer lastRevision = Optional.ofNullable(flowRepository.lastRevision(tenantService.resolveTenant(), flowParse.getNamespace(), flowParse.getId()))
.orElse(0);
validateConstraintViolationBuilder.outdated(!sentRevision.equals(lastRevision + 1));
}
validateConstraintViolationBuilder.deprecationPaths(flowService.deprecationPaths(flowParse));
List<String> warnings = new ArrayList<>();
warnings.addAll(flowService.warnings(flowParse));
warnings.addAll(flowService.relocations(flow).stream().map(relocation -> relocation.from() + " is replaced by " + relocation.to()).toList());
validateConstraintViolationBuilder.warnings(warnings);
validateConstraintViolationBuilder.flow(flowParse.getId());
validateConstraintViolationBuilder.namespace(flowParse.getNamespace());
modelValidator.validate(pluginDefaultService.injectDefaults(flowParse));
} catch (ConstraintViolationException e) {
validateConstraintViolationBuilder.constraints(e.getMessage());
} catch (RuntimeException re) {
// In case of any error, we add a validation violation so the error is displayed in the UI.
// We may change that by throwing an internal error and handling it in the UI, but this should not occur except for rare cases
// in dev, like incompatible plugin versions.
log.error("Unable to validate the flow", re);
validateConstraintViolationBuilder.constraints("Unable to validate the flow: " + re.getMessage());
}
return validateConstraintViolationBuilder.build();
})
.collect(Collectors.toList());
} | @Test
void validateFlows() throws IOException {
URL resource = TestsUtils.class.getClassLoader().getResource("flows/validateMultipleValidFlows.yaml");
String flow = Files.readString(Path.of(Objects.requireNonNull(resource).getPath()), Charset.defaultCharset());
String firstFlowSource = flow.split("(?m)^---")[0];
Flow firstFlow = parseFlow(firstFlowSource);
jdbcFlowRepository.create(firstFlow, firstFlowSource, firstFlow);
HttpResponse<List<ValidateConstraintViolation>> response = client.toBlocking().exchange(POST("/api/v1/flows/validate", flow).contentType(MediaType.APPLICATION_YAML), Argument.listOf(ValidateConstraintViolation.class));
List<ValidateConstraintViolation> body = response.body();
assertThat(body.size(), is(2));
// We don't send any revision while the flow already exists so it's outdated
assertThat(body.getFirst().isOutdated(), is(true));
assertThat(body.getFirst().getDeprecationPaths(), hasSize(3));
assertThat(body.getFirst().getDeprecationPaths(), containsInAnyOrder("tasks[1]", "tasks[1].additionalProperty", "listeners"));
assertThat(body.getFirst().getWarnings().size(), is(1));
assertThat(body.getFirst().getWarnings().getFirst(), containsString("The system namespace is reserved for background workflows"));
assertThat(body.get(1).isOutdated(), is(false));
assertThat(body.get(1).getDeprecationPaths(), containsInAnyOrder("tasks[0]", "tasks[1]"));
assertThat(body, everyItem(
Matchers.hasProperty("constraints", is(nullValue()))
));
resource = TestsUtils.class.getClassLoader().getResource("flows/validateMultipleInvalidFlows.yaml");
flow = Files.readString(Path.of(Objects.requireNonNull(resource).getPath()), Charset.defaultCharset());
response = client.toBlocking().exchange(POST("/api/v1/flows/validate", flow).contentType(MediaType.APPLICATION_YAML), Argument.listOf(ValidateConstraintViolation.class));
body = response.body();
assertThat(body.size(), is(2));
assertThat(body.getFirst().getConstraints(), containsString("Unrecognized field \"unknownProp\""));
assertThat(body.get(1).getConstraints(), containsString("Invalid type: io.kestra.plugin.core.debug.UnknownTask"));
} |
public void recordSendOffsets(long duration) {
sendOffsetsSensor.record(duration);
} | @Test
public void shouldRecordSendOffsetsTime() {
// When:
producerMetrics.recordSendOffsets(METRIC_VALUE);
// Then:
assertMetricValue(TXN_SEND_OFFSETS_TIME_TOTAL);
} |
public static CoordinatorRecord newConsumerGroupMemberSubscriptionRecord(
String groupId,
ConsumerGroupMember member
) {
List<String> topicNames = new ArrayList<>(member.subscribedTopicNames());
Collections.sort(topicNames);
return new CoordinatorRecord(
new ApiMessageAndVersion(
new ConsumerGroupMemberMetadataKey()
.setGroupId(groupId)
.setMemberId(member.memberId()),
(short) 5
),
new ApiMessageAndVersion(
new ConsumerGroupMemberMetadataValue()
.setRackId(member.rackId())
.setInstanceId(member.instanceId())
.setClientId(member.clientId())
.setClientHost(member.clientHost())
.setSubscribedTopicNames(topicNames)
.setSubscribedTopicRegex(member.subscribedTopicRegex())
.setServerAssignor(member.serverAssignorName().orElse(null))
.setRebalanceTimeoutMs(member.rebalanceTimeoutMs())
.setClassicMemberMetadata(member.classicMemberMetadata().orElse(null)),
(short) 0
)
);
} | @Test
public void testNewConsumerGroupMemberSubscriptionRecord() {
List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = new ArrayList<>();
protocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol()
.setName("range")
.setMetadata(new byte[0]));
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member-id")
.setInstanceId("instance-id")
.setRackId("rack-id")
.setRebalanceTimeoutMs(5000)
.setClientId("client-id")
.setClientHost("client-host")
.setSubscribedTopicNames(Arrays.asList("foo", "zar", "bar"))
.setSubscribedTopicRegex("regex")
.setServerAssignorName("range")
.setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
.setSupportedProtocols(protocols))
.build();
CoordinatorRecord expectedRecord = new CoordinatorRecord(
new ApiMessageAndVersion(
new ConsumerGroupMemberMetadataKey()
.setGroupId("group-id")
.setMemberId("member-id"),
(short) 5),
new ApiMessageAndVersion(
new ConsumerGroupMemberMetadataValue()
.setInstanceId("instance-id")
.setRackId("rack-id")
.setRebalanceTimeoutMs(5000)
.setClientId("client-id")
.setClientHost("client-host")
.setSubscribedTopicNames(Arrays.asList("bar", "foo", "zar"))
.setSubscribedTopicRegex("regex")
.setServerAssignor("range")
.setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
.setSupportedProtocols(protocols)),
(short) 0));
assertEquals(expectedRecord, newConsumerGroupMemberSubscriptionRecord(
"group-id",
member
));
} |
@Operation(summary = "start new activation session with other app", tags = { SwaggerConfig.ACTIVATE_WITH_APP }, operationId = "app_activate_start",
parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")})
@GetMapping(value = "activate/start", produces = "application/json")
@ResponseBody
public AppResponse startActivateWithOtherApp() throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException {
return service.startFlow(ActivateAppWithOtherAppFlow.NAME, Action.START_ACTIVATE_WITH_APP, null);
} | @Test
void validateIfCorrectProcessesAreCalledWithOtherApp() throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException {
activationController.startActivateWithOtherApp();
verify(flowService, times(1)).startFlow(ActivateAppWithOtherAppFlow.NAME, Action.START_ACTIVATE_WITH_APP, null);
} |
public static <T> RemoteIterator<T> remoteIteratorFromSingleton(
@Nullable T singleton) {
return new SingletonIterator<>(singleton);
} | @Test
public void testSingleton() throws Throwable {
StringBuilder result = new StringBuilder();
String name = "singleton";
RemoteIterator<String> it = remoteIteratorFromSingleton(name);
assertStringValueContains(it, "SingletonIterator");
assertStringValueContains(it, name);
verifyInvoked(
it,
1,
(s) -> result.append(s));
assertThat(result.toString())
.isEqualTo(name);
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
final PathContainerService service = new DefaultPathContainerService();
if(service.isContainer(file)) {
for(RootFolder r : session.roots()) {
if(StringUtils.equalsIgnoreCase(file.getName(), PathNormalizer.name(r.getPath()))
|| StringUtils.equalsIgnoreCase(file.getName(), PathNormalizer.name(r.getName()))) {
return this.toAttributes(r);
}
}
throw new NotfoundException(file.getAbsolute());
}
final FilesApi files = new FilesApi(session.getClient());
return this.toAttributes(files.filesGet_0(URIEncoder.encode(fileid.getPrefixedPath(file))));
}
catch(ApiException e) {
throw new StoregateExceptionMappingService(fileid).map("Failure to read attributes of {0}", e, file);
}
} | @Test
public void testDefaultPaths() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
for(Path container : new StoregateListService(session, nodeid).list(Home.ROOT, new DisabledListProgressListener())) {
assertEquals(container.attributes(), new StoregateAttributesFinderFeature(session, nodeid).find(container));
}
for(RootFolder root : session.roots()) {
assertNotEquals(PathAttributes.EMPTY, new StoregateAttributesFinderFeature(session, nodeid).find(new Path(root.getPath(), EnumSet.of(Path.Type.directory))));
}
} |
public static UUnary create(Kind unaryOp, UExpression expression) {
checkArgument(
UNARY_OP_CODES.containsKey(unaryOp), "%s is not a recognized unary operation", unaryOp);
return new AutoValue_UUnary(unaryOp, expression);
} | @Test
public void preIncrement() {
assertUnifiesAndInlines("++foo", UUnary.create(Kind.PREFIX_INCREMENT, fooIdent));
} |
public static boolean checkPermission(byte neededPerm, byte ownedPerm) {
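// An explicit DENY bit on the owned permissions rejects everything.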
if ((ownedPerm & DENY) > 0) {
return false;
}
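// ANY is satisfied by holding either PUB or SUB.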
if ((neededPerm & ANY) > 0) {
return (ownedPerm & PUB) > 0 || (ownedPerm & SUB) > 0;
}
return (neededPerm & ownedPerm) > 0;
} | @Test
public void checkPermissionTest() {
boolean boo = Permission.checkPermission(Permission.DENY, Permission.DENY);
Assert.assertFalse(boo);
boo = Permission.checkPermission(Permission.PUB, Permission.PUB);
Assert.assertTrue(boo);
boo = Permission.checkPermission(Permission.SUB, Permission.SUB);
Assert.assertTrue(boo);
boo = Permission.checkPermission(Permission.PUB, (byte) (Permission.PUB | Permission.SUB));
Assert.assertTrue(boo);
boo = Permission.checkPermission(Permission.SUB, (byte) (Permission.PUB | Permission.SUB));
Assert.assertTrue(boo);
boo = Permission.checkPermission(Permission.ANY, (byte) (Permission.PUB | Permission.SUB));
Assert.assertTrue(boo);
boo = Permission.checkPermission(Permission.ANY, Permission.SUB);
Assert.assertTrue(boo);
boo = Permission.checkPermission(Permission.ANY, Permission.PUB);
Assert.assertTrue(boo);
boo = Permission.checkPermission(Permission.DENY, Permission.ANY);
Assert.assertFalse(boo);
boo = Permission.checkPermission(Permission.DENY, Permission.PUB);
Assert.assertFalse(boo);
boo = Permission.checkPermission(Permission.DENY, Permission.SUB);
Assert.assertFalse(boo);
} |
public Set<Analysis.AliasedDataSource> extractDataSources(final AstNode node) {
new Visitor().process(node, null);
return getAllSources();
} | @Test
public void shouldHandleAliasedJoinDataSources() {
// Given:
final AstNode stmt = givenQuery("SELECT * FROM TEST1 t1 JOIN TEST2 t2"
+ " ON test1.col1 = test2.col1;");
// When:
extractor.extractDataSources(stmt);
// Then:
assertContainsAlias(T1, T2);
} |
@Override @Nullable public String errorCode() {
if (status.isOk()) return null;
return status.getCode().name();
} | @Test void errorCode() {
assertThat(response.errorCode()).isEqualTo("CANCELLED");
} |
ClassicGroup getOrMaybeCreateClassicGroup(
String groupId,
boolean createIfNotExists
) throws GroupIdNotFoundException {
Group group = groups.get(groupId);
if (group == null && !createIfNotExists) {
throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId));
}
if (group == null) {
ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics);
groups.put(groupId, classicGroup);
metrics.onClassicGroupStateTransition(null, classicGroup.currentState());
return classicGroup;
} else {
if (group.type() == CLASSIC) {
return (ClassicGroup) group;
} else {
// We don't support upgrading/downgrading between protocols at the moment so
// we throw an exception if a group exists with the wrong type.
throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.",
groupId));
}
}
} | @Test
public void testStaticMemberFenceDuplicateRejoiningFollowerAfterMemberIdChanged() throws Exception {
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.build();
GroupMetadataManagerTestContext.RebalanceResult rebalanceResult = context.staticMembersJoinAndRebalance(
"group-id",
"leader-instance-id",
"follower-instance-id"
);
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);
// Known leader rejoins will trigger rebalance.
JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
.withGroupId("group-id")
.withGroupInstanceId("leader-instance-id")
.withMemberId(rebalanceResult.leaderId)
.withDefaultProtocolTypeAndProtocols()
.withRebalanceTimeoutMs(10000)
.build();
GroupMetadataManagerTestContext.JoinResult leaderJoinResult = context.sendClassicGroupJoin(request);
assertTrue(leaderJoinResult.records.isEmpty());
assertFalse(leaderJoinResult.joinFuture.isDone());
assertTrue(group.isInState(PREPARING_REBALANCE));
// Duplicate follower joins group will trigger member id replacement.
GroupMetadataManagerTestContext.JoinResult duplicateFollowerJoinResult = context.sendClassicGroupJoin(
request
.setMemberId(UNKNOWN_MEMBER_ID)
.setGroupInstanceId("follower-instance-id")
);
assertTrue(duplicateFollowerJoinResult.records.isEmpty());
assertTrue(duplicateFollowerJoinResult.joinFuture.isDone());
// Old follower rejoins group will fail because member id is already updated.
GroupMetadataManagerTestContext.JoinResult oldFollowerJoinResult = context.sendClassicGroupJoin(
request.setMemberId(rebalanceResult.followerId)
);
assertTrue(oldFollowerJoinResult.records.isEmpty());
assertTrue(oldFollowerJoinResult.joinFuture.isDone());
assertTrue(leaderJoinResult.joinFuture.isDone());
JoinGroupResponseData expectedLeaderResponse = new JoinGroupResponseData()
.setErrorCode(Errors.NONE.code())
.setGenerationId(rebalanceResult.generationId + 1)
.setMemberId(rebalanceResult.leaderId)
.setLeader(rebalanceResult.leaderId)
.setProtocolName("range")
.setProtocolType("consumer")
.setMembers(toJoinResponseMembers(group));
checkJoinGroupResponse(
expectedLeaderResponse,
leaderJoinResult.joinFuture.get(),
group,
COMPLETING_REBALANCE,
mkSet("leader-instance-id", "follower-instance-id")
);
assertTrue(duplicateFollowerJoinResult.joinFuture.isDone());
JoinGroupResponseData expectedDuplicateFollowerResponse = new JoinGroupResponseData()
.setErrorCode(Errors.NONE.code())
.setGenerationId(rebalanceResult.generationId + 1)
.setMemberId(duplicateFollowerJoinResult.joinFuture.get().memberId())
.setLeader(rebalanceResult.leaderId)
.setProtocolName("range")
.setProtocolType("consumer")
.setMembers(Collections.emptyList());
checkJoinGroupResponse(
expectedDuplicateFollowerResponse,
duplicateFollowerJoinResult.joinFuture.get(),
group,
COMPLETING_REBALANCE,
Collections.emptySet()
);
assertTrue(duplicateFollowerJoinResult.joinFuture.isDone());
JoinGroupResponseData expectedOldFollowerResponse = new JoinGroupResponseData()
.setErrorCode(Errors.FENCED_INSTANCE_ID.code())
.setGenerationId(-1)
.setMemberId(rebalanceResult.followerId)
.setLeader(UNKNOWN_MEMBER_ID)
.setProtocolName(null)
.setProtocolType(null)
.setMembers(Collections.emptyList());
checkJoinGroupResponse(
expectedOldFollowerResponse,
oldFollowerJoinResult.joinFuture.get(),
group,
COMPLETING_REBALANCE,
Collections.emptySet()
);
} |
public boolean appliesTo(Component project, @Nullable MetricEvaluationResult metricEvaluationResult) {
return metricEvaluationResult != null
&& metricEvaluationResult.evaluationResult.level() != Measure.Level.OK
&& METRICS_TO_IGNORE_ON_SMALL_CHANGESETS.contains(metricEvaluationResult.condition.getMetric().getKey())
&& config.getConfiguration().getBoolean(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES).orElse(true)
&& isSmallChangeset(project);
} | @Test
public void should_not_change_issue_related_metrics() {
QualityGateMeasuresStep.MetricEvaluationResult metricEvaluationResult = generateEvaluationResult(NEW_BUGS_KEY, ERROR);
Component project = generateNewRootProject();
measureRepository.addRawMeasure(PROJECT_REF, CoreMetrics.NEW_LINES_KEY, newMeasureBuilder().create(19));
boolean result = underTest.appliesTo(project, metricEvaluationResult);
assertThat(result).isFalse();
} |
public static Iterable<String> expandAtNFilepattern(String filepattern) {
ImmutableList.Builder<String> builder = ImmutableList.builder();
Matcher match = AT_N_SPEC.matcher(filepattern);
if (!match.find()) {
builder.add(filepattern);
} else {
int numShards = Integer.parseInt(match.group("N"));
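// Renders e.g. "-003232-of-200000": the shard index is zero-padded to a width derived from numShards, the total to at least five digits.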
String formatString = "-%0" + getShardWidth(numShards, filepattern) + "d-of-%05d";
for (int i = 0; i < numShards; ++i) {
builder.add(
AT_N_SPEC.matcher(filepattern).replaceAll(String.format(formatString, i, numShards)));
}
if (match.find()) {
throw new IllegalArgumentException(
"More than one @N wildcard found in filepattern: " + filepattern);
}
}
return builder.build();
} | @Test
public void testExpandAtNFilepatternLarge() throws Exception {
Iterable<String> iterable = Filepatterns.expandAtNFilepattern("gs://bucket/file@200000.ism");
assertThat(iterable, Matchers.<String>iterableWithSize(200000));
assertThat(
iterable,
hasItems("gs://bucket/file-003232-of-200000.ism", "gs://bucket/file-199999-of-200000.ism"));
} |
@JsonIgnore
public RunPolicy getRestartRunPolicyWithUpstreamRestartMode() {
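// Only a restart from the inline root honours the requested restart policy;
// every other upstream restart mode maps to RESTART_FROM_SPECIFIC.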
if (upstreamRestartMode == UpstreamRestartMode.RESTART_FROM_INLINE_ROOT) {
return RunPolicy.valueOf(restartPolicy.name());
} else {
return RunPolicy.RESTART_FROM_SPECIFIC;
}
} | @Test
public void testGetRestartRunPolicyWithUpstreamRestartMode() {
StepInstanceRestartRequest request = new StepInstanceRestartRequest();
request.setRestartPolicy(RestartPolicy.RESTART_FROM_BEGINNING);
assertEquals(
RunPolicy.RESTART_FROM_SPECIFIC, request.getRestartRunPolicyWithUpstreamRestartMode());
request.setUpstreamRestartMode(UpstreamRestartMode.RESTART_FROM_STEP);
assertEquals(
RunPolicy.RESTART_FROM_SPECIFIC, request.getRestartRunPolicyWithUpstreamRestartMode());
request.setUpstreamRestartMode(UpstreamRestartMode.RESTART_FROM_INLINE_ROOT);
assertEquals(
RunPolicy.RESTART_FROM_BEGINNING, request.getRestartRunPolicyWithUpstreamRestartMode());
} |
public ClientSession toClientSession()
{
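// Builds the session from CLI options; time zone and locale fall back to the JVM defaults.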
return new ClientSession(
parseServer(server),
user,
source,
Optional.empty(),
parseClientTags(clientTags),
clientInfo,
catalog,
schema,
TimeZone.getDefault().getID(),
Locale.getDefault(),
toResourceEstimates(resourceEstimates),
toProperties(sessionProperties),
emptyMap(),
emptyMap(),
toExtraCredentials(extraCredentials),
null,
clientRequestTimeout,
disableCompression,
emptyMap(),
emptyMap(),
validateNextUriSource);
} | @Test
public void testServerHostOnly()
{
ClientOptions options = new ClientOptions();
options.server = "localhost";
ClientSession session = options.toClientSession();
assertEquals(session.getServer().toString(), "http://localhost:80");
} |
public List<SchemaChangeEvent> applySchemaChange(SchemaChangeEvent schemaChangeEvent) {
List<SchemaChangeEvent> events = new ArrayList<>();
TableId originalTable = schemaChangeEvent.tableId();
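// Try every routing rule; events that match no rule are forwarded unchanged.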
boolean noRouteMatched = true;
for (Tuple3<Selectors, String, String> route : routes) {
// Check routing table
if (!route.f0.isMatch(originalTable)) {
continue;
}
noRouteMatched = false;
// Matched a routing rule
TableId derivedTable = resolveReplacement(originalTable, route);
Set<TableId> originalTables =
derivationMapping.computeIfAbsent(derivedTable, t -> new HashSet<>());
originalTables.add(originalTable);
if (originalTables.size() == 1) {
// single source mapping, replace the table ID directly
SchemaChangeEvent derivedSchemaChangeEvent =
ChangeEventUtils.recreateSchemaChangeEvent(schemaChangeEvent, derivedTable);
events.add(derivedSchemaChangeEvent);
} else {
// multiple source mapping (merging tables)
Schema derivedTableSchema =
schemaManager.getLatestEvolvedSchema(derivedTable).get();
events.addAll(
Objects.requireNonNull(
SchemaChangeEventVisitor.visit(
schemaChangeEvent,
addColumnEvent ->
handleAddColumnEvent(
addColumnEvent,
derivedTableSchema,
derivedTable),
alterColumnTypeEvent ->
handleAlterColumnTypeEvent(
alterColumnTypeEvent,
derivedTableSchema,
derivedTable),
createTableEvent ->
handleCreateTableEvent(
createTableEvent,
derivedTableSchema,
derivedTable),
dropColumnEvent ->
                                                Collections.emptyList(), // Column drop shouldn't be spread to route destination.
dropTableEvent ->
                                                Collections.emptyList(), // Table drop shouldn't be spread to route destination.
renameColumnEvent ->
handleRenameColumnEvent(
renameColumnEvent,
derivedTableSchema,
derivedTable),
truncateTableEvent ->
                                                Collections.emptyList() // Table truncation shouldn't be spread to route destination.
)));
}
}
if (noRouteMatched) {
// No routes are matched, leave it as-is
return Collections.singletonList(schemaChangeEvent);
} else {
return events;
}
} | @Test
void testMergingTablesWithExactSameSchema() {
SchemaManager schemaManager = new SchemaManager();
SchemaDerivation schemaDerivation =
new SchemaDerivation(schemaManager, ROUTES, new HashMap<>());
// Create table 1
List<SchemaChangeEvent> derivedChangesAfterCreateTable =
schemaDerivation.applySchemaChange(new CreateTableEvent(TABLE_1, SCHEMA));
assertThat(derivedChangesAfterCreateTable).hasSize(1);
assertThat(derivedChangesAfterCreateTable.get(0))
.asCreateTableEvent()
.hasTableId(MERGED_TABLE)
.hasSchema(SCHEMA);
derivedChangesAfterCreateTable.forEach(schemaManager::applyEvolvedSchemaChange);
// Create table 2
assertThat(schemaDerivation.applySchemaChange(new CreateTableEvent(TABLE_2, SCHEMA)))
.isEmpty();
// Add column for table 1
AddColumnEvent.ColumnWithPosition newCol1 =
new AddColumnEvent.ColumnWithPosition(
new PhysicalColumn("new_col1", DataTypes.STRING(), null));
AddColumnEvent.ColumnWithPosition newCol2 =
new AddColumnEvent.ColumnWithPosition(
new PhysicalColumn("new_col2", DataTypes.STRING(), null));
List<AddColumnEvent.ColumnWithPosition> newColumns = Arrays.asList(newCol1, newCol2);
List<SchemaChangeEvent> derivedChangesAfterAddColumn =
schemaDerivation.applySchemaChange(new AddColumnEvent(TABLE_1, newColumns));
assertThat(derivedChangesAfterAddColumn).hasSize(1);
assertThat(derivedChangesAfterAddColumn.get(0))
.asAddColumnEvent()
.hasTableId(MERGED_TABLE)
.containsAddedColumns(newCol1, newCol2);
derivedChangesAfterAddColumn.forEach(schemaManager::applyEvolvedSchemaChange);
// Add column for table 2
assertThat(schemaDerivation.applySchemaChange(new AddColumnEvent(TABLE_2, newColumns)))
.isEmpty();
// Alter column type for table 1
ImmutableMap<String, DataType> typeMapping = ImmutableMap.of("age", DataTypes.BIGINT());
List<SchemaChangeEvent> derivedChangesAfterAlterColumnType =
schemaDerivation.applySchemaChange(new AlterColumnTypeEvent(TABLE_1, typeMapping));
assertThat(derivedChangesAfterAlterColumnType).hasSize(1);
assertThat(derivedChangesAfterAlterColumnType.get(0))
.asAlterColumnTypeEvent()
.hasTableId(MERGED_TABLE)
.containsTypeMapping(typeMapping);
derivedChangesAfterAlterColumnType.forEach(schemaManager::applyEvolvedSchemaChange);
// Alter column type for table 2
assertThat(
schemaDerivation.applySchemaChange(
new AlterColumnTypeEvent(TABLE_2, typeMapping)))
.isEmpty();
// Drop column for table 1
List<String> droppedColumns = Arrays.asList("new_col1", "new_col2");
assertThat(schemaDerivation.applySchemaChange(new DropColumnEvent(TABLE_1, droppedColumns)))
.isEmpty();
// Drop column for table 2
assertThat(schemaDerivation.applySchemaChange(new DropColumnEvent(TABLE_2, droppedColumns)))
.isEmpty();
// Rename column for table 1
Map<String, String> renamedColumns = ImmutableMap.of("name", "last_name");
List<SchemaChangeEvent> derivedChangesAfterRenameColumn =
schemaDerivation.applySchemaChange(new RenameColumnEvent(TABLE_1, renamedColumns));
assertThat(derivedChangesAfterRenameColumn).hasSize(1);
assertThat(derivedChangesAfterRenameColumn.get(0))
.asAddColumnEvent()
.hasTableId(MERGED_TABLE)
.containsAddedColumns(
new AddColumnEvent.ColumnWithPosition(
new PhysicalColumn("last_name", DataTypes.STRING(), null)));
derivedChangesAfterRenameColumn.forEach(schemaManager::applyEvolvedSchemaChange);
// Rename column for table 2
assertThat(
schemaDerivation.applySchemaChange(
new RenameColumnEvent(TABLE_2, renamedColumns)))
.isEmpty();
} |
public static <K, V> Reshuffle<K, V> of() {
return new Reshuffle<>();
} | @Test
@Category(ValidatesRunner.class)
public void testReshuffleAfterSlidingWindows() {
PCollection<KV<String, Integer>> input =
pipeline
.apply(
Create.of(ARBITRARY_KVS)
.withCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of())))
.apply(Window.into(FixedWindows.of(Duration.standardMinutes(10L))));
PCollection<KV<String, Integer>> output = input.apply(Reshuffle.of());
PAssert.that(output).containsInAnyOrder(ARBITRARY_KVS);
assertEquals(input.getWindowingStrategy(), output.getWindowingStrategy());
pipeline.run();
} |