focal_method | test_case
---|---
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement) {
if (!(statement.getStatement() instanceof DropStatement)) {
return statement;
}
final DropStatement dropStatement = (DropStatement) statement.getStatement();
if (!dropStatement.isDeleteTopic()) {
return statement;
}
final SourceName sourceName = dropStatement.getName();
final DataSource source = metastore.getSource(sourceName);
if (source != null) {
if (source.isSource()) {
throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
}
checkTopicRefs(source);
deleteTopic(source);
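// Guava's Closer attempts both subject deletions even if one of them throws, closing the registered lambdas in reverse registration order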
final Closer closer = Closer.create();
closer.register(() -> deleteKeySubject(source));
closer.register(() -> deleteValueSubject(source));
try {
closer.close();
} catch (final KsqlException e) {
throw e;
} catch (final Exception e) {
throw new KsqlException(e);
}
} else if (!dropStatement.getIfExists()) {
throw new KsqlException("Could not find source to delete topic for: " + statement);
}
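// rewrite the statement with the DELETE TOPIC clause stripped so only the DROP itself is executed downstream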
final T withoutDelete = (T) dropStatement.withoutDeleteClause();
final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";
return statement.withStatement(withoutDeleteText, withoutDelete);
} | @Test
public void shouldNotDeleteSchemaInSRIfNotSRSupported() throws IOException, RestClientException {
// Given:
when(topic.getValueFormat()).thenReturn(ValueFormat.of(FormatInfo.of(FormatFactory.DELIMITED.name()),
SerdeFeatures.of()));
// When:
deleteInjector.inject(DROP_WITH_DELETE_TOPIC);
// Then:
verify(registryClient, never()).deleteSubject(any());
} |
@Override
public String getName() {
return _name;
} | @Test
public void testStringStartsWithTransformFunction() {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("starts_with(%s, 'A')", STRING_ALPHANUM_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getName(), "startsWith");
int[] expectedValues = new int[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = StringUtils.startsWith(_stringAlphaNumericSVValues[i], "A") ? 1 : 0;
}
testTransformFunction(transformFunction, expectedValues);
} |
public static List<String> colTypesFromSchema(final String schema) {
return splitAcrossOneLevelDeepComma(schema).stream()
.map(RowUtil::removeBackTickAndKeyTrim)
.map(RowUtil::splitAndGetSecond)
.collect(Collectors.toList());
} | @Test
public void shouldGetColumnTypesFromSchema() {
// Given
final String schema = "`K` STRUCT<`F1` ARRAY<STRING>>, "
+ "`STR` STRING, "
+ "`LONG` BIGINT, "
+ "`DEC` DECIMAL(4, 2),"
+ "`BYTES_` BYTES, "
+ "`ARRAY` ARRAY<STRING>, "
+ "`MAP` MAP<STRING, STRING>, "
+ "`STRUCT` STRUCT<`F1` INTEGER>, "
+ "`COMPLEX` STRUCT<`DECIMAL` DECIMAL(2, 1), `STRUCT` STRUCT<`F1` STRING, `F2` INTEGER>, `ARRAY_ARRAY` ARRAY<ARRAY<STRING>>, `ARRAY_STRUCT` ARRAY<STRUCT<`F1` STRING>>, `ARRAY_MAP` ARRAY<MAP<STRING, INTEGER>>, `MAP_ARRAY` MAP<STRING, ARRAY<STRING>>, `MAP_MAP` MAP<STRING, MAP<STRING, INTEGER>>, `MAP_STRUCT` MAP<STRING, STRUCT<`F1` STRING>>>, "
+ "`TIMESTAMP` TIMESTAMP, "
+ "`DATE` DATE, "
+ "`TIME` TIME, "
+ "`HEAD` BYTES";
// When
final List<String> columnTypes = RowUtil.colTypesFromSchema(schema);
// Then
assertThat(
columnTypes,
contains(
"STRUCT<F1 ARRAY<STRING>>",
"STRING",
"BIGINT",
"DECIMAL(4, 2)",
"BYTES",
"ARRAY<STRING>",
"MAP<STRING, STRING>",
"STRUCT<F1 INTEGER>",
"STRUCT<DECIMAL DECIMAL(2, 1), STRUCT STRUCT<F1 STRING, F2 INTEGER>, ARRAY_ARRAY ARRAY<ARRAY<STRING>>, ARRAY_STRUCT ARRAY<STRUCT<F1 STRING>>, ARRAY_MAP ARRAY<MAP<STRING, INTEGER>>, MAP_ARRAY MAP<STRING, ARRAY<STRING>>, MAP_MAP MAP<STRING, MAP<STRING, INTEGER>>, MAP_STRUCT MAP<STRING, STRUCT<F1 STRING>>>",
"TIMESTAMP",
"DATE",
"TIME",
"BYTES"
));
} |
@Override
public boolean rejoinNeededOrPending() {
if (!subscriptions.hasAutoAssignedPartitions())
return false;
// we need to rejoin if we performed the assignment and the metadata has changed;
// owned partitions that no longer exist should also be dropped as lost
if (assignmentSnapshot != null && !assignmentSnapshot.matches(metadataSnapshot)) {
final String fullReason = String.format("cached metadata has changed from %s at the beginning of the rebalance to %s",
assignmentSnapshot, metadataSnapshot);
requestRejoinIfNecessary("cached metadata has changed", fullReason);
return true;
}
// we need to join if our subscription has changed since the last join
if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) {
final String fullReason = String.format("subscription has changed from %s at the beginning of the rebalance to %s",
joinedSubscription, subscriptions.subscription());
requestRejoinIfNecessary("subscription has changed", fullReason);
return true;
}
return super.rejoinNeededOrPending();
} | @Test
public void testDisconnectInJoin() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
final List<TopicPartition> owned = Collections.emptyList();
final List<TopicPartition> assigned = singletonList(t1p);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// a disconnect from the original coordinator triggers coordinator rediscovery and another join
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE), true);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(assigned, Errors.NONE));
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(toSet(assigned), subscriptions.assignedPartitions());
// nothing to be revoked hence callback not triggered
assertEquals(0, rebalanceListener.revokedCount);
assertNull(rebalanceListener.revoked);
assertEquals(1, rebalanceListener.assignedCount);
assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
} |
@Override
public String toString(final RouteUnit routeUnit) {
return identifier.getQuoteCharacter().wrap(getConstraintValue(routeUnit));
} | @Test
void assertToString() {
assertThat(new ConstraintToken(0, 1, new IdentifierValue("uc"),
mock(SQLStatementContext.class, withSettings().extraInterfaces(TableAvailable.class).defaultAnswer(RETURNS_DEEP_STUBS)), mock(ShardingRule.class)).toString(getRouteUnit()), is("uc"));
} |
public static FingerprintTrustManagerFactoryBuilder builder(String algorithm) {
return new FingerprintTrustManagerFactoryBuilder(algorithm);
} | @Test
public void testWithNoFingerprints() {
assertThrows(IllegalStateException.class, new Executable() {
@Override
public void execute() {
FingerprintTrustManagerFactory.builder("SHA-256").build();
}
});
} |
private ArrayAccess() {
} | @Test
public void shouldReturnNullOnNegativeOutOfBoundsIndex() {
// Given:
final List<Integer> list = ImmutableList.of(1, 2);
// When:
final Integer access = ArrayAccess.arrayAccess(list, -3);
// Then:
assertThat(access, nullValue());
} |
public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
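// Returns the name of the config key to read the keytab from: the SPNEGO-specific key when it has a value set, otherwise the supplied default key.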
String value =
conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
return (value == null || value.isEmpty()) ?
defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
} | @Test(timeout=5000)
public void testGetSpnegoKeytabKey() {
HdfsConfiguration conf = new HdfsConfiguration();
String defaultKey = "default.spnego.key";
conf.unset(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
assertEquals("Test spnego key in config is null", defaultKey,
DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "");
assertEquals("Test spnego key is empty", defaultKey,
DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
String spnegoKey = "spnego.key";
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
spnegoKey);
assertEquals("Test spnego key is NOT null",
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
} |
private double calcDistance(ReaderWay way, WaySegmentParser.CoordinateSupplier coordinateSupplier) {
LongArrayList nodes = way.getNodes();
// every way has at least two nodes according to our acceptWay function
GHPoint3D prevPoint = coordinateSupplier.getCoordinate(nodes.get(0));
if (prevPoint == null)
return Double.NaN;
boolean is3D = !Double.isNaN(prevPoint.ele);
double distance = 0;
for (int i = 1; i < nodes.size(); i++) {
GHPoint3D point = coordinateSupplier.getCoordinate(nodes.get(i));
if (point == null)
return Double.NaN;
if (Double.isNaN(point.ele) == is3D)
throw new IllegalStateException("There should be elevation data for either all points or no points at all. OSM way: " + way.getId());
distance += is3D
? distCalc.calcDist3D(prevPoint.lat, prevPoint.lon, prevPoint.ele, point.lat, point.lon, point.ele)
: distCalc.calcDist(prevPoint.lat, prevPoint.lon, point.lat, point.lon);
prevPoint = point;
}
return distance;
} | @Test
public void testDoNotRejectEdgeIfFirstNodeIsMissing_issue2221() {
GraphHopper hopper = new GraphHopperFacade("test-osm9.xml").importOrLoad();
BaseGraph graph = hopper.getBaseGraph();
assertEquals(2, graph.getNodes());
assertEquals(1, graph.getEdges());
AllEdgesIterator iter = graph.getAllEdges();
iter.next();
assertEquals(0, iter.getBaseNode());
assertEquals(1, iter.getAdjNode());
assertEquals(51.21, graph.getNodeAccess().getLat(0), 1.e-3);
assertEquals(9.41, graph.getNodeAccess().getLon(0), 1.e-3);
assertEquals(51.22, graph.getNodeAccess().getLat(1), 1.e-3);
assertEquals(9.42, graph.getNodeAccess().getLon(1), 1.e-3);
assertEquals(DistanceCalcEarth.DIST_EARTH.calcDistance(iter.fetchWayGeometry(FetchMode.ALL)), iter.getDistance(), 1.e-3);
assertEquals(1312.1, iter.getDistance(), 1.e-1);
assertEquals(1312.1, DistanceCalcEarth.DIST_EARTH.calcDistance(iter.fetchWayGeometry(FetchMode.ALL)), 1.e-1);
assertFalse(iter.next());
} |
protected static String getReverseZoneNetworkAddress(String baseIp, int range,
int index) throws UnknownHostException {
if (index < 0) {
throw new IllegalArgumentException(
String.format("Invalid index provided, must be non-negative: %d", index));
}
if (range < 0) {
throw new IllegalArgumentException(
String.format("Invalid range provided, cannot be negative: %d",
range));
}
return calculateIp(baseIp, range, index);
} | @Test
public void testGetReverseZoneNetworkAddress() throws Exception {
assertEquals("172.17.4.0",
ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, INDEX));
} |
static boolean shouldUpdate(AmazonInfo newInfo, AmazonInfo oldInfo) {
if (newInfo.getMetadata().isEmpty()) {
logger.warn("Newly resolved AmazonInfo is empty, skipping an update cycle");
} else if (!newInfo.equals(oldInfo)) {
if (isBlank(newInfo.get(AmazonInfo.MetaDataKey.instanceId))) {
logger.warn("instanceId is blank, skipping an update cycle");
return false;
} else if (isBlank(newInfo.get(AmazonInfo.MetaDataKey.localIpv4))) {
logger.warn("localIpv4 is blank, skipping an update cycle");
return false;
} else {
Set<String> newKeys = new HashSet<>(newInfo.getMetadata().keySet());
Set<String> oldKeys = new HashSet<>(oldInfo.getMetadata().keySet());
Set<String> union = new HashSet<>(newKeys);
union.retainAll(oldKeys);
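// despite its name, 'union' now holds the intersection of the key sets; removing it leaves each set with only its unique keys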
newKeys.removeAll(union);
oldKeys.removeAll(union);
for (String key : newKeys) {
logger.info("Adding new metadata {}={}", key, newInfo.getMetadata().get(key));
}
for (String key : oldKeys) {
logger.info("Removing old metadata {}={}", key, oldInfo.getMetadata().get(key));
}
}
return true;
}
return false;
} | @Test
public void testAmazonInfoUpdatePositiveCase() {
AmazonInfo oldInfo = (AmazonInfo) instanceInfo.getDataCenterInfo();
AmazonInfo newInfo = copyAmazonInfo(instanceInfo);
newInfo.getMetadata().remove(amiId.getName());
assertThat(newInfo.getMetadata().size(), is(oldInfo.getMetadata().size() - 1));
assertThat(RefreshableAmazonInfoProvider.shouldUpdate(newInfo, oldInfo), is(true));
String newKey = "someNewKey";
newInfo.getMetadata().put(newKey, "bar");
assertThat(newInfo.getMetadata().size(), is(oldInfo.getMetadata().size()));
assertThat(RefreshableAmazonInfoProvider.shouldUpdate(newInfo, oldInfo), is(true));
} |
public static int getPortFromEnvOrStartup(String[] args) {
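// prefer an explicit "-p <port>" startup argument; fall back to the container-provided port when absent or unparsable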
int port = 0;
if (args != null && args.length >= 2) {
for (int i = 0; i < args.length; ++i) {
if ("-p".equalsIgnoreCase(args[i]) && i < args.length - 1) {
port = NumberUtils.toInt(args[i + 1], 0);
}
}
}
if (port == 0) {
port = ContainerHelper.getPort();
}
return port;
} | @Test
public void testGetPortFromEnvOrStartup() {
Assertions.assertEquals(0, PortHelper.getPortFromEnvOrStartup(new String[]{}));
} |
@Override
public String toString() {
return "CmdbContext{" + "consumer=" + consumer + ", providers=" + providers + '}';
} | @Test
void testToString() {
CmdbContext<Instance> cmdbContext = new CmdbContext<>();
cmdbContext.setProviders(Collections.singletonList(new CmdbContext.CmdbInstance<>()));
cmdbContext.setConsumer(new CmdbContext.CmdbInstance<>());
System.out.println(cmdbContext.toString());
assertEquals(
"CmdbContext{consumer=CmdbInstance{entity=null, instance=null}, providers=[CmdbInstance{entity=null, instance=null}]}",
cmdbContext.toString());
} |
@Override
public Long getUnreadNotifyMessageCount(Long userId, Integer userType) {
return notifyMessageMapper.selectUnreadCountByUserIdAndUserType(userId, userType);
} | @Test
public void testGetUnreadNotifyMessageCount() {
SqlConstants.init(DbType.MYSQL);
// mock data
NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // matched by the query below
o.setUserId(1L);
o.setUserType(UserTypeEnum.ADMIN.getValue());
o.setReadStatus(false);
o.setTemplateParams(randomTemplateParams());
});
notifyMessageMapper.insert(dbNotifyMessage);
// userId does not match
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L)));
// userType does not match
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
// readStatus does not match
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(true)));
// prepare parameters
Long userId = 1L;
Integer userType = UserTypeEnum.ADMIN.getValue();
// invoke and assert
assertEquals(1, notifyMessageService.getUnreadNotifyMessageCount(userId, userType));
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
final SMBSession.DiskShareWrapper share = session.openShare(file);
try {
if(new SMBPathContainerService(session).isContainer(file)) {
final ShareInfo shareInformation = share.get().getShareInformation();
final PathAttributes attributes = new PathAttributes();
final long used = shareInformation.getTotalSpace() - shareInformation.getFreeSpace();
attributes.setSize(used);
attributes.setQuota(new Quota.Space(used, shareInformation.getFreeSpace()));
return attributes;
}
else {
final FileAllInformation fileInformation = share.get().getFileInformation(new SMBPathContainerService(session).getKey(file));
if(file.isDirectory() && !fileInformation.getStandardInformation().isDirectory()) {
throw new NotfoundException(String.format("File %s found but type is not directory", file.getName()));
}
else if(file.isFile() && fileInformation.getStandardInformation().isDirectory()) {
throw new NotfoundException(String.format("File %s found but type is not file", file.getName()));
}
return this.toAttributes(fileInformation);
}
}
catch(SMBRuntimeException e) {
throw new SMBExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
finally {
session.releaseShare(share);
}
} | @Test
public void testFindFile() throws Exception {
final Path test = new SMBTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(),
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final SMBAttributesFinderFeature f = new SMBAttributesFinderFeature(session);
final PathAttributes attributes = f.find(test);
assertEquals(0L, attributes.getSize());
assertNotEquals(-1L, attributes.getModificationDate());
// Test wrong type
assertThrows(NotfoundException.class, () -> f.find(new Path(test.getAbsolute(), EnumSet.of(Path.Type.directory))));
new SMBDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Nullable
public Float getFloatValue(@FloatFormat final int formatType,
@IntRange(from = 0) final int offset) {
if ((offset + getTypeLen(formatType)) > size()) return null;
switch (formatType) {
case FORMAT_SFLOAT -> {
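// IEEE 11073-20601 SFLOAT reserved values (little-endian): 0x07FE = +INFINITY; 0x07FF (NaN), 0x0800 (NRes) and 0x0801 (reserved) all map to NaN; 0x0802 = -INFINITY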
if (mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFE)
return Float.POSITIVE_INFINITY;
if ((mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFF) ||
(mValue[offset + 1] == 0x08 && mValue[offset] == 0x00) ||
(mValue[offset + 1] == 0x08 && mValue[offset] == 0x01))
return Float.NaN;
if (mValue[offset + 1] == 0x08 && mValue[offset] == 0x02)
return Float.NEGATIVE_INFINITY;
return bytesToFloat(mValue[offset], mValue[offset + 1]);
}
case FORMAT_FLOAT -> {
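// IEEE 11073-20601 FLOAT reserved values (little-endian): 0x007FFFFE = +INFINITY; 0x007FFFFF, 0x00800000 (NRes) and 0x00800001 (reserved) all map to NaN; 0x00800002 = -INFINITY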
if (mValue[offset + 3] == 0x00) {
if (mValue[offset + 2] == 0x7F && mValue[offset + 1] == (byte) 0xFF) {
if (mValue[offset] == (byte) 0xFE)
return Float.POSITIVE_INFINITY;
if (mValue[offset] == (byte) 0xFF)
return Float.NaN;
} else if (mValue[offset + 2] == (byte) 0x80 && mValue[offset + 1] == 0x00) {
if (mValue[offset] == 0x00 || mValue[offset] == 0x01)
return Float.NaN;
if (mValue[offset] == 0x02)
return Float.NEGATIVE_INFINITY;
}
}
return bytesToFloat(mValue[offset], mValue[offset + 1],
mValue[offset + 2], mValue[offset + 3]);
}
}
return null;
} | @Test
public void setValue_SFLOAT_roundUp() {
final MutableData data = new MutableData(new byte[2]);
data.setValue(123.45f, Data.FORMAT_SFLOAT, 0);
final float value = data.getFloatValue(Data.FORMAT_SFLOAT, 0);
assertEquals(123.5f, value, 0.00);
} |
@Override
public Credentials configure(final Host host) {
if(StringUtils.isNotBlank(host.getHostname())) {
final Credentials credentials = new Credentials(host.getCredentials());
configuration.refresh();
// Update this host credentials from the OpenSSH configuration file in ~/.ssh/config
final OpenSshConfig.Host entry = configuration.lookup(host.getHostname());
if(StringUtils.isNotBlank(entry.getUser())) {
if(!credentials.validate(host.getProtocol(), new LoginOptions(host.getProtocol()).password(false))) {
if(log.isInfoEnabled()) {
log.info(String.format("Using username %s from %s", entry, configuration));
}
credentials.setUsername(entry.getUser());
}
}
if(!credentials.isPublicKeyAuthentication()) {
if(null != entry.getIdentityFile()) {
if(log.isInfoEnabled()) {
log.info(String.format("Using identity %s from %s", entry, configuration));
}
credentials.setIdentity(entry.getIdentityFile());
}
else {
// No custom public key authentication configuration
if(new HostPreferences(host).getBoolean("ssh.authentication.publickey.default.enable")) {
final Local rsa = LocalFactory.get(new HostPreferences(host).getProperty("ssh.authentication.publickey.default.rsa"));
if(rsa.exists()) {
if(log.isInfoEnabled()) {
log.info(String.format("Using RSA default host key %s from %s", rsa, configuration));
}
credentials.setIdentity(rsa);
}
else {
final Local dsa = LocalFactory.get(new HostPreferences(host).getProperty("ssh.authentication.publickey.default.dsa"));
if(dsa.exists()) {
if(log.isInfoEnabled()) {
log.info(String.format("Using DSA default host key %s from %s", dsa, configuration));
}
credentials.setIdentity(dsa);
}
}
}
}
}
return credentials;
}
return CredentialsConfigurator.DISABLED.configure(host);
} | @Test
public void testConfigureDefaultKey() {
OpenSSHCredentialsConfigurator c = new OpenSSHCredentialsConfigurator(
new OpenSshConfig(
new Local("src/main/test/resources", "openssh/config")));
final Credentials credentials = c.configure(new Host(new TestProtocol(Scheme.sftp), "t"));
// ssh.authentication.publickey.default.enable
assertNull(credentials.getIdentity());
} |
public void publish(DefaultGoPublisher goPublisher, String destPath, File source, JobIdentifier jobIdentifier) {
if (!source.exists()) {
String message = "Failed to find " + source.getAbsolutePath();
goPublisher.taggedConsumeLineWithPrefix(PUBLISH_ERR, message);
bomb(message);
}
int publishingAttempts = 0;
Throwable lastException = null;
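// retry the upload up to PUBLISH_MAX_RETRIES times, zipping the source into a fresh temp folder on each attempt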
while (publishingAttempts < PUBLISH_MAX_RETRIES) {
File tmpDir = null;
try {
publishingAttempts++;
tmpDir = FileUtil.createTempFolder();
File dataToUpload = new File(tmpDir, source.getName() + ".zip");
zipUtil.zip(source, dataToUpload, Deflater.BEST_SPEED);
long size = 0;
if (source.isDirectory()) {
size = FileUtils.sizeOfDirectory(source);
} else {
size = source.length();
}
goPublisher.taggedConsumeLineWithPrefix(PUBLISH, "Uploading artifacts from " + source.getAbsolutePath() + " to " + getDestPath(destPath));
String normalizedDestPath = FilenameUtils.separatorsToUnix(destPath);
String url = urlService.getUploadUrlOfAgent(jobIdentifier, normalizedDestPath, publishingAttempts);
int statusCode = httpService.upload(url, size, dataToUpload, artifactChecksums(source, normalizedDestPath));
if (statusCode == HttpURLConnection.HTTP_ENTITY_TOO_LARGE) {
String message = String.format("Artifact upload for file %s (Size: %s) was denied by the server. This usually happens when the server runs out of disk space.",
source.getAbsolutePath(), size);
goPublisher.taggedConsumeLineWithPrefix(PUBLISH_ERR, message);
LOGGER.error("[Artifact Upload] Artifact upload was denied by the server. This usually happens when the server runs out of disk space.");
publishingAttempts = PUBLISH_MAX_RETRIES;
bomb(message + ". HTTP return code is " + statusCode);
}
if (statusCode < HttpURLConnection.HTTP_OK || statusCode >= HttpURLConnection.HTTP_MULT_CHOICE) {
bomb("Failed to upload " + source.getAbsolutePath() + ". HTTP return code is " + statusCode);
}
return;
} catch (Throwable e) {
String message = "Failed to upload " + source.getAbsolutePath();
LOGGER.error(message, e);
goPublisher.taggedConsumeLineWithPrefix(PUBLISH_ERR, message);
lastException = e;
} finally {
FileUtils.deleteQuietly(tmpDir);
}
}
if (lastException != null) {
throw new RuntimeException(lastException);
}
} | @Test
public void shouldBombWithErrorWhenStatusCodeReturnedIsRequestEntityTooLarge() throws IOException {
when(httpService.upload(any(String.class), eq(tempFile.length()), any(File.class), any(Properties.class))).thenReturn(HttpServletResponse.SC_REQUEST_ENTITY_TOO_LARGE);
CircularFifoQueue buffer = ReflectionUtil.getField(ReflectionUtil.getField(goPublisher, "consoleOutputTransmitter"), "buffer");
synchronized (buffer) {
try {
goArtifactsManipulatorStub.publish(goPublisher, "some_dest", tempFile, jobIdentifier);
fail("should have thrown request entity too large error");
} catch (RuntimeException e) {
String expectedMessage = "Artifact upload for file " + tempFile.getAbsolutePath() + " (Size: " + tempFile.length() + ") was denied by the server. This usually happens when the server runs out of disk space.";
assertThat(e.getMessage(), is("java.lang.RuntimeException: " + expectedMessage + ". HTTP return code is 413"));
assertThat(buffer.toString().contains(expectedMessage), is(true));
}
}
} |
@Override
public YamlModeConfiguration swapToYamlConfiguration(final ModeConfiguration data) {
YamlModeConfiguration result = new YamlModeConfiguration();
result.setType(data.getType());
if (null != data.getRepository()) {
YamlPersistRepositoryConfigurationSwapper<PersistRepositoryConfiguration> swapper = TypedSPILoader.getService(YamlPersistRepositoryConfigurationSwapper.class, data.getType());
result.setRepository(swapper.swapToYamlConfiguration(data.getRepository()));
}
return result;
} | @Test
void assertSwapToYamlConfiguration() {
YamlModeConfiguration actual = swapper.swapToYamlConfiguration(new ModeConfiguration("TEST_TYPE", null));
assertThat(actual.getType(), is(TEST_TYPE));
} |
@Override
public Server build(Environment environment) {
printBanner(environment.getName());
final ThreadPool threadPool = createThreadPool(environment.metrics());
final Server server = buildServer(environment.lifecycle(), threadPool);
final Handler applicationHandler = createAppServlet(server,
environment.jersey(),
environment.getObjectMapper(),
environment.getValidator(),
environment.getApplicationContext(),
environment.getJerseyServletContainer(),
environment.metrics());
final Handler adminHandler = createAdminServlet(server,
environment.getAdminContext(),
environment.metrics(),
environment.healthChecks(),
environment.admin());
final RoutingHandler routingHandler = buildRoutingHandler(environment.metrics(),
server,
applicationHandler,
adminHandler);
final Handler gzipHandler = buildGzipHandler(routingHandler);
server.setHandler(addStatsHandler(addRequestLog(server, gzipHandler, environment.getName())));
return server;
} | @Test
void doesNotDefaultDetailedJsonProcessingExceptionToFalse() {
http.setDetailedJsonProcessingExceptionMapper(true);
http.build(environment);
assertThat(environment.jersey().getResourceConfig().getSingletons())
.filteredOn(x -> x instanceof ExceptionMapperBinder)
.map(x -> (ExceptionMapperBinder) x)
.singleElement()
.satisfies(x -> assertThat(x.isShowDetails()).isTrue());
} |
@Override
public ParDoFn create(
PipelineOptions options,
CloudObject cloudUserFn,
List<SideInputInfo> sideInputInfos,
TupleTag<?> mainOutputTag,
Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices,
DataflowExecutionContext<?> executionContext,
DataflowOperationContext operationContext)
throws Exception {
String className = cloudUserFn.getClassName();
ParDoFnFactory factory = defaultFactories.get(className);
if (factory == null) {
throw new Exception("No known ParDoFnFactory for " + className);
}
return factory.create(
options,
cloudUserFn,
sideInputInfos,
mainOutputTag,
outputTupleTagsToReceiverIndices,
executionContext,
operationContext);
} | @Test
public void testCreateUnknownParDoFn() throws Exception {
// A bogus serialized DoFn
CloudObject cloudUserFn = CloudObject.forClassName("UnknownKindOfDoFn");
try {
DEFAULT_FACTORY.create(
DEFAULT_OPTIONS,
cloudUserFn,
null,
MAIN_OUTPUT,
ImmutableMap.<TupleTag<?>, Integer>of(MAIN_OUTPUT, 0),
DEFAULT_EXECUTION_CONTEXT,
TestOperationContext.create(counterSet));
fail("should have thrown an exception");
} catch (Exception exn) {
assertThat(exn.toString(), Matchers.containsString("No known ParDoFnFactory"));
}
} |
@NonNull @VisibleForTesting
static String[] getPermissionsStrings(int requestCode) {
switch (requestCode) {
case CONTACTS_PERMISSION_REQUEST_CODE -> {
return new String[] {Manifest.permission.READ_CONTACTS};
}
case NOTIFICATION_PERMISSION_REQUEST_CODE -> {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
return new String[] {Manifest.permission.POST_NOTIFICATIONS};
} else {
return new String[0];
}
}
default -> throw new IllegalArgumentException("Unknown request code " + requestCode);
}
} | @Test
@Config(sdk = TIRAMISU)
public void testGetPermissionsStringsNotificationsNewDevice() {
Assert.assertArrayEquals(
new String[] {Manifest.permission.POST_NOTIFICATIONS},
PermissionRequestHelper.getPermissionsStrings(
PermissionRequestHelper.NOTIFICATION_PERMISSION_REQUEST_CODE));
} |
public boolean isNewerThan(JavaSpecVersion otherVersion) {
return this.compareTo(otherVersion) > 0;
} | @Test
public void test17newerThan11() throws Exception
{
// Setup fixture.
final JavaSpecVersion eleven = new JavaSpecVersion( "11" );
final JavaSpecVersion seventeen = new JavaSpecVersion( "17" );
// Execute system under test.
final boolean result = seventeen.isNewerThan( eleven );
// Verify results.
assertTrue( result );
} |
public B appendParameters(Map<String, String> appendParameters) {
this.parameters = appendParameters(parameters, appendParameters);
return getThis();
} | @Test
void appendParameters() {
Map<String, String> source = new HashMap<>();
source.put("default.num", "one");
source.put("num", "ONE");
MethodBuilder builder = new MethodBuilder();
builder.appendParameters(source);
Map<String, String> parameters = builder.build().getParameters();
Assertions.assertTrue(parameters.containsKey("default.num"));
Assertions.assertEquals("ONE", parameters.get("num"));
} |
@Override
public SofaResponse invoke(FilterInvoker invoker, SofaRequest request) throws SofaRpcException {
// Now only support sync invoke.
if (request.getInvokeType() != null && !RpcConstants.INVOKER_TYPE_SYNC.equals(request.getInvokeType())) {
return invoker.invoke(request);
}
String interfaceResourceName = getInterfaceResourceName(request);
String methodResourceName = getMethodResourceName(request);
Entry interfaceEntry = null;
Entry methodEntry = null;
try {
interfaceEntry = SphU.entry(interfaceResourceName, ResourceTypeConstants.COMMON_RPC, EntryType.OUT);
methodEntry = SphU.entry(methodResourceName, ResourceTypeConstants.COMMON_RPC,
EntryType.OUT, getMethodArguments(request));
SofaResponse response = invoker.invoke(request);
traceResponseException(response, interfaceEntry, methodEntry);
return response;
} catch (BlockException e) {
return SofaRpcFallbackRegistry.getConsumerFallback().handle(invoker, request, e);
} catch (Throwable t) {
throw traceOtherException(t, interfaceEntry, methodEntry);
} finally {
if (methodEntry != null) {
methodEntry.exit(1, getMethodArguments(request));
}
if (interfaceEntry != null) {
interfaceEntry.exit();
}
}
} | @Test
public void testInvokeSentinelWorks() {
SentinelSofaRpcConsumerFilter filter = new SentinelSofaRpcConsumerFilter();
final String interfaceResourceName = "com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService";
final String methodResourceName = "com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService#sayHello(java.lang.String,int)";
SofaRequest request = mock(SofaRequest.class);
when(request.getInvokeType()).thenReturn(RpcConstants.INVOKER_TYPE_SYNC);
when(request.getInterfaceName()).thenReturn(interfaceResourceName);
when(request.getMethodName()).thenReturn("sayHello");
when(request.getMethodArgSigs()).thenReturn(new String[]{"java.lang.String", "int"});
when(request.getMethodArgs()).thenReturn(new Object[]{"Sentinel", 2020});
FilterInvoker filterInvoker = mock(FilterInvoker.class);
when(filterInvoker.invoke(request)).thenAnswer(new Answer<SofaResponse>() {
@Override
public SofaResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
verifyInvocationStructure(interfaceResourceName, methodResourceName);
SofaResponse response = new SofaResponse();
response.setAppResponse("Hello Sentinel 2020");
return response;
}
});
// Before invoke
assertNull(ContextUtil.getContext());
// Do invoke
SofaResponse response = filter.invoke(filterInvoker, request);
assertEquals("Hello Sentinel 2020", response.getAppResponse());
verify(filterInvoker).invoke(request);
// After invoke, make sure exit context
assertNull(ContextUtil.getContext());
} |
@Override
public void execute(ComputationStep.Context context) {
for (TriggerViewRefreshDelegate triggerViewRefreshDelegate : this.triggerViewRefreshDelegates) {
OptionalInt count = triggerViewRefreshDelegate.triggerFrom(analysisMetadata.getProject());
count.ifPresent(i -> context.getStatistics().add("refreshes" + triggerViewRefreshDelegate.getQualifier(), i));
}
} | @Test
public void execute_has_no_effect_if_constructor_without_delegate() {
TriggerViewRefreshStep underTest = new TriggerViewRefreshStep(analysisMetadataHolder);
underTest.execute(new TestComputationStepContext());
verifyNoInteractions(analysisMetadataHolder);
} |
public static List<TypedExpression> coerceCorrectConstructorArguments(
final Class<?> type,
List<TypedExpression> arguments,
List<Integer> emptyCollectionArgumentsIndexes) {
Objects.requireNonNull(type, "Type parameter cannot be null as the method searches constructors from that class!");
Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
+ "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
}
// Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
Constructor<?> constructor = resolveConstructor(type, coercedArgumentsTypesList);
if (constructor != null) {
return coercedArgumentsTypesList;
} else {
// This needs to go through all possible combinations.
final int indexesListSize = emptyCollectionArgumentsIndexes.size();
for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
constructor = resolveConstructor(type, coercedArgumentsTypesList);
if (constructor != null) {
return coercedArgumentsTypesList;
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
}
// No constructor found, return the original arguments.
return arguments;
}
} | @Test
public void coerceCorrectConstructorArgumentsIsNotCollectionAtIndex() {
final List<TypedExpression> arguments = List.of(new IntegerLiteralExpressionT(new IntegerLiteralExpr("12")));
Assertions.assertThatThrownBy(
() -> MethodResolutionUtils.coerceCorrectConstructorArguments(
Person.class,
arguments,
List.of(0)))
.isInstanceOf(IllegalArgumentException.class);
} |
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
switch (outputFormat) {
case JSON:
printAsJson(entityList);
break;
case TABULAR:
final boolean showStatements = entityList.size() > 1;
for (final KsqlEntity ksqlEntity : entityList) {
writer().println();
if (showStatements) {
writer().println(ksqlEntity.getStatementText());
}
printAsTable(ksqlEntity);
}
break;
default:
throw new RuntimeException(String.format(
"Unexpected output format: '%s'",
outputFormat.name()
));
}
} | @Test
public void shouldPrintAssertTopicResult() {
// Given:
final KsqlEntityList entities = new KsqlEntityList(ImmutableList.of(
new AssertTopicEntity("statement", "name", true)
));
// When:
console.printKsqlEntityList(entities);
// Then:
final String output = terminal.getOutputString();
Approvals.verify(output, approvalOptions);
} |
public int getIssueCount() throws IOException {
Request request = new Request.Builder().url(url + "/issues.json").build();
Response response = client.newCall(request).execute();
JSONObject jsonObject = new JSONObject(response.body().string());
return jsonObject.getInt("total_count");
} | @Test
public void canGetIssueCount() throws Exception {
RedmineClient redmineClient = new RedmineClient(redmineContainer.getRedmineUrl());
assertThat(redmineClient.getIssueCount()).as("The issue count can be retrieved.").isZero();
} |
public static <K, V> Read<K, V> read() {
return new AutoValue_KafkaIO_Read.Builder<K, V>()
.setTopics(new ArrayList<>())
.setTopicPartitions(new ArrayList<>())
.setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
.setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
.setMaxNumRecords(Long.MAX_VALUE)
.setCommitOffsetsInFinalizeEnabled(false)
.setDynamicRead(false)
.setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
.setConsumerPollingTimeout(2L)
.setRedistributed(false)
.setAllowDuplicates(false)
.setRedistributeNumKeys(0)
.build();
} | @Test(expected = IllegalStateException.class)
public void testWithInvalidConsumerPollingTimeout() {
KafkaIO.<Integer, Long>read().withConsumerPollingTimeout(-5L);
} |
public void parse(File xmlFile) throws XMLStreamException {
FileInputStream input = null;
try {
input = new FileInputStream(xmlFile);
parse(input);
} catch (FileNotFoundException e) {
throw new XMLStreamException(e);
} finally {
IOUtils.closeQuietly(input);
}
} | @Test
public void testXMLWithXSD() throws XMLStreamException {
StaxParser parser = new StaxParser(getTestHandler());
parser.parse(getClass().getClassLoader().getResourceAsStream("org/sonar/scanner/genericcoverage/xml-xsd-test.xml"));
} |
public static String buildPluginParentPath() {
return String.join(PATH_SEPARATOR, PLUGIN_PARENT);
} | @Test
public void testBuildPluginParentPath() {
String pluginParentPath = DefaultPathConstants.buildPluginParentPath();
assertThat(pluginParentPath, notNullValue());
assertThat(PLUGIN_PARENT, equalTo(pluginParentPath));
} |
@Override
public String getPrefix() {
return String.format("%s.%s", GoogleStorageProtocol.class.getPackage().getName(), "GoogleStorage");
} | @Test
public void testPrefix() {
assertEquals("ch.cyberduck.core.googlestorage.GoogleStorage", new GoogleStorageProtocol().getPrefix());
} |
@Override
protected int getDefaultPort() {
return FTP.DEFAULT_PORT;
} | @Test
public void testFTPDefaultPort() throws Exception {
FTPFileSystem ftp = new FTPFileSystem();
assertEquals(FTP.DEFAULT_PORT, ftp.getDefaultPort());
} |
@Override
public synchronized KafkaMessageBatch fetchMessages(StreamPartitionMsgOffset startMsgOffset, int timeoutMs) {
long startOffset = ((LongMsgOffset) startMsgOffset).getOffset();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Polling partition: {}, startOffset: {}, timeout: {}ms", _topicPartition, startOffset, timeoutMs);
}
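// seek only when the requested start offset does not immediately follow the last fetched offset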
if (_lastFetchedOffset < 0 || _lastFetchedOffset != startOffset - 1) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Seeking to offset: {}", startOffset);
}
_consumer.seek(_topicPartition, startOffset);
}
ConsumerRecords<String, Bytes> consumerRecords = _consumer.poll(Duration.ofMillis(timeoutMs));
List<ConsumerRecord<String, Bytes>> records = consumerRecords.records(_topicPartition);
List<BytesStreamMessage> filteredRecords = new ArrayList<>(records.size());
long firstOffset = -1;
long offsetOfNextBatch = startOffset;
StreamMessageMetadata lastMessageMetadata = null;
if (!records.isEmpty()) {
firstOffset = records.get(0).offset();
_lastFetchedOffset = records.get(records.size() - 1).offset();
offsetOfNextBatch = _lastFetchedOffset + 1;
for (ConsumerRecord<String, Bytes> record : records) {
StreamMessageMetadata messageMetadata = extractMessageMetadata(record);
Bytes message = record.value();
if (message != null) {
String key = record.key();
byte[] keyBytes = key != null ? key.getBytes(StandardCharsets.UTF_8) : null;
filteredRecords.add(new BytesStreamMessage(keyBytes, message.get(), messageMetadata));
} else if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Tombstone message at offset: {}", record.offset());
}
lastMessageMetadata = messageMetadata;
}
}
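// firstOffset > startOffset indicates the requested offset was unavailable (e.g. expired) and earlier records were skipped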
return new KafkaMessageBatch(filteredRecords, records.size(), offsetOfNextBatch, firstOffset, lastMessageMetadata,
firstOffset > startOffset);
} | @Test
public void testOffsetsExpired()
throws TimeoutException {
Map<String, String> streamConfigMap = new HashMap<>();
streamConfigMap.put("streamType", "kafka");
streamConfigMap.put("stream.kafka.topic.name", TEST_TOPIC_3);
streamConfigMap.put("stream.kafka.broker.list", _kafkaBrokerAddress);
streamConfigMap.put("stream.kafka.consumer.type", "lowlevel");
streamConfigMap.put("stream.kafka.consumer.factory.class.name", getKafkaConsumerFactoryName());
streamConfigMap.put("stream.kafka.decoder.class.name", "decoderClass");
streamConfigMap.put("auto.offset.reset", "earliest");
StreamConfig streamConfig = new StreamConfig("tableName_REALTIME", streamConfigMap);
StreamConsumerFactory streamConsumerFactory = StreamConsumerFactoryProvider.create(streamConfig);
PartitionGroupConsumer consumer = streamConsumerFactory.createPartitionGroupConsumer("clientId",
new PartitionGroupConsumptionStatus(0, 0, new LongMsgOffset(0),
new LongMsgOffset(NUM_MSG_PRODUCED_PER_PARTITION), "CONSUMING"));
// Start offset has expired. Automatically reset to the earliest available offset and fetch whatever is available
MessageBatch messageBatch = consumer.fetchMessages(new LongMsgOffset(0), 10000);
assertEquals(messageBatch.getMessageCount(), 500);
assertEquals(messageBatch.getUnfilteredMessageCount(), 500);
for (int i = 0; i < 500; i++) {
assertEquals(new String((byte[]) messageBatch.getStreamMessage(i).getValue()), "sample_msg_" + (200 + i));
}
assertEquals(messageBatch.getOffsetOfNextBatch().toString(), "700");
} |
@Override public HashSlotCursor12byteKey cursor() {
return new CursorIntKey2();
} | @Test
@RequireAssertEnabled
public void testCursor_advance_afterAdvanceReturnsFalse() {
insert(randomKey(), randomKey());
HashSlotCursor12byteKey cursor = hsa.cursor();
cursor.advance();
cursor.advance();
assertThrows(AssertionError.class, cursor::advance);
} |
public InetAddress getAddress() {
return inetSocketAddress.getAddress();
} | @Test
public void testGetAddress() throws Exception {
final InetSocketAddress inetSocketAddress = new InetSocketAddress(Inet4Address.getLoopbackAddress(), 12345);
final ResolvableInetSocketAddress address = new ResolvableInetSocketAddress(inetSocketAddress);
assertThat(address.getAddress()).isEqualTo(inetSocketAddress.getAddress());
} |
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
} | @Test
public void testDodgyProtect1()
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", PROTECT_1, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_DODGY_NECKLACE, 1);
} |
public static ADXSinkConfig load(String yamlFile) throws IOException {
ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
return mapper.readValue(new File(yamlFile), ADXSinkConfig.class);
} | @Test
public final void loadFromMapTest() throws IOException {
Map<String, Object> map = getConfig();
SinkContext sinkContext = Mockito.mock(SinkContext.class);
ADXSinkConfig config = ADXSinkConfig.load(map, sinkContext);
assertNotNull(config);
assertEquals(config.getClusterUrl(), "https://somecluster.eastus.kusto.windows.net");
assertEquals(config.getDatabase(), "somedb");
assertEquals(config.getTable(), "tableName");
assertEquals(config.getAppId(), "xxxx-xxxx-xxxx-xxxx");
assertEquals(config.getAppKey(), "xxxx-xxxx-xxxx-xxxx");
assertEquals(config.getTenantId(), "xxxx-xxxx-xxxx-xxxx");
assertEquals(config.getManagedIdentityId(), "xxxx-some-id-xxxx OR empty string");
assertEquals(config.getMappingRefName(), "mapping ref name");
assertEquals(config.getMappingRefType(), "CSV");
assertFalse(config.isFlushImmediately());
assertEquals(config.getBatchSize(), 100);
assertEquals(config.getBatchTimeMs(), 10000);
} |
@Override
public Deserializer deserializer(String topic, Target type) {
return new Deserializer() {
@SneakyThrows
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
try {
UnknownFieldSet unknownFields = UnknownFieldSet.parseFrom(data);
return new DeserializeResult(unknownFields.toString(), DeserializeResult.Type.STRING, Map.of());
} catch (Exception e) {
throw new ValidationException(e.getMessage());
}
}
};
} | @Test
void deserializeInvalidMessage() {
var deserializer = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE);
assertThatThrownBy(() -> deserializer.deserialize(null, new byte[] { 1, 2, 3 }))
.isInstanceOf(ValidationException.class)
.hasMessageContaining("Protocol message contained an invalid tag");
} |
protected static void configureMulticastSocket(MulticastSocket multicastSocket, Address bindAddress,
HazelcastProperties hzProperties, MulticastConfig multicastConfig, ILogger logger)
throws SocketException, IOException, UnknownHostException {
multicastSocket.setReuseAddress(true);
// bind to receive interface
multicastSocket.bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
multicastSocket.setTimeToLive(multicastConfig.getMulticastTimeToLive());
try {
boolean loopbackBind = bindAddress.getInetAddress().isLoopbackAddress();
Boolean loopbackModeEnabled = multicastConfig.getLoopbackModeEnabled();
if (loopbackModeEnabled != null) {
// setting loopbackmode is just a hint - and the argument means "disable"!
// to check the real value we call getLoopbackMode() (and again - return value means "disabled")
multicastSocket.setLoopbackMode(!loopbackModeEnabled);
}
// If LoopBack mode is not enabled (i.e. getLoopbackMode return true) and bind address is a loopback one,
// then print a warning
if (loopbackBind && multicastSocket.getLoopbackMode()) {
logger.warning("Hazelcast is bound to " + bindAddress.getHost() + " and loop-back mode is "
+ "disabled. This could cause multicast auto-discovery issues "
+ "and render it unable to work. Check your network connectivity, try to enable the "
+ "loopback mode and/or force -Djava.net.preferIPv4Stack=true on your JVM.");
}
// warning: before modifying lines below, take a look at these links:
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4417033
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6402758
// https://github.com/hazelcast/hazelcast/pull/19251#issuecomment-891375270
boolean callSetInterface = OS.isMac() || !loopbackBind;
String propSetInterface = hzProperties.getString(ClusterProperty.MULTICAST_SOCKET_SET_INTERFACE);
if (propSetInterface != null) {
callSetInterface = Boolean.parseBoolean(propSetInterface);
}
if (callSetInterface) {
multicastSocket.setInterface(bindAddress.getInetAddress());
}
} catch (Exception e) {
logger.warning(e);
}
multicastSocket.setReceiveBufferSize(SOCKET_BUFFER_SIZE);
multicastSocket.setSendBufferSize(SOCKET_BUFFER_SIZE);
String multicastGroup = hzProperties.getString(ClusterProperty.MULTICAST_GROUP);
if (multicastGroup == null) {
multicastGroup = multicastConfig.getMulticastGroup();
}
multicastConfig.setMulticastGroup(multicastGroup);
multicastSocket.joinGroup(InetAddress.getByName(multicastGroup));
multicastSocket.setSoTimeout(SOCKET_TIMEOUT);
} | @Test
public void testSetInterfaceDefaultWhenNonLoopbackAddrAndLoopbackMode() throws Exception {
Config config = createConfig(null);
MulticastConfig multicastConfig = config.getNetworkConfig().getJoin().getMulticastConfig();
multicastConfig.setLoopbackModeEnabled(true);
MulticastSocket multicastSocket = mock(MulticastSocket.class);
Address address = new Address("10.0.0.2", 5701);
HazelcastProperties hzProperties = new HazelcastProperties(config);
MulticastService.configureMulticastSocket(multicastSocket, address, hzProperties, multicastConfig, mock(ILogger.class));
verify(multicastSocket).setInterface(address.getInetAddress());
verify(multicastSocket).setLoopbackMode(false);
} |
@Override
public void addInterface(VplsData vplsData, Interface iface) {
requireNonNull(vplsData);
requireNonNull(iface);
VplsData newData = VplsData.of(vplsData);
newData.addInterface(iface);
updateVplsStatus(newData, VplsData.VplsState.UPDATING);
} | @Test
public void testAddInterface() {
VplsData vplsData = vplsManager.createVpls(VPLS1, NONE);
vplsManager.addInterface(vplsData, V100H1);
vplsManager.addInterface(vplsData, V100H2);
vplsData = vplsStore.getVpls(VPLS1);
assertNotNull(vplsData);
assertEquals(vplsData.state(), UPDATING);
assertEquals(2, vplsData.interfaces().size());
assertTrue(vplsData.interfaces().contains(V100H1));
assertTrue(vplsData.interfaces().contains(V100H2));
} |
@Override
public void execute(GraphModel graphModel) {
Graph graph = graphModel.getGraphVisible();
execute(graph);
} | @Test
public void testColumnCreation() {
GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(1);
WeightedDegree d = new WeightedDegree();
d.execute(graphModel);
Assert.assertTrue(graphModel.getNodeTable().hasColumn(WeightedDegree.WDEGREE));
} |
@SuppressWarnings("unchecked")
@Override
public boolean canHandleReturnType(Class returnType) {
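// The return type can be handled if it is one of the supported RxJava types (or a subtype of one).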
return rxSupportedTypes.stream()
.anyMatch(classType -> classType.isAssignableFrom(returnType));
} | @Test
public void testCheckTypes() {
assertThat(rxJava2RetryAspectExt.canHandleReturnType(Flowable.class)).isTrue();
assertThat(rxJava2RetryAspectExt.canHandleReturnType(Single.class)).isTrue();
} |
@Override
@SuppressWarnings("DuplicatedCode")
public Integer cleanJobLog(Integer exceedDay, Integer deleteLimit) {
int count = 0;
LocalDateTime expireDate = LocalDateTime.now().minusDays(exceedDay);
// delete in a loop until no data matches the condition
for (int i = 0; i < Short.MAX_VALUE; i++) {
int deleteCount = jobLogMapper.deleteByCreateTimeLt(expireDate, deleteLimit);
count += deleteCount;
// fewer rows deleted than the limit means there is no more matching data
if (deleteCount < deleteLimit) {
break;
}
}
return count;
} | @Test
public void testCleanJobLog() {
// mock data
JobLogDO log01 = randomPojo(JobLogDO.class, o -> o.setCreateTime(addTime(Duration.ofDays(-3))))
.setExecuteIndex(1);
jobLogMapper.insert(log01);
JobLogDO log02 = randomPojo(JobLogDO.class, o -> o.setCreateTime(addTime(Duration.ofDays(-1))))
.setExecuteIndex(1);
jobLogMapper.insert(log02);
// prepare parameters
Integer exceedDay = 2;
Integer deleteLimit = 1;
// invoke
Integer count = jobLogService.cleanJobLog(exceedDay, deleteLimit);
// assert
assertEquals(1, count);
List<JobLogDO> logs = jobLogMapper.selectList();
assertEquals(1, logs.size());
assertEquals(log02, logs.get(0));
} |
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
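// Resolve the state cell for this namespace and tag from the per-work-item state table (no state context is supplied).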
return workItemState.get(namespace, address, StateContexts.nullContext());
} | @Test
public void testNewWatermarkNoFetch() throws Exception {
StateTag<WatermarkHoldState> addr =
StateTags.watermarkStateInternal("watermark", TimestampCombiner.EARLIEST);
WatermarkHoldState bag = underTestNewKey.state(NAMESPACE, addr);
assertThat(bag.read(), Matchers.nullValue());
// Shouldn't need to read from windmill for this.
Mockito.verifyZeroInteractions(mockReader);
} |
public T send() throws IOException {
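// Execute the request through the underlying service and bind the response to responseType.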
return web3jService.send(this, responseType);
} | @Test
public void testShhGetMessages() throws Exception {
web3j.shhGetMessages(Numeric.toBigInt("0x7")).send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"shh_getMessages\","
+ "\"params\":[\"0x7\"],\"id\":1}");
} |
public PullRequestStateProducer(GitHubEndpoint endpoint) throws Exception {
super(endpoint);
Registry registry = endpoint.getCamelContext().getRegistry();
Object service = registry.lookupByName(GitHubConstants.GITHUB_COMMIT_SERVICE);
if (service != null) {
LOG.debug("Using CommitService found in registry {}", service.getClass().getCanonicalName());
commitService = (CommitService) service;
} else {
commitService = new CommitService();
}
initService(commitService);
state = endpoint.getState();
targetUrl = endpoint.getTargetUrl();
} | @Test
public void testPullRequestStateProducer() {
commitsha = commitService.getNextSha();
Endpoint stateProducerEndpoint = getMandatoryEndpoint("direct:validPullRequest");
Exchange exchange = stateProducerEndpoint.createExchange();
String text = "Message sent at " + new Date();
exchange.getIn().setBody(text);
Exchange response = template.send(stateProducerEndpoint, exchange);
assertNotNull(response.getMessage().getBody());
if (!(response.getMessage().getBody() instanceof CommitStatus)) {
fail("Expecting CommitStatus");
}
CommitStatus status = response.getMessage().getBody(CommitStatus.class);
// Check status set on commit service
if (commitService.getCommitStatus(commitsha) != status) {
fail("Commit status sent to service is different from response");
}
assertEquals("success", status.getState());
assertEquals(status.getDescription(), text);
} |
@Override
@TpsControl(pointName = "ConfigPublish")
@Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG)
@ExtractorManager.Extractor(rpcExtractor = ConfigRequestParamExtractor.class)
public ConfigPublishResponse handle(ConfigPublishRequest request, RequestMeta meta) throws NacosException {
try {
String dataId = request.getDataId();
String group = request.getGroup();
String content = request.getContent();
final String tenant = request.getTenant();
final String srcIp = meta.getClientIp();
final String requestIpApp = request.getAdditionParam("requestIpApp");
final String tag = request.getAdditionParam("tag");
final String appName = request.getAdditionParam("appName");
final String type = request.getAdditionParam("type");
final String srcUser = request.getAdditionParam("src_user");
final String encryptedDataKey = request.getAdditionParam("encryptedDataKey");
// check tenant
ParamUtils.checkParam(dataId, group, "datumId", content);
ParamUtils.checkParam(tag);
Map<String, Object> configAdvanceInfo = new HashMap<>(10);
MapUtil.putIfValNoNull(configAdvanceInfo, "config_tags", request.getAdditionParam("config_tags"));
MapUtil.putIfValNoNull(configAdvanceInfo, "desc", request.getAdditionParam("desc"));
MapUtil.putIfValNoNull(configAdvanceInfo, "use", request.getAdditionParam("use"));
MapUtil.putIfValNoNull(configAdvanceInfo, "effect", request.getAdditionParam("effect"));
MapUtil.putIfValNoNull(configAdvanceInfo, "type", type);
MapUtil.putIfValNoNull(configAdvanceInfo, "schema", request.getAdditionParam("schema"));
ParamUtils.checkParam(configAdvanceInfo);
if (AggrWhitelist.isAggrDataId(dataId)) {
Loggers.REMOTE_DIGEST.warn("[aggr-conflict] {} attempt to publish single data, {}, {}", srcIp, dataId,
group);
throw new NacosException(NacosException.NO_RIGHT, "dataId:" + dataId + " is aggr");
}
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
configInfo.setMd5(request.getCasMd5());
configInfo.setType(type);
configInfo.setEncryptedDataKey(encryptedDataKey);
String betaIps = request.getAdditionParam("betaIps");
ConfigOperateResult configOperateResult = null;
String persistEvent = ConfigTraceService.PERSISTENCE_EVENT;
if (StringUtils.isBlank(betaIps)) {
if (StringUtils.isBlank(tag)) {
if (StringUtils.isNotBlank(request.getCasMd5())) {
configOperateResult = configInfoPersistService.insertOrUpdateCas(srcIp, srcUser, configInfo,
configAdvanceInfo);
if (!configOperateResult.isSuccess()) {
return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
"Cas publish fail,server md5 may have changed.");
}
} else {
configOperateResult = configInfoPersistService.insertOrUpdate(srcIp, srcUser, configInfo,
configAdvanceInfo);
}
ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent(false, dataId, group, tenant,
configOperateResult.getLastModified()));
} else {
if (StringUtils.isNotBlank(request.getCasMd5())) {
configOperateResult = configInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp,
srcUser);
if (!configOperateResult.isSuccess()) {
return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
"Cas publish tag config fail,server md5 may have changed.");
}
} else {
configOperateResult = configInfoTagPersistService.insertOrUpdateTag(configInfo, tag, srcIp,
srcUser);
}
persistEvent = ConfigTraceService.PERSISTENCE_EVENT_TAG + "-" + tag;
ConfigChangePublisher.notifyConfigChange(
new ConfigDataChangeEvent(false, dataId, group, tenant, tag,
configOperateResult.getLastModified()));
}
} else {
// beta publish
if (StringUtils.isNotBlank(request.getCasMd5())) {
configOperateResult = configInfoBetaPersistService.insertOrUpdateBetaCas(configInfo, betaIps, srcIp,
srcUser);
if (!configOperateResult.isSuccess()) {
return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
"Cas publish beta config fail,server md5 may have changed.");
}
} else {
configOperateResult = configInfoBetaPersistService.insertOrUpdateBeta(configInfo, betaIps, srcIp,
srcUser);
}
persistEvent = ConfigTraceService.PERSISTENCE_EVENT_BETA;
ConfigChangePublisher.notifyConfigChange(
new ConfigDataChangeEvent(true, dataId, group, tenant, configOperateResult.getLastModified()));
}
ConfigTraceService.logPersistenceEvent(dataId, group, tenant, requestIpApp,
configOperateResult.getLastModified(), srcIp, persistEvent, ConfigTraceService.PERSISTENCE_TYPE_PUB,
content);
return ConfigPublishResponse.buildSuccessResponse();
} catch (Exception e) {
Loggers.REMOTE_DIGEST.error("[ConfigPublishRequestHandler] publish config error ,request ={}", request, e);
return ConfigPublishResponse.buildFailResponse(
(e instanceof NacosException) ? ((NacosException) e).getErrCode() : ResponseCode.FAIL.getCode(),
e.getMessage());
}
} | @Test
void testBetaPublishNotCas() throws NacosException, InterruptedException {
String dataId = "testBetaPublish";
String group = "group";
String tenant = "tenant";
String content = "content";
ConfigPublishRequest configPublishRequest = new ConfigPublishRequest();
configPublishRequest.setDataId(dataId);
configPublishRequest.setGroup(group);
configPublishRequest.setTenant(tenant);
configPublishRequest.setContent(content);
Map<String, String> keyMap = new HashMap<>();
String srcUser = "src_user111";
keyMap.put("src_user", srcUser);
String betaIps = "127.0.0.1,127.0.0.2";
keyMap.put("betaIps", betaIps);
configPublishRequest.setAdditionMap(keyMap);
RequestMeta requestMeta = new RequestMeta();
String clientIp = "127.0.0.1";
requestMeta.setClientIp(clientIp);
AtomicReference<ConfigDataChangeEvent> reference = new AtomicReference<>();
NotifyCenter.registerSubscriber(new Subscriber() {
@Override
public void onEvent(Event event) {
ConfigDataChangeEvent event1 = (ConfigDataChangeEvent) event;
if (event1.dataId.equals(dataId)) {
reference.set((ConfigDataChangeEvent) event);
}
}
@Override
public Class<? extends Event> subscribeType() {
return ConfigDataChangeEvent.class;
}
});
ConfigOperateResult configOperateResult = new ConfigOperateResult(true);
long timestamp = System.currentTimeMillis();
long id = timestamp / 1000;
configOperateResult.setId(id);
configOperateResult.setLastModified(timestamp);
when(configInfoBetaPersistService.insertOrUpdateBeta(any(ConfigInfo.class), eq(betaIps), eq(requestMeta.getClientIp()),
eq(srcUser))).thenReturn(configOperateResult);
ConfigPublishResponse response = configPublishRequestHandler.handle(configPublishRequest, requestMeta);
assertEquals(ResponseCode.SUCCESS.getCode(), response.getResultCode());
Thread.sleep(500L);
assertTrue(reference.get() != null);
assertEquals(dataId, reference.get().dataId);
assertEquals(group, reference.get().group);
assertEquals(tenant, reference.get().tenant);
assertEquals(timestamp, reference.get().lastModifiedTs);
assertFalse(reference.get().isBatch);
assertTrue(reference.get().isBeta);
} |
public static boolean isNullOrBlank(final CharSequence cs) {
if (cs == null) {
return true;
}
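// A non-null sequence is blank only if every character is whitespace.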
for (int c : cs.chars().toArray()) {
if (!Character.isWhitespace(c)) {
return false;
}
}
return true;
} | @Test
public void testIsBlank() throws IOException {
assertTrue(StringUtils.isNullOrBlank(null));
assertTrue(StringUtils.isNullOrBlank(""));
assertTrue(StringUtils.isNullOrBlank(" "));
assertFalse(StringUtils.isNullOrBlank(" a "));
assertFalse(StringUtils.isNullOrBlank("abc"));
} |
@Override
public MapperResult findAllConfigInfoFragment(MapperContext context) {
String contextParameter = context.getContextParameter(ContextConstant.NEED_CONTENT);
boolean needContent = contextParameter != null && Boolean.parseBoolean(contextParameter);
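// Include the content column in the projection only when explicitly requested via the context.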
return new MapperResult("SELECT id,data_id,group_id,tenant_id,app_name," + (needContent ? "content," : "")
+ "md5,gmt_modified,type FROM config_info WHERE id > ? " + "ORDER BY id ASC OFFSET "
+ context.getStartRow() + " ROWS FETCH NEXT " + context.getPageSize() + " ROWS ONLY",
CollectionUtils.list(context.getWhereParameter(FieldConstant.ID)));
} | @Test
void testFindAllConfigInfoFragment() {
//with content
context.putContextParameter(ContextConstant.NEED_CONTENT, "true");
MapperResult mapperResult = configInfoMapperByDerby.findAllConfigInfoFragment(context);
assertEquals(mapperResult.getSql(), "SELECT id,data_id,group_id,tenant_id,app_name,content,md5,gmt_modified,type FROM config_info "
+ "WHERE id > ? ORDER BY id ASC OFFSET " + startRow + " ROWS FETCH NEXT " + pageSize + " ROWS ONLY");
assertArrayEquals(new Object[] {id}, mapperResult.getParamList().toArray());
//without content
context.putContextParameter(ContextConstant.NEED_CONTENT, "false");
MapperResult mapperResult2 = configInfoMapperByDerby.findAllConfigInfoFragment(context);
assertEquals(mapperResult2.getSql(), "SELECT id,data_id,group_id,tenant_id,app_name,md5,gmt_modified,type FROM config_info "
+ "WHERE id > ? ORDER BY id ASC OFFSET " + startRow + " ROWS FETCH NEXT " + pageSize + " ROWS ONLY");
assertArrayEquals(new Object[] {id}, mapperResult2.getParamList().toArray());
} |
@Override
public void getConfig(MetricsNodesConfig.Builder builder) {
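// Emit one metrics-node config entry per container in this cluster.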
builder.node.addAll(MetricsNodesConfigGenerator.generate(getContainers()));
} | @Test
void cluster_is_prepared_so_that_application_metadata_config_is_produced() {
VespaModel model = getModel(servicesWithAdminOnly(), self_hosted);
ApplicationMetadataConfig config = model.getConfig(ApplicationMetadataConfig.class, CLUSTER_CONFIG_ID);
assertEquals(MockApplicationPackage.APPLICATION_GENERATION, config.generation());
assertEquals(MockApplicationPackage.APPLICATION_NAME, config.name());
} |
public Map<String, Parameter> generateMergedStepParams(
WorkflowSummary workflowSummary,
Step stepDefinition,
StepRuntime stepRuntime,
StepRuntimeSummary runtimeSummary) {
Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
// Start with default step level params if present
Map<String, ParamDefinition> globalDefault = defaultParamManager.getDefaultStepParams();
if (globalDefault != null) {
ParamsMergeHelper.mergeParams(
allParamDefs,
globalDefault,
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_DEFAULT));
}
// Merge in injected params returned by step if present (template schema)
Map<String, ParamDefinition> injectedParams =
stepRuntime.injectRuntimeParams(workflowSummary, stepDefinition);
maybeOverrideParamType(allParamDefs);
if (injectedParams != null) {
maybeOverrideParamType(injectedParams);
ParamsMergeHelper.mergeParams(
allParamDefs,
injectedParams,
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.TEMPLATE_SCHEMA));
}
// Merge in params applicable to step type
Optional<Map<String, ParamDefinition>> defaultStepTypeParams =
defaultParamManager.getDefaultParamsForType(stepDefinition.getType());
if (defaultStepTypeParams.isPresent()) {
LOG.debug("Merging step level default for {}", stepDefinition.getType());
ParamsMergeHelper.mergeParams(
allParamDefs,
defaultStepTypeParams.get(),
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_DEFAULT));
}
// Merge in workflow and step info
ParamsMergeHelper.mergeParams(
allParamDefs,
injectWorkflowAndStepInfoParams(workflowSummary, runtimeSummary),
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_INJECTED));
// merge step run param and user provided restart step run params
// first to get undefined params from both run param and restart params
Map<String, ParamDefinition> undefinedRestartParams = new LinkedHashMap<>();
Optional<Map<String, ParamDefinition>> stepRestartParams =
getUserStepRestartParam(workflowSummary, runtimeSummary);
stepRestartParams.ifPresent(undefinedRestartParams::putAll);
Optional<Map<String, ParamDefinition>> stepRunParams =
getStepRunParams(workflowSummary, runtimeSummary);
Map<String, ParamDefinition> systemInjectedRestartRunParams = new LinkedHashMap<>();
stepRunParams.ifPresent(
params -> {
params.forEach(
(key, val) -> {
if (runtimeSummary.getRestartConfig() != null
&& Constants.RESERVED_PARAM_NAMES.contains(key)
&& val.getMode() == ParamMode.CONSTANT
&& val.getSource() == ParamSource.SYSTEM_INJECTED) {
((AbstractParamDefinition) val)
.getMeta()
.put(Constants.METADATA_SOURCE_KEY, ParamSource.RESTART.name());
systemInjectedRestartRunParams.put(key, val);
}
});
systemInjectedRestartRunParams.keySet().forEach(params::remove);
});
stepRunParams.ifPresent(undefinedRestartParams::putAll);
Optional.ofNullable(stepDefinition.getParams())
.ifPresent(
stepDefParams ->
stepDefParams.keySet().stream()
.filter(undefinedRestartParams::containsKey)
.forEach(undefinedRestartParams::remove));
// Then merge undefined restart params
if (!undefinedRestartParams.isEmpty()) {
mergeUserProvidedStepParams(allParamDefs, undefinedRestartParams, workflowSummary);
}
// Final merge from step definition
if (stepDefinition.getParams() != null) {
maybeOverrideParamType(stepDefinition.getParams());
ParamsMergeHelper.mergeParams(
allParamDefs,
stepDefinition.getParams(),
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.DEFINITION));
}
// merge step run params
stepRunParams.ifPresent(
stepParams -> mergeUserProvidedStepParams(allParamDefs, stepParams, workflowSummary));
// merge all user provided restart step run params
stepRestartParams.ifPresent(
stepParams -> mergeUserProvidedStepParams(allParamDefs, stepParams, workflowSummary));
// merge all system injected restart step run params with mode and source already set.
allParamDefs.putAll(systemInjectedRestartRunParams);
// Cleanup any params that are missing and convert to params
return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
} | @Test
public void testRestartConfigStepRunParamMergeOrder() {
((TypedStep) step)
.setParams(
twoItemMap(
"p1", ParamDefinition.buildParamDefinition("p1", "d1"),
"p2", ParamDefinition.buildParamDefinition("p2", "d2")));
Map<String, Map<String, ParamDefinition>> stepRunParams =
singletonMap(
"stepid",
twoItemMap(
"p2", ParamDefinition.buildParamDefinition("p2", "d3"),
"p3", ParamDefinition.buildParamDefinition("p3", "d4")));
Map<String, Map<String, ParamDefinition>> stepRestartParams =
singletonMap(
"stepid",
twoItemMap(
"p2", ParamDefinition.buildParamDefinition("p2", "d5"),
"pp", ParamDefinition.buildParamDefinition("pp", "dd")));
ManualInitiator manualInitiator = new ManualInitiator();
workflowSummary.setInitiator(manualInitiator);
workflowSummary.setStepRunParams(stepRunParams);
workflowSummary.setRestartConfig(
RestartConfig.builder()
.addRestartNode("sample-wf-map-params", 1, "foo")
.stepRestartParams(stepRestartParams)
.build());
workflowSummary.setRunPolicy(RunPolicy.RESTART_FROM_SPECIFIC);
Map<String, Parameter> stepParams =
paramsManager.generateMergedStepParams(workflowSummary, step, stepRuntime, runtimeSummary);
Assert.assertEquals("d1", stepParams.get("p1").asStringParam().getValue());
Assert.assertEquals("d5", stepParams.get("p2").asStringParam().getValue());
Assert.assertEquals("d4", stepParams.get("p3").asStringParam().getValue());
Assert.assertEquals("dd", stepParams.get("pp").asStringParam().getValue());
Assert.assertEquals(ParamSource.DEFINITION, stepParams.get("p1").getSource());
Assert.assertEquals(ParamSource.RESTART, stepParams.get("p2").getSource());
Assert.assertEquals(ParamSource.RESTART, stepParams.get("p3").getSource());
Assert.assertEquals(ParamSource.RESTART, stepParams.get("pp").getSource());
Assert.assertEquals(
Arrays.asList("pp", "p3", "p1", "p2"),
new ArrayList<>(stepParams.keySet()).subList(stepParams.size() - 4, stepParams.size()));
} |
public int size() {
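// The tree's size is the sum of the sizes reported by its root records.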
return records.stream().mapToInt(r -> r.size()).sum();
} | @Test
public void emptyTreeShouldBeZeroSized() {
assertEquals(0, new SObjectTree().size());
} |
public Map<String, LdapContextFactory> getContextFactories() {
if (contextFactories == null) {
contextFactories = new LinkedHashMap<>();
String[] serverKeys = config.getStringArray(LDAP_SERVERS_PROPERTY);
if (serverKeys.length > 0) {
initMultiLdapConfiguration(serverKeys);
} else {
initSimpleLdapConfiguration();
}
}
return contextFactories;
} | @Test
public void testContextFactoriesWithMultipleLdap() {
LdapSettingsManager settingsManager = new LdapSettingsManager(
generateMultipleLdapSettingsWithUserAndGroupMapping().asConfig());
assertThat(settingsManager.getContextFactories()).hasSize(2);
// We do it twice to make sure the settings stay the same.
assertThat(settingsManager.getContextFactories()).hasSize(2);
} |
@Override
public void accept(Props props) {
if (isClusterEnabled(props)) {
checkClusterProperties(props);
}
} | @Test
public void accept_throws_MessageException_if_node_type_is_not_correct() {
TestAppSettings settings = new TestAppSettings(of(CLUSTER_ENABLED.getKey(), "true", CLUSTER_NODE_TYPE.getKey(), "bla"));
ClusterSettings clusterSettings = new ClusterSettings(network);
Props props = settings.getProps();
assertThatThrownBy(() -> clusterSettings.accept(props))
.isInstanceOf(MessageException.class)
.hasMessage("Invalid value for property sonar.cluster.node.type: [bla], only [application, search] are allowed");
} |
public CharacterPosition getLastNonWhitespacePosition()
{
return _lastNonWhitespacePosition;
} | @Test
public void testGetLastNonWhitespacePosition() throws IOException
{
LineColumnNumberWriter writer = new LineColumnNumberWriter(new StringWriter());
writer.write("123");
Assert.assertEquals(writer.getLastNonWhitespacePosition(), new LineColumnNumberWriter.CharacterPosition(1, 3));
writer.write("\n ");
Assert.assertEquals(writer.getLastNonWhitespacePosition(), new LineColumnNumberWriter.CharacterPosition(1, 3));
writer.write("4");
Assert.assertEquals(writer.getLastNonWhitespacePosition(), new LineColumnNumberWriter.CharacterPosition(2, 2));
} |
public static String getProperty(String propertyName, String envName) {
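// The environment variable takes precedence; fall back to the JVM system property.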
return System.getenv().getOrDefault(envName, System.getProperty(propertyName));
} | @Test
void getPropertyWithDefaultValue() {
String property = PropertyUtils.getProperty("nacos.test", "xx", "test001");
assertEquals("test001", property);
} |
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
// In PostgreSQL, change stream records are returned as JSONB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
} | @Test
public void testMappingUpdateStructRowToDataChangeRecord() {
final DataChangeRecord dataChangeRecord =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeSecondsAndNanos(10L, 20),
"serverTransactionId",
true,
"1",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
Collections.singletonList(
new Mod(
"{\"column1\":\"value1\"}",
"{\"column2\":\"oldValue2\"}",
"{\"column2\":\"newValue2\"}")),
ModType.UPDATE,
ValueCaptureType.OLD_AND_NEW_VALUES,
10L,
2L,
"transactionTag",
true,
null);
final Struct jsonFieldsStruct = recordsToStructWithJson(dataChangeRecord);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getCurrentRowAsStruct()).thenReturn(jsonFieldsStruct);
assertEquals(
Collections.singletonList(dataChangeRecord),
mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
} |
public static ColumnIndex build(
PrimitiveType type,
BoundaryOrder boundaryOrder,
List<Boolean> nullPages,
List<Long> nullCounts,
List<ByteBuffer> minValues,
List<ByteBuffer> maxValues) {
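// Delegate to the full overload, leaving the two additional trailing arguments null.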
return build(type, boundaryOrder, nullPages, nullCounts, minValues, maxValues, null, null);
} | @Test
public void testStaticBuildDouble() {
ColumnIndex columnIndex = ColumnIndexBuilder.build(
Types.required(DOUBLE).named("test_double"),
BoundaryOrder.UNORDERED,
asList(false, false, false, false, false, false),
asList(0l, 1l, 2l, 3l, 4l, 5l),
toBBList(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0),
toBBList(1.0, 2.0, 3.0, 4.0, 5.0, 6.0));
assertEquals(BoundaryOrder.UNORDERED, columnIndex.getBoundaryOrder());
assertCorrectNullCounts(columnIndex, 0, 1, 2, 3, 4, 5);
assertCorrectNullPages(columnIndex, false, false, false, false, false, false);
assertCorrectValues(columnIndex.getMaxValues(), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
assertCorrectValues(columnIndex.getMinValues(), -1.0, -2.0, -3.0, -4.0, -5.0, -6.0);
} |
@Override
public String toString() {
StringBuilder b = new StringBuilder();
if (StringUtils.isNotBlank(protocol)) {
b.append(protocol);
b.append("://");
}
if (StringUtils.isNotBlank(host)) {
b.append(host);
}
if (!isPortDefault() && port != -1) {
b.append(':');
b.append(port);
}
if (StringUtils.isNotBlank(path)) {
// If no scheme/host/port, leave the path as is
if (b.length() > 0 && !path.startsWith("/")) {
b.append('/');
}
b.append(encodePath(path));
}
if (queryString != null && !queryString.isEmpty()) {
b.append(queryString.toString());
}
if (fragment != null) {
b.append("#");
b.append(encodePath(fragment));
}
return b.toString();
} | @Test
public void testNonHttpProtocolNoPort() {
s = "ftp://ftp.example.com/dir";
t = "ftp://ftp.example.com/dir";
assertEquals(t, new HttpURL(s).toString());
} |
public static MocoEventAction get(final String url, final HttpHeader... headers) {
return get(text(checkNotNullOrEmpty(url, "URL should not be null")),
checkNotNull(headers, "Headers should not be null"));
} | @Test
public void should_throw_exception_for_unknown_request() {
assertThrows(HttpResponseException.class, () ->
running(server, () -> assertThat(helper.get(root()), is("bar"))));
} |
@Override
protected void verifyConditions(ScesimModelDescriptor scesimModelDescriptor,
ScenarioRunnerData scenarioRunnerData,
ExpressionEvaluatorFactory expressionEvaluatorFactory,
Map<String, Object> requestContext) {
DMNResult dmnResult = (DMNResult) requestContext.get(DMNScenarioExecutableBuilder.DMN_RESULT);
List<DMNMessage> dmnMessages = dmnResult.getMessages();
for (ScenarioExpect output : scenarioRunnerData.getExpects()) {
FactIdentifier factIdentifier = output.getFactIdentifier();
String decisionName = factIdentifier.getName();
DMNDecisionResult decisionResult = dmnResult.getDecisionResultByName(decisionName);
if (decisionResult == null) {
throw new ScenarioException("DMN execution has not generated a decision result with name " + decisionName);
}
for (FactMappingValue expectedResult : output.getExpectedResult()) {
ExpressionIdentifier expressionIdentifier = expectedResult.getExpressionIdentifier();
FactMapping factMapping = scesimModelDescriptor.getFactMapping(factIdentifier, expressionIdentifier)
.orElseThrow(() -> new IllegalStateException("Wrong expression, this should not happen"));
ExpressionEvaluator expressionEvaluator = expressionEvaluatorFactory.getOrCreate(expectedResult);
ScenarioResult scenarioResult = fillResult(expectedResult,
() -> getSingleFactValueResult(factMapping,
expectedResult,
decisionResult,
dmnMessages,
expressionEvaluator),
expressionEvaluator);
scenarioRunnerData.addResult(scenarioResult);
}
}
} | @Test
public void verifyConditions_noDecisionGeneratedForSpecificName() {
// test 1 - no decision generated for specific decisionName
ScenarioRunnerData scenarioRunnerData = new ScenarioRunnerData();
scenarioRunnerData.addExpect(new ScenarioExpect(personFactIdentifier, List.of(firstNameExpectedValue)));
assertThatThrownBy(() -> runnerHelper.verifyConditions(simulation.getScesimModelDescriptor(), scenarioRunnerData, expressionEvaluatorFactory, requestContextMock))
.isInstanceOf(ScenarioException.class)
.hasMessage("DMN execution has not generated a decision result with name Fact 1");
} |
@Override
public double sd() {
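// Standard deviation of the negative binomial distribution: sqrt(r * (1 - p)) / p.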
return Math.sqrt(r * (1 - p)) / p;
} | @Test
public void testSd() {
System.out.println("sd");
NegativeBinomialDistribution instance = new NegativeBinomialDistribution(3, 0.3);
instance.rand();
assertEquals(Math.sqrt(7/0.3), instance.sd(), 1E-7);
} |
@Override
public Map<String, Object> assembleFrom(OAuth2AccessTokenEntity accessToken, UserInfo userInfo, Set<String> authScopes) {
Map<String, Object> result = newLinkedHashMap();
OAuth2Authentication authentication = accessToken.getAuthenticationHolder().getAuthentication();
result.put(ACTIVE, true);
if (accessToken.getPermissions() != null && !accessToken.getPermissions().isEmpty()) {
Set<Object> permissions = Sets.newHashSet();
for (Permission perm : accessToken.getPermissions()) {
Map<String, Object> o = newLinkedHashMap();
o.put("resource_set_id", perm.getResourceSet().getId().toString());
Set<String> scopes = Sets.newHashSet(perm.getScopes());
o.put("scopes", scopes);
permissions.add(o);
}
result.put("permissions", permissions);
} else {
Set<String> scopes = Sets.intersection(authScopes, accessToken.getScope());
result.put(SCOPE, Joiner.on(SCOPE_SEPARATOR).join(scopes));
}
if (accessToken.getExpiration() != null) {
try {
result.put(EXPIRES_AT, dateFormat.valueToString(accessToken.getExpiration()));
result.put(EXP, accessToken.getExpiration().getTime() / 1000L);
} catch (ParseException e) {
logger.error("Parse exception in token introspection", e);
}
}
if (userInfo != null) {
// if we have a UserInfo, use that for the subject
result.put(SUB, userInfo.getSub());
} else {
// otherwise, use the authentication's username
result.put(SUB, authentication.getName());
}
if(authentication.getUserAuthentication() != null) {
result.put(USER_ID, authentication.getUserAuthentication().getName());
}
result.put(CLIENT_ID, authentication.getOAuth2Request().getClientId());
result.put(TOKEN_TYPE, accessToken.getTokenType());
return result;
} | @Test
public void shouldAssembleExpectedResultForRefreshToken() throws ParseException {
// given
OAuth2RefreshTokenEntity refreshToken = refreshToken(new Date(123 * 1000L),
oauth2AuthenticationWithUser(oauth2Request("clientId", scopes("foo", "bar")), "name"));
UserInfo userInfo = userInfo("sub");
Set<String> authScopes = scopes("foo", "bar", "baz");
// when
Map<String, Object> result = assembler.assembleFrom(refreshToken, userInfo, authScopes);
// then
Map<String, Object> expected = new ImmutableMap.Builder<String, Object>()
.put("sub", "sub")
.put("exp", 123L)
.put("expires_at", dateFormat.valueToString(new Date(123 * 1000L)))
.put("scope", "bar foo")
.put("active", Boolean.TRUE)
.put("user_id", "name")
.put("client_id", "clientId")
.build();
assertThat(result, is(equalTo(expected)));
} |
@GET
@Path("{noteId}/revision")
@ZeppelinApi
public Response getNoteRevisionHistory(@PathParam("noteId") String noteId) throws IOException {
LOGGER.info("Get revision history of note {}", noteId);
List<NotebookRepoWithVersionControl.Revision> revisions = notebookService.listRevisionHistory(noteId, getServiceContext(), new RestServiceCallback<>());
return new JsonResponse<>(Status.OK, revisions).build();
} | @Test
void testGetNoteRevisionHistory() throws IOException {
LOG.info("Running testGetNoteRevisionHistory");
String note1Id = null;
try {
String notePath = "note1";
note1Id = notebook.createNote(notePath, anonymous);
//Add a paragraph and commit
NotebookRepoWithVersionControl.Revision first_commit =
notebook.processNote(note1Id, note -> {
Paragraph p1 = note.addNewParagraph(anonymous);
p1.setText("text1");
notebook.saveNote(note, AuthenticationInfo.ANONYMOUS);
return notebook.checkpointNote(note.getId(), note.getPath(), "first commit", anonymous);
});
//Add a paragraph again
notebook.processNote(note1Id, note -> {
Paragraph p2 = note.addNewParagraph(anonymous);
p2.setText("text2");
notebook.saveNote(note, AuthenticationInfo.ANONYMOUS);
return null;
});
// Verify
CloseableHttpResponse get1 = httpGet("/notebook/" + note1Id + "/revision");
assertThat(get1, isAllowed());
Map<String, Object> resp = gson.fromJson(EntityUtils.toString(get1.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() {
}.getType());
List<Map<String, Object>> body = (List<Map<String, Object>>) resp.get("body");
assertEquals(1, body.size());
assertEquals(first_commit.id, body.get(0).get("id"));
get1.close();
// Second commit
NotebookRepoWithVersionControl.Revision second_commit = notebook.processNote(note1Id, note -> notebook.checkpointNote(note.getId(), note.getPath(), "Second commit", anonymous));
// Verify
CloseableHttpResponse get2 = httpGet("/notebook/" + note1Id + "/revision");
assertThat(get2, isAllowed());
resp = gson.fromJson(EntityUtils.toString(get2.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() {
}.getType());
body = (List<Map<String, Object>>) resp.get("body");
assertEquals(2, body.size());
assertEquals(second_commit.id, body.get(0).get("id"));
get2.close();
} finally {
// cleanup
if (null != note1Id) {
notebook.removeNote(note1Id, anonymous);
}
}
} |
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
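// Propagate the configuration to the wrapped raw mapping as well.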
getRawMapping().setConf(conf);
} | @Test(timeout=60000)
public void testBadFile() throws IOException {
File mapFile = File.createTempFile(getClass().getSimpleName() +
".testBadFile", ".txt");
Files.asCharSink(mapFile, StandardCharsets.UTF_8).write("bad contents");
mapFile.deleteOnExit();
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile.getCanonicalPath());
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals(result.get(0), NetworkTopology.DEFAULT_RACK);
assertEquals(result.get(1), NetworkTopology.DEFAULT_RACK);
} |
public FinalizedFeatures(
MetadataVersion metadataVersion,
Map<String, Short> finalizedFeatures,
long finalizedFeaturesEpoch,
boolean kraftMode
) {
this.metadataVersion = metadataVersion;
this.finalizedFeatures = new HashMap<>(finalizedFeatures);
this.finalizedFeaturesEpoch = finalizedFeaturesEpoch;
// In KRaft mode, we always include the metadata version in the features map.
// In ZK mode, we never include it.
if (kraftMode) {
this.finalizedFeatures.put(MetadataVersion.FEATURE_NAME, metadataVersion.featureLevel());
} else {
this.finalizedFeatures.remove(MetadataVersion.FEATURE_NAME);
}
} | @Test
public void testKRaftModeFeatures() {
FinalizedFeatures finalizedFeatures = new FinalizedFeatures(MINIMUM_KRAFT_VERSION,
Collections.singletonMap("foo", (short) 2), 123, true);
assertEquals(MINIMUM_KRAFT_VERSION.featureLevel(),
finalizedFeatures.finalizedFeatures().get(FEATURE_NAME));
assertEquals((short) 2,
finalizedFeatures.finalizedFeatures().get("foo"));
assertEquals(2, finalizedFeatures.finalizedFeatures().size());
} |
public static byte[] compress(String urlString) throws MalformedURLException {
byte[] compressedBytes = null;
if (urlString != null) {
// Figure the compressed bytes can't be longer than the original string.
byte[] byteBuffer = new byte[urlString.length()];
int byteBufferIndex = 0;
Arrays.fill(byteBuffer, (byte) 0x00);
Pattern urlPattern = Pattern.compile(EDDYSTONE_URL_REGEX);
Matcher urlMatcher = urlPattern.matcher(urlString);
if (urlMatcher.matches()) {
// www.
String wwwdot = urlMatcher.group(EDDYSTONE_URL_WWW_GROUP);
boolean haswww = (wwwdot != null);
// Protocol.
String rawProtocol = urlMatcher.group(EDDYSTONE_URL_PROTOCOL_GROUP);
String protocol = rawProtocol.toLowerCase();
if (protocol.equalsIgnoreCase(URL_PROTOCOL_HTTP)) {
byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTP_WWW : EDDYSTONE_URL_PROTOCOL_HTTP);
}
else {
byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTPS_WWW : EDDYSTONE_URL_PROTOCOL_HTTPS);
}
byteBufferIndex++;
// Fully-qualified domain name (FQDN). This includes the hostname and any other components after the dots
// but BEFORE the first single slash in the URL.
byte[] hostnameBytes = urlMatcher.group(EDDYSTONE_URL_FQDN_GROUP).getBytes();
String rawHostname = new String(hostnameBytes);
String hostname = rawHostname.toLowerCase();
String[] domains = hostname.split(Pattern.quote("."));
boolean consumedSlash = false;
if (domains != null) {
// Write the hostname/subdomains prior to the last one. If there's only one (e. g. http://localhost)
// then that's the only thing to write out.
byte[] periodBytes = {'.'};
int writableDomainsCount = (domains.length == 1 ? 1 : domains.length - 1);
for (int domainIndex = 0; domainIndex < writableDomainsCount; domainIndex++) {
// Write out leading period, if necessary.
if (domainIndex > 0) {
System.arraycopy(periodBytes, 0, byteBuffer, byteBufferIndex, periodBytes.length);
byteBufferIndex += periodBytes.length;
}
byte[] domainBytes = domains[domainIndex].getBytes();
int domainLength = domainBytes.length;
System.arraycopy(domainBytes, 0, byteBuffer, byteBufferIndex, domainLength);
byteBufferIndex += domainLength;
}
// Is the TLD one that we can encode?
if (domains.length > 1) {
String tld = "." + domains[domains.length - 1];
String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP);
String encodableTLDCandidate = (slash == null ? tld : tld + slash);
byte encodedTLDByte = encodedByteForTopLevelDomain(encodableTLDCandidate);
if (encodedTLDByte != TLD_NOT_ENCODABLE) {
byteBuffer[byteBufferIndex++] = encodedTLDByte;
consumedSlash = (slash != null);
} else {
byte[] tldBytes = tld.getBytes();
int tldLength = tldBytes.length;
System.arraycopy(tldBytes, 0, byteBuffer, byteBufferIndex, tldLength);
byteBufferIndex += tldLength;
}
}
}
// Optional slash.
if (! consumedSlash) {
String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP);
if (slash != null) {
int slashLength = slash.length();
System.arraycopy(slash.getBytes(), 0, byteBuffer, byteBufferIndex, slashLength);
byteBufferIndex += slashLength;
}
}
// Path.
String path = urlMatcher.group(EDDYSTONE_URL_PATH_GROUP);
if (path != null) {
int pathLength = path.length();
System.arraycopy(path.getBytes(), 0, byteBuffer, byteBufferIndex, pathLength);
byteBufferIndex += pathLength;
}
// Copy the result.
compressedBytes = new byte[byteBufferIndex];
System.arraycopy(byteBuffer, 0, compressedBytes, 0, compressedBytes.length);
}
else {
throw new MalformedURLException();
}
}
else {
throw new MalformedURLException();
}
return compressedBytes;
} | @Test
public void testCompressWithSubdomains() throws MalformedURLException {
String testURL = "http://www.forums.google.com";
byte[] expectedBytes = {0x00, 'f', 'o', 'r', 'u', 'm', 's', '.', 'g', 'o', 'o', 'g', 'l', 'e', 0x07};
assertTrue(Arrays.equals(expectedBytes, UrlBeaconUrlCompressor.compress(testURL)));
} |
public static String fix(final String raw) {
if ( raw == null || "".equals( raw.trim() )) {
return raw;
}
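// Expand the known macros (e.g. update -> drools.update) in the given snippet.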
MacroProcessor macroProcessor = new MacroProcessor();
macroProcessor.setMacros( macros );
return macroProcessor.parse( raw );
} | @Test
public void testAdd__Handle__withNewLines() {
final String result = KnowledgeHelperFixerTest.fixer.fix( "\n\t\n\tupdate( myObject );" );
assertEqualsIgnoreWhitespace( "\n\t\n\tdrools.update( myObject );",
result );
} |
public static List<UpdateRequirement> forReplaceView(
ViewMetadata base, List<MetadataUpdate> metadataUpdates) {
Preconditions.checkArgument(null != base, "Invalid view metadata: null");
Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");
Builder builder = new Builder(null, false);
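// Replacing a view always requires the UUID of the base metadata to match.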
builder.require(new UpdateRequirement.AssertViewUUID(base.uuid()));
metadataUpdates.forEach(builder::update);
return builder.build();
} | @Test
public void emptyUpdatesForReplaceView() {
assertThat(UpdateRequirements.forReplaceView(viewMetadata, ImmutableList.of()))
.hasSize(1)
.hasOnlyElementsOfType(UpdateRequirement.AssertViewUUID.class);
} |
@Override
public String named() {
return PluginEnum.DIVIDE.getName();
} | @Test
public void namedTest() {
assertEquals(PluginEnum.DIVIDE.getName(), dividePlugin.named());
} |
public Map<String, String> getExtraInfo() {
return extraInfo;
} | @Test
void getExtraInfo() {} |
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteCombinationActivity(Long id) {
// validate that the activity exists
CombinationActivityDO activity = validateCombinationActivityExists(id);
// validate the status
if (CommonStatusEnum.isEnable(activity.getStatus())) {
throw exception(COMBINATION_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED_OR_END);
}
// delete
combinationActivityMapper.deleteById(id);
} | @Test
public void testDeleteCombinationActivity_success() {
// mock data
CombinationActivityDO dbCombinationActivity = randomPojo(CombinationActivityDO.class);
combinationActivityMapper.insert(dbCombinationActivity);// @Sql: insert an existing record first
// prepare parameters
Long id = dbCombinationActivity.getId();
// invoke
combinationActivityService.deleteCombinationActivity(id);
// verify the data no longer exists
assertNull(combinationActivityMapper.selectById(id));
} |
public IsJson(Matcher<? super ReadContext> jsonMatcher) {
this.jsonMatcher = jsonMatcher;
} | @Test
public void shouldMatchJsonStringEvaluatedToTrue() {
assertThat(BOOKS_JSON_STRING, isJson(withPathEvaluatedTo(true)));
} |
public String getSha1sum() {
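// Lazily compute and cache the SHA-1 hash on first access.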
if (sha1sum == null) {
this.sha1sum = determineHashes(SHA1_HASHING_FUNCTION);
}
return this.sha1sum;
} | @Test
public void testGetSha1sum() {
//File file = new File(this.getClass().getClassLoader().getResource("struts2-core-2.1.2.jar").getPath());
File file = BaseTest.getResourceAsFile(this, "struts2-core-2.1.2.jar");
Dependency instance = new Dependency(file);
//String expResult = "89CE9E36AA9A9E03F1450936D2F4F8DD0F961F8B";
String expResult = "89ce9e36aa9a9e03f1450936d2f4f8dd0f961f8b";
String result = instance.getSha1sum();
assertEquals(expResult, result);
} |
@NonNull
public HealthStateAggregator healthStateAggregator() {
if (healthStateAggregator == null) {
final String message = "Cannot access the HealthStateAggregator before HealthFactory setup has occurred";
LOGGER.error(message);
throw new IllegalStateException(message);
}
return healthStateAggregator;
} | @Test
void gettingHealthStateAggregatorBeforeSetShouldResultInException() {
assertThrows(IllegalStateException.class, () -> healthEnvironment.healthStateAggregator());
} |
@GET
@Path("config")
public Response getAllPackageConfigs() {
try {
Map<String, Map<String, Object>> config = helium.getAllPackageConfig();
return new JsonResponse<>(Response.Status.OK, config).build();
} catch (RuntimeException e) {
logger.error(e.getMessage(), e);
return new JsonResponse<>(Response.Status.INTERNAL_SERVER_ERROR, e.getMessage()).build();
}
} | @Test
void testGetAllPackageConfigs() throws IOException {
CloseableHttpResponse get = httpGet("/helium/config/");
assertThat(get, isAllowed());
Map<String, Object> resp = gson.fromJson(EntityUtils.toString(get.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() { }.getType());
Map body = (Map) resp.get("body");
// ToDo: Apply config with POST command and check update
assertEquals(0, body.size());
get.close();
} |
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("transactionRole:");
sb.append(transactionRole.name());
sb.append(",");
sb.append("address:");
sb.append(address);
sb.append(",");
sb.append("msg:< ");
sb.append(message.toString());
sb.append(" >");
return sb.toString();
} | @Test
public void testToString() {
String expectStr = "transactionRole:RMROLE,address:127.0.0.1:8091,msg:< " + MSG1.toString() + " >";
Assertions.assertEquals(nettyPoolKey.toString(), expectStr);
} |
public Page<ConnectionResponse> convertToConnectionResponse(Page<Connection> allConnections) {
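// Map the entities to response DTOs while preserving the original paging metadata.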
List<ConnectionResponse> convertedConnection = connectionMapper.toConnectionResponse(allConnections.getContent());
return new PageImpl<>(convertedConnection, allConnections.getPageable(), allConnections.getTotalElements());
} | @Test
void convertToConnectionResponse() {
Page<ConnectionResponse> result = connectionServiceMock.convertToConnectionResponse(getPageConnections());
assertEquals(result.getTotalPages(), getPageConnections().getTotalPages());
assertNotNull(result);
} |
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
} | @Test
public void should_clone_map_of_non_serializable_key() {
Map<NonSerializableObject, String> original = new HashMap<>();
original.put(new NonSerializableObject("key"), "value");
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
} |
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
// In PostgreSQL, change stream records are returned as JSONB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
} | @Test
public void testMappingDeleteStructRowNewRowAndOldValuesToDataChangeRecord() {
final DataChangeRecord dataChangeRecord =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeSecondsAndNanos(10L, 20),
"transactionId",
false,
"1",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
Collections.singletonList(
new Mod("{\"column1\":\"value1\"}", "{\"column2\":\"oldValue2\"}", null)),
ModType.DELETE,
ValueCaptureType.NEW_ROW_AND_OLD_VALUES,
10L,
2L,
"transactionTag",
true,
null);
final Struct jsonFieldsStruct = recordsToStructWithJson(dataChangeRecord);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getCurrentRowAsStruct()).thenReturn(jsonFieldsStruct);
assertEquals(
Collections.singletonList(dataChangeRecord),
mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
} |
@Override
public CheckResult runCheck() {
try {
final String filter = buildQueryFilter(stream.getId(), query);
// TODO we don't support cardinality yet
final FieldStatsResult fieldStatsResult = searches.fieldStats(field, "*", filter,
RelativeRange.create(time * 60), false, true, false);
if (fieldStatsResult.count() == 0) {
LOG.debug("Alert check <{}> did not match any messages. Returning not triggered.", type);
return new NegativeCheckResult();
}
final double result;
switch (type) {
case MEAN:
result = fieldStatsResult.mean();
break;
case MIN:
result = fieldStatsResult.min();
break;
case MAX:
result = fieldStatsResult.max();
break;
case SUM:
result = fieldStatsResult.sum();
break;
case STDDEV:
result = fieldStatsResult.stdDeviation();
break;
default:
LOG.error("No such field value check type: [{}]. Returning not triggered.", type);
return new NegativeCheckResult();
}
LOG.debug("Alert check <{}> result: [{}]", id, result);
if (Double.isInfinite(result)) {
// This happens when there are no ES results/docs.
LOG.debug("Infinite value. Returning not triggered.");
return new NegativeCheckResult();
}
final boolean triggered;
switch (thresholdType) {
case HIGHER:
triggered = result > threshold.doubleValue();
break;
case LOWER:
triggered = result < threshold.doubleValue();
break;
default:
triggered = false;
}
if (triggered) {
final String resultDescription = "Field " + field + " had a " + type + " of "
+ decimalFormat.format(result) + " in the last " + time + " minutes with trigger condition "
+ thresholdType + " than " + decimalFormat.format(threshold) + ". "
+ "(Current grace time: " + grace + " minutes)";
final List<MessageSummary> summaries;
if (getBacklog() > 0) {
final List<ResultMessage> searchResult = fieldStatsResult.searchHits();
summaries = Lists.newArrayListWithCapacity(searchResult.size());
for (ResultMessage resultMessage : searchResult) {
final Message msg = resultMessage.getMessage();
summaries.add(new MessageSummary(resultMessage.getIndex(), msg));
}
} else {
summaries = Collections.emptyList();
}
return new CheckResult(true, this, resultDescription, Tools.nowUTC(), summaries);
} else {
return new NegativeCheckResult();
}
} catch (InvalidRangeParametersException e) {
// cannot happen lol
LOG.error("Invalid timerange.", e);
return null;
} catch (FieldTypeException e) {
LOG.debug("Field [{}] seems not to have a numerical type or doesn't even exist at all. Returning not triggered.", field, e);
return new NegativeCheckResult();
}
} | @Test
public void testRunCheckLowerNegative() throws Exception {
for (FieldValueAlertCondition.CheckType checkType : FieldValueAlertCondition.CheckType.values()) {
final double threshold = 50.0;
final double higherThanThreshold = threshold + 10;
FieldValueAlertCondition fieldValueAlertCondition = getFieldValueAlertCondition(getParametersMap(0, 0,
FieldValueAlertCondition.ThresholdType.LOWER,
checkType, threshold, "response_time"),
alertConditionTitle);
fieldStatsShouldReturn(getFieldStatsResult(checkType, higherThanThreshold));
AlertCondition.CheckResult result = fieldValueAlertCondition.runCheck();
assertNotTriggered(result);
}
} |
@Override
public PageResult<SocialClientDO> getSocialClientPage(SocialClientPageReqVO pageReqVO) {
return socialClientMapper.selectPage(pageReqVO);
} | @Test
public void testGetSocialClientPage() {
// mock data
SocialClientDO dbSocialClient = randomPojo(SocialClientDO.class, o -> { // will be matched by the query below
o.setName("芋头");
o.setSocialType(SocialTypeEnum.GITEE.getType());
o.setUserType(UserTypeEnum.ADMIN.getValue());
o.setClientId("yudao");
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
});
socialClientMapper.insert(dbSocialClient);
// test: name does not match
socialClientMapper.insert(cloneIgnoreId(dbSocialClient, o -> o.setName(randomString())));
// test: socialType does not match
socialClientMapper.insert(cloneIgnoreId(dbSocialClient, o -> o.setSocialType(SocialTypeEnum.DINGTALK.getType())));
// test: userType does not match
socialClientMapper.insert(cloneIgnoreId(dbSocialClient, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
// test: clientId does not match
socialClientMapper.insert(cloneIgnoreId(dbSocialClient, o -> o.setClientId("dao")));
// test: status does not match
socialClientMapper.insert(cloneIgnoreId(dbSocialClient, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
// prepare parameters
SocialClientPageReqVO reqVO = new SocialClientPageReqVO();
reqVO.setName("芋");
reqVO.setSocialType(SocialTypeEnum.GITEE.getType());
reqVO.setUserType(UserTypeEnum.ADMIN.getValue());
reqVO.setClientId("yu");
reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
// invoke
PageResult<SocialClientDO> pageResult = socialClientService.getSocialClientPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbSocialClient, pageResult.getList().get(0));
} |
public static List<Event> computeEventDiff(final Params params) {
final List<Event> events = new ArrayList<>();
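// Collect per-node diff events, then the whole-cluster diff event, then diffs for the derived bucket-space states.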
emitPerNodeDiffEvents(createBaselineParams(params), events);
emitWholeClusterDiffEvent(createBaselineParams(params), events);
emitDerivedBucketSpaceStatesDiffEvents(params, events);
return events;
} | @Test
void node_down_edge_with_group_down_reason_has_separate_event_emitted() {
// We sneakily use a flat cluster here but still use a 'group down' reason. Differ doesn't currently care.
final EventFixture fixture = EventFixture.createForNodes(3)
.clusterStateBefore("distributor:3 storage:3")
.clusterStateAfter("distributor:3 storage:3 .1.s:d")
.storageNodeReasonAfter(1, NodeStateReason.GROUP_IS_DOWN);
final List<Event> events = fixture.computeEventDiff();
assertThat(events.size(), equalTo(2));
// Both the regular edge event and the group down event are emitted
assertThat(events, hasItem(allOf(
eventForNode(storageNode(1)),
nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'"))));
assertThat(events, hasItem(allOf(
eventForNode(storageNode(1)),
eventTypeIs(NodeEvent.Type.CURRENT),
nodeEventWithDescription("Group node availability is below configured threshold"))));
} |
public void scheduleUpdateIfAbsent(String serviceName, String groupName, String clusters) {
if (!asyncQuerySubscribeService) {
return;
}
String serviceKey = ServiceInfo.getKey(NamingUtils.getGroupedName(serviceName, groupName), clusters);
if (futureMap.get(serviceKey) != null) {
return;
}
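// Re-check under the lock so only one update task is ever scheduled per service key.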
synchronized (futureMap) {
if (futureMap.get(serviceKey) != null) {
return;
}
ScheduledFuture<?> future = addTask(new UpdateTask(serviceName, groupName, clusters));
futureMap.put(serviceKey, future);
}
} | @Test
void testScheduleUpdateIfAbsentUpdateOlderWithInstance() throws InterruptedException, NacosException {
info.setCacheMillis(10000L);
nacosClientProperties.setProperty(PropertyKeyConst.NAMING_ASYNC_QUERY_SUBSCRIBE_SERVICE, "true");
serviceInfoUpdateService = new ServiceInfoUpdateService(nacosClientProperties, holder, proxy, notifier);
serviceInfoUpdateService.scheduleUpdateIfAbsent(serviceName, group, clusters);
Map<String, ServiceInfo> map = new HashMap<>();
map.put(ServiceInfo.getKey(group + "@@" + serviceName, clusters), info);
when(holder.getServiceInfoMap()).thenReturn(map);
info.setHosts(Collections.singletonList(new Instance()));
TimeUnit.MILLISECONDS.sleep(1500);
Mockito.verify(proxy).queryInstancesOfService(serviceName, group, clusters, false);
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
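// Send one fetch request per target node; the handlers re-acquire the Fetcher lock before touching shared state.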
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testFetchForgetTopicIdWhenUnassigned() {
buildFetcher();
TopicIdPartition foo = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
TopicIdPartition bar = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0));
// Assign foo.
subscriptions.assignFromUser(singleton(foo.topicPartition()));
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(foo), tp -> validLeaderEpoch));
subscriptions.seek(foo.topicPartition(), 0);
assertEquals(1, sendFetches());
// Fetch should use latest version.
client.prepareResponse(
fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
singletonMap(foo, new PartitionData(
foo.topicId(),
0,
FetchRequest.INVALID_LOG_START_OFFSET,
fetchSize,
Optional.of(validLeaderEpoch))
),
emptyList()
),
fullFetchResponse(1, foo, records, Errors.NONE, 100L, 0)
);
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
// Assign bar and un-assign foo.
subscriptions.assignFromUser(singleton(bar.topicPartition()));
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(bar), tp -> validLeaderEpoch));
subscriptions.seek(bar.topicPartition(), 0);
// Fetch should use latest version.
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(
fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
singletonMap(bar, new PartitionData(
bar.topicId(),
0,
FetchRequest.INVALID_LOG_START_OFFSET,
fetchSize,
Optional.of(validLeaderEpoch))
),
singletonList(foo)
),
fullFetchResponse(1, bar, records, Errors.NONE, 100L, 0)
);
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
} |
void patchIngressClassName(Ingress current, Ingress desired) {
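// Preserve the ingressClassName from the live resource when the desired spec leaves it unset (e.g. when it was defaulted by the cluster).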
if (desired.getSpec() != null
&& current.getSpec() != null
&& desired.getSpec().getIngressClassName() == null) {
desired.getSpec().setIngressClassName(current.getSpec().getIngressClassName());
}
} | @Test
public void testIngressClassPatching() {
KubernetesClient client = mock(KubernetesClient.class);
Ingress current = new IngressBuilder()
.withNewMetadata()
.withNamespace(NAMESPACE)
.withName(RESOURCE_NAME)
.endMetadata()
.withNewSpec()
.withIngressClassName("nginx")
.withTls(new IngressTLSBuilder().withHosts("my-host").build())
.endSpec()
.build();
Ingress desired = new IngressBuilder()
.withNewMetadata()
.withNamespace(NAMESPACE)
.withName(RESOURCE_NAME)
.endMetadata()
.withNewSpec()
.withIngressClassName(null)
.withTls(new IngressTLSBuilder().withHosts("my-host").build())
.endSpec()
.build();
IngressOperator op = new IngressOperator(vertx, client);
op.patchIngressClassName(current, desired);
assertThat(desired.getSpec().getIngressClassName(), is(current.getSpec().getIngressClassName()));
} |
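The patch method above illustrates a general reconciliation rule: a field the server defaulted (here the ingress class chosen by an admission controller) must be carried over when the desired state leaves it unset, or every patch would wipe it. A hypothetical generic helper in the same spirit, not Strimzi API:

import java.util.function.Consumer;
import java.util.function.Supplier;

class PatchSketch {
    static <T> void keepCurrentIfUnset(Supplier<T> desired, Consumer<T> setDesired, Supplier<T> current) {
        if (desired.get() == null) {
            setDesired.accept(current.get()); // preserve the server-populated default
        }
    }

    public static void main(String[] args) {
        String[] desired = {null};
        keepCurrentIfUnset(() -> desired[0], v -> desired[0] = v, () -> "nginx");
        System.out.println(desired[0]); // nginx
    }
}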
public Query initParams(Map<String, String> params) {
if (MapUtil.isNotEmpty(params)) {
for (Map.Entry<String, String> entry : params.entrySet()) {
addParam(entry.getKey(), entry.getValue());
}
}
return this;
} | @Test
void testInitParams() {
Map<String, String> parameters = new LinkedHashMap<String, String>();
parameters.put(CommonParams.NAMESPACE_ID, "namespace");
parameters.put(CommonParams.SERVICE_NAME, "service");
parameters.put(CommonParams.GROUP_NAME, "group");
parameters.put(CommonParams.CLUSTER_NAME, null);
parameters.put("ip", "1.1.1.1");
parameters.put("port", String.valueOf(9999));
parameters.put("weight", String.valueOf(1.0));
parameters.put("ephemeral", String.valueOf(true));
        String expected = "namespaceId=namespace&serviceName=service&groupName=group&ip=1.1.1.1&port=9999&weight=1.0&ephemeral=true";
        Query actual = Query.newInstance().initParams(parameters);
        assertEquals(expected, actual.toQueryUrl());
assertEquals("namespace", actual.getValue(CommonParams.NAMESPACE_ID));
} |
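Note what the expected string implies: clusterName is put into the map with a null value yet never appears in the output, so addParam evidently drops null values, and the LinkedHashMap preserves insertion order. A minimal sketch of that contract (ignoring the URL encoding the real Query applies), with hypothetical names:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.StringJoiner;

class QuerySketch {
    static String toQueryUrl(Map<String, String> params) {
        StringJoiner joiner = new StringJoiner("&");
        for (Map.Entry<String, String> e : params.entrySet()) {
            if (e.getValue() != null) { // null values are dropped, matching the test's expectation
                joiner.add(e.getKey() + "=" + e.getValue());
            }
        }
        return joiner.toString();
    }

    public static void main(String[] args) {
        Map<String, String> p = new LinkedHashMap<>();
        p.put("namespaceId", "namespace");
        p.put("clusterName", null); // omitted from the result
        p.put("ip", "1.1.1.1");
        System.out.println(toQueryUrl(p)); // namespaceId=namespace&ip=1.1.1.1
    }
}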
@Override
public void run() {
if (processor != null) {
processor.execute();
} else {
if (!beforeHook()) {
logger.info("before-feature hook returned [false], aborting: {}", this);
} else {
scenarios.forEachRemaining(this::processScenario);
}
afterFeature();
}
} | @Test
void testSchemaLike() {
run("schema-like.feature");
} |
public void parse(String[] args) throws FileNotFoundException, ParseException {
line = parseArgs(args);
if (line != null) {
validateArgs();
}
} | @Test
public void testParse() throws Exception {
String[] args = {};
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream originalOut = System.out;
        System.setOut(new PrintStream(baos));
        try {
            CliParser instance = new CliParser(getSettings());
            instance.parse(args);
            Assert.assertFalse(instance.isGetVersion());
            Assert.assertFalse(instance.isGetHelp());
            Assert.assertFalse(instance.isRunScan());
        } finally {
            System.setOut(originalOut); // restore stdout so later tests are unaffected
        }
} |
public static VerificationMode atLeast(final int count) {
checkArgument(count > 0, "Times count must be greater than zero");
return new AtLeastVerification(count);
} | @Test
public void should_fail_to_verify_at_least_expected_request_while_expectation_can_not_be_met() {
httpServer(port(), hit);
assertThrows(VerificationException.class, () ->
hit.verify(by(uri("/foo")), atLeast(1)));
} |
static void readFullyDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException {
int nextReadLength = Math.min(buf.remaining(), temp.length);
int bytesRead = 0;
while (nextReadLength > 0 && (bytesRead = f.read(temp, 0, nextReadLength)) >= 0) {
buf.put(temp, 0, bytesRead);
nextReadLength = Math.min(buf.remaining(), temp.length);
}
if (bytesRead < 0 && buf.remaining() > 0) {
throw new EOFException("Reached the end of stream with " + buf.remaining() + " bytes left to read");
}
} | @Test
public void testDirectReadFullyPositionAndLimit() throws Exception {
final ByteBuffer readBuffer = ByteBuffer.allocateDirect(10);
readBuffer.position(3);
readBuffer.limit(7);
readBuffer.mark();
MockInputStream stream = new MockInputStream(2, 3, 3);
DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
Assert.assertEquals(7, readBuffer.position());
Assert.assertEquals(7, readBuffer.limit());
DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
Assert.assertEquals(7, readBuffer.position());
Assert.assertEquals(7, readBuffer.limit());
readBuffer.reset();
Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 4), readBuffer);
readBuffer.position(7);
readBuffer.limit(10);
DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
Assert.assertEquals(10, readBuffer.position());
Assert.assertEquals(10, readBuffer.limit());
readBuffer.reset();
Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 7), readBuffer);
} |
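Because a direct ByteBuffer has no backing heap array, readFullyDirectBuffer stages every read through the caller-supplied temp array in chunks of at most temp.length. A self-contained demo of that chunked copy, assuming nothing beyond the JDK:

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

class DirectReadSketch {
    static void readFully(InputStream in, ByteBuffer buf, byte[] temp) throws IOException {
        int next = Math.min(buf.remaining(), temp.length);
        int read = 0;
        while (next > 0 && (read = in.read(temp, 0, next)) >= 0) {
            buf.put(temp, 0, read); // copy the staged chunk into the direct buffer
            next = Math.min(buf.remaining(), temp.length);
        }
        if (read < 0 && buf.remaining() > 0) {
            throw new EOFException(buf.remaining() + " bytes left to read");
        }
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer buf = ByteBuffer.allocateDirect(8); // no array(), hence the temp buffer
        readFully(new ByteArrayInputStream(new byte[]{1, 2, 3, 4, 5, 6, 7, 8}), buf, new byte[3]);
        buf.flip();
        System.out.println(buf.remaining()); // 8: filled in chunks of 3, 3, 2
    }
}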
@Override
public boolean isDetected() {
return "true".equals(system.envVariable("CI")) && "true".equals(system.envVariable("DRONE"));
} | @Test
public void isDetected() {
setEnvVariable("CI", "true");
setEnvVariable("DRONE", "true");
assertThat(underTest.isDetected()).isTrue();
setEnvVariable("CI", "true");
setEnvVariable("DRONE", null);
assertThat(underTest.isDetected()).isFalse();
} |
public PipelineConfigs getFirstEditablePartOrNull() {
for (PipelineConfigs part : parts) {
if (isEditable(part))
return part;
}
return null;
} | @Test
public void shouldReturnFirstEditablePartWhenExists() {
PipelineConfig pipe1 = PipelineConfigMother.pipelineConfig("pipeline1");
BasicPipelineConfigs part1 = new BasicPipelineConfigs(pipe1);
part1.setOrigin(new FileConfigOrigin());
MergePipelineConfigs group = new MergePipelineConfigs(
part1, new BasicPipelineConfigs());
assertThat(group.getFirstEditablePartOrNull(), Matchers.is(part1));
} |
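Design note: the linear scan in getFirstEditablePartOrNull has a direct stream equivalent with the same first-match-or-null contract, e.g. parts.stream().filter(this::isEditable).findFirst().orElse(null); the explicit loop merely avoids the Optional allocation.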
public URL convert(String value) {
if (isBlank(value)) {
throw new ParameterException(getErrorString("a blank value", "a valid URL"));
}
try {
return URLUtil.parseURL(value);
} catch (IllegalArgumentException e) {
throw new ParameterException(getErrorString(value, "a valid URL"));
}
} | @Test(expected = ParameterException.class)
public void nullValueThrowsParameterException() {
converter.convert(null);
} |
@Override
public void open(Map<String, Object> map, SinkContext sinkContext) throws Exception {
try {
val configV2 = InfluxDBSinkConfig.load(map, sinkContext);
configV2.validate();
sink = new InfluxDBSink();
} catch (Exception e) {
try {
val configV1 = org.apache.pulsar.io.influxdb.v1.InfluxDBSinkConfig.load(map, sinkContext);
configV1.validate();
sink = new org.apache.pulsar.io.influxdb.v1.InfluxDBGenericRecordSink();
} catch (Exception e1) {
throw new Exception("For InfluxDB V2: \n" + e.toString() + "\n"
+ "For InfluxDB V1: \n" + e1.toString());
}
}
sink.open(map, sinkContext);
} | @Test(expectedExceptions = Exception.class,
expectedExceptionsMessageRegExp = "For InfluxDB V2:.*")
public void openInvalidInfluxConfig() throws Exception {
InfluxDBGenericRecordSink sink = new InfluxDBGenericRecordSink();
sink.open(new HashMap<>(), mock(SinkContext.class));
} |
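The open method above tries the newer V2 config first and only falls back to the legacy V1 schema, surfacing both parse failures when neither fits. A generic sketch of that version-fallback loading, with illustrative names rather than the Pulsar API:

import java.util.Map;

class FallbackConfigSketch {
    interface Loader<T> {
        T load(Map<String, Object> raw) throws Exception;
    }

    static <T> T loadWithFallback(Map<String, Object> raw, Loader<T> newer, Loader<T> older) throws Exception {
        try {
            return newer.load(raw); // prefer the newer schema
        } catch (Exception eNew) {
            try {
                return older.load(raw); // fall back to the legacy schema
            } catch (Exception eOld) {
                // report both failures, as the sink above does
                throw new Exception("newer: " + eNew + "\nolder: " + eOld);
            }
        }
    }
}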
public static NamespaceName get(String tenant, String namespace) {
validateNamespaceName(tenant, namespace);
return get(tenant + '/' + namespace);
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void namespace_nullNamespace() {
NamespaceName.get("pulsar", "cluster", null);
} |
public WithJsonPath(JsonPath jsonPath, Matcher<T> resultMatcher) {
this.jsonPath = jsonPath;
this.resultMatcher = resultMatcher;
} | @Test
public void shouldMatchJsonPathEvaluatedToStringValue() {
assertThat(BOOKS_JSON, withJsonPath(compile("$.store.bicycle.color"), equalTo("red")));
assertThat(BOOKS_JSON, withJsonPath(compile("$.store.book[2].title"), equalTo("Moby Dick")));
assertThat(BOOKS_JSON, withJsonPath("$.store.name", equalTo("Little Shop")));
assertThat(BOOKS_JSON, withJsonPath("$.store.book[2].title", equalTo("Moby Dick")));
} |
@Override
public JSONObject getSuperProperties() {
return new JSONObject();
} | @Test
public void getSuperProperties() {
Assert.assertEquals(0, mSensorsAPI.getSuperProperties().length());
} |