focal_method (stringlengths 13-60.9k) | test_case (stringlengths 25-109k) |
---|---|
@Override
public boolean exists(String path) throws IOException {
return fs.exists(makePath(path));
} | @Test
public void testExists() throws IOException {
System.out.println("pre-create test path");
fs.mkdirs(new Path("test/registryTestNode"));
System.out.println("Check for existing node");
boolean exists = registry.exists("test/registryTestNode");
Assert.assertTrue(exists);
System.out.println("Check for non-existing node");
exists = registry.exists("test/nonExistentNode");
Assert.assertFalse(exists);
} |
@Override
public List<String> list() {
final List<String> list = new ArrayList<>();
try {
final javax.net.ssl.X509KeyManager manager = this.getKeystore();
{
final String[] aliases = manager.getClientAliases("RSA", null);
if(null != aliases) {
Collections.addAll(list, aliases);
}
}
{
final String[] aliases = manager.getClientAliases("DSA", null);
if(null != aliases) {
Collections.addAll(list, aliases);
}
}
}
catch(IOException e) {
log.warn(String.format("Failure listing aliases. %s", e.getMessage()));
return Collections.emptyList();
}
return list;
} | @Test
public void testList() {
assertTrue(new DefaultX509KeyManager().init().list().isEmpty());
} |
public static PDImageXObject createFromStream(PDDocument document, InputStream stream)
throws IOException
{
return createFromByteArray(document, stream.readAllBytes());
} | @Test
void testCreateFromStream() throws IOException
{
PDDocument document = new PDDocument();
InputStream stream = JPEGFactoryTest.class.getResourceAsStream("jpeg.jpg");
PDImageXObject ximage = JPEGFactory.createFromStream(document, stream);
validate(ximage, 8, 344, 287, "jpg", PDDeviceRGB.INSTANCE.getName());
doWritePDF(document, ximage, TESTRESULTSDIR, "jpegrgbstream.pdf");
checkJpegStream(TESTRESULTSDIR, "jpegrgbstream.pdf", JPEGFactoryTest.class.getResourceAsStream("jpeg.jpg"));
} |
public ServiceInfo getData(Service service) {
return serviceDataIndexes.containsKey(service) ? serviceDataIndexes.get(service) : getPushData(service);
} | @Test
void testGetData() {
ServiceInfo serviceInfo = serviceStorage.getData(SERVICE);
assertNotNull(serviceInfo);
} |
@Override
public void verify(byte[] data, byte[] signature, MessageDigest digest) {
final byte[] decrypted = engine.processBlock(signature, 0, signature.length);
final int delta = checkSignature(decrypted, digest);
final int offset = decrypted.length - digest.getDigestLength() - delta;
digest.update(decrypted, 1, offset - 1);
digest.update(data);
if (!CryptoUtils.compare(digest.digest(), decrypted, offset)) {
throw new VerificationException("Invalid signature");
}
} | @Test
public void shouldThrowVerificationExceptionIfSignatureIsInvalid() {
final byte[] challenge = CryptoUtils.random(40);
final byte[] invalid = challenge.clone();
invalid[0]++;
final byte[] signature = sign(0x54, invalid, ISOTrailers.TRAILER_SHA1, "SHA1");
thrown.expect(VerificationException.class);
thrown.expectMessage("Invalid signature");
new DssRsaSignatureVerifier(PUBLIC).verify(challenge, signature, "SHA1");
} |
@Override
public String rpcType() {
return RpcTypeEnum.MOTAN.getName();
} | @Test
public void testRpcType() {
String rpcType = shenyuClientRegisterMotanService.rpcType();
assertEquals(RpcTypeEnum.MOTAN.getName(), rpcType);
} |
@Override
String getInterfaceName(Invoker invoker, String prefix) {
return DubboUtils.getInterfaceName(invoker, prefix);
} | @Test
public void testDegradeAsync() throws InterruptedException {
try (MockedStatic<TimeUtil> mocked = super.mockTimeUtil()) {
setCurrentMillis(mocked, 1740000000000L);
Invocation invocation = DubboTestUtil.getDefaultMockInvocationOne();
Invoker invoker = DubboTestUtil.getDefaultMockInvoker();
when(invocation.getAttachment(ASYNC_KEY)).thenReturn(Boolean.TRUE.toString());
initDegradeRule(DubboUtils.getInterfaceName(invoker));
Result result = invokeDubboRpc(false, invoker, invocation);
verifyInvocationStructureForCallFinish(invoker, invocation);
assertEquals("normal", result.getValue());
// inc the clusterNode's exception to trigger the fallback
for (int i = 0; i < 5; i++) {
invokeDubboRpc(true, invoker, invocation);
verifyInvocationStructureForCallFinish(invoker, invocation);
}
Result result2 = invokeDubboRpc(false, invoker, invocation);
assertEquals("fallback", result2.getValue());
// sleeping 1000 ms to reset exception
sleep(mocked, 1000);
Result result3 = invokeDubboRpc(false, invoker, invocation);
assertEquals("normal", result3.getValue());
Context context = ContextUtil.getContext();
assertNull(context);
}
} |
@Override
protected void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException {
final Set<Evidence> remove;
if (dependency.getVersion() != null) {
remove = dependency.getEvidence(EvidenceType.VERSION).stream()
.filter(e -> !e.isFromHint() && !dependency.getVersion().equals(e.getValue()))
.collect(Collectors.toSet());
} else {
remove = new HashSet<>();
String fileVersion = null;
String pomVersion = null;
String manifestVersion = null;
for (Evidence e : dependency.getEvidence(EvidenceType.VERSION)) {
if (FILE.equals(e.getSource()) && VERSION.equals(e.getName())) {
fileVersion = e.getValue();
} else if ((NEXUS.equals(e.getSource()) || CENTRAL.equals(e.getSource())
|| POM.equals(e.getSource())) && VERSION.equals(e.getName())) {
pomVersion = e.getValue();
} else if (MANIFEST.equals(e.getSource()) && IMPLEMENTATION_VERSION.equals(e.getName())) {
manifestVersion = e.getValue();
}
}
// ensure at least two of the three versions are non-null
if (((fileVersion == null ? 0 : 1) + (pomVersion == null ? 0 : 1) + (manifestVersion == null ? 0 : 1)) > 1) {
final DependencyVersion dvFile = new DependencyVersion(fileVersion);
final DependencyVersion dvPom = new DependencyVersion(pomVersion);
final DependencyVersion dvManifest = new DependencyVersion(manifestVersion);
final boolean fileMatch = Objects.equals(dvFile, dvPom) || Objects.equals(dvFile, dvManifest);
final boolean manifestMatch = Objects.equals(dvManifest, dvPom) || Objects.equals(dvManifest, dvFile);
final boolean pomMatch = Objects.equals(dvPom, dvFile) || Objects.equals(dvPom, dvManifest);
if (fileMatch || manifestMatch || pomMatch) {
LOGGER.debug("filtering evidence from {}", dependency.getFileName());
for (Evidence e : dependency.getEvidence(EvidenceType.VERSION)) {
if (!e.isFromHint()
&& !(pomMatch && VERSION.equals(e.getName())
&& (NEXUS.equals(e.getSource()) || CENTRAL.equals(e.getSource()) || POM.equals(e.getSource())))
&& !(fileMatch && VERSION.equals(e.getName()) && FILE.equals(e.getSource()))
&& !(manifestMatch && MANIFEST.equals(e.getSource()) && IMPLEMENTATION_VERSION.equals(e.getName()))) {
remove.add(e);
}
}
}
}
}
remove.forEach((e) -> dependency.removeEvidence(EvidenceType.VERSION, e));
if (dependency.getVersion() == null) {
final Set<Evidence> evidence = dependency.getEvidence(EvidenceType.VERSION);
final DependencyVersion version;
final Evidence e = evidence.stream().findFirst().orElse(null);
if (e != null) {
version = DependencyVersionUtil.parseVersion(e.getValue(), true);
if (version != null && evidence.stream()
.map(ev -> DependencyVersionUtil.parseVersion(ev.getValue(), true))
.allMatch(version::equals)) {
dependency.setVersion(version.toString());
}
}
}
} | @Test
public void testAnalyzeDependencyFileManifest() throws Exception {
Dependency dependency = new Dependency();
dependency.addEvidence(EvidenceType.VERSION, "util", "version", "33.3", Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VERSION, "other", "version", "alpha", Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VERSION, "other", "Implementation-Version", "1.2.3", Confidence.HIGHEST);
VersionFilterAnalyzer instance = new VersionFilterAnalyzer();
instance.initialize(getSettings());
instance.analyzeDependency(dependency, null);
assertEquals(3, dependency.getEvidence(EvidenceType.VERSION).size());
dependency.addEvidence(EvidenceType.VERSION, "Manifest", "Implementation-Version", "1.2.3", Confidence.HIGHEST);
instance.analyzeDependency(dependency, null);
assertEquals(4, dependency.getEvidence(EvidenceType.VERSION).size());
dependency.addEvidence(EvidenceType.VERSION, "file", "version", "1.2.3", Confidence.HIGHEST);
instance.analyzeDependency(dependency, null);
assertEquals(2, dependency.getEvidence(EvidenceType.VERSION).size());
} |
@Override
public Iterable<K> loadAllKeys() {
// If loadAllKeys property is disabled, don't load anything
if (!genericMapStoreProperties.loadAllKeys) {
return Collections.emptyList();
}
awaitSuccessfulInit();
String sql = queries.loadAllKeys();
SqlResult keysResult = sqlService.execute(sql);
// The contract for loadAllKeys says that if the iterator implements Closeable
// then it will be closed when the iteration is over
return () -> new MappingClosingIterator<>(
keysResult.iterator(),
(SqlRow row) -> row.getObject(genericMapStoreProperties.idColumn),
keysResult::close
);
} | @Test
public void givenRowAndIdColumn_whenLoadAllKeysWithSingleColumn_thenReturnKeys() {
ObjectSpec spec = objectProvider.createObject(mapName, true);
objectProvider.insertItems(spec, 1);
Properties properties = new Properties();
properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
properties.setProperty(SINGLE_COLUMN_AS_VALUE, "true");
properties.setProperty(ID_COLUMN_PROPERTY, "person-id");
mapLoaderSingleColumn = createMapLoader(properties, hz);
List<Integer> ids = newArrayList(mapLoaderSingleColumn.loadAllKeys());
assertThat(ids).contains(0);
} |
public int getDelegationTokenFailedRetrieved() {
return numGetDelegationTokenFailedRetrieved.value();
} | @Test
public void testGetDelegationTokenRetrievedFailed() {
long totalBadBefore = metrics.getDelegationTokenFailedRetrieved();
badSubCluster.getDelegationTokenFailed();
Assert.assertEquals(totalBadBefore + 1,
metrics.getDelegationTokenFailedRetrieved());
} |
public static boolean contains(final Object[] array, final Object objectToFind) {
if (array == null) {
return false;
}
return Arrays.asList(array).contains(objectToFind);
} | @Test
void contains() {
assertFalse(ArrayUtils.contains(nullArr, "a"));
assertFalse(ArrayUtils.contains(nullArr, null));
assertFalse(ArrayUtils.contains(nothingArr, "b"));
Integer[] arr = new Integer[] {1, 2, 3};
assertFalse(ArrayUtils.contains(arr, null));
Integer[] arr1 = new Integer[] {1, 2, 3, null};
assertTrue(ArrayUtils.contains(arr1, null));
assertTrue(ArrayUtils.contains(arr, 1));
assertFalse(ArrayUtils.contains(arr, "1"));
} |
public static void initKalkanProvider() {
if (!isAddedKalkan) {
synchronized (SecurityProviderInitializer.class) {
if (!isAddedKalkan) {
logger.info("Trying to add KalkanProvider to Security providers..");
Security.addProvider(new KalkanProvider());
KncaXS.loadXMLSecurity();
logger.info("Successfully added KalkanProvider to Security providers..");
isAddedKalkan = true;
}
}
}
} | @Test
public void initKalkanProvider_AddsKalkanProviderOnce() {
SecurityProviderInitializer.initKalkanProvider();
logger.info("Checking if KalkanProvider is added to Security providers..");
assertSecurityProvidersContains(KalkanProvider.class);
logger.info("Initialized again to check if KalkanProvider is added twice..");
SecurityProviderInitializer.initKalkanProvider();
assertSecurityProvidersContainsExactlyOne(KalkanProvider.class);
} |
static void readFullyHeapBuffer(InputStream f, ByteBuffer buf) throws IOException {
readFully(f, buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
buf.position(buf.limit());
} | @Test
public void testHeapReadFullyPosition() throws Exception {
final ByteBuffer readBuffer = ByteBuffer.allocate(10);
readBuffer.position(3);
readBuffer.mark();
MockInputStream stream = new MockInputStream(2, 3, 3);
DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer);
Assert.assertEquals(10, readBuffer.position());
Assert.assertEquals(10, readBuffer.limit());
DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer);
Assert.assertEquals(10, readBuffer.position());
Assert.assertEquals(10, readBuffer.limit());
readBuffer.reset();
Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 7), readBuffer);
} |
public final void containsAnyOf(
@Nullable Object first, @Nullable Object second, @Nullable Object @Nullable ... rest) {
containsAnyIn(accumulate(first, second, rest));
} | @Test
public void iterableContainsAnyOfFailsWithSameToStringAndNullInSubject() {
expectFailureWhenTestingThat(asList(null, "abc")).containsAnyOf("def", "null");
assertFailureKeys(
"expected to contain any of", "but did not", "though it did contain", "full contents");
assertFailureValue("expected to contain any of", "[def, null] (java.lang.String)");
assertFailureValue("though it did contain", "[null (null type)]");
assertFailureValue("full contents", "[null, abc]");
} |
@VisibleForTesting
static boolean isUriValid(String uri) {
Matcher matcher = URI_PATTERN.matcher(uri);
return matcher.matches();
} | @Test
public void testInvalidUri() {
assertThat(
HttpCacheServerHandler.isUriValid(
"http://localhost:8080/ac_e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isFalse();
assertThat(
HttpCacheServerHandler.isUriValid(
"http://localhost:8080/cas_e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isFalse();
assertThat(HttpCacheServerHandler.isUriValid("http://localhost:8080/ac/111111111111111111111"))
.isFalse();
assertThat(HttpCacheServerHandler.isUriValid("http://localhost:8080/cas/111111111111111111111"))
.isFalse();
assertThat(HttpCacheServerHandler.isUriValid("http://localhost:8080/cas/823rhf&*%OL%_^"))
.isFalse();
assertThat(HttpCacheServerHandler.isUriValid("http://localhost:8080/ac/823rhf&*%OL%_^"))
.isFalse();
} |
static void applySchemaUpdates(Table table, SchemaUpdate.Consumer updates) {
if (updates == null || updates.empty()) {
// no updates to apply
return;
}
Tasks.range(1)
.retry(IcebergSinkConfig.SCHEMA_UPDATE_RETRIES)
.run(notUsed -> commitSchemaUpdates(table, updates));
} | @Test
public void testApplyNestedSchemaUpdates() {
UpdateSchema updateSchema = mock(UpdateSchema.class);
Table table = mock(Table.class);
when(table.schema()).thenReturn(NESTED_SCHEMA);
when(table.updateSchema()).thenReturn(updateSchema);
// the updates to "st.i" should be ignored as it already exists and is the same type
SchemaUpdate.Consumer consumer = new SchemaUpdate.Consumer();
consumer.addColumn("st", "i", IntegerType.get());
consumer.updateType("st.i", IntegerType.get());
consumer.makeOptional("st.i");
consumer.updateType("st.f", DoubleType.get());
consumer.addColumn("st", "s", StringType.get());
SchemaUtils.applySchemaUpdates(table, consumer);
verify(table).refresh();
verify(table).updateSchema();
verify(updateSchema).addColumn(eq("st"), eq("s"), isA(StringType.class));
verify(updateSchema).updateColumn(eq("st.f"), isA(DoubleType.class));
verify(updateSchema).makeColumnOptional(eq("st.i"));
verify(updateSchema).commit();
// check that there are no unexpected invocations...
verify(updateSchema).addColumn(anyString(), anyString(), any());
verify(updateSchema).updateColumn(any(), any());
verify(updateSchema).makeColumnOptional(any());
} |
public Long getUsableSpace() {
return usableSpace;
} | @Test
public void shouldInitializeTheFreeSpaceAtAgentSide() {
AgentIdentifier id = new Agent("uuid", "localhost", "176.19.4.1").getAgentIdentifier();
AgentRuntimeInfo agentRuntimeInfo = new AgentRuntimeInfo(id, AgentRuntimeStatus.Idle, currentWorkingDirectory(), "cookie");
assertThat(agentRuntimeInfo.getUsableSpace(), is(not(0L)));
} |
public static Optional<IndexSetValidator.Violation> validate(ElasticsearchConfiguration elasticsearchConfiguration,
IndexLifetimeConfig retentionConfig) {
Period indexLifetimeMin = retentionConfig.indexLifetimeMin();
Period indexLifetimeMax = retentionConfig.indexLifetimeMax();
final Period leeway = indexLifetimeMax.minus(indexLifetimeMin);
if (leeway.toStandardSeconds().getSeconds() < 0) {
return Optional.of(IndexSetValidator.Violation.create(f("%s <%s> is shorter than %s <%s>", FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax,
FIELD_INDEX_LIFETIME_MIN, indexLifetimeMin)));
}
if (leeway.toStandardSeconds().isLessThan(elasticsearchConfiguration.getTimeSizeOptimizingRotationPeriod().toStandardSeconds())) {
return Optional.of(IndexSetValidator.Violation.create(f("The duration between %s and %s <%s> cannot be shorter than %s <%s>", FIELD_INDEX_LIFETIME_MAX, FIELD_INDEX_LIFETIME_MIN,
leeway, TIME_SIZE_OPTIMIZING_ROTATION_PERIOD, elasticsearchConfiguration.getTimeSizeOptimizingRotationPeriod())));
}
Period fixedLeeway = elasticsearchConfiguration.getTimeSizeOptimizingRetentionFixedLeeway();
if (Objects.nonNull(fixedLeeway) && leeway.toStandardSeconds().isLessThan(fixedLeeway.toStandardSeconds())) {
return Optional.of(IndexSetValidator.Violation.create(f("The duration between %s and %s <%s> cannot be shorter than %s <%s>", FIELD_INDEX_LIFETIME_MAX, FIELD_INDEX_LIFETIME_MIN,
leeway, TIME_SIZE_OPTIMIZING_RETENTION_FIXED_LEEWAY, fixedLeeway)));
}
final Period maxRetentionPeriod = elasticsearchConfiguration.getMaxIndexRetentionPeriod();
if (maxRetentionPeriod != null
&& indexLifetimeMax.toStandardSeconds().isGreaterThan(maxRetentionPeriod.toStandardSeconds())) {
return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> exceeds the configured maximum of %s=%s.",
FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax,
ElasticsearchConfiguration.MAX_INDEX_RETENTION_PERIOD, maxRetentionPeriod)));
}
if (periodOtherThanDays(indexLifetimeMax) && !elasticsearchConfiguration.allowFlexibleRetentionPeriod()) {
return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> can only be a multiple of days",
FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax)));
}
if (periodOtherThanDays(indexLifetimeMin) && !elasticsearchConfiguration.allowFlexibleRetentionPeriod()) {
return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> can only be a multiple of days",
FIELD_INDEX_LIFETIME_MIN, indexLifetimeMin)));
}
return Optional.empty();
} | @Test
void testAllowFlexiblePeriodFlag() {
when(elasticConfig.getTimeSizeOptimizingRotationPeriod()).thenReturn(Period.minutes(1));
when(elasticConfig.getTimeSizeOptimizingRetentionFixedLeeway()).thenReturn(Period.minutes(1));
IndexLifetimeConfig config = IndexLifetimeConfig.builder()
.indexLifetimeMin(Period.minutes(3))
.indexLifetimeMax(Period.minutes(5))
.build();
assertThat(validate(elasticConfig, config)).hasValueSatisfying(v -> assertThat(v.message())
.contains("can only be a multiple of days"));
when(elasticConfig.allowFlexibleRetentionPeriod()).thenReturn(true);
assertThat(validate(elasticConfig, config)).isEmpty();
} |
public WeightedItem<T> addOrVote(T item) {
for (int i = 0; i < list.size(); i++) {
WeightedItem<T> weightedItem = list.get(i);
if (weightedItem.item.equals(item)) {
voteFor(weightedItem);
return weightedItem;
}
}
return organizeAndAdd(item);
} | @Test
public void testScenario() {
WeightedEvictableList<String> list = new WeightedEvictableList<>(3, 3);
list.addOrVote("a");
list.addOrVote("b");
list.addOrVote("c");
// After 3 votes the list reorganizes itself. Since all items have 1 vote, the order does not change
assertItemsInOrder(list, "a", "b", "c");
assertWeightsInOrder(list, 1, 1, 1);
list.addOrVote("c");
list.addOrVote("b");
list.addOrVote("c");
// After 3 more votes the list reorganizes itself
assertItemsInOrder(list, "c", "b", "a");
assertWeightsInOrder(list, 3, 2, 1);
list.addOrVote("b");
list.addOrVote("c");
list.addOrVote("d");
// "c" has the most votes. 2 items has to go.
assertItemsInOrder(list, "c", "d");
assertWeightsInOrder(list, 0, 1);
list.addOrVote("d"); //let the list re-organize now
assertItemsInOrder(list, "d", "c");
list.addOrVote("x");
assertItemsInOrder(list, "d", "c", "x");
assertWeightsInOrder(list, 2, 0, 1);
list.addOrVote("x");
list.addOrVote("x");
list.addOrVote("x");
assertItemsInOrder(list, "x", "d", "c");
assertWeightsInOrder(list, 4, 2, 0);
} |
@GET
@Path("/entity-uid/{uid}/")
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("uid") String uId,
@QueryParam("confstoretrieve") String confsToRetrieve,
@QueryParam("metricstoretrieve") String metricsToRetrieve,
@QueryParam("fields") String fields,
@QueryParam("metricslimit") String metricsLimit,
@QueryParam("metricstimestart") String metricsTimeStart,
@QueryParam("metricstimeend") String metricsTimeEnd) {
String url = req.getRequestURI() +
(req.getQueryString() == null ? "" :
QUERY_STRING_SEP + req.getQueryString());
UserGroupInformation callerUGI =
TimelineReaderWebServicesUtils.getUser(req);
LOG.info("Received URL {} from user {}",
url, TimelineReaderWebServicesUtils.getUserName(callerUGI));
long startTime = Time.monotonicNow();
boolean succeeded = false;
init(res);
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
TimelineEntity entity = null;
try {
TimelineReaderContext context =
TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uId);
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
entity = timelineReaderManager.getEntity(context,
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
checkAccessForGenericEntity(entity, callerUGI);
succeeded = true;
} catch (Exception e) {
handleException(e, url, startTime, "Either metricslimit or metricstime"
+ " start/end");
} finally {
long latency = Time.monotonicNow() - startTime;
METRICS.addGetEntitiesLatency(latency, succeeded);
LOG.info("Processed URL {} (Took {} ms.)", url, latency);
}
if (entity == null) {
LOG.info("Processed URL {} but entity not found" + " (Took {} ms.)",
url, (Time.monotonicNow() - startTime));
throw new NotFoundException("Timeline entity with uid: " + uId +
"is not found");
}
return entity;
} | @Test
void testGetEntitiesByRelations() throws Exception {
Client client = createClient();
try {
URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
"timeline/clusters/cluster1/apps/app1/entities/app?relatesto=" +
"flow:flow1");
ClientResponse resp = getResponse(client, uri);
Set<TimelineEntity> entities =
resp.getEntity(new GenericType<Set<TimelineEntity>>(){
});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(1, entities.size());
assertTrue(entities.contains(newEntity("app", "id_1")),
"Entity with id_1 should have been present in response.");
uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
"clusters/cluster1/apps/app1/entities/app?isrelatedto=" +
"type1:tid1_2,type2:tid2_1%60");
resp = getResponse(client, uri);
entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){
});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(1, entities.size());
assertTrue(entities.contains(newEntity("app", "id_1")),
"Entity with id_1 should have been present in response.");
uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
"clusters/cluster1/apps/app1/entities/app?isrelatedto=" +
"type1:tid1_1:tid1_2,type2:tid2_1%60");
resp = getResponse(client, uri);
entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){
});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(1, entities.size());
assertTrue(entities.contains(newEntity("app", "id_1")),
"Entity with id_1 should have been present in response.");
} finally {
client.destroy();
}
} |
@Override
public boolean isSubscribed(String serviceName, String groupName, String clusters) throws NacosException {
return true;
} | @Test
void testIsSubscribed() throws NacosException {
assertTrue(clientProxy.isSubscribed("serviceName", "group1", "clusters"));
} |
public NODE remove(int index) {
throw e;
} | @Test
void require_that_remove_index_throws_exception() {
assertThrows(NodeVector.ReadOnlyException.class, () -> new TestNodeVector("foo").remove(0));
} |
public static List<Interval> normalize(List<Interval> intervals) {
if (intervals.size() <= 1) {
return intervals;
}
List<Interval> valid =
intervals.stream().filter(Interval::isValid).collect(Collectors.toList());
if (valid.size() <= 1) {
return valid;
}
// 2 or more intervals
List<Interval> result = new ArrayList<>(valid.size());
Collections.sort(valid);
long start = valid.get(0).getStartMs();
long end = valid.get(0).getEndMs();
// scan entire list from the second interval
for (int i = 1; i < valid.size(); i++) {
Interval interval = valid.get(i);
if (interval.getStartMs() <= end) {
// continue with the same interval
end = Math.max(end, interval.getEndMs());
} else {
// These are disjoint. add the previous interval
result.add(Interval.between(start, end));
start = interval.getStartMs();
end = interval.getEndMs();
}
}
// add the last interval
result.add(Interval.between(start, end));
if (result.isEmpty()) {
return Collections.emptyList();
}
return result;
} | @Test
public void normalizeSimple() {
List<Interval> i;
i = IntervalUtils.normalize(Collections.emptyList());
Assert.assertTrue(i.isEmpty());
i = IntervalUtils.normalize(Arrays.asList(Interval.NEVER, Interval.NEVER, Interval.NEVER));
Assert.assertTrue(i.isEmpty());
i = IntervalUtils.normalize(Arrays.asList(Interval.ALWAYS, Interval.ALWAYS, Interval.ALWAYS));
Assert.assertEquals(1, i.size());
Assert.assertEquals(Interval.ALWAYS, i.get(0));
i = IntervalUtils.normalize(Arrays.asList(Interval.NEVER, Interval.ALWAYS));
Assert.assertEquals(1, i.size());
Assert.assertEquals(Interval.ALWAYS, i.get(0));
i = IntervalUtils.normalize(Arrays.asList(Interval.between(1, 2)));
Assert.assertEquals(1, i.size());
Assert.assertEquals(Interval.between(1, 2), i.get(0));
i = IntervalUtils.normalize(Arrays.asList(Interval.NEVER, Interval.between(1, 2)));
Assert.assertEquals(1, i.size());
Assert.assertEquals(Interval.between(1, 2), i.get(0));
} |
@Override
public <K> Iterable<K> findIds(Class<?> entityClass) {
return findIds(entityClass, 10);
} | @Test
public void testFindIds() {
RLiveObjectService s = redisson.getLiveObjectService();
TestIndexed1 t1 = new TestIndexed1();
t1.setId("1");
t1.setKeywords(Collections.singletonList("132323"));
TestIndexed1 t2 = new TestIndexed1();
t2.setId("2");
t2.setKeywords(Collections.singletonList("fjdklj"));
s.persist(t1, t2);
Iterable<String> ids = s.findIds(TestIndexed1.class);
assertThat(ids).containsExactlyInAnyOrder("1", "2");
} |
public static UArrayTypeTree create(UExpression elementType) {
return new AutoValue_UArrayTypeTree(elementType);
} | @Test
public void equality() {
new EqualsTester()
.addEqualityGroup(UArrayTypeTree.create(UPrimitiveTypeTree.INT))
.addEqualityGroup(UArrayTypeTree.create(UClassIdent.create("java.lang.String")))
.addEqualityGroup(UArrayTypeTree.create(UArrayTypeTree.create(UPrimitiveTypeTree.INT)))
.testEquals();
} |
@Override
public SecurityModeState getState(ApplicationId appId) {
return states.asJavaMap().getOrDefault(appId, new SecurityInfo(null, null)).getState();
} | @Test
public void testGetState() {
assertEquals(SECURED, states.get(appId).getState());
} |
@Override
public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations,
AlterUserScramCredentialsOptions options) {
final long now = time.milliseconds();
final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>();
for (UserScramCredentialAlteration alteration: alterations) {
futures.put(alteration.user(), new KafkaFutureImpl<>());
}
final Map<String, Exception> userIllegalAlterationExceptions = new HashMap<>();
// We need to keep track of users with deletions of an unknown SCRAM mechanism
final String usernameMustNotBeEmptyMsg = "Username must not be empty";
String passwordMustNotBeEmptyMsg = "Password must not be empty";
final String unknownScramMechanismMsg = "Unknown SCRAM mechanism";
alterations.stream().filter(a -> a instanceof UserScramCredentialDeletion).forEach(alteration -> {
final String user = alteration.user();
if (user == null || user.isEmpty()) {
userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
} else {
UserScramCredentialDeletion deletion = (UserScramCredentialDeletion) alteration;
ScramMechanism mechanism = deletion.mechanism();
if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
}
}
});
// Creating an upsertion may throw InvalidKeyException or NoSuchAlgorithmException,
// so keep track of which users are affected by such a failure so we can fail all their alterations later
final Map<String, Map<ScramMechanism, AlterUserScramCredentialsRequestData.ScramCredentialUpsertion>> userInsertions = new HashMap<>();
alterations.stream().filter(a -> a instanceof UserScramCredentialUpsertion)
.filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user()))
.forEach(alteration -> {
final String user = alteration.user();
if (user == null || user.isEmpty()) {
userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
} else {
UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration;
try {
byte[] password = upsertion.password();
if (password == null || password.length == 0) {
userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg));
} else {
ScramMechanism mechanism = upsertion.credentialInfo().mechanism();
if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
} else {
userInsertions.putIfAbsent(user, new HashMap<>());
userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion));
}
}
} catch (NoSuchAlgorithmException e) {
// we might overwrite an exception from a previous alteration, but we don't really care
// since we just need to mark this user as having at least one illegal alteration
// and make an exception instance available for completing the corresponding future exceptionally
userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
} catch (InvalidKeyException e) {
// generally shouldn't happen since we deal with the empty password case above,
// but we still need to catch/handle it
userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e));
}
}
});
// submit alterations only for users that do not have an illegal alteration as identified above
Call call = new Call("alterUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()),
new ControllerNodeProvider()) {
@Override
public AlterUserScramCredentialsRequest.Builder createRequest(int timeoutMs) {
return new AlterUserScramCredentialsRequest.Builder(
new AlterUserScramCredentialsRequestData().setUpsertions(alterations.stream()
.filter(a -> a instanceof UserScramCredentialUpsertion)
.filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
.map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism()))
.collect(Collectors.toList()))
.setDeletions(alterations.stream()
.filter(a -> a instanceof UserScramCredentialDeletion)
.filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
.map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d))
.collect(Collectors.toList())));
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
AlterUserScramCredentialsResponse response = (AlterUserScramCredentialsResponse) abstractResponse;
// Check for controller change
for (Errors error : response.errorCounts().keySet()) {
if (error == Errors.NOT_CONTROLLER) {
handleNotControllerError(error);
}
}
/* Now that we have the results for the ones we sent,
* fail any users that have an illegal alteration as identified above.
* Be sure to do this after the NOT_CONTROLLER error check above
* so that all errors are consistent in that case.
*/
userIllegalAlterationExceptions.entrySet().stream().forEach(entry ->
futures.get(entry.getKey()).completeExceptionally(entry.getValue())
);
response.data().results().forEach(result -> {
KafkaFutureImpl<Void> future = futures.get(result.user());
if (future == null) {
log.warn("Server response mentioned unknown user {}", result.user());
} else {
Errors error = Errors.forCode(result.errorCode());
if (error != Errors.NONE) {
future.completeExceptionally(error.exception(result.errorMessage()));
} else {
future.complete(null);
}
}
});
completeUnrealizedFutures(
futures.entrySet().stream(),
user -> "The broker response did not contain a result for user " + user);
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(futures.values(), throwable);
}
};
runnable.call(call, now);
return new AlterUserScramCredentialsResult(new HashMap<>(futures));
} | @Test
public void testAlterUserScramCredentials() {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
final String user0Name = "user0";
ScramMechanism user0ScramMechanism0 = ScramMechanism.SCRAM_SHA_256;
ScramMechanism user0ScramMechanism1 = ScramMechanism.SCRAM_SHA_512;
final String user1Name = "user1";
ScramMechanism user1ScramMechanism0 = ScramMechanism.SCRAM_SHA_256;
final String user2Name = "user2";
ScramMechanism user2ScramMechanism0 = ScramMechanism.SCRAM_SHA_512;
AlterUserScramCredentialsResponseData responseData = new AlterUserScramCredentialsResponseData();
responseData.setResults(Stream.of(user0Name, user1Name, user2Name).map(u ->
new AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult()
.setUser(u).setErrorCode(Errors.NONE.code())).collect(Collectors.toList()));
env.kafkaClient().prepareResponse(new AlterUserScramCredentialsResponse(responseData));
AlterUserScramCredentialsResult result = env.adminClient().alterUserScramCredentials(asList(
new UserScramCredentialDeletion(user0Name, user0ScramMechanism0),
new UserScramCredentialUpsertion(user0Name, new ScramCredentialInfo(user0ScramMechanism1, 8192), "password"),
new UserScramCredentialUpsertion(user1Name, new ScramCredentialInfo(user1ScramMechanism0, 8192), "password"),
new UserScramCredentialDeletion(user2Name, user2ScramMechanism0)));
Map<String, KafkaFuture<Void>> resultData = result.values();
assertEquals(3, resultData.size());
Stream.of(user0Name, user1Name, user2Name).forEach(u -> {
assertTrue(resultData.containsKey(u));
assertFalse(resultData.get(u).isCompletedExceptionally());
});
}
} |
GuardedScheduler(Scheduler delegate) {
this.delegate = requireNonNull(delegate);
} | @Test
public void guardedScheduler() {
var future = Scheduler.guardedScheduler((r, e, d, u) -> Futures.immediateVoidFuture())
.schedule(Runnable::run, () -> {}, 1, TimeUnit.MINUTES);
assertThat(future).isSameInstanceAs(Futures.immediateVoidFuture());
} |
public Bson parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
final Filter filter = singleFilterParser.parseSingleExpression(filterExpression, attributes);
return filter.toBson();
} | @Test
void parsesFilterExpressionForStringFieldsCorrectlyEvenIfValueContainsRangeSeparator() {
final List<EntityAttribute> entityAttributes = List.of(EntityAttribute.builder()
.id("text")
.title("Text")
.type(SearchQueryField.Type.STRING)
.filterable(true)
.build());
assertEquals(
Filters.eq("text", "42" + RANGE_VALUES_SEPARATOR + "53"),
toTest.parseSingleExpression("text:42" + RANGE_VALUES_SEPARATOR + "53",
entityAttributes
));
} |
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
CruiseConfig configForEdit;
CruiseConfig config;
LOGGER.debug("[Config Save] Loading config holder");
configForEdit = deserializeConfig(content);
if (callback != null) callback.call(configForEdit);
config = preprocessAndValidate(configForEdit);
return new GoConfigHolder(config, configForEdit);
} | @Test
void shouldLoadConfigWithNoEnvironment() throws Exception {
String content = configWithEnvironments("", CONFIG_SCHEMA_VERSION);
EnvironmentsConfig environmentsConfig = xmlLoader.loadConfigHolder(content).config.getEnvironments();
EnvironmentPipelineMatchers matchers = environmentsConfig.matchers();
assertThat(matchers).isNotNull();
assertThat(matchers.size()).isEqualTo(0);
} |
@Override
public void load(String mountTableConfigPath, Configuration conf)
throws IOException {
this.mountTable = new Path(mountTableConfigPath);
String scheme = mountTable.toUri().getScheme();
FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme);
try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) {
RemoteIterator<LocatedFileStatus> listFiles =
fs.listFiles(mountTable, false);
LocatedFileStatus lfs = null;
int higherVersion = -1;
while (listFiles.hasNext()) {
LocatedFileStatus curLfs = listFiles.next();
String cur = curLfs.getPath().getName();
String[] nameParts = cur.split(REGEX_DOT);
if (nameParts.length < 2) {
logInvalidFileNameFormat(cur);
continue; // invalid file name
}
int curVersion = higherVersion;
try {
curVersion = Integer.parseInt(nameParts[nameParts.length - 2]);
} catch (NumberFormatException nfe) {
logInvalidFileNameFormat(cur);
continue;
}
if (curVersion > higherVersion) {
higherVersion = curVersion;
lfs = curLfs;
}
}
if (lfs == null) {
// No valid mount table file found.
// TODO: Should we fail? Currently viewfs init will fail if no mount
// links anyway.
LOGGER.warn("No valid mount-table file exist at: {}. At least one "
+ "mount-table file should present with the name format: "
+ "mount-table.<versionNumber>.xml", mountTableConfigPath);
return;
}
// Latest version file.
Path latestVersionMountTable = lfs.getPath();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Loading the mount-table {} into configuration.",
latestVersionMountTable);
}
try (FSDataInputStream open = fs.open(latestVersionMountTable)) {
Configuration newConf = new Configuration(false);
newConf.addResource(open);
// This will add configuration props as resource, instead of stream
// itself. So, that stream can be closed now.
conf.addResource(newConf);
}
}
} | @Test
public void testMountTableFileWithInvalidFormatWithNoDotsInName()
throws Exception {
Path path = new Path(new URI(targetTestRoot.toString()
+ "/testMountTableFileWithInvalidFormatWithNoDots/"));
fsTarget.mkdirs(path);
File invalidMountFileName =
new File(new URI(path.toString() + "/tableInvalidVersionxml"));
invalidMountFileName.createNewFile();
// Pass mount table directory
loader.load(path.toString(), conf);
Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_TWO));
Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_ONE));
invalidMountFileName.delete();
} |
@Override
public <V> MultiLabel generateOutput(V label) {
if (label instanceof Collection) {
Collection<?> c = (Collection<?>) label;
List<Pair<String,Boolean>> dimensions = new ArrayList<>();
for (Object o : c) {
dimensions.add(MultiLabel.parseElement(o.toString()));
}
return MultiLabel.createFromPairList(dimensions);
}
return MultiLabel.parseString(label.toString());
} | @Test
public void testGenerateOutput_unparseable() {
MultiLabelFactory factory = new MultiLabelFactory();
MultiLabel output = factory.generateOutput(new Unparseable());
assertEquals(1, output.getLabelSet().size());
assertTrue(output.getLabelString().startsWith("org.tribuo.multilabel.MultiLabelFactoryTest$Unparseable"));
} |
public abstract String dn(); | @Test
void dn() {
final LDAPEntry entry = LDAPEntry.builder()
.dn("cn=jane,ou=people,dc=example,dc=com")
.base64UniqueId(Base64.encode("unique-id"))
.addAttribute("foo", "bar")
.build();
assertThat(entry.dn()).isEqualTo("cn=jane,ou=people,dc=example,dc=com");
} |
public static String formatSimple(long amount) {
if (amount < 1_0000 && amount > -1_0000) {
return String.valueOf(amount);
}
String res;
if (amount < 1_0000_0000 && amount > -1_0000_0000) {
res = NumberUtil.div(amount, 1_0000, 2) + "万";
} else if (amount < 1_0000_0000_0000L && amount > -1_0000_0000_0000L) {
res = NumberUtil.div(amount, 1_0000_0000, 2) + "亿";
} else {
res = NumberUtil.div(amount, 1_0000_0000_0000L, 2) + "万亿";
}
return res;
} | @Test
public void formatSimpleTest() {
String f1 = NumberChineseFormatter.formatSimple(1_2345);
assertEquals("1.23万", f1);
f1 = NumberChineseFormatter.formatSimple(-5_5555);
assertEquals("-5.56万", f1);
f1 = NumberChineseFormatter.formatSimple(1_2345_6789);
assertEquals("1.23亿", f1);
f1 = NumberChineseFormatter.formatSimple(-5_5555_5555);
assertEquals("-5.56亿", f1);
f1 = NumberChineseFormatter.formatSimple(1_2345_6789_1011L);
assertEquals("1.23万亿", f1);
f1 = NumberChineseFormatter.formatSimple(-5_5555_5555_5555L);
assertEquals("-5.56万亿", f1);
f1 = NumberChineseFormatter.formatSimple(123);
assertEquals("123", f1);
f1 = NumberChineseFormatter.formatSimple(-123);
assertEquals("-123", f1);
} |
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception {
ResourceConverter converter = new ResourceConverter(dataFormatTypeClasses);
byte[] objectAsBytes = converter.writeDocument(new JSONAPIDocument<>(graph));
stream.write(objectAsBytes);
} | @Test
public void testJsonApiMarshalWrongType() {
Class<?>[] formats = { MyBook.class, MyAuthor.class };
JsonApiDataFormat jsonApiDataFormat = new JsonApiDataFormat(formats);
Exchange exchange = new DefaultExchange(context);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
assertThrows(DocumentSerializationException.class,
() -> jsonApiDataFormat.marshal(exchange, new MyFooBar("bar"), baos));
} |
@Override
public String getId() {
return id;
} | @Test
void recurringJobWithAVeryLongIdUsesMD5HashingForId() {
RecurringJob recurringJob = aDefaultRecurringJob().withoutId().withJobDetails(new JobDetails(new SimpleJobRequest())).build();
assertThat(recurringJob.getId()).isEqualTo("045101544c9006c596e6bc7c59506913");
} |
public String getHiveMetastoreURIs() {
return metastoreURIs;
} | @Test
public void testFromStmt(@Mocked GlobalStateMgr globalStateMgr) throws UserException {
String name = "hudi0";
String type = "hudi";
String metastoreURIs = "thrift://127.0.0.1:9380";
Map<String, String> properties = Maps.newHashMap();
properties.put("type", type);
properties.put("hive.metastore.uris", metastoreURIs);
CreateResourceStmt stmt = new CreateResourceStmt(true, name, properties);
Analyzer analyzer = new Analyzer(Analyzer.AnalyzerVisitor.getInstance());
new Expectations() {
{
globalStateMgr.getAnalyzer();
result = analyzer;
}
};
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, connectContext);
HudiResource resource = (HudiResource) Resource.fromStmt(stmt);
Assert.assertEquals("hudi0", resource.getName());
Assert.assertEquals(type, resource.getType().name().toLowerCase());
Assert.assertEquals(metastoreURIs, resource.getHiveMetastoreURIs());
} |
@Override
public String getSinkTableName(Table table) {
String tableName = table.getName();
Map<String, String> sink = config.getSink();
// Add table name mapping logic
String mappingRoute = sink.get(FlinkCDCConfig.TABLE_MAPPING_ROUTES);
if (mappingRoute != null) {
Map<String, String> mappingRules = parseMappingRoute(mappingRoute);
if (mappingRules.containsKey(tableName)) {
tableName = mappingRules.get(tableName);
}
}
tableName = sink.getOrDefault(FlinkCDCConfig.TABLE_PREFIX, "")
+ tableName
+ sink.getOrDefault(FlinkCDCConfig.TABLE_SUFFIX, "");
// table.lower and table.upper can not be true at the same time
if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))
&& Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) {
throw new IllegalArgumentException("table.lower and table.upper can not be true at the same time");
}
if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) {
tableName = tableName.toUpperCase();
}
if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))) {
tableName = tableName.toLowerCase();
}
// Replace table names via regular expressions configured through
// sink.table.replace.pattern and table.replace.with
String replacePattern = sink.get(FlinkCDCConfig.TABLE_REPLACE_PATTERN);
String replaceWith = sink.get(FlinkCDCConfig.TABLE_REPLACE_WITH);
if (replacePattern != null && replaceWith != null) {
Pattern pattern = Pattern.compile(replacePattern);
Matcher matcher = pattern.matcher(tableName);
tableName = matcher.replaceAll(replaceWith);
}
// add schema
if (Boolean.parseBoolean(sink.get("table.prefix.schema"))) {
tableName = table.getSchema() + "_" + tableName;
}
return tableName;
} | @Test
public void testGetSinkTableNameWithConversionUpperCase() {
Map<String, String> sinkConfig = new HashMap<>();
sinkConfig.put("table.prefix", "");
sinkConfig.put("table.suffix", "");
sinkConfig.put("table.lower", "false");
sinkConfig.put("table.upper", "true");
when(config.getSink()).thenReturn(sinkConfig);
Table table = new Table("TestTable", "TestSchema", null);
String expectedTableName = "TESTTABLE";
Assert.assertEquals(expectedTableName, sinkBuilder.getSinkTableName(table));
} |
public Host get(final String url) throws HostParserException {
final StringReader reader = new StringReader(url);
final Protocol parsedProtocol, protocol;
if((parsedProtocol = findProtocol(reader, factory)) != null) {
protocol = parsedProtocol;
}
else {
protocol = defaultScheme;
}
final Consumer<HostParserException> parsedProtocolDecorator = e -> e.withProtocol(parsedProtocol);
final Host host = new Host(protocol);
final URITypes uriType = findURIType(reader);
if(uriType == URITypes.Undefined) {
// scheme:
if(StringUtils.isBlank(protocol.getDefaultHostname())) {
throw decorate(new HostParserException(String.format("Missing hostname in URI %s", url)), parsedProtocolDecorator);
}
return host;
}
if(uriType == URITypes.Authority) {
if(host.getProtocol().isHostnameConfigurable()) {
parseAuthority(reader, host, parsedProtocolDecorator);
}
else {
parseRootless(reader, host, parsedProtocolDecorator);
}
}
else if(uriType == URITypes.Rootless) {
parseRootless(reader, host, parsedProtocolDecorator);
}
else if(uriType == URITypes.Absolute) {
parseAbsolute(reader, host, parsedProtocolDecorator);
}
if(log.isDebugEnabled()) {
log.debug(String.format("Parsed %s as %s", url, host));
}
return host;
} | @Test
public void parseDefaultHostnameWithUserRelativePath() throws Exception {
final Host host = new HostParser(new ProtocolFactory(Collections.singleton(new TestProtocol(Scheme.https) {
@Override
public String getDefaultHostname() {
return "defaultHostname";
}
@Override
public boolean isHostnameConfigurable() {
return false;
}
}))).get("https://user@folder/file");
assertEquals("defaultHostname", host.getHostname());
assertEquals("user", host.getCredentials().getUsername());
assertEquals("folder/file", host.getDefaultPath());
} |
@Override
public Set<String> getAvailableBrokers() {
try {
return getAvailableBrokersAsync().get(conf.getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
} catch (Exception e) {
log.warn("Error when trying to get active brokers", e);
return loadData.getBrokerData().keySet();
}
} | @Test
public void testBrokerStopCacheUpdate() throws Exception {
ModularLoadManagerWrapper loadManagerWrapper = (ModularLoadManagerWrapper) pulsar1.getLoadManager().get();
ModularLoadManagerImpl lm = (ModularLoadManagerImpl) loadManagerWrapper.getLoadManager();
assertEquals(lm.getAvailableBrokers().size(), 2);
pulsar2.close();
Awaitility.await().untilAsserted(() -> assertEquals(lm.getAvailableBrokers().size(), 1));
} |
public CategoricalInfo(String name) {
super(name);
} | @Test
void testCategoricalInfo() throws Exception {
CategoricalInfo info = new CategoricalInfo("cat");
IntStream.range(0, 10).forEach(i -> {
IntStream.range(0, i*2).forEach(j -> {
info.observe(i);
});
});
VariableInfoProto infoProto = info.serialize();
assertEquals(0, infoProto.getVersion());
assertEquals("org.tribuo.CategoricalInfo", infoProto.getClassName());
CategoricalInfoProto proto = infoProto.getSerializedData().unpack(CategoricalInfoProto.class);
assertEquals("cat", proto.getName());
assertEquals(90, proto.getCount());
assertEquals(0, proto.getObservedCount());
assertEquals(Double.NaN, proto.getObservedValue());
List<Double> keyList = proto.getKeyList();
List<Long> valueList = proto.getValueList();
assertEquals(9, keyList.size());
assertEquals(9, valueList.size());
Map<Double, Long> expectedCounts = new HashMap<>();
IntStream.range(0, 10).forEach(i -> {
long count = info.getObservationCount(i);
expectedCounts.put((double)i, count);
});
for (int i=0; i<keyList.size(); i++) {
assertEquals(expectedCounts.get(keyList.get(i)), valueList.get(i));
}
VariableInfo infoD = ProtoUtil.deserialize(infoProto);
assertEquals(info, infoD);
} |
@Override
public Collection<String> getDistributedTableNames() {
return Collections.emptySet();
} | @Test
void assertGetDistributedTableMapper() {
assertThat(new LinkedList<>(ruleAttribute.getDistributedTableNames()), is(Collections.emptyList()));
} |
@Override
public void writeLong(final long v) throws IOException {
ensureAvailable(LONG_SIZE_IN_BYTES);
MEM.putLong(buffer, ARRAY_BYTE_BASE_OFFSET + pos, v);
pos += LONG_SIZE_IN_BYTES;
} | @Test
public void testWriteLongForPositionV() throws Exception {
long expected = 100;
out.writeLong(2, expected);
long actual = Bits.readLong(out.buffer, 2, ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN);
assertEquals(expected, actual);
} |
public static <T extends Throwable> void checkNotEmpty(final String value, final Supplier<T> exceptionSupplierIfUnexpected) throws T {
if (Strings.isNullOrEmpty(value)) {
throw exceptionSupplierIfUnexpected.get();
}
} | @Test
void assertCheckNotEmptyWithMapToThrowsException() {
assertThrows(SQLException.class, () -> ShardingSpherePreconditions.checkNotEmpty(Collections.emptyMap(), SQLException::new));
} |
public static boolean isDirectOutOfMemoryError(@Nullable Throwable t) {
return isOutOfMemoryErrorWithMessageContaining(t, "Direct buffer memory");
} | @Test
void testIsDirectOutOfMemoryErrorCanHandleNullValue() {
assertThat(ExceptionUtils.isDirectOutOfMemoryError(null)).isFalse();
} |
@Operation(summary = "Redirect with SAML artifact")
@GetMapping(value = {"/frontchannel/saml/v4/redirect_with_artifact", "/frontchannel/saml/v4/idp/redirect_with_artifact"})
public RedirectView redirectWithArtifact(@RequestParam(value = "SAMLart") String artifact, HttpServletRequest request) throws SamlSessionException, UnsupportedEncodingException {
logger.info("Receive redirect with SAML artifact");
return new RedirectView(assertionConsumerServiceUrlService.generateRedirectUrl(artifact, null, request.getRequestedSessionId(), null));
} | @Test
void redirectWithArtifactTest() throws SamlSessionException, UnsupportedEncodingException {
String redirectUrl = "redirectUrl";
httpServletRequestMock.setRequestedSessionId("sessionId");
when(assertionConsumerServiceUrlServiceMock.generateRedirectUrl(anyString(), any(), anyString(), any())).thenReturn(redirectUrl);
RedirectView result = artifactController.redirectWithArtifact("artifact", httpServletRequestMock);
assertNotNull(result);
assertEquals(redirectUrl, result.getUrl());
verify(assertionConsumerServiceUrlServiceMock, times(1)).generateRedirectUrl(anyString(), any(), anyString(), any());
} |
@Override
public int actionUpgradeComponents(String appName, List<String> components)
throws IOException, YarnException {
int result;
Component[] toUpgrade = new Component[components.size()];
try {
int idx = 0;
for (String compName : components) {
Component component = new Component();
component.setName(compName);
component.setState(ComponentState.UPGRADING);
toUpgrade[idx++] = component;
}
String buffer = ServiceApiUtil.COMP_JSON_SERDE.toJson(toUpgrade);
ClientResponse response = getApiClient(getComponentsPath(appName))
.put(ClientResponse.class, buffer);
result = processResponse(response);
} catch (Exception e) {
LOG.error("Failed to upgrade components: ", e);
result = EXIT_EXCEPTION_THROWN;
}
return result;
} | @Test
void testComponentsUpgrade() {
String appName = "example-app";
try {
int result = asc.actionUpgradeComponents(appName, Lists.newArrayList(
"comp"));
assertEquals(EXIT_SUCCESS, result);
} catch (IOException | YarnException e) {
fail();
}
} |
@Override
public Optional<ShardingConditionValue> generate(final BinaryOperationExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) {
String operator = predicate.getOperator().toUpperCase();
if (!isSupportedOperator(operator)) {
return Optional.empty();
}
ExpressionSegment valueExpression = predicate.getLeft() instanceof ColumnSegment ? predicate.getRight() : predicate.getLeft();
ConditionValue conditionValue = new ConditionValue(valueExpression, params);
if (conditionValue.isNull()) {
return generate(null, column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
Optional<Comparable<?>> value = conditionValue.getValue();
if (value.isPresent()) {
return generate(value.get(), column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
if (ExpressionConditionUtils.isNowExpression(valueExpression)) {
return generate(timestampServiceRule.getTimestamp(), column, operator, -1);
}
return Optional.empty();
} | @Test
void assertGenerateNullConditionValueWithLessThanOperator() {
BinaryOperationExpression rightValue = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, null), "<", null);
Optional<ShardingConditionValue> shardingConditionValue = generator.generate(rightValue, column, new LinkedList<>(), mock(TimestampServiceRule.class));
assertFalse(shardingConditionValue.isPresent());
} |
public static String getRandomName(int number) {
int combinationIdx = number % (LEFT.length * RIGHT.length);
int rightIdx = combinationIdx / LEFT.length;
int leftIdx = combinationIdx % LEFT.length;
String name = String.format(NAME_FORMAT, LEFT[leftIdx], RIGHT[rightIdx]);
String prefix = System.getProperty(MOBY_NAMING_PREFIX);
if (prefix != null) {
name = prefix + "_" + name;
}
return name;
} | @Test
public void getRandomNameNotEmpty() {
String randomName = MobyNames.getRandomName(0);
assertFalse(isNullOrEmptyAfterTrim(randomName));
} |
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
} | @Test
public void testIntegerGt() {
boolean shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", INT_MAX_VALUE + 6)).eval(FILE);
assertThat(shouldRead).as("Should not read: id range above upper bound (85 < 79)").isFalse();
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", INT_MAX_VALUE)).eval(FILE);
assertThat(shouldRead)
.as("Should not read: id range above upper bound (79 is not > 79)")
.isFalse();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", INT_MAX_VALUE - 1)).eval(FILE);
assertThat(shouldRead).as("Should read: one possible id").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", INT_MAX_VALUE - 4)).eval(FILE);
assertThat(shouldRead).as("Should read: may possible ids").isTrue();
} |
@VisibleForTesting
public static Optional<QueryId> getMaxMemoryConsumingQuery(ListMultimap<QueryId, SqlTask> queryIDToSqlTaskMap)
{
if (queryIDToSqlTaskMap.isEmpty()) {
return Optional.empty();
}
Comparator<Map.Entry<QueryId, Long>> comparator = Comparator.comparingLong(Map.Entry::getValue);
Optional<QueryId> maxMemoryConsumingQueryId = queryIDToSqlTaskMap.asMap().entrySet().stream()
.map(entry ->
new AbstractMap.SimpleEntry<>(entry.getKey(), entry.getValue().stream()
.map(SqlTask::getTaskInfo)
.map(TaskInfo::getStats)
.mapToLong(stats -> stats.getUserMemoryReservationInBytes() + stats.getSystemMemoryReservationInBytes() + stats.getRevocableMemoryReservationInBytes())
.sum())
).max(comparator).map(Map.Entry::getKey);
return maxMemoryConsumingQueryId;
} | @Test
public void testMaxMemoryConsumingQuery()
throws Exception
{
QueryId highMemoryQueryId = new QueryId("query1");
SqlTask highMemoryTask = createInitialTask(highMemoryQueryId);
updateTaskMemory(highMemoryTask, 200);
QueryId lowMemoryQueryId = new QueryId("query2");
SqlTask lowMemoryTask = createInitialTask(lowMemoryQueryId);
updateTaskMemory(lowMemoryTask, 100);
List<SqlTask> activeTasks = ImmutableList.of(highMemoryTask, lowMemoryTask);
ListMultimap<QueryId, SqlTask> activeQueriesToTasksMap = activeTasks.stream()
.collect(toImmutableListMultimap(task -> task.getQueryContext().getQueryId(), Function.identity()));
Optional<QueryId> optionalQueryId = HighMemoryTaskKiller.getMaxMemoryConsumingQuery(activeQueriesToTasksMap);
assertTrue(optionalQueryId.isPresent());
assertEquals(optionalQueryId.get(), highMemoryQueryId);
} |
public static long getNumSector(String requestSize, String sectorSize) {
Double memSize = Double.parseDouble(requestSize);
Double sectorBytes = Double.parseDouble(sectorSize);
Double nSectors = memSize / sectorBytes;
Double memSizeKB = memSize / 1024;
Double memSizeGB = memSize / (1024 * 1024 * 1024);
Double memSize100GB = memSizeGB / 100;
// allocation bitmap file: one bit per sector
Double allocBitmapSize = nSectors / 8;
// extend overflow file: 4MB, plus 4MB per 100GB
Double extOverflowFileSize = memSize100GB * 1024 * 1024 * 4;
// journal file: 8MB, plus 8MB per 100GB
Double journalFileSize = memSize100GB * 1024 * 1024 * 8;
// catalog file: 10bytes per KB
Double catalogFileSize = memSizeKB * 10;
// hot files: 5bytes per KB
Double hotFileSize = memSizeKB * 5;
// quota users file and quota groups file
Double quotaUsersFileSize = (memSizeGB * 256 + 1) * 64;
Double quotaGroupsFileSize = (memSizeGB * 32 + 1) * 64;
Double metadataSize = allocBitmapSize + extOverflowFileSize + journalFileSize
+ catalogFileSize + hotFileSize + quotaUsersFileSize + quotaGroupsFileSize;
Double allocSize = memSize + metadataSize;
Double numSectors = allocSize / sectorBytes;
// round up to the next whole sector
return numSectors.longValue() + 1;
} | @Test
public void getSectorTest20() {
String testRequestSize = "20";
String testSectorSize = "512";
long result = HFSUtils.getNumSector(testRequestSize, testSectorSize);
assertEquals(1L, result);
} |
@Override
public int run(String[] args) throws Exception {
try {
webServiceClient = WebServiceClient.getWebServiceClient().createClient();
return runCommand(args);
} finally {
if (yarnClient != null) {
yarnClient.close();
}
if (webServiceClient != null) {
webServiceClient.destroy();
}
}
} | @Test (timeout = 5000)
public void testWithInvalidApplicationId() throws Exception {
LogsCLI cli = createCli();
// Specify an invalid applicationId
int exitCode = cli.run(new String[] {"-applicationId", "123"});
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"Invalid ApplicationId specified"));
} |
@Override
public boolean check(final Session<?> session, final CancelCallback callback) throws BackgroundException {
final Host bookmark = session.getHost();
if(bookmark.getProtocol().isHostnameConfigurable() && StringUtils.isBlank(bookmark.getHostname())) {
throw new ConnectionCanceledException();
}
if(session.isConnected()) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip opening connection for session %s", session));
}
// Connection already open
return false;
}
// Obtain password from keychain or prompt
synchronized(login) {
login.validate(bookmark, prompt, new LoginOptions(bookmark.getProtocol()));
}
this.connect(session, callback);
return true;
} | @Test
public void testConnectDnsFailure() throws Exception {
final Session session = new NullSession(new Host(new TestProtocol(), "unknownhost.local", new Credentials("user", "p"))) {
@Override
public boolean isConnected() {
return false;
}
};
final LoginConnectionService s = new LoginConnectionService(new DisabledLoginCallback(), new HostKeyCallback() {
@Override
public boolean verify(final Host hostname, final PublicKey key) {
assertEquals(Session.State.opening, session.getState());
return true;
}
}, new DisabledPasswordStore(),
new DisabledProgressListener()
);
try {
s.check(session, new DisabledCancelCallback());
fail();
}
catch(ResolveFailedException e) {
assertEquals("Connection failed", e.getMessage());
assertEquals("DNS lookup for unknownhost.local failed. DNS is the network service that translates a server name to its Internet address. This error is most often caused by having no connection to the Internet or a misconfigured network. It can also be caused by an unresponsive DNS server or a firewall preventing access to the network.", e.getDetail());
assertEquals(UnknownHostException.class, e.getCause().getClass());
assertEquals(Session.State.closed, session.getState());
}
try {
s.check(new NullSession(new Host(new TestProtocol(), "localhost", new Credentials("user", ""))) {
@Override
public boolean isConnected() {
return false;
}
}, new DisabledCancelCallback());
fail();
}
catch(LoginCanceledException e) {
}
} |
@Override
public ClusterHealth checkCluster() {
checkState(!nodeInformation.isStandalone(), "Clustering is not enabled");
checkState(sharedHealthState != null, "HealthState instance can't be null when clustering is enabled");
Set<NodeHealth> nodeHealths = sharedHealthState.readAll();
Health health = clusterHealthChecks.stream()
.map(clusterHealthCheck -> clusterHealthCheck.check(nodeHealths))
.reduce(Health.GREEN, HealthReducer::merge);
return new ClusterHealth(health, nodeHealths);
} | @Test
public void checkCluster_passes_set_of_NodeHealth_returns_by_HealthState_to_all_ClusterHealthChecks() {
when(nodeInformation.isStandalone()).thenReturn(false);
ClusterHealthCheck[] mockedClusterHealthChecks = IntStream.range(0, 1 + random.nextInt(3))
.mapToObj(i -> mock(ClusterHealthCheck.class))
.toArray(ClusterHealthCheck[]::new);
Set<NodeHealth> nodeHealths = IntStream.range(0, 1 + random.nextInt(4)).mapToObj(i -> randomNodeHealth()).collect(Collectors.toSet());
when(sharedHealthState.readAll()).thenReturn(nodeHealths);
for (ClusterHealthCheck mockedClusterHealthCheck : mockedClusterHealthChecks) {
when(mockedClusterHealthCheck.check(same(nodeHealths))).thenReturn(Health.GREEN);
}
HealthCheckerImpl underTest = new HealthCheckerImpl(nodeInformation, new NodeHealthCheck[0], mockedClusterHealthChecks, sharedHealthState);
underTest.checkCluster();
for (ClusterHealthCheck mockedClusterHealthCheck : mockedClusterHealthChecks) {
verify(mockedClusterHealthCheck).check(same(nodeHealths));
}
} |
public void sendAcks(List<String> messagesToAck) throws IOException {
try (SubscriberStub subscriber = pubsubQueueClient.getSubscriber(subscriberStubSettings)) {
int numberOfBatches = (int) Math.ceil((double) messagesToAck.size() / DEFAULT_BATCH_SIZE_ACK_API);
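// acknowledge in parallel batches, each bounded by DEFAULT_BATCH_SIZE_ACK_API message ids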
CompletableFuture.allOf(IntStream.range(0, numberOfBatches)
.parallel()
.boxed()
.map(batchIndex -> getTask(subscriber, messagesToAck, batchIndex)).toArray(CompletableFuture[]::new))
.get(MAX_WAIT_TIME_TO_ACK_MESSAGES, TimeUnit.MILLISECONDS);
LOG.debug("Flushed out all outstanding acknowledged messages: " + messagesToAck.size());
} catch (ExecutionException | InterruptedException | TimeoutException e) {
throw new IOException("Failed to ack messages from PubSub", e);
}
} | @Test
public void testSendAcks() throws IOException {
doNothing().when(mockSubscriber).close();
when(mockPubsubQueueClient.getSubscriber(any())).thenReturn(mockSubscriber);
List<String> messageAcks = IntStream.range(0, 20).mapToObj(i -> "msg_" + i).collect(Collectors.toList());
doNothing().when(mockPubsubQueueClient).makeAckRequest(eq(mockSubscriber), eq(SUBSCRIPTION_NAME), any());
PubsubMessagesFetcher fetcher = new PubsubMessagesFetcher(
PROJECT_ID, SUBSCRIPTION_ID, SMALL_BATCH_SIZE,
MAX_MESSAGES_IN_REQUEST, MAX_WAIT_TIME_IN_REQUEST, mockPubsubQueueClient
);
fetcher.sendAcks(messageAcks);
verify(mockPubsubQueueClient, times(2)).makeAckRequest(eq(mockSubscriber), eq(SUBSCRIPTION_NAME), any());
} |
public static Class<?> getParamClass(final String className) throws ClassNotFoundException {
if (PRIMITIVE_TYPE.containsKey(className)) {
return PRIMITIVE_TYPE.get(className).getClazz();
} else {
return Class.forName(className);
}
} | @Test
public void testGetParamClass() throws Exception {
assertEquals(int.class, PrxInfoUtil.getParamClass("int"));
assertEquals(long.class, PrxInfoUtil.getParamClass("long"));
assertEquals(short.class, PrxInfoUtil.getParamClass("short"));
assertEquals(byte.class, PrxInfoUtil.getParamClass("byte"));
assertEquals(boolean.class, PrxInfoUtil.getParamClass("boolean"));
assertEquals(char.class, PrxInfoUtil.getParamClass("char"));
assertEquals(float.class, PrxInfoUtil.getParamClass("float"));
assertEquals(Integer.class, PrxInfoUtil.getParamClass("java.lang.Integer"));
} |
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
final String... stateStoreNames) {
process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
} | @Test
public void shouldNotAllowNullStoreNamesOnProcess() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.process(processorSupplier, (String[]) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array"));
} |
@Override
public int countWords(Note note) {
return countChars(note);
} | @Test
public void getChecklistWords() {
String content = CHECKED_SYM + "這是中文測試\n" + UNCHECKED_SYM + "これは日本語のテストです";
Note note = getNote(1L, "这是中文测试", content);
note.setChecklist(true);
assertEquals(24, new IdeogramsWordCounter().countWords(note));
} |
public static MapViewController create(MapView mapView, Model model) {
MapViewController mapViewController = new MapViewController(mapView);
model.mapViewPosition.addObserver(mapViewController);
return mapViewController;
} | @Test
public void repaintTest() {
DummyMapView dummyMapView = new DummyMapView();
Model model = new Model();
MapViewController.create(dummyMapView, model);
Assert.assertEquals(0, dummyMapView.repaintCounter);
model.mapViewPosition.setZoomLevel((byte) 1);
// this does not hold with zoom animation
// Assert.assertEquals(1, dummyMapView.repaintCounter);
} |
public void setHost(String host) {
this.host = host;
} | @Test
void testSetHost() {
assertNull(addressContext.getHost());
addressContext.setHost("127.0.0.1");
assertEquals("127.0.0.1", addressContext.getHost());
} |
public static void main(String[] args) {
final List<Long> numbers = Arrays.asList(1L, 3L, 4L, 7L, 8L);
LOGGER.info("Numbers to be squared and get sum --> {}", numbers);
final List<SquareNumberRequest> requests =
numbers.stream().map(SquareNumberRequest::new).toList();
var consumer = new Consumer(0L);
// Pass the requests and the consumer to fanOutFanIn, sometimes referred to as the
// Orchestrator function
final Long sumOfSquaredNumbers = FanOutFanIn.fanOutFanIn(requests, consumer);
LOGGER.info("Sum of all squared numbers --> {}", sumOfSquaredNumbers);
} | @Test
void shouldLaunchApp() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
public void setThreadFactory(ThreadFactory threadFactory) {
this.threadFactory = checkNotNull(threadFactory, "threadFactory");
} | @Test
public void test_setThreadFactory_whenNull() {
ReactorBuilder builder = newBuilder();
assertThrows(NullPointerException.class, () -> builder.setThreadFactory(null));
} |
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
} | @Test
public void shouldCastNullInt() {
// When:
final BigDecimal decimal = DecimalUtil.cast((Integer)null, 2, 1);
// Then:
assertThat(decimal, is(nullValue()));
} |
public static Expression create(final String value) {
/* remove the start and end braces */
final String expression = stripBraces(value);
if (expression == null || expression.isEmpty()) {
throw new IllegalArgumentException("an expression is required.");
}
/* Check if the expression is too long */
if (expression.length() > MAX_EXPRESSION_LENGTH) {
throw new IllegalArgumentException(
"expression is too long. Max length: " + MAX_EXPRESSION_LENGTH);
}
/* create a new regular expression matcher for the expression */
String variableName = null;
String variablePattern = null;
String operator = null;
Matcher matcher = EXPRESSION_PATTERN.matcher(value);
if (matcher.matches()) {
/* grab the operator */
operator = matcher.group(2).trim();
/* we have a valid variable expression, extract the name from the first group */
variableName = matcher.group(3).trim();
if (variableName.contains(":")) {
/* split on the colon and ensure the size of parts array must be 2 */
String[] parts = variableName.split(":", 2);
variableName = parts[0];
variablePattern = parts[1];
}
/* look for nested expressions */
if (variableName.contains("{")) {
/* nested, literal */
return null;
}
}
/* check for an operator */
if (PATH_STYLE_OPERATOR.equalsIgnoreCase(operator)) {
return new PathStyleExpression(variableName, variablePattern);
}
/* default to simple */
return SimpleExpression.isSimpleExpression(value)
? new SimpleExpression(variableName, variablePattern)
: null; // Return null if it can't be validated as a Simple Expression -- Probably a Literal
} | @Test
void malformedBodyTemplate() {
String bodyTemplate = "{" + "a".repeat(65536) + "}";
try {
BodyTemplate template = BodyTemplate.create(bodyTemplate);
} catch (Throwable e) {
assertThatObject(e).isNotInstanceOf(StackOverflowError.class);
}
} |
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
Iterable<RedisClusterNode> res = clusterGetNodes();
for (RedisClusterNode redisClusterNode : res) {
if (redisClusterNode.isMaster() && redisClusterNode.getSlotRange().contains(slot)) {
return redisClusterNode;
}
}
return null;
} | @Test
public void testClusterGetNodeForSlot() {
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
assertThat(node1.getId()).isNotEqualTo(node2.getId());
} |
@Override
public void close() {
close(Duration.ofMillis(Long.MAX_VALUE));
} | @Test
public void testConstructorWithSerializers() {
Properties producerProps = new Properties();
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer()).close();
} |
public static Env valueOf(String name) {
name = getWellFormName(name);
if (exists(name)) {
return STRING_ENV_MAP.get(name);
} else {
throw new IllegalArgumentException(name + " not exist");
}
} | @Test(expected = IllegalArgumentException.class)
public void valueOf() {
String name = "notexist";
assertFalse(Env.exists(name));
assertEquals(Env.valueOf(name), Env.UNKNOWN);
assertEquals(Env.valueOf("dev"), Env.DEV);
assertEquals(Env.valueOf("UAT"), Env.UAT);
} |
@GetMapping("/tagId/{tagId}")
public ShenyuAdminResult queryApiByTagId(@PathVariable("tagId") @Valid final String tagId) {
List<TagRelationDO> tagRelationDOS = Optional.ofNullable(tagRelationService.findByTagId(tagId)).orElse(Lists.newArrayList());
return ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, tagRelationDOS);
} | @Test
public void testQueryApiByTagId() throws Exception {
List<TagRelationDO> tagRelationDOS = new ArrayList<>();
tagRelationDOS.add(buildTagRelationDO());
given(tagRelationService.findByTagId(anyString())).willReturn(tagRelationDOS);
this.mockMvc.perform(MockMvcRequestBuilders.get("/tag-relation/tagId/{tagId}", "123"))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS)))
.andReturn();
} |
public Collection<NodeInfo> load() {
NodeStatsResponse response = esClient.nodesStats();
List<NodeInfo> result = new ArrayList<>();
response.getNodeStats().forEach(nodeStat -> result.add(toNodeInfo(nodeStat)));
return result;
} | @Test
public void return_info_from_elasticsearch_api() {
Collection<NodeInfo> nodes = underTest.load();
assertThat(nodes).hasSize(1);
NodeInfo node = nodes.iterator().next();
assertThat(node.getName()).isNotEmpty();
assertThat(node.getHost()).isNotEmpty();
assertThat(node.getSections()).hasSize(1);
ProtobufSystemInfo.Section stateSection = node.getSections().get(0);
assertThat(stateSection.getAttributesList())
.extracting(ProtobufSystemInfo.Attribute::getKey)
.contains(
"Disk Available", "Store Size",
"JVM Heap Usage", "JVM Heap Used", "JVM Heap Max", "JVM Non Heap Used",
"JVM Threads",
"Field Data Memory", "Field Data Circuit Breaker Limit", "Field Data Circuit Breaker Estimation",
"Request Circuit Breaker Limit", "Request Circuit Breaker Estimation",
"Query Cache Memory", "Request Cache Memory");
} |
@GetMapping(
path = "/api/{namespace}/{extension}",
produces = MediaType.APPLICATION_JSON_VALUE
)
@CrossOrigin
@Operation(summary = "Provides metadata of the latest version of an extension")
@ApiResponses({
@ApiResponse(
responseCode = "200",
description = "The extension metadata are returned in JSON format"
),
@ApiResponse(
responseCode = "404",
description = "The specified extension could not be found",
content = @Content()
),
@ApiResponse(
responseCode = "429",
description = "A client has sent too many requests in a given amount of time",
content = @Content(),
headers = {
@Header(
name = "X-Rate-Limit-Retry-After-Seconds",
description = "Number of seconds to wait after receiving a 429 response",
schema = @Schema(type = "integer", format = "int32")
),
@Header(
name = "X-Rate-Limit-Remaining",
description = "Remaining number of requests left",
schema = @Schema(type = "integer", format = "int32")
)
}
)
})
public ResponseEntity<ExtensionJson> getExtension(
@PathVariable @Parameter(description = "Extension namespace", example = "redhat")
String namespace,
@PathVariable @Parameter(description = "Extension name", example = "java")
String extension
) {
for (var registry : getRegistries()) {
try {
return ResponseEntity.ok()
.cacheControl(CacheControl.noCache().cachePublic())
.body(registry.getExtension(namespace, extension, null));
} catch (NotFoundException exc) {
// Try the next registry
}
}
var json = ExtensionJson.error("Extension not found: " + NamingUtil.toExtensionId(namespace, extension));
return new ResponseEntity<>(json, HttpStatus.NOT_FOUND);
} | @Test
public void testPostExistingReview() throws Exception {
var user = mockUserData();
var extVersion = mockExtension();
var extension = extVersion.getExtension();
Mockito.when(repositories.findExtension("bar", "foo"))
.thenReturn(extension);
Mockito.when(repositories.hasActiveReview(extension, user))
.thenReturn(true);
mockMvc.perform(post("/api/{namespace}/{extension}/review", "foo", "bar")
.contentType(MediaType.APPLICATION_JSON)
.content(reviewJson(r -> {
r.rating = 3;
}))
.with(user("test_user"))
.with(csrf().asHeader()))
.andExpect(status().isBadRequest())
.andExpect(content().json(errorJson("You must not submit more than one review for an extension.")));
} |
@Override
public ScheduledFuture<@Nullable ?> schedule(Runnable command, long delay, TimeUnit unit) {
if (command == null || unit == null) {
throw new NullPointerException();
}
ScheduledFutureTask<Void> task =
new ScheduledFutureTask<>(command, null, triggerTime(delay, unit));
runNowOrScheduleInTheFuture(task);
return task;
} | @Test
public void testSchedule() throws Exception {
List<AtomicInteger> callCounts = new ArrayList<>();
List<ScheduledFutureTask<?>> futures = new ArrayList<>();
FastNanoClockAndSleeper fastNanoClockAndSleeper = new FastNanoClockAndSleeper();
UnboundedScheduledExecutorService executorService =
new UnboundedScheduledExecutorService(fastNanoClockAndSleeper);
callCounts.add(new AtomicInteger());
futures.add(
(ScheduledFutureTask<?>)
executorService.schedule(
(Runnable) callCounts.get(callCounts.size() - 1)::incrementAndGet,
100,
MILLISECONDS));
callCounts.add(new AtomicInteger());
futures.add(
(ScheduledFutureTask<?>)
executorService.schedule(
callCounts.get(callCounts.size() - 1)::incrementAndGet, 100, MILLISECONDS));
// No tasks should have been picked up
wakeUpAndCheckTasks(executorService);
for (int i = 0; i < callCounts.size(); ++i) {
assertEquals(0, callCounts.get(i).get());
}
// No tasks should have been picked up even if the time advances 99 milliseconds
fastNanoClockAndSleeper.sleep(99);
wakeUpAndCheckTasks(executorService);
for (int i = 0; i < callCounts.size(); ++i) {
assertEquals(0, callCounts.get(i).get());
}
// Once the full delay has elapsed, all tasks should be picked up and run
fastNanoClockAndSleeper.sleep(1);
wakeUpAndCheckTasks(executorService);
assertNull(futures.get(0).get());
assertEquals(1, futures.get(1).get());
for (int i = 0; i < callCounts.size(); ++i) {
assertFalse(futures.get(i).isPeriodic());
assertEquals(1, callCounts.get(i).get());
}
assertThat(executorService.shutdownNow(), empty());
} |
public void submitIndexingErrors(Collection<IndexingError> indexingErrors) {
try {
final FailureBatch fb = FailureBatch.indexingFailureBatch(
indexingErrors.stream()
.filter(ie -> {
if (!ie.message().supportsFailureHandling()) {
logger.warn("Submitted a message with indexing errors, which doesn't support failure handling!");
return false;
} else {
return true;
}
})
.map(this::fromIndexingError)
.collect(Collectors.toList()));
if (fb.size() > 0) {
failureSubmissionQueue.submitBlocking(fb);
}
} catch (InterruptedException ignored) {
logger.warn("Failed to submit {} indexing errors for failure handling. The thread has been interrupted!",
indexingErrors.size());
Thread.currentThread().interrupt();
}
} | @Test
public void submitIndexingErrors_allIndexingErrorsTransformedAndSubmittedToFailureQueue() throws Exception {
// given
final Message msg1 = Mockito.mock(Message.class);
when(msg1.getMessageId()).thenReturn("msg-1");
when(msg1.supportsFailureHandling()).thenReturn(true);
final Message msg2 = Mockito.mock(Message.class);
when(msg2.getMessageId()).thenReturn("msg-2");
when(msg2.supportsFailureHandling()).thenReturn(true);
final List<IndexingError> indexingErrors = List.of(
IndexingError.create(msg1, "index-1", MappingError, "Error"),
IndexingError.create(msg2, "index-2", Unknown, "Error2")
);
// when
underTest.submitIndexingErrors(indexingErrors);
// then
verify(failureSubmissionQueue, times(1)).submitBlocking(failureBatchCaptor.capture());
assertThat(failureBatchCaptor.getValue()).satisfies(fb -> {
assertThat(fb.containsIndexingFailures()).isTrue();
assertThat(fb.size()).isEqualTo(2);
assertThat(fb.getFailures().get(0)).satisfies(indexingFailure -> {
assertThat(indexingFailure.failureType()).isEqualTo(FailureType.INDEXING);
assertThat(indexingFailure.failureCause().label()).isEqualTo("MappingError");
assertThat(indexingFailure.message()).isEqualTo("Failed to index message with id 'msg-1' targeting 'index-1'");
assertThat(indexingFailure.failureDetails()).isEqualTo("Error");
assertThat(indexingFailure.failureTimestamp()).isNotNull();
assertThat(indexingFailure.failedMessage()).isEqualTo(msg1);
assertThat(indexingFailure.targetIndex()).isEqualTo("index-1");
assertThat(indexingFailure.requiresAcknowledgement()).isFalse();
});
assertThat(fb.getFailures().get(1)).satisfies(indexingFailure -> {
assertThat(indexingFailure.failureType()).isEqualTo(FailureType.INDEXING);
assertThat(indexingFailure.failureCause().label()).isEqualTo("UNKNOWN");
assertThat(indexingFailure.message()).isEqualTo("Failed to index message with id 'msg-2' targeting 'index-2'");
assertThat(indexingFailure.failureDetails()).isEqualTo("Error2");
assertThat(indexingFailure.failureTimestamp()).isNotNull();
assertThat(indexingFailure.failedMessage()).isEqualTo(msg2);
assertThat(indexingFailure.targetIndex()).isEqualTo("index-2");
assertThat(indexingFailure.requiresAcknowledgement()).isFalse();
});
});
} |
public MapStoreConfig setImplementation(@Nonnull Object implementation) {
this.implementation = checkNotNull(implementation, "Map store cannot be null!");
this.className = null;
return this;
} | @Test
public void setImplementation() {
Object mapStoreImpl = new Object();
MapStoreConfig cfg = new MapStoreConfig().setImplementation(mapStoreImpl);
assertEquals(mapStoreImpl, cfg.getImplementation());
assertEquals(new MapStoreConfig().setImplementation(mapStoreImpl), cfg);
} |
@Operation(summary = "Gets the status of ongoing database migrations, if any", description = "Return the detailed status of ongoing database migrations" +
" including starting date. If no migration is ongoing or needed it is still possible to call this endpoint and receive appropriate information.")
@GetMapping
public DatabaseMigrationsResponse getStatus() {
Optional<Long> currentVersion = databaseVersion.getVersion();
checkState(currentVersion.isPresent(), NO_CONNECTION_TO_DB);
DatabaseVersion.Status status = databaseVersion.getStatus();
if (status == DatabaseVersion.Status.UP_TO_DATE || status == DatabaseVersion.Status.REQUIRES_DOWNGRADE) {
return new DatabaseMigrationsResponse(databaseMigrationState);
} else if (!database.getDialect().supportsMigration()) {
return new DatabaseMigrationsResponse(DatabaseMigrationState.Status.STATUS_NOT_SUPPORTED);
} else {
return switch (databaseMigrationState.getStatus()) {
case RUNNING, FAILED, SUCCEEDED -> new DatabaseMigrationsResponse(databaseMigrationState);
case NONE -> new DatabaseMigrationsResponse(DatabaseMigrationState.Status.MIGRATION_REQUIRED);
default -> throw new IllegalArgumentException(UNSUPPORTED_DATABASE_MIGRATION_STATUS);
};
}
} | @Test
void getStatus_whenDbMigrationsFailed_returnFailed() throws Exception {
when(databaseVersion.getStatus()).thenReturn(DatabaseVersion.Status.REQUIRES_UPGRADE);
when(dialect.supportsMigration()).thenReturn(true);
when(migrationState.getStatus()).thenReturn(DatabaseMigrationState.Status.FAILED);
when(migrationState.getStartedAt()).thenReturn(Optional.of(SOME_DATE));
mockMvc.perform(get(DATABASE_MIGRATIONS_ENDPOINT)).andExpectAll(status().isOk(),
content().json("{\"status\":\"MIGRATION_FAILED\",\"message\":\"Migration failed: %s.<br/> Please check logs.\"}"));
} |
@Override
@MethodNotAvailable
public void clear() {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testClear() {
adapter.clear();
} |
public static void extractToken(HttpURLConnection conn, Token token) throws IOException, AuthenticationException {
int respCode = conn.getResponseCode();
if (respCode == HttpURLConnection.HTTP_OK
|| respCode == HttpURLConnection.HTTP_CREATED
|| respCode == HttpURLConnection.HTTP_ACCEPTED) {
// cookie handler should have already extracted the token. try again
// for backwards compatibility if this method is called on a connection
// not opened via this instance.
token.cookieHandler.put(null, conn.getHeaderFields());
} else if (respCode == HttpURLConnection.HTTP_NOT_FOUND) {
LOG.trace("Setting token value to null ({}), resp={}", token, respCode);
token.set(null);
throw new FileNotFoundException(conn.getURL().toString());
} else {
LOG.trace("Setting token value to null ({}), resp={}", token, respCode);
token.set(null);
throw new AuthenticationException("Authentication failed" +
", URL: " + conn.getURL() +
", status: " + conn.getResponseCode() +
", message: " + conn.getResponseMessage());
}
} | @Test
public void testExtractTokenLowerCaseCookieHeader() throws Exception {
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_OK);
String tokenStr = "foo";
Map<String, List<String>> headers = new HashMap<>();
List<String> cookies = new ArrayList<>();
cookies.add(AuthenticatedURL.AUTH_COOKIE + "=" + tokenStr);
headers.put("set-cookie", cookies);
Mockito.when(conn.getHeaderFields()).thenReturn(headers);
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
AuthenticatedURL.extractToken(conn, token);
Assert.assertTrue(token.isSet());
} |
public void toXMLUTF8(Object obj, OutputStream out) throws IOException {
Writer w = new OutputStreamWriter(out, StandardCharsets.UTF_8);
w.write("<?xml version=\"1.1\" encoding=\"UTF-8\"?>\n");
toXML(obj, w);
} | @Issue("JENKINS-71139")
@Test
public void nullsWithEncodingDeclaration() throws Exception {
Bar b = new Bar();
b.s = "x\u0000y";
try {
new XStream2().toXMLUTF8(b, new ByteArrayOutputStream());
fail("expected to fail fast; not supported to read either");
} catch (RuntimeException x) {
assertThat("cause is com.thoughtworks.xstream.io.StreamException: Invalid character 0x0 in XML stream", Functions.printThrowable(x), containsString("0x0"));
}
} |
public void cancel(Throwable throwable) {
cancellationContext.cancel(throwable);
} | @Test
void cancel() {} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void setChatTitle() {
BaseResponse response = bot.execute(new SetChatTitle(groupId, "Test Bot Group " + System.currentTimeMillis()));
assertTrue(response.isOk());
} |
public boolean checkIfEnabled() {
try {
this.gitCommand = locateDefaultGit();
MutableString stdOut = new MutableString();
this.processWrapperFactory.create(null, l -> stdOut.string = l, gitCommand, "--version").execute();
return stdOut.string != null && stdOut.string.startsWith("git version") && isCompatibleGitVersion(stdOut.string);
} catch (Exception e) {
LOG.debug("Failed to find git native client", e);
return false;
}
} | @Test
public void git_should_not_be_enabled_if_version_is_less_than_required_minimum() {
ProcessWrapperFactory mockFactory = mockGitVersionCommand("git version 1.9.0");
NativeGitBlameCommand blameCommand = new NativeGitBlameCommand(System2.INSTANCE, mockFactory);
assertThat(blameCommand.checkIfEnabled()).isFalse();
} |
@Override
public <T> T clone(T object) {
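// strings are immutable and returned as-is; collections/maps holding non-Serializable elements are round-tripped through JSON; JsonNodes are deep-copied; Serializable objects use Java serialization with a JSON fallback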
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
// it is possible that the object itself implements java.io.Serializable while its underlying structure does not;
// in that case we switch to the JSON marshaling strategy, which doesn't rely on Java serialization
}
}
return jsonClone(object);
} | @Test
public void should_clone_collection_of_serializable_object() {
List<SerializableObject> original = new ArrayList<>();
original.add(new SerializableObject("value"));
List<SerializableObject> cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
} |
@Override
public Time getTime(final int columnIndex) throws SQLException {
return (Time) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, Time.class), Time.class);
} | @Test
void assertGetTimeAndCalendarWithColumnIndex() throws SQLException {
Calendar calendar = Calendar.getInstance();
when(mergeResultSet.getCalendarValue(1, Time.class, calendar)).thenReturn(new Time(0L));
assertThat(shardingSphereResultSet.getTime(1, calendar), is(new Time(0L)));
} |
@Override
public void request(Payload grpcRequest, StreamObserver<Payload> responseObserver) {
traceIfNecessary(grpcRequest, true);
String type = grpcRequest.getMetadata().getType();
long startTime = System.nanoTime();
// server is still starting.
if (!ApplicationUtils.isStarted()) {
Payload payloadResponse = GrpcUtils.convert(
ErrorResponse.build(NacosException.INVALID_SERVER_STATUS, "Server is starting,please try later."));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.INVALID_SERVER_STATUS, null, null, System.nanoTime() - startTime);
return;
}
// server check.
if (ServerCheckRequest.class.getSimpleName().equals(type)) {
Payload serverCheckResponseP = GrpcUtils.convert(new ServerCheckResponse(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get(), true));
traceIfNecessary(serverCheckResponseP, false);
responseObserver.onNext(serverCheckResponseP);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, true,
0, null, null, System.nanoTime() - startTime);
return;
}
RequestHandler requestHandler = requestHandlerRegistry.getByRequestType(type);
// no handler found.
if (requestHandler == null) {
Loggers.REMOTE_DIGEST.warn(String.format("[%s] No handler for request type: %s", "grpc", type));
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.NO_HANDLER, "RequestHandler Not Found"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.NO_HANDLER, null, null, System.nanoTime() - startTime);
return;
}
// check connection status.
String connectionId = GrpcServerConstants.CONTEXT_KEY_CONN_ID.get();
boolean requestValid = connectionManager.checkValid(connectionId);
if (!requestValid) {
Loggers.REMOTE_DIGEST
.warn("[{}] Invalid connection id, connection [{}] is unregistered.", "grpc", connectionId);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.UN_REGISTER, "Connection is unregistered."));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.UN_REGISTER, null, null, System.nanoTime() - startTime);
return;
}
Object parseObj = null;
try {
parseObj = GrpcUtils.parse(grpcRequest);
} catch (Exception e) {
Loggers.REMOTE_DIGEST
.warn("[{}] Invalid request received from connection [{}], error={}", "grpc", connectionId, e);
Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, e.getMessage()));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, e.getClass().getSimpleName(), null, System.nanoTime() - startTime);
return;
}
if (parseObj == null) {
Loggers.REMOTE_DIGEST.warn("[{}] Invalid request receive ,parse request is null", connectionId);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime);
return;
}
if (!(parseObj instanceof Request)) {
Loggers.REMOTE_DIGEST
.warn("[{}] Invalid request received, parsed payload is not a request, parseObj={}", connectionId,
parseObj);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime);
return;
}
Request request = (Request) parseObj;
try {
Connection connection = connectionManager.getConnection(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get());
RequestMeta requestMeta = new RequestMeta();
requestMeta.setClientIp(connection.getMetaInfo().getClientIp());
requestMeta.setConnectionId(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get());
requestMeta.setClientVersion(connection.getMetaInfo().getVersion());
requestMeta.setLabels(connection.getMetaInfo().getLabels());
requestMeta.setAbilityTable(connection.getAbilityTable());
connectionManager.refreshActiveTime(requestMeta.getConnectionId());
prepareRequestContext(request, requestMeta, connection);
Response response = requestHandler.handleRequest(request, requestMeta);
Payload payloadResponse = GrpcUtils.convert(response);
traceIfNecessary(payloadResponse, false);
if (response.getErrorCode() == NacosException.OVER_THRESHOLD) {
RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(() -> {
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
}, 1000L, TimeUnit.MILLISECONDS);
} else {
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
}
MetricsMonitor.recordGrpcRequestEvent(type, response.isSuccess(),
response.getErrorCode(), null, request.getModule(), System.nanoTime() - startTime);
} catch (Throwable e) {
Loggers.REMOTE_DIGEST
.error("[{}] Failed to handle request from connection [{}], error message: {}", "grpc", connectionId,
e);
Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(e));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
ResponseCode.FAIL.getCode(), e.getClass().getSimpleName(), request.getModule(), System.nanoTime() - startTime);
} finally {
RequestContextHolder.removeContext();
}
} | @Test
void testConnectionNotRegister() {
ApplicationUtils.setStarted(true);
Mockito.when(requestHandlerRegistry.getByRequestType(Mockito.anyString())).thenReturn(mockHandler);
Mockito.when(connectionManager.checkValid(Mockito.any())).thenReturn(false);
RequestMeta metadata = new RequestMeta();
metadata.setClientIp("127.0.0.1");
metadata.setConnectionId(connectId);
InstanceRequest instanceRequest = new InstanceRequest();
instanceRequest.setRequestId(requestId);
Payload request = GrpcUtils.convert(instanceRequest, metadata);
StreamObserver<Payload> streamObserver = new StreamObserver<Payload>() {
@Override
public void onNext(Payload payload) {
System.out.println("Receive data from server: " + payload);
Object res = GrpcUtils.parse(payload);
assertTrue(res instanceof ErrorResponse);
ErrorResponse errorResponse = (ErrorResponse) res;
assertEquals(NacosException.UN_REGISTER, errorResponse.getErrorCode());
}
@Override
public void onError(Throwable throwable) {
fail(throwable.getMessage());
}
@Override
public void onCompleted() {
System.out.println("complete");
}
};
streamStub.request(request, streamObserver);
ApplicationUtils.setStarted(false);
} |
@Override
public boolean supports(FailureBatch failureBatch) {
return failureBatch.containsIndexingFailures();
} | @Test
public void supports_indexingFailuresSupported() {
assertThat(underTest.supports(FailureBatch.indexingFailureBatch(new ArrayList<>()))).isTrue();
} |
public void createBackupLog(UUID callerUuid, UUID txnId) {
createBackupLog(callerUuid, txnId, false);
} | @Test(expected = TransactionException.class)
public void createBackupLog_whenAlreadyExist() {
UUID callerUuid = UuidUtil.newUnsecureUUID();
txService.createBackupLog(callerUuid, TXN);
txService.createBackupLog(callerUuid, TXN);
} |
public Collection<File> getUploadedFiles() throws IOException {
if (uploadDirectory == null) {
return Collections.emptyList();
}
FileAdderVisitor visitor = new FileAdderVisitor();
Files.walkFileTree(uploadDirectory, visitor);
return Collections.unmodifiableCollection(visitor.getContainedFiles());
} | @Test
void testEmptyDirectory() throws IOException {
Path rootDir = Paths.get("root");
Path tmp = temporaryFolder;
Files.createDirectory(tmp.resolve(rootDir));
try (FileUploads fileUploads = new FileUploads(tmp.resolve(rootDir))) {
Collection<File> detectedFiles = fileUploads.getUploadedFiles();
assertThat(detectedFiles).isEmpty();
}
} |
public Collection<File> getUploadedFiles() throws IOException {
if (uploadDirectory == null) {
return Collections.emptyList();
}
FileAdderVisitor visitor = new FileAdderVisitor();
Files.walkFileTree(uploadDirectory, visitor);
return Collections.unmodifiableCollection(visitor.getContainedFiles());
} | @Test
void testDirectoryScan() throws IOException {
Path rootDir = Paths.get("root");
Path rootFile = rootDir.resolve("rootFile");
Path subDir = rootDir.resolve("sub");
Path subFile = subDir.resolve("subFile");
Path tmp = temporaryFolder;
Files.createDirectory(tmp.resolve(rootDir));
Files.createDirectory(tmp.resolve(subDir));
Files.createFile(tmp.resolve(rootFile));
Files.createFile(tmp.resolve(subFile));
try (FileUploads fileUploads = new FileUploads(tmp.resolve(rootDir))) {
Collection<Path> detectedFiles =
fileUploads.getUploadedFiles().stream()
.map(File::toPath)
.collect(Collectors.toList());
assertThat(detectedFiles).hasSize(2);
assertThat(detectedFiles).contains(tmp.resolve(rootFile));
assertThat(detectedFiles).contains(tmp.resolve(subFile));
}
} |
@Override
public Object convert(String value) {
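// tokenize "k=v" pairs (quotes around keys or values are stripped) into a field map; inputs without '=' yield an empty map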
if (isNullOrEmpty(value)) {
return value;
}
if (value.contains("=")) {
final Map<String, String> fields = new HashMap<>();
Matcher m = PATTERN.matcher(value);
while (m.find()) {
if (m.groupCount() != 2) {
continue;
}
fields.put(removeQuotes(m.group(1)), removeQuotes(m.group(2)));
}
return fields;
} else {
return Collections.emptyMap();
}
} | @Test
public void testFilterWithKVOnly() {
TokenizerConverter f = new TokenizerConverter(new HashMap<String, Object>());
@SuppressWarnings("unchecked")
Map<String, String> result = (Map<String, String>) f.convert("k1=v1");
assertEquals(1, result.size());
assertEquals("v1", result.get("k1"));
} |
public static BadRequestException namespaceAlreadyExists(String namespaceName) {
return new BadRequestException("namespace already exists for namespaceName:%s", namespaceName);
} | @Test
public void testNamespaceAlreadyExists() {
BadRequestException namespaceAlreadyExists = BadRequestException.namespaceAlreadyExists(namespaceName);
assertEquals("namespace already exists for namespaceName:application", namespaceAlreadyExists.getMessage());
} |
@Override
public int getMaxColumnsInOrderBy() {
return 0;
} | @Test
void assertGetMaxColumnsInOrderBy() {
assertThat(metaData.getMaxColumnsInOrderBy(), is(0));
} |
public Map<FeatureOption, MergingStrategy> computeMergingStrategies(
List<SqlTableLikeOption> mergingOptions) {
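// start from the defaults, let an ALL option override every feature, then apply per-feature options last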
Map<FeatureOption, MergingStrategy> result = new HashMap<>(defaultMergingStrategies);
Optional<SqlTableLikeOption> maybeAllOption =
mergingOptions.stream()
.filter(option -> option.getFeatureOption() == FeatureOption.ALL)
.findFirst();
maybeAllOption.ifPresent(
(allOption) -> {
MergingStrategy strategy = allOption.getMergingStrategy();
for (FeatureOption featureOption : FeatureOption.values()) {
if (featureOption != FeatureOption.ALL) {
result.put(featureOption, strategy);
}
}
});
for (SqlTableLikeOption mergingOption : mergingOptions) {
result.put(mergingOption.getFeatureOption(), mergingOption.getMergingStrategy());
}
return result;
} | @Test
void defaultMergeStrategies() {
Map<FeatureOption, MergingStrategy> mergingStrategies =
util.computeMergingStrategies(Collections.emptyList());
assertThat(mergingStrategies.get(FeatureOption.OPTIONS))
.isEqualTo(MergingStrategy.OVERWRITING);
assertThat(mergingStrategies.get(FeatureOption.PARTITIONS))
.isEqualTo(MergingStrategy.INCLUDING);
assertThat(mergingStrategies.get(FeatureOption.CONSTRAINTS))
.isEqualTo(MergingStrategy.INCLUDING);
assertThat(mergingStrategies.get(FeatureOption.GENERATED))
.isEqualTo(MergingStrategy.INCLUDING);
assertThat(mergingStrategies.get(FeatureOption.WATERMARKS))
.isEqualTo(MergingStrategy.INCLUDING);
} |
@Override
public JSONObject getLastScreenTrackProperties() {
return new JSONObject();
} | @Test
public void getLastScreenTrackProperties() {
Assert.assertEquals(0, mSensorsAPI.getLastScreenTrackProperties().length());
} |
public List<LoadJob> getLoadJobsByDb(long dbId, String labelValue, boolean accurateMatch) {
List<LoadJob> loadJobList = Lists.newArrayList();
readLock();
try {
if (dbId != -1 && !dbIdToLabelToLoadJobs.containsKey(dbId)) {
return loadJobList;
}
for (Map<String, List<LoadJob>> dbJobs : dbIdToLabelToLoadJobs.values()) {
Map<String, List<LoadJob>> labelToLoadJobs = dbId == -1 ? dbJobs : dbIdToLabelToLoadJobs.get(dbId);
if (Strings.isNullOrEmpty(labelValue)) {
loadJobList.addAll(labelToLoadJobs.values()
.stream().flatMap(Collection::stream).collect(Collectors.toList()));
} else {
// check label value
if (accurateMatch) {
if (!labelToLoadJobs.containsKey(labelValue)) {
return loadJobList;
}
loadJobList.addAll(labelToLoadJobs.get(labelValue));
} else {
// fuzzy match: include jobs whose label contains the given value
for (Map.Entry<String, List<LoadJob>> entry : labelToLoadJobs.entrySet()) {
if (entry.getKey().contains(labelValue)) {
loadJobList.addAll(entry.getValue());
}
}
}
}
if (dbId != -1) {
break;
}
}
return loadJobList;
} finally {
readUnlock();
}
} | @Test
public void testGetLoadJobsByDb(@Mocked GlobalStateMgr globalStateMgr) throws MetaNotFoundException {
LoadMgr loadMgr = new LoadMgr(new LoadJobScheduler());
LoadJob job1 = new InsertLoadJob("job1", 1L, 1L, System.currentTimeMillis(), "", "", null);
Deencapsulation.invoke(loadMgr, "addLoadJob", job1);
Assert.assertTrue(loadMgr.getLoadJobsByDb(2L, "job1", true).isEmpty());
Assert.assertEquals(1, loadMgr.getLoadJobsByDb(1L, "job1", true).size());
} |
@Override
public void onWorkflowFinalized(Workflow workflow) {
WorkflowSummary summary = StepHelper.retrieveWorkflowSummary(objectMapper, workflow.getInput());
WorkflowRuntimeSummary runtimeSummary = retrieveWorkflowRuntimeSummary(workflow);
String reason = workflow.getReasonForIncompletion();
LOG.info(
"Workflow {} with execution_id [{}] is finalized with internal state [{}] and reason [{}]",
summary.getIdentity(),
workflow.getWorkflowId(),
workflow.getStatus(),
reason);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"onWorkflowFinalized",
MetricConstants.STATUS_TAG,
workflow.getStatus().name());
if (reason != null
&& workflow.getStatus() == Workflow.WorkflowStatus.FAILED
&& reason.startsWith(MaestroStartTask.DEDUP_FAILURE_PREFIX)) {
LOG.info(
"Workflow {} with execution_id [{}] has not actually started, thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId());
return; // special case doing nothing
}
WorkflowInstance.Status instanceStatus =
instanceDao.getWorkflowInstanceStatus(
summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId());
if (instanceStatus == null
|| (instanceStatus.isTerminal() && workflow.getStatus().isTerminal())) {
LOG.info(
"Workflow {} with execution_id [{}] does not exist or already "
+ "in a terminal state [{}] with internal state [{}], thus skip onWorkflowFinalized.",
summary.getIdentity(),
workflow.getWorkflowId(),
instanceStatus,
workflow.getStatus());
return;
}
Map<String, Task> realTaskMap = TaskHelper.getUserDefinedRealTaskMap(workflow);
// cancel internally failed tasks
realTaskMap.values().stream()
.filter(task -> !StepHelper.retrieveStepStatus(task.getOutputData()).isTerminal())
.forEach(task -> maestroTask.cancel(workflow, task, null));
WorkflowRuntimeOverview overview =
TaskHelper.computeOverview(
objectMapper, summary, runtimeSummary.getRollupBase(), realTaskMap);
try {
validateAndUpdateOverview(overview, summary);
switch (workflow.getStatus()) {
case TERMINATED: // stopped due to stop request
if (reason != null && reason.startsWith(FAILURE_REASON_PREFIX)) {
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
} else {
update(workflow, WorkflowInstance.Status.STOPPED, summary, overview);
}
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
default: // other status (FAILED, COMPLETED, PAUSED, RUNNING) to be handled here.
Optional<Task.Status> done =
TaskHelper.checkProgress(realTaskMap, summary, overview, true);
switch (done.orElse(Task.Status.IN_PROGRESS)) {
/**
* This is a special status to indicate that the workflow has succeeded. Check {@link
* TaskHelper#checkProgress} for more details.
*/
case FAILED_WITH_TERMINAL_ERROR:
WorkflowInstance.Status nextStatus =
AggregatedViewHelper.deriveAggregatedStatus(
instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview);
if (!nextStatus.isTerminal()) {
throw new MaestroInternalError(
"Invalid status: [%s], expecting a terminal one", nextStatus);
}
update(workflow, nextStatus, summary, overview);
break;
case FAILED:
case CANCELED: // due to step failure
update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
break;
case TIMED_OUT:
update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
break;
// all other statuses are invalid
default:
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"invalidStatusOnWorkflowFinalized");
throw new MaestroInternalError(
"Invalid status [%s] onWorkflowFinalized", workflow.getStatus());
}
break;
}
} catch (MaestroInternalError | IllegalArgumentException e) {
// non-retryable error and still fail the instance
LOG.warn("onWorkflowFinalized is failed with a non-retryable error", e);
metrics.counter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
getClass(),
TYPE_TAG,
"nonRetryableErrorOnWorkflowFinalized");
update(
workflow,
WorkflowInstance.Status.FAILED,
summary,
overview,
Details.create(
e.getMessage(), "onWorkflowFinalized failed with a non-retryable error."));
}
} | @Test
public void testDaoErrorOnWorkflowFinalized() {
when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.TERMINATED);
when(instanceDao.getWorkflowInstanceStatus(eq("test-workflow-id"), anyLong(), anyLong()))
.thenReturn(WorkflowInstance.Status.IN_PROGRESS);
when(instanceDao.updateWorkflowInstance(any(), any(), any(), any(), anyLong()))
.thenReturn(Optional.of(Details.create("test errors")));
AssertHelper.assertThrows(
"instance dao failure and will retry",
MaestroRetryableError.class,
"Failed to update workflow instance",
() -> statusListener.onWorkflowFinalized(workflow));
Assert.assertEquals(
1L,
metricRepo
.getCounter(
MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
MaestroWorkflowStatusListener.class,
"type",
"onWorkflowFinalized",
"status",
"TERMINATED")
.count());
} |
public static Credentials loadCredentials(String password, String source)
throws IOException, CipherException {
return loadCredentials(password, new File(source));
} | @Test
public void testLoadCredentialsFromFile() throws Exception {
Credentials credentials =
WalletUtils.loadCredentials(
PASSWORD,
new File(
WalletUtilsTest.class
.getResource(
"/keyfiles/"
+ "UTC--2016-11-03T05-55-06."
+ "340672473Z--ef678007d18427e6022059dbc264f27507cd1ffc")
.getFile()));
assertEquals(credentials, (CREDENTIALS));
} |
public static <K> KTableHolder<K> build(
final KTableHolder<K> left,
final KTableHolder<K> right,
final TableTableJoin<K> join
) {
final LogicalSchema leftSchema;
final LogicalSchema rightSchema;
if (join.getJoinType().equals(RIGHT)) {
leftSchema = right.getSchema();
rightSchema = left.getSchema();
} else {
leftSchema = left.getSchema();
rightSchema = right.getSchema();
}
final JoinParams joinParams = JoinParamsFactory
.create(join.getKeyColName(), leftSchema, rightSchema);
final KTable<K, GenericRow> result;
switch (join.getJoinType()) {
case INNER:
result = left.getTable().join(right.getTable(), joinParams.getJoiner());
break;
case LEFT:
result = left.getTable().leftJoin(right.getTable(), joinParams.getJoiner());
break;
case RIGHT:
result = right.getTable().leftJoin(left.getTable(), joinParams.getJoiner());
break;
case OUTER:
result = left.getTable().outerJoin(right.getTable(), joinParams.getJoiner());
break;
default:
throw new IllegalStateException("invalid join type: " + join.getJoinType());
}
return KTableHolder.unmaterialized(
result,
joinParams.getSchema(),
left.getExecutionKeyFactory());
} | @Test
public void shouldDoLeftJoinWithSyntheticKey() {
// Given:
givenLeftJoin(SYNTH_KEY);
// When:
join.build(planBuilder, planInfo);
// Then:
verify(leftKTable).leftJoin(
same(rightKTable),
eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 1))
);
} |
@Override
public RFuture<Boolean> removeAsync(Object value) {
CompletableFuture<Boolean> f = CompletableFuture.supplyAsync(() -> remove(value), getServiceManager().getExecutor());
return new CompletableFutureWrapper<>(f);
} | @Test
public void testRemoveAsync() throws InterruptedException, ExecutionException {
RSortedSet<Integer> set = redisson.getSortedSet("simple");
set.add(1);
set.add(3);
set.add(7);
Assertions.assertTrue(set.removeAsync(1).get());
Assertions.assertFalse(set.contains(1));
assertThat(set).containsExactly(3, 7);
Assertions.assertFalse(set.removeAsync(1).get());
assertThat(set).containsExactly(3, 7);
set.removeAsync(3).get();
Assertions.assertFalse(set.contains(3));
assertThat(set).containsExactly(7);
} |
public static MapBackedDMNContext of(Map<String, Object> ctx) {
return new MapBackedDMNContext(ctx);
} | @Test
void contextWithEntries() {
MapBackedDMNContext ctx1 = MapBackedDMNContext.of(new HashMap<>(DEFAULT_ENTRIES));
testCloneAndAlter(ctx1, DEFAULT_ENTRIES, Collections.emptyMap());
MapBackedDMNContext ctx2 = MapBackedDMNContext.of(new HashMap<>(DEFAULT_ENTRIES));
testPushAndPopScope(ctx2, DEFAULT_ENTRIES, Collections.emptyMap());
} |
@Override
public ClusterHealth checkCluster() {
checkState(!nodeInformation.isStandalone(), "Clustering is not enabled");
checkState(sharedHealthState != null, "HealthState instance can't be null when clustering is enabled");
Set<NodeHealth> nodeHealths = sharedHealthState.readAll();
Health health = clusterHealthChecks.stream()
.map(clusterHealthCheck -> clusterHealthCheck.check(nodeHealths))
.reduce(Health.GREEN, HealthReducer::merge);
return new ClusterHealth(health, nodeHealths);
} | @Test
public void checkCluster_returns_YELLOW_status_if_only_GREEN_and_at_least_one_YELLOW_statuses_returned_by_ClusterHealthChecks() {
when(nodeInformation.isStandalone()).thenReturn(false);
List<Health.Status> statuses = new ArrayList<>();
Stream.concat(
IntStream.range(0, 1 + random.nextInt(20)).mapToObj(i -> YELLOW), // at least 1 YELLOW
IntStream.range(0, random.nextInt(20)).mapToObj(i -> GREEN)).forEach(statuses::add); // between 0 and 19 GREEN
Collections.shuffle(statuses);
HealthCheckerImpl underTest = newClusterHealthCheckerImpl(statuses.stream());
assertThat(underTest.checkCluster().getHealth().getStatus())
.describedAs("%s should have been computed from %s statuses", YELLOW, statuses)
.isEqualTo(YELLOW);
} |