focal_method | test_case
---|---
public static <T> List<List<T>> splitAvg(List<T> list, int limit) {
if (CollUtil.isEmpty(list)) {
return empty();
}
return (list instanceof RandomAccess)
? new RandomAccessAvgPartition<>(list, limit)
: new AvgPartition<>(list, limit);
} | @Test
public void splitAvgNotZero() {
assertThrows(IllegalArgumentException.class, () -> {
// limit must not be less than or equal to 0
ListUtil.splitAvg(Arrays.asList(1, 2, 3, 4), 0);
});
} |
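A minimal JDK-only sketch of the average-partition idea behind `AvgPartition`, assuming (as Hutool's implementation appears to) that the remainder is spread over the leading sublists; the class and method names here are illustrative, not Hutool's.

```java
import java.util.ArrayList;
import java.util.List;

public class AvgPartitionSketch {
    // Split 'list' into 'limit' sublists whose sizes differ by at most one,
    // giving the first (size % limit) partitions one extra element.
    static <T> List<List<T>> splitAvg(List<T> list, int limit) {
        if (limit <= 0) {
            throw new IllegalArgumentException("limit must be greater than 0");
        }
        List<List<T>> result = new ArrayList<>(limit);
        int size = list.size() / limit;      // base size of each partition
        int remainder = list.size() % limit; // extras for the leading partitions
        int offset = 0;
        for (int i = 0; i < limit; i++) {
            int partitionSize = size + (i < remainder ? 1 : 0);
            result.add(new ArrayList<>(list.subList(offset, offset + partitionSize)));
            offset += partitionSize;
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(splitAvg(List.of(1, 2, 3, 4), 3)); // [[1, 2], [3], [4]]
    }
}
```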
@Override
public boolean containsAll(Collection<?> c) {
return get(containsAllAsync(c));
} | @Test
public void testContainsAll() {
RScoredSortedSet<Integer> set = redisson.getScoredSortedSet("simple");
for (int i = 0; i < 200; i++) {
set.add(i, i);
}
Assertions.assertTrue(set.containsAll(Arrays.asList(30, 11)));
Assertions.assertFalse(set.containsAll(Arrays.asList(30, 711, 11)));
} |
protected String getFileName(double lat, double lon) {
lon = 1 + (180 + lon) / LAT_DEGREE;
int lonInt = (int) lon;
lat = 1 + (60 - lat) / LAT_DEGREE;
int latInt = (int) lat;
if (Math.abs(latInt - lat) < invPrecision / LAT_DEGREE)
latInt--;
// replace String.format as it seems to be slow
// String.format("srtm_%02d_%02d", lonInt, latInt);
String str = "srtm_";
str += lonInt < 10 ? "0" : "";
str += lonInt;
str += latInt < 10 ? "_0" : "_";
str += latInt;
return str;
} | @Test
public void testFileNotFound() {
File file = new File(instance.getCacheDir(), instance.getFileName(46, -20) + ".gh");
File zipFile = new File(instance.getCacheDir(), instance.getFileName(46, -20) + ".zip");
file.delete();
zipFile.delete();
instance.setDownloader(new Downloader("test GH") {
@Override
public void downloadFile(String url, String toFile) throws IOException {
throw new FileNotFoundException("xyz");
}
});
assertEquals(0, instance.getEle(46, -20), 1);
// file not found
assertTrue(file.exists());
assertEquals(1048676, file.length());
instance.setDownloader(new Downloader("test GH") {
@Override
public void downloadFile(String url, String toFile) throws IOException {
throw new SocketTimeoutException("xyz");
}
});
try {
instance.setSleep(30);
instance.getEle(16, -20);
fail();
} catch (Exception ex) {
}
file.delete();
zipFile.delete();
} |
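The tile-name arithmetic in `getFileName` maps a (lat, lon) pair onto grid indices; below is a worked sketch of the same arithmetic, assuming `LAT_DEGREE = 5` (the 5-degree CGIAR SRTM tiling) and ignoring the `invPrecision` boundary correction in the focal method.

```java
// A sketch of the tile-name arithmetic, assuming LAT_DEGREE = 5 and
// omitting the invPrecision boundary correction.
public class SrtmNameSketch {
    static final int LAT_DEGREE = 5; // assumption: 5-degree tiles

    static String fileName(double lat, double lon) {
        int lonInt = (int) (1 + (180 + lon) / LAT_DEGREE);
        int latInt = (int) (1 + (60 - lat) / LAT_DEGREE);
        return String.format("srtm_%02d_%02d", lonInt, latInt);
    }

    public static void main(String[] args) {
        // (46, -20): lon index = 1 + 160/5 = 33; lat index = 1 + 14/5 = 3.8 -> 3
        System.out.println(fileName(46, -20)); // srtm_33_03
    }
}
```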
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
if(containerService.isContainer(folder)) {
final S3BucketCreateService service = new S3BucketCreateService(session);
service.create(folder, StringUtils.isBlank(status.getRegion()) ?
new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getDefault().getIdentifier() : status.getRegion());
return folder;
}
else {
final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
type.add(Path.Type.placeholder);
return new S3TouchFeature(session, acl).withWriter(writer).touch(folder
.withType(type), status
// Add placeholder object
.withMime(MIMETYPE)
.withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status)));
}
} | @Test
public void testCreatePlaceholder() throws Exception {
final AtomicBoolean b = new AtomicBoolean();
final String name = new AlphanumericRandomStringService().random();
session.withListener(new TranscriptListener() {
@Override
public void log(final Type request, final String message) {
switch(request) {
case request:
if(("PUT /" + name + "/ HTTP/1.1").equals(message)) {
b.set(true);
}
}
}
});
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final Path test = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(container, name, EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(test.getType().contains(Path.Type.placeholder));
assertTrue(b.get());
assertTrue(new S3FindFeature(session, acl).find(test));
assertTrue(new S3ObjectListService(session, acl).list(container, new DisabledListProgressListener()).contains(test));
assertTrue(new S3ObjectListService(session, acl).list(test, new DisabledListProgressListener()).isEmpty());
assertTrue(new S3VersionedObjectListService(session, acl).list(container, new DisabledListProgressListener()).contains(test));
assertTrue(new S3VersionedObjectListService(session, acl).list(test, new DisabledListProgressListener()).isEmpty());
assertTrue(new DefaultFindFeature(session).find(test));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
ApolloNotificationMessages transformMessages(String messagesAsString) {
ApolloNotificationMessages notificationMessages = null;
if (!Strings.isNullOrEmpty(messagesAsString)) {
try {
notificationMessages = gson.fromJson(messagesAsString, ApolloNotificationMessages.class);
} catch (Throwable ex) {
Tracer.logError(ex);
}
}
return notificationMessages;
} | @Test
public void testTransformMessages() throws Exception {
String someKey = "someKey";
long someNotificationId = 1;
String anotherKey = "anotherKey";
long anotherNotificationId = 2;
ApolloNotificationMessages notificationMessages = new ApolloNotificationMessages();
notificationMessages.put(someKey, someNotificationId);
notificationMessages.put(anotherKey, anotherNotificationId);
String someMessagesAsString = gson.toJson(notificationMessages);
ApolloNotificationMessages result = configController.transformMessages(someMessagesAsString);
assertEquals(notificationMessages.getDetails(), result.getDetails());
} |
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);
return ArtifactResponseBuilder
.newInstance(artifactResponse)
.addID()
.addIssueInstant()
.addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
.addStatus(StatusBuilder
.newInstance(status)
.addStatusCode(statusCode, StatusCode.SUCCESS)
.build())
.addIssuer(issuer, entityId)
.addMessage(buildResponse(artifactResolveRequest, entityId, signType))
.addSignature(signatureService, signType)
.build();
} | @Test
void parseArtifactResolveFailed() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException {
ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(getArtifactResolveRequest("failed", true, false, SAML_COMBICONNECT, EncryptionType.BSN, ENTRANCE_ENTITY_ID), ENTRANCE_ENTITY_ID, TD);
assertEquals("urn:oasis:names:tc:SAML:2.0:status:Responder", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getValue());
assertEquals("urn:oasis:names:tc:SAML:2.0:status:RequestDenied", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getStatusCode().getValue());
} |
@Override
public void add(T item) {
final int sizeAtTimeOfAdd;
synchronized (items) {
items.add(item);
sizeAtTimeOfAdd = items.size();
}
/*
WARNING: It is possible that the item that was just added to the list
has been processed by an existing idle task at this point.
By rescheduling the following timers, it is possible that a
superfluous maxTask is generated now OR that the idle task and max
task are scheduled at their specified delays. This could result in
calls to processItems sooner than expected.
*/
// Did we hit the max item threshold?
if (sizeAtTimeOfAdd >= maxItems) {
if (maxIdleMillis < maxBatchMillis) {
cancelTask(idleTask);
}
rescheduleTask(maxTask, 0 /* now! */);
} else {
// Otherwise, schedule idle task and if this is a first item
// also schedule the max batch age task.
if (maxIdleMillis < maxBatchMillis) {
rescheduleTask(idleTask, maxIdleMillis);
}
if (sizeAtTimeOfAdd == 1) {
rescheduleTask(maxTask, maxBatchMillis);
}
}
} | @Test
public void readyMaxTrigger() {
TestAccumulator accumulator = new TestAccumulator();
accumulator.ready = false;
accumulator.add(new TestItem("a"));
accumulator.add(new TestItem("b"));
accumulator.add(new TestItem("c"));
accumulator.add(new TestItem("d"));
accumulator.add(new TestItem("e"));
accumulator.add(new TestItem("f"));
assertTrue("should not have fired yet", accumulator.batch.isEmpty());
accumulator.ready = true;
accumulator.add(new TestItem("g"));
timer.advanceTimeMillis(10, LONG_REAL_TIME_DELAY);
assertFalse("should have fired", accumulator.batch.isEmpty());
assertEquals("incorrect batch", "abcdefg", accumulator.batch);
} |
public BaseIdentityProvider.Context newContext(HttpRequest request, HttpResponse response, BaseIdentityProvider identityProvider) {
return new ContextImpl(request, response, identityProvider);
} | @Test
public void authenticate() {
JavaxHttpRequest httpRequest = new JavaxHttpRequest(request);
JavaxHttpResponse httpResponse = new JavaxHttpResponse(response);
BaseIdentityProvider.Context context = underTest.newContext(httpRequest, httpResponse, identityProvider);
ArgumentCaptor<UserDto> userArgumentCaptor = ArgumentCaptor.forClass(UserDto.class);
context.authenticate(USER_IDENTITY);
assertThat(userIdentityAuthenticator.isAuthenticated()).isTrue();
verify(threadLocalUserSession).set(any(UserSession.class));
verify(jwtHttpHandler).generateToken(userArgumentCaptor.capture(), eq(httpRequest), eq(httpResponse));
assertThat(userArgumentCaptor.getValue().getExternalId()).isEqualTo(USER_IDENTITY.getProviderId());
assertThat(userArgumentCaptor.getValue().getExternalLogin()).isEqualTo(USER_IDENTITY.getProviderLogin());
assertThat(userArgumentCaptor.getValue().getExternalIdentityProvider()).isEqualTo("github");
} |
@Nullable static String route(ContainerRequest request) {
ExtendedUriInfo uriInfo = request.getUriInfo();
List<UriTemplate> templates = uriInfo.getMatchedTemplates();
int templateCount = templates.size();
if (templateCount == 0) return "";
StringBuilder builder = null; // don't allocate unless you need it!
String basePath = uriInfo.getBaseUri().getPath();
String result = null;
if (!"/" .equals(basePath)) { // skip empty base paths
result = basePath;
}
for (int i = templateCount - 1; i >= 0; i--) {
String template = templates.get(i).getTemplate();
if ("/" .equals(template)) continue; // skip allocation
if (builder != null) {
builder.append(template);
} else if (result != null) {
builder = new StringBuilder(result).append(template);
result = null;
} else {
result = template;
}
}
return result != null ? result : builder != null ? builder.toString() : "";
} | @Test void ignoresEventsExceptFinish() {
setBaseUri("/");
when(uriInfo.getMatchedTemplates()).thenReturn(Arrays.asList(
new PathTemplate("/"),
new PathTemplate("/items/{itemId}")
));
assertThat(SpanCustomizingApplicationEventListener.route(request))
.isEqualTo("/items/{itemId}");
} |
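The focal method walks the matched templates from the end of the list toward the front, skipping bare "/" entries and deferring StringBuilder allocation until two segments must be joined. A plain-string sketch of that reverse join, independent of Jersey's types:

```java
import java.util.Arrays;
import java.util.List;

public class RouteJoinSketch {
    // Join matched templates in reverse list order, skipping bare "/" entries,
    // mirroring the focal method's loop (without its allocation tricks).
    static String route(String basePath, List<String> matchedTemplates) {
        StringBuilder sb = new StringBuilder("/".equals(basePath) ? "" : basePath);
        for (int i = matchedTemplates.size() - 1; i >= 0; i--) {
            String template = matchedTemplates.get(i);
            if (!"/".equals(template)) sb.append(template);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // Same inputs as the test: base "/" plus templates ["/", "/items/{itemId}"].
        System.out.println(route("/", Arrays.asList("/", "/items/{itemId}")));
        // -> /items/{itemId}
    }
}
```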
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return true;
}
try {
new SFTPAttributesFinderFeature(session).find(file, listener);
return true;
}
catch(NotfoundException e) {
// We expect SSH_FXP_STATUS if the file is not found
return false;
}
} | @Test
public void testFindFile() throws Exception {
final Path file = new Path(new SFTPHomeDirectoryService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SFTPTouchFeature(session).touch(file, new TransferStatus());
assertTrue(new SFTPFindFeature(session).find(file));
assertFalse(new SFTPFindFeature(session).find(new Path(file.getAbsolute(), EnumSet.of(Path.Type.directory))));
new SFTPDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public void registerService(String serviceName, String groupName, Instance instance) throws NacosException {
getExecuteClientProxy(instance).registerService(serviceName, groupName, instance);
} | @Test
void testRegisterPersistentServiceByHttp() throws NacosException, NoSuchFieldException, IllegalAccessException {
NamingHttpClientProxy mockHttpClient = Mockito.mock(NamingHttpClientProxy.class);
Field mockHttpClientField = NamingClientProxyDelegate.class.getDeclaredField("httpClientProxy");
mockHttpClientField.setAccessible(true);
mockHttpClientField.set(delegate, mockHttpClient);
String serviceName = "service1";
String groupName = "group1";
Instance instance = new Instance();
instance.setServiceName(serviceName);
instance.setClusterName(groupName);
instance.setIp("1.1.1.1");
instance.setPort(1);
// persistent instance
instance.setEphemeral(false);
// when server do not support register persistent instance by grpc, will use http to register
delegate.registerService(serviceName, groupName, instance);
verify(mockHttpClient, times(1)).registerService(serviceName, groupName, instance);
} |
final void logJobFilterTime(JobFilter jobFilter, long durationInNanos) {
if (NANOSECONDS.toMillis(durationInNanos) > 10) {
getLogger().warn("JobFilter of type '{}' has slow performance of {}ms (a Job Filter should run under 10ms) which negatively impacts the overall functioning of JobRunr. JobRunr Pro can run slow running Job Filters without a negative performance impact.", jobFilter.getClass().getName(), NANOSECONDS.toMillis(durationInNanos));
}
} | @Test
void ifJobFilterIsTooSlowAMessageIsLogged() {
MyJobFilter myJobFilter = new MyJobFilter();
JobCreationFilters jobCreationFilters = new JobCreationFilters(anEnqueuedJob().build(), new JobDefaultFilters(myJobFilter));
final ListAppender<ILoggingEvent> logger = LoggerAssert.initFor(jobCreationFilters);
jobCreationFilters.logJobFilterTime(myJobFilter, 11000000);
jobCreationFilters.logJobFilterTime(myJobFilter, 11000000);
jobCreationFilters.logJobFilterTime(myJobFilter, 11000000);
jobCreationFilters.logJobFilterTime(myJobFilter, 11000000);
jobCreationFilters.logJobFilterTime(myJobFilter, 11000000);
assertThat(logger).hasWarningMessageContaining(
"JobFilter of type 'org.jobrunr.jobs.filters.AbstractJobFiltersTest$MyJobFilter' has slow performance of 11ms (a Job Filter should run under 10ms) which negatively impacts the overall functioning of JobRunr",
5,
emptyMap()
);
} |
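The raw `11000000` the test passes is nanoseconds; a quick check of the unit conversion the focal method performs shows why it lands just over the warning threshold.

```java
import static java.util.concurrent.TimeUnit.NANOSECONDS;

public class FilterTimingSketch {
    public static void main(String[] args) {
        long durationInNanos = 11_000_000L;
        // 11,000,000 ns == 11 ms, one millisecond over the 10 ms warning threshold.
        System.out.println(NANOSECONDS.toMillis(durationInNanos)); // 11
    }
}
```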
public ParResponse requestPushedUri(
URI pushedAuthorizationRequestUri, ParBodyBuilder parBodyBuilder) {
var headers =
List.of(
new Header(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON),
new Header(HttpHeaders.CONTENT_TYPE, UrlFormBodyBuilder.MEDIA_TYPE));
var req = new Request(pushedAuthorizationRequestUri, "POST", headers, parBodyBuilder.build());
var res = httpClient.call(req);
if (res.status() != 201) {
throw HttpExceptions.httpFailBadStatus(
req.method(), pushedAuthorizationRequestUri, res.status());
}
return JsonCodec.readValue(res.body(), ParResponse.class);
} | @Test
void requestPushedUri_badStatus(WireMockRuntimeInfo wm) {
var path = "/auth/par";
stubFor(post(path).willReturn(badRequest()));
var base = URI.create(wm.getHttpBaseUrl());
var parUri = base.resolve(path);
var e =
assertThrows(
HttpException.class, () -> client.requestPushedUri(parUri, ParBodyBuilder.create()));
assertEquals(
"http request failed: bad status 'POST %s' status=400".formatted(parUri), e.getMessage());
} |
@Override
public boolean isWarnEnabled() {
return logger.isWarnEnabled();
} | @Test
public void testIsWarnEnabled() {
Logger mockLogger = mock(Logger.class);
when(mockLogger.getName()).thenReturn("foo");
when(mockLogger.isWarnEnabled()).thenReturn(true);
InternalLogger logger = new Slf4JLogger(mockLogger);
assertTrue(logger.isWarnEnabled());
verify(mockLogger).getName();
verify(mockLogger).isWarnEnabled();
} |
public ProtocolBuilder telnet(String telnet) {
this.telnet = telnet;
return getThis();
} | @Test
void telnet() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.telnet("mocktelnethandler");
Assertions.assertEquals("mocktelnethandler", builder.build().getTelnet());
} |
public static InstancePort swapStaleLocation(InstancePort instPort) {
return DefaultInstancePort.builder()
.deviceId(instPort.oldDeviceId())
.portNumber(instPort.oldPortNumber())
.state(instPort.state())
.ipAddress(instPort.ipAddress())
.macAddress(instPort.macAddress())
.networkId(instPort.networkId())
.portId(instPort.portId())
.build();
} | @Test
public void testSwapStaleLocation() {
InstancePort swappedInstancePort = swapStaleLocation(instancePort3);
assertEquals(instancePort3.oldDeviceId(), swappedInstancePort.deviceId());
assertEquals(instancePort3.oldPortNumber(), swappedInstancePort.portNumber());
} |
public MetadataReportBuilder syncReport(Boolean syncReport) {
this.syncReport = syncReport;
return getThis();
} | @Test
void syncReport() {
MetadataReportBuilder builder = new MetadataReportBuilder();
builder.syncReport(true);
Assertions.assertTrue(builder.build().getSyncReport());
builder.syncReport(false);
Assertions.assertFalse(builder.build().getSyncReport());
builder.syncReport(null);
Assertions.assertNull(builder.build().getSyncReport());
} |
public static DataSchema avroToDataSchema(String avroSchemaInJson, AvroToDataSchemaTranslationOptions options)
throws IllegalArgumentException
{
ValidationOptions validationOptions = SchemaParser.getDefaultSchemaParserValidationOptions();
validationOptions.setAvroUnionMode(true);
SchemaParserFactory parserFactory = SchemaParserFactory.instance(validationOptions);
DataSchemaResolver resolver = getResolver(parserFactory, options);
PegasusSchemaParser parser = parserFactory.create(resolver);
parser.parse(avroSchemaInJson);
if (parser.hasError())
{
throw new IllegalArgumentException(parser.errorMessage());
}
assert(parser.topLevelDataSchemas().size() == 1);
DataSchema dataSchema = parser.topLevelDataSchemas().get(0);
DataSchema resultDataSchema = null;
AvroToDataSchemaTranslationMode translationMode = options.getTranslationMode();
if (translationMode == AvroToDataSchemaTranslationMode.RETURN_EMBEDDED_SCHEMA ||
translationMode == AvroToDataSchemaTranslationMode.VERIFY_EMBEDDED_SCHEMA)
{
// check for embedded schema
Object dataProperty = dataSchema.getProperties().get(SchemaTranslator.DATA_PROPERTY);
if (dataProperty != null && dataProperty.getClass() == DataMap.class)
{
Object schemaProperty = ((DataMap) dataProperty).get(SchemaTranslator.SCHEMA_PROPERTY);
if (schemaProperty.getClass() == DataMap.class)
{
SchemaParser embeddedSchemaParser = SchemaParserFactory.instance().create(null);
embeddedSchemaParser.parse(Arrays.asList(schemaProperty));
if (embeddedSchemaParser.hasError())
{
throw new IllegalArgumentException("Embedded schema is invalid\n" + embeddedSchemaParser.errorMessage());
}
assert(embeddedSchemaParser.topLevelDataSchemas().size() == 1);
resultDataSchema = embeddedSchemaParser.topLevelDataSchemas().get(0);
if (translationMode == AvroToDataSchemaTranslationMode.VERIFY_EMBEDDED_SCHEMA)
{
// additional verification to make sure that embedded schema translates to Avro schema
DataToAvroSchemaTranslationOptions dataToAvroSchemaOptions = new DataToAvroSchemaTranslationOptions();
Object optionalDefaultModeProperty = ((DataMap) dataProperty).get(SchemaTranslator.OPTIONAL_DEFAULT_MODE_PROPERTY);
dataToAvroSchemaOptions.setOptionalDefaultMode(OptionalDefaultMode.valueOf(optionalDefaultModeProperty.toString()));
Schema avroSchemaFromEmbedded = dataToAvroSchema(resultDataSchema, dataToAvroSchemaOptions);
Schema avroSchemaFromJson = AvroCompatibilityHelper.parse(avroSchemaInJson, SchemaParseConfiguration.STRICT, null).getMainSchema();
Object embededSchemaPropertyVal = avroSchemaFromJson.getObjectProp(DATA_PROPERTY);
if (embededSchemaPropertyVal != null)
{
avroSchemaFromEmbedded.addProp(DATA_PROPERTY, embededSchemaPropertyVal);
}
if (!avroSchemaFromEmbedded.equals(avroSchemaFromJson))
{
throw new IllegalArgumentException("Embedded schema does not translate to input Avro schema: " + avroSchemaInJson);
}
}
}
}
}
if (resultDataSchema == null)
{
// translationMode == TRANSLATE or no embedded schema
DataSchemaTraverse traverse = new DataSchemaTraverse();
traverse.traverse(dataSchema, AvroToDataSchemaConvertCallback.INSTANCE);
// convert default values
traverse.traverse(dataSchema, DefaultAvroToDataConvertCallback.INSTANCE);
// make sure it can round-trip
String dataSchemaJson = dataSchema.toString();
resultDataSchema = DataTemplateUtil.parseSchema(dataSchemaJson);
}
return resultDataSchema;
} | @Test
public void testAvroPartialDefaultFields() throws IOException
{
String schemaWithPartialDefaultFields = "{" +
" \"type\": \"record\"," +
" \"name\": \"testRecord\"," +
" \"fields\": [" +
" {" +
" \"name\": \"recordFieldWithDefault\"," +
" \"type\": {" +
" \"type\": \"record\"," +
" \"name\": \"recordType\"," +
" \"fields\": [" +
" {" +
" \"name\": \"mapField\"," +
" \"type\": {" +
" \"type\": \"map\"," +
" \"values\": \"string\"" +
" }" +
" }," +
" {" +
" \"name\": \"optionalRecordField\"," +
" \"type\": [" +
" \"null\"," +
" {" +
" \"type\": \"record\"," +
" \"name\": \"simpleRecordType\"," +
" \"fields\": [" +
" {" +
" \"name\": \"stringField\"," +
" \"type\": \"string\"" +
" }" + " ]" +
" }" +
" ]," +
" \"default\": null" +
" }" +
" ]" +
" }," +
" \"default\": {" +
" \"mapField\": {}" +
" }" +
" }" +
" ]" +
"}";
Schema schema = Schema.parse(schemaWithPartialDefaultFields);
DataSchema dataSchema = SchemaTranslator.avroToDataSchema(schema);
Assert.assertNotNull(dataSchema);
} |
public List<KuduPredicate> convert(ScalarOperator operator) {
if (operator == null) {
return null;
}
return operator.accept(this, null);
} | @Test
public void testLt() {
ConstantOperator value = ConstantOperator.createInt(5);
ScalarOperator op = new BinaryPredicateOperator(BinaryType.LT, F0, value);
List<KuduPredicate> result = CONVERTER.convert(op);
Assert.assertEquals(result.get(0).toString(), "`f0` < 5");
} |
public boolean deleteRole(Role role) {
return rolesConfig.remove(role);
} | @Test
public void shouldReturnTrueIfDeletingARoleGoesThroughSuccessfully() throws Exception {
SecurityConfig securityConfig = security(passwordFileAuthConfig(), admins());
securityConfig.deleteRole(ROLE1);
assertUserRoles(securityConfig, "chris", ROLE2);
assertUserRoles(securityConfig, "jez");
} |
@Override
public ByteBuf writeLongLE(long value) {
ensureWritable0(8);
_setLongLE(writerIndex, value);
writerIndex += 8;
return this;
} | @Test
public void testWriteLongLEAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().writeLongLE(1);
}
});
} |
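`writeLongLE` stores the eight bytes of the long least-significant byte first; a JDK-only illustration of that byte order using `ByteBuffer`, not Netty's internals:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;

public class LongLESketch {
    public static void main(String[] args) {
        // Little-endian: least significant byte first, so 1L is laid out as
        // 01 00 00 00 00 00 00 00.
        ByteBuffer buf = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
        buf.putLong(1L);
        System.out.println(Arrays.toString(buf.array())); // [1, 0, 0, 0, 0, 0, 0, 0]
    }
}
```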
protected ThrowableHandlingConverter createThrowableProxyConverter(LoggerContext context) {
if (exceptionFormat == null) {
return new RootCauseFirstThrowableProxyConverter();
}
ThrowableHandlingConverter throwableHandlingConverter;
if (exceptionFormat.isRootFirst()) {
throwableHandlingConverter = new RootCauseFirstThrowableProxyConverter();
} else {
throwableHandlingConverter = new ExtendedThrowableProxyConverter();
}
List<String> options = new ArrayList<>();
// depth must be added first
options.add(exceptionFormat.getDepth());
options.addAll(exceptionFormat.getEvaluators());
throwableHandlingConverter.setOptionList(options);
throwableHandlingConverter.setContext(context);
return throwableHandlingConverter;
} | @Test
void testCreateThrowableProxyConverter_Default() throws Exception {
EventJsonLayoutBaseFactory factory = new EventJsonLayoutBaseFactory();
ThrowableHandlingConverter converter = factory.createThrowableProxyConverter(new LoggerContext());
converter.start();
assertThat(converter.isStarted()).isTrue();
int originalSize = (int)getStackTraceAsString(proxy.getThrowable()).lines().count();
assertThat(converter.convert(event))
.hasLineCount(originalSize) // Verify that the full stack is included
.containsSubsequence("r00t", "wrapp3d"); // Verify that the root is first
} |
@VisibleForTesting
static void persistIndexMaps(List<IndexEntry> entries, PrintWriter writer) {
for (IndexEntry entry : entries) {
persistIndexMap(entry, writer);
}
} | @Test
public void testPersistIndexMaps() {
ByteArrayOutputStream output = new ByteArrayOutputStream(1024 * 1024);
try (PrintWriter pw = new PrintWriter(output)) {
List<IndexEntry> entries = Arrays
.asList(new IndexEntry(new IndexKey("foo", StandardIndexes.inverted()), 0, 1024),
new IndexEntry(new IndexKey("bar", StandardIndexes.inverted()), 1024, 100),
new IndexEntry(new IndexKey("baz", StandardIndexes.inverted()), 1124, 200));
SingleFileIndexDirectory.persistIndexMaps(entries, pw);
}
assertEquals(output.toString(), "foo.inverted_index.startOffset = 0\nfoo.inverted_index.size = 1024\n"
+ "bar.inverted_index.startOffset = 1024\nbar.inverted_index.size = 100\n"
+ "baz.inverted_index.startOffset = 1124\nbaz.inverted_index.size = 200\n");
} |
@Override
public void deleteFileConfig(Long id) {
// Validate that the config exists
FileConfigDO config = validateFileConfigExists(id);
if (Boolean.TRUE.equals(config.getMaster())) {
throw exception(FILE_CONFIG_DELETE_FAIL_MASTER);
}
// Delete
fileConfigMapper.deleteById(id);
// Clear the cache
clearCache(id, null);
} | @Test
public void testDeleteFileConfig_master() {
// mock data
FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(true);
fileConfigMapper.insert(dbFileConfig);// @Sql: insert an existing record first
// prepare parameters
Long id = dbFileConfig.getId();
// invoke and assert the exception
assertServiceException(() -> fileConfigService.deleteFileConfig(id), FILE_CONFIG_DELETE_FAIL_MASTER);
} |
@Override
public Iterable<ConnectPoint> getEdgePoints() {
checkPermission(TOPOLOGY_READ);
ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
connectionPoints.forEach((k, v) -> v.forEach(builder::add));
return builder.build();
} | @Test
public void testDeviceUpdates() {
//Setup
Device referenceDevice;
DeviceEvent event;
int numDevices = 10;
int numInfraPorts = 5;
totalPorts = 10;
defaultPopulator(numDevices, numInfraPorts);
events.clear();
//Test response to device added events
referenceDevice = NetTestTools.device("11");
devices.put(referenceDevice.id(), referenceDevice);
for (int port = 1; port <= numInfraPorts; port++) {
infrastructurePorts.add(NetTestTools.connectPoint("11", port));
}
event = new DeviceEvent(DEVICE_ADDED, referenceDevice,
new DefaultPort(referenceDevice, PortNumber.portNumber(1), true));
postTopologyEvent(event);
//Check that ports were populated correctly
assertTrue("Unexpected number of new ports added",
mgr.deviceService.getPorts(NetTestTools.did("11")).size() == 10);
//Check that of the ten ports the half that are infrastructure ports aren't added
assertEquals("Unexpected number of new edge ports added", (totalPorts - numInfraPorts), events.size());
for (int index = 0; index < numInfraPorts; index++) {
assertTrue("Unexpected type of event", events.get(index).type() == EDGE_PORT_ADDED);
}
//Names here are irrelevant, the first 5 ports are populated as infrastructure, 6-10 are edge
for (int index = 0; index < events.size(); index++) {
assertEquals("Port added had unexpected port number.",
events.get(index).subject().port(),
NetTestTools.connectPoint("a", index + numInfraPorts + 1).port());
}
events.clear();
//Repost the event to test repeated posts
postTopologyEvent(event);
assertEquals("The redundant notification should not have created additional notifications.",
0, events.size());
//Calculate the size of the returned iterable of edge points.
Iterable<ConnectPoint> pts = mgr.getEdgePoints();
Iterator pointIterator = pts.iterator();
int count = 0;
for (; pointIterator.hasNext(); count++) {
pointIterator.next();
}
assertEquals("Unexpected number of edge points", (numDevices + 1) * numInfraPorts, count);
//Testing device removal
events.clear();
event = (new DeviceEvent(DEVICE_REMOVED, referenceDevice,
new DefaultPort(referenceDevice, PortNumber.portNumber(1), true)));
postTopologyEvent(event);
assertEquals("There should be five new events from removal of edge points",
totalPorts - numInfraPorts, events.size());
for (int index = 0; index < events.size(); index++) {
//Assert that the correct port numbers were removed in the correct order
assertThat("Port removed had unexpected port number.",
events.get(index).subject().port().toLong(),
is(greaterThanOrEqualTo((long) numInfraPorts)));
//Assert that the events are of the correct type
assertEquals("Unexpected type of event", events.get(index).type(), EDGE_PORT_REMOVED);
}
events.clear();
//Rebroadcast event to check that it triggers no new behavior
postTopologyEvent(event);
assertEquals("Rebroadcast of removal event should not produce additional events",
0, events.size());
//Testing device status change, changed from unavailable to available
events.clear();
//Make sure that the devicemanager shows the device as available.
addDevice(referenceDevice, "1", 5);
devices.put(referenceDevice.id(), referenceDevice);
event = new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, referenceDevice);
postTopologyEvent(event);
//An earlier setup set half of the reference device ports to infrastructure
assertEquals("An unexpected number of events were generated.", totalPorts - numInfraPorts, events.size());
for (int i = 0; i < 5; i++) {
assertEquals("The event was not of the right type", events.get(i).type(), EDGE_PORT_ADDED);
}
events.clear();
postTopologyEvent(event);
assertEquals("No events should have been generated for a set of existing ports.", 0, events.size());
//Test removal when state changes when the device becomes unavailable
//Ensure that the deviceManager shows the device as unavailable
removeDevice(referenceDevice);
// This variable copies the behavior of the topology by returning ports
// attached to an unavailable device this behavior is necessary for the
// following event to execute properly, if these statements are removed
// no events will be generated since no ports will be provided in
// getPorts() to EdgeManager.
alwaysReturnPorts = true;
postTopologyEvent(event);
alwaysReturnPorts = false;
assertEquals("An unexpected number of events were created.", totalPorts - numInfraPorts, events.size());
for (int i = 0; i < 5; i++) {
EdgePortEvent edgeEvent = events.get(i);
assertEquals("The event is of an unexpected type.",
EdgePortEvent.Type.EDGE_PORT_REMOVED, edgeEvent.type());
assertThat("The event pertains to an unexpected port",
edgeEvent.subject().port().toLong(),
is(greaterThanOrEqualTo((long) numInfraPorts)));
}
} |
public InterpreterResult hidePasswords(InterpreterResult ret) {
if (ret == null) {
return null;
}
return new InterpreterResult(ret.code(), replacePasswords(ret.message()));
} | @Test
void hidePasswordsNoResult() {
UserCredentials userCredentials = mock(UserCredentials.class);
CredentialInjector testee = new CredentialInjector(userCredentials);
assertNull(testee.hidePasswords(null));
} |
public static Comparator<StructLike> forType(Types.StructType struct) {
return new StructLikeComparator(struct);
} | @Test
public void testTime() {
assertComparesCorrectly(Comparators.forType(Types.TimeType.get()), 111, 222);
} |
@Override
public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) {
AbstractConfig config = new AbstractConfig(CONFIG_DEF, connectorConfig);
String filename = config.getString(FILE_CONFIG);
if (filename == null || filename.isEmpty()) {
throw new ConnectException("Offsets cannot be modified if the '" + FILE_CONFIG + "' configuration is unspecified. " +
"This is because stdin is used for input and offsets are not tracked.");
}
// This connector makes use of a single source partition at a time which represents the file that it is configured to read from.
// However, there could also be source partitions from previous configurations of the connector.
for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : offsets.entrySet()) {
Map<String, ?> offset = partitionOffset.getValue();
if (offset == null) {
// We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
// want to prevent users from being able to clean it up using the REST API
continue;
}
if (!offset.containsKey(POSITION_FIELD)) {
throw new ConnectException("Offset objects should either be null or contain the key '" + POSITION_FIELD + "'");
}
// The 'position' in the offset represents the position in the file's byte stream and should be a non-negative long value
if (!(offset.get(POSITION_FIELD) instanceof Long)) {
throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset is expected to be a Long value");
}
long offsetPosition = (Long) offset.get(POSITION_FIELD);
if (offsetPosition < 0) {
throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset should be a non-negative value");
}
Map<String, ?> partition = partitionOffset.getKey();
if (partition == null) {
throw new ConnectException("Partition objects cannot be null");
}
if (!partition.containsKey(FILENAME_FIELD)) {
throw new ConnectException("Partition objects should contain the key '" + FILENAME_FIELD + "'");
}
}
// Let the task check whether the actual value for the offset position is valid for the configured file on startup
return true;
} | @Test
public void testAlterOffsetsOffsetPositionValues() {
Function<Object, Boolean> alterOffsets = offset -> connector.alterOffsets(sourceProperties, Collections.singletonMap(
Collections.singletonMap(FILENAME_FIELD, FILENAME),
Collections.singletonMap(POSITION_FIELD, offset)
));
assertThrows(ConnectException.class, () -> alterOffsets.apply("nan"));
assertThrows(ConnectException.class, () -> alterOffsets.apply(null));
assertThrows(ConnectException.class, () -> alterOffsets.apply(new Object()));
assertThrows(ConnectException.class, () -> alterOffsets.apply(3.14));
assertThrows(ConnectException.class, () -> alterOffsets.apply(-420));
assertThrows(ConnectException.class, () -> alterOffsets.apply("-420"));
assertThrows(ConnectException.class, () -> alterOffsets.apply(10));
assertThrows(ConnectException.class, () -> alterOffsets.apply("10"));
assertThrows(ConnectException.class, () -> alterOffsets.apply(-10L));
assertTrue(() -> alterOffsets.apply(10L));
} |
public void process()
throws Exception {
if (_segmentMetadata.getTotalDocs() == 0) {
LOGGER.info("Skip preprocessing empty segment: {}", _segmentMetadata.getName());
return;
}
// Segment processing has to be done with a local directory.
File indexDir = new File(_indexDirURI);
// This fixes the issue of temporary files not getting deleted after creating new inverted indexes.
removeInvertedIndexTempFiles(indexDir);
try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
// Update default columns according to the schema.
if (_schema != null) {
DefaultColumnHandler defaultColumnHandler = DefaultColumnHandlerFactory
.getDefaultColumnHandler(indexDir, _segmentMetadata, _indexLoadingConfig, _schema, segmentWriter);
defaultColumnHandler.updateDefaultColumns();
_segmentMetadata = new SegmentMetadataImpl(indexDir);
_segmentDirectory.reloadMetadata();
} else {
LOGGER.warn("Skip creating default columns for segment: {} without schema", _segmentMetadata.getName());
}
// Update single-column indices, like inverted index, json index etc.
List<IndexHandler> indexHandlers = new ArrayList<>();
// We cannot just create all the index handlers in a random order.
// Specifically, ForwardIndexHandler needs to be executed first. This is because it modifies the segment metadata
// while rewriting forward index to create a dictionary. Some other handlers (like the range one) assume that
// metadata was already been modified by ForwardIndexHandler.
IndexHandler forwardHandler = createHandler(StandardIndexes.forward());
indexHandlers.add(forwardHandler);
forwardHandler.updateIndices(segmentWriter);
// Now that ForwardIndexHandler.updateIndices has been updated, we can run all other indexes in any order
_segmentMetadata = new SegmentMetadataImpl(indexDir);
_segmentDirectory.reloadMetadata();
for (IndexType<?, ?, ?> type : IndexService.getInstance().getAllIndexes()) {
if (type != StandardIndexes.forward()) {
IndexHandler handler = createHandler(type);
indexHandlers.add(handler);
handler.updateIndices(segmentWriter);
// Other IndexHandler classes may modify the segment metadata while creating a temporary forward
// index to generate their respective indexes from if the forward index was disabled. This new metadata is
// needed to construct other indexes like RangeIndex.
_segmentMetadata = _segmentDirectory.getSegmentMetadata();
}
}
// Perform post-cleanup operations on the index handlers.
for (IndexHandler handler : indexHandlers) {
handler.postUpdateIndicesCleanup(segmentWriter);
}
// Add min/max value to column metadata according to the prune mode.
ColumnMinMaxValueGeneratorMode columnMinMaxValueGeneratorMode =
_indexLoadingConfig.getColumnMinMaxValueGeneratorMode();
if (columnMinMaxValueGeneratorMode != ColumnMinMaxValueGeneratorMode.NONE) {
ColumnMinMaxValueGenerator columnMinMaxValueGenerator =
new ColumnMinMaxValueGenerator(_segmentMetadata, segmentWriter, columnMinMaxValueGeneratorMode);
columnMinMaxValueGenerator.addColumnMinMaxValue();
// NOTE: This step may modify the segment metadata. When adding new steps after this, un-comment the next line.
// _segmentMetadata = new SegmentMetadataImpl(indexDir);
}
segmentWriter.save();
}
// Startree creation will load the segment again, so we need to close and re-open the segment writer to make sure
// that the other required indices (e.g. forward index) are up-to-date.
try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
// Create/modify/remove star-trees if required.
processStarTrees(indexDir);
_segmentDirectory.reloadMetadata();
segmentWriter.save();
}
} | @Test
public void testV3UpdateDefaultColumns()
throws Exception {
constructV3Segment();
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(_indexDir);
assertEquals(segmentMetadata.getVersion(), SegmentVersion.v3);
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setTransformConfigs(
ImmutableList.of(
new TransformConfig(NEW_INT_SV_DIMENSION_COLUMN_NAME, "plus(column1, 1)"),
new TransformConfig(NEW_RAW_STRING_SV_DIMENSION_COLUMN_NAME, "reverse(column3)"),
// Ensure that null values returned by transform functions for derived columns are handled appropriately
// during segment reload
new TransformConfig(NEW_NULL_RETURN_STRING_SV_DIMENSION_COLUMN_NAME,
"json_path_string(column21, 'non-existent-path', null)"),
// Ensure that any transform function failures result in a null value if error on failure is false
new TransformConfig(NEW_WRONG_ARG_DATE_TRUNC_DERIVED_COLUMN_NAME, "dateTrunc('abcd', column1)")
));
_tableConfig.setIngestionConfig(ingestionConfig);
_indexLoadingConfig.addInvertedIndexColumns(NEW_COLUMN_INVERTED_INDEX);
_indexLoadingConfig.addNoDictionaryColumns(NEW_RAW_STRING_SV_DIMENSION_COLUMN_NAME);
_indexLoadingConfig.setErrorOnColumnBuildFailure(false);
checkUpdateDefaultColumns();
// Try to use the third schema and update default value again.
// For the third schema, we changed the default value for column 'newStringMVDimension' to 'notSameLength', which
// is not the same length as before. This should be fine for segment format v3 as well.
// We added two new columns and also removed the NEW_INT_SV_DIMENSION_COLUMN_NAME from schema.
// NEW_INT_SV_DIMENSION_COLUMN_NAME exists before processing but removed afterwards.
segmentMetadata = new SegmentMetadataImpl(_indexDir);
assertNotNull(segmentMetadata.getColumnMetadataFor(NEW_INT_SV_DIMENSION_COLUMN_NAME));
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
.load(_indexDir.toURI(),
new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, _indexLoadingConfig,
_newColumnsSchema3)) {
processor.process();
}
segmentMetadata = new SegmentMetadataImpl(_indexDir);
assertNull(segmentMetadata.getColumnMetadataFor(NEW_INT_SV_DIMENSION_COLUMN_NAME));
ColumnMetadata hllMetricMetadata = segmentMetadata.getColumnMetadataFor(NEW_HLL_BYTE_METRIC_COLUMN_NAME);
FieldSpec expectedHllMetricFieldSpec = _newColumnsSchema3.getFieldSpecFor(NEW_HLL_BYTE_METRIC_COLUMN_NAME);
assertEquals(hllMetricMetadata.getFieldSpec(), expectedHllMetricFieldSpec);
ByteArray expectedDefaultValue = new ByteArray((byte[]) expectedHllMetricFieldSpec.getDefaultNullValue());
assertEquals(hllMetricMetadata.getMinValue(), expectedDefaultValue);
assertEquals(hllMetricMetadata.getMaxValue(), expectedDefaultValue);
ColumnMetadata tDigestMetricMetadata = segmentMetadata.getColumnMetadataFor(NEW_TDIGEST_BYTE_METRIC_COLUMN_NAME);
FieldSpec expectedTDigestMetricFieldSpec = _newColumnsSchema3.getFieldSpecFor(NEW_TDIGEST_BYTE_METRIC_COLUMN_NAME);
assertEquals(tDigestMetricMetadata.getFieldSpec(), expectedTDigestMetricFieldSpec);
expectedDefaultValue = new ByteArray((byte[]) expectedTDigestMetricFieldSpec.getDefaultNullValue());
assertEquals(tDigestMetricMetadata.getMinValue(), expectedDefaultValue);
assertEquals(tDigestMetricMetadata.getMaxValue(), expectedDefaultValue);
} |
@Override
public Set<EntityExcerpt> listEntityExcerpts() {
return dataAdapterService.findAll().stream()
.map(this::createExcerpt)
.collect(Collectors.toSet());
} | @Test
@MongoDBFixtures("LookupDataAdapterFacadeTest.json")
public void listEntityExcerpts() {
final EntityExcerpt expectedEntityExcerpt = EntityExcerpt.builder()
.id(ModelId.of("5adf24a04b900a0fdb4e52c8"))
.type(ModelTypes.LOOKUP_ADAPTER_V1)
.title("HTTP DSV")
.build();
final Set<EntityExcerpt> entityExcerpts = facade.listEntityExcerpts();
assertThat(entityExcerpts).containsOnly(expectedEntityExcerpt);
} |
public static Character toChar(Object value, Character defaultValue) {
return convertQuietly(Character.class, value, defaultValue);
} | @Test
public void toCharTest() {
final String str = "aadfdsfs";
final Character c = Convert.toChar(str);
assertEquals(Character.valueOf('a'), c);
// conversion fails for an empty string
final Object str2 = "";
final Character c2 = Convert.toChar(str2);
assertNull(c2);
} |
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
if (null == source) {
return null;
}
T target = ReflectUtil.newInstanceIfPossible(tClass);
copyProperties(source, target, CopyOptions.create().setIgnoreProperties(ignoreProperties));
return target;
} | @Test
public void issueI41WKPTest() {
final Test1 t1 = new Test1().setStrList(ListUtil.toList("list"));
final Test2 t2_hu = new Test2();
BeanUtil.copyProperties(t1, t2_hu, CopyOptions.create().setIgnoreError(true));
assertNull(t2_hu.getStrList());
} |
@Override
public Set<Link> getDeviceLinks(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_NULL);
return manager.getVirtualLinks(this.networkId())
.stream()
.filter(link -> (deviceId.equals(link.src().elementId()) ||
deviceId.equals(link.dst().elementId())))
.collect(Collectors.toSet());
} | @Test(expected = NullPointerException.class)
public void testGetDeviceLinksByNullId() {
manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
LinkService linkService = manager.get(virtualNetwork.id(), LinkService.class);
// test the getDeviceLinks() method with a null device identifier.
linkService.getDeviceLinks(null);
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<Explain> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
return StatementExecutorResponse.handled(Optional
.of(ExplainExecutor.explain(
serviceContext,
statement,
executionContext,
sessionProperties)));
} | @Test
public void shouldExplainPersistentStatement() {
// Given:
engine.givenSource(DataSourceType.KSTREAM, "Y");
final String statementText = "CREATE STREAM X AS SELECT * FROM Y;";
final ConfiguredStatement<?> explain = engine.configure("EXPLAIN " + statementText);
// When:
final QueryDescriptionEntity query = (QueryDescriptionEntity) CustomExecutors.EXPLAIN.execute(
explain,
sessionProperties,
engine.getEngine(),
engine.getServiceContext()
).getEntity().orElseThrow(IllegalStateException::new);
// Then:
assertThat(query.getQueryDescription().getStatementText(), equalTo(statementText));
assertThat(query.getQueryDescription().getSources(), containsInAnyOrder("Y"));
assertThat("No side effects should happen", engine.getEngine().getPersistentQueries(), is(empty()));
assertThat(query.getQueryDescription().getKsqlHostQueryStatus(), equalTo(Collections.emptyMap()));
} |
public static int readVInt(ByteData arr, long position) {
byte b = arr.get(position++);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as int");
int value = b & 0x7F;
while ((b & 0x80) != 0) {
b = arr.get(position++);
value <<= 7;
value |= (b & 0x7F);
}
return value;
} | @Test
public void testReadVIntInputStream() throws IOException {
InputStream is = new ByteArrayInputStream(BYTES_VALUE_129);
Assert.assertEquals(129, VarInt.readVInt(is));
} |
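The decoder reads a base-128 varint, most-significant group first, where a set high bit marks a continuation byte (a lone `0x80` is reserved as the null sentinel, per the focal method's guard). Below is a minimal round-trip sketch under those semantics for non-negative ints; the `writeVInt` here is an illustrative counterpart, not necessarily the library's own encoder. Under this format, 129 encodes as the two bytes `0x81 0x01`.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

public class VIntSketch {
    // Emit 7 bits per byte, most significant group first; the high bit
    // flags that another byte follows.
    static byte[] writeVInt(int value) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        if (value > 0x0FFFFFFF) out.write(0x80 | ((value >>> 28) & 0x7F));
        if (value > 0x1FFFFF) out.write(0x80 | ((value >>> 21) & 0x7F));
        if (value > 0x3FFF) out.write(0x80 | ((value >>> 14) & 0x7F));
        if (value > 0x7F) out.write(0x80 | ((value >>> 7) & 0x7F));
        out.write(value & 0x7F);
        return out.toByteArray();
    }

    static int readVInt(InputStream in) throws IOException {
        int b = in.read();
        int value = b & 0x7F;
        while ((b & 0x80) != 0) {
            b = in.read();
            value = (value << 7) | (b & 0x7F);
        }
        return value;
    }

    public static void main(String[] args) throws IOException {
        byte[] bytes = writeVInt(129); // {(byte) 0x81, 0x01}
        System.out.println(readVInt(new ByteArrayInputStream(bytes))); // 129
    }
}
```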
@Override
public PageResult<AiVideoConfigDO> getAiVideoConfigPage(AiVideoConfigPageReqVO pageReqVO) {
return aiVideoConfigMapper.selectPage(pageReqVO);
} | @Test
@Disabled // TODO Replace the null values with the required ones, then remove the @Disabled annotation
public void testGetAiVideoConfigPage() {
// mock data
AiVideoConfigDO dbAiVideoConfig = randomPojo(AiVideoConfigDO.class, o -> { // to be matched by the query later
o.setType(null);
o.setValue(null);
o.setStatus(null);
o.setCreateTime(null);
});
aiVideoConfigMapper.insert(dbAiVideoConfig);
// test type mismatch
aiVideoConfigMapper.insert(cloneIgnoreId(dbAiVideoConfig, o -> o.setType(null)));
// test value mismatch
aiVideoConfigMapper.insert(cloneIgnoreId(dbAiVideoConfig, o -> o.setValue(null)));
// test status mismatch
aiVideoConfigMapper.insert(cloneIgnoreId(dbAiVideoConfig, o -> o.setStatus(null)));
// test createTime mismatch
aiVideoConfigMapper.insert(cloneIgnoreId(dbAiVideoConfig, o -> o.setCreateTime(null)));
// prepare parameters
AiVideoConfigPageReqVO reqVO = new AiVideoConfigPageReqVO();
reqVO.setType(null);
reqVO.setValue(null);
reqVO.setStatus(null);
reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
// invoke
PageResult<AiVideoConfigDO> pageResult = aiVideoConfigService.getAiVideoConfigPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbAiVideoConfig, pageResult.getList().get(0));
} |
@Action(name = "purge", resourceLevel = ResourceLevel.COLLECTION)
public int purge()
{
final int numPurged = _db.getData().size();
_db.getData().clear();
AlbumEntryResource.purge(_entryDb, null, null);
return numPurged;
} | @Test
public void testResourcePurge()
{
// photo database is initialized to have 1 photo
// at any time of the test, that initial photo should present for purge
Assert.assertTrue(_res.purge() > 0);
} |
@ExecuteOn(TaskExecutors.IO)
@Post(uri = "{namespace}/files", consumes = MediaType.MULTIPART_FORM_DATA)
@Operation(tags = {"Files"}, summary = "Create a file")
public void createFile(
@Parameter(description = "The namespace id") @PathVariable String namespace,
@Parameter(description = "The internal storage uri") @QueryValue URI path,
@Part CompletedFileUpload fileContent
) throws IOException, URISyntaxException {
String tenantId = tenantService.resolveTenant();
if(fileContent.getFilename().toLowerCase().endsWith(".zip")) {
try (ZipInputStream archive = new ZipInputStream(fileContent.getInputStream())) {
ZipEntry entry;
while ((entry = archive.getNextEntry()) != null) {
if (entry.isDirectory()) {
continue;
}
putNamespaceFile(tenantId, namespace, URI.create("/" + entry.getName()), new BufferedInputStream(new ByteArrayInputStream(archive.readAllBytes())));
}
}
} else {
try(BufferedInputStream inputStream = new BufferedInputStream(fileContent.getInputStream()) {
// Done to bypass the wrong available() output of the CompletedFileUpload InputStream
@Override
public synchronized int available() {
return (int) fileContent.getSize();
}
}) {
putNamespaceFile(tenantId, namespace, path, inputStream);
}
}
} | @Test
void createFile() throws IOException {
MultipartBody body = MultipartBody.builder()
.addPart("fileContent", "test.txt", "Hello".getBytes())
.build();
client.toBlocking().exchange(
HttpRequest.POST("/api/v1/namespaces/" + NAMESPACE + "/files?path=/test.txt", body)
.contentType(MediaType.MULTIPART_FORM_DATA_TYPE)
);
assertNamespaceFileContent(URI.create("/test.txt"), "Hello");
} |
public static Application fromServicesXml(String xml, Networking networking) {
Path applicationDir = StandaloneContainerRunner.createApplicationPackage(xml);
return new Application(applicationDir, networking, true);
} | @Test
void http_interface_is_on_when_networking_is_enabled() throws Exception {
int httpPort = getFreePort();
try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) {
try (DefaultHttpClient client = new org.apache.http.impl.client.DefaultHttpClient()) {
HttpResponse response = client.execute(new HttpGet("http://localhost:" + httpPort));
assertEquals(200, response.getStatusLine().getStatusCode());
BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent()));
String line;
StringBuilder sb = new StringBuilder();
while ((line = r.readLine()) != null) {
sb.append(line).append("\n");
}
assertTrue(sb.toString().contains("Handler"));
}
}
} |
@Override
public Object convert(String value) {
if (isNullOrEmpty(value)) {
return value;
}
if (value.contains("=")) {
final Map<String, String> fields = new HashMap<>();
Matcher m = PATTERN.matcher(value);
while (m.find()) {
if (m.groupCount() != 2) {
continue;
}
fields.put(removeQuotes(m.group(1)), removeQuotes(m.group(2)));
}
return fields;
} else {
return Collections.emptyMap();
}
} | @Test
public void testFilterWithKVAtEnd() {
TokenizerConverter f = new TokenizerConverter(new HashMap<String, Object>());
@SuppressWarnings("unchecked")
Map<String, String> result = (Map<String, String>) f.convert("lolwat Awesome! k1=v1");
assertEquals(1, result.size());
assertEquals("v1", result.get("k1"));
} |
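`PATTERN` and `removeQuotes` are not shown in this row; the sketch below assumes a simple quoted-or-bare `key=value` regex to illustrate the extraction loop, so the pattern is a stand-in, not Graylog's actual one.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class KvTokenizerSketch {
    // Assumed stand-in pattern: bare or quoted tokens around '='.
    private static final Pattern PATTERN =
            Pattern.compile("([\"']?[\\w.-]+[\"']?)=([\"']?[\\w.-]+[\"']?)");

    static String removeQuotes(String s) {
        return s.replaceAll("^[\"']|[\"']$", "");
    }

    static Map<String, String> tokenize(String value) {
        Map<String, String> fields = new HashMap<>();
        Matcher m = PATTERN.matcher(value);
        while (m.find()) {
            fields.put(removeQuotes(m.group(1)), removeQuotes(m.group(2)));
        }
        return fields;
    }

    public static void main(String[] args) {
        // Mirrors the test input: only the trailing k1=v1 pair is extracted.
        System.out.println(tokenize("lolwat Awesome! k1=v1")); // {k1=v1}
    }
}
```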
public void start() {
if (isRunning()) {
return;
}
super.start();
if (getCookieStore() != null) {
getCookieStore().start();
}
} | @Test
public void testNoResponse() throws Exception {
NoResponseServer noResponseServer = new NoResponseServer("localhost", 7780);
noResponseServer.start();
// Give the server time to start up:
Thread.sleep(1000);
// CrawlURI curi = makeCrawlURI("http://stats.bbc.co.uk/robots.txt");
CrawlURI curi = makeCrawlURI("http://localhost:7780");
fetcher().process(curi);
assertEquals(1, curi.getNonFatalFailures().size());
assertTrue(curi.getNonFatalFailures().toArray()[0] instanceof NoHttpResponseException);
assertEquals(FetchStatusCodes.S_CONNECT_FAILED, curi.getFetchStatus());
assertEquals(0, curi.getFetchCompletedTime());
noResponseServer.beDone();
noResponseServer.join();
} |
@Override
public byte[] serialize(FileSourceSplit split) throws IOException {
checkArgument(
split.getClass() == FileSourceSplit.class,
"Cannot serialize subclasses of FileSourceSplit");
// optimization: the splits lazily cache their own serialized form
if (split.serializedFormCache != null) {
return split.serializedFormCache;
}
final DataOutputSerializer out = SERIALIZER_CACHE.get();
out.writeUTF(split.splitId());
Path.serializeToDataOutputView(split.path(), out);
out.writeLong(split.offset());
out.writeLong(split.length());
out.writeLong(split.fileModificationTime());
out.writeLong(split.fileSize());
writeStringArray(out, split.hostnames());
final Optional<CheckpointedPosition> readerPosition = split.getReaderPosition();
out.writeBoolean(readerPosition.isPresent());
if (readerPosition.isPresent()) {
out.writeLong(readerPosition.get().getOffset());
out.writeLong(readerPosition.get().getRecordsAfterOffset());
}
final byte[] result = out.getCopyOfBuffer();
out.clear();
// optimization: cache the serialized from, so we avoid the byte work during repeated
// serialization
split.serializedFormCache = result;
return result;
} | @Test
void repeatedSerializationCaches() throws Exception {
final FileSourceSplit split =
new FileSourceSplit(
"random-id",
new Path("hdfs://namenode:14565/some/path/to/a/file"),
100_000_000,
64_000_000,
System.currentTimeMillis(),
200_000_000,
"host1",
"host2",
"host3");
final byte[] ser1 = FileSourceSplitSerializer.INSTANCE.serialize(split);
final byte[] ser2 = FileSourceSplitSerializer.INSTANCE.serialize(split);
assertThat(ser1).isSameAs(ser2);
} |
@Override
public void loadConfiguration(NacosLoggingProperties loggingProperties) {
Log4j2NacosLoggingPropertiesHolder.setProperties(loggingProperties);
String location = loggingProperties.getLocation();
loadConfiguration(location);
} | @Test
void testLoadConfigurationWithoutLocation() {
System.setProperty("nacos.logging.default.config.enabled", "false");
nacosLoggingProperties = new NacosLoggingProperties("classpath:nacos-log4j2.xml", System.getProperties());
log4J2NacosLoggingAdapter = new Log4J2NacosLoggingAdapter();
log4J2NacosLoggingAdapter.loadConfiguration(nacosLoggingProperties);
verify(propertyChangeListener, never()).propertyChange(any());
} |
public byte[] encode(String val, String delimiters) {
return codecs[0].encode(val);
} | @Test
public void testEncodeChinesePersonNameGB18030() {
assertArrayEquals(CHINESE_PERSON_NAME_GB18030_BYTES,
gb18030().encode(CHINESE_PERSON_NAME_GB18030, PN_DELIMS));
} |
public static Read read() {
return new AutoValue_MongoDbIO_Read.Builder()
.setMaxConnectionIdleTime(60000)
.setNumSplits(0)
.setBucketAuto(false)
.setSslEnabled(false)
.setIgnoreSSLCertificate(false)
.setSslInvalidHostNameAllowed(false)
.setQueryFn(FindQuery.create())
.build();
} | @Test
public void testReadWithCustomConnectionOptions() {
MongoDbIO.Read read =
MongoDbIO.read()
.withUri("mongodb://localhost:" + port)
.withMaxConnectionIdleTime(10)
.withDatabase(DATABASE_NAME)
.withCollection(COLLECTION_NAME);
assertEquals(10, read.maxConnectionIdleTime());
PCollection<Document> documents = pipeline.apply(read);
PAssert.thatSingleton(documents.apply("Count All", Count.globally())).isEqualTo(1000L);
PAssert.that(
documents
.apply("Map Scientist", MapElements.via(new DocumentToKVFn()))
.apply("Count Scientist", Count.perKey()))
.satisfies(
input -> {
for (KV<String, Long> element : input) {
assertEquals(100L, element.getValue().longValue());
}
return null;
});
pipeline.run();
} |
@Nullable
public String getInstanceRegion(InstanceInfo instanceInfo) {
if (instanceInfo.getDataCenterInfo() == null || instanceInfo.getDataCenterInfo().getName() == null) {
logger.warn("Cannot get region for instance id:{}, app:{} as dataCenterInfo is null. Returning local:{} by default",
instanceInfo.getId(), instanceInfo.getAppName(), localRegion);
return localRegion;
}
if (DataCenterInfo.Name.Amazon.equals(instanceInfo.getDataCenterInfo().getName())) {
AmazonInfo amazonInfo = (AmazonInfo) instanceInfo.getDataCenterInfo();
Map<String, String> metadata = amazonInfo.getMetadata();
String availabilityZone = metadata.get(AmazonInfo.MetaDataKey.availabilityZone.getName());
if (null != availabilityZone) {
return azToRegionMapper.getRegionForAvailabilityZone(availabilityZone);
}
}
return null;
} | @Test
public void testDefaultOverride() throws Exception {
ConfigurationManager.getConfigInstance().setProperty("eureka.us-east-1.availabilityZones", "abc,def");
PropertyBasedAzToRegionMapper azToRegionMapper = new PropertyBasedAzToRegionMapper(new DefaultEurekaClientConfig());
InstanceRegionChecker checker = new InstanceRegionChecker(azToRegionMapper, "us-east-1");
azToRegionMapper.setRegionsToFetch(new String[]{"us-east-1"});
AmazonInfo dcInfo = AmazonInfo.Builder.newBuilder().addMetadata(AmazonInfo.MetaDataKey.availabilityZone,
"def").build();
InstanceInfo instanceInfo = InstanceInfo.Builder.newBuilder().setAppName("app").setDataCenterInfo(
dcInfo).build();
String instanceRegion = checker.getInstanceRegion(instanceInfo);
Assert.assertEquals("Invalid instance region.", "us-east-1", instanceRegion);
} |
@PostMapping("create")
public String createProduct(NewProductPayload payload,
Model model,
HttpServletResponse response) {
try {
Product product = this.productsRestClient.createProduct(payload.title(), payload.details());
return "redirect:/catalogue/products/%d".formatted(product.id());
} catch (BadRequestException exception) {
response.setStatus(HttpStatus.BAD_REQUEST.value());
model.addAttribute("payload", payload);
model.addAttribute("errors", exception.getErrors());
return "catalogue/products/new_product";
}
} | @Test
@DisplayName("createProduct создаст новый товар и перенаправит на страницу товара")
void createProduct_RequestIsValid_ReturnsRedirectionToProductPage() {
// given
var payload = new NewProductPayload("Новый товар", "Описание нового товара");
var model = new ConcurrentModel();
var response = new MockHttpServletResponse();
doReturn(new Product(1, "Новый товар", "Описание нового товара"))
.when(this.productsRestClient)
.createProduct("Новый товар", "Описание нового товара");
// when
var result = this.controller.createProduct(payload, model, response);
// then
assertEquals("redirect:/catalogue/products/1", result);
verify(this.productsRestClient).createProduct("Новый товар", "Описание нового товара");
verifyNoMoreInteractions(this.productsRestClient);
} |
boolean canFilterPlayer(String playerName)
{
boolean isMessageFromSelf = playerName.equals(client.getLocalPlayer().getName());
return !isMessageFromSelf &&
(config.filterFriends() || !client.isFriended(playerName, false)) &&
(config.filterFriendsChat() || !isFriendsChatMember(playerName)) &&
(config.filterClanChat() || !isClanChatMember(playerName));
} | @Test
public void testMessageFromFriendIsFiltered()
{
when(chatFilterConfig.filterFriends()).thenReturn(true);
assertTrue(chatFilterPlugin.canFilterPlayer("Iron Mammal"));
} |
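A quick standalone sketch of the friends clause above (illustration only, not plugin code): a message is filterable only when friend filtering is enabled or the sender is not a friend, which is why the test enables filterFriends() before asserting.

public class FriendsClauseDemo {
    public static void main(String[] args) {
        boolean isFriend = true;
        boolean filterFriends = false;
        // friend + filtering disabled -> clause is false, the message is never filtered
        System.out.println(filterFriends || !isFriend); // false
        filterFriends = true;
        // friend + filtering enabled -> clause is true, the message may be filtered
        System.out.println(filterFriends || !isFriend); // true
    }
}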
@Override
public List<Intent> compile(HostToHostIntent intent, List<Intent> installable) {
// If source and destination are the same, there are never any installables.
if (Objects.equals(intent.one(), intent.two())) {
return ImmutableList.of();
}
boolean isAsymmetric = intent.constraints().contains(new AsymmetricPathConstraint());
Path pathOne = getPathOrException(intent, intent.one(), intent.two());
Path pathTwo = isAsymmetric ?
getPathOrException(intent, intent.two(), intent.one()) : invertPath(pathOne);
Host one = hostService.getHost(intent.one());
Host two = hostService.getHost(intent.two());
return Arrays.asList(createLinkCollectionIntent(pathOne, one, two, intent),
createLinkCollectionIntent(pathTwo, two, one, intent));
} | @Test
public void testBandwidthConstrainedIntentAllocation() {
final double bpsTotal = 1000.0;
final double bpsToReserve = 100.0;
ContinuousResource resourceSw1P1 =
Resources.continuous(DID_S1, PORT_1, Bandwidth.class)
.resource(bpsToReserve);
ContinuousResource resourceSw1P2 =
Resources.continuous(DID_S1, PORT_2, Bandwidth.class)
.resource(bpsToReserve);
ContinuousResource resourceSw2P1 =
Resources.continuous(DID_S2, PORT_1, Bandwidth.class)
.resource(bpsToReserve);
ContinuousResource resourceSw2P2 =
Resources.continuous(DID_S2, PORT_2, Bandwidth.class)
.resource(bpsToReserve);
ContinuousResource resourceSw3P1 =
Resources.continuous(DID_S3, PORT_1, Bandwidth.class)
.resource(bpsToReserve);
ContinuousResource resourceSw3P2 =
Resources.continuous(DID_S3, PORT_2, Bandwidth.class)
.resource(bpsToReserve);
String[] hops = {HOST_ONE, S1, S2, S3, HOST_TWO};
final ResourceService resourceService =
MockResourceService.makeCustomBandwidthResourceService(bpsTotal);
final List<Constraint> constraints =
Collections.singletonList(new BandwidthConstraint(Bandwidth.bps(bpsToReserve)));
final HostToHostIntent intent = makeIntent(HOST_ONE, HOST_TWO, constraints);
HostToHostIntentCompiler compiler = makeCompiler(hops, resourceService);
compiler.compile(intent, null);
Key intentKey = intent.key();
ResourceAllocation rAOne = new ResourceAllocation(resourceSw1P1, intentKey);
ResourceAllocation rATwo = new ResourceAllocation(resourceSw1P2, intentKey);
ResourceAllocation rAThree = new ResourceAllocation(resourceSw2P1, intentKey);
ResourceAllocation rAFour = new ResourceAllocation(resourceSw2P2, intentKey);
ResourceAllocation rAFive = new ResourceAllocation(resourceSw3P1, intentKey);
ResourceAllocation rASix = new ResourceAllocation(resourceSw3P2, intentKey);
    Set<ResourceAllocation> expectedResourceAllocations =
            ImmutableSet.of(rAOne, rATwo, rAThree, rAFour, rAFive, rASix);
    Set<ResourceAllocation> resourceAllocations =
            ImmutableSet.copyOf(resourceService.getResourceAllocations(intentKey));
    assertThat(resourceAllocations, hasSize(6));
    assertEquals(expectedResourceAllocations, resourceAllocations);
} |
public static Read<DynamicMessage> readProtoDynamicMessages(
ProtoDomain domain, String fullMessageName) {
SerializableFunction<PubsubMessage, DynamicMessage> parser =
message -> {
try {
return DynamicMessage.parseFrom(
domain.getDescriptor(fullMessageName), message.getPayload());
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException("Could not parse Pub/Sub message", e);
}
};
ProtoDynamicMessageSchema<DynamicMessage> schema =
ProtoDynamicMessageSchema.forDescriptor(domain, domain.getDescriptor(fullMessageName));
return Read.newBuilder(parser)
.setCoder(
SchemaCoder.of(
schema.getSchema(),
TypeDescriptor.of(DynamicMessage.class),
schema.getToRowFunction(),
schema.getFromRowFunction()))
.build();
} | @Test
public void testProtoDynamicMessages() {
ProtoCoder<Primitive> coder = ProtoCoder.of(Primitive.class);
ImmutableList<Primitive> inputs =
ImmutableList.of(
Primitive.newBuilder().setPrimitiveInt32(42).build(),
Primitive.newBuilder().setPrimitiveBool(true).build(),
Primitive.newBuilder().setPrimitiveString("Hello, World!").build());
setupTestClient(inputs, coder);
ProtoDomain domain = ProtoDomain.buildFrom(Primitive.getDescriptor());
String name = Primitive.getDescriptor().getFullName();
PCollection<Primitive> read =
pipeline
.apply(
PubsubIO.readProtoDynamicMessages(domain, name)
.fromSubscription(SUBSCRIPTION.getPath())
.withClock(CLOCK)
.withClientFactory(clientFactory))
// DynamicMessage doesn't work well with PAssert, but if the content can be successfully
// converted back into the original Primitive, then that should be good enough to
// consider it a successful read.
.apply(
"Return To Primitive",
MapElements.into(TypeDescriptor.of(Primitive.class))
.via(
(DynamicMessage message) -> {
try {
return Primitive.parseFrom(message.toByteArray());
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException("Could not return to Primitive", e);
}
}));
PAssert.that(read).containsInAnyOrder(inputs);
pipeline.run();
} |
@Deprecated
static Class<?> loadInjectorSourceFromProperties(Map<String, String> properties) {
String injectorSourceClassName = properties.get(GUICE_INJECTOR_SOURCE_KEY);
if (injectorSourceClassName == null) {
return null;
}
log.warn(
        () -> format("The '%s' property has been deprecated. " +
                "Add a class implementing '%s' on the glue path instead",
GUICE_INJECTOR_SOURCE_KEY, InjectorSource.class.getName()));
try {
return Class.forName(injectorSourceClassName, true, Thread.currentThread().getContextClassLoader());
} catch (Exception e) {
String message = format("Instantiation of '%s' failed. Check the caused by exception and ensure your " +
"InjectorSource implementation is accessible and has a public zero args constructor.",
injectorSourceClassName);
throw new InjectorSourceInstantiationFailed(message, e);
}
} | @Test
void failsToLoadNonExistantClass() {
Map<String, String> properties = new HashMap<>();
properties.put(GUICE_INJECTOR_SOURCE_KEY, "some.bogus.Class");
InjectorSourceInstantiationFailed actualThrown = assertThrows(InjectorSourceInstantiationFailed.class,
() -> InjectorSourceFactory.loadInjectorSourceFromProperties(properties));
assertAll(
() -> assertThat("Unexpected exception message", actualThrown.getMessage(), is(equalTo(
"Instantiation of 'some.bogus.Class' failed. Check the caused by exception and ensure your InjectorSource implementation is accessible and has a public zero args constructor."))),
() -> assertThat("Unexpected exception cause class", actualThrown.getCause(),
isA(ClassNotFoundException.class)));
} |
@Override
protected void refresh(final List<SelectorData> data) {
if (CollectionUtils.isEmpty(data)) {
        LOG.info("clear all selector cache");
        if (data != null) {
            // an empty list makes this a no-op; the guard avoids an NPE when data is null
            data.forEach(pluginDataSubscriber::unSelectorSubscribe);
        }
pluginDataSubscriber.refreshSelectorDataAll();
} else {
// update cache for UpstreamCacheManager
pluginDataSubscriber.refreshSelectorDataAll();
data.forEach(pluginDataSubscriber::onSelectorSubscribe);
}
} | @Test
public void testRefreshCoverage() {
final SelectorDataRefresh selectorDataRefresh = mockSelectorDataRefresh;
SelectorData selectorData = new SelectorData();
List<SelectorData> selectorDataList = new ArrayList<>();
selectorDataRefresh.refresh(selectorDataList);
selectorDataList.add(selectorData);
selectorDataRefresh.refresh(selectorDataList);
} |
public void destroy() {
ensureOperational();
destroyServices();
log.info("Server [{}] shutdown!", name);
log.info("======================================================");
if (!Boolean.getBoolean("test.circus")) {
LogManager.shutdown();
}
status = Status.SHUTDOWN;
} | @Test(expected = IllegalStateException.class)
@TestDir
public void illegalState1() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.destroy();
} |
public boolean checkIfEnabled() {
try {
this.gitCommand = locateDefaultGit();
MutableString stdOut = new MutableString();
this.processWrapperFactory.create(null, l -> stdOut.string = l, gitCommand, "--version").execute();
return stdOut.string != null && stdOut.string.startsWith("git version") && isCompatibleGitVersion(stdOut.string);
} catch (Exception e) {
LOG.debug("Failed to find git native client", e);
return false;
}
} | @Test
public void git_should_not_be_detected() {
NativeGitBlameCommand blameCommand = new NativeGitBlameCommand("randomcmdthatwillneverbefound", System2.INSTANCE, processWrapperFactory);
assertThat(blameCommand.checkIfEnabled()).isFalse();
} |
@Config("function-implementation-type")
public SqlFunctionLanguageConfig setFunctionImplementationType(String implementationType)
{
this.functionImplementationType = FunctionImplementationType.valueOf(implementationType.toUpperCase());
return this;
} | @Test
public void testExplicitPropertyMappings()
{
Map<String, String> properties = new ImmutableMap.Builder<String, String>()
.put("function-implementation-type", "THRIFT")
.build();
SqlFunctionLanguageConfig expected = new SqlFunctionLanguageConfig()
.setFunctionImplementationType("THRIFT");
assertFullMapping(properties, expected);
} |
@Override public HashSlotCursor12byteKey cursor() {
return new CursorIntKey2();
} | @Test
public void testCursor_key2() {
final long key1 = randomKey();
final int key2 = randomKey();
insert(key1, key2);
HashSlotCursor12byteKey cursor = hsa.cursor();
cursor.advance();
assertEquals(key2, cursor.key2());
} |
@Override
public HttpResponse get() throws InterruptedException, ExecutionException {
try {
final Object result = process(0, null);
if (result instanceof Throwable) {
throw new ExecutionException((Throwable) result);
}
return (HttpResponse) result;
} finally {
isDone = true;
}
} | @Test(expected = ExecutionException.class)
public void errGetExecution() throws ExecutionException, InterruptedException, TimeoutException {
get(new ExecutionException(new Exception("wrong")), false);
} |
InputFile.Status status(String moduleKeyWithBranch, DefaultInputFile inputFile, String hash) {
InputFile.Status statusFromScm = findStatusFromScm(inputFile);
if (statusFromScm != null) {
return statusFromScm;
}
return checkChangedWithProjectRepositories(moduleKeyWithBranch, inputFile, hash);
} | @Test
public void detect_status_branches_exclude() {
ScmChangedFiles changedFiles = new ScmChangedFiles(Set.of());
StatusDetection statusDetection = new StatusDetection(projectRepositories, changedFiles);
// normally changed
assertThat(statusDetection.status("foo", createFile("src/Foo.java"), "XXXXX")).isEqualTo(InputFile.Status.SAME);
// normally added
assertThat(statusDetection.status("foo", createFile("src/Other.java"), "QWERT")).isEqualTo(InputFile.Status.SAME);
} |
private AlarmId(DeviceId id, String uniqueIdentifier) {
    // validate inline so the null checks fire before dereferencing,
    // since super() must be the first statement
    super(checkNotNull(id, "device id must not be null").toString() + ":"
            + checkNotNull(uniqueIdentifier, "unique identifier must not be null"));
    checkArgument(!uniqueIdentifier.isEmpty(), "unique identifier must not be empty");
}
} | @Test
public void testEquals() {
final AlarmId id1 = AlarmId.alarmId(DEVICE_ID, UNIQUE_ID_1);
final AlarmId sameAsId1 = AlarmId.alarmId(DEVICE_ID, UNIQUE_ID_1);
final AlarmId id2 = AlarmId.alarmId(DEVICE_ID, UNIQUE_ID_2);
new EqualsTester()
.addEqualityGroup(id1, sameAsId1)
.addEqualityGroup(id2)
.testEquals();
} |
public static String resolveRaw(String str) {
int len = str.length();
if (len <= 4) {
return null;
}
int endPos = len - 1;
char last = str.charAt(endPos);
// optimize to not create new objects
if (last == ')') {
char char1 = str.charAt(0);
char char2 = str.charAt(1);
char char3 = str.charAt(2);
char char4 = str.charAt(3);
if (char1 == 'R' && char2 == 'A' && char3 == 'W' && char4 == '(') {
return str.substring(4, endPos);
}
} else if (last == '}') {
char char1 = str.charAt(0);
char char2 = str.charAt(1);
char char3 = str.charAt(2);
char char4 = str.charAt(3);
if (char1 == 'R' && char2 == 'A' && char3 == 'W' && char4 == '{') {
return str.substring(4, endPos);
}
}
// not RAW value
return null;
} | @Test
void testNotRawURIScanner() {
final String resolvedRaw = URIScanner.resolveRaw("foo");
Assertions.assertNull(resolvedRaw);
} |
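A minimal usage sketch of the scanner above (assuming URIScanner is on the classpath):

public class ResolveRawDemo {
    public static void main(String[] args) {
        // both RAW(...) and RAW{...} wrappers unwrap to the inner value
        System.out.println(URIScanner.resolveRaw("RAW(se+cret)")); // se+cret
        System.out.println(URIScanner.resolveRaw("RAW{se+cret}")); // se+cret
        // anything else is not a RAW value and yields null
        System.out.println(URIScanner.resolveRaw("foo"));          // null
    }
}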
@NonNull
public String processShownotes() {
String shownotes = rawShownotes;
if (TextUtils.isEmpty(shownotes)) {
Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message");
shownotes = "<html><head></head><body><p id='apNoShownotes'>" + noShownotesLabel + "</p></body></html>";
}
// replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already
if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) {
shownotes = shownotes.replace("\n", "<br />");
}
Document document = Jsoup.parse(shownotes);
cleanCss(document);
document.head().appendElement("style").attr("type", "text/css").text(webviewStyle);
addTimecodes(document);
return document.toString();
} | @Test
public void testProcessShownotesAddTimecodeMssNoChapters() {
final String timeStr = "1:12";
final long time = 60 * 1000 + 12 * 1000;
String shownotes = "<p> Some test text with a timecode " + timeStr + " here.</p>";
ShownotesCleaner t = new ShownotesCleaner(context, shownotes, 2 * 60 * 1000);
String res = t.processShownotes();
checkLinkCorrect(res, new long[]{time}, new String[]{timeStr});
} |
@Udf(description = "Converts a string representation of a date in the given format"
+ " into a DATE value.")
public Date parseDate(
@UdfParameter(
description = "The string representation of a date.") final String formattedDate,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.text.SimpleDateFormat.") final String formatPattern) {
if (formattedDate == null || formatPattern == null) {
return null;
}
try {
final long time = formatters.get(formatPattern).parse(formattedDate).getTime();
if (time % MILLIS_IN_DAY != 0) {
throw new KsqlFunctionException("Date format contains time field.");
}
return new Date(time);
} catch (final ExecutionException | RuntimeException | ParseException e) {
throw new KsqlFunctionException("Failed to parse date '" + formattedDate
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void shouldThrowIfParseFails() {
// When:
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> udf.parseDate("invalid", "yyyy-MM-dd")
);
// Then:
assertThat(e.getMessage(), containsString("Failed to parse date 'invalid' with formatter 'yyyy-MM-dd'"));
} |
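A minimal usage sketch (the enclosing UDF class name ParseDate is an assumption here):

// inside some test or driver method:
ParseDate udf = new ParseDate();
// parses to a midnight-aligned java.sql.Date
java.sql.Date date = udf.parseDate("2021-12-01", "yyyy-MM-dd");
// null-safe on both arguments
assert udf.parseDate(null, "yyyy-MM-dd") == null;
// a formatted value carrying a time-of-day component is rejected with a KsqlFunctionException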
protected boolean configDevice(DeviceId deviceId) {
// Returns true if config was successful, false if not and a clean up is
// needed.
final Device device = deviceService.getDevice(deviceId);
if (device == null || !device.is(IntProgrammable.class)) {
return true;
}
if (isNotIntConfigured()) {
log.warn("Missing INT config, aborting programming of INT device {}", deviceId);
return true;
}
final boolean isEdge = !hostService.getConnectedHosts(deviceId).isEmpty();
final IntDeviceRole intDeviceRole =
isEdge ? IntDeviceRole.SOURCE_SINK : IntDeviceRole.TRANSIT;
log.info("Started programming of INT device {} with role {}...",
deviceId, intDeviceRole);
final IntProgrammable intProg = device.as(IntProgrammable.class);
if (!isIntStarted()) {
// Leave device with no INT configuration.
return true;
}
if (!intProg.init()) {
log.warn("Unable to init INT pipeline on {}", deviceId);
return false;
}
boolean supportSource = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.SOURCE);
boolean supportSink = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.SINK);
boolean supportPostcard = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.POSTCARD);
if (intDeviceRole != IntDeviceRole.SOURCE_SINK && !supportPostcard) {
        // Stop here, no more configuration needed for transit devices unless it supports postcard.
return true;
}
if (supportSink || supportPostcard) {
if (!intProg.setupIntConfig(intConfig.get())) {
log.warn("Unable to apply INT report config on {}", deviceId);
return false;
}
}
// Port configuration.
final Set<PortNumber> hostPorts = deviceService.getPorts(deviceId)
.stream()
.map(port -> new ConnectPoint(deviceId, port.number()))
.filter(cp -> !hostService.getConnectedHosts(cp).isEmpty())
.map(ConnectPoint::port)
.collect(Collectors.toSet());
for (PortNumber port : hostPorts) {
if (supportSource) {
log.info("Setting port {}/{} as INT source port...", deviceId, port);
if (!intProg.setSourcePort(port)) {
log.warn("Unable to set INT source port {} on {}", port, deviceId);
return false;
}
}
if (supportSink) {
log.info("Setting port {}/{} as INT sink port...", deviceId, port);
if (!intProg.setSinkPort(port)) {
log.warn("Unable to set INT sink port {} on {}", port, deviceId);
return false;
}
}
}
if (!supportSource && !supportPostcard) {
// Stop here, no more configuration needed for sink devices unless
// it supports postcard mode.
return true;
}
// Apply intents.
// This is a trivial implementation where we simply get the
    // corresponding INT objective from an intent and apply it to all
    // devices which support reporting.
int appliedCount = 0;
for (Versioned<IntIntent> versionedIntent : intentMap.values()) {
IntIntent intent = versionedIntent.value();
IntObjective intObjective = getIntObjective(intent);
if (intent.telemetryMode() == IntIntent.TelemetryMode.INBAND_TELEMETRY && supportSource) {
intProg.addIntObjective(intObjective);
appliedCount++;
} else if (intent.telemetryMode() == IntIntent.TelemetryMode.POSTCARD && supportPostcard) {
intProg.addIntObjective(intObjective);
appliedCount++;
} else {
log.warn("Device {} does not support intent {}.", deviceId, intent);
}
}
log.info("Completed programming of {}, applied {} INT objectives of {} total",
deviceId, appliedCount, intentMap.size());
return true;
} | @Test
public void testConfigPostcardOnlyDevice() {
reset(deviceService, hostService);
Device device = getMockDevice(true, DEVICE_ID);
IntProgrammable intProg = getMockIntProgrammable(false, false, false, true);
setUpDeviceTest(device, intProg, true, true);
IntObjective intObj = IntObjective.builder()
.withSelector(FLOW_SELECTOR1)
.build();
expect(intProg.addIntObjective(eq(intObj)))
.andReturn(true)
.once();
replay(deviceService, hostService, device, intProg);
installTestIntents();
assertTrue(manager.configDevice(DEVICE_ID));
verify(intProg);
} |
public static Request.Builder buildRequestBuilder(final String url, final Map<String, ?> form,
final HTTPMethod method) {
switch (method) {
case GET:
return new Request.Builder()
.url(buildHttpUrl(url, form))
.get();
case HEAD:
return new Request.Builder()
.url(buildHttpUrl(url, form))
.head();
case PUT:
return new Request.Builder()
.url(buildHttpUrl(url))
.put(buildFormBody(form));
case DELETE:
return new Request.Builder()
.url(buildHttpUrl(url))
.delete(buildFormBody(form));
default:
return new Request.Builder()
.url(buildHttpUrl(url))
.post(buildFormBody(form));
}
} | @Test
public void buildRequestBuilderForGETTest() {
Request.Builder builder = HttpUtils.buildRequestBuilder(TEST_URL, formMap, HttpUtils.HTTPMethod.GET);
Assert.assertNotNull(builder);
Assert.assertEquals(builder.build().method(), HttpUtils.HTTPMethod.GET.value());
Assert.assertEquals(builder.build().url().toString(), ACTUAL_PARAM_URL);
} |
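A minimal sketch of the two request shapes produced above (OkHttp types; the URL is hypothetical):

Map<String, String> form = java.util.Collections.singletonMap("q", "shenyu");
// GET and HEAD fold the form into the query string
Request get = HttpUtils.buildRequestBuilder("https://example.com/search", form, HttpUtils.HTTPMethod.GET).build();
// PUT, DELETE and the POST fallback send the form as a request body instead
Request post = HttpUtils.buildRequestBuilder("https://example.com/search", form, HttpUtils.HTTPMethod.POST).build();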
@Override
public boolean next() throws SQLException {
if (orderByValuesQueue.isEmpty()) {
return false;
}
if (isFirstNext) {
isFirstNext = false;
return true;
}
OrderByValue firstOrderByValue = orderByValuesQueue.poll();
if (firstOrderByValue.next()) {
orderByValuesQueue.offer(firstOrderByValue);
}
if (orderByValuesQueue.isEmpty()) {
return false;
}
setCurrentQueryResult(orderByValuesQueue.peek().getQueryResult());
return true;
} | @Test
void assertNextForCaseSensitive() throws SQLException {
List<QueryResult> queryResults = Arrays.asList(mock(QueryResult.class), mock(QueryResult.class), mock(QueryResult.class));
for (int i = 0; i < 3; i++) {
QueryResultMetaData metaData = mock(QueryResultMetaData.class);
when(queryResults.get(i).getMetaData()).thenReturn(metaData);
when(metaData.getColumnName(1)).thenReturn("col1");
when(metaData.getColumnName(2)).thenReturn("col2");
}
when(queryResults.get(0).next()).thenReturn(true, false);
when(queryResults.get(0).getValue(1, Object.class)).thenReturn("b");
when(queryResults.get(1).next()).thenReturn(true, true, false);
when(queryResults.get(1).getValue(1, Object.class)).thenReturn("B", "B", "a", "a");
when(queryResults.get(2).next()).thenReturn(true, false);
when(queryResults.get(2).getValue(1, Object.class)).thenReturn("A");
ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL"));
MergedResult actual = resultMerger.merge(queryResults, selectStatementContext, createDatabase(), mock(ConnectionContext.class));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("A"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("B"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("a"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("b"));
assertFalse(actual.next());
} |
@Override
public void start() throws Exception {
LOG.info("Starting split enumerator for source {}.", operatorName);
// we mark this as started first, so that we can later distinguish the cases where
// 'start()' wasn't called and where 'start()' failed.
started = true;
// there are two ways the SplitEnumerator can get created:
// (1) Source.restoreEnumerator(), in which case the 'resetToCheckpoint()' method creates
// it
// (2) Source.createEnumerator, in which case it has not been created, yet, and we create
// it here
if (enumerator == null) {
final ClassLoader userCodeClassLoader =
context.getCoordinatorContext().getUserCodeClassloader();
try (TemporaryClassLoaderContext ignored =
TemporaryClassLoaderContext.of(userCodeClassLoader)) {
enumerator = source.createEnumerator(context);
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
LOG.error("Failed to create Source Enumerator for source {}", operatorName, t);
context.failJob(t);
return;
}
}
// The start sequence is the first task in the coordinator executor.
// We rely on the single-threaded coordinator executor to guarantee
// the other methods are invoked after the enumerator has started.
runInEventLoop(() -> enumerator.start(), "starting the SplitEnumerator.");
if (coordinatorListeningID != null) {
coordinatorStore.compute(
coordinatorListeningID,
(key, oldValue) -> {
// The value for a listener ID can be a source coordinator listening to an
// event, or an event waiting to be retrieved
if (oldValue == null || oldValue instanceof OperatorCoordinator) {
// The coordinator has not registered or needs to be recreated after
// global failover.
return this;
} else {
checkState(
oldValue instanceof OperatorEvent,
"The existing value for "
+ coordinatorStore
+ "is expected to be an operator event, but it is in fact "
+ oldValue);
LOG.info(
"Handling event {} received before the source coordinator with ID {} is registered",
oldValue,
coordinatorListeningID);
handleEventFromOperator(0, 0, (OperatorEvent) oldValue);
// Since for non-global failover the coordinator will not be recreated
// and for global failover both the sender and receiver need to restart,
// the coordinator will receive the event only once.
// As the event has been processed, it can be removed safely and there's
// no need to register the coordinator for further events as well.
return null;
}
});
}
if (watermarkAlignmentParams.isEnabled()) {
LOG.info("Starting schedule the period announceCombinedWatermark task");
coordinatorStore.putIfAbsent(
watermarkAlignmentParams.getWatermarkGroup(), new WatermarkAggregator<>());
context.schedulePeriodTask(
this::announceCombinedWatermark,
watermarkAlignmentParams.getUpdateInterval(),
watermarkAlignmentParams.getUpdateInterval(),
TimeUnit.MILLISECONDS);
}
} | @Test
void testStart() throws Exception {
sourceCoordinator.start();
waitForCoordinatorToProcessActions();
assertThat(getEnumerator().isStarted()).isTrue();
} |
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
return getTaskManagerPodTemplateFile()
.map(
FunctionUtils.uncheckedFunction(
file -> {
final Map<String, String> data = new HashMap<>();
data.put(
TASK_MANAGER_POD_TEMPLATE_FILE_NAME,
Files.toString(file, StandardCharsets.UTF_8));
final HasMetadata flinkConfConfigMap =
new ConfigMapBuilder()
.withApiVersion(Constants.API_VERSION)
.withNewMetadata()
.withName(podTemplateConfigMapName)
.withLabels(
kubernetesComponentConf
.getCommonLabels())
.endMetadata()
.addToData(data)
.build();
return Collections.singletonList(flinkConfConfigMap);
}))
.orElse(Collections.emptyList());
} | @Test
void testBuildAccompanyingKubernetesResourcesAddsPodTemplateAsConfigMap() throws IOException {
KubernetesTestUtils.createTemporyFile(
POD_TEMPLATE_DATA, flinkConfDir, POD_TEMPLATE_FILE_NAME);
final List<HasMetadata> additionalResources =
podTemplateMountDecorator.buildAccompanyingKubernetesResources();
assertThat(additionalResources).hasSize(1);
final ConfigMap resultConfigMap = (ConfigMap) additionalResources.get(0);
final Map<String, String> resultData = resultConfigMap.getData();
assertThat(resultData.get(TASK_MANAGER_POD_TEMPLATE_FILE_NAME))
.isEqualTo(POD_TEMPLATE_DATA);
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = ( range.getHighBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getHighEndPoint() ) == 0 );
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
} | @Test
void invokeParamsCantBeCompared() {
FunctionTestUtil.assertResultError( finishesFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.CLOSED, 1, 2, Range.RangeBoundary.CLOSED ) ), InvalidParametersEvent.class );
} |
@Override
public void dropFunction(QualifiedObjectName functionName, Optional<List<TypeSignature>> parameterTypes, boolean exists)
{
checkCatalog(functionName);
jdbi.useTransaction(handle -> {
FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass);
List<SqlInvokedFunction> functions = getSqlFunctions(transactionDao, functionName, parameterTypes);
checkExists(functions, functionName, parameterTypes);
if (!parameterTypes.isPresent()) {
transactionDao.setDeleted(functionName.getCatalogName(), functionName.getSchemaName(), functionName.getObjectName());
}
else {
SqlInvokedFunction latest = getOnlyElement(functions);
checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId());
transactionDao.setDeletionStatus(hash(latest.getFunctionId()), latest.getFunctionId(), getLongVersion(latest), true);
}
});
refreshFunctionsCache(functionName);
} | @Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Function not found: unittest\\.memory\\.tangent\\(double\\)")
public void testDropFunctionFailed()
{
dropFunction(TANGENT, Optional.of(ImmutableList.of(parseTypeSignature(DOUBLE))), false);
} |
@Override
public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
return AuthSystemTypes.LDAP.name().equalsIgnoreCase(EnvUtil.getProperty(Constants.Auth.NACOS_CORE_AUTH_SYSTEM_TYPE));
} | @Test
void matches() {
boolean matches = conditionOnLdapAuth.matches(conditionContext, annotatedTypeMetadata);
assertFalse(matches);
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testUnknownProducerErrorShouldBeRetriedWhenLogStartOffsetIsUnknown() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0));
// Send first ProduceRequest
Future<RecordMetadata> request1 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
sendIdempotentProducerResponse(0, tp0, Errors.NONE, 1000L, 10L);
sender.runOnce(); // receive the response.
assertTrue(request1.isDone());
assertEquals(1000L, request1.get().offset());
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertEquals(OptionalLong.of(1000L), transactionManager.lastAckedOffset(tp0));
// Send second ProduceRequest
Future<RecordMetadata> request2 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertFalse(request2.isDone());
sendIdempotentProducerResponse(1, tp0, Errors.UNKNOWN_PRODUCER_ID, -1L, -1L);
    sender.runOnce(); // receive the error response; the batch is retried without resetting the sequence numbers since the log start offset is unknown.
    // The sequence number state of the partition should be preserved, not reset.
assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertFalse(request2.isDone());
assertFalse(client.hasInFlightRequests());
sender.runOnce(); // should retry request 1
// resend the request. Note that the expected sequence is 1, since we never got the logStartOffset in the previous
// response and hence we didn't reset the sequence numbers.
sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1011L, 1010L);
sender.runOnce(); // receive response 1
assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertFalse(client.hasInFlightRequests());
assertTrue(request2.isDone());
assertEquals(1011L, request2.get().offset());
assertEquals(OptionalLong.of(1011L), transactionManager.lastAckedOffset(tp0));
} |
@Override
public boolean test(final Path test) {
return this.equals(new CaseInsensitivePathPredicate(test));
} | @Test
public void testCollision() {
final Path t = new Path("/d/2R", EnumSet.of(Path.Type.directory));
assertFalse(new SimplePathPredicate(t).test(new Path("/d/33", EnumSet.of(Path.Type.directory))));
} |
public Canvas canvas() {
Canvas canvas = new Canvas(getLowerBound(), getUpperBound());
canvas.add(this);
if (name != null) {
canvas.setTitle(name);
}
return canvas;
} | @Test
public void testHeatmap() throws Exception {
System.out.println("Heatmap");
var canvas = Heatmap.of(Z, Palette.jet(256)).canvas();
canvas.window();
} |
CompletableFuture<Void> beginExecute(
@Nonnull List<? extends Tasklet> tasklets,
@Nonnull CompletableFuture<Void> cancellationFuture,
@Nonnull ClassLoader jobClassLoader
) {
final ExecutionTracker executionTracker = new ExecutionTracker(tasklets.size(), cancellationFuture);
try {
final Map<Boolean, List<Tasklet>> byCooperation =
tasklets.stream().collect(partitioningBy(
tasklet -> doWithClassLoader(jobClassLoader, tasklet::isCooperative)
));
submitCooperativeTasklets(executionTracker, jobClassLoader, byCooperation.get(true));
submitBlockingTasklets(executionTracker, jobClassLoader, byCooperation.get(false));
} catch (Throwable t) {
executionTracker.future.internalCompleteExceptionally(t);
}
return executionTracker.future;
} | @Test
public void when_tryCompleteExceptionallyOnReturnedFuture_then_fails() {
// Given
final MockTasklet t = new MockTasklet().callsBeforeDone(Integer.MAX_VALUE);
CompletableFuture<Void> f = tes.beginExecute(singletonList(t), cancellationFuture, classLoader);
// When - Then
assertThrows(UnsupportedOperationException.class, () -> f.completeExceptionally(new RuntimeException()));
} |
public static UpdateTableRequest fromJson(String json) {
return JsonUtil.parse(json, UpdateTableRequestParser::fromJson);
} | @Test
public void invalidRequirements() {
assertThatThrownBy(
() ->
UpdateTableRequestParser.fromJson(
"{\"identifier\":{\"namespace\":[\"ns1\"],\"name\":\"table1\"},"
+ "\"requirements\":[23],\"updates\":[]}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse update requirement from non-object value: 23");
assertThatThrownBy(
() ->
UpdateTableRequestParser.fromJson(
"{\"identifier\":{\"namespace\":[\"ns1\"],\"name\":\"table1\"},"
+ "\"requirements\":[{}],\"updates\":[]}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse update requirement. Missing field: type");
assertThatThrownBy(
() ->
UpdateTableRequestParser.fromJson(
"{\"identifier\":{\"namespace\":[\"ns1\"],\"name\":\"table1\"},"
+ "\"requirements\":[{\"type\":\"assert-table-uuid\"}],\"updates\":[]}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing string: uuid");
} |
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
boolean result = false;
defaultMQAdminExt.start();
if (commandLine.hasOption('b')) {
String addr = commandLine.getOptionValue('b').trim();
result = defaultMQAdminExt.cleanUnusedTopicByAddr(addr);
} else {
String cluster = commandLine.getOptionValue('c');
if (null != cluster)
cluster = cluster.trim();
result = defaultMQAdminExt.cleanUnusedTopic(cluster);
}
        System.out.printf(result ? "success" : "failure");
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
} | @Test
public void testExecute() throws SubCommandException {
CleanUnusedTopicCommand cmd = new CleanUnusedTopicCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-b 127.0.0.1:" + listenPort(), "-c default-cluster"};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
} |
public String format(Date then)
{
if (then == null)
then = now();
Duration d = approximateDuration(then);
return format(d);
} | @Test
public void testMonthsFromNow() throws Exception
{
PrettyTime t = new PrettyTime(now);
Assert.assertEquals("3 months from now", t.format(now.plusMonths(3)));
} |
public void unschedule(String eventDefinitionId) {
final EventDefinitionDto eventDefinition = getEventDefinitionOrThrowIAE(eventDefinitionId);
if (SystemNotificationEventEntityScope.NAME.equals(eventDefinition.scope())) {
LOG.debug("Ignoring disable for system notification events");
return;
}
getJobDefinition(eventDefinition)
.ifPresent(jobDefinition -> deleteJobDefinitionAndTrigger(jobDefinition, eventDefinition));
eventDefinitionService.updateState(eventDefinitionId, EventDefinition.State.DISABLED);
} | @Test
@MongoDBFixtures("event-processors.json")
public void unschedule() {
assertThat(eventDefinitionService.get("54e3deadbeefdeadbeef0000")).isPresent();
assertThat(jobDefinitionService.get("54e3deadbeefdeadbeef0001")).isPresent();
assertThat(jobTriggerService.get("54e3deadbeefdeadbeef0002")).isPresent();
handler.unschedule("54e3deadbeefdeadbeef0000");
// Unschedule should NOT delete the event definition!
assertThat(eventDefinitionService.get("54e3deadbeefdeadbeef0000")).isPresent();
// Only the job definition and the trigger
assertThat(jobDefinitionService.get("54e3deadbeefdeadbeef0001")).isNotPresent();
assertThat(jobTriggerService.get("54e3deadbeefdeadbeef0002")).isNotPresent();
} |
@Override
public String toString() {
return "EmbeddedRocksDBStateBackend{"
+ ", localRocksDbDirectories="
+ Arrays.toString(localRocksDbDirectories)
+ ", enableIncrementalCheckpointing="
+ enableIncrementalCheckpointing
+ ", numberOfTransferThreads="
+ numberOfTransferThreads
+ ", writeBatchSize="
+ writeBatchSize
+ '}';
} | @TestTemplate
public void testSmallFilesCompaction() throws Exception {
ValueStateDescriptor<String> kvId = new ValueStateDescriptor<>("id", String.class);
SharedStateRegistry sharedStateRegistry = new SharedStateRegistryImpl();
CheckpointStreamFactory streamFactory = createStreamFactory();
final KeyGroupRange range = KeyGroupRange.of(0, 49);
double expectedNumSstFiles = range.getNumberOfKeyGroups() * .5;
final CheckpointableKeyedStateBackend<Integer> backend =
createKeyedBackend(
IntSerializer.INSTANCE,
range.getEndKeyGroup() - range.getStartKeyGroup() + 1,
range,
env);
try {
ValueState<String> state =
backend.getPartitionedState(
VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);
for (int i = range.getStartKeyGroup(); i < range.getEndKeyGroup(); i++) {
backend.setCurrentKey(i);
state.update(Integer.toString(i));
// snapshot to force flushing memtables to disk and create a new SST file
runSnapshot(
backend.snapshot(
i, // checkpoint id
i, // timestamp
streamFactory,
CheckpointOptions.forCheckpointWithDefaultLocation()),
sharedStateRegistry);
}
        // expect files under dbPath: job_123_op_456/db/*.sst
File sstPath = new File(dbPath).listFiles()[0].listFiles()[0];
int length = sstPath.listFiles((dir, name) -> name.endsWith(".sst")).length;
        assertThat(length)
                .withFailMessage("actual: " + length + ", expected: " + expectedNumSstFiles)
                .isLessThanOrEqualTo((int) expectedNumSstFiles);
} finally {
IOUtils.closeQuietly(backend);
backend.dispose();
}
// allow some time for the background compaction to fail hard by calling closed db
Thread.sleep(100);
} |
public Command create(
final ConfiguredStatement<? extends Statement> statement,
final KsqlExecutionContext context) {
return create(statement, context.getServiceContext(), context);
} | @Test
public void shouldCreateCommandForPlannedQuery() {
// Given:
givenPlannedQuery();
// When:
final Command command = commandFactory.create(configuredStatement, executionContext);
// Then:
assertThat(command, is(Command.of(ConfiguredKsqlPlan.of(A_PLAN, SessionConfig.of(config, overrides)))));
} |
public static String generateResourceId(
String baseString,
Pattern illegalChars,
String replaceChar,
int targetLength,
DateTimeFormatter timeFormat) {
// first, make sure the baseString, typically the test ID, is not empty
checkArgument(baseString.length() != 0, "baseString cannot be empty.");
// next, replace all illegal characters from given string with given replacement character
String illegalCharsRemoved =
illegalChars.matcher(baseString.toLowerCase()).replaceAll(replaceChar);
// finally, append the date/time and return the substring that does not exceed the length limit
LocalDateTime localDateTime = LocalDateTime.now(ZoneId.of(TIME_ZONE));
String timeAddOn = localDateTime.format(timeFormat);
return illegalCharsRemoved.subSequence(
0, min(targetLength - timeAddOn.length() - 1, illegalCharsRemoved.length()))
+ replaceChar
+ localDateTime.format(timeFormat);
} | @Test
public void testGenerateResourceIdShouldReplaceDollarSignWithHyphen() {
String testBaseString = "test$instance";
String actual =
generateResourceId(
testBaseString,
ILLEGAL_INSTANCE_CHARS,
REPLACE_INSTANCE_CHAR,
MAX_INSTANCE_ID_LENGTH,
TIME_FORMAT);
assertThat(actual).matches("test-instance-\\d{8}-\\d{6}-\\d{6}");
} |
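A worked example of the truncation rule above: with the test's TIME_FORMAT the suffix looks like "20240101-120000-123456" (22 characters), so for a target length of 40 at most 40 - 22 - 1 = 17 characters of the sanitized base string survive before the replacement character and timestamp are appended.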
public static <T> VerboseCondition<T> verboseCondition(Predicate<T> predicate, String description,
Function<T, String> objectUnderTestDescriptor) {
return new VerboseCondition<>(predicate, description, objectUnderTestDescriptor);
} | @Test
public void should_throw_NullPointerException_if_condition_predicate_is_null() {
assertThatNullPointerException().isThrownBy(() -> verboseCondition(null, "description", t -> ""));
} |
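A minimal usage sketch (AssertJ; the condition and values are hypothetical):

Condition<String> shortWord = VerboseCondition.verboseCondition(
    s -> s.length() < 4,
    "a word shorter than 4 characters",
    s -> " but its length was " + s.length());
// on failure the descriptor is appended, e.g. "a word shorter than 4 characters but its length was 5"
assertThat("abc").is(shortWord);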
@Override
@CheckForNull
public EmailMessage format(Notification notif) {
if (!(notif instanceof ChangesOnMyIssuesNotification)) {
return null;
}
ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif;
if (notification.getChange() instanceof AnalysisChange) {
checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty");
return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification);
}
return formatMultiProject(notification);
} | @Test
public void formats_returns_html_message_with_projects_ordered_by_name_when_user_change() {
Project project1 = newProject("1");
Project project1Branch1 = newBranch("1", "a");
Project project1Branch2 = newBranch("1", "b");
Project project2 = newProject("B");
Project project2Branch1 = newBranch("B", "a");
Project project3 = newProject("C");
String host = randomAlphabetic(15);
List<ChangedIssue> changedIssues = Stream.of(project1, project1Branch1, project1Branch2, project2, project2Branch1, project3)
.map(project -> newChangedIssue("issue_" + project.getUuid(), randomValidStatus(), project, newRule(randomAlphabetic(2), randomRuleTypeHotspotExcluded())))
.collect(toList());
Collections.shuffle(changedIssues);
UserChange userChange = newUserChange();
when(emailSettings.getServerBaseURL()).thenReturn(host);
EmailMessage emailMessage = underTest.format(new ChangesOnMyIssuesNotification(userChange, ImmutableSet.copyOf(changedIssues)));
HtmlFragmentAssert.assertThat(emailMessage.getMessage())
.hasParagraph().hasParagraph() // skip header
.hasParagraph(project1.getProjectName())
.hasList()
.hasParagraph(project1Branch1.getProjectName() + ", " + project1Branch1.getBranchName().get())
.hasList()
.hasParagraph(project1Branch2.getProjectName() + ", " + project1Branch2.getBranchName().get())
.hasList()
.hasParagraph(project2.getProjectName())
.hasList()
.hasParagraph(project2Branch1.getProjectName() + ", " + project2Branch1.getBranchName().get())
.hasList()
.hasParagraph(project3.getProjectName())
.hasList()
.hasParagraph().hasParagraph() // skip footer
.noMoreBlock();
} |
public static int calculateDefaultNumSlots(
ResourceProfile totalResourceProfile, ResourceProfile defaultSlotResourceProfile) {
// For ResourceProfile.ANY in test case, return the maximum integer
if (totalResourceProfile.equals(ResourceProfile.ANY)) {
return Integer.MAX_VALUE;
}
Preconditions.checkArgument(!defaultSlotResourceProfile.equals(ResourceProfile.ZERO));
int numSlots = 0;
ResourceProfile remainResource = totalResourceProfile;
while (remainResource.allFieldsNoLessThan(defaultSlotResourceProfile)) {
remainResource = remainResource.subtract(defaultSlotResourceProfile);
numSlots += 1;
}
return numSlots;
} | @Test
void testCalculateDefaultNumSlotsFailZeroDefaultSlotProfile() {
assertThatThrownBy(
() ->
SlotManagerUtils.calculateDefaultNumSlots(
ResourceProfile.fromResources(1.0, 1),
ResourceProfile.ZERO))
.isInstanceOf(IllegalArgumentException.class);
} |
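A worked example using the same factory as the test above, showing how repeated subtraction drives the count:

// (10 CPU, 1000 MB) / (3 CPU, 300 MB): three subtractions succeed, leaving
// (1 CPU, 100 MB), which no longer covers a full slot -> result is 3
int slots = SlotManagerUtils.calculateDefaultNumSlots(
        ResourceProfile.fromResources(10.0, 1000),
        ResourceProfile.fromResources(3.0, 300));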
@Override
public ObjectNode encode(Instruction instruction, CodecContext context) {
checkNotNull(instruction, "Instruction cannot be null");
return new EncodeInstructionCodecHelper(instruction, context).encode();
} | @Test
public void modIPv6DstInstructionTest() {
final Ip6Address ip = Ip6Address.valueOf("1111::2222");
final L3ModificationInstruction.ModIPInstruction instruction =
(L3ModificationInstruction.ModIPInstruction)
Instructions.modL3IPv6Dst(ip);
final ObjectNode instructionJson =
instructionCodec.encode(instruction, context);
assertThat(instructionJson, matchesInstruction(instruction));
} |
@Override
public void writeFiltered(List<FilteredMessage> filteredMessages) throws Exception {
final var messages = filteredMessages.stream()
.filter(message -> !message.destinations().get(FILTER_KEY).isEmpty())
.toList();
writes.mark(messages.size());
ignores.mark(filteredMessages.size() - messages.size());
writeMessageEntries(messages);
} | @Test
public void writeFilteredWithMultipleStreams() throws Exception {
final List<Message> messageList = buildMessages(2);
messageList.forEach(message -> message.addStream(testStream));
output.writeFiltered(List.of(
// The first message should not be written to the output because the output's filter key is not included.
DefaultFilteredMessage.forDestinationKeys(messageList.get(0), Set.of("foo")),
DefaultFilteredMessage.forDestinationKeys(messageList.get(1), Set.of("foo", ElasticSearchOutput.FILTER_KEY))
));
verify(messages, times(1)).bulkIndex(argThat(argument -> {
assertThat(argument).size().isEqualTo(2);
assertThat(argument).containsExactlyInAnyOrderElementsOf(List.of(
new MessageWithIndex(wrap(messageList.get(1)), defaultIndexSet),
new MessageWithIndex(wrap(messageList.get(1)), testIndexSet)
));
return true;
}));
verifyNoMoreInteractions(messages);
} |
@Override
public NodeHealth get() {
Health nodeHealth = healthChecker.checkNode();
this.nodeHealthBuilder
.clearCauses()
.setStatus(NodeHealth.Status.valueOf(nodeHealth.getStatus().name()));
nodeHealth.getCauses().forEach(this.nodeHealthBuilder::addCause);
return this.nodeHealthBuilder
.setDetails(nodeDetails)
.build();
} | @Test
public void get_returns_host_from_property_if_set_at_constructor_time() {
String host = randomAlphanumeric(4);
mapSettings.setProperty(CLUSTER_NODE_NAME.getKey(), randomAlphanumeric(3));
mapSettings.setProperty(CLUSTER_NODE_HZ_PORT.getKey(), 1 + random.nextInt(4));
mapSettings.setProperty(CLUSTER_NODE_HOST.getKey(), host);
setStartedAt();
when(healthChecker.checkNode()).thenReturn(Health.builder()
.setStatus(Health.Status.values()[random.nextInt(Health.Status.values().length)])
.build());
NodeHealthProviderImpl underTest = new NodeHealthProviderImpl(mapSettings.asConfig(), healthChecker, server, networkUtils);
NodeHealth nodeHealth = underTest.get();
assertThat(nodeHealth.getDetails().getHost()).isEqualTo(host);
// change values in properties
mapSettings.setProperty(CLUSTER_NODE_HOST.getKey(), randomAlphanumeric(66));
NodeHealth newNodeHealth = underTest.get();
assertThat(newNodeHealth.getDetails().getHost()).isEqualTo(host);
} |
public static StreamingService getServiceByUrl(final String url) throws ExtractionException {
for (final StreamingService service : ServiceList.all()) {
if (service.getLinkTypeByUrl(url) != StreamingService.LinkType.NONE) {
return service;
}
}
throw new ExtractionException("No service can handle the url = \"" + url + "\"");
} | @Test
public void getServiceWithUrl() throws Exception {
assertEquals(getServiceByUrl("https://www.youtube.com/watch?v=_r6CgaFNAGg"), YouTube);
assertEquals(getServiceByUrl("https://www.youtube.com/channel/UCi2bIyFtz-JdI-ou8kaqsqg"), YouTube);
assertEquals(getServiceByUrl("https://www.youtube.com/playlist?list=PLRqwX-V7Uu6ZiZxtDDRCi6uhfTH4FilpH"), YouTube);
assertEquals(getServiceByUrl("https://www.google.it/url?sa=t&rct=j&q=&esrc=s&cd=&cad=rja&uact=8&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DHu80uDzh8RY&source=video"), YouTube);
assertEquals(getServiceByUrl("https://soundcloud.com/pegboardnerds"), SoundCloud);
assertEquals(getServiceByUrl("https://www.google.com/url?sa=t&url=https%3A%2F%2Fsoundcloud.com%2Fciaoproduction&rct=j&q=&esrc=s&source=web&cd="), SoundCloud);
} |
public WorkflowInstance getWorkflowInstance(
String workflowId, long workflowInstanceId, String workflowRun, boolean aggregated) {
long runId = Constants.LATEST_ONE;
if (!Constants.LATEST_INSTANCE_RUN.equalsIgnoreCase(workflowRun)) {
runId = Long.parseLong(workflowRun);
}
WorkflowInstance instance = getWorkflowInstanceRun(workflowId, workflowInstanceId, runId);
if (aggregated) {
instance.setAggregatedInfo(AggregatedViewHelper.computeAggregatedView(instance, true));
}
return instance;
} | @Test
public void testGetWorkflowInstance() {
WorkflowInstance instanceRun =
instanceDao.getWorkflowInstance(
wfi.getWorkflowId(), wfi.getWorkflowInstanceId(), Constants.LATEST_INSTANCE_RUN, false);
instanceRun.setModifyTime(null);
assertEquals(wfi, instanceRun);
instanceRun =
instanceDao.getWorkflowInstance(
wfi.getWorkflowId(), wfi.getWorkflowInstanceId(), "1", false);
instanceRun.setModifyTime(null);
assertEquals(wfi, instanceRun);
} |
public static String getOperatingSystemCompleteName() {
return OS_COMPLETE_NAME;
} | @Test
@EnabledOnOs(OS.WINDOWS)
public void shouldGetCompleteNameOnWindows() {
assertThat(SystemInfo.getOperatingSystemCompleteName()).matches("Windows( \\w+)? [0-9.]+( \\(.*\\))?");
} |
public static Map<String, Object> getRowDataMap(Row source) {
Map<String, Object> toReturn = new HashMap<>();
List<Element> elements = source.getContent().stream()
.filter(Element.class::isInstance)
.map(Element.class::cast)
.collect(Collectors.toList());
if (!elements.isEmpty()) {
elements.forEach(el -> populateWithElement(toReturn, el));
} else {
InputCell inputCell = source.getContent().stream()
.filter(InputCell.class::isInstance)
.map(InputCell.class::cast)
.findFirst()
.orElse(null);
OutputCell outputCell = source.getContent().stream()
.filter(OutputCell.class::isInstance)
.map(OutputCell.class::cast)
.findFirst()
.orElse(null);
populateWithCells(toReturn, inputCell, outputCell);
}
return toReturn;
} | @Test
void getRowDataMap() {
Row source = getRandomRowWithCells();
Map<String, Object> retrieved = ModelUtils.getRowDataMap(source);
InputCell inputCell = source.getContent().stream()
.filter(InputCell.class::isInstance)
.map(InputCell.class::cast)
.findFirst()
.get();
OutputCell outputCell = source.getContent().stream()
.filter(OutputCell.class::isInstance)
.map(OutputCell.class::cast)
.findFirst()
.get();
assertThat(retrieved).hasSize(2);
String expected = getPrefixedName(inputCell.getName());
assertThat(retrieved).containsKey(expected);
assertThat(retrieved.get(expected)).isEqualTo(inputCell.getValue());
expected = getPrefixedName(outputCell.getName());
assertThat(retrieved).containsKey(expected);
assertThat(retrieved.get(expected)).isEqualTo(outputCell.getValue());
} |
public static Builder builder() {
return new Builder();
} | @Test
public void testBuilder_fail() {
try {
CachedLayer.builder().build();
Assert.fail("missing required");
} catch (NullPointerException ex) {
MatcherAssert.assertThat(ex.getMessage(), CoreMatchers.containsString("layerDigest"));
}
try {
CachedLayer.builder().setLayerDigest(mockLayerDigest).build();
Assert.fail("missing required");
} catch (NullPointerException ex) {
MatcherAssert.assertThat(ex.getMessage(), CoreMatchers.containsString("layerDiffId"));
}
try {
CachedLayer.builder().setLayerDigest(mockLayerDigest).setLayerDiffId(mockLayerDiffId).build();
Assert.fail("missing required");
} catch (NullPointerException ex) {
MatcherAssert.assertThat(ex.getMessage(), CoreMatchers.containsString("layerBlob"));
}
} |
@Override
public Collection<RedisServer> masters() {
List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS);
return toRedisServersList(masters);
} | @Test
public void testMasters() {
Collection<RedisServer> masters = connection.masters();
assertThat(masters).hasSize(1);
} |
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
final byte[] payload = rawMessage.getPayload();
final Map<String, Object> event;
try {
event = objectMapper.readValue(payload, TypeReferences.MAP_STRING_OBJECT);
} catch (IOException e) {
LOG.error("Couldn't decode raw message {}", rawMessage);
return null;
}
return parseEvent(event);
} | @Test
public void decodeMessagesHandlesGenericBeatWithCloudGCE() throws Exception {
final Message message = codec.decode(messageFromJson("generic-with-cloud-gce.json"));
assertThat(message).isNotNull();
assertThat(message.getMessage()).isEqualTo("null");
assertThat(message.getSource()).isEqualTo("unknown");
assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
assertThat(message.getField("facility")).isEqualTo("genericbeat");
assertThat(message.getField("beat_foo")).isEqualTo("bar");
assertThat(message.getField("beat_meta_cloud_provider")).isEqualTo("gce");
assertThat(message.getField("beat_meta_cloud_machine_type")).isEqualTo("projects/1234567890/machineTypes/f1-micro");
assertThat(message.getField("beat_meta_cloud_instance_id")).isEqualTo("1234556778987654321");
assertThat(message.getField("beat_meta_cloud_project_id")).isEqualTo("my-dev");
assertThat(message.getField("beat_meta_cloud_availability_zone")).isEqualTo("projects/1234567890/zones/us-east1-b");
} |
static HttpRequest setOperation(HttpRequest request, com.yahoo.jdisc.http.HttpRequest.Method method) {
return switch (method) {
case GET -> request.setHttpOperation(HttpRequest.HttpOp.GET);
case POST -> request.setHttpOperation(HttpRequest.HttpOp.POST);
case PUT -> request.setHttpOperation(HttpRequest.HttpOp.PUT);
case DELETE -> request.setHttpOperation(HttpRequest.HttpOp.DELETE);
default -> throw new IllegalStateException("Unhandled method " + method);
};
} | @Test
void testInvalidMethod() {
try {
HttpRequest request = new HttpRequest();
JDiscHttpRequestHandler.setOperation(request, com.yahoo.jdisc.http.HttpRequest.Method.CONNECT);
fail("Control should not reach here");
} catch (IllegalStateException e) {
assertEquals("Unhandled method CONNECT", e.getMessage());
}
} |
@Deprecated
@SuppressWarnings("InlineMeSuggester")
public static final <T extends Activity> T setupActivity(Class<T> activityClass) {
return buildActivity(activityClass).setup().get();
} | @Test
@Config(sdk = Config.NEWEST_SDK)
public void setupActivity_returnsAVisibleActivity() throws Exception {
LifeCycleActivity activity = Robolectric.setupActivity(LifeCycleActivity.class);
assertThat(activity.isCreated()).isTrue();
assertThat(activity.isStarted()).isTrue();
assertThat(activity.isResumed()).isTrue();
assertThat(activity.isVisible()).isTrue();
} |
public void setMaxApps(int max) {
maxApps.set(max);
} | @Test
public void testSetMaxApps() {
FSQueueMetrics metrics = setupMetrics(RESOURCE_NAME);
metrics.setMaxApps(25);
assertEquals(getErrorMessage("maxApps"), 25L, metrics.getMaxApps());
} |
public static PMML4Result evaluate(final KiePMMLModel model, final PMMLRuntimeContext context) {
if (logger.isDebugEnabled()) {
logger.debug("evaluate {} {}", model, context);
}
addStep(() -> getStep(START, model, context.getRequestData()), context);
final ProcessingDTO processingDTO = preProcess(model, context);
addStep(() -> getStep(PRE_EVALUATION, model, context.getRequestData()), context);
PMMLModelEvaluator executor = getFromPMMLModelType(model.getPmmlMODEL())
.orElseThrow(() -> new KiePMMLException(String.format("PMMLModelEvaluator not found for model %s",
model.getPmmlMODEL())));
PMML4Result toReturn = executor.evaluate(model, context);
addStep(() -> getStep(POST_EVALUATION, model, context.getRequestData()), context);
postProcess(toReturn, model, context, processingDTO);
addStep(() -> getStep(END, model, context.getRequestData()), context);
return toReturn;
} | @Test
public void evaluateWithPMMLContextListeners() {
modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME);
final List<PMMLStep> pmmlSteps = new ArrayList<>();
PMMLRuntimeContext pmmlContext = getPMMLContext(FILE_NAME, MODEL_NAME,
Collections.singleton(getPMMLListener(pmmlSteps)),
memoryCompilerClassLoader);
KiePMMLModelFactory kiePmmlModelFactory = PMMLLoaderUtils.loadKiePMMLModelFactory(modelLocalUriId, pmmlContext);
KiePMMLModel kiePMMLModel = kiePmmlModelFactory.getKiePMMLModels().get(0);
PMMLRuntimeHelper.evaluate(kiePMMLModel, pmmlContext);
Arrays.stream(PMML_STEP.values()).forEach(pmml_step -> {
Optional<PMMLStep> retrieved =
pmmlSteps.stream().filter(pmmlStep -> pmml_step.equals(((PMMLRuntimeStep) pmmlStep).getPmmlStep
()))
.findFirst();
assertThat(retrieved).isPresent();
commonValuateStep(retrieved.get(), pmml_step, kiePMMLModel, pmmlContext.getRequestData());
});
} |
static String normalizeMemory(String memory) {
return formatMemory(parseMemory(memory));
} | @Test
public void testNormalizeMemory() {
assertThat(normalizeMemory("1K"), is("1000"));
assertThat(normalizeMemory("1Ki"), is("1024"));
assertThat(normalizeMemory("1M"), is("1000000"));
assertThat(normalizeMemory("1Mi"), is("1048576"));
assertThat(normalizeMemory("12345"), is("12345"));
assertThat(normalizeMemory("500Mi"), is("524288000"));
assertThat(normalizeMemory("1.1Gi"), is("1181116006"));
assertThat(normalizeMemory("1.2Gi"), is("1288490188"));
} |
public MemoryManager(float ratio, long minAllocation) {
checkRatio(ratio);
memoryPoolRatio = ratio;
minMemoryAllocation = minAllocation;
totalMemoryPool = Math.round((double)
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()
* ratio);
LOG.debug("Allocated total memory pool is: {}", totalMemoryPool);
} | @Test
public void testMemoryManager() throws Exception {
long poolSize = ParquetOutputFormat.getMemoryManager().getTotalMemoryPool();
long rowGroupSize = poolSize / 2;
conf.setLong(ParquetOutputFormat.BLOCK_SIZE, rowGroupSize);
Assert.assertTrue("Pool should hold 2 full row groups", (2 * rowGroupSize) <= poolSize);
Assert.assertTrue("Pool should not hold 3 full row groups", poolSize < (3 * rowGroupSize));
Assert.assertEquals("Allocations should start out at 0", 0, getTotalAllocation());
RecordWriter writer1 = createWriter(1);
Assert.assertTrue("Allocations should never exceed pool size", getTotalAllocation() <= poolSize);
Assert.assertEquals("First writer should be limited by row group size", rowGroupSize, getTotalAllocation());
RecordWriter writer2 = createWriter(2);
Assert.assertTrue("Allocations should never exceed pool size", getTotalAllocation() <= poolSize);
Assert.assertEquals(
"Second writer should be limited by row group size", 2 * rowGroupSize, getTotalAllocation());
RecordWriter writer3 = createWriter(3);
Assert.assertTrue("Allocations should never exceed pool size", getTotalAllocation() <= poolSize);
writer1.close(null);
Assert.assertTrue("Allocations should never exceed pool size", getTotalAllocation() <= poolSize);
Assert.assertEquals(
"Allocations should be increased to the row group size", 2 * rowGroupSize, getTotalAllocation());
writer2.close(null);
Assert.assertTrue("Allocations should never exceed pool size", getTotalAllocation() <= poolSize);
Assert.assertEquals(
"Allocations should be increased to the row group size", rowGroupSize, getTotalAllocation());
writer3.close(null);
Assert.assertEquals("Allocations should be increased to the row group size", 0, getTotalAllocation());
} |
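A worked example of the pool math above (heap size hypothetical): with a 4 GiB max heap and ratio 0.25, totalMemoryPool = round(4294967296 * 0.25) = 1073741824 bytes, so two 512 MiB row-group writers fit exactly and a third over-subscribes the pool, forcing allocations to be scaled down until a writer closes.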