focal_method | test_case
---|---
@PostMapping("")
@RequiresPermissions("system:manager:add")
public ShenyuAdminResult createDashboardUser(@Valid @RequestBody final DashboardUserDTO dashboardUserDTO) {
return Optional.ofNullable(dashboardUserDTO)
.map(item -> {
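// Store only the SHA-512 digest of the password, never the raw value.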
item.setPassword(DigestUtils.sha512Hex(item.getPassword()));
Integer createCount = dashboardUserService.createOrUpdate(item);
return ShenyuAdminResult.success(ShenyuResultMessage.CREATE_SUCCESS, createCount);
})
.orElseGet(() -> ShenyuAdminResult.error(ShenyuResultMessage.DASHBOARD_CREATE_USER_ERROR));
} | @Test
public void createDashboardUser() throws Exception {
final String url = "/dashboardUser";
given(dashboardUserService.createOrUpdate(any())).willReturn(1);
mockMvc.perform(post(url, dashboardUserDTO)
.content(GsonUtils.getInstance().toJson(dashboardUserDTO))
.contentType(MediaType.APPLICATION_JSON))
.andExpect(status().isOk())
.andDo(print())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.CREATE_SUCCESS)))
.andExpect(jsonPath("$.data", is(1)));
} |
public static String sha1Hex(byte[] input) {
return hashBytesToHex(Hashing.sha1(), input);
} | @Test
void sha1Hex() {
Assertions.assertEquals("aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d", Digests.sha1Hex("hello"));
} |
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowRulesUsedStorageUnitStatement sqlStatement, final ContextManager contextManager) {
String resourceName = sqlStatement.getStorageUnitName().orElse(null);
return database.getResourceMetaData().getStorageUnits().containsKey(resourceName) ? getRows(sqlStatement) : Collections.emptyList();
} | @Test
void assertGetRowData() {
executor.setDatabase(mockDatabase());
ShowRulesUsedStorageUnitStatement sqlStatement = new ShowRulesUsedStorageUnitStatement("foo_ds", mock(DatabaseSegment.class));
Collection<LocalDataQueryResultRow> rowData = executor.getRows(sqlStatement, mock(ContextManager.class));
assertThat(rowData.size(), is(1));
Iterator<LocalDataQueryResultRow> actual = rowData.iterator();
LocalDataQueryResultRow row = actual.next();
assertThat(row.getCell(1), is("dist_s_q_l_handler_fixture"));
assertThat(row.getCell(2), is("foo_tbl"));
} |
public static boolean isBearerToken(final String authorizationHeader) {
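// A header counts as a bearer token only if it is non-blank and starts with TOKEN_PREFIX.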
return StringUtils.hasText(authorizationHeader) &&
authorizationHeader.startsWith(TOKEN_PREFIX);
} | @Test
void testIsBearerToken_WithInvalidBearerToken() {
// Given
String authorizationHeader = "sampleAccessToken";
// When
boolean result = Token.isBearerToken(authorizationHeader);
// Then
assertFalse(result);
} |
protected Credentials parse(final InputStream in) throws IOException {
final JsonReader reader = new JsonReader(new InputStreamReader(in, StandardCharsets.UTF_8));
reader.beginObject();
String key = null;
String secret = null;
String token = null;
Date expiration = null;
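// Stream the instance-metadata JSON once, picking out the credential fields by name.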
while(reader.hasNext()) {
final String name = reader.nextName();
final String value = reader.nextString();
switch(name) {
case "AccessKeyId":
key = value;
break;
case "SecretAccessKey":
secret = value;
break;
case "Token":
token = value;
break;
case "Expiration":
try {
expiration = new ISO8601DateFormatter().parse(value);
}
catch(InvalidDateException e) {
log.warn(String.format("Failure %s parsing %s", e, value));
}
break;
}
}
reader.endObject();
return new Credentials().withTokens(new TemporaryAccessTokens(key, secret, token, expiration != null ? expiration.getTime() : -1L));
} | @Test
public void testParse() throws Exception {
final Credentials c = new AWSSessionCredentialsRetriever(new DisabledX509TrustManager(), new DefaultX509KeyManager(),
"http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access")
.parse(IOUtils.toInputStream("{\n" +
" \"Code\" : \"Success\",\n" +
" \"LastUpdated\" : \"2012-04-26T16:39:16Z\",\n" +
" \"Type\" : \"AWS-HMAC\",\n" +
" \"AccessKeyId\" : \"AKIAIOSFODNN7EXAMPLE\",\n" +
" \"SecretAccessKey\" : \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\",\n" +
" \"Token\" : \"token\",\n" +
" \"Expiration\" : \"2012-04-27T22:39:16Z\"\n" +
"}", Charset.defaultCharset()));
assertEquals("AKIAIOSFODNN7EXAMPLE", c.getTokens().getAccessKeyId());
assertEquals("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", c.getTokens().getSecretAccessKey());
assertEquals("token", c.getTokens().getSessionToken());
assertEquals(1335566356000L, c.getTokens().getExpiryInMilliseconds(), 0L);
} |
@Override
public void collect(OUT outputRecord) {
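// Reuse a single StreamRecord wrapper to avoid a per-element allocation.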
output.collect(reuse.replace(outputRecord));
} | @Test
void testCollect() {
List<StreamElement> list = new ArrayList<>();
CollectorOutput<Integer> collectorOutput = new CollectorOutput<>(list);
OutputCollector<Integer> collector = new OutputCollector<>(collectorOutput);
collector.collect(1);
collector.collect(2);
assertThat(list).containsExactly(new StreamRecord<>(1), new StreamRecord<>(2));
} |
public static boolean isKafkaInvokeBySermant(StackTraceElement[] stackTrace) {
return isInvokeBySermant(KAFKA_CONSUMER_CLASS_NAME, KAFKA_CONSUMER_CONTROLLER_CLASS_NAME, stackTrace);
} | @Test
public void testNotInvokeBySermantWithoutInvoker() {
StackTraceElement[] stackTrace = new StackTraceElement[3];
stackTrace[0] = new StackTraceElement("testClass0", "testMethod0", "testFileName0", 0);
stackTrace[1] = new StackTraceElement("testClass1", "testMethod1", "testFileName1", 1);
stackTrace[2] = new StackTraceElement("org.apache.kafka.clients.consumer.KafkaConsumer", "subscribe",
"testFileName2", 2);
Assert.assertFalse(InvokeUtils.isKafkaInvokeBySermant(stackTrace));
} |
@Override
public boolean supportsUnion() {
return false;
} | @Test
void assertSupportsUnion() {
assertFalse(metaData.supportsUnion());
} |
public static List<FieldInfo> buildSourceSchemaEntity(final LogicalSchema schema) {
final List<FieldInfo> allFields = schema.columns().stream()
.map(EntityUtil::toFieldInfo)
.collect(Collectors.toList());
if (allFields.isEmpty()) {
throw new IllegalArgumentException("Root schema should contain columns: " + schema);
}
return allFields;
} | @Test
public void shouldSupportSchemasWithAllHeadersColumn() {
// Given:
final LogicalSchema schema = LogicalSchema.builder()
.headerColumn(ColumnName.of("field1"), Optional.empty())
.build();
// When:
final List<FieldInfo> fields = EntityUtil.buildSourceSchemaEntity(schema);
// Then:
assertThat(fields, hasSize(1));
assertThat(fields.get(0).getName(), equalTo("field1"));
assertThat(fields.get(0).getSchema().getTypeName(), equalTo("ARRAY"));
assertThat(fields.get(0).getType(), equalTo(Optional.of(FieldType.HEADER)));
assertThat(fields.get(0).getHeaderKey(), equalTo(Optional.empty()));
} |
public ArrayListTotal<Template> find(
Pageable pageable,
@Nullable String query,
@Nullable String tenantId,
@Nullable String namespace
) {
return this.jdbcRepository
.getDslContextWrapper()
.transactionResult(configuration -> {
DSLContext context = DSL.using(configuration);
SelectConditionStep<Record1<Object>> select = context
.select(
field("value")
)
.hint(context.configuration().dialect().supports(SQLDialect.MYSQL) ? "SQL_CALC_FOUND_ROWS" : null)
.from(this.jdbcRepository.getTable())
.where(this.defaultFilter(tenantId));
if (query != null) {
select.and(this.findCondition(query));
}
if (namespace != null) {
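// Match the namespace itself or any child namespace (the prefix followed by '.').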
select.and(DSL.or(field("namespace").eq(namespace), field("namespace").likeIgnoreCase(namespace + ".%")));
}
return this.jdbcRepository.fetchPage(context, select, pageable);
});
} | @Test
void find() {
templateRepository.create(builder("io.kestra.unitest").build());
templateRepository.create(builder("com.kestra.test").build());
List<Template> save = templateRepository.find(Pageable.from(1, 10, Sort.UNSORTED), null, null, null);
assertThat(save.size(), is(2));
save = templateRepository.find(Pageable.from(1, 10, Sort.UNSORTED), "kestra", null, "com");
assertThat(save.size(), is(1));
save = templateRepository.find(Pageable.from(1, 10, Sort.of(Sort.Order.asc("id"))), "kestra unit", null, null);
assertThat(save.size(), is(1));
} |
public String resolve(String ensName) {
if (Strings.isBlank(ensName) || (ensName.trim().length() == 1 && ensName.contains("."))) {
return null;
}
try {
if (isValidEnsName(ensName, addressLength)) {
OffchainResolverContract resolver = obtainOffchainResolver(ensName);
boolean supportWildcard =
resolver.supportsInterface(EnsUtils.ENSIP_10_INTERFACE_ID).send();
byte[] nameHash = NameHash.nameHashAsBytes(ensName);
String resolvedName;
if (supportWildcard) {
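// ENSIP-10 wildcard resolution: pass the DNS-encoded name plus the encoded
// addr() call into resolve(), then follow offchain lookups up to LOOKUP_LIMIT.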
String dnsEncoded = NameHash.dnsEncode(ensName);
String addrFunction = resolver.addr(nameHash).encodeFunctionCall();
String lookupDataHex =
resolver.resolve(
Numeric.hexStringToByteArray(dnsEncoded),
Numeric.hexStringToByteArray(addrFunction))
.send();
resolvedName = resolveOffchain(lookupDataHex, resolver, LOOKUP_LIMIT);
} else {
try {
resolvedName = resolver.addr(nameHash).send();
} catch (Exception e) {
throw new RuntimeException("Unable to execute Ethereum request: ", e);
}
}
if (!WalletUtils.isValidAddress(resolvedName)) {
throw new EnsResolutionException(
"Unable to resolve address for name: " + ensName);
} else {
return resolvedName;
}
} else {
return ensName;
}
} catch (Exception e) {
throw new EnsResolutionException(e);
}
} | @Test
public void testResolveWildCardSuccess() throws Exception {
String resolvedAddress = "0x41563129cdbbd0c5d3e1c86cf9563926b243834d";
EnsResolverForTest ensResolverForTest = new EnsResolverForTest(web3j);
OffchainResolverContract resolverMock = mock(OffchainResolverContract.class);
ensResolverForTest.setResolverMock(resolverMock);
RemoteFunctionCall suppIntResp = mock(RemoteFunctionCall.class);
when(resolverMock.supportsInterface(any())).thenReturn(suppIntResp);
when(suppIntResp.send()).thenReturn(true);
RemoteFunctionCall addrResp = mock(RemoteFunctionCall.class);
when(resolverMock.addr(any())).thenReturn(addrResp);
when(addrResp.encodeFunctionCall()).thenReturn("0x12345");
RemoteFunctionCall resolveResp = mock(RemoteFunctionCall.class);
when(resolverMock.resolve(any(), any())).thenReturn(resolveResp);
when(resolveResp.send()).thenReturn(resolvedAddress);
String result = ensResolverForTest.resolve("1.offchainexample.eth");
assertNotNull(result);
assertEquals(resolvedAddress, result);
} |
public Optional<Measure> toMeasure(@Nullable LiveMeasureDto measureDto, Metric metric) {
requireNonNull(metric);
if (measureDto == null) {
return Optional.empty();
}
Double value = measureDto.getValue();
String data = measureDto.getDataAsString();
switch (metric.getType().getValueType()) {
case INT:
return toIntegerMeasure(value, data);
case LONG:
return toLongMeasure(value, data);
case DOUBLE:
return toDoubleMeasure(value, data);
case BOOLEAN:
return toBooleanMeasure(value, data);
case STRING:
return toStringMeasure(data);
case LEVEL:
return toLevelMeasure(data);
case NO_VALUE:
return toNoValueMeasure();
default:
throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
}
} | @Test
public void toMeasure_returns_no_value_if_dto_has_no_value_for_Boolean_metric() {
Optional<Measure> measure = underTest.toMeasure(EMPTY_MEASURE_DTO, SOME_BOOLEAN_METRIC);
assertThat(measure).isPresent();
assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.NO_VALUE);
} |
public static NotControllerException newPreMigrationException(OptionalInt controllerId) {
if (controllerId.isPresent()) {
return new NotControllerException("The controller is in pre-migration mode.");
} else {
return new NotControllerException("No controller appears to be active.");
}
} | @Test
public void testNewPreMigrationExceptionWithActiveController() {
assertExceptionsMatch(new NotControllerException("The controller is in pre-migration mode."),
newPreMigrationException(OptionalInt.of(1)));
} |
@Override
public String convert(final SingleRuleConfiguration ruleConfig) {
if (ruleConfig.getTables().isEmpty() && !ruleConfig.getDefaultDataSource().isPresent()) {
return "";
}
StringBuilder result = new StringBuilder();
if (!ruleConfig.getTables().isEmpty()) {
result.append(convertLoadTable(ruleConfig));
}
if (ruleConfig.getDefaultDataSource().isPresent()) {
if (!ruleConfig.getTables().isEmpty()) {
result.append(System.lineSeparator()).append(System.lineSeparator());
}
result.append(convertSetDefaultSingleTableStorageUnit(ruleConfig.getDefaultDataSource().get()));
}
return result.toString();
} | @Test
void assertConvert() {
SingleRuleConfiguration singleRuleConfig = new SingleRuleConfiguration(new LinkedList<>(Arrays.asList("t_0", "t_1")), "foo_ds");
SingleRuleConfigurationToDistSQLConverter singleRuleConfigurationToDistSQLConverter = new SingleRuleConfigurationToDistSQLConverter();
assertThat(singleRuleConfigurationToDistSQLConverter.convert(singleRuleConfig),
is("LOAD SINGLE TABLE t_0,t_1;" + System.lineSeparator() + System.lineSeparator() + "SET DEFAULT SINGLE TABLE STORAGE UNIT = foo_ds;"));
} |
public static <V> TimestampedValue<V> of(V value, Instant timestamp) {
return new TimestampedValue<>(value, timestamp);
} | @Test
public void testNullTimestamp() {
thrown.expect(NullPointerException.class);
thrown.expectMessage("timestamp");
TimestampedValue.of("foobar", null);
} |
public static CoderProvider getCoderProvider() {
return new ProtoCoderProvider();
} | @Test
public void testProviderCannotProvideCoder() throws Exception {
thrown.expect(CannotProvideCoderException.class);
thrown.expectMessage("java.lang.Integer is not a subclass of com.google.protobuf.Message");
ProtoCoder.getCoderProvider()
.coderFor(new TypeDescriptor<Integer>() {}, Collections.emptyList());
} |
public String siteUrlFor(String givenUrl) throws URISyntaxException {
return siteUrlFor(givenUrl, false);
} | @Test
public void shouldGenerateSiteUrlForGivenPath() throws URISyntaxException {
ServerSiteUrlConfig url = new SiteUrl("http://someurl.com");
assertThat(url.siteUrlFor("/foo/bar"), is("/foo/bar"));
assertThat(url.siteUrlFor("http/bar"), is("http/bar"));
} |
@Override
public int partition() {
if (recordContext == null) {
// This is only exposed via the deprecated ProcessorContext,
// in which case, we're preserving the pre-existing behavior
// of returning dummy values when the record context is undefined.
// For partition, the dummy value is `-1`.
return -1;
} else {
return recordContext.partition();
}
} | @Test
public void shouldReturnPartitionFromRecordContext() {
assertThat(context.partition(), equalTo(recordContext.partition()));
} |
@DELETE
@Path("{id}")
@Timed
@ApiOperation(value = "Delete index set")
@AuditEvent(type = AuditEventTypes.INDEX_SET_DELETE)
@ApiResponses(value = {
@ApiResponse(code = 403, message = "Unauthorized"),
@ApiResponse(code = 404, message = "Index set not found"),
})
public void delete(@ApiParam(name = "id", required = true)
@PathParam("id") String id,
@ApiParam(name = "delete_indices")
@QueryParam("delete_indices") @DefaultValue("true") boolean deleteIndices) {
checkPermission(RestPermissions.INDEXSETS_DELETE, id);
final IndexSet indexSet = getIndexSet(indexSetRegistry, id);
final IndexSet defaultIndexSet = indexSetRegistry.getDefault();
if (indexSet.equals(defaultIndexSet)) {
throw new BadRequestException("Default index set <" + indexSet.getConfig().id() + "> cannot be deleted!");
}
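// delete() reports the number of removed index sets; zero means the ID was unknown.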
if (indexSetService.delete(id) == 0) {
throw new NotFoundException("Couldn't delete index set with ID <" + id + ">");
} else {
if (deleteIndices) {
try {
systemJobManager.submit(indexSetCleanupJobFactory.create(indexSet));
} catch (SystemJobConcurrencyException e) {
LOG.error("Error running system job", e);
}
}
}
} | @Test
public void delete() throws Exception {
final IndexSet indexSet = mock(IndexSet.class);
final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class);
when(indexSet.getConfig()).thenReturn(indexSetConfig);
when(indexSetRegistry.get("id")).thenReturn(Optional.of(indexSet));
when(indexSetCleanupJobFactory.create(indexSet)).thenReturn(mock(IndexSetCleanupJob.class));
when(indexSetRegistry.getDefault()).thenReturn(null);
when(indexSetService.delete("id")).thenReturn(1);
indexSetsResource.delete("id", false);
indexSetsResource.delete("id", true);
verify(indexSetRegistry, times(2)).getDefault();
verify(indexSetService, times(2)).delete("id");
verify(systemJobManager, times(1)).submit(any(IndexSetCleanupJob.class));
verifyNoMoreInteractions(indexSetService);
} |
public RunResponse start(
@NotNull String workflowId, @NotNull String version, @NotNull RunRequest runRequest) {
WorkflowDefinition definition = workflowDao.getWorkflowDefinition(workflowId, version);
validateRequest(version, definition, runRequest);
RunProperties runProperties =
RunProperties.from(
Checks.notNull(
definition.getPropertiesSnapshot(),
"property snapshot cannot be null for workflow: " + workflowId));
// create and initiate a new instance with overrides and param evaluation
WorkflowInstance instance =
workflowHelper.createWorkflowInstance(
definition.getWorkflow(),
definition.getInternalId(),
definition.getMetadata().getWorkflowVersionId(),
runProperties,
runRequest);
RunStrategy runStrategy = definition.getRunStrategyOrDefault();
int ret = runStrategyDao.startWithRunStrategy(instance, runStrategy);
RunResponse response = RunResponse.from(instance, ret);
LOG.info("Created a workflow instance with response {}", response);
return response;
} | @Test
public void testStartWithInvalidStepRunParams() {
RunRequest request =
RunRequest.builder()
.initiator(new ManualInitiator())
.currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
.requestId(UUID.fromString("41f0281e-41a2-468d-b830-56141b2f768b"))
.stepRunParams(Collections.singletonMap("job1", Collections.emptyMap()))
.build();
AssertHelper.assertThrows(
"caller cannot be null to activate workflow",
IllegalArgumentException.class,
"non-existing step id detected in step param overrides: inputs [job1] vs dag",
() -> actionHandler.start("sample-minimal-wf", "active", request));
verify(workflowDao, times(1)).getWorkflowDefinition("sample-minimal-wf", "active");
verify(dagTranslator, times(1)).translate(any());
} |
protected String convertHeaderValueToString(Exchange exchange, Object headerValue) {
if ((headerValue instanceof Date || headerValue instanceof Locale)
&& convertDateAndLocaleLocally(exchange)) {
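// Dates and Locales get HTTP-specific renderings (HTTP date / language tag)
// instead of the default type-converter output.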
if (headerValue instanceof Date) {
return toHttpDate((Date) headerValue);
} else {
return toHttpLanguage((Locale) headerValue);
}
} else {
return exchange.getContext().getTypeConverter().convertTo(String.class, headerValue);
}
} | @Test
public void testConvertLocale() {
DefaultHttpBinding binding = new DefaultHttpBinding();
Locale l = Locale.SIMPLIFIED_CHINESE;
Exchange exchange = super.createExchangeWithBody(null);
String value = binding.convertHeaderValueToString(exchange, l);
assertNotEquals(value, l.toString());
assertEquals("zh-CN", value);
} |
boolean isInsideOpenClosed(Number toEvaluate) {
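// Open-closed interval (leftMargin, rightMargin]; a null margin means unbounded on that side.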
if (leftMargin == null) {
return toEvaluate.doubleValue() <= rightMargin.doubleValue();
} else if (rightMargin == null) {
return toEvaluate.doubleValue() > leftMargin.doubleValue();
} else {
return toEvaluate.doubleValue() > leftMargin.doubleValue() && toEvaluate.doubleValue() <= rightMargin.doubleValue();
}
} | @Test
void isInsideOpenClosed() {
KiePMMLInterval kiePMMLInterval = new KiePMMLInterval(null, 20, CLOSURE.OPEN_CLOSED);
assertThat(kiePMMLInterval.isInsideOpenClosed(10)).isTrue();
assertThat(kiePMMLInterval.isInsideOpenClosed(20)).isTrue();
assertThat(kiePMMLInterval.isInsideOpenClosed(30)).isFalse();
kiePMMLInterval = new KiePMMLInterval(20, null, CLOSURE.OPEN_CLOSED);
assertThat(kiePMMLInterval.isInsideOpenClosed(30)).isTrue();
assertThat(kiePMMLInterval.isInsideOpenClosed(20)).isFalse();
assertThat(kiePMMLInterval.isInsideOpenClosed(10)).isFalse();
kiePMMLInterval = new KiePMMLInterval(20, 40, CLOSURE.OPEN_CLOSED);
assertThat(kiePMMLInterval.isInsideOpenClosed(30)).isTrue();
assertThat(kiePMMLInterval.isInsideOpenClosed(10)).isFalse();
assertThat(kiePMMLInterval.isInsideOpenClosed(20)).isFalse();
assertThat(kiePMMLInterval.isInsideOpenClosed(40)).isTrue();
assertThat(kiePMMLInterval.isInsideOpenClosed(50)).isFalse();
} |
public String encode(String name, String value) {
return encode(new DefaultCookie(name, value));
} | @Test
public void illegalCharInWrappedValueAppearsInException() {
try {
ServerCookieEncoder.STRICT.encode(new DefaultCookie("name", "\"value,\""));
} catch (IllegalArgumentException e) {
assertThat(e.getMessage().toLowerCase(), containsString("cookie value contains an invalid char: ,"));
}
} |
public DataVersion queryBrokerTopicConfig(final String clusterName, final String brokerAddr) {
BrokerAddrInfo addrInfo = new BrokerAddrInfo(clusterName, brokerAddr);
BrokerLiveInfo prev = this.brokerLiveTable.get(addrInfo);
if (prev != null) {
return prev.getDataVersion();
}
return null;
} | @Test
public void testQueryBrokerTopicConfig() {
{
DataVersion targetVersion = new DataVersion();
targetVersion.setCounter(new AtomicLong(10L));
targetVersion.setTimestamp(100L);
DataVersion dataVersion = routeInfoManager.queryBrokerTopicConfig("default-cluster", "127.0.0.1:10911");
assertThat(dataVersion.equals(targetVersion)).isTrue();
}
{
// register broker default-cluster-1 with the same addr, then test
DataVersion targetVersion = new DataVersion();
targetVersion.setCounter(new AtomicLong(20L));
targetVersion.setTimestamp(200L);
ConcurrentHashMap<String, TopicConfig> topicConfigConcurrentHashMap = new ConcurrentHashMap<>();
topicConfigConcurrentHashMap.put("unit-test-0", new TopicConfig("unit-test-0"));
topicConfigConcurrentHashMap.put("unit-test-1", new TopicConfig("unit-test-1"));
TopicConfigSerializeWrapper topicConfigSerializeWrapper = new TopicConfigSerializeWrapper();
topicConfigSerializeWrapper.setDataVersion(targetVersion);
topicConfigSerializeWrapper.setTopicConfigTable(topicConfigConcurrentHashMap);
Channel channel = mock(Channel.class);
RegisterBrokerResult registerBrokerResult = routeInfoManager.registerBroker("default-cluster-1", "127.0.0.1:10911", "default-broker-1", 1234, "127.0.0.1:1001", "",
null, topicConfigSerializeWrapper, new ArrayList<>(), channel);
assertThat(registerBrokerResult).isNotNull();
DataVersion dataVersion0 = routeInfoManager.queryBrokerTopicConfig("default-cluster", "127.0.0.1:10911");
assertThat(targetVersion.equals(dataVersion0)).isFalse();
DataVersion dataVersion1 = routeInfoManager.queryBrokerTopicConfig("default-cluster-1", "127.0.0.1:10911");
assertThat(targetVersion.equals(dataVersion1)).isTrue();
}
// unregister broker default-cluster-1, then test
{
routeInfoManager.unregisterBroker("default-cluster-1", "127.0.0.1:10911", "default-broker-1", 1234);
assertThat(null != routeInfoManager.queryBrokerTopicConfig("default-cluster", "127.0.0.1:10911")).isTrue();
assertThat(null == routeInfoManager.queryBrokerTopicConfig("default-cluster-1", "127.0.0.1:10911")).isTrue();
}
} |
public static DefaultProcessCommands secondary(File directory, int processNumber) {
return new DefaultProcessCommands(directory, processNumber, false);
} | @Test
public void secondary_fails_if_processNumber_is_less_than_0() throws Exception {
int processNumber = -2;
expectProcessNumberNoValidIAE(() -> DefaultProcessCommands.secondary(temp.newFolder(), processNumber), processNumber);
} |
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
PDFParserConfig localConfig = defaultConfig;
PDFParserConfig userConfig = context.get(PDFParserConfig.class);
if (userConfig != null) {
localConfig = defaultConfig.cloneAndUpdate(userConfig);
}
if (localConfig.isSetKCMS()) {
System.setProperty("sun.java2d.cmm", "sun.java2d.cmm.kcms.KcmsServiceProvider");
}
IncrementalUpdateRecord incomingIncrementalUpdateRecord = context.get(IncrementalUpdateRecord.class);
context.set(IncrementalUpdateRecord.class, null);
initRenderer(localConfig, context);
PDDocument pdfDocument = null;
String password = "";
PDFRenderingState incomingRenderingState = context.get(PDFRenderingState.class);
TikaInputStream tstream = null;
boolean shouldClose = false;
OCRPageCounter prevOCRCounter = context.get(OCRPageCounter.class);
context.set(OCRPageCounter.class, new OCRPageCounter());
try {
if (shouldSpool(localConfig)) {
if (stream instanceof TikaInputStream) {
tstream = (TikaInputStream) stream;
} else {
tstream = TikaInputStream.get(CloseShieldInputStream.wrap(stream));
shouldClose = true;
}
context.set(PDFRenderingState.class, new PDFRenderingState(tstream));
} else {
tstream = TikaInputStream.cast(stream);
}
scanXRefOffsets(localConfig, tstream, metadata, context);
password = getPassword(metadata, context);
MemoryUsageSetting memoryUsageSetting = null;
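// A non-negative cap mixes main memory with temp-file spillover; otherwise stay fully in memory.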
if (localConfig.getMaxMainMemoryBytes() >= 0) {
memoryUsageSetting =
MemoryUsageSetting.setupMixed(localConfig.getMaxMainMemoryBytes());
} else {
memoryUsageSetting = MemoryUsageSetting.setupMainMemoryOnly();
}
pdfDocument = getPDDocument(stream, tstream, password,
memoryUsageSetting.streamCache, metadata, context);
boolean hasCollection = hasCollection(pdfDocument, metadata);
checkEncryptedPayload(pdfDocument, hasCollection, localConfig);
boolean hasXFA = hasXFA(pdfDocument, metadata);
boolean hasMarkedContent = hasMarkedContent(pdfDocument, metadata);
extractMetadata(pdfDocument, metadata, context);
extractSignatures(pdfDocument, metadata);
checkIllustrator(pdfDocument, metadata);
AccessChecker checker = localConfig.getAccessChecker();
checker.check(metadata);
renderPagesBeforeParse(tstream, handler, metadata, context, localConfig);
if (handler != null) {
if (shouldHandleXFAOnly(hasXFA, localConfig)) {
handleXFAOnly(pdfDocument, handler, metadata, context);
} else if (localConfig.getOcrStrategy()
.equals(PDFParserConfig.OCR_STRATEGY.OCR_ONLY)) {
OCR2XHTML.process(pdfDocument, handler, context, metadata,
localConfig);
} else if (hasMarkedContent && localConfig.isExtractMarkedContent()) {
PDFMarkedContent2XHTML
.process(pdfDocument, handler, context, metadata,
localConfig);
} else {
PDF2XHTML.process(pdfDocument, handler, context, metadata,
localConfig);
}
}
} catch (InvalidPasswordException e) {
metadata.set(PDF.IS_ENCRYPTED, "true");
throw new EncryptedDocumentException(e);
} finally {
metadata.set(OCR_PAGE_COUNT, context.get(OCRPageCounter.class).getCount());
context.set(OCRPageCounter.class, prevOCRCounter);
//reset the incrementalUpdateRecord even if null
context.set(IncrementalUpdateRecord.class, incomingIncrementalUpdateRecord);
PDFRenderingState currState = context.get(PDFRenderingState.class);
try {
if (currState != null && currState.getRenderResults() != null) {
currState.getRenderResults().close();
}
if (pdfDocument != null) {
pdfDocument.close();
}
} finally {
//replace the one that was here
context.set(PDFRenderingState.class, incomingRenderingState);
if (shouldClose && tstream != null) {
tstream.close();
}
}
}
} | @Test
public void testProtectedPDF() throws Exception {
XMLResult r = getXML("testPDF_protected.pdf");
Metadata metadata = r.metadata;
assertEquals("true", metadata.get("pdf:encrypted"));
assertEquals("application/pdf", metadata.get(Metadata.CONTENT_TYPE));
assertEquals("The Bank of England", metadata.get(TikaCoreProperties.CREATOR));
assertEquals("Speeches by Andrew G Haldane",
metadata.get(TikaCoreProperties.SUBJECT));
assertEquals(
"Rethinking the Financial Network, Speech by Andrew G Haldane, " +
"Executive Director, Financial Stability " +
"delivered at the Financial Student " +
"Association, Amsterdam on 28 April 2009",
metadata.get(TikaCoreProperties.TITLE));
assertContains("RETHINKING THE FINANCIAL NETWORK", r.xml);
assertContains("On 16 November 2002", r.xml);
assertContains("In many important respects", r.xml);
// Try again with an explicit empty password
ParseContext context = new ParseContext();
context.set(PasswordProvider.class, new PasswordProvider() {
public String getPassword(Metadata metadata) {
return "";
}
});
r = getXML("testPDF_protected.pdf", context);
metadata = r.metadata;
assertEquals("true", metadata.get("pdf:encrypted"));
assertEquals("application/pdf", metadata.get(Metadata.CONTENT_TYPE));
assertEquals("The Bank of England", metadata.get(TikaCoreProperties.CREATOR));
assertEquals("Speeches by Andrew G Haldane", metadata.get(TikaCoreProperties.SUBJECT));
assertEquals(
"Rethinking the Financial Network, Speech by Andrew G Haldane, " +
"Executive Director, Financial Stability delivered at the " +
"Financial Student Association, Amsterdam on 28 April 2009",
metadata.get(TikaCoreProperties.TITLE));
assertContains("RETHINKING THE FINANCIAL NETWORK", r.xml);
assertContains("On 16 November 2002", r.xml);
assertContains("In many important respects", r.xml);
//now test wrong password
context.set(PasswordProvider.class, new PasswordProvider() {
public String getPassword(Metadata metadata) {
return "WRONG!!!!";
}
});
boolean ex = false;
ContentHandler handler = new BodyContentHandler();
metadata = new Metadata();
try (InputStream stream = getResourceAsStream("/test-documents/testPDF_protected.pdf")) {
AUTO_DETECT_PARSER.parse(stream, handler, metadata, context);
} catch (EncryptedDocumentException e) {
ex = true;
}
assertTrue(ex, "encryption exception");
assertEquals("application/pdf", metadata.get(Metadata.CONTENT_TYPE));
assertEquals("true", metadata.get("pdf:encrypted"));
//pdf:encrypted, X-Parsed-By and Content-Type
assertEquals(5, metadata.names().length, "very little metadata should be parsed");
assertEquals(0, handler.toString().length());
} |
@Override
public void onAppVisible() {
} | @Test
public void onAppVisible_neverClearAllNotifications() throws Exception {
createUUT().onAppVisible();
verify(mNotificationManager, never()).cancelAll();
} |
String buildUrl( JmsDelegate delegate, boolean debug ) {
StringBuilder finalUrl = new StringBuilder( delegate.amqUrl.trim() );
// verify user hit the checkbox on the dialogue *and* also has not specified these values on the URL already
// end result: default to SSL settings in the URL if present, otherwise use data from the security tab
if ( delegate.sslEnabled && !finalUrl.toString().contains( "sslEnabled" ) ) {
appendSslOptions( delegate, finalUrl, debug );
}
return finalUrl.toString();
} | @Test public void testUseDefaultSslContext() {
ActiveMQProvider provider = new ActiveMQProvider();
JmsDelegate delegate = new JmsDelegate( Collections.singletonList( provider ) );
delegate.amqUrl = AMQ_URL_BASE;
delegate.sslEnabled = true;
delegate.sslTruststorePath = TRUST_STORE_PATH_VAL;
delegate.sslTruststorePassword = TRUST_STORE_PASS_VAL;
delegate.sslKeystorePath = KEY_STORE_PATH_VAL;
delegate.sslKeystorePassword = KEY_STORE_PASS_VAL;
delegate.sslCipherSuite = ENABLED_CIPHER_SUITES_VAL;
delegate.sslContextAlgorithm = ENABLED_PROTOCOLS_VAL;
delegate.amqSslVerifyHost = VERIFY_HOST_VAL;
delegate.amqSslTrustAll = TRUST_ALL_VAL;
delegate.amqSslProvider = SSL_PROVIDER_VAL;
delegate.sslUseDefaultContext = true;
String urlString = provider.buildUrl( delegate, false );
try {
URI url = new URI( urlString );
} catch ( URISyntaxException e ) {
fail( e.getMessage() );
}
assertFalse( "Should not include trust store path", urlString.contains( "trustStorePath=" + TRUST_STORE_PATH_VAL ) );
assertFalse( "Should not include trust store password", urlString.contains( "trustStorePassword=" + TRUST_STORE_PASS_VAL ) );
assertFalse( "Should not include key store path", urlString.contains( "keyStorePath=" + KEY_STORE_PATH_VAL ) );
assertFalse( "Should not include key store password", urlString.contains( "keyStorePassword=" + KEY_STORE_PASS_VAL ) );
assertFalse( "Should not include cipher suite", urlString.contains( "enabledCipherSuites=" + ENABLED_CIPHER_SUITES_VAL ) );
assertFalse( "Should not include protocols", urlString.contains( "enabledProtocols=" + ENABLED_PROTOCOLS_VAL ) );
assertFalse( "Should not include verify host", urlString.contains( "verifyHost=" + VERIFY_HOST_VAL ) );
assertFalse( "Should not include trust all", urlString.contains( "trustAll=" + TRUST_ALL_VAL ) );
assertFalse( "Should not include ssl provider", urlString.contains( "sslProvider=" + SSL_PROVIDER_VAL ) );
assertTrue( "Missing Use default SSL context", urlString.contains( "useDefaultSslContext=true" ) );
assertTrue( "URL base incorrect", urlString.startsWith( AMQ_URL_BASE + "?" ) );
} |
@Override
public void start() {
DatabaseCharsetChecker.State state = DatabaseCharsetChecker.State.STARTUP;
if (upgradeStatus.isUpgraded()) {
state = DatabaseCharsetChecker.State.UPGRADE;
} else if (upgradeStatus.isFreshInstall()) {
state = DatabaseCharsetChecker.State.FRESH_INSTALL;
}
charsetChecker.check(state);
} | @Test
public void test_regular_startup() {
when(upgradeStatus.isFreshInstall()).thenReturn(false);
underTest.start();
verify(charsetChecker).check(DatabaseCharsetChecker.State.STARTUP);
} |
public void patchBoardById(
final Long boardId,
final Long memberId,
final BoardUpdateRequest request
) {
Board board = findBoardWithImages(boardId);
board.validateWriter(memberId);
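// Apply the entity update first, then sync storage: upload the added images and remove the deleted ones.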
BoardUpdateResult result = board.update(request.title(), request.content(), request.addedImages(), request.deletedImages(), imageConverter);
imageUploader.upload(result.addedImages(), request.addedImages());
imageUploader.delete(result.deletedImages());
} | @Test
void 게시글이_없다면_수정하지_못한다() {
// given
BoardUpdateRequest req = new BoardUpdateRequest("수정", "수정", new ArrayList<>(), new ArrayList<>());
// when & then
assertThatThrownBy(() -> boardService.patchBoardById(1L, 1L, req))
.isInstanceOf(BoardNotFoundException.class);
} |
public SearchHits<ExtensionSearch> search(Options options) {
var resultWindow = options.requestedOffset + options.requestedSize;
if(resultWindow > getMaxResultWindow()) {
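// Paging past the index's max result window cannot be served; return an empty page instead of failing.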
return new SearchHitsImpl<>(0, TotalHitsRelation.OFF, 0f, null, null, Collections.emptyList(), null, null);
}
var queryBuilder = new NativeQueryBuilder();
queryBuilder.withQuery(builder -> builder.bool(boolQuery -> createSearchQuery(boolQuery, options)));
// Sort search results according to 'sortOrder' and 'sortBy' options
sortResults(queryBuilder, options.sortOrder, options.sortBy);
var pages = new ArrayList<Pageable>();
pages.add(PageRequest.of(options.requestedOffset / options.requestedSize, options.requestedSize));
if(options.requestedOffset % options.requestedSize > 0) {
// size is not exact multiple of offset; this means we need to get two pages
// e.g. when offset is 20 and size is 50, you want results 20 to 70 which span pages 0 and 1 of a 50 item page
pages.add(pages.get(0).next());
}
var searchHitsList = new ArrayList<SearchHits<ExtensionSearch>>(pages.size());
for(var page : pages) {
queryBuilder.withPageable(page);
try {
rwLock.readLock().lock();
var searchHits = searchOperations.search(queryBuilder.build(), ExtensionSearch.class, searchOperations.indexOps(ExtensionSearch.class).getIndexCoordinates());
searchHitsList.add(searchHits);
} finally {
rwLock.readLock().unlock();
}
}
if(searchHitsList.size() == 2) {
var firstSearchHitsPage = searchHitsList.get(0);
var secondSearchHitsPage = searchHitsList.get(1);
List<SearchHit<ExtensionSearch>> searchHits = new ArrayList<>(firstSearchHitsPage.getSearchHits());
searchHits.addAll(secondSearchHitsPage.getSearchHits());
var endIndex = Math.min(searchHits.size(), options.requestedOffset + options.requestedSize);
var startIndex = Math.min(endIndex, options.requestedOffset);
searchHits = searchHits.subList(startIndex, endIndex);
return new SearchHitsImpl<>(
firstSearchHitsPage.getTotalHits(),
firstSearchHitsPage.getTotalHitsRelation(),
firstSearchHitsPage.getMaxScore(),
null,
null,
searchHits,
null,
null
);
} else {
return searchHitsList.get(0);
}
} | @Test
public void testSearchResultWindowTooLarge() {
mockIndex(true);
var options = new ISearchService.Options("foo", "bar", "universal", 50, 10000, null, null, false);
var searchHits = search.search(options);
assertThat(searchHits.getSearchHits()).isEmpty();
assertThat(searchHits.getTotalHits()).isEqualTo(0L);
} |
@Override
public List<Change> computeDiff(List<T> source, List<T> target, DiffAlgorithmListener progress) {
Objects.requireNonNull(source, "source list must not be null");
Objects.requireNonNull(target, "target list must not be null");
if (progress != null) {
progress.diffStart();
}
DiffData data = new DiffData(source, target);
int maxIdx = source.size() + target.size();
buildScript(data, 0, source.size(), 0, target.size(), idx -> {
if (progress != null) {
progress.diffStep(idx, maxIdx);
}
});
if (progress != null) {
progress.diffEnd();
}
return data.script;
} | @Test
public void testDiffMyersExample1ForwardWithListener() {
List<String> original = Arrays.asList("A", "B", "C", "A", "B", "B", "A");
List<String> revised = Arrays.asList("C", "B", "A", "B", "A", "C");
List<String> logdata = new ArrayList<>();
final Patch<String> patch = Patch.generate(original, revised,
new MyersDiffWithLinearSpace<String>().computeDiff(original, revised, new DiffAlgorithmListener() {
@Override
public void diffStart() {
logdata.add("start");
}
@Override
public void diffStep(int value, int max) {
logdata.add(value + " - " + max);
}
@Override
public void diffEnd() {
logdata.add("end");
}
}));
assertNotNull(patch);
System.out.println(patch);
assertEquals(5, patch.getDeltas().size());
assertEquals("Patch{deltas=[[InsertDelta, position: 0, lines: [C]], [DeleteDelta, position: 0, lines: [A]], [DeleteDelta, position: 2, lines: [C]], [DeleteDelta, position: 5, lines: [B]], [InsertDelta, position: 7, lines: [C]]]}", patch.toString());
System.out.println(logdata);
assertEquals(11, logdata.size());
} |
@GET
@Path("/{connector}/config")
@Operation(summary = "Get the configuration for the specified connector")
public Map<String, String> getConnectorConfig(final @PathParam("connector") String connector) throws Throwable {
FutureCallback<Map<String, String>> cb = new FutureCallback<>();
herder.connectorConfig(connector, cb);
return requestHandler.completeRequest(cb);
} | @Test
public void testGetConnectorConfigConnectorNotFound() {
final ArgumentCaptor<Callback<Map<String, String>>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackException(cb, new NotFoundException("not found"))
.when(herder).connectorConfig(eq(CONNECTOR_NAME), cb.capture());
assertThrows(NotFoundException.class, () -> connectorsResource.getConnectorConfig(CONNECTOR_NAME));
} |
public MergePolicyConfig getMergePolicyConfig() {
return mergePolicyConfig;
} | @Test
public void cacheConfigXmlTest_CustomMergePolicy() throws IOException {
Config config = new XmlConfigBuilder(configUrl1).build();
CacheSimpleConfig cacheWithCustomMergePolicyConfig = config.getCacheConfig("cacheWithCustomMergePolicy");
assertNotNull(cacheWithCustomMergePolicyConfig);
assertEquals("MyDummyMergePolicy", cacheWithCustomMergePolicyConfig.getMergePolicyConfig().getPolicy());
} |
public <T> Future<Iterable<TimestampedValue<T>>> orderedListFuture(
Range<Long> range, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
// First request has no continuation position.
StateTag<ByteString> stateTag =
StateTag.<ByteString>of(StateTag.Kind.ORDERED_LIST, encodedTag, stateFamily)
.toBuilder()
.setSortedListRange(Preconditions.checkNotNull(range))
.build();
return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
} | @Test
public void testReadSortedListRanges() throws Exception {
Future<Iterable<TimestampedValue<Integer>>> future1 =
underTest.orderedListFuture(Range.closedOpen(0L, 5L), STATE_KEY_1, STATE_FAMILY, INT_CODER);
// Should be put into a subsequent batch as it has the same key and state family.
Future<Iterable<TimestampedValue<Integer>>> future2 =
underTest.orderedListFuture(Range.closedOpen(5L, 6L), STATE_KEY_1, STATE_FAMILY, INT_CODER);
Future<Iterable<TimestampedValue<Integer>>> future3 =
underTest.orderedListFuture(
Range.closedOpen(6L, 10L), STATE_KEY_2, STATE_FAMILY, INT_CODER);
Future<Iterable<TimestampedValue<Integer>>> future4 =
underTest.orderedListFuture(
Range.closedOpen(11L, 12L), STATE_KEY_2, STATE_FAMILY2, INT_CODER);
Mockito.verifyNoMoreInteractions(mockWindmill);
// Fetch the entire list.
Windmill.KeyedGetDataRequest.Builder expectedRequest1 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addFetchRanges(SortedListRange.newBuilder().setStart(0).setLimit(5))
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES))
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_2)
.setStateFamily(STATE_FAMILY)
.addFetchRanges(SortedListRange.newBuilder().setStart(6).setLimit(10))
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES))
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_2)
.setStateFamily(STATE_FAMILY2)
.addFetchRanges(SortedListRange.newBuilder().setStart(11).setLimit(12))
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES));
Windmill.KeyedGetDataRequest.Builder expectedRequest2 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addFetchRanges(SortedListRange.newBuilder().setStart(5).setLimit(6))
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES));
Windmill.KeyedGetDataResponse.Builder response1 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
SortedListEntry.newBuilder().setValue(intData(5)).setSortKey(5000).setId(5))
.addFetchRanges(SortedListRange.newBuilder().setStart(0).setLimit(5)))
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_2)
.setStateFamily(STATE_FAMILY)
.addEntries(
SortedListEntry.newBuilder().setValue(intData(8)).setSortKey(8000).setId(8))
.addFetchRanges(SortedListRange.newBuilder().setStart(6).setLimit(10)))
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_2)
.setStateFamily(STATE_FAMILY2)
.addFetchRanges(SortedListRange.newBuilder().setStart(11).setLimit(12)));
Windmill.KeyedGetDataResponse.Builder response2 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
SortedListEntry.newBuilder().setValue(intData(6)).setSortKey(6000).setId(5))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(7)).setSortKey(7000).setId(7))
.addFetchRanges(SortedListRange.newBuilder().setStart(5).setLimit(6)));
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest1.build()))
.thenReturn(response1.build());
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest2.build()))
.thenReturn(response2.build());
// Trigger reads of batching. By fetching future2 which is not part of the first batch we ensure
// that all batches are fetched.
{
Iterable<TimestampedValue<Integer>> results = future2.get();
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest1.build());
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest2.build());
Mockito.verifyNoMoreInteractions(mockWindmill);
assertThat(
results,
Matchers.contains(
TimestampedValue.of(6, Instant.ofEpochMilli(6)),
TimestampedValue.of(7, Instant.ofEpochMilli(7))));
assertNoReader(future2);
}
{
Iterable<TimestampedValue<Integer>> results = future1.get();
assertThat(results, Matchers.contains(TimestampedValue.of(5, Instant.ofEpochMilli(5))));
assertNoReader(future1);
}
{
Iterable<TimestampedValue<Integer>> results = future3.get();
assertThat(results, Matchers.contains(TimestampedValue.of(8, Instant.ofEpochMilli(8))));
assertNoReader(future3);
}
{
Iterable<TimestampedValue<Integer>> results = future4.get();
assertThat(results, Matchers.emptyIterable());
assertNoReader(future4);
}
} |
@GetMapping("/selector")
public ShenyuAdminResult listPageSelectorDataPermissions(@RequestParam("currentPage") final Integer currentPage,
@RequestParam("pageSize") final Integer pageSize,
@RequestParam("userId") final String userId,
@RequestParam("pluginId") final String pluginId,
@RequestParam(value = "name", required = false) final String name) {
CommonPager<DataPermissionPageVO> selectorList = dataPermissionService.listSelectorsByPage(
new SelectorQuery(pluginId, name, new PageParameter(currentPage, pageSize)), userId);
return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, selectorList);
} | @Test
public void listPageSelectorDataPermissions() throws Exception {
Integer currentPage = 1;
Integer pageSize = 10;
String userId = "testUserId";
String pluginId = "testPluginId";
String name = "testName";
final PageParameter pageParameter = new PageParameter(currentPage, pageSize);
final CommonPager<DataPermissionPageVO> commonPager = new CommonPager<>(pageParameter, Collections.singletonList(dataPermissionPageVO));
given(this.dataPermissionService.listSelectorsByPage(
new SelectorQuery(pluginId, name, pageParameter), userId)).willReturn(commonPager);
this.mockMvc.perform(MockMvcRequestBuilders.get("/data-permission/selector")
.param("currentPage", String.valueOf(currentPage))
.param("pageSize", String.valueOf(pageSize))
.param("userId", userId)
.param("pluginId", pluginId)
.param("name", name))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
.andExpect(jsonPath("$.data.dataList[0].dataId", is(dataPermissionPageVO.getDataId())))
.andReturn();
} |
@Override
public void open() throws Exception {
this.timerService =
getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this);
this.keySet = new HashSet<>();
super.open();
} | @Test
void testCheckKey() throws Exception {
KeyedTwoInputNonBroadcastProcessOperator<Long, Integer, Long, Long> processOperator =
new KeyedTwoInputNonBroadcastProcessOperator<>(
new TwoInputNonBroadcastStreamProcessFunction<Integer, Long, Long>() {
@Override
public void processRecordFromFirstInput(
Integer record,
Collector<Long> output,
PartitionedContext ctx) {
output.collect(Long.valueOf(record));
}
@Override
public void processRecordFromSecondInput(
Long record, Collector<Long> output, PartitionedContext ctx) {
output.collect(record);
}
},
// -1 is an invalid key in this suite.
(out) -> -1L);
try (KeyedTwoInputStreamOperatorTestHarness<Long, Integer, Long, Long> testHarness =
new KeyedTwoInputStreamOperatorTestHarness<>(
processOperator,
(KeySelector<Integer, Long>) Long::valueOf,
(KeySelector<Long, Long>) value -> value,
Types.LONG)) {
testHarness.open();
assertThatThrownBy(() -> testHarness.processElement1(new StreamRecord<>(1)))
.isInstanceOf(IllegalStateException.class);
assertThatThrownBy(() -> testHarness.processElement2(new StreamRecord<>(1L)))
.isInstanceOf(IllegalStateException.class);
}
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
} | @Test
void invokePositive() {
FunctionTestUtil.assertResultBigDecimal(floorFunction.invoke(BigDecimal.valueOf(10.2)), BigDecimal.valueOf(10));
} |
public static SegmentGenerationJobSpec getSegmentGenerationJobSpec(String jobSpecFilePath, String propertyFilePath,
Map<String, Object> context, Map<String, String> environmentValues) {
Properties properties = new Properties();
if (propertyFilePath != null) {
try {
properties.load(FileUtils.openInputStream(new File(propertyFilePath)));
} catch (IOException e) {
throw new RuntimeException(
String.format("Unable to read property file [%s] into properties.", propertyFilePath), e);
}
}
Map<String, Object> propertiesMap = (Map) properties;
if (environmentValues != null) {
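// Environment values take precedence over entries loaded from the property file.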
for (String propertyName: propertiesMap.keySet()) {
if (environmentValues.get(propertyName) != null) {
propertiesMap.put(propertyName, environmentValues.get(propertyName));
}
}
}
if (context != null) {
propertiesMap.putAll(context);
}
String jobSpecTemplate;
try {
jobSpecTemplate = IOUtils.toString(new BufferedReader(new FileReader(jobSpecFilePath)));
} catch (IOException e) {
throw new RuntimeException(String.format("Unable to read ingestion job spec file [%s].", jobSpecFilePath), e);
}
String jobSpecStr;
try {
jobSpecStr = GroovyTemplateUtils.renderTemplate(jobSpecTemplate, propertiesMap);
} catch (Exception e) {
throw new RuntimeException(String
.format("Unable to render templates on ingestion job spec template file - [%s] with propertiesMap - [%s].",
jobSpecFilePath, Arrays.toString(propertiesMap.entrySet().toArray())), e);
}
String jobSpecFormat = (String) propertiesMap.getOrDefault(JOB_SPEC_FORMAT, YAML);
if (jobSpecFormat.equals(JSON)) {
try {
return JsonUtils.stringToObject(jobSpecStr, SegmentGenerationJobSpec.class);
} catch (IOException e) {
throw new RuntimeException(String
.format("Unable to parse job spec - [%s] to JSON with propertiesMap - [%s]", jobSpecFilePath,
Arrays.toString(propertiesMap.entrySet().toArray())), e);
}
}
return new Yaml().loadAs(jobSpecStr, SegmentGenerationJobSpec.class);
} | @Test
public void testIngestionJobLauncherWithJsonTemplate() {
SegmentGenerationJobSpec spec = IngestionJobLauncher.getSegmentGenerationJobSpec(
GroovyTemplateUtils.class.getClassLoader().getResource("ingestion_job_json_spec_template.json").getFile(),
GroovyTemplateUtils.class.getClassLoader().getResource("job_json.config").getFile(), null, null);
Assert.assertEquals(spec.getInputDirURI(), "file:///path/to/input/2020/07/22");
Assert.assertEquals(spec.getOutputDirURI(), "file:///path/to/output/2020/07/22");
Assert.assertEquals(spec.getSegmentCreationJobParallelism(), 0);
} |
static long getPidJiffies(VespaService service) {
int pid = service.getPid();
try {
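// /proc/<pid>/stat exposes the per-process CPU jiffies; a missing file means the process is gone.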
BufferedReader in = new BufferedReader(new FileReader("/proc/" + pid + "/stat"));
return getPidJiffies(in);
} catch (FileNotFoundException ex) {
log.log(Level.FINE, () -> "Unable to find pid " + pid + " in proc directory, for service " + service.getInstanceName());
service.setAlive(false);
return 0;
}
} | @Test
public void testPerProcessJiffies() {
assertEquals(PER_PROC_JIFFIES[0], SystemPoller.getPidJiffies(new BufferedReader(new StringReader(perProcStats[0]))));
assertEquals(PER_PROC_JIFFIES[1], SystemPoller.getPidJiffies(new BufferedReader(new StringReader(perProcStats[1]))));
} |
public static List<LayoutLocation> fromCompactListString(String compactList) {
List<LayoutLocation> locs = new ArrayList<>();
if (!Strings.isNullOrEmpty(compactList)) {
String[] items = compactList.split(TILDE);
for (String s : items) {
locs.add(fromCompactString(s));
}
}
return locs;
} | @Test
public void fromCompactListEmpty() {
List<LayoutLocation> locs = fromCompactListString("");
assertEquals("non-empty list", 0, locs.size());
} |
@Override
public NetworkId networkId() {
return networkId;
} | @Test
public void testEquality() {
DefaultVirtualDevice device1 =
new DefaultVirtualDevice(NetworkId.networkId(0), DID1);
DefaultVirtualDevice device2 =
new DefaultVirtualDevice(NetworkId.networkId(0), DID1);
DefaultVirtualDevice device3 =
new DefaultVirtualDevice(NetworkId.networkId(0), DID2);
DefaultVirtualDevice device4 =
new DefaultVirtualDevice(NetworkId.networkId(1), DID1);
new EqualsTester().addEqualityGroup(device1, device2).addEqualityGroup(device3)
.addEqualityGroup(device4).testEquals();
} |
static ConfigNode propsToNode(Map<String, String> properties) {
String rootNode = findRootNode(properties);
ConfigNode root = new ConfigNode(rootNode);
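// Strip the shared root prefix from each key; the remainder is parsed as a path under the root node.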
for (Map.Entry<String, String> e : properties.entrySet()) {
parseEntry(e.getKey().replaceFirst(rootNode + ".", ""), e.getValue(), root);
}
return root;
} | @Test(expected = InvalidConfigurationException.class)
public void shouldThrowWhenNoSharedRootNode() {
Map<String, String> m = new HashMap<>();
m.put("foo1.bar1", "1");
m.put("foo2.bar2", "2");
ConfigNode configNode = PropertiesToNodeConverter.propsToNode(m);
} |
public static synchronized void createMissingParents(File baseDir) {
checkNotNull(baseDir, "Base dir has to be provided.");
if (!baseDir.exists()) {
try {
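// Note: forceMkdirParent creates the parents of baseDir, not baseDir itself.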
FileUtils.forceMkdirParent(baseDir);
LOG.info("Created parents for base dir: {}", baseDir);
} catch (Exception e) {
throw new FlinkRuntimeException(
String.format("Failed to create parent(s) for given base dir: %s", baseDir),
e);
}
}
} | @Test
void testCreateMissingParents(@TempDir Path tempDir) {
File targetDir = tempDir.resolve("p1").resolve("p2").resolve("base-dir").toFile();
assertThat(targetDir.getParentFile().getParentFile()).doesNotExist();
ArtifactUtils.createMissingParents(targetDir);
assertThat(targetDir.getParentFile()).isDirectory();
} |
public RemotingCommand rewriteRequestForStaticTopic(final UpdateConsumerOffsetRequestHeader requestHeader,
final TopicQueueMappingContext mappingContext) {
try {
if (mappingContext.getMappingDetail() == null) {
return null;
}
TopicQueueMappingDetail mappingDetail = mappingContext.getMappingDetail();
if (!mappingContext.isLeader()) {
return buildErrorResponse(ResponseCode.NOT_LEADER_FOR_QUEUE, String.format("%s-%d does not exit in request process of current broker %s", requestHeader.getTopic(), requestHeader.getQueueId(), mappingDetail.getBname()));
}
Long globalOffset = requestHeader.getCommitOffset();
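// Map the global (logic-queue) offset back onto the physical queue that owns it.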
LogicQueueMappingItem mappingItem = TopicQueueMappingUtils.findLogicQueueMappingItem(mappingContext.getMappingItemList(), globalOffset, true);
requestHeader.setQueueId(mappingItem.getQueueId());
requestHeader.setLo(false);
requestHeader.setBrokerName(mappingItem.getBname());
requestHeader.setCommitOffset(mappingItem.computePhysicalQueueOffset(globalOffset));
//leader, let it go, do not need to rewrite the response
if (mappingDetail.getBname().equals(mappingItem.getBname())) {
return null;
}
RpcRequest rpcRequest = new RpcRequest(RequestCode.UPDATE_CONSUMER_OFFSET, requestHeader, null);
RpcResponse rpcResponse = this.brokerController.getBrokerOuterAPI().getRpcClient().invoke(rpcRequest, this.brokerController.getBrokerConfig().getForwardTimeout()).get();
if (rpcResponse.getException() != null) {
throw rpcResponse.getException();
}
return RpcClientUtils.createCommandForRpcResponse(rpcResponse);
} catch (Throwable t) {
return buildErrorResponse(ResponseCode.SYSTEM_ERROR, t.getMessage());
}
} | @Test
public void testRewriteRequestForStaticTopic() throws RpcException, ExecutionException, InterruptedException {
UpdateConsumerOffsetRequestHeader requestHeader = new UpdateConsumerOffsetRequestHeader();
requestHeader.setConsumerGroup(group);
requestHeader.setTopic(topic);
requestHeader.setQueueId(0);
requestHeader.setCommitOffset(0L);
RemotingCommand response = consumerManageProcessor.rewriteRequestForStaticTopic(requestHeader, mappingContext);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.NOT_LEADER_FOR_QUEUE);
List<LogicQueueMappingItem> items = new ArrayList<>();
LogicQueueMappingItem item = createLogicQueueMappingItem("BrokerC", 0, 0L, 0L);
items.add(item);
when(mappingContext.getMappingItemList()).thenReturn(items);
when(mappingContext.isLeader()).thenReturn(true);
RpcResponse rpcResponse = new RpcResponse(ResponseCode.SUCCESS,new UpdateConsumerOffsetResponseHeader(),null);
when(responseFuture.get()).thenReturn(rpcResponse);
response = consumerManageProcessor.rewriteRequestForStaticTopic(requestHeader, mappingContext);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
} |
@Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
return usage(args);
}
String action = args[0];
String name = args[1];
int result;
if (A_LOAD.equals(action)) {
result = loadClass(name);
} else if (A_CREATE.equals(action)) {
//first load to separate load errors from create
result = loadClass(name);
if (result == SUCCESS) {
//class loads, so instantiate it
result = createClassInstance(name);
}
} else if (A_RESOURCE.equals(action)) {
result = loadResource(name);
} else if (A_PRINTRESOURCE.equals(action)) {
result = dumpResource(name);
} else {
result = usage(args);
}
return result;
} | @Test
public void testLoadWithErrorInStaticInit() throws Throwable {
run(FindClass.E_LOAD_FAILED,
FindClass.A_LOAD,
"org.apache.hadoop.util.TestFindClass$FailInStaticInit");
} |
public int merge(final int key, final int value, final IntIntFunction remappingFunction)
{
requireNonNull(remappingFunction);
final int missingValue = this.missingValue;
if (missingValue == value)
{
throw new IllegalArgumentException("cannot accept missingValue");
}
final int[] entries = this.entries;
@DoNotSub final int mask = entries.length - 1;
@DoNotSub int index = Hashing.evenHash(key, mask);
int oldValue;
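// Entries are laid out as [key, value] pairs; linear-probe from the even hash slot
// until an empty slot (missingValue) or the matching key is found.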
while (missingValue != (oldValue = entries[index + 1]))
{
if (key == entries[index])
{
break;
}
index = next(index, mask);
}
final int newValue = missingValue == oldValue ? value : remappingFunction.apply(oldValue, value);
if (missingValue != newValue)
{
entries[index + 1] = newValue;
if (oldValue == missingValue)
{
entries[index] = key;
size++;
increaseCapacity();
}
}
else
{
entries[index + 1] = missingValue;
size--;
compactChain(index);
}
return newValue;
} | @Test
void mergeThrowsIllegalArgumentExceptionIfValueIsMissingValue()
{
final int missingValue = 42;
final Int2IntHashMap map = new Int2IntHashMap(missingValue);
final int key = -9;
final IntIntFunction remappingFunction = mock(IntIntFunction.class);
final IllegalArgumentException exception =
assertThrowsExactly(IllegalArgumentException.class, () -> map.merge(key, missingValue, remappingFunction));
assertEquals("cannot accept missingValue", exception.getMessage());
} |
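A minimal usage sketch of the merge semantics above, assuming Agrona's org.agrona.collections.Int2IntHashMap and IntIntFunction are available: a first merge inserts, a second combines via the remapping function, and a merge whose remapping function returns the missing value removes the entry.

import org.agrona.collections.Int2IntHashMap;

public class MergeDemo {
    public static void main(String[] args) {
        final int missingValue = -1;
        final Int2IntHashMap map = new Int2IntHashMap(missingValue);

        map.merge(7, 10, Integer::sum);             // key absent -> inserts 10
        map.merge(7, 5, Integer::sum);              // key present -> remaps to 15
        System.out.println(map.get(7));             // 15

        map.merge(7, 1, (old, v) -> missingValue);  // remapping to missingValue removes the entry
        System.out.println(map.containsKey(7));     // false
    }
}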
@Override
public boolean isAvailable() {
return true;
} | @Test
public void testIsAvailable() {
assertTrue(randomLoadBalancerProvider.isAvailable());
} |
public List<String> getJvmFlags() {
return jvmFlags;
} | @Test
public void testParse_jvmFlags() {
Jar jarCommand =
CommandLine.populateCommand(
new Jar(), "--target=test-image-ref", "--jvm-flags=jvm-flag1,jvm-flag2", "my-app.jar");
assertThat(jarCommand.getJvmFlags()).isEqualTo(ImmutableList.of("jvm-flag1", "jvm-flag2"));
} |
public SimpleRabbitListenerContainerFactory decorateSimpleRabbitListenerContainerFactory(
SimpleRabbitListenerContainerFactory factory
) {
return decorateRabbitListenerContainerFactory(factory);
} | @Test void decorateSimpleRabbitListenerContainerFactory_appends_TracingMessagePostProcessor_when_absent() {
SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
factory.setBeforeSendReplyPostProcessors(new UnzipPostProcessor());
assertThat(rabbitTracing.decorateSimpleRabbitListenerContainerFactory(factory))
.extracting("beforeSendReplyPostProcessors")
.asInstanceOf(array(MessagePostProcessor[].class))
.matches(adviceArray -> adviceArray[1] instanceof TracingMessagePostProcessor);
} |
@Retries.RetryTranslated
public void retry(String action,
String path,
boolean idempotent,
Retried retrying,
InvocationRaisingIOE operation)
throws IOException {
retry(action, path, idempotent, retrying,
() -> {
operation.apply();
return null;
});
} | @Test
public void testRetryAWSConnectivity() throws Throwable {
final AtomicInteger counter = new AtomicInteger(0);
invoker.retry("test", null, false,
() -> {
if (counter.incrementAndGet() < ACTIVE_RETRY_LIMIT) {
throw CLIENT_TIMEOUT_EXCEPTION;
}
});
assertEquals(ACTIVE_RETRY_LIMIT, counter.get());
} |
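The overload above adapts a void, IOException-throwing operation into the value-returning form the underlying retry expects. A generic sketch of that adaptation follows; the names here are illustrative, not taken from the S3A codebase.

import java.io.IOException;
import java.util.concurrent.Callable;

public class VoidAdapterDemo {
    // illustrative stand-in for InvocationRaisingIOE
    interface IOERunnable {
        void run() throws IOException;
    }

    // wrap a void operation so it can be passed where a Callable<Void> is required
    static Callable<Void> asCallable(IOERunnable op) {
        return () -> {
            op.run();
            return null;
        };
    }

    public static void main(String[] args) throws Exception {
        Callable<Void> c = asCallable(() -> System.out.println("side effect only"));
        c.call(); // runs the side effect, then yields null
    }
}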
public void set(int index, E value) {
assert value != null;
Storage32 newStorage = storage.set(index, value);
if (newStorage != storage) {
storage = newStorage;
}
} | @Test
public void testSetSparseToDense32() {
// add some sparse entries
for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
set(i * 2);
verify();
}
// restore density
for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
set(i * 2 + 1);
verify();
}
// make sure we can still insert something
for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE * 5; ++i) {
set(i);
verify();
}
} |
public static JsonElement parseString(String json) throws JsonSyntaxException {
return parseReader(new StringReader(json));
} | @Test
public void testParseUnquotedSingleWordStringFails() {
assertThat(JsonParser.parseString("Test").getAsString()).isEqualTo("Test");
} |
@Override
public Optional<Period> chooseBin(final List<Period> availablePeriods, final QueryExecutionStats stats) {
return availablePeriods.stream()
.filter(per -> matches(per, stats.effectiveTimeRange()))
.findFirst();
} | @Test
void testReturnsEmptyOptionalWhenRangeExceedsLongestPeriod() {
assertTrue(
toTest.chooseBin(
List.of(Period.days(1), Period.days(2)),
getQueryExecutionStats(13, AbsoluteRange.create(
DateTime.now(DateTimeZone.UTC).minusDays(2).minusHours(1),
DateTime.now(DateTimeZone.UTC))
)
).isEmpty());
} |
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
}
LoggerContext rootContext = getRootContext();
logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value));
logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value));
Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE;
logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled));
return rootContext;
} | @Test
public void apply_sets_logger_to_process_property_over_global_property_if_both_set() {
LogLevelConfig config = newLogLevelConfig().rootLevelFor(WEB_SERVER).build();
props.set("sonar.log.level", "DEBUG");
props.set("sonar.log.level.web", "TRACE");
LoggerContext context = underTest.apply(config, props);
assertThat(context.getLogger(ROOT_LOGGER_NAME).getLevel()).isEqualTo(Level.TRACE);
} |
@Override
public ApplicationAttemptId getApplicationAttemptId() {
return this.appAttemptId;
} | @Test (timeout = 180000)
public void testStoreAllContainerMetrics() throws Exception {
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
conf.setBoolean(
YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
true);
MockRM rm1 = new MockRM(conf);
SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
rm1.getRMContext().setSystemMetricsPublisher(publisher);
rm1.start();
MockNM nm1 = rm1.registerNode("unknownhost:1234", 8000);
RMApp app1 = MockRMAppSubmitter.submitWithMemory(1024, rm1);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.RUNNING);
// request a container.
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
ContainerId containerId2 = ContainerId.newContainerId(
am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
am1.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>())
.getAllocatedContainers();
rm1.waitForState(nm1, containerId2, RMContainerState.ACQUIRED);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.COMPLETE);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
rm1.waitForState(nm1, containerId2, RMContainerState.COMPLETED);
rm1.stop();
// RMContainer should be publishing system metrics for all containers.
// Since there is 1 AM container and 1 non-AM container, there should be 2
// container created events and 2 container finished events.
verify(publisher, times(2)).containerCreated(any(RMContainer.class),
anyLong());
verify(publisher, times(2)).containerFinished(any(RMContainer.class), anyLong());
} |
public AssessmentResult verify(
final Action expectedAction,
final String input,
final String ip) throws IOException {
final String[] parts = input.split("\\" + SEPARATOR, 4);
// all four parts are required here: provider prefix, site key, action and token
if (parts.length < 4) {
throw new BadRequestException("too few parts");
}
final String prefix = parts[0];
final String siteKey = parts[1].toLowerCase(Locale.ROOT).strip();
final String action = parts[2];
String token = parts[3];
String provider = prefix;
if (prefix.endsWith(SHORT_SUFFIX)) {
// This is a "short" solution that points to the actual solution. We need to fetch the
// full solution before proceeding
provider = prefix.substring(0, prefix.length() - SHORT_SUFFIX.length());
token = shortCodeExpander.retrieve(token).orElseThrow(() -> new BadRequestException("invalid shortcode"));
}
final CaptchaClient client = this.captchaClientMap.get(provider);
if (client == null) {
throw new BadRequestException("invalid captcha scheme");
}
final Action parsedAction = Action.parse(action)
.orElseThrow(() -> {
Metrics.counter(INVALID_ACTION_COUNTER_NAME, "action", action).increment();
return new BadRequestException("invalid captcha action");
});
if (!parsedAction.equals(expectedAction)) {
Metrics.counter(INVALID_ACTION_COUNTER_NAME, "action", action).increment();
throw new BadRequestException("invalid captcha action");
}
final Set<String> allowedSiteKeys = client.validSiteKeys(parsedAction);
if (!allowedSiteKeys.contains(siteKey)) {
logger.debug("invalid site-key {}, action={}, token={}", siteKey, action, token);
Metrics.counter(INVALID_SITEKEY_COUNTER_NAME, "action", action).increment();
throw new BadRequestException("invalid captcha site-key");
}
final AssessmentResult result = client.verify(siteKey, parsedAction, token, ip);
Metrics.counter(ASSESSMENTS_COUNTER_NAME,
"action", action,
"score", result.getScoreString(),
"provider", provider)
.increment();
return result;
} | @Test
public void choose() throws IOException {
String ainput = String.join(SEPARATOR, PREFIX_A, CHALLENGE_SITE_KEY, "challenge", TOKEN);
String binput = String.join(SEPARATOR, PREFIX_B, CHALLENGE_SITE_KEY, "challenge", TOKEN);
final CaptchaClient a = mockClient(PREFIX_A);
final CaptchaClient b = mockClient(PREFIX_B);
new CaptchaChecker(null, List.of(a, b)).verify(Action.CHALLENGE, ainput, null);
verify(a, times(1)).verify(any(), any(), any(), any());
new CaptchaChecker(null, List.of(a, b)).verify(Action.CHALLENGE, binput, null);
verify(b, times(1)).verify(any(), any(), any(), any());
} |
@Override
public void run() {
if (backgroundJobServer.isNotReadyToProcessJobs()) return;
try (PeriodicTaskRunInfo runInfo = taskStatistics.startRun(backgroundJobServerConfiguration())) {
tasks.forEach(task -> task.run(runInfo));
runInfo.markRunAsSucceeded();
} catch (Exception e) {
taskStatistics.handleException(e);
if (taskStatistics.hasTooManyExceptions()) {
if (e instanceof StorageException) {
LOGGER.error("FATAL - JobRunr encountered too many storage exceptions. Shutting down. Did you know JobRunr Pro has built-in database fault tolerance? Check out https://www.jobrunr.io/en/documentation/pro/database-fault-tolerance/", e);
} else {
LOGGER.error("FATAL - JobRunr encountered too many processing exceptions. Shutting down.", shouldNotHappenException(e));
}
backgroundJobServer.stop();
} else {
LOGGER.warn(JobRunrException.SHOULD_NOT_HAPPEN_MESSAGE + " - Processing will continue.", e);
}
}
} | @Test
void jobHandlersStopsBackgroundJobServerAndLogsStorageProviderExceptionIfTooManyStorageExceptions() {
Task mockedTask = mockTaskThatThrows(new StorageException("a storage exception"));
JobHandler jobHandler = createJobHandlerWithTask(mockedTask);
for (int i = 0; i <= 5; i++) {
jobHandler.run();
}
verify(backgroundJobServer).stop();
assertThat(logger).hasErrorMessage("FATAL - JobRunr encountered too many storage exceptions. Shutting down. Did you know JobRunr Pro has built-in database fault tolerance? Check out https://www.jobrunr.io/en/documentation/pro/database-fault-tolerance/");
} |
@Override
public void upgrade() {
if (hasBeenRunSuccessfully()) {
LOG.debug("Migration already completed.");
return;
}
final Set<String> dashboardIdToViewId = new HashSet<>();
final Consumer<String> recordMigratedDashboardIds = dashboardIdToViewId::add;
final Map<String, Set<String>> widgetIdMigrationMapping = new HashMap<>();
final Consumer<Map<String, Set<String>>> recordMigratedWidgetIds = widgetIdMigrationMapping::putAll;
final Map<View, Search> newViews = this.dashboardsService.streamAll()
.sorted(Comparator.comparing(Dashboard::id))
.map(dashboard -> migrateDashboard(dashboard, recordMigratedDashboardIds, recordMigratedWidgetIds))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
writeViews(newViews);
final MigrationCompleted migrationCompleted = MigrationCompleted.create(dashboardIdToViewId, widgetIdMigrationMapping);
writeMigrationCompleted(migrationCompleted);
} | @Test
@MongoDBFixtures("sample_dashboard_with_unknown_widget.json")
public void migrateSampleDashboardWithUnknownWidget() {
this.migration.upgrade();
final MigrationCompleted migrationCompleted = captureMigrationCompleted();
assertThat(migrationCompleted.migratedDashboardIds()).containsExactly("5c7fc3f9f38ed741ac154697");
assertThat(migrationCompleted.widgetMigrationIds()).containsAllEntriesOf(
ImmutableMap.<String, Set<String>>builder()
.put("05b03c7b-fe23-4789-a1c8-a38a583d3ba6", ImmutableSet.of("0000016e-b690-426f-0000-016eb690426f"))
.put("10c1b3f9-6b34-4b34-9457-892d12b84151", ImmutableSet.of("0000016e-b690-4270-0000-016eb690426f"))
.put("2afb1838-24ee-489f-929f-ef7d47485021", ImmutableSet.of("0000016e-b690-4271-0000-016eb690426f"))
.put("40c9cf4e-0956-4dc1-9ccd-83868fa83277", ImmutableSet.of("0000016e-b690-4272-0000-016eb690426f"))
.put("4a192616-51d3-474e-9e18-a680f2577769", ImmutableSet.of("0000016e-b690-4273-0000-016eb690426f"))
.put("5020d62d-24a0-4b0c-8819-78e668cc2428", ImmutableSet.of("5020d62d-24a0-4b0c-8819-78e668cc2428"))
.put("6f2cc355-bcbb-4b3f-be01-bfba299aa51a", ImmutableSet.of("0000016e-b690-4274-0000-016eb690426f", "0000016e-b690-4275-0000-016eb690426f"))
.put("76b7f7e1-76ac-486b-894b-bc31bf4808f1", ImmutableSet.of("0000016e-b690-4276-0000-016eb690426f"))
.put("91b37752-e3a8-4274-910f-3d66d19f1028", ImmutableSet.of("0000016e-b690-4277-0000-016eb690426f"))
.put("9b55d975-a5d4-4df6-8b2e-6fc7b48d52c3", ImmutableSet.of("0000016e-b690-4278-0000-016eb690426f"))
.put("a8eadf94-6494-4271-8c0e-3c8d08e65623", ImmutableSet.of("0000016e-b690-4279-0000-016eb690426f"))
.put("d9be20a1-82d7-427b-8a2d-c7ea9cd114de", ImmutableSet.of("0000016e-b690-427a-0000-016eb690426f"))
.put("da111daa-0d0a-47b9-98ed-8b8aa8a4f575", ImmutableSet.of("0000016e-b690-427b-0000-016eb690426f"))
.put("e9efdfaf-f7be-47ca-97fe-871c05a24d3c", ImmutableSet.of("0000016e-b690-427c-0000-016eb690426f"))
.put("f6e9d960-9cc8-4d16-b3c8-770501b2709f", ImmutableSet.of("0000016e-b690-427d-0000-016eb690426f"))
.build()
);
final ArgumentCaptor<View> viewCaptor = ArgumentCaptor.forClass(View.class);
verify(viewService, times(1)).save(viewCaptor.capture());
final View view = viewCaptor.getValue();
final Optional<ViewWidget> nonImplementedWidget = view.state().get("0000016e-b690-428f-0000-016eb690426f").widgets()
.stream()
.filter(widget -> widget instanceof NonImplementedWidget)
.findFirst();
assertThat(nonImplementedWidget).isPresent();
assertThat(nonImplementedWidget.get()).isEqualTo(NonImplementedWidget.create(
"5020d62d-24a0-4b0c-8819-78e668cc2428",
"TOTALLY_UNKNOWN_WIDGET",
ImmutableMap.<String, Object>builder()
.put("valuetype", "total")
.put("renderer", "line")
.put("interpolation", "linear")
.put("timerange", ImmutableMap.<String, Object>of(
"type", "relative",
"range", 28800
))
.put("rangeType", "relative")
.put("field", "nf_bytes")
.put("query", "")
.put("interval", "minute")
.put("relative", 28800)
.build()
)
);
} |
@Override
public V put(K key, V value, Duration ttl) {
return get(putAsync(key, value, ttl));
} | @Test
public void testRemoveValueTTL() throws InterruptedException {
RMapCacheNative<SimpleKey, SimpleValue> map = redisson.getMapCacheNative("simple");
map.put(new SimpleKey("1"), new SimpleValue("2"), Duration.ofSeconds(1));
boolean res = map.remove(new SimpleKey("1"), new SimpleValue("2"));
Assertions.assertTrue(res);
SimpleValue val1 = map.get(new SimpleKey("1"));
Assertions.assertNull(val1);
Assertions.assertEquals(0, map.size());
map.put(new SimpleKey("3"), new SimpleValue("4"), Duration.ofSeconds(1));
Thread.sleep(1000);
assertThat(map.remove(new SimpleKey("3"), new SimpleValue("4"))).isFalse();
assertThat(map.get(new SimpleKey("3"))).isNull();
map.destroy();
} |
@Override
public String toString() {
var roles = Arrays.toString(this.roles.keySet().toArray());
return "Customer{roles=" + roles + "}";
} | @Test
void toStringTest() {
var core = new CustomerCore();
core.addRole(Role.BORROWER);
assertEquals("Customer{roles=[BORROWER]}", core.toString());
core = new CustomerCore();
core.addRole(Role.INVESTOR);
assertEquals("Customer{roles=[INVESTOR]}", core.toString());
core = new CustomerCore();
assertEquals("Customer{roles=[]}", core.toString());
} |
public RingbufferConfig setAsyncBackupCount(int asyncBackupCount) {
this.asyncBackupCount = checkAsyncBackupCount(backupCount, asyncBackupCount);
return this;
} | @Test
public void setAsyncBackupCount() {
RingbufferConfig config = new RingbufferConfig(NAME);
config.setAsyncBackupCount(4);
assertEquals(4, config.getAsyncBackupCount());
} |
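setAsyncBackupCount validates the new async count against the already-configured synchronous backupCount. A brief usage sketch, assuming Hazelcast's rule that the combined synchronous plus asynchronous backup count must stay within its allowed maximum:

import com.hazelcast.config.RingbufferConfig;

public class BackupCountDemo {
    public static void main(String[] args) {
        RingbufferConfig config = new RingbufferConfig("rb");
        config.setBackupCount(1);      // synchronous backups
        config.setAsyncBackupCount(2); // asynchronous backups, validated against backupCount
        // combined total is 3; an async count pushing the combined total past
        // Hazelcast's maximum would make setAsyncBackupCount throw
        System.out.println(config.getBackupCount() + config.getAsyncBackupCount()); // 3
    }
}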
private String getEnv(String envName, InterpreterLaunchContext context) {
String env = context.getProperties().getProperty(envName);
if (StringUtils.isBlank(env)) {
env = System.getenv(envName);
}
if (StringUtils.isBlank(env)) {
LOGGER.warn("environment variable: {} is empty", envName);
}
return env;
} | @Test
void testConnectTimeOut() throws IOException {
SparkInterpreterLauncher launcher = new SparkInterpreterLauncher(zConf, null);
Properties properties = new Properties();
properties.setProperty("SPARK_HOME", sparkHome);
properties.setProperty(
ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_CONNECT_TIMEOUT.getVarName(), "10000");
InterpreterOption option = new InterpreterOption();
option.setUserImpersonate(true);
InterpreterLaunchContext context = new InterpreterLaunchContext(properties, option, null, "user1", "intpGroupId", "groupId", "groupName", "name", 0, "host");
InterpreterClient client = launcher.launch(context);
assertTrue(client instanceof ExecRemoteInterpreterProcess);
try (ExecRemoteInterpreterProcess interpreterProcess = (ExecRemoteInterpreterProcess) client) {
assertEquals("name", interpreterProcess.getInterpreterSettingName());
assertEquals(zeppelinHome + "/interpreter/groupName", interpreterProcess.getInterpreterDir());
assertEquals(zeppelinHome + "/local-repo/groupId", interpreterProcess.getLocalRepoDir());
assertEquals(10000, interpreterProcess.getConnectTimeout());
assertEquals(zConf.getInterpreterRemoteRunnerPath(), interpreterProcess.getInterpreterRunner());
assertTrue(interpreterProcess.getEnv().size() >= 2);
assertEquals(true, interpreterProcess.isUserImpersonated());
}
} |
@Override
@MethodNotAvailable
public CompletionStage<Void> setAsync(K key, V value) {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testSetAsyncWithTtl() {
adapter.setAsync(42, "value", 1, TimeUnit.MILLISECONDS);
} |
@Udf(description = "Splits a string into an array of substrings based on a delimiter.")
public List<String> split(
@UdfParameter(
description = "The string to be split. If NULL, then function returns NULL.")
final String string,
@UdfParameter(
description = "The delimiter to split a string by. If NULL, then function returns NULL.")
final String delimiter) {
if (string == null || delimiter == null) {
return null;
}
// Java split() accepts regular expressions as a delimiter, but the behavior of this UDF split()
// is to accept only literal strings. This method uses Guava Splitter instead, which does not
// accept any regex pattern. This avoids confusing users when splitting by regex
// special characters, such as '.' and '|'.
try {
// Guava Splitter does not accept empty delimiters. Use the Java split() method instead.
if (delimiter.isEmpty()) {
return Arrays.asList(EMPTY_DELIMITER.split(string));
} else {
return Splitter.on(delimiter).splitToList(string);
}
} catch (final Exception e) {
throw new KsqlFunctionException(
String.format("Invalid delimiter '%s' in the split() function.", delimiter), e);
}
} | @Test
public void shouldSplitBytesByGivenDelimiter() {
assertThat(
splitUdf.split(
X_DASH_Y_BYTES,
ByteBuffer.wrap(new byte[]{'-'})),
contains(
ByteBuffer.wrap(new byte[]{'x'}),
ByteBuffer.wrap(new byte[]{'y'})));
assertThat(
splitUdf.split(
X_DASH_Y_BYTES,
ByteBuffer.wrap(new byte[]{'x'})),
contains(ByteBuffer.wrap(new byte[]{}),
ByteBuffer.wrap(new byte[]{'-','y'})));
assertThat(
splitUdf.split(
X_DASH_Y_BYTES,
ByteBuffer.wrap(new byte[]{'y'})),
contains(
ByteBuffer.wrap(new byte[]{'x', '-'}),
ByteBuffer.wrap(new byte[]{})));
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'a', '.', 'b', '.', 'c', '.', 'd'}),
ByteBuffer.wrap(new byte[]{'.'})),
contains(
ByteBuffer.wrap(new byte[]{'a'}),
ByteBuffer.wrap(new byte[]{'b'}),
ByteBuffer.wrap(new byte[]{'c'}),
ByteBuffer.wrap(new byte[]{'d'})));
} |
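The comment in split() above captures the key design point: java.lang.String#split treats its delimiter as a regex, while Guava's Splitter treats it literally. A small standalone comparison, using plain Guava outside the UDF:

import com.google.common.base.Splitter;
import java.util.Arrays;
import java.util.List;

public class SplitterDemo {
    public static void main(String[] args) {
        // String#split interprets "." as a regex matching any character,
        // and trailing empty strings are removed, so the result is empty.
        String[] regexSplit = "a.b.c".split(".");
        System.out.println(Arrays.toString(regexSplit)); // []

        // Guava's Splitter takes the delimiter literally.
        List<String> literalSplit = Splitter.on(".").splitToList("a.b.c");
        System.out.println(literalSplit); // [a, b, c]
    }
}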
@SuppressWarnings("DataFlowIssue")
public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket, final ConnectionSession connectionSession) throws SQLException {
if (commandPacket instanceof SQLReceivedPacket) {
log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
} else {
log.debug("Execute packet type: {}", commandPacketType);
}
switch (commandPacketType) {
case COM_QUIT:
return new MySQLComQuitExecutor();
case COM_INIT_DB:
return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession);
case COM_FIELD_LIST:
return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession);
case COM_QUERY:
return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession);
case COM_PING:
return new MySQLComPingExecutor(connectionSession);
case COM_STMT_PREPARE:
return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession);
case COM_STMT_EXECUTE:
return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession);
case COM_STMT_SEND_LONG_DATA:
return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession);
case COM_STMT_RESET:
return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession);
case COM_STMT_CLOSE:
return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession);
case COM_SET_OPTION:
return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession);
case COM_RESET_CONNECTION:
return new MySQLComResetConnectionExecutor(connectionSession);
default:
return new MySQLUnsupportedCommandExecutor(commandPacketType);
}
} | @Test
void assertNewInstanceWithComResetConnection() throws SQLException {
assertThat(MySQLCommandExecutorFactory.newInstance(MySQLCommandPacketType.COM_RESET_CONNECTION, mock(MySQLComSetOptionPacket.class), connectionSession),
instanceOf(MySQLComResetConnectionExecutor.class));
} |
@Override
public RENAME3Response rename(XDR xdr, RpcInfo info) {
return rename(xdr, getSecurityHandler(info), info.remoteAddress());
} | @Test(timeout = 60000)
public void testRename() throws Exception {
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
int namenodeId = Nfs3Utils.getNamenodeId(config);
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId, namenodeId);
RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar");
req.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a privileged user should pass.
RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
response2.getStatus());
} |
@Nullable
Integer getHttpTimeout() {
return httpTimeout;
} | @Test
public void testGetHttpTimeout() {
Request request = Request.builder().build();
Assert.assertNull(request.getHttpTimeout());
} |
@Override
public SqlRequest refactor(QueryParamEntity entity, Object... args) {
if (injector == null) {
initInjector();
}
return injector.refactor(entity, args);
} | @Test
void testWith() {
QueryAnalyzerImpl analyzer = new QueryAnalyzerImpl(
database,
"WITH RECURSIVE Tree AS (\n" +
"\n" +
" SELECT id\n" +
" FROM s_test\n" +
" WHERE id = ? \n" +
"\t\n" +
" UNION ALL\n" +
"\t\n" +
" SELECT ai.id\n" +
" FROM s_test AS ai\n" +
" INNER JOIN Tree AS tr ON ai.id = tr.id\n" +
")\n" +
"SELECT t1.id\n" +
"FROM Tree AS t1;");
SqlRequest request = analyzer
.refactor(QueryParamEntity.of().and("id", "eq", "test"), 1);
System.out.println(request);
} |
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
} | @Test
public void testBraceletOfClayUseTrahaearn1FreeInvSlot()
{
// Equip bracelet of clay
ItemContainer equipmentItemContainer = mock(ItemContainer.class);
when(client.getItemContainer(InventoryID.EQUIPMENT))
.thenReturn(equipmentItemContainer);
when(equipmentItemContainer.contains(ItemID.BRACELET_OF_CLAY))
.thenReturn(true);
// Set inventory to 1 free slots
ItemContainer inventoryItemContainer = mock(ItemContainer.class);
when(inventoryItemContainer.count())
.thenReturn(27);
when(client.getItemContainer(InventoryID.INVENTORY))
.thenReturn(inventoryItemContainer);
// Verify bracelet of clay charges were not changed
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", USED_BRACELET_OF_CLAY_TRAHAEARN, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager, Mockito.times(0)).setRSProfileConfiguration(Mockito.anyString(), Mockito.anyString(), Mockito.anyInt());
} |
public static void mergeMetrics(ClusterMetricsInfo metrics,
ClusterMetricsInfo metricsResponse) {
metrics.setAppsSubmitted(
metrics.getAppsSubmitted() + metricsResponse.getAppsSubmitted());
metrics.setAppsCompleted(
metrics.getAppsCompleted() + metricsResponse.getAppsCompleted());
metrics.setAppsPending(
metrics.getAppsPending() + metricsResponse.getAppsPending());
metrics.setAppsRunning(
metrics.getAppsRunning() + metricsResponse.getAppsRunning());
metrics.setAppsFailed(
metrics.getAppsFailed() + metricsResponse.getAppsFailed());
metrics.setAppsKilled(
metrics.getAppsKilled() + metricsResponse.getAppsKilled());
metrics.setReservedMB(
metrics.getReservedMB() + metricsResponse.getReservedMB());
metrics.setAvailableMB(
metrics.getAvailableMB() + metricsResponse.getAvailableMB());
metrics.setAllocatedMB(
metrics.getAllocatedMB() + metricsResponse.getAllocatedMB());
metrics.setReservedVirtualCores(metrics.getReservedVirtualCores()
+ metricsResponse.getReservedVirtualCores());
metrics.setAvailableVirtualCores(metrics.getAvailableVirtualCores()
+ metricsResponse.getAvailableVirtualCores());
metrics.setAllocatedVirtualCores(metrics.getAllocatedVirtualCores()
+ metricsResponse.getAllocatedVirtualCores());
metrics.setContainersAllocated(metrics.getContainersAllocated()
+ metricsResponse.getContainersAllocated());
metrics.setContainersReserved(metrics.getReservedContainers()
+ metricsResponse.getReservedContainers());
metrics.setContainersPending(metrics.getPendingContainers()
+ metricsResponse.getPendingContainers());
metrics.setTotalMB(metrics.getTotalMB()
+ metricsResponse.getTotalMB());
metrics.setUtilizedMB(metrics.getUtilizedMB()
+ metricsResponse.getUtilizedMB());
metrics.setTotalVirtualCores(metrics.getTotalVirtualCores()
+ metricsResponse.getTotalVirtualCores());
metrics.setTotalNodes(metrics.getTotalNodes()
+ metricsResponse.getTotalNodes());
metrics.setUtilizedVirtualCores(metrics.getUtilizedVirtualCores()
+ metricsResponse.getUtilizedVirtualCores());
metrics.setLostNodes(metrics.getLostNodes()
+ metricsResponse.getLostNodes());
metrics.setUnhealthyNodes(metrics.getUnhealthyNodes()
+ metricsResponse.getUnhealthyNodes());
metrics.setDecommissioningNodes(metrics.getDecommissioningNodes()
+ metricsResponse.getDecommissioningNodes());
metrics.setDecommissionedNodes(metrics.getDecommissionedNodes()
+ metricsResponse.getDecommissionedNodes());
metrics.setRebootedNodes(metrics.getRebootedNodes()
+ metricsResponse.getRebootedNodes());
metrics.setActiveNodes(metrics.getActiveNodes()
+ metricsResponse.getActiveNodes());
metrics.setShutdownNodes(metrics.getShutdownNodes()
+ metricsResponse.getShutdownNodes());
int utilizedVirtualCoresPercent = metrics.getTotalVirtualCores() <= 0 ? 0 :
(int) (metrics.getUtilizedVirtualCores() * 100 / metrics.getTotalVirtualCores());
metrics.setUtilizedVirtualCoresPercent(utilizedVirtualCoresPercent);
int utilizedMBPercent = metrics.getTotalMB() <= 0 ? 0 :
(int) (metrics.getUtilizedMB() * 100 / metrics.getTotalMB());
metrics.setUtilizedMBPercent(utilizedMBPercent);
} | @Test
public void testMergeMetrics() {
ClusterMetricsInfo metrics = new ClusterMetricsInfo();
ClusterMetricsInfo metricsResponse = new ClusterMetricsInfo();
long seed = System.currentTimeMillis();
setUpClusterMetrics(metrics, seed);
// ensure that we don't reuse the same seed when setting up metricsResponse
// or it might mask bugs
seed += 1000000000;
setUpClusterMetrics(metricsResponse, seed);
ClusterMetricsInfo metricsClone = createClusterMetricsClone(metrics);
RouterWebServiceUtil.mergeMetrics(metrics, metricsResponse);
Assert.assertEquals(
metricsResponse.getAppsSubmitted() + metricsClone.getAppsSubmitted(),
metrics.getAppsSubmitted());
Assert.assertEquals(
metricsResponse.getAppsCompleted() + metricsClone.getAppsCompleted(),
metrics.getAppsCompleted());
Assert.assertEquals(
metricsResponse.getAppsPending() + metricsClone.getAppsPending(),
metrics.getAppsPending());
Assert.assertEquals(
metricsResponse.getAppsRunning() + metricsClone.getAppsRunning(),
metrics.getAppsRunning());
Assert.assertEquals(
metricsResponse.getAppsFailed() + metricsClone.getAppsFailed(),
metrics.getAppsFailed());
Assert.assertEquals(
metricsResponse.getAppsKilled() + metricsClone.getAppsKilled(),
metrics.getAppsKilled());
Assert.assertEquals(
metricsResponse.getReservedMB() + metricsClone.getReservedMB(),
metrics.getReservedMB());
Assert.assertEquals(
metricsResponse.getAvailableMB() + metricsClone.getAvailableMB(),
metrics.getAvailableMB());
Assert.assertEquals(
metricsResponse.getAllocatedMB() + metricsClone.getAllocatedMB(),
metrics.getAllocatedMB());
Assert.assertEquals(
metricsResponse.getReservedVirtualCores()
+ metricsClone.getReservedVirtualCores(),
metrics.getReservedVirtualCores());
Assert.assertEquals(
metricsResponse.getAvailableVirtualCores()
+ metricsClone.getAvailableVirtualCores(),
metrics.getAvailableVirtualCores());
Assert.assertEquals(
metricsResponse.getAllocatedVirtualCores()
+ metricsClone.getAllocatedVirtualCores(),
metrics.getAllocatedVirtualCores());
Assert.assertEquals(
metricsResponse.getContainersAllocated()
+ metricsClone.getContainersAllocated(),
metrics.getContainersAllocated());
Assert.assertEquals(
metricsResponse.getReservedContainers()
+ metricsClone.getReservedContainers(),
metrics.getReservedContainers());
Assert.assertEquals(
metricsResponse.getPendingContainers()
+ metricsClone.getPendingContainers(),
metrics.getPendingContainers());
Assert.assertEquals(
metricsResponse.getTotalMB() + metricsClone.getTotalMB(),
metrics.getTotalMB());
Assert.assertEquals(
metricsResponse.getUtilizedMB() + metricsClone.getUtilizedMB(),
metrics.getUtilizedMB());
Assert.assertEquals(
metricsResponse.getTotalVirtualCores()
+ metricsClone.getTotalVirtualCores(),
metrics.getTotalVirtualCores());
Assert.assertEquals(
metricsResponse.getUtilizedVirtualCores() + metricsClone.getUtilizedVirtualCores(),
metrics.getUtilizedVirtualCores());
Assert.assertEquals(
metricsResponse.getTotalNodes() + metricsClone.getTotalNodes(),
metrics.getTotalNodes());
Assert.assertEquals(
metricsResponse.getLostNodes() + metricsClone.getLostNodes(),
metrics.getLostNodes());
Assert.assertEquals(
metricsResponse.getUnhealthyNodes() + metricsClone.getUnhealthyNodes(),
metrics.getUnhealthyNodes());
Assert.assertEquals(
metricsResponse.getDecommissioningNodes()
+ metricsClone.getDecommissioningNodes(),
metrics.getDecommissioningNodes());
Assert.assertEquals(
metricsResponse.getDecommissionedNodes()
+ metricsClone.getDecommissionedNodes(),
metrics.getDecommissionedNodes());
Assert.assertEquals(
metricsResponse.getRebootedNodes() + metricsClone.getRebootedNodes(),
metrics.getRebootedNodes());
Assert.assertEquals(
metricsResponse.getActiveNodes() + metricsClone.getActiveNodes(),
metrics.getActiveNodes());
Assert.assertEquals(
metricsResponse.getShutdownNodes() + metricsClone.getShutdownNodes(),
metrics.getShutdownNodes());
} |
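The two percent fields computed at the end of mergeMetrics guard against division by zero and rely on integer arithmetic. The same calculation, isolated as a hypothetical helper:

public class PercentDemo {
    // zero-guarded integer percentage, mirroring the calculation in mergeMetrics
    static int percentOf(long used, long total) {
        return total <= 0 ? 0 : (int) (used * 100 / total);
    }

    public static void main(String[] args) {
        System.out.println(percentOf(512, 2048)); // 25
        System.out.println(percentOf(10, 0));     // 0 rather than an ArithmeticException
    }
}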
public void notify(PluginJarChangeListener listener, Collection<BundleOrPluginFileDetails> knowPluginFiles, Collection<BundleOrPluginFileDetails> currentPluginFiles) {
List<BundleOrPluginFileDetails> oldPlugins = new ArrayList<>(knowPluginFiles);
subtract(oldPlugins, currentPluginFiles).forEach(listener::pluginJarRemoved);
currentPluginFiles.forEach(newPlugin -> {
int index = oldPlugins.indexOf(newPlugin);
if (index < 0) {
listener.pluginJarAdded(newPlugin);
} else if (newPlugin.doesTimeStampDiffer(oldPlugins.get(index))) {
listener.pluginJarUpdated(newPlugin);
}
});
} | @Test
void shouldNotifyWhenPluginIsRemoved() {
final PluginJarChangeListener listener = mock(PluginJarChangeListener.class);
BundleOrPluginFileDetails pluginOne = mock(BundleOrPluginFileDetails.class);
BundleOrPluginFileDetails pluginTwo = mock(BundleOrPluginFileDetails.class);
BundleOrPluginFileDetails pluginThree = mock(BundleOrPluginFileDetails.class);
List<BundleOrPluginFileDetails> knownPlugins = List.of(pluginOne, pluginTwo, pluginThree);
List<BundleOrPluginFileDetails> newPlugins = List.of(pluginOne, pluginTwo);
pluginChangeNotifier.notify(listener, knownPlugins, newPlugins);
verify(listener).pluginJarRemoved(pluginThree);
verify(listener, never()).pluginJarAdded(any());
verify(listener, never()).pluginJarUpdated(any());
} |
public static String readFile(String path, String fileName) {
File file = openFile(path, fileName);
if (file.exists()) {
return readFile(file);
}
return null;
} | @Test
void testReadFileWithPath() {
assertNotNull(DiskUtils.readFile(testFile.getParent(), testFile.getName()));
} |
public static IntrinsicMapTaskExecutor withSharedCounterSet(
List<Operation> operations,
CounterSet counters,
ExecutionStateTracker executionStateTracker) {
return new IntrinsicMapTaskExecutor(operations, counters, executionStateTracker);
} | @Test
public void testNoReadOperation() throws Exception {
// Test MapTaskExecutor without ReadOperation.
List<Operation> operations =
Arrays.<Operation>asList(createOperation("o1", 1), createOperation("o2", 2));
ExecutionStateTracker stateTracker = ExecutionStateTracker.newForTest();
try (IntrinsicMapTaskExecutor executor =
IntrinsicMapTaskExecutor.withSharedCounterSet(operations, counterSet, stateTracker)) {
thrown.expect(IllegalStateException.class);
thrown.expectMessage("is not a ReadOperation");
executor.getReadOperation();
}
} |
@Override
public Optional<QueryId> chooseQueryToKill(List<QueryMemoryInfo> runningQueries, List<MemoryInfo> nodes)
{
QueryId biggestQuery = null;
long maxMemory = 0;
for (QueryMemoryInfo query : runningQueries) {
long bytesUsed = query.getMemoryReservation();
if (bytesUsed > maxMemory && GENERAL_POOL.equals(query.getMemoryPoolId())) {
biggestQuery = query.getQueryId();
maxMemory = bytesUsed;
}
}
return Optional.ofNullable(biggestQuery);
} | @Test
public void testGeneralPoolHasNoReservation()
{
int reservePool = 10;
int generalPool = 12;
Map<String, Map<String, Long>> queries = ImmutableMap.<String, Map<String, Long>>builder()
.put("q_1", ImmutableMap.of("n1", 0L, "n2", 0L, "n3", 0L, "n4", 0L, "n5", 0L))
.put("q_r", ImmutableMap.of("n1", 6L, "n2", 6L, "n3", 6L, "n4", 6L, "n5", 6L))
.build();
assertEquals(
lowMemoryKiller.chooseQueryToKill(
toQueryMemoryInfoList("q_r", queries),
toNodeMemoryInfoList(reservePool, generalPool, "q_r", queries)),
Optional.empty());
} |
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
JsonNode jsonValue;
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (SerializationException e) {
throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
}
if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
" If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
// The deserialized data should either be an envelope object containing the schema and the payload or the schema
// was stripped during serialization and we need to fill in an all-encompassing schema.
if (!config.schemasEnabled()) {
ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
jsonValue = envelope;
}
Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
return new SchemaAndValue(
schema,
convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
);
} | @Test
public void bytesToConnect() {
ByteBuffer reference = ByteBuffer.wrap(Utils.utf8("test-string"));
String msg = "{ \"schema\": { \"type\": \"bytes\" }, \"payload\": \"dGVzdC1zdHJpbmc=\" }";
SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes());
ByteBuffer converted = ByteBuffer.wrap((byte[]) schemaAndValue.value());
assertEquals(reference, converted);
} |
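toConnectData accepts two wire shapes depending on schemas.enable, as the envelope check and the wrapping branch above show. Sketched below as raw JSON strings; the values are illustrative only:

public class EnvelopeShapes {
    public static void main(String[] args) {
        // schemas.enable=true: the value must be an envelope with exactly
        // the "schema" and "payload" fields, as validated in toConnectData.
        String withSchema = "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"hi\" }";

        // schemas.enable=false: the raw payload arrives bare and the converter
        // wraps it internally in an envelope whose schema is null.
        String withoutSchema = "\"hi\"";

        System.out.println(withSchema);
        System.out.println(withoutSchema);
    }
}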
@Override
public UpdateSchema requireColumn(String name) {
internalUpdateColumnRequirement(name, false);
return this;
} | @Test
public void testRequireColumn() {
Schema schema = new Schema(optional(1, "id", Types.IntegerType.get()));
Schema expected = new Schema(required(1, "id", Types.IntegerType.get()));
assertThatThrownBy(() -> new SchemaUpdate(schema, 1).requireColumn("id"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot change column nullability: id: optional -> required");
// required to required is not an incompatible change
new SchemaUpdate(expected, 1).requireColumn("id").apply();
Schema result =
new SchemaUpdate(schema, 1).allowIncompatibleChanges().requireColumn("id").apply();
assertThat(result.asStruct()).isEqualTo(expected.asStruct());
} |
public RegistryBuilder subscribe(Boolean subscribe) {
this.subscribe = subscribe;
return getThis();
} | @Test
void subscribe() {
RegistryBuilder builder = new RegistryBuilder();
builder.subscribe(true);
Assertions.assertTrue(builder.build().isSubscribe());
} |
List<ParsedTerm> identifyUnknownFields(final Set<String> availableFields, final List<ParsedTerm> terms) {
final Map<String, List<ParsedTerm>> groupedByField = terms.stream()
.filter(t -> !t.isDefaultField())
.filter(term -> !SEARCHABLE_ES_FIELDS.contains(term.getRealFieldName()))
.filter(term -> !RESERVED_SETTABLE_FIELDS.contains(term.getRealFieldName()))
.filter(term -> !availableFields.contains(term.getRealFieldName()))
.distinct()
.collect(Collectors.groupingBy(ParsedTerm::getRealFieldName));
return unknownFieldsListLimiter.filterElementsContainingUsefulInformation(groupedByField);
} | @Test
void testDoesNotIdentifyDefaultFieldAsUnknown() {
final List<ParsedTerm> unknownFields = toTest.identifyUnknownFields(
Set.of("some_normal_field"),
List.of(ParsedTerm.create(ParsedTerm.DEFAULT_FIELD, "Haba, haba, haba!"))
);
assertTrue(unknownFields.isEmpty());
} |
@Bean
public BulkheadRegistry bulkheadRegistry(
BulkheadConfigurationProperties bulkheadConfigurationProperties,
EventConsumerRegistry<BulkheadEvent> bulkheadEventConsumerRegistry,
RegistryEventConsumer<Bulkhead> bulkheadRegistryEventConsumer,
@Qualifier("compositeBulkheadCustomizer") CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer) {
BulkheadRegistry bulkheadRegistry = createBulkheadRegistry(bulkheadConfigurationProperties,
bulkheadRegistryEventConsumer, compositeBulkheadCustomizer);
registerEventConsumer(bulkheadRegistry, bulkheadEventConsumerRegistry,
bulkheadConfigurationProperties);
initBulkheadRegistry(bulkheadConfigurationProperties, compositeBulkheadCustomizer, bulkheadRegistry);
return bulkheadRegistry;
} | @Test
public void testCreateBulkHeadRegistryWithSharedConfigs() {
//Given
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties defaultProperties = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
defaultProperties.setMaxConcurrentCalls(3);
defaultProperties.setMaxWaitDuration(Duration.ofMillis(50L));
assertThat(defaultProperties.getEventConsumerBufferSize()).isNull();
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties sharedProperties = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
sharedProperties.setMaxConcurrentCalls(2);
sharedProperties.setMaxWaitDuration(Duration.ofMillis(100L));
assertThat(sharedProperties.getEventConsumerBufferSize()).isNull();
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties backendWithDefaultConfig = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
backendWithDefaultConfig.setBaseConfig("default");
backendWithDefaultConfig.setMaxWaitDuration(Duration.ofMillis(200L));
assertThat(backendWithDefaultConfig.getEventConsumerBufferSize()).isNull();
io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties backendWithSharedConfig = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
backendWithSharedConfig.setBaseConfig("sharedConfig");
backendWithSharedConfig.setMaxWaitDuration(Duration.ofMillis(300L));
assertThat(backendWithSharedConfig.getEventConsumerBufferSize()).isNull();
BulkheadConfigurationProperties bulkheadConfigurationProperties = new BulkheadConfigurationProperties();
bulkheadConfigurationProperties.getConfigs().put("default", defaultProperties);
bulkheadConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties);
bulkheadConfigurationProperties.getInstances()
.put("backendWithDefaultConfig", backendWithDefaultConfig);
bulkheadConfigurationProperties.getInstances()
.put("backendWithSharedConfig", backendWithSharedConfig);
BulkheadConfiguration bulkheadConfiguration = new BulkheadConfiguration();
DefaultEventConsumerRegistry<BulkheadEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
//When
BulkheadRegistry bulkheadRegistry = bulkheadConfiguration
.bulkheadRegistry(bulkheadConfigurationProperties, eventConsumerRegistry,
new CompositeRegistryEventConsumer<>(emptyList()),
new CompositeCustomizer<>(Collections.emptyList()));
//Then
assertThat(bulkheadRegistry.getAllBulkheads().size()).isEqualTo(2);
// Should get default config and overwrite max calls and wait time
Bulkhead bulkhead1 = bulkheadRegistry.bulkhead("backendWithDefaultConfig");
assertThat(bulkhead1).isNotNull();
assertThat(bulkhead1.getBulkheadConfig().getMaxConcurrentCalls()).isEqualTo(3);
assertThat(bulkhead1.getBulkheadConfig().getMaxWaitDuration().toMillis()).isEqualTo(200L);
// Should get shared config and overwrite wait time
Bulkhead bulkhead2 = bulkheadRegistry.bulkhead("backendWithSharedConfig");
assertThat(bulkhead2).isNotNull();
assertThat(bulkhead2.getBulkheadConfig().getMaxConcurrentCalls()).isEqualTo(2);
assertThat(bulkhead2.getBulkheadConfig().getMaxWaitDuration().toMillis()).isEqualTo(300L);
// Unknown backend should get default config of Registry
Bulkhead bulkhead3 = bulkheadRegistry.bulkhead("unknownBackend");
assertThat(bulkhead3).isNotNull();
assertThat(bulkhead3.getBulkheadConfig().getMaxWaitDuration().toMillis()).isEqualTo(50L);
assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3);
} |
public byte[] verifyMessage(ContentInfo signedMessage, Date date, String oid) {
return encapsulatedData(verify(signedMessage, date), oid);
} | @Test
public void verifyValidCmsWithOid() throws Exception {
final ContentInfo signedMessage = ContentInfo.getInstance(fixture());
final byte[] data = new CmsVerifier(new CertificateVerifier.None()).verifyMessage(
signedMessage, LdsSecurityObject.OID
);
assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", Hex.toHexString(
DigestUtils.digest("SHA1").digest(data)
));
} |
public V get(K key) {
V value = getNullable(key);
if (value == null) {
throw new IllegalStateException("No cache entry found for key: " + key);
}
return value;
} | @Test
public void get_throws_exception_if_not_exists() {
when(loader.load("foo")).thenReturn("bar");
assertThat(cache.get("foo")).isEqualTo("bar");
assertThat(cache.get("foo")).isEqualTo("bar");
verify(loader, times(1)).load("foo");
assertThatThrownBy(() -> cache.get("not_exists"))
.isInstanceOf(IllegalStateException.class)
.hasMessage("No cache entry found for key: not_exists");
} |
public void setAuthenticationTokenFactory(AuthenticationTokenFactory authenticationTokenFactory) {
this.authenticationTokenFactory = authenticationTokenFactory;
} | @Test
public void testSetAuthenticationTokenFactory() {
AuthenticationTokenFactory factory = new AuthenticationTokenFactory() {
@Override
public AuthenticationToken getAuthenticationToken(SubjectConnectionReference ref) throws Exception {
return null;
}
};
filter.setAuthenticationTokenFactory(factory);
assertSame(factory, filter.getAuthenticationTokenFactory());
} |
public void setField(String name, String value) {
validateField(name, value);
objectMap.put(name, new ConfigPayloadBuilder(value));
} | @Test
public void require_that_simple_fields_can_be_overwritten() {
ConfigPayloadBuilder builder = new ConfigPayloadBuilder();
builder.setField("foo", "bar");
builder.setField("foo", "baz");
Cursor root = createSlime(builder);
// XXX: Not sure if this is the _right_ behavior.
assertEquals("baz", root.field("foo").asString());
} |
@Override
public ConsumerBuilder<T> loadConf(Map<String, Object> config) {
this.conf = ConfigurationDataUtils.loadData(config, conf, ConsumerConfigurationData.class);
return this;
} | @Test
public void testLoadConf() throws Exception {
ConsumerBuilderImpl<byte[]> consumerBuilder = createConsumerBuilder();
String jsonConf = ("{\n"
+ " 'topicNames' : [ 'new-topic' ],\n"
+ " 'topicsPattern' : 'new-topics-pattern',\n"
+ " 'subscriptionName' : 'new-subscription',\n"
+ " 'subscriptionType' : 'Key_Shared',\n"
+ " 'subscriptionProperties' : {\n"
+ " 'new-sub-prop' : 'new-sub-prop-value'\n"
+ " },\n"
+ " 'subscriptionMode' : 'NonDurable',\n"
+ " 'receiverQueueSize' : 2,\n"
+ " 'acknowledgementsGroupTimeMicros' : 2,\n"
+ " 'maxAcknowledgmentGroupSize' : 2,\n"
+ " 'negativeAckRedeliveryDelayMicros' : 2,\n"
+ " 'maxTotalReceiverQueueSizeAcrossPartitions' : 2,\n"
+ " 'consumerName' : 'new-consumer',\n"
+ " 'ackTimeoutMillis' : 2,\n"
+ " 'tickDurationMillis' : 2,\n"
+ " 'priorityLevel' : 2,\n"
+ " 'maxPendingChunkedMessage' : 2,\n"
+ " 'autoAckOldestChunkedMessageOnQueueFull' : true,\n"
+ " 'expireTimeOfIncompleteChunkedMessageMillis' : 2,\n"
+ " 'cryptoFailureAction' : 'DISCARD',\n"
+ " 'properties' : {\n"
+ " 'new-prop' : 'new-prop-value'\n"
+ " },\n"
+ " 'readCompacted' : true,\n"
+ " 'subscriptionInitialPosition' : 'Earliest',\n"
+ " 'patternAutoDiscoveryPeriod' : 2,\n"
+ " 'regexSubscriptionMode' : 'AllTopics',\n"
+ " 'deadLetterPolicy' : {\n"
+ " 'retryLetterTopic' : 'new-retry',\n"
+ " 'initialSubscriptionName' : 'new-dlq-sub',\n"
+ " 'deadLetterTopic' : 'new-dlq',\n"
+ " 'maxRedeliverCount' : 2\n"
+ " },\n"
+ " 'retryEnable' : true,\n"
+ " 'autoUpdatePartitions' : false,\n"
+ " 'autoUpdatePartitionsIntervalSeconds' : 2,\n"
+ " 'replicateSubscriptionState' : true,\n"
+ " 'resetIncludeHead' : true,\n"
+ " 'batchIndexAckEnabled' : true,\n"
+ " 'ackReceiptEnabled' : true,\n"
+ " 'poolMessages' : true,\n"
+ " 'startPaused' : true,\n"
+ " 'autoScaledReceiverQueueSizeEnabled' : true\n"
+ " }").replace("'", "\"");
Map<String, Object> conf = new ObjectMapper().readValue(jsonConf, new TypeReference<HashMap<String,Object>>() {});
MessageListener<byte[]> messageListener = (consumer, message) -> {};
conf.put("messageListener", messageListener);
ConsumerEventListener consumerEventListener = createMockConsumerEventListener();
conf.put("consumerEventListener", consumerEventListener);
RedeliveryBackoff negativeAckRedeliveryBackoff = MultiplierRedeliveryBackoff.builder().build();
conf.put("negativeAckRedeliveryBackoff", negativeAckRedeliveryBackoff);
RedeliveryBackoff ackTimeoutRedeliveryBackoff = MultiplierRedeliveryBackoff.builder().build();
conf.put("ackTimeoutRedeliveryBackoff", ackTimeoutRedeliveryBackoff);
CryptoKeyReader cryptoKeyReader = DefaultCryptoKeyReader.builder().build();
conf.put("cryptoKeyReader", cryptoKeyReader);
MessageCrypto messageCrypto = new MessageCryptoBc("ctx2", true);
conf.put("messageCrypto", messageCrypto);
BatchReceivePolicy batchReceivePolicy = BatchReceivePolicy.builder().maxNumBytes(2).build();
conf.put("batchReceivePolicy", batchReceivePolicy);
KeySharedPolicy keySharedPolicy = KeySharedPolicy.stickyHashRange();
conf.put("keySharedPolicy", keySharedPolicy);
MessagePayloadProcessor payloadProcessor = createMockMessagePayloadProcessor();
conf.put("payloadProcessor", payloadProcessor);
consumerBuilder.loadConf(conf);
ConsumerConfigurationData<byte[]> configurationData = consumerBuilder.getConf();
assertEquals(configurationData.getTopicNames(), new HashSet<>(Collections.singletonList("new-topic")));
assertEquals(configurationData.getTopicsPattern().pattern(), "new-topics-pattern");
assertEquals(configurationData.getSubscriptionName(), "new-subscription");
assertEquals(configurationData.getSubscriptionType(), SubscriptionType.Key_Shared);
assertThat(configurationData.getSubscriptionProperties()).hasSize(1)
.hasFieldOrPropertyWithValue("new-sub-prop", "new-sub-prop-value");
assertEquals(configurationData.getSubscriptionMode(), SubscriptionMode.NonDurable);
assertEquals(configurationData.getReceiverQueueSize(), 2);
assertEquals(configurationData.getAcknowledgementsGroupTimeMicros(), 2);
assertEquals(configurationData.getMaxAcknowledgmentGroupSize(), 2);
assertEquals(configurationData.getNegativeAckRedeliveryDelayMicros(), 2);
assertEquals(configurationData.getMaxTotalReceiverQueueSizeAcrossPartitions(), 2);
assertEquals(configurationData.getConsumerName(), "new-consumer");
assertEquals(configurationData.getAckTimeoutMillis(), 2);
assertEquals(configurationData.getTickDurationMillis(), 2);
assertEquals(configurationData.getPriorityLevel(), 2);
assertEquals(configurationData.getMaxPendingChunkedMessage(), 2);
assertTrue(configurationData.isAutoAckOldestChunkedMessageOnQueueFull());
assertEquals(configurationData.getExpireTimeOfIncompleteChunkedMessageMillis(), 2);
assertEquals(configurationData.getCryptoFailureAction(), ConsumerCryptoFailureAction.DISCARD);
assertThat(configurationData.getProperties()).hasSize(1)
.hasFieldOrPropertyWithValue("new-prop", "new-prop-value");
assertTrue(configurationData.isReadCompacted());
assertEquals(configurationData.getSubscriptionInitialPosition(), SubscriptionInitialPosition.Earliest);
assertEquals(configurationData.getPatternAutoDiscoveryPeriod(), 2);
assertEquals(configurationData.getRegexSubscriptionMode(), RegexSubscriptionMode.AllTopics);
assertEquals(configurationData.getDeadLetterPolicy().getDeadLetterTopic(), "new-dlq");
assertEquals(configurationData.getDeadLetterPolicy().getRetryLetterTopic(), "new-retry");
assertEquals(configurationData.getDeadLetterPolicy().getInitialSubscriptionName(), "new-dlq-sub");
assertEquals(configurationData.getDeadLetterPolicy().getMaxRedeliverCount(), 2);
assertTrue(configurationData.isRetryEnable());
assertFalse(configurationData.isAutoUpdatePartitions());
assertEquals(configurationData.getAutoUpdatePartitionsIntervalSeconds(), 2);
assertTrue(configurationData.isReplicateSubscriptionState());
assertTrue(configurationData.isResetIncludeHead());
assertTrue(configurationData.isBatchIndexAckEnabled());
assertTrue(configurationData.isAckReceiptEnabled());
assertTrue(configurationData.isPoolMessages());
assertTrue(configurationData.isStartPaused());
assertTrue(configurationData.isAutoScaledReceiverQueueSizeEnabled());
assertNull(configurationData.getMessageListener());
assertNull(configurationData.getConsumerEventListener());
assertNull(configurationData.getNegativeAckRedeliveryBackoff());
assertNull(configurationData.getAckTimeoutRedeliveryBackoff());
assertNull(configurationData.getMessageCrypto());
assertNull(configurationData.getCryptoKeyReader());
assertNull(configurationData.getBatchReceivePolicy());
assertNull(configurationData.getKeySharedPolicy());
assertNull(configurationData.getPayloadProcessor());
} |
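Note on the null assertions above: loadConf only maps JSON-serializable fields through ConfigurationDataUtils, so interface-typed components placed in the map (listeners, crypto readers, policies) are ignored and stay null. A hedged sketch of how such components get wired instead, assuming an existing PulsarClient named client and the topic/subscription already present in conf:

// Sketch only: interface-typed fields are wired through builder methods, not loadConf.
Consumer<byte[]> consumer = client.newConsumer()
        .loadConf(conf) // scalar and JSON-mappable fields only
        .messageListener((c, msg) -> c.acknowledgeAsync(msg)) // wired explicitly
        .subscribe();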
public CompletableFuture<Void> deleteBackups(final UUID accountUuid) {
final ExternalServiceCredentials credentials = secureValueRecoveryCredentialsGenerator.generateForUuid(accountUuid);
final HttpRequest request = HttpRequest.newBuilder()
.uri(deleteUri)
.DELETE()
.header(HttpHeaders.AUTHORIZATION, basicAuthHeader(credentials))
.build();
return httpClient.sendAsync(request, HttpResponse.BodyHandlers.ofString()).thenApply(response -> {
if (HttpUtils.isSuccessfulResponse(response.statusCode())) {
return null;
}
throw new SecureValueRecoveryException("Failed to delete backup: " + response.statusCode());
});
} | @Test
void deleteStoredData() {
final String username = RandomStringUtils.randomAlphabetic(16);
final String password = RandomStringUtils.randomAlphanumeric(32);
when(credentialsGenerator.generateForUuid(accountUuid)).thenReturn(
new ExternalServiceCredentials(username, password));
wireMock.stubFor(delete(urlEqualTo(SecureValueRecovery2Client.DELETE_PATH))
.withBasicAuth(username, password)
.willReturn(aResponse().withStatus(202)));
assertDoesNotThrow(() -> secureValueRecovery2Client.deleteBackups(accountUuid).join());
} |
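A minimal sketch of the basicAuthHeader helper the request builder above relies on, assuming the standard RFC 7617 encoding; the real helper may live in a shared util class.

// Assumed shape of the helper used above: RFC 7617 Basic credentials.
private static String basicAuthHeader(ExternalServiceCredentials credentials) {
    String token = credentials.username() + ":" + credentials.password();
    return "Basic " + java.util.Base64.getEncoder()
            .encodeToString(token.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}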
public static Date parse(String date, ParsePosition pos) throws ParseException {
Exception fail = null;
try {
int offset = pos.getIndex();
// extract year
int year = parseInt(date, offset, offset += 4);
if (checkOffset(date, offset, '-')) {
offset += 1;
}
// extract month
int month = parseInt(date, offset, offset += 2);
if (checkOffset(date, offset, '-')) {
offset += 1;
}
// extract day
int day = parseInt(date, offset, offset += 2);
// default time value
int hour = 0;
int minutes = 0;
int seconds = 0;
// always use 0 otherwise returned date will include millis of current time
int milliseconds = 0;
// if the value has no time component (and no time zone), we are done
boolean hasT = checkOffset(date, offset, 'T');
if (!hasT && (date.length() <= offset)) {
Calendar calendar = new GregorianCalendar(year, month - 1, day);
calendar.setLenient(false);
pos.setIndex(offset);
return calendar.getTime();
}
if (hasT) {
// extract hours, minutes, seconds and milliseconds
hour = parseInt(date, offset += 1, offset += 2);
if (checkOffset(date, offset, ':')) {
offset += 1;
}
minutes = parseInt(date, offset, offset += 2);
if (checkOffset(date, offset, ':')) {
offset += 1;
}
// second and milliseconds can be optional
if (date.length() > offset) {
char c = date.charAt(offset);
if (c != 'Z' && c != '+' && c != '-') {
seconds = parseInt(date, offset, offset += 2);
if (seconds > 59 && seconds < 63) {
seconds = 59; // truncate up to 3 leap seconds
}
// milliseconds can be optional in the format
if (checkOffset(date, offset, '.')) {
offset += 1;
int endOffset = indexOfNonDigit(date, offset + 1); // assume at least one digit
int parseEndOffset = Math.min(endOffset, offset + 3); // parse up to 3 digits
int fraction = parseInt(date, offset, parseEndOffset);
// compensate for "missing" digits
switch (parseEndOffset - offset) { // number of digits parsed
case 2:
milliseconds = fraction * 10;
break;
case 1:
milliseconds = fraction * 100;
break;
default:
milliseconds = fraction;
}
offset = endOffset;
}
}
}
}
// extract timezone
if (date.length() <= offset) {
throw new IllegalArgumentException("No time zone indicator");
}
TimeZone timezone = null;
char timezoneIndicator = date.charAt(offset);
if (timezoneIndicator == 'Z') {
timezone = TIMEZONE_UTC;
offset += 1;
} else if (timezoneIndicator == '+' || timezoneIndicator == '-') {
String timezoneOffset = date.substring(offset);
// When timezone has no minutes, we should append it, valid timezones are, for example:
// +00:00, +0000 and +00
timezoneOffset = timezoneOffset.length() >= 5 ? timezoneOffset : timezoneOffset + "00";
offset += timezoneOffset.length();
// 18-Jun-2015, tatu: Minor simplification, skip offset of "+0000"/"+00:00"
if (timezoneOffset.equals("+0000") || timezoneOffset.equals("+00:00")) {
timezone = TIMEZONE_UTC;
} else {
// 18-Jun-2015, tatu: Looks like offsets only work from GMT, not UTC...
// not sure why, but that's the way it looks. Further, Javadocs for
// `java.util.TimeZone` specifically instruct use of GMT as base for
// custom timezones... odd.
String timezoneId = "GMT" + timezoneOffset;
// String timezoneId = "UTC" + timezoneOffset;
timezone = TimeZone.getTimeZone(timezoneId);
String act = timezone.getID();
if (!act.equals(timezoneId)) {
/* 22-Jan-2015, tatu: Looks like canonical version has colons, but we may be given
* one without. If so, don't sweat.
* Yes, very inefficient. Hopefully not hit often.
* If it becomes a perf problem, add 'loose' comparison instead.
*/
String cleaned = act.replace(":", "");
if (!cleaned.equals(timezoneId)) {
throw new IndexOutOfBoundsException(
"Mismatching time zone indicator: "
+ timezoneId
+ " given, resolves to "
+ timezone.getID());
}
}
}
} else {
throw new IndexOutOfBoundsException(
"Invalid time zone indicator '" + timezoneIndicator + "'");
}
Calendar calendar = new GregorianCalendar(timezone);
calendar.setLenient(false);
calendar.set(Calendar.YEAR, year);
calendar.set(Calendar.MONTH, month - 1);
calendar.set(Calendar.DAY_OF_MONTH, day);
calendar.set(Calendar.HOUR_OF_DAY, hour);
calendar.set(Calendar.MINUTE, minutes);
calendar.set(Calendar.SECOND, seconds);
calendar.set(Calendar.MILLISECOND, milliseconds);
pos.setIndex(offset);
return calendar.getTime();
// If we get a ParseException it'll already have the right message/offset.
// Other exception types can convert here.
} catch (IndexOutOfBoundsException | IllegalArgumentException e) {
fail = e;
}
String input = (date == null) ? null : ('"' + date + '"');
String msg = fail.getMessage();
if (msg == null || msg.isEmpty()) {
msg = "(" + fail.getClass().getName() + ")";
}
ParseException ex =
new ParseException("Failed to parse date [" + input + "]: " + msg, pos.getIndex());
ex.initCause(fail);
throw ex;
} | @Test
@SuppressWarnings("UndefinedEquals")
public void testDateParseWithTimezone() throws ParseException {
String dateStr = "2018-06-25T00:00:00-03:00";
Date date = ISO8601Utils.parse(dateStr, new ParsePosition(0));
GregorianCalendar calendar = createUtcCalendar();
calendar.set(2018, Calendar.JUNE, 25, 3, 0);
Date expectedDate = calendar.getTime();
assertThat(date).isEqualTo(expectedDate);
} |
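A small usage sketch of the parser above (ISO8601Utils is the class name the test uses): date-only input parses without a zone indicator and yields local midnight, while any input with a time component must carry one.

import java.text.ParsePosition;
import java.util.Date;

class Iso8601Demo {
    public static void main(String[] args) throws Exception {
        // Date-only: no zone indicator required, interpreted as local midnight.
        Date d1 = ISO8601Utils.parse("2018-06-25", new ParsePosition(0));
        // Timestamp: an explicit indicator ("Z", "-03", "-0300" or "-03:00") is mandatory.
        Date d2 = ISO8601Utils.parse("2018-06-25T00:00:00-03:00", new ParsePosition(0));
        System.out.println(d1 + " / " + d2);
    }
}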
@Override
public List<Document> get() {
try (var input = markdownResource.getInputStream()) {
Node node = parser.parseReader(new InputStreamReader(input));
DocumentVisitor documentVisitor = new DocumentVisitor(config);
node.accept(documentVisitor);
return documentVisitor.getDocuments();
}
catch (IOException e) {
throw new RuntimeException(e);
}
} | @Test
void testWithFormatting() {
MarkdownDocumentReader reader = new MarkdownDocumentReader("classpath:/with-formatting.md");
List<Document> documents = reader.get();
assertThat(documents).hasSize(2)
.extracting(Document::getMetadata, Document::getContent)
.containsOnly(tuple(Map.of("category", "header_1", "title", "This is a fancy header name"),
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec tincidunt velit non bibendum gravida. Cras accumsan tincidunt ornare. Donec hendrerit consequat tellus blandit accumsan. Aenean aliquam metus at arcu elementum dignissim."),
tuple(Map.of("category", "header_3", "title", "Header 3"),
"Aenean eu leo eu nibh tristique posuere quis quis massa."));
} |
public static boolean needsQuotes(final String identifier) {
return !(isValid(identifier) && upperCase(identifier));
} | @Test
public void shouldNotNeedBackQuotes() {
// Given:
final String[] identifiers = new String[]{
"FOO", // nothing special
"TABLES", // in vocabulary but non-reserved
"`SELECT`" // already has back quotes
};
// Then:
for (final String identifier : identifiers) {
assertThat("Expected no quotes for " + identifier, !IdentifierUtil.needsQuotes(identifier));
}
} |
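A hypothetical caller-side sketch of the rule above: only identifiers that are both lexically valid and fully upper-case escape quoting.

// Hypothetical wrapper: quote exactly when needsQuotes says so.
static String quoteIfNeeded(String identifier) {
    return IdentifierUtil.needsQuotes(identifier)
            ? "`" + identifier + "`" // e.g. "foo", "my col"
            : identifier;            // e.g. "FOO", "TABLES"
}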
public QueryCacheConfig setBatchSize(int batchSize) {
this.batchSize = checkPositive("batchSize", batchSize);
return this;
} | @Test(expected = IllegalArgumentException.class)
public void testSetBatchSize_throwsException_whenNotPositive() {
QueryCacheConfig config = new QueryCacheConfig();
config.setBatchSize(-1);
} |
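A sketch of the precondition helper the setter above delegates to, assuming the common contract of returning the checked value so the call can be inlined; the real signature may differ.

// Assumed shape of the precondition helper used by setBatchSize.
public static int checkPositive(String paramName, int value) {
    if (value <= 0) {
        throw new IllegalArgumentException(paramName + " must be positive, but was " + value);
    }
    return value;
}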
@Override
public void countDown() {
get(countDownAsync());
} | @Test
public void testCountDown() throws InterruptedException {
RCountDownLatch latch = redisson.getCountDownLatch("latch");
latch.trySetCount(2);
Assertions.assertEquals(2, latch.getCount());
latch.countDown();
Assertions.assertEquals(1, latch.getCount());
latch.countDown();
Assertions.assertEquals(0, latch.getCount());
latch.await();
latch.countDown();
Assertions.assertEquals(0, latch.getCount());
latch.await();
latch.countDown();
Assertions.assertEquals(0, latch.getCount());
latch.await();
RCountDownLatch latch1 = redisson.getCountDownLatch("latch1");
latch1.trySetCount(1);
latch1.countDown();
Assertions.assertEquals(0, latch.getCount());
latch1.countDown();
Assertions.assertEquals(0, latch.getCount());
latch1.await();
RCountDownLatch latch2 = redisson.getCountDownLatch("latch2");
latch2.trySetCount(1);
latch2.countDown();
latch2.await();
latch2.await();
RCountDownLatch latch3 = redisson.getCountDownLatch("latch3");
Assertions.assertEquals(0, latch.getCount());
latch3.await();
RCountDownLatch latch4 = redisson.getCountDownLatch("latch4");
Assertions.assertEquals(0, latch.getCount());
latch4.countDown();
Assertions.assertEquals(0, latch.getCount());
latch4.await();
} |
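A sketch of the sync-over-async bridge the override above uses: countDown() simply blocks on the RFuture returned by countDownAsync(). The unwrapping below is an assumption; Redisson's real bridge lives in its command executor.

// Assumed shape of the blocking bridge used by countDown().
protected <V> V get(RFuture<V> future) {
    try {
        return future.toCompletableFuture().join();
    } catch (java.util.concurrent.CompletionException e) {
        // Surface the underlying Redis failure instead of the wrapper.
        throw e.getCause() instanceof RuntimeException
                ? (RuntimeException) e.getCause()
                : new IllegalStateException(e.getCause());
    }
}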
public static String getRejectTips(PolarisRateLimitProperties polarisRateLimitProperties) {
String tips = polarisRateLimitProperties.getRejectRequestTips();
if (StringUtils.hasText(tips)) {
return tips;
}
String rejectFilePath = polarisRateLimitProperties.getRejectRequestTipsFilePath();
if (StringUtils.hasText(rejectFilePath)) {
try {
tips = ResourceFileUtils.readFile(rejectFilePath);
}
catch (Exception e) {
LOG.error("[RateLimit] Read custom reject tips file error. path = {}",
rejectFilePath, e);
}
}
if (StringUtils.hasText(tips)) {
return tips;
}
return RateLimitConstant.QUOTA_LIMITED_INFO;
} | @Test
public void testGetRejectTips() {
PolarisRateLimitProperties polarisRateLimitProperties = new PolarisRateLimitProperties();
// RejectRequestTips
polarisRateLimitProperties.setRejectRequestTips("RejectRequestTips");
assertThat(RateLimitUtils.getRejectTips(polarisRateLimitProperties)).isEqualTo("RejectRequestTips");
// RejectRequestTipsFilePath
polarisRateLimitProperties.setRejectRequestTips(null);
polarisRateLimitProperties.setRejectRequestTipsFilePath("reject-tips.html");
assertThat(RateLimitUtils.getRejectTips(polarisRateLimitProperties)).isEqualTo("RejectRequestTips");
// RejectRequestTipsFilePath with Exception
polarisRateLimitProperties.setRejectRequestTips(null);
polarisRateLimitProperties.setRejectRequestTipsFilePath("exception.html");
assertThat(RateLimitUtils.getRejectTips(polarisRateLimitProperties)).isEqualTo(QUOTA_LIMITED_INFO);
} |
public void addChild(Entry entry) {
childEntries.add(entry);
entry.setParent(this);
} | @Test
public void equalEntriesWithChild() {
Entry firstStructureWithEntry = new Entry();
final Entry firstEntry = new Entry();
firstStructureWithEntry.addChild(firstEntry);
Entry otherStructureWithEntry = new Entry();
final Entry otherEntry = new Entry();
otherStructureWithEntry.addChild(otherEntry);
assertThat(firstStructureWithEntry, equalTo(otherStructureWithEntry));
} |
public void validate(String effectivePath, String artifactMD5, ChecksumValidationPublisher checksumValidationPublisher) {
if (artifactMd5Checksums == null) {
checksumValidationPublisher.md5ChecksumFileNotFound();
return;
}
String expectedMd5 = artifactMd5Checksums.md5For(effectivePath);
if (StringUtils.isBlank(expectedMd5)) {
checksumValidationPublisher.md5NotFoundFor(effectivePath);
return;
}
if (expectedMd5.equals(artifactMD5)) {
checksumValidationPublisher.md5Match(effectivePath);
} else {
checksumValidationPublisher.md5Mismatch(effectivePath);
}
} | @Test
public void shouldCallbackWhenMd5Mismatch() throws IOException {
when(checksums.md5For("path")).thenReturn(CachedDigestUtils.md5Hex("something"));
final ByteArrayInputStream stream = new ByteArrayInputStream("foo".getBytes());
new ChecksumValidator(checksums).validate("path", CachedDigestUtils.md5Hex(stream), checksumValidationPublisher);
verify(checksumValidationPublisher).md5Mismatch("path");
} |
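A minimal wiring sketch, assuming the publisher interface exposes exactly the four callbacks invoked by validate above; a real publisher would report into the build output rather than stdout.

// Hypothetical logging publisher; method names mirror the callbacks above.
ChecksumValidationPublisher publisher = new ChecksumValidationPublisher() {
    @Override public void md5Match(String path) { System.out.println("md5 ok: " + path); }
    @Override public void md5Mismatch(String path) { System.out.println("md5 MISMATCH: " + path); }
    @Override public void md5NotFoundFor(String path) { System.out.println("no md5 recorded for: " + path); }
    @Override public void md5ChecksumFileNotFound() { System.out.println("checksum file missing"); }
};
new ChecksumValidator(checksums).validate("artifact/path", actualMd5, publisher);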
@Override
protected URIRegisterDTO buildURIRegisterDTO(final ApplicationContext context, final Map<String, Object> beans) {
try {
return URIRegisterDTO.builder()
.contextPath(getContextPath())
.appName(getAppName())
.protocol(protocol)
.host(super.getHost())
.port(Integer.valueOf(getPort()))
.rpcType(RpcTypeEnum.WEB_SOCKET.getName())
.eventType(EventType.REGISTER)
.build();
} catch (ShenyuException e) {
throw new ShenyuException(e.getMessage() + "please config ${shenyu.client.http.props.port} in xml/yml !");
}
} | @Test
public void testBuildURIRegisterDTO() {
URIRegisterDTO uriRegisterDTO = eventListener.buildURIRegisterDTO(applicationContext, Collections.emptyMap());
assertNotNull(uriRegisterDTO);
assertEquals("/contextPath", uriRegisterDTO.getContextPath());
assertEquals("appName", uriRegisterDTO.getAppName());
assertEquals("127.0.0.1", uriRegisterDTO.getHost());
assertEquals(8080, uriRegisterDTO.getPort());
} |
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
boolean satisfied = false;
// No trading history or no position opened, no loss
if (tradingRecord != null) {
Position currentPosition = tradingRecord.getCurrentPosition();
if (currentPosition.isOpened()) {
Num entryPrice = currentPosition.getEntry().getNetPrice();
Num currentPrice = closePrice.getValue(index);
if (currentPosition.getEntry().isBuy()) {
satisfied = isBuyStopSatisfied(entryPrice, currentPrice);
} else {
satisfied = isSellStopSatisfied(entryPrice, currentPrice);
}
}
}
traceIsSatisfied(index, satisfied);
return satisfied;
} | @Test
public void isSatisfiedWorksForSell() {
final TradingRecord tradingRecord = new BaseTradingRecord(Trade.TradeType.SELL);
final Num tradedAmount = numOf(1);
// 5% stop-loss
StopLossRule rule = new StopLossRule(closePrice, numOf(5));
assertFalse(rule.isSatisfied(0, null));
assertFalse(rule.isSatisfied(1, tradingRecord));
// Enter at 108
tradingRecord.enter(1, numOf(108), tradedAmount);
assertFalse(rule.isSatisfied(1, tradingRecord));
assertFalse(rule.isSatisfied(2, tradingRecord));
assertTrue(rule.isSatisfied(3, tradingRecord));
// Exit
tradingRecord.exit(4);
// Enter at 114
tradingRecord.enter(2, numOf(114), tradedAmount);
assertFalse(rule.isSatisfied(2, tradingRecord));
assertTrue(rule.isSatisfied(3, tradingRecord));
assertFalse(rule.isSatisfied(4, tradingRecord));
assertTrue(rule.isSatisfied(5, tradingRecord));
} |
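A sketch of the two predicates the rule dispatches to, assuming the constructor argument is a loss percentage as the 5% test above implies; lossPercentage and closePrice are fields of the surrounding class.

// Assumed helpers: the stop fires once price crosses entry adjusted by the loss percentage.
private boolean isBuyStopSatisfied(Num entryPrice, Num currentPrice) {
    Num hundred = closePrice.numOf(100);
    Num threshold = entryPrice.multipliedBy(hundred.minus(lossPercentage).dividedBy(hundred)); // e.g. 108 * 0.95
    return currentPrice.isLessThanOrEqual(threshold);
}

private boolean isSellStopSatisfied(Num entryPrice, Num currentPrice) {
    Num hundred = closePrice.numOf(100);
    Num threshold = entryPrice.multipliedBy(hundred.plus(lossPercentage).dividedBy(hundred)); // e.g. 114 * 1.05
    return currentPrice.isGreaterThanOrEqual(threshold);
}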
public String generateJWE(String data, String jwksUri) {
JWEHeader header = new JWEHeader(JWEAlgorithm.RSA_OAEP, EncryptionMethod.A256GCM);
JWEObject jweObject = new JWEObject(header, new Payload(data));
logger.debug("jwt data: {}", data);
try {
var publicEncryptionKey = getPublicEncryptionKey(jwksUri);
jweObject.encrypt(new RSAEncrypter(publicEncryptionKey));
} catch (JOSEException | IOException | ParseException e) {
return null;
}
return jweObject.serialize();
} | @Test
void generateJWETest() {
//given
//when
provider.generateJWE("data", "jwskUri");
//then
} |
public static <T> LengthPrefixCoder<T> of(Coder<T> valueCoder) {
checkNotNull(valueCoder, "Coder not expected to be null");
return new LengthPrefixCoder<>(valueCoder);
} | @Test
public void testRegisterByteSizeObserver() throws Exception {
CoderProperties.testByteCount(
LengthPrefixCoder.of(VarIntCoder.of()), Coder.Context.NESTED, new Integer[] {0, 10, 1000});
} |
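A round-trip sketch, assuming the single-argument encode/decode overloads of recent Beam releases: the wrapper writes a varint length before the inner coder's bytes, so a consumer can skip the value without understanding its encoding.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.beam.sdk.coders.LengthPrefixCoder;
import org.apache.beam.sdk.coders.VarIntCoder;

// Encode then decode a single value through a byte stream.
LengthPrefixCoder<Integer> coder = LengthPrefixCoder.of(VarIntCoder.of());
ByteArrayOutputStream out = new ByteArrayOutputStream();
coder.encode(42, out);
Integer decoded = coder.decode(new ByteArrayInputStream(out.toByteArray())); // 42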
protected List<ScenarioResult> getScenarioResultsFromGivenFacts(ScesimModelDescriptor scesimModelDescriptor,
List<ScenarioExpect> scenarioOutputsPerFact,
InstanceGiven input,
ExpressionEvaluatorFactory expressionEvaluatorFactory) {
Object factInstance = input.getValue();
List<ScenarioResult> scenarioResults = new ArrayList<>();
for (ScenarioExpect scenarioExpect : scenarioOutputsPerFact) {
if (scenarioExpect.isNewFact()) {
continue;
}
for (FactMappingValue expectedResult : scenarioExpect.getExpectedResult()) {
ExpressionEvaluator expressionEvaluator = expressionEvaluatorFactory.getOrCreate(expectedResult);
ScenarioResult scenarioResult = fillResult(expectedResult,
() -> createExtractorFunction(expressionEvaluator, expectedResult, scesimModelDescriptor)
.apply(factInstance),
expressionEvaluator);
scenarioResults.add(scenarioResult);
}
}
return scenarioResults;
} | @Test
public void getScenarioResultsTest() {
List<InstanceGiven> scenario1Inputs = extractGivenValuesForScenario1();
List<ScenarioExpect> scenario1Outputs = runnerHelper.extractExpectedValues(scenario1.getUnmodifiableFactMappingValues());
assertThat(scenario1Inputs).isNotEmpty();
InstanceGiven input1 = scenario1Inputs.get(0);
scenario1Outputs = scenario1Outputs.stream().filter(elem -> elem.getFactIdentifier().equals(input1.getFactIdentifier())).collect(toList());
List<ScenarioResult> scenario1Results = runnerHelper.getScenarioResultsFromGivenFacts(simulation.getScesimModelDescriptor(), scenario1Outputs, input1, expressionEvaluatorFactory);
assertThat(scenario1Results).hasSize(1);
assertThat(scenario1Outputs.get(0).getExpectedResult().get(0).getStatus()).isEqualTo(SUCCESS);
List<InstanceGiven> scenario2Inputs = extractGivenValuesForScenario2();
List<ScenarioExpect> scenario2Outputs = runnerHelper.extractExpectedValues(scenario2.getUnmodifiableFactMappingValues());
assertThat(scenario2Inputs).isNotEmpty();
InstanceGiven input2 = scenario2Inputs.get(0);
scenario2Outputs = scenario2Outputs.stream().filter(elem -> elem.getFactIdentifier().equals(input2.getFactIdentifier())).collect(toList());
List<ScenarioResult> scenario2Results = runnerHelper.getScenarioResultsFromGivenFacts(simulation.getScesimModelDescriptor(), scenario2Outputs, input2, expressionEvaluatorFactory);
assertThat(scenario2Results).hasSize(1);
assertThat(scenario2Outputs.get(0).getExpectedResult().get(0).getStatus()).isEqualTo(SUCCESS);
List<ScenarioExpect> newFact = List.of(new ScenarioExpect(personFactIdentifier, List.of(), true));
List<ScenarioResult> scenario2NoResults = runnerHelper.getScenarioResultsFromGivenFacts(simulation.getScesimModelDescriptor(), newFact, input2, expressionEvaluatorFactory);
assertThat(scenario2NoResults).hasSize(0);
Person person = new Person();
person.setFirstName("ANOTHER STRING");
InstanceGiven newInput = new InstanceGiven(personFactIdentifier, person);
List<ScenarioResult> scenario3Results = runnerHelper.getScenarioResultsFromGivenFacts(simulation.getScesimModelDescriptor(), scenario1Outputs, newInput, expressionEvaluatorFactory);
assertThat(scenario1Outputs.get(0).getExpectedResult().get(0).getStatus()).isEqualTo(FactMappingValueStatus.FAILED_WITH_ERROR);
assertThat(scenario3Results).hasSize(1);
assertThat(scenario3Results.get(0).getResultValue().get()).isEqualTo(person.getFirstName());
assertThat(scenario3Results.get(0).getFactMappingValue().getRawValue()).isEqualTo("NAME");
} |
public Promise<Void> gracefullyShutdownClientChannels() {
return gracefullyShutdownClientChannels(ShutdownType.SHUTDOWN);
} | @Test
@SuppressWarnings("unchecked")
void discoveryShutdown() {
String configName = "server.outofservice.connections.shutdown";
AbstractConfiguration configuration = ConfigurationManager.getConfigInstance();
try {
configuration.setProperty(configName, "true");
EurekaClient eureka = Mockito.mock(EurekaClient.class);
EventExecutor executor = Mockito.mock(EventExecutor.class);
ArgumentCaptor<EurekaEventListener> captor = ArgumentCaptor.forClass(EurekaEventListener.class);
shutdown = spy(new ClientConnectionsShutdown(channels, executor, eureka));
verify(eureka).registerEventListener(captor.capture());
doReturn(executor.newPromise()).when(shutdown).gracefullyShutdownClientChannels();
EurekaEventListener listener = captor.getValue();
listener.onEvent(new StatusChangeEvent(InstanceStatus.UP, InstanceStatus.DOWN));
verify(executor).schedule(ArgumentMatchers.isA(Callable.class), anyLong(), eq(TimeUnit.MILLISECONDS));
Mockito.reset(executor);
listener.onEvent(new StatusChangeEvent(InstanceStatus.UP, InstanceStatus.OUT_OF_SERVICE));
verify(executor).schedule(ArgumentMatchers.isA(Callable.class), anyLong(), eq(TimeUnit.MILLISECONDS));
Mockito.reset(executor);
listener.onEvent(new StatusChangeEvent(InstanceStatus.STARTING, InstanceStatus.OUT_OF_SERVICE));
verify(executor, never())
.schedule(ArgumentMatchers.isA(Callable.class), anyLong(), eq(TimeUnit.MILLISECONDS));
} finally {
configuration.setProperty(configName, "false");
}
} |