focal_method | test_case
---|---
public long getMaxOffset(final String addr, final MessageQueue messageQueue, final long timeoutMillis)
throws RemotingException, MQBrokerException, InterruptedException {
GetMaxOffsetRequestHeader requestHeader = new GetMaxOffsetRequestHeader();
requestHeader.setTopic(messageQueue.getTopic());
requestHeader.setQueueId(messageQueue.getQueueId());
requestHeader.setBrokerName(messageQueue.getBrokerName());
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_MAX_OFFSET, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
GetMaxOffsetResponseHeader responseHeader =
(GetMaxOffsetResponseHeader) response.decodeCommandCustomHeader(GetMaxOffsetResponseHeader.class);
return responseHeader.getOffset();
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
} | @Test
public void testGetMaxOffset() throws Exception {
doAnswer((Answer<RemotingCommand>) invocation -> {
RemotingCommand request = invocation.getArgument(1);
final RemotingCommand response = RemotingCommand.createResponseCommand(GetMaxOffsetResponseHeader.class);
final GetMaxOffsetResponseHeader responseHeader = (GetMaxOffsetResponseHeader) response.readCustomHeader();
responseHeader.setOffset(100L);
response.makeCustomHeaderToNet();
response.setCode(ResponseCode.SUCCESS);
response.setOpaque(request.getOpaque());
return response;
}).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong());
long offset = mqClientAPI.getMaxOffset(brokerAddr, new MessageQueue(topic, brokerName, 0), 10000);
assertThat(offset).isEqualTo(100L);
} |
@Override
public boolean tryAcquirePermission() {
boolean callPermitted = tryEnterBulkhead();
publishBulkheadEvent(
() -> callPermitted ? new BulkheadOnCallPermittedEvent(name)
: new BulkheadOnCallRejectedEvent(name)
);
return callPermitted;
} | @Test
public void testZeroMaxConcurrentCalls() {
BulkheadConfig config = BulkheadConfig.custom()
.maxConcurrentCalls(0)
.maxWaitDuration(Duration.ofMillis(0))
.build();
SemaphoreBulkhead bulkhead = new SemaphoreBulkhead("test", config);
boolean entered = bulkhead.tryAcquirePermission();
assertThat(entered).isFalse();
} |
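The zero-permit case above is plain java.util.concurrent semantics. A minimal sketch of what a semaphore-backed tryEnterBulkhead plausibly looks like (illustrative names, not Resilience4j's actual internals):

import java.time.Duration;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

// Illustrative sketch: a Semaphore holding maxConcurrentCalls permits.
// With zero permits, tryAcquire can never succeed, so every call is rejected.
class PermitSketch {
    private final Semaphore semaphore;
    private final Duration maxWait;

    PermitSketch(int maxConcurrentCalls, Duration maxWait) {
        this.semaphore = new Semaphore(maxConcurrentCalls, true);
        this.maxWait = maxWait;
    }

    boolean tryEnter() {
        try {
            // Wait up to maxWait for a permit; false on timeout or interruption.
            return semaphore.tryAcquire(maxWait.toMillis(), TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        }
    }
}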
public static SecurityAdminService getSecurityService() {
if (System.getSecurityManager() != null) {
try {
SecurityAdminService securityService = serviceDirectory.get(SecurityAdminService.class);
if (securityService != null) {
return securityService;
}
} catch (ServiceNotFoundException e) {
return null;
}
}
return null;
} | @Test
public void testGetSecurityService() {
assertNull(System.getSecurityManager());
assertNotNull(service);
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<AssertSchema> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
return AssertExecutor.execute(
statement.getMaskedStatementText(),
statement.getStatement(),
executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS),
serviceContext,
(stmt, sc) -> assertSchema(
sc.getSchemaRegistryClient(),
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists()),
(str, stmt) -> new AssertSchemaEntity(
str,
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists())
);
} | @Test
public void shouldFailToAssertNotExistSchemaBySubject() {
// Given
final AssertSchema assertSchema = new AssertSchema(Optional.empty(), Optional.of("subjectName"), Optional.empty(), Optional.empty(), false);
final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement
.of(KsqlParser.PreparedStatement.of("", assertSchema),
SessionConfig.of(ksqlConfig, ImmutableMap.of()));
// When:
final KsqlRestException e = assertThrows(KsqlRestException.class, () ->
AssertSchemaExecutor.execute(statement, mock(SessionProperties.class), engine, serviceContext));
// Then:
assertThat(e.getResponse().getStatus(), is(417));
assertThat(((KsqlErrorMessage) e.getResponse().getEntity()).getMessage(), is("Schema with subject name subjectName exists"));
} |
public boolean isBackPressured() {
if (invokable == null
|| partitionWriters.length == 0
|| (executionState != ExecutionState.INITIALIZING
&& executionState != ExecutionState.RUNNING)) {
return false;
}
for (int i = 0; i < partitionWriters.length; ++i) {
if (!partitionWriters[i].isAvailable()) {
return true;
}
}
return false;
} | @Test
public void testNoBackPressureIfTaskNotStarted() throws Exception {
final Task task = createTaskBuilder().build(Executors.directExecutor());
assertFalse(task.isBackPressured());
} |
public ClientTelemetrySender telemetrySender() {
return clientTelemetrySender;
} | @Test
public void testComputeStaggeredIntervalMs() {
ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender();
assertEquals(0, telemetrySender.computeStaggeredIntervalMs(0, 0.5, 1.5));
assertEquals(1, telemetrySender.computeStaggeredIntervalMs(1, 0.99, 1));
long timeMs = telemetrySender.computeStaggeredIntervalMs(1000, 0.5, 1.5);
assertTrue(timeMs >= 500 && timeMs <= 1500);
} |
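The assertions pin down the contract without fixing the formula: 0 stays 0, an interval of 1 with ratios [0.99, 1] yields 1, and 1000ms with ratios [0.5, 1.5] lands in [500, 1500]. One plausible shape consistent with those bounds (a guess, not Kafka's actual code):

import java.util.concurrent.ThreadLocalRandom;

// Jitter the base interval by a ratio drawn uniformly from [lowerRatio, upperRatio).
// Math.round keeps the (1, 0.99, 1) case at 1, matching the second assertion.
static long computeStaggeredIntervalMs(long intervalMs, double lowerRatio, double upperRatio) {
    double ratio = lowerRatio + ThreadLocalRandom.current().nextDouble() * (upperRatio - lowerRatio);
    return Math.round(intervalMs * ratio);
}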
static CommandLineOptions parse(Iterable<String> options) {
CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
List<String> expandedOptions = new ArrayList<>();
expandParamsFiles(options, expandedOptions);
Iterator<String> it = expandedOptions.iterator();
while (it.hasNext()) {
String option = it.next();
if (!option.startsWith("-")) {
optionsBuilder.filesBuilder().add(option).addAll(it);
break;
}
String flag;
String value;
int idx = option.indexOf('=');
if (idx >= 0) {
flag = option.substring(0, idx);
value = option.substring(idx + 1);
} else {
flag = option;
value = null;
}
// NOTE: update usage information in UsageException when new flags are added
switch (flag) {
case "-i":
case "-r":
case "-replace":
case "--replace":
optionsBuilder.inPlace(true);
break;
case "--lines":
case "-lines":
case "--line":
case "-line":
parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
break;
case "--offset":
case "-offset":
optionsBuilder.addOffset(parseInteger(it, flag, value));
break;
case "--length":
case "-length":
optionsBuilder.addLength(parseInteger(it, flag, value));
break;
case "--aosp":
case "-aosp":
case "-a":
optionsBuilder.aosp(true);
break;
case "--version":
case "-version":
case "-v":
optionsBuilder.version(true);
break;
case "--help":
case "-help":
case "-h":
optionsBuilder.help(true);
break;
case "--fix-imports-only":
optionsBuilder.fixImportsOnly(true);
break;
case "--skip-sorting-imports":
optionsBuilder.sortImports(false);
break;
case "--skip-removing-unused-imports":
optionsBuilder.removeUnusedImports(false);
break;
case "--skip-reflowing-long-strings":
optionsBuilder.reflowLongStrings(false);
break;
case "--skip-javadoc-formatting":
optionsBuilder.formatJavadoc(false);
break;
case "-":
optionsBuilder.stdin(true);
break;
case "-n":
case "--dry-run":
optionsBuilder.dryRun(true);
break;
case "--set-exit-if-changed":
optionsBuilder.setExitIfChanged(true);
break;
case "-assume-filename":
case "--assume-filename":
optionsBuilder.assumeFilename(getValue(flag, it, value));
break;
default:
throw new IllegalArgumentException("unexpected flag: " + flag);
}
}
return optionsBuilder.build();
} | @Test
public void aosp() {
assertThat(CommandLineOptionsParser.parse(Arrays.asList("-aosp")).aosp()).isTrue();
} |
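A hypothetical invocation showing both value syntaxes the loop accepts (--flag=value and --flag value) and how the first non-dash token flips the iterator into file-collection mode; the 1:10 range syntax and the inPlace() accessor are assumed for illustration:

// "--lines=1:10" uses the equals form; "Foo.java" is the first non-dash token,
// so it and everything after it are collected via filesBuilder().addAll(it).
CommandLineOptions opts = CommandLineOptionsParser.parse(
        Arrays.asList("--aosp", "--lines=1:10", "-i", "Foo.java", "Bar.java"));
// opts.aosp() == true, opts.inPlace() == true; files: [Foo.java, Bar.java]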
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
} | @Test
public void testShowCreateExternalCatalog() throws AnalysisException, DdlException {
new MockUp<CatalogMgr>() {
@Mock
public Catalog getCatalogByName(String name) {
Map<String, String> properties = new HashMap<>();
properties.put("hive.metastore.uris", "thrift://hadoop:9083");
properties.put("type", "hive");
Catalog catalog = new Catalog(1, "test_hive", properties, "hive_test");
return catalog;
}
};
ShowCreateExternalCatalogStmt stmt = new ShowCreateExternalCatalogStmt("test_hive");
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertEquals("test_hive", resultSet.getResultRows().get(0).get(0));
Assert.assertEquals("CREATE EXTERNAL CATALOG `test_hive`\n" +
"comment \"hive_test\"\n" +
"PROPERTIES (\"type\" = \"hive\",\n" +
"\"hive.metastore.uris\" = \"thrift://hadoop:9083\"\n" +
")", resultSet.getResultRows().get(0).get(1));
} |
@ExecuteOn(TaskExecutors.IO)
@Post(consumes = MediaType.APPLICATION_YAML)
@Operation(tags = {"Flows"}, summary = "Create a flow from yaml source")
public HttpResponse<FlowWithSource> create(
@Parameter(description = "The flow") @Body String flow
) throws ConstraintViolationException {
Flow flowParsed = yamlFlowParser.parse(flow, Flow.class);
return HttpResponse.ok(doCreate(flowParsed, flow));
} | @Test
void updateFlow() {
String flowId = IdUtils.create();
Flow flow = generateFlow(flowId, "io.kestra.unittest", "a");
Flow result = client.toBlocking().retrieve(POST("/api/v1/flows", flow), Flow.class);
assertThat(result.getId(), is(flow.getId()));
assertThat(result.getInputs().getFirst().getId(), is("a"));
flow = generateFlow(flowId, "io.kestra.unittest", "b");
Flow get = client.toBlocking().retrieve(
PUT("/api/v1/flows/" + flow.getNamespace() + "/" + flow.getId(), flow),
Flow.class
);
assertThat(get.getId(), is(flow.getId()));
assertThat(get.getInputs().getFirst().getId(), is("b"));
Flow finalFlow = flow;
HttpClientResponseException e = assertThrows(HttpClientResponseException.class, () -> {
HttpResponse<Void> response = client.toBlocking().exchange(
PUT("/api/v1/flows/" + finalFlow.getNamespace() + "/" + IdUtils.create(), finalFlow)
);
});
assertThat(e.getStatus(), is(NOT_FOUND));
} |
@Override
public final byte readByte() throws EOFException {
final int ch = read();
if (ch < 0) {
throw new EOFException();
}
return (byte) (ch);
} | @Test
public void testReadByte() throws Exception {
int read = in.readByte();
assertEquals(0, read);
} |
public static InternalLogger getInstance(Class<?> clazz) {
return getInstance(clazz.getName());
} | @Test
public void testIsDebugEnabled() {
when(mockLogger.isDebugEnabled()).thenReturn(true);
InternalLogger logger = InternalLoggerFactory.getInstance("mock");
assertTrue(logger.isDebugEnabled());
verify(mockLogger).isDebugEnabled();
} |
@Override
public WindowStore<K, V> build() {
if (storeSupplier.retainDuplicates() && enableCaching) {
log.warn("Disabling caching for {} since store was configured to retain duplicates", storeSupplier.name());
enableCaching = false;
}
return new MeteredWindowStore<>(
maybeWrapCaching(maybeWrapLogging(storeSupplier.get())),
storeSupplier.windowSize(),
storeSupplier.metricsScope(),
time,
keySerde,
valueSerde);
} | @Test
public void shouldHaveChangeLoggingStoreWhenLoggingEnabled() {
setUp();
final WindowStore<String, String> store = builder
.withLoggingEnabled(Collections.emptyMap())
.build();
final StateStore wrapped = ((WrappedStateStore) store).wrapped();
assertThat(store, instanceOf(MeteredWindowStore.class));
assertThat(wrapped, instanceOf(ChangeLoggingWindowBytesStore.class));
assertThat(((WrappedStateStore) wrapped).wrapped(), CoreMatchers.equalTo(inner));
} |
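The test peels decorators apart with wrapped(), which doubles as documentation of the layering build() produces: metering outermost, then optional caching, then change-logging, then the inner store. The same walk as a loop:

// Walk the decorator chain the way the test does; with caching disabled and
// logging enabled this prints MeteredWindowStore, then ChangeLoggingWindowBytesStore.
StateStore store = builder.withLoggingEnabled(Collections.emptyMap()).build();
while (store instanceof WrappedStateStore) {
    System.out.println(store.getClass().getSimpleName());
    store = ((WrappedStateStore) store).wrapped();
}
System.out.println(store.getClass().getSimpleName()); // the innermost store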
@Override
public Optional<ShardingConditionValue> generate(final BinaryOperationExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) {
String operator = predicate.getOperator().toUpperCase();
if (!isSupportedOperator(operator)) {
return Optional.empty();
}
ExpressionSegment valueExpression = predicate.getLeft() instanceof ColumnSegment ? predicate.getRight() : predicate.getLeft();
ConditionValue conditionValue = new ConditionValue(valueExpression, params);
if (conditionValue.isNull()) {
return generate(null, column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
Optional<Comparable<?>> value = conditionValue.getValue();
if (value.isPresent()) {
return generate(value.get(), column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
if (ExpressionConditionUtils.isNowExpression(valueExpression)) {
return generate(timestampServiceRule.getTimestamp(), column, operator, -1);
}
return Optional.empty();
} | @SuppressWarnings("unchecked")
@Test
void assertGenerateNullConditionValue() {
BinaryOperationExpression rightValue = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, null), "=", null);
Optional<ShardingConditionValue> shardingConditionValue = generator.generate(rightValue, column, new LinkedList<>(), mock(TimestampServiceRule.class));
assertTrue(shardingConditionValue.isPresent());
assertTrue(((ListShardingConditionValue<Integer>) shardingConditionValue.get()).getValues().contains(null));
assertTrue(shardingConditionValue.get().getParameterMarkerIndexes().isEmpty());
} |
public static String now() {
return formatDateTime(new DateTime());
} | @Test
public void nowTest() {
// current time
final Date date = DateUtil.date();
assertNotNull(date);
// current time
final Date date2 = DateUtil.date(Calendar.getInstance());
assertNotNull(date2);
// current time
final Date date3 = DateUtil.date(System.currentTimeMillis());
assertNotNull(date3);
// current date-time string, format: yyyy-MM-dd HH:mm:ss
final String now = DateUtil.now();
assertNotNull(now);
// current date string, format: yyyy-MM-dd
final String today = DateUtil.today();
assertNotNull(today);
} |
public static String findJavaHomeFromJavaExecutable(String javaExecutable) {
String[] cmd = {javaExecutable, "-XshowSettings:properties", "-version"};
final List<String> output = new ArrayList<>();
exec(cmd, output);
return output.stream()
.filter(l -> l.contains(" java.home = "))
.map(l -> l.substring(l.indexOf('=') + 1).trim())
.findFirst()
.orElse(null);
} | @Test
void findJavaHomeFromPath() {
final String expectedJavaHome = System.getProperty("java.home");
Assertions.assertEquals(
expectedJavaHome, OsUtils.findJavaHomeFromJavaExecutable(expectedJavaHome + "/bin/java"));
} |
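-XshowSettings:properties -version makes the JVM print its settings, including a "java.home = <path>" line, which the method extracts. A usage sketch mirroring the test, assuming the conventional bin/java layout:

// Resolve java.home by asking the java binary of the current JVM.
String javaExec = System.getProperty("java.home") + "/bin/java";
String home = OsUtils.findJavaHomeFromJavaExecutable(javaExec);
// home should equal System.getProperty("java.home"), or be null if parsing failed.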
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) {
return new IteratorStreamMergedResult(queryResults);
}
Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
selectStatementContext.setIndexes(columnLabelIndexMap);
MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
return decorate(queryResults, selectStatementContext, mergedResult);
} | @Test
void assertBuildIteratorStreamMergedResultWithLimit() throws SQLException {
final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL"));
ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class));
MySQLSelectStatement selectStatement = (MySQLSelectStatement) buildSelectStatement(new MySQLSelectStatement());
selectStatement.setProjections(new ProjectionsSegment(0, 0));
selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralLimitValueSegment(0, 0, 1L), null));
SelectStatementContext selectStatementContext =
new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(), selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
assertThat(resultMerger.merge(Collections.singletonList(createQueryResult()), selectStatementContext, createDatabase(), mock(ConnectionContext.class)),
instanceOf(IteratorStreamMergedResult.class));
} |
@Override
public Predicate<FileInfo> get() {
long currentTimeMS = System.currentTimeMillis();
Interval interval = Interval.between(currentTimeMS, currentTimeMS + 1);
return fileInfo -> {
try {
return interval.intersect(mInterval.add(mGetter.apply(fileInfo))).isValid();
} catch (RuntimeException e) {
LOG.debug("Failed to filter: ", e);
return false;
}
};
} | @Test
public void testDateFromFileNameOlderThan() {
FileFilter filter = FileFilter.newBuilder().setName("dateFromFileNameOlderThan").setValue("2d")
.setPattern("YYYYMMDD").build();
FileInfo info = new FileInfo();
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyyMMdd");
LocalDateTime now = LocalDateTime.now();
LocalDateTime threeDaysBefore = now.minusDays(3);
String date = dtf.format(threeDaysBefore);
info.setName(date);
assertTrue(FilePredicate.create(filter).get().test(info));
LocalDateTime oneDayBefore = now.minusDays(1);
date = dtf.format(oneDayBefore);
info.setName(date);
assertFalse(FilePredicate.create(filter).get().test(info));
} |
@VisibleForTesting
void parseWorkflowParameter(
Map<String, Parameter> workflowParams, Parameter param, String workflowId) {
parseWorkflowParameter(workflowParams, param, workflowId, new HashSet<>());
} | @Test
public void testParseWorkflowParameterWithImplicitToBoolean() {
BooleanParameter bar = BooleanParameter.builder().name("bar").expression("foo + 'e';").build();
paramEvaluator.parseWorkflowParameter(
Collections.singletonMap("foo", StringParameter.builder().expression("'trU'").build()),
bar,
"test-workflow");
assertTrue(bar.getEvaluatedResult());
assertEquals(
"Implicitly converted the evaluated result to a boolean for type String",
bar.getMeta().get("warn"));
bar = BooleanParameter.builder().name("bar").expression("foo + 'e';").build();
paramEvaluator.parseWorkflowParameter(
Collections.singletonMap(
"foo", StringParameter.builder().evaluatedResult("FAls").evaluatedTime(123L).build()),
bar,
"test-workflow");
assertFalse(bar.getEvaluatedResult());
assertEquals(
"Implicitly converted the evaluated result to a boolean for type String",
bar.getMeta().get("warn"));
AssertHelper.assertThrows(
"Can only cast string to boolean",
IllegalArgumentException.class,
"Param [bar] is expected to be a Boolean compatible type but is [class java.lang.Double]",
() ->
paramEvaluator.parseWorkflowParameter(
Collections.emptyMap(),
BooleanParameter.builder().name("bar").expression("0.01;").build(),
"test-workflow"));
AssertHelper.assertThrows(
"Can only cast valid string to boolean",
IllegalArgumentException.class,
"Param [bar] is expected to have a Boolean compatible result but is [foo]",
() ->
paramEvaluator.parseWorkflowParameter(
Collections.emptyMap(),
BooleanParameter.builder().name("bar").expression("'foo';").build(),
"test-workflow"));
} |
public static long parse(String value, String format) {
try {
DateFormat dateFormat = new SimpleDateFormat(format);
return dateFormat.parse(value).getTime();
} catch (Exception e) {
return -1;
}
} | @Test
void TestParse() {
String stringDateStart = "1970-01-01T00:00:00Z";
TbDate d = new TbDate(stringDateStart);
long actualMillis = TbDate.parse("1970-01-01 T00:00:00");
Assertions.assertEquals(-d.getLocaleZoneOffset().getTotalSeconds() * 1000, actualMillis);
String pattern = "yyyy-MM-dd HH:mm:ss.SSS";
String stringDate = "1995-12-04 00:12:00.000";
Assertions.assertNotEquals(-1L, TbDate.parse(stringDate, pattern));
} |
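Note for callers: every parse failure is swallowed and collapsed to -1, so -1 serves both as an error sentinel and as a theoretically valid epoch offset. Both outcomes in one sketch:

// A successful parse returns epoch millis in the default time zone;
// bad input or a bad pattern silently yields -1.
long ok = TbDate.parse("1995-12-04 00:12:00.000", "yyyy-MM-dd HH:mm:ss.SSS");
long bad = TbDate.parse("not a date", "yyyy-MM-dd"); // -1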
@Override
public Result execute( Result previousResult, int nr ) throws KettleJobException {
previousResult.setResult( false );
previousResult.setNrErrors( previousResult.getNrErrors() + 1 );
getLogChannel().logError( BaseMessages.getString( MissingEntry.class, "MissingEntry.Log.CannotRunJob" ) );
return previousResult;
} | @Test
public void testExecute() throws KettleJobException {
MissingEntry entry = spy( new MissingEntry() );
when( entry.getLogChannel() ).thenReturn( mock( LogChannel.class ) );
entry.setName( "MissingTest" );
Result result = new Result();
result.setNrErrors( 0 );
result.setResult( true );
entry.execute( result, 0 );
assertEquals( 1, result.getNrErrors() );
assertEquals( false, result.getResult() );
} |
@Override
public ThreadPoolPluginSupport cancelManagement(String threadPoolId) {
return managedThreadPoolPluginSupports.remove(threadPoolId);
} | @Test
public void testCancelManagement() {
GlobalThreadPoolPluginManager manager = new DefaultGlobalThreadPoolPluginManager();
manager.enableThreadPoolPlugin(new TestPlugin("1"));
TestSupport support = new TestSupport("1");
manager.registerThreadPoolPluginSupport(support);
Assert.assertEquals(1, support.getAllPlugins().size());
manager.cancelManagement(support.getThreadPoolId());
manager.enableThreadPoolPlugin(new TestPlugin("2"));
Assert.assertEquals(1, support.getAllPlugins().size());
} |
@Override
public Map<String, String> getLabels(Properties properties) {
LOGGER.info("DefaultLabelsCollectorManager get labels.....");
Map<String, String> labels = getLabels(labelsCollectorsList, properties);
LOGGER.info("DefaultLabelsCollectorManager get labels finished,labels :{}", labels);
return labels;
} | @Test
void tagV2LabelsCollectorOrderTest() {
Properties properties = new Properties();
DefaultLabelsCollectorManager defaultLabelsCollectorManager = new DefaultLabelsCollectorManager();
Map<String, String> labels = defaultLabelsCollectorManager.getLabels(properties);
String test = labels.get("test");
assertEquals("test2", test);
} |
public static String decode(InputStream qrCodeInputStream) {
BufferedImage image = null;
try {
image = ImgUtil.read(qrCodeInputStream);
return decode(image);
} finally {
ImgUtil.flush(image);
}
} | @Test
@Disabled
public void decodeTest3() {
final String decode = QrCodeUtil.decode(ImgUtil.read("d:/test/qr_a.png"), false, true);
//Console.log(decode);
} |
@DELETE
@Path("{nodeId}")
@ApiOperation("Remove node from cluster")
@AuditEvent(type = DATANODE_REMOVE)
@RequiresPermissions(RestPermissions.DATANODE_REMOVE)
public DataNodeDto removeNode(@ApiParam(name = "nodeId", required = true) @PathParam("nodeId") String nodeId,
@Context UserContext userContext) {
try {
return dataNodeCommandService.removeNode(nodeId);
} catch (NodeNotFoundException e) {
throw new NotFoundException("Node " + nodeId + " not found");
}
} | @Test
public void removeUnavailableNode_throwsNotFoundException() throws NodeNotFoundException {
doThrow(NodeNotFoundException.class).when(dataNodeCommandService).removeNode(NODEID);
Exception e = assertThrows(NotFoundException.class, () -> classUnderTest.removeNode(NODEID, userContext));
assertEquals("Node " + NODEID + " not found", e.getMessage());
} |
public static boolean overrideExecutorTemplateEnabled(ApplicationSpec applicationSpec) {
return applicationSpec != null
&& applicationSpec.getExecutorSpec() != null
&& applicationSpec.getExecutorSpec().getPodTemplateSpec() != null;
} | @Test
void testOverrideExecutorTemplateEnabled() {
ApplicationSpec applicationSpec = new ApplicationSpec();
assertFalse(ModelUtils.overrideExecutorTemplateEnabled(applicationSpec));
BaseApplicationTemplateSpec executorSpec = new BaseApplicationTemplateSpec();
applicationSpec.setExecutorSpec(executorSpec);
assertFalse(ModelUtils.overrideExecutorTemplateEnabled(applicationSpec));
executorSpec.setPodTemplateSpec(buildSamplePodTemplateSpec());
applicationSpec.setExecutorSpec(executorSpec);
assertTrue(ModelUtils.overrideExecutorTemplateEnabled(applicationSpec));
} |
@Override
public void process(Exchange exchange) throws Exception {
JsonElement json = getBodyAsJsonElement(exchange);
String operation = exchange.getIn().getHeader(CouchDbConstants.HEADER_METHOD, String.class);
if (ObjectHelper.isEmpty(operation)) {
Response<DocumentResult> save = saveJsonElement(json);
if (save == null) {
throw new CouchDbException("Could not save document [unknown reason]", exchange);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Document saved [_id={}, _rev={}]", save.getResult().getId(), save.getResult().getRev());
}
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, save.getResult().getRev());
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, save.getResult().getId());
} else {
if (operation.equalsIgnoreCase(CouchDbOperations.DELETE.toString())) {
Response<DocumentResult> delete = deleteJsonElement(json);
if (delete == null) {
throw new CouchDbException("Could not delete document [unknown reason]", exchange);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Document saved [_id={}, _rev={}]", delete.getResult().getId(), delete.getResult().getRev());
}
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, delete.getResult().getRev());
exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, delete.getResult().getId());
}
if (operation.equalsIgnoreCase(CouchDbOperations.GET.toString())) {
String docId = exchange.getIn().getHeader(CouchDbConstants.HEADER_DOC_ID, String.class);
if (docId == null) {
throw new CouchDbException("Could not get document, document id is missing", exchange);
}
Object response = getElement(docId);
if (LOG.isTraceEnabled()) {
LOG.trace("Document retrieved [_id={}]", docId);
}
exchange.getIn().setBody(response);
}
}
} | @Test
void testNullSaveResponseThrowsError() throws Exception {
when(exchange.getIn().getMandatoryBody()).thenThrow(InvalidPayloadException.class);
assertThrows(InvalidPayloadException.class, () -> {
producer.process(exchange);
});
} |
@JsonProperty("lookup_regions")
public abstract String lookupRegions(); | @Test
public void lookupRegions() throws Exception {
final AWSPluginConfiguration config = createDefault()
.toBuilder()
.lookupRegions("us-west-1,eu-west-1 , us-east-1 ")
.build();
assertThat(config.getLookupRegions()).containsExactly(Regions.US_WEST_1, Regions.EU_WEST_1, Regions.US_EAST_1);
} |
static boolean shouldUseRecordReaderFromInputFormat(Configuration configuration, Storage storage, Map<String, String> customSplitInfo)
{
if (customSplitInfo == null || !customSplitInfo.containsKey(CUSTOM_FILE_SPLIT_CLASS_KEY)) {
return false;
}
InputFormat<?, ?> inputFormat = HiveUtil.getInputFormat(configuration, storage.getStorageFormat().getInputFormat(), false);
return Arrays.stream(inputFormat.getClass().getAnnotations())
.map(Annotation::annotationType)
.map(Class::getSimpleName)
.anyMatch(USE_RECORD_READER_FROM_INPUT_FORMAT_ANNOTATION::equals);
} | @Test
public void testShouldUseRecordReaderFromInputFormat()
{
StorageFormat hudiStorageFormat = StorageFormat.create("parquet.hive.serde.ParquetHiveSerDe", "org.apache.hudi.hadoop.HoodieParquetInputFormat", "");
assertFalse(shouldUseRecordReaderFromInputFormat(new Configuration(), new Storage(hudiStorageFormat, "test", Optional.empty(), true, ImmutableMap.of(), ImmutableMap.of()), ImmutableMap.of()));
StorageFormat hudiRealtimeStorageFormat = StorageFormat.create("parquet.hive.serde.ParquetHiveSerDe", "org.apache.hudi.hadoop.realtime.HoodieParquetRealtimeInputFormat", "");
Map<String, String> customSplitInfo = ImmutableMap.of(
CUSTOM_FILE_SPLIT_CLASS_KEY, HoodieRealtimeFileSplit.class.getName(),
HUDI_BASEPATH_KEY, "/test/file.parquet",
HUDI_DELTA_FILEPATHS_KEY, "/test/.file_100.log",
HUDI_MAX_COMMIT_TIME_KEY, "100");
assertTrue(shouldUseRecordReaderFromInputFormat(new Configuration(), new Storage(hudiRealtimeStorageFormat, "test", Optional.empty(), true, ImmutableMap.of(), ImmutableMap.of()), customSplitInfo));
} |
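The decision reduces to a simple-name scan over the InputFormat's annotations. A distilled sketch, with the annotation's simple name assumed to be "UseRecordReaderFromInputFormat":

// True if any annotation on the InputFormat class carries the expected simple name.
boolean useCustomReader = Arrays.stream(inputFormat.getClass().getAnnotations())
        .map(a -> a.annotationType().getSimpleName())
        .anyMatch("UseRecordReaderFromInputFormat"::equals);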
@GET
@Path("job/{noteId}/{paragraphId}")
@ZeppelinApi
public Response getNoteParagraphJobStatus(@PathParam("noteId") String noteId,
@PathParam("paragraphId") String paragraphId)
throws IOException, IllegalArgumentException {
LOGGER.info("Get note paragraph job status.");
return notebook.processNote(noteId,
note -> {
checkIfNoteIsNotNull(note, noteId);
checkIfUserCanRead(noteId, "Insufficient privileges you cannot get job status");
Paragraph paragraph = note.getParagraph(paragraphId);
checkIfParagraphIsNotNull(paragraph, paragraphId);
return new JsonResponse<>(Status.OK, null, new ParagraphJobStatus(paragraph)).build();
});
} | @Test
void testGetNoteParagraphJobStatus() throws IOException {
LOG.info("Running testGetNoteParagraphJobStatus");
String note1Id = null;
try {
note1Id = notebook.createNote("note1", anonymous);
String paragraphId = notebook.processNote(note1Id,
note1 -> {
return note1.addNewParagraph(AuthenticationInfo.ANONYMOUS).getId();
});
CloseableHttpResponse get = httpGet("/notebook/job/" + note1Id + "/" + paragraphId);
assertThat(get, isAllowed());
Map<String, Object> resp = gson.fromJson(EntityUtils.toString(get.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() {}.getType());
Map<String, String> paragraphStatus = (Map<String, String>) resp.get("body");
// Check that id and status have the proper values
assertEquals(paragraphId, paragraphStatus.get("id"));
assertEquals("READY", paragraphStatus.get("status"));
get.close();
} finally {
// cleanup
if (null != note1Id) {
notebook.removeNote(note1Id, anonymous);
}
}
} |
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testFetchDisconnectedShouldNotClearPreferredReadReplicaIfUnassigned() {
buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
subscriptions.assignFromUser(singleton(tp0));
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
// Set preferred read replica to node=1
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
// Verify
Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(1, selected.id());
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
// Disconnect and remove tp0 from assignment
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0), true);
subscriptions.assignFromUser(emptySet());
// Preferred read replica should not be cleared
networkClientDelegate.poll(time.timer(0));
assertFalse(fetcher.hasCompletedFetches());
fetchRecords();
selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(-1, selected.id());
} |
@Override
public List<SocialUserDO> getSocialUserList(Long userId, Integer userType) {
// fetch the bindings
List<SocialUserBindDO> socialUserBinds = socialUserBindMapper.selectListByUserIdAndUserType(userId, userType);
if (CollUtil.isEmpty(socialUserBinds)) {
return Collections.emptyList();
}
// fetch the social users
return socialUserMapper.selectBatchIds(convertSet(socialUserBinds, SocialUserBindDO::getSocialUserId));
} | @Test
public void testGetSocialUserList() {
Long userId = 1L;
Integer userType = UserTypeEnum.ADMIN.getValue();
// mock the social users
SocialUserDO socialUser = randomPojo(SocialUserDO.class).setType(SocialTypeEnum.GITEE.getType());
socialUserMapper.insert(socialUser); // should be found
socialUserMapper.insert(randomPojo(SocialUserDO.class)); // should not be found
// mock the bindings
socialUserBindMapper.insert(randomPojo(SocialUserBindDO.class) // should be matched
.setUserId(userId).setUserType(userType).setSocialType(SocialTypeEnum.GITEE.getType())
.setSocialUserId(socialUser.getId()));
socialUserBindMapper.insert(randomPojo(SocialUserBindDO.class) // should not be matched
.setUserId(2L).setUserType(userType).setSocialType(SocialTypeEnum.DINGTALK.getType()));
// invoke
List<SocialUserDO> result = socialUserService.getSocialUserList(userId, userType);
// assert
assertEquals(1, result.size());
assertPojoEquals(socialUser, result.get(0));
} |
public static PCollection<String> coGroupByKeyTuple(
TupleTag<String> emailsTag,
TupleTag<String> phonesTag,
PCollection<KV<String, String>> emails,
PCollection<KV<String, String>> phones) {
// [START CoGroupByKeyTuple]
PCollection<KV<String, CoGbkResult>> results =
KeyedPCollectionTuple.of(emailsTag, emails)
.and(phonesTag, phones)
.apply(CoGroupByKey.create());
PCollection<String> contactLines =
results.apply(
ParDo.of(
new DoFn<KV<String, CoGbkResult>, String>() {
@ProcessElement
public void processElement(ProcessContext c) {
KV<String, CoGbkResult> e = c.element();
String name = e.getKey();
Iterable<String> emailsIter = e.getValue().getAll(emailsTag);
Iterable<String> phonesIter = e.getValue().getAll(phonesTag);
String formattedResult =
Snippets.formatCoGbkResults(name, emailsIter, phonesIter);
c.output(formattedResult);
}
}));
// [END CoGroupByKeyTuple]
return contactLines;
} | @Test
public void testCoGroupByKeyTuple() throws IOException {
// [START CoGroupByKeyTupleInputs]
final List<KV<String, String>> emailsList =
Arrays.asList(
KV.of("amy", "amy@example.com"),
KV.of("carl", "carl@example.com"),
KV.of("julia", "julia@example.com"),
KV.of("carl", "carl@email.com"));
final List<KV<String, String>> phonesList =
Arrays.asList(
KV.of("amy", "111-222-3333"),
KV.of("james", "222-333-4444"),
KV.of("amy", "333-444-5555"),
KV.of("carl", "444-555-6666"));
PCollection<KV<String, String>> emails = p.apply("CreateEmails", Create.of(emailsList));
PCollection<KV<String, String>> phones = p.apply("CreatePhones", Create.of(phonesList));
// [END CoGroupByKeyTupleInputs]
// [START CoGroupByKeyTupleOutputs]
final TupleTag<String> emailsTag = new TupleTag<>();
final TupleTag<String> phonesTag = new TupleTag<>();
final List<KV<String, CoGbkResult>> expectedResults =
Arrays.asList(
KV.of(
"amy",
CoGbkResult.of(emailsTag, Arrays.asList("amy@example.com"))
.and(phonesTag, Arrays.asList("111-222-3333", "333-444-5555"))),
KV.of(
"carl",
CoGbkResult.of(emailsTag, Arrays.asList("carl@email.com", "carl@example.com"))
.and(phonesTag, Arrays.asList("444-555-6666"))),
KV.of(
"james",
CoGbkResult.of(emailsTag, Arrays.asList())
.and(phonesTag, Arrays.asList("222-333-4444"))),
KV.of(
"julia",
CoGbkResult.of(emailsTag, Arrays.asList("julia@example.com"))
.and(phonesTag, Arrays.asList())));
// [END CoGroupByKeyTupleOutputs]
PCollection<String> actualFormattedResults =
Snippets.coGroupByKeyTuple(emailsTag, phonesTag, emails, phones);
// [START CoGroupByKeyTupleFormattedOutputs]
final List<String> formattedResults =
Arrays.asList(
"amy; ['amy@example.com']; ['111-222-3333', '333-444-5555']",
"carl; ['carl@email.com', 'carl@example.com']; ['444-555-6666']",
"james; []; ['222-333-4444']",
"julia; ['julia@example.com']; []");
// [END CoGroupByKeyTupleFormattedOutputs]
// Make sure that both 'expectedResults' and 'actualFormattedResults' match with the
// 'formattedResults'. 'expectedResults' will have to be formatted before comparing
List<String> expectedFormattedResultsList = new ArrayList<>(expectedResults.size());
for (KV<String, CoGbkResult> e : expectedResults) {
String name = e.getKey();
Iterable<String> emailsIter = e.getValue().getAll(emailsTag);
Iterable<String> phonesIter = e.getValue().getAll(phonesTag);
String formattedResult = Snippets.formatCoGbkResults(name, emailsIter, phonesIter);
expectedFormattedResultsList.add(formattedResult);
}
PCollection<String> expectedFormattedResultsPColl =
p.apply(Create.of(expectedFormattedResultsList));
PAssert.that(expectedFormattedResultsPColl).containsInAnyOrder(formattedResults);
PAssert.that(actualFormattedResults).containsInAnyOrder(formattedResults);
p.run();
} |
public static long noHeapMemoryFree() {
return Math.subtractExact(noHeapMemoryMax(), noHeapMemoryUsed());
} | @Test
public void noHeapMemoryFree() {
long memoryFree = MemoryUtil.noHeapMemoryFree();
Assert.assertNotEquals(0, memoryFree);
} |
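A minimal sketch of what noHeapMemoryMax()/noHeapMemoryUsed() plausibly wrap, via the standard MemoryMXBean; note that getMax() can be -1 when the non-heap maximum is undefined, which would make the subtraction negative:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

// Non-heap usage from the JVM's memory MXBean; Math.subtractExact mirrors the
// overflow-checked subtraction in the method under test.
MemoryUsage nonHeap = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
long free = Math.subtractExact(nonHeap.getMax(), nonHeap.getUsed());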
@Override
public Object decode(Response response, Type type) throws IOException {
JsonAdapter<Object> jsonAdapter = moshi.adapter(type);
if (response.status() == 404 || response.status() == 204)
return Util.emptyValueOf(type);
if (response.body() == null)
return null;
try (BufferedSource source = Okio.buffer(Okio.source(response.body().asInputStream()))) {
if (source.exhausted()) {
return null; // empty body
}
return jsonAdapter.fromJson(source);
} catch (JsonDataException e) {
if (e.getCause() != null && e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw e;
}
} | @Test
void emptyBodyDecodesToNull() throws Exception {
Response response = Response.builder()
.status(204)
.reason("OK")
.headers(Collections.emptyMap())
.request(Request.create(Request.HttpMethod.GET, "/api", Collections.emptyMap(), null,
Util.UTF_8))
.body(new byte[0])
.build();
assertThat(new MoshiDecoder().decode(response, String.class)).isNull();
} |
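A usage sketch of the adapter lookup the decoder performs; Types.newParameterizedType builds the same kind of generic Type that Feign hands in:

import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import com.squareup.moshi.Types;
import java.util.Map;

// Resolve an adapter for Map<String, Object> and decode a body with it,
// the same moshi.adapter(type) call the decoder relies on.
Moshi moshi = new Moshi.Builder().build();
JsonAdapter<Map<String, Object>> adapter =
        moshi.adapter(Types.newParameterizedType(Map.class, String.class, Object.class));
Map<String, Object> parsed = adapter.fromJson("{\"id\":1}"); // throws IOException on malformed input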
public Optional<String> addStreamThread() {
if (isRunningOrRebalancing()) {
final StreamThread streamThread;
synchronized (changeThreadCount) {
final int threadIdx = nextThreadIndex();
final int numLiveThreads = numLiveStreamThreads();
final long cacheSizePerThread = cacheSizePerThread(numLiveThreads + 1);
log.info("Adding StreamThread-{}, there will now be {} live threads and the new cache size per thread is {}",
threadIdx, numLiveThreads + 1, cacheSizePerThread);
resizeThreadCache(cacheSizePerThread);
// Creating thread should hold the lock in order to avoid duplicate thread index.
// If the duplicate index happen, the metadata of thread may be duplicate too.
streamThread = createAndAddStreamThread(cacheSizePerThread, threadIdx);
}
synchronized (stateLock) {
if (isRunningOrRebalancing()) {
streamThread.start();
return Optional.of(streamThread.getName());
} else {
log.warn("Terminating the new thread because the Kafka Streams client is in state {}", state);
streamThread.shutdown();
threads.remove(streamThread);
final long cacheSizePerThread = cacheSizePerThread(numLiveStreamThreads());
log.info("Resizing thread cache due to terminating added thread, new cache size per thread is {}", cacheSizePerThread);
resizeThreadCache(cacheSizePerThread);
return Optional.empty();
}
}
} else {
log.warn("Cannot add a stream thread when Kafka Streams client is in state {}", state);
return Optional.empty();
}
} | @Test
public void shouldNotAddThreadWhenCreated() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
final int oldSize = streams.threads.size();
assertThat(streams.addStreamThread(), equalTo(Optional.empty()));
assertThat(streams.threads.size(), equalTo(oldSize));
}
} |
public static DispositionNotificationOptions parseDispositionNotificationOptions(
final String value,
DispositionNotificationOptionsParser parser)
throws ParseException {
if (value == null) {
return new DispositionNotificationOptions(null, null);
}
final CharArrayBuffer buffer = new CharArrayBuffer(value.length());
buffer.append(value);
final ParserCursor cursor = new ParserCursor(0, value.length());
return (parser != null ? parser : DispositionNotificationOptionsParser.INSTANCE)
.parseDispositionNotificationOptions(buffer, cursor);
} | @Test
public void parseDispositionNotificationOptionsTest() throws ParseException {
DispositionNotificationOptions dispositionNotificationOptions
= DispositionNotificationOptionsParser.parseDispositionNotificationOptions(TEST_NAME_VALUES, null);
Parameter signedReceiptProtocol = dispositionNotificationOptions.getSignedReceiptProtocol();
assertNotNull(signedReceiptProtocol, "signed receipt protocol not parsed");
assertEquals(SIGNED_RECEIPT_PROTOCOL_ATTRIBUTE, signedReceiptProtocol.getAttribute(),
"Unexpected value for signed receipt protocol attribute");
assertEquals(SIGNED_RECEIPT_PROTOCOL_IMPORTANCE, signedReceiptProtocol.getImportance().getImportance(),
"Unexpected value for signed receipt protocol importance");
assertArrayEquals(SIGNED_RECEIPT_PROTOCOL_VALUES, signedReceiptProtocol.getValues(),
"Unexpected value for parameter importance");
Parameter signedReceiptMicalg = dispositionNotificationOptions.getSignedReceiptMicalg();
assertNotNull(signedReceiptProtocol, "signed receipt micalg not parsed");
assertEquals(SIGNED_RECEIPT_MICALG_ATTRIBUTE, signedReceiptMicalg.getAttribute(),
"Unexpected value for signed receipt micalg attribute");
assertEquals(SIGNED_RECEIPT_MICALG_IMPORTANCE, signedReceiptMicalg.getImportance().getImportance(),
"Unexpected value for signed receipt micalg importance");
assertArrayEquals(SIGNED_RECEIPT_MICALG_VALUES, signedReceiptMicalg.getValues(),
"Unexpected value for micalg importance");
} |
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String dmType = typeDefine.getDataType().toUpperCase();
switch (dmType) {
case DM_BIT:
builder.sourceType(DM_BIT);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case DM_TINYINT:
builder.sourceType(DM_TINYINT);
builder.dataType(BasicType.BYTE_TYPE);
break;
case DM_BYTE:
builder.sourceType(DM_BYTE);
builder.dataType(BasicType.BYTE_TYPE);
break;
case DM_SMALLINT:
builder.sourceType(DM_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case DM_INT:
builder.sourceType(DM_INT);
builder.dataType(BasicType.INT_TYPE);
break;
case DM_INTEGER:
builder.sourceType(DM_INTEGER);
builder.dataType(BasicType.INT_TYPE);
break;
case DM_PLS_INTEGER:
builder.sourceType(DM_PLS_INTEGER);
builder.dataType(BasicType.INT_TYPE);
break;
case DM_BIGINT:
builder.sourceType(DM_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case DM_REAL:
builder.sourceType(DM_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case DM_FLOAT:
builder.sourceType(DM_FLOAT);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DM_DOUBLE:
builder.sourceType(DM_DOUBLE);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DM_DOUBLE_PRECISION:
builder.sourceType(DM_DOUBLE_PRECISION);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DM_NUMERIC:
case DM_NUMBER:
case DM_DECIMAL:
case DM_DEC:
DecimalType decimalType;
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale());
} else {
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
}
builder.sourceType(
String.format(
"%s(%s,%s)",
DM_DECIMAL, decimalType.getPrecision(), decimalType.getScale()));
builder.dataType(decimalType);
builder.columnLength((long) decimalType.getPrecision());
builder.scale(decimalType.getScale());
break;
case DM_CHAR:
case DM_CHARACTER:
builder.sourceType(String.format("%s(%s)", DM_CHAR, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
break;
case DM_VARCHAR:
case DM_VARCHAR2:
builder.sourceType(String.format("%s(%s)", DM_VARCHAR2, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
break;
case DM_TEXT:
builder.sourceType(DM_TEXT);
builder.dataType(BasicType.STRING_TYPE);
// dm text max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_LONG:
builder.sourceType(DM_LONG);
builder.dataType(BasicType.STRING_TYPE);
// dm long max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_LONGVARCHAR:
builder.sourceType(DM_LONGVARCHAR);
builder.dataType(BasicType.STRING_TYPE);
// dm longvarchar max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_CLOB:
builder.sourceType(DM_CLOB);
builder.dataType(BasicType.STRING_TYPE);
// dm clob max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_BINARY:
builder.sourceType(String.format("%s(%s)", DM_BINARY, typeDefine.getLength()));
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_VARBINARY:
builder.sourceType(String.format("%s(%s)", DM_VARBINARY, typeDefine.getLength()));
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_LONGVARBINARY:
builder.sourceType(DM_LONGVARBINARY);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_IMAGE:
builder.sourceType(DM_IMAGE);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_BLOB:
builder.sourceType(DM_BLOB);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_BFILE:
builder.sourceType(DM_BFILE);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(typeDefine.getLength());
break;
case DM_DATE:
builder.sourceType(DM_DATE);
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case DM_TIME:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_TIME);
} else {
builder.sourceType(String.format("%s(%s)", DM_TIME, typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_TIME_WITH_TIME_ZONE:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_TIME_WITH_TIME_ZONE);
} else {
builder.sourceType(
String.format("TIME(%s) WITH TIME ZONE", typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_TIMESTAMP:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_TIMESTAMP);
} else {
builder.sourceType(
String.format("%s(%s)", DM_TIMESTAMP, typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_DATETIME:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_DATETIME);
} else {
builder.sourceType(String.format("%s(%s)", DM_DATETIME, typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_DATETIME_WITH_TIME_ZONE:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_DATETIME_WITH_TIME_ZONE);
} else {
builder.sourceType(
String.format("DATETIME(%s) WITH TIME ZONE", typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.DAMENG, typeDefine.getDataType(), typeDefine.getName());
}
return builder.build();
} | @Test
public void testConvertDouble() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("double")
.dataType("double")
.build();
Column column = DmdbTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.DOUBLE_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("double precision")
.dataType("double precision")
.build();
column = DmdbTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.DOUBLE_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("float")
.dataType("float")
.build();
column = DmdbTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.DOUBLE_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
} |
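For the numeric branch the test does not cover: a DECIMAL/NUMBER with no declared precision falls back to DEFAULT_PRECISION/DEFAULT_SCALE (their values are not visible in this excerpt). A hedged sketch in the test's own style:

// DECIMAL with no declared precision takes the default-precision branch,
// and sourceType is rebuilt as "DECIMAL(p,s)" from the resulting DecimalType.
BasicTypeDefine<Object> decimal = BasicTypeDefine.builder()
        .name("amount")
        .columnType("decimal")
        .dataType("decimal")
        .build();
Column column = DmdbTypeConverter.INSTANCE.convert(decimal);
// column.getDataType() is a DecimalType carrying the converter's defaults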
protected TransactionReceipt executeTransaction(Function function)
throws IOException, TransactionException {
return executeTransaction(function, BigInteger.ZERO);
} | @Test
public void testJsonRpcError() throws IOException {
EthSendTransaction ethSendTransaction = new EthSendTransaction();
Response.Error error = new Response.Error(1, "Invalid Transaction");
error.setData("Additional data");
ethSendTransaction.setError(error);
TransactionManager txManager =
spy(new RawTransactionManager(web3j, SampleKeys.CREDENTIALS));
doReturn(ethSendTransaction)
.when(txManager)
.sendTransaction(
any(BigInteger.class),
any(BigInteger.class),
anyString(),
anyString(),
any(BigInteger.class),
anyBoolean());
JsonRpcError exception =
assertThrows(
JsonRpcError.class,
() ->
txManager.executeTransaction(
BigInteger.ZERO,
BigInteger.ZERO,
"",
"",
BigInteger.ZERO,
false));
assertEquals(error.getCode(), exception.getCode());
assertEquals(error.getMessage(), exception.getMessage());
assertEquals(error.getData(), exception.getData());
} |
public static long readUIntBE(InputStream stream) throws IOException, BufferUnderrunException {
int ch1 = stream.read();
int ch2 = stream.read();
int ch3 = stream.read();
int ch4 = stream.read();
if ((ch1 | ch2 | ch3 | ch4) < 0) {
throw new BufferUnderrunException();
}
return ((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + (ch4)) & 0xFFFFFFFFL;
} | @Test
public void testReadUIntBE() throws Exception {
byte[] data = new byte[]{(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x08};
assertEquals(8, EndianUtils.readUIntBE(new ByteArrayInputStream(data)));
data = new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xF0};
assertEquals(4294967280L, EndianUtils.readUIntBE(new ByteArrayInputStream(data)));
data = new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
try {
EndianUtils.readUIntBE(new ByteArrayInputStream(data));
fail("Should have thrown exception");
} catch (EndianUtils.BufferUnderrunException e) {
//swallow
}
} |
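A worked example of the shift-and-mask arithmetic: for bytes {0xFF, 0xFF, 0xFF, 0xF0} the int sum is 0xFFFFFFF0, which is -16 as a 32-bit int; masking with 0xFFFFFFFFL widens it to the unsigned long the test expects:

// (255 << 24) + (255 << 16) + (255 << 8) + 240 == 0xFFFFFFF0 == -16 as an int;
// the mask widens it to the unsigned value 4294967280L.
long value = ((0xFF << 24) + (0xFF << 16) + (0xFF << 8) + 0xF0) & 0xFFFFFFFFL;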
public static <T> T[] createCopy(T[] src) {
return Arrays.copyOf(src, src.length);
} | @Test
public void createCopy_whenZeroLengthArray_thenReturnDifferentZeroLengthArray() {
Object[] original = new Object[0];
Object[] result = ArrayUtils.createCopy(original);
assertThat(result).isNotSameAs(original);
assertThat(result).isEmpty();
} |
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Void options, final PasswordCallback callback) throws BackgroundException {
final Acl permission = acl.getPermission(file);
final Acl.GroupUser everyone = new Acl.GroupUser(Acl.GroupUser.EVERYONE);
final Acl.Role read = new Acl.Role(Permission.PERMISSION_READ.toString());
if(!permission.asList().contains(new Acl.UserAndRole(everyone, read))) {
permission.addAll(everyone, read);
acl.setPermission(file, permission);
}
return new DefaultUrlProvider(session.getHost()).toUrl(file).find(DescriptiveUrl.Type.provider);
} | @Test
public void toDownloadUrl() throws Exception {
final Path bucket = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(test, new TransferStatus());
final S3PublicUrlProvider provider = new S3PublicUrlProvider(session, new S3AccessControlListFeature(session));
assertFalse(provider.isSupported(bucket, Share.Type.download));
assertTrue(provider.isSupported(test, Share.Type.download));
final DescriptiveUrl url = provider.toDownloadUrl(test, Share.Sharee.world, null, new DisabledPasswordCallback());
assertNotEquals(DescriptiveUrl.EMPTY, url);
assertNotNull(url.getUrl());
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static Schema convertToSchema(LogicalType schema) {
return convertToSchema(schema, true);
} | @Test
void testInvalidTimestampTypeAvroSchemaConversion() {
RowType rowType =
(RowType)
ResolvedSchema.of(
Column.physical("a", DataTypes.STRING()),
Column.physical("b", DataTypes.TIMESTAMP(9)))
.toSourceRowDataType()
.getLogicalType();
assertThatThrownBy(() -> AvroSchemaConverter.convertToSchema(rowType))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"Avro does not support TIMESTAMP type with precision: 9, "
+ "it only supports precision less than 3.");
} |
@Override
public String toString() {
boolean traceHi = traceIdHigh != 0;
char[] result = new char[traceHi ? 32 : 16];
int pos = 0;
if (traceHi) {
writeHexLong(result, pos, traceIdHigh);
pos += 16;
}
writeHexLong(result, pos, traceId);
return new String(result);
} | @Test void testToString_lo() {
assertThat(base.toString())
.isEqualTo("000000000000014d");
} |
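The fixed-width encoding is why the test expects exactly 16 characters for a 64-bit-only id: writeHexLong zero-pads each long to 16 hex digits, and a 128-bit id concatenates two such longs into 32. The equivalent with standard formatting:

// A 64-bit trace id rendered as 16 zero-padded lowercase hex chars.
String hex = String.format("%016x", 0x14dL); // "000000000000014d"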
@NotNull
public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) {
// Prefer reading from the DB first, because the code is single-use.
// During social login, when no User is bound yet, a bind-login follows, which needs the code a second time.
SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state);
if (socialUser != null) {
return socialUser;
}
// fetch via remote request
AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state);
Assert.notNull(authUser, "三方用户不能为空");
// save to the DB
socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid());
if (socialUser == null) {
socialUser = new SocialUserDO();
}
socialUser.setType(socialType).setCode(code).setState(state) // code + state must be saved so the record stays queryable
.setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken()).setRawTokenInfo((toJsonString(authUser.getToken())))
.setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar()).setRawUserInfo(toJsonString(authUser.getRawUserInfo()));
if (socialUser.getId() == null) {
socialUserMapper.insert(socialUser);
} else {
socialUserMapper.updateById(socialUser);
}
return socialUser;
} | @Test
public void testAuthSocialUser_update() {
// prepare parameters
Integer socialType = SocialTypeEnum.GITEE.getType();
Integer userType = randomEle(SocialTypeEnum.values()).getType();
String code = "tudou";
String state = "yuanma";
// mock data
socialUserMapper.insert(randomPojo(SocialUserDO.class).setType(socialType).setOpenid("test_openid"));
// mock behavior
AuthUser authUser = randomPojo(AuthUser.class);
when(socialClientService.getAuthUser(eq(socialType), eq(userType), eq(code), eq(state))).thenReturn(authUser);
// invoke
SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state);
// assert
assertBindSocialUser(socialType, result, authUser);
assertEquals(code, result.getCode());
assertEquals(state, result.getState());
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof PiActionProfileMember)) {
return false;
}
PiActionProfileMember that = (PiActionProfileMember) o;
return Objects.equal(actionProfileId, that.actionProfileId) &&
Objects.equal(memberId, that.memberId) &&
Objects.equal(action, that.action);
} | @Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(piActionProfileMember1, sameAsPiActionProfileMember1)
.addEqualityGroup(piActionProfileMember2)
.addEqualityGroup(piActionProfileMember3)
.testEquals();
} |
@Override
public void run() {
final Instant now = time.get();
try {
final Collection<PersistentQueryMetadata> queries = engine.getPersistentQueries();
final Optional<Double> saturation = queries.stream()
.collect(Collectors.groupingBy(PersistentQueryMetadata::getQueryApplicationId))
.entrySet()
.stream()
.map(e -> measure(now, e.getKey(), e.getValue()))
.max(PersistentQuerySaturationMetrics::compareSaturation)
.orElse(Optional.of(0.0));
saturation.ifPresent(s -> report(now, s));
final Set<String> appIds = queries.stream()
.map(PersistentQueryMetadata::getQueryApplicationId)
.collect(Collectors.toSet());
for (final String appId
: Sets.difference(new HashSet<>(perKafkaStreamsStats.keySet()), appIds)) {
perKafkaStreamsStats.get(appId).cleanup(reporter);
perKafkaStreamsStats.remove(appId);
}
} catch (final RuntimeException e) {
LOGGER.error("Error collecting saturation", e);
throw e;
}
} | @Test
public void shouldComputeSaturationForThread() {
// Given:
final Instant start = Instant.now();
when(clock.get()).thenReturn(start);
givenMetrics(kafkaStreams1)
.withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
.withBlockedTime("t1", Duration.ofMinutes(1));
collector.run();
when(clock.get()).thenReturn(start.plus(WINDOW));
givenMetrics(kafkaStreams1)
.withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
.withBlockedTime("t1", Duration.ofMinutes(2));
// When:
collector.run();
// Then:
final DataPoint point = verifyAndGetLatestDataPoint(
"query-thread-saturation",
ImmutableMap.of("thread-id", "t1")
);
assertThat((Double) point.getValue(), closeTo(.9, .01));
} |
public void writeRow( RowMetaInterface rowMeta, Object[] r ) throws KettleStepException {
try {
if ( Utils.isEmpty( meta.getOutputFields() ) ) {
/*
* Write all values in stream to text file.
*/
for ( int i = 0; i < rowMeta.size(); i++ ) {
if ( i > 0 && data.binarySeparator.length > 0 ) {
data.writer.write( data.binarySeparator );
}
ValueMetaInterface v = rowMeta.getValueMeta( i );
Object valueData = r[i];
// no special null value default was specified since no fields are specified at all
// As such, we pass null
//
writeField( v, valueData, null );
}
} else {
/*
* Only write the fields specified!
*/
for ( int i = 0; i < meta.getOutputFields().length; i++ ) {
if ( i > 0 && data.binarySeparator.length > 0 ) {
data.writer.write( data.binarySeparator );
}
ValueMetaInterface v = meta.getMetaWithFieldOptions()[ i ];
Object valueData = r[ data.fieldnrs[ i ] ];
writeField( v, valueData, data.binaryNullValue[ i ] );
}
}
data.writer.write( data.binaryNewline );
incrementLinesOutput();
} catch ( Exception e ) {
throw new KettleStepException( "Error writing line", e );
}
} | @Test
public void testFastDumpDisableStreamEncodeTest() throws Exception {
textFileOutput =
new TextFileOutputTestHandler( stepMockHelper.stepMeta, stepMockHelper.stepDataInterface, 0,
stepMockHelper.transMeta,
stepMockHelper.trans );
textFileOutput.meta = stepMockHelper.processRowsStepMetaInterface;
String testString = "ÖÜä";
String inputEncode = StandardCharsets.UTF_8.name();
String outputEncode = StandardCharsets.ISO_8859_1.name();
Object[] rows = { testString.getBytes( inputEncode ) };
ValueMetaBase valueMetaInterface = new ValueMetaBase( "test", ValueMetaInterface.TYPE_STRING );
valueMetaInterface.setStringEncoding( inputEncode );
valueMetaInterface.setStorageType( ValueMetaInterface.STORAGE_TYPE_BINARY_STRING );
valueMetaInterface.setStorageMetadata( new ValueMetaString() );
TextFileOutputData data = new TextFileOutputData();
data.binarySeparator = " ".getBytes();
data.binaryEnclosure = "\"".getBytes();
data.binaryNewline = "\n".getBytes();
textFileOutput.data = data;
RowMeta rowMeta = new RowMeta();
rowMeta.addValueMeta( valueMetaInterface );
doReturn( outputEncode ).when( stepMockHelper.processRowsStepMetaInterface ).getEncoding();
textFileOutput.data.writer = mock( BufferedOutputStream.class );
textFileOutput.writeRow( rowMeta, rows );
verify( textFileOutput.data.writer ).write( testString.getBytes( outputEncode ) );
} |
@Override
public <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
final Aggregator<? super K, ? super V, VR> aggregator,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
return aggregate(initializer, aggregator, NamedInternal.empty(), materialized);
} | @Test
public void shouldNotHaveNullAdderOnAggregate() {
assertThrows(NullPointerException.class, () -> groupedStream.aggregate(MockInitializer.STRING_INIT, null, Materialized.as("store")));
} |
@Override
public int read() throws IOException {
if (mPosition == mLength) { // at end of file
return -1;
}
updateStreamIfNeeded();
int res = mUfsInStream.get().read();
if (res == -1) {
return -1;
}
mPosition++;
Metrics.BYTES_READ_FROM_UFS.inc(1);
return res;
} | @Test
public void readNullBuffer() throws IOException, AlluxioException {
AlluxioURI ufsPath = getUfsPath();
createFile(ufsPath, CHUNK_SIZE);
try (FileInStream inStream = getStream(ufsPath)) {
assertThrows(NullPointerException.class,
() -> inStream.read((ByteBuffer) null, 0, CHUNK_SIZE));
}
} |
@Override
public void put(final Windowed<Bytes> sessionKey, final byte[] aggregate) {
wrapped().put(sessionKey, aggregate);
context.logChange(name(), SessionKeySchema.toBinary(sessionKey), aggregate, context.timestamp(), wrapped().getPosition());
} | @Test
public void shouldLogPuts() {
final Bytes binaryKey = SessionKeySchema.toBinary(key1);
when(inner.getPosition()).thenReturn(Position.emptyPosition());
store.put(key1, value1);
verify(inner).put(key1, value1);
verify(context).logChange(store.name(), binaryKey, value1, 0L, Position.emptyPosition());
} |
public static int nextCapacity(int current) {
assert current > 0 && Long.bitCount(current) == 1 : "Capacity must be a power of two.";
if (current < MIN_CAPACITY / 2) {
current = MIN_CAPACITY / 2;
}
current <<= 1;
if (current < 0) {
throw new RuntimeException("Maximum capacity exceeded.");
}
return current;
} | @Test
public void testNextCapacity_withInt_shouldIncreaseToHalfOfMinCapacity() {
int capacity = 1;
int nextCapacity = nextCapacity(capacity);
assertEquals(4, nextCapacity);
} |
@VisibleForTesting
static void setupAndModifyConfiguration(
Configuration configuration, String currDir, Map<String, String> variables)
throws Exception {
final String localDirs = variables.get(Environment.LOCAL_DIRS.key());
LOG.info("Current working/local Directory: {}", localDirs);
BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);
setupConfigurationFromVariables(configuration, currDir, variables);
} | @Test
public void testDefaultKerberosKeytabConfiguration() throws Exception {
final String resourceDirPath =
Paths.get("src", "test", "resources").toAbsolutePath().toString();
final Map<String, String> envs = new HashMap<>(2);
envs.put(YarnConfigKeys.KEYTAB_PRINCIPAL, "testuser1@domain");
envs.put(YarnConfigKeys.REMOTE_KEYTAB_PATH, resourceDirPath);
// Local keytab path will be populated from default YarnConfigOptions.LOCALIZED_KEYTAB_PATH
envs.put(
YarnConfigKeys.LOCAL_KEYTAB_PATH,
YarnConfigOptions.LOCALIZED_KEYTAB_PATH.defaultValue());
Configuration configuration = new Configuration();
YarnTaskExecutorRunner.setupAndModifyConfiguration(configuration, resourceDirPath, envs);
// the SecurityContext is installed on TaskManager startup
SecurityUtils.install(new SecurityConfiguration(configuration));
final List<SecurityModule> modules = SecurityUtils.getInstalledModules();
Optional<SecurityModule> moduleOpt =
modules.stream().filter(module -> module instanceof HadoopModule).findFirst();
if (moduleOpt.isPresent()) {
HadoopModule hadoopModule = (HadoopModule) moduleOpt.get();
assertThat(hadoopModule.getSecurityConfig().getPrincipal())
.isEqualTo("testuser1@domain");
assertThat(hadoopModule.getSecurityConfig().getKeytab())
.isEqualTo(
new File(
resourceDirPath,
YarnConfigOptions.LOCALIZED_KEYTAB_PATH.defaultValue())
.getAbsolutePath());
} else {
fail("Can not find HadoopModule!");
}
assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_KEYTAB))
.isEqualTo(
new File(
resourceDirPath,
YarnConfigOptions.LOCALIZED_KEYTAB_PATH.defaultValue())
.getAbsolutePath());
assertThat(configuration.get(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL))
.isEqualTo("testuser1@domain");
} |
public static <T> List<T> page(int pageNo, int pageSize, List<T> list) {
if (CollUtil.isEmpty(list)) {
return new ArrayList<>(0);
}
int resultSize = list.size();
// if the page size covers the total size, return all items directly
if (resultSize <= pageSize) {
if (pageNo < (PageUtil.getFirstPageNo() + 1)) {
return unmodifiable(list);
} else {
// out of bounds, return empty directly
return new ArrayList<>(0);
}
}
// the multiplication may overflow, so use long temporarily
if (((long) (pageNo - PageUtil.getFirstPageNo()) * pageSize) > resultSize) {
// out of bounds, return empty directly
return new ArrayList<>(0);
}
final int[] startEnd = PageUtil.transToStartEnd(pageNo, pageSize);
if (startEnd[1] > resultSize) {
startEnd[1] = resultSize;
if (startEnd[0] > startEnd[1]) {
return new ArrayList<>(0);
}
}
return sub(list, startEnd[0], startEnd[1]);
} | @Test
public void pageTest() {
final List<Integer> a = ListUtil.toLinkedList(1, 2, 3, 4, 5);
PageUtil.setFirstPageNo(1);
final int[] a_1 = ListUtil.page(0, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] a1 = ListUtil.page(1, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] a2 = ListUtil.page(2, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] a3 = ListUtil.page(3, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] a4 = ListUtil.page(4, 2, a).stream().mapToInt(Integer::valueOf).toArray();
assertArrayEquals(new int[]{1, 2}, a_1);
assertArrayEquals(new int[]{1, 2}, a1);
assertArrayEquals(new int[]{3, 4}, a2);
assertArrayEquals(new int[]{5}, a3);
assertArrayEquals(new int[]{}, a4);
PageUtil.setFirstPageNo(2);
final int[] b_1 = ListUtil.page(1, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] b1 = ListUtil.page(2, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] b2 = ListUtil.page(3, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] b3 = ListUtil.page(4, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] b4 = ListUtil.page(5, 2, a).stream().mapToInt(Integer::valueOf).toArray();
assertArrayEquals(new int[]{1, 2}, b_1);
assertArrayEquals(new int[]{1, 2}, b1);
assertArrayEquals(new int[]{3, 4}, b2);
assertArrayEquals(new int[]{5}, b3);
assertArrayEquals(new int[]{}, b4);
PageUtil.setFirstPageNo(0);
final int[] c_1 = ListUtil.page(-1, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] c1 = ListUtil.page(0, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] c2 = ListUtil.page(1, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] c3 = ListUtil.page(2, 2, a).stream().mapToInt(Integer::valueOf).toArray();
final int[] c4 = ListUtil.page(3, 2, a).stream().mapToInt(Integer::valueOf).toArray();
assertArrayEquals(new int[]{1, 2}, c_1);
assertArrayEquals(new int[]{1, 2}, c1);
assertArrayEquals(new int[]{3, 4}, c2);
assertArrayEquals(new int[]{5}, c3);
assertArrayEquals(new int[]{}, c4);
PageUtil.setFirstPageNo(1);
final int[] d1 = ListUtil.page(0, 8, a).stream().mapToInt(Integer::valueOf).toArray();
assertArrayEquals(new int[]{1, 2, 3, 4, 5}, d1);
// page with consumer
final List<List<Integer>> pageListData = new ArrayList<>();
ListUtil.page(a, 2, pageListData::add);
assertArrayEquals(new int[]{1, 2}, pageListData.get(0).stream().mapToInt(Integer::valueOf).toArray());
assertArrayEquals(new int[]{3, 4}, pageListData.get(1).stream().mapToInt(Integer::valueOf).toArray());
assertArrayEquals(new int[]{5}, pageListData.get(2).stream().mapToInt(Integer::valueOf).toArray());
pageListData.clear();
ListUtil.page(a, 2, pageList -> {
pageListData.add(pageList);
if (pageList.get(0).equals(1)) {
pageList.clear();
}
});
assertArrayEquals(new int[]{}, pageListData.get(0).stream().mapToInt(Integer::valueOf).toArray());
assertArrayEquals(new int[]{3, 4}, pageListData.get(1).stream().mapToInt(Integer::valueOf).toArray());
assertArrayEquals(new int[]{5}, pageListData.get(2).stream().mapToInt(Integer::valueOf).toArray());
// restore the default value to avoid affecting other test cases
PageUtil.setFirstPageNo(0);
} |
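The index arithmetic behind PageUtil.transToStartEnd is not shown here; a minimal standalone sketch that reproduces the indices the test above expects (the method name, signature, and the clamping of out-of-range page numbers are assumptions) could look like this:

public class PageIndexSketch {
    // Assumed reconstruction: page numbers are offset by the configured first
    // page number, and pages before the first page are clamped to index 0.
    static int[] transToStartEnd(int pageNo, int pageSize, int firstPageNo) {
        int start = Math.max(0, (pageNo - firstPageNo) * pageSize);
        return new int[]{start, start + pageSize};
    }

    public static void main(String[] args) {
        // With firstPageNo == 1 and pageSize == 2, page 2 covers indices [2, 4),
        // i.e. the elements {3, 4} of the list {1, 2, 3, 4, 5} in pageTest above.
        int[] startEnd = transToStartEnd(2, 2, 1);
        System.out.println(startEnd[0] + ".." + startEnd[1]); // prints 2..4
    }
}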
public static byte[] parseHex(String string) {
return hexFormat.parseHex(string);
} | @Test
@Parameters(method = "bytesToHexStringVectors")
public void parseHexValid(byte[] expectedBytes, String hexString) {
byte[] actual = ByteUtils.parseHex(hexString);
assertArrayEquals("incorrect hex formatted string", expectedBytes, actual);
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
try {
final EueApiClient client = new EueApiClient(session);
// Move to trash first as precondition of delete
this.delete(super.trash(files, prompt, callback));
for(Path f : files.keySet()) {
fileid.cache(f, null);
}
}
catch(ApiException e) {
for(Path f : files.keySet()) {
throw new EueExceptionMappingService().map("Cannot delete {0}", e, f);
}
}
} | @Test(expected = NotfoundException.class)
public void testNotfound() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
new EueDeleteFeature(session, fileid).delete(Collections.singletonList(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file))
), new DisabledLoginCallback(), new Delete.DisabledCallback());
fail();
} |
@Override
public ModelMBean assemble(Object obj, ObjectName name) throws JMException {
ModelMBeanInfo mbi = null;
// use the default provided mbean which has been annotated with JMX annotations
LOGGER.trace("Assembling MBeanInfo for: {} from @ManagedResource object: {}", name, obj);
mbi = assembler.getMBeanInfo(obj, null, name.toString());
if (mbi == null) {
return null;
}
RequiredModelMBean mbean = new RequiredModelMBean(mbi);
try {
mbean.setManagedResource(obj, "ObjectReference");
} catch (InvalidTargetObjectTypeException e) {
throw new JMException(e.getMessage());
}
// Allows the managed object to send notifications
if (obj instanceof NotificationSenderAware) {
((NotificationSenderAware) obj).setNotificationSender(new NotificationSenderAdapter(mbean));
}
return mbean;
} | @Test
public void testNotificationAware() throws MalformedObjectNameException, JMException {
NotificationSenderAware mockedNotificationAwareMbean = mock(NotificationSenderAware.class);
ModelMBean modelBean = defaultManagementMBeanAssembler.assemble(mockedNotificationAwareMbean, new ObjectName("org.flowable.jmx.Mbeans:type=something"));
assertThat(modelBean).isNotNull();
ArgumentCaptor<NotificationSender> argument = ArgumentCaptor.forClass(NotificationSender.class);
verify(mockedNotificationAwareMbean).setNotificationSender(argument.capture());
assertThat(argument).isNotNull();
assertThat(argument.getValue()).isNotNull();
} |
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
} | @Test
public void testParameterMaximumValue() {
Reader reader = new Reader(new SwaggerConfiguration().openAPI(new OpenAPI()).openAPI31(true));
OpenAPI openAPI = reader.read(ParameterMaximumValueResource.class);
String yaml = "openapi: 3.1.0\n" +
"paths:\n" +
" /test/{petId}:\n" +
" get:\n" +
" operationId: getPetById\n" +
" parameters:\n" +
" - name: petId\n" +
" in: path\n" +
" description: ID of pet that needs to be fetched\n" +
" required: true\n" +
" schema:\n" +
" type: integer\n" +
" format: int64\n" +
" exclusiveMaximum: 10\n" +
" exclusiveMinimum: 1\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" '*/*': {}\n";
SerializationMatchers.assertEqualsToYaml31(openAPI, yaml);
} |
@Override
public int compare(Path o1, Path o2) {
final String c1 = PathNormalizer.name(o1.getAbsolute());
final boolean c2 = PathNormalizer.name(o2.getAbsolute()).matches(pattern);
if(c1.matches(pattern) && c2) {
return 0;
}
if(c1.matches(pattern)) {
return -1;
}
if(c2) {
return 1;
}
return super.compare(o1, o2);
} | @Test
public void testCompare() {
assertEquals(-1, new DownloadRegexPriorityComparator(".*\\.html").compare(
new Path("f.html", EnumSet.of(Path.Type.file)), new Path("g.t", EnumSet.of(Path.Type.file))));
assertEquals(1, new DownloadRegexPriorityComparator(".*\\.html").compare(
new Path("f.htm", EnumSet.of(Path.Type.file)), new Path("g.html", EnumSet.of(Path.Type.file))));
assertEquals(0, new DownloadRegexPriorityComparator(".*\\.html").compare(
new Path("f.html", EnumSet.of(Path.Type.file)), new Path("g.html", EnumSet.of(Path.Type.file))));
} |
@Override
public HttpServletRequest readRequest(AwsProxyRequest request, SecurityContext securityContext, Context lambdaContext, ContainerConfig config)
throws InvalidRequestEventException {
// Expect the HTTP method and context to be populated. If they are not, we are handling an
// unsupported event type.
if (request.getHttpMethod() == null || request.getHttpMethod().equals("") || request.getRequestContext() == null) {
throw new InvalidRequestEventException(INVALID_REQUEST_ERROR);
}
request.setPath(stripBasePath(request.getPath(), config));
if (request.getMultiValueHeaders() != null && request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE) != null) {
String contentType = request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE);
// put single as we always expect to have one and only one content type in a request.
request.getMultiValueHeaders().putSingle(HttpHeaders.CONTENT_TYPE, getContentTypeWithCharset(contentType, config));
}
AwsProxyHttpServletRequest servletRequest = new AwsProxyHttpServletRequest(request, lambdaContext, securityContext, config);
servletRequest.setServletContext(servletContext);
servletRequest.setAttribute(API_GATEWAY_CONTEXT_PROPERTY, request.getRequestContext());
servletRequest.setAttribute(API_GATEWAY_STAGE_VARS_PROPERTY, request.getStageVariables());
servletRequest.setAttribute(API_GATEWAY_EVENT_PROPERTY, request);
servletRequest.setAttribute(ALB_CONTEXT_PROPERTY, request.getRequestContext().getElb());
servletRequest.setAttribute(LAMBDA_CONTEXT_PROPERTY, lambdaContext);
servletRequest.setAttribute(JAX_SECURITY_CONTEXT_PROPERTY, securityContext);
return servletRequest;
} | @Test
void readRequest_validEventEmptyPath_expectNoException() {
try {
AwsProxyRequest req = new AwsProxyRequestBuilder(null, "GET").build();
HttpServletRequest servletReq = reader.readRequest(req, null, null, ContainerConfig.defaultConfig());
assertNotNull(servletReq);
} catch (InvalidRequestEventException e) {
e.printStackTrace();
fail("Could not read a request with a null path");
}
} |
@VisibleForTesting
public Supplier<PageProjection> compileProjection(
SqlFunctionProperties sqlFunctionProperties,
RowExpression projection,
Optional<String> classNameSuffix)
{
return compileProjection(sqlFunctionProperties, emptyMap(), projection, classNameSuffix);
} | @Test
public void testGeneratedClassName()
{
PageFunctionCompiler functionCompiler = new PageFunctionCompiler(createTestMetadataManager(), 0);
String planNodeId = "7";
String stageId = "20170707_223500_67496_zguwn.2";
String classSuffix = stageId + "_" + planNodeId;
Supplier<PageProjection> projectionSupplier = functionCompiler.compileProjection(SESSION.getSqlFunctionProperties(), ADD_10_EXPRESSION, Optional.of(classSuffix));
PageProjection projection = projectionSupplier.get();
Work<List<Block>> work = projection.project(SESSION.getSqlFunctionProperties(), new DriverYieldSignal(), createLongBlockPage(1, 0), SelectedPositions.positionsRange(0, 1));
// class name should look like PageProjectionWork_20170707_223500_67496_zguwn_2_7_XX
assertTrue(work.getClass().getSimpleName().startsWith("PageProjectionWork_" + stageId.replace('.', '_') + "_" + planNodeId));
} |
public boolean rollbackClusterState(UUID txnId) {
clusterServiceLock.lock();
try {
final LockGuard currentLock = getStateLock();
if (!currentLock.allowsUnlock(txnId)) {
return false;
}
logger.fine("Rolling back cluster state transaction: " + txnId);
stateLockRef.set(LockGuard.NOT_LOCKED);
// if state allows join after rollback, then remove all members which left during transaction.
if (state.isJoinAllowed()) {
node.getClusterService().getMembershipManager().removeAllMissingMembers();
}
return true;
} finally {
clusterServiceLock.unlock();
}
} | @Test
public void test_unlockClusterState_fail_whenNotLocked() {
assertFalse(clusterStateManager.rollbackClusterState(TXN));
} |
@Override
public void run()
throws Exception {
// Get list of files to process.
List<String> filteredFiles = SegmentGenerationUtils.listMatchedFilesWithRecursiveOption(_inputDirFS, _inputDirURI,
_spec.getIncludeFileNamePattern(), _spec.getExcludeFileNamePattern(), _spec.isSearchRecursively());
if (_consistentPushEnabled) {
ConsistentDataPushUtils.configureSegmentPostfix(_spec);
}
File localTempDir = new File(FileUtils.getTempDirectory(), "pinot-" + UUID.randomUUID());
try {
int numInputFiles = filteredFiles.size();
_segmentCreationTaskCountDownLatch = new CountDownLatch(numInputFiles);
if (!SegmentGenerationJobUtils.useGlobalDirectorySequenceId(_spec.getSegmentNameGeneratorSpec())) {
Map<String, List<String>> localDirIndex = new HashMap<>();
for (String filteredFile : filteredFiles) {
java.nio.file.Path filteredParentPath = Paths.get(filteredFile).getParent();
localDirIndex.computeIfAbsent(filteredParentPath.toString(), k -> new ArrayList<>()).add(filteredFile);
}
for (String parentPath : localDirIndex.keySet()) {
List<String> siblingFiles = localDirIndex.get(parentPath);
Collections.sort(siblingFiles);
for (int i = 0; i < siblingFiles.size(); i++) {
URI inputFileURI = SegmentGenerationUtils
.getFileURI(siblingFiles.get(i), SegmentGenerationUtils.getDirectoryURI(parentPath));
submitSegmentGenTask(localTempDir, inputFileURI, i);
}
}
} else {
// iterate over the file list and submit a segment generation task for each file
for (int i = 0; i < numInputFiles; i++) {
final URI inputFileURI = SegmentGenerationUtils.getFileURI(filteredFiles.get(i), _inputDirURI);
submitSegmentGenTask(localTempDir, inputFileURI, i);
}
}
_segmentCreationTaskCountDownLatch.await();
if (_failure.get() != null) {
_executorService.shutdownNow();
throw _failure.get();
}
} finally {
//clean up
FileUtils.deleteQuietly(localTempDir);
_executorService.shutdown();
}
} | @Test
public void testSegmentGeneration() throws Exception {
// TODO use common resource definitions & code shared with Hadoop unit test.
// So probably need a pinot-batch-ingestion-common tests jar that we depend on.
File testDir = makeTestDir();
File inputDir = new File(testDir, "input");
inputDir.mkdirs();
File inputFile = new File(inputDir, "input.csv");
FileUtils.writeLines(inputFile, Lists.newArrayList("col1,col2", "value1,1", "value2,2"));
// Create an output directory, with two empty files in it. One we'll overwrite,
// and one we'll leave alone.
final String outputFilename = "myTable_OFFLINE_0.tar.gz";
final String existingFilename = "myTable_OFFLINE_100.tar.gz";
File outputDir = new File(testDir, "output");
FileUtils.touch(new File(outputDir, outputFilename));
FileUtils.touch(new File(outputDir, existingFilename));
final String schemaName = "mySchema";
File schemaFile = makeSchemaFile(testDir, schemaName);
File tableConfigFile = makeTableConfigFile(testDir, schemaName);
SegmentGenerationJobSpec jobSpec = makeJobSpec(inputDir, outputDir, schemaFile, tableConfigFile);
jobSpec.setOverwriteOutput(false);
SegmentGenerationJobRunner jobRunner = new SegmentGenerationJobRunner(jobSpec);
jobRunner.run();
// The output directory should still have the original file in it.
File oldSegmentFile = new File(outputDir, existingFilename);
Assert.assertTrue(oldSegmentFile.exists());
// The output directory should have the original file in it (since we aren't overwriting)
File newSegmentFile = new File(outputDir, outputFilename);
Assert.assertTrue(newSegmentFile.exists());
Assert.assertTrue(newSegmentFile.isFile());
Assert.assertTrue(newSegmentFile.length() == 0);
// Now run again, but this time with overwriting of output files, and confirm we got a valid segment file.
jobSpec.setOverwriteOutput(true);
jobRunner = new SegmentGenerationJobRunner(jobSpec);
jobRunner.run();
// The original file should still be there.
Assert.assertTrue(oldSegmentFile.exists());
Assert.assertTrue(newSegmentFile.exists());
Assert.assertTrue(newSegmentFile.isFile());
Assert.assertTrue(newSegmentFile.length() > 0);
// FUTURE - validate contents of file?
} |
public void setTemplateEntriesForChild(CapacitySchedulerConfiguration conf,
QueuePath childQueuePath) {
setTemplateEntriesForChild(conf, childQueuePath, false);
} | @Test
public void testQueueSpecificTemplates() {
conf.set(getTemplateKey(ROOT, "capacity"), "2w");
conf.set(getLeafTemplateKey(ROOT,
"default-node-label-expression"), "test");
conf.set(getLeafTemplateKey(ROOT, "capacity"), "10w");
conf.setBoolean(getParentTemplateKey(
ROOT, AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE), false);
AutoCreatedQueueTemplate template =
new AutoCreatedQueueTemplate(conf, ROOT);
template.setTemplateEntriesForChild(conf, TEST_QUEUE_A);
template.setTemplateEntriesForChild(conf, TEST_QUEUE_B, true);
Assert.assertNull("default-node-label-expression is set for parent",
conf.getDefaultNodeLabelExpression(TEST_QUEUE_A));
Assert.assertEquals("default-node-label-expression is not set for leaf",
"test", conf.getDefaultNodeLabelExpression(TEST_QUEUE_B));
Assert.assertFalse("auto queue removal is not disabled for parent",
conf.isAutoExpiredDeletionEnabled(TEST_QUEUE_A));
Assert.assertEquals("weight should not be overridden when set by " +
"queue type specific template",
10f, conf.getNonLabeledQueueWeight(TEST_QUEUE_B), 10e-6);
Assert.assertEquals("weight should be set by common template",
2f, conf.getNonLabeledQueueWeight(TEST_QUEUE_A), 10e-6);
} |
@Override
public void validateSmsCode(SmsCodeValidateReqDTO reqDTO) {
validateSmsCode0(reqDTO.getMobile(), reqDTO.getCode(), reqDTO.getScene());
} | @Test
public void validateSmsCode_success() {
// prepare parameters
SmsCodeValidateReqDTO reqDTO = randomPojo(SmsCodeValidateReqDTO.class, o -> {
o.setMobile("15601691300");
o.setScene(randomEle(SmsSceneEnum.values()).getScene());
});
// mock data
SqlConstants.init(DbType.MYSQL);
smsCodeMapper.insert(randomPojo(SmsCodeDO.class, o -> o.setMobile(reqDTO.getMobile())
.setScene(reqDTO.getScene()).setCode(reqDTO.getCode()).setUsed(false)));
// invoke
smsCodeService.validateSmsCode(reqDTO);
} |
public Node deserializeObject(JsonReader reader) {
Log.info("Deserializing JSON to Node.");
JsonObject jsonObject = reader.readObject();
return deserializeObject(jsonObject);
} | @Test
void simpleTest() {
CompilationUnit cu = parse("public class X{} class Z{}");
String serialized = serialize(cu, false);
Node deserialized = deserializer.deserializeObject(Json.createReader(new StringReader(serialized)));
assertEqualsStringIgnoringEol("public class X {\n}\n\nclass Z {\n}\n", deserialized.toString());
assertEquals(cu.hashCode(), deserialized.hashCode());
} |
@Override
public int intersection(String... names) {
return get(intersectionAsync(names));
} | @Test
public void testIntersection() {
RScoredSortedSet<String> set1 = redisson.getScoredSortedSet("simple1");
set1.add(1, "one");
set1.add(2, "two");
RScoredSortedSet<String> set2 = redisson.getScoredSortedSet("simple2");
set2.add(1, "one");
set2.add(2, "two");
set2.add(3, "three");
RScoredSortedSet<String> out = redisson.getScoredSortedSet("out");
assertThat(out.intersection(set1.getName(), set2.getName())).isEqualTo(2);
assertThat(out.readAll()).containsOnly("one", "two");
assertThat(out.getScore("one")).isEqualTo(2);
assertThat(out.getScore("two")).isEqualTo(4);
} |
public double vincentyDistance(LatLong other) {
return LatLongUtils.vincentyDistance(this, other);
} | @Test
public void vincentyDistance_originToNorthPole_returnDistanceFromPoleToEquator() {
// This is the origin of the WGS-84 reference system
LatLong zeroZero = new LatLong(0d, 0d);
// Calculating the distance between the north pole and the equator
LatLong northPole = new LatLong(90d, 0d);
double vincenty = LatLongUtils.vincentyDistance(zeroZero, northPole);
assertEquals(DISTANCE_POLE_TO_EQUATOR, vincenty, 1);
} |
public static Comparator<InstanceInfo> comparatorByAppNameAndId() {
return INSTANCE_APP_ID_COMPARATOR;
} | @Test
public void testComparatorByAppNameAndIdIfNotNullReturnInt() {
InstanceInfo instanceInfo1 = Mockito.mock(InstanceInfo.class);
InstanceInfo instanceInfo2 = Mockito.mock(InstanceInfo.class);
InstanceInfo instanceInfo3 = createSingleInstanceApp("foo", "foo",
InstanceInfo.ActionType.ADDED).getByInstanceId("foo");
InstanceInfo instanceInfo4 = createSingleInstanceApp("bar", "bar",
InstanceInfo.ActionType.ADDED).getByInstanceId("bar");
Assert.assertTrue(EurekaEntityFunctions.comparatorByAppNameAndId()
.compare(instanceInfo1, instanceInfo2) > 0);
Assert.assertTrue(EurekaEntityFunctions.comparatorByAppNameAndId()
.compare(instanceInfo3, instanceInfo2) > 0);
Assert.assertTrue(EurekaEntityFunctions.comparatorByAppNameAndId()
.compare(instanceInfo1, instanceInfo3) < 0);
Assert.assertTrue(EurekaEntityFunctions.comparatorByAppNameAndId()
.compare(instanceInfo3, instanceInfo4) > 0);
Assert.assertTrue(EurekaEntityFunctions.comparatorByAppNameAndId()
.compare(instanceInfo3, instanceInfo3) == 0);
} |
@Override
public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) {
for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) {
Map<String, ?> sourceOffset = offsetEntry.getValue();
if (sourceOffset == null) {
// We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
// want to prevent users from being able to clean it up using the REST API
continue;
}
Map<String, ?> sourcePartition = offsetEntry.getKey();
if (sourcePartition == null) {
throw new ConnectException("Source partitions may not be null");
}
MirrorUtils.validateSourcePartitionString(sourcePartition, CONSUMER_GROUP_ID_KEY);
MirrorUtils.validateSourcePartitionString(sourcePartition, TOPIC_KEY);
MirrorUtils.validateSourcePartitionPartition(sourcePartition);
MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true);
}
// We don't actually use these offsets in the task class, so no additional effort is required beyond just validating
// the format of the user-supplied offsets
return true;
} | @Test
public void testAlterOffsetsTombstones() {
MirrorCheckpointConnector connector = new MirrorCheckpointConnector();
Function<Map<String, ?>, Boolean> alterOffsets = partition -> connector.alterOffsets(
null,
Collections.singletonMap(partition, null)
);
Map<String, Object> partition = sourcePartition("consumer-app-2", "t", 3);
assertTrue(() -> alterOffsets.apply(partition));
partition.put(PARTITION_KEY, "a string");
assertTrue(() -> alterOffsets.apply(partition));
partition.remove(PARTITION_KEY);
assertTrue(() -> alterOffsets.apply(partition));
assertTrue(() -> alterOffsets.apply(null));
assertTrue(() -> alterOffsets.apply(Collections.emptyMap()));
assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value")));
} |
public static String decodeBase64ZippedString( String loggingString64 ) throws IOException {
if ( loggingString64 == null || loggingString64.isEmpty() ) {
return "";
}
StringWriter writer = new StringWriter();
// base 64 decode
byte[] bytes64 = Base64.decodeBase64( loggingString64.getBytes() );
// unzip to string encoding-wise
ByteArrayInputStream zip = new ByteArrayInputStream( bytes64 );
// PDI-4325 originally used xml encoding in servlet
try ( GZIPInputStream unzip = new GZIPInputStream( zip, HttpUtil.ZIP_BUFFER_SIZE );
BufferedInputStream in = new BufferedInputStream( unzip, HttpUtil.ZIP_BUFFER_SIZE );
InputStreamReader reader = new InputStreamReader( in, Const.XML_ENCODING ) ) {
// use same buffer size
char[] buff = new char[ HttpUtil.ZIP_BUFFER_SIZE ];
for ( int length; ( length = reader.read( buff ) ) > 0; ) {
writer.write( buff, 0, length );
}
}
return writer.toString();
} | @Test
public final void testDecodeBase64ZippedString() throws IOException, NoSuchAlgorithmException {
String enc64 = this.canonicalBase64Encode( STANDART );
// decode string
String decoded = HttpUtil.decodeBase64ZippedString( enc64 );
Assert.assertEquals( "Strings are the same after transformation", STANDART, decoded );
} |
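The test's canonicalBase64Encode helper is not shown above. A plausible counterpart to decodeBase64ZippedString, mirroring the decode path in reverse (gzip first, then Base64; using UTF-8 to stand in for Const.XML_ENCODING is an assumption), might look like this:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.codec.binary.Base64;

public class ZippedStringEncodeSketch {
    public static String encodeBase64ZippedString(String in) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        // gzip the string first, with the same encoding the decoder reads with
        try (GZIPOutputStream gzip = new GZIPOutputStream(bytes)) {
            gzip.write(in.getBytes(StandardCharsets.UTF_8));
        }
        // then Base64-encode the compressed bytes, the inverse of decodeBase64()
        return new String(Base64.encodeBase64(bytes.toByteArray()), StandardCharsets.US_ASCII);
    }
}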
public static ClassLoader findClassLoader(final ClassLoader proposed) {
ClassLoader classLoader = proposed;
if (classLoader == null) {
classLoader = ReflectHelpers.class.getClassLoader();
}
if (classLoader == null) {
classLoader = ClassLoader.getSystemClassLoader();
}
return classLoader;
} | @Test
public void testFindProperClassLoaderIfContextClassLoaderIsAvailable()
throws InterruptedException {
final ClassLoader[] classLoader = new ClassLoader[1];
Thread thread = new Thread(() -> classLoader[0] = ReflectHelpers.findClassLoader());
ClassLoader cl = new ClassLoader() {};
thread.setContextClassLoader(cl);
thread.start();
thread.join();
assertEquals(cl, classLoader[0]);
} |
public void persistInstanceWorkerId(final String instanceId, final int workerId) {
repository.persistEphemeral(ComputeNode.getInstanceWorkerIdNodePath(instanceId), String.valueOf(workerId));
} | @Test
void assertPersistInstanceWorkerId() {
InstanceMetaData instanceMetaData = new ProxyInstanceMetaData("foo_instance_id", 3307);
final String instanceId = instanceMetaData.getId();
new ComputeNodePersistService(repository).persistInstanceWorkerId(instanceId, 100);
verify(repository).persistEphemeral(ComputeNode.getInstanceWorkerIdNodePath(instanceId), String.valueOf(100));
} |
public CompletableFuture<ChangeInvisibleDurationResponse> changeInvisibleDuration(ProxyContext ctx,
ChangeInvisibleDurationRequest request) {
CompletableFuture<ChangeInvisibleDurationResponse> future = new CompletableFuture<>();
try {
validateTopicAndConsumerGroup(request.getTopic(), request.getGroup());
validateInvisibleTime(Durations.toMillis(request.getInvisibleDuration()));
ReceiptHandle receiptHandle = ReceiptHandle.decode(request.getReceiptHandle());
String group = request.getGroup().getName();
MessageReceiptHandle messageReceiptHandle = messagingProcessor.removeReceiptHandle(ctx, grpcChannelManager.getChannel(ctx.getClientID()), group, request.getMessageId(), receiptHandle.getReceiptHandle());
if (messageReceiptHandle != null) {
receiptHandle = ReceiptHandle.decode(messageReceiptHandle.getReceiptHandleStr());
}
return this.messagingProcessor.changeInvisibleTime(
ctx,
receiptHandle,
request.getMessageId(),
group,
request.getTopic().getName(),
Durations.toMillis(request.getInvisibleDuration())
).thenApply(ackResult -> convertToChangeInvisibleDurationResponse(ctx, request, ackResult));
} catch (Throwable t) {
future.completeExceptionally(t);
}
return future;
} | @Test
public void testChangeInvisibleDurationInvisibleTimeTooSmall() throws Throwable {
try {
this.changeInvisibleDurationActivity.changeInvisibleDuration(
createContext(),
ChangeInvisibleDurationRequest.newBuilder()
.setInvisibleDuration(Durations.fromSeconds(-1))
.setTopic(Resource.newBuilder().setName(TOPIC).build())
.setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build())
.setMessageId("msgId")
.setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis(), 3000))
.build()
).get();
} catch (ExecutionException executionException) {
GrpcProxyException exception = (GrpcProxyException) executionException.getCause();
assertEquals(Code.ILLEGAL_INVISIBLE_TIME, exception.getCode());
}
} |
@Override
public void onPartitionsDeleted(
List<TopicPartition> topicPartitions,
BufferSupplier bufferSupplier
) throws ExecutionException, InterruptedException {
throwIfNotActive();
CompletableFuture.allOf(
FutureUtils.mapExceptionally(
runtime.scheduleWriteAllOperation(
"on-partition-deleted",
Duration.ofMillis(config.offsetCommitTimeoutMs()),
coordinator -> coordinator.onPartitionsDeleted(topicPartitions)
),
exception -> {
log.error("Could not delete offsets for deleted partitions {} due to: {}.",
topicPartitions, exception.getMessage(), exception
);
return null;
}
).toArray(new CompletableFuture[0])
).get();
} | @Test
public void testOnPartitionsDeletedWhenServiceIsNotStarted() {
CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
GroupCoordinatorService service = new GroupCoordinatorService(
new LogContext(),
createConfig(),
runtime,
new GroupCoordinatorMetrics(),
createConfigManager()
);
assertThrows(CoordinatorNotAvailableException.class, () -> service.onPartitionsDeleted(
Collections.singletonList(new TopicPartition("foo", 0)),
BufferSupplier.NO_CACHING
));
} |
public void setPageSegMode(String pageSegMode) {
if (!pageSegMode.matches("[0-9]|10|11|12|13")) {
throw new IllegalArgumentException("Invalid page segmentation mode");
}
this.pageSegMode = pageSegMode;
userConfigured.add("pageSegMode");
} | @Test
public void testValidatePageSegMode() {
TesseractOCRConfig config = new TesseractOCRConfig();
config.setPageSegMode("0");
config.setPageSegMode("10");
assertTrue(true, "Couldn't set valid values");
assertThrows(IllegalArgumentException.class, () -> {
config.setPageSegMode("14");
});
} |
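The whole-string anchoring of String.matches is what makes this validation work: "14" is rejected even though its first character matches [0-9], because some alternation branch must cover the entire input. A small standalone check:

public class PageSegModeRegexDemo {
    public static void main(String[] args) {
        String pattern = "[0-9]|10|11|12|13";
        System.out.println("9".matches(pattern));  // true: single-digit branch
        System.out.println("13".matches(pattern)); // true: explicit two-digit branch
        System.out.println("14".matches(pattern)); // false: no branch matches the whole string
    }
}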
@SuppressWarnings("deprecation")
public static <K> KStreamHolder<K> build(
final KStreamHolder<K> left,
final KStreamHolder<K> right,
final StreamStreamJoin<K> join,
final RuntimeBuildContext buildContext,
final StreamJoinedFactory streamJoinedFactory) {
final QueryContext queryContext = join.getProperties().getQueryContext();
final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext);
final LogicalSchema leftSchema;
final LogicalSchema rightSchema;
final Formats rightFormats;
final Formats leftFormats;
if (join.getJoinType().equals(RIGHT)) {
leftFormats = join.getRightInternalFormats();
rightFormats = join.getLeftInternalFormats();
leftSchema = right.getSchema();
rightSchema = left.getSchema();
} else {
leftFormats = join.getLeftInternalFormats();
rightFormats = join.getRightInternalFormats();
leftSchema = left.getSchema();
rightSchema = right.getSchema();
}
final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from(
leftSchema,
leftFormats.getKeyFeatures(),
leftFormats.getValueFeatures()
);
final Serde<GenericRow> leftSerde = buildContext.buildValueSerde(
leftFormats.getValueFormat(),
leftPhysicalSchema,
stacker.push(LEFT_SERDE_CTX).getQueryContext()
);
final PhysicalSchema rightPhysicalSchema = PhysicalSchema.from(
rightSchema,
rightFormats.getKeyFeatures(),
rightFormats.getValueFeatures()
);
final Serde<GenericRow> rightSerde = buildContext.buildValueSerde(
rightFormats.getValueFormat(),
rightPhysicalSchema,
stacker.push(RIGHT_SERDE_CTX).getQueryContext()
);
final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde(
leftFormats.getKeyFormat(),
leftPhysicalSchema,
queryContext
);
final StreamJoined<K, GenericRow, GenericRow> joined = streamJoinedFactory.create(
keySerde,
leftSerde,
rightSerde,
StreamsUtil.buildOpName(queryContext),
StreamsUtil.buildOpName(queryContext)
);
final JoinParams joinParams = JoinParamsFactory
.create(join.getKeyColName(), leftSchema, rightSchema);
JoinWindows joinWindows;
// The optional grace value indicates whether the user specified the GRACE PERIOD syntax in the
// join window. If specified, we call the newer KStreams API ofTimeDifferenceAndGrace(),
// which enables the "spurious" results bugfix for left/outer joins (see KAFKA-10847).
if (join.getGraceMillis().isPresent()) {
joinWindows = JoinWindows.ofTimeDifferenceAndGrace(
join.getBeforeMillis(),
join.getGraceMillis().get());
} else {
joinWindows = JoinWindows.of(join.getBeforeMillis());
}
joinWindows = joinWindows.after(join.getAfterMillis());
final KStream<K, GenericRow> result;
switch (join.getJoinType()) {
case LEFT:
result = left.getStream().leftJoin(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case RIGHT:
result = right.getStream().leftJoin(
left.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case OUTER:
result = left.getStream().outerJoin(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case INNER:
result = left.getStream().join(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
default:
throw new IllegalStateException("invalid join type");
}
return left.withStream(result, joinParams.getSchema());
} | @Test
public void shouldDoOuterJoinWithGrace() {
// Given:
givenOuterJoin(Optional.of(GRACE));
// When:
final KStreamHolder<Struct> result = join.build(planBuilder, planInfo);
// Then:
verify(leftKStream).outerJoin(
same(rightKStream),
eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 1)),
eq(WINDOWS_WITH_GRACE),
same(joined)
);
verifyNoMoreInteractions(leftKStream, rightKStream, resultKStream);
assertThat(result.getStream(), is(resultKStream));
assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
} |
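The two window-construction paths the builder chooses between can be exercised in isolation; the durations below are illustrative only (the builder passes the plan's before/after/grace values):

import java.time.Duration;
import org.apache.kafka.streams.kstream.JoinWindows;

public class JoinWindowsSketch {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        Duration before = Duration.ofSeconds(10);
        Duration after = Duration.ofSeconds(5);
        Duration grace = Duration.ofSeconds(2);

        // GRACE PERIOD present: the newer API, which also enables the
        // left/outer-join spurious-result fix tracked in KAFKA-10847.
        JoinWindows withGrace = JoinWindows.ofTimeDifferenceAndGrace(before, grace).after(after);

        // No grace specified: fall back to the deprecated of(), which keeps
        // the old default grace period for backwards compatibility.
        JoinWindows legacy = JoinWindows.of(before).after(after);

        System.out.println(withGrace.gracePeriodMs() + " vs " + legacy.gracePeriodMs());
    }
}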
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
table.refresh();
if (lastPosition != null) {
return discoverIncrementalSplits(lastPosition);
} else {
return discoverInitialSplits();
}
} | @Test
public void testIncrementalFromSnapshotId() throws Exception {
appendTwoSnapshots();
ScanContext scanContext =
ScanContext.builder()
.startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_ID)
.startSnapshotId(snapshot2.snapshotId())
.build();
ContinuousSplitPlannerImpl splitPlanner =
new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
assertThat(initialResult.fromPosition()).isNull();
// For inclusive behavior of snapshot2, the initial result should point to snapshot1 (as
// snapshot2's parent)
assertThat(initialResult.toPosition().snapshotId().longValue())
.isEqualTo(snapshot1.snapshotId());
assertThat(initialResult.toPosition().snapshotTimestampMs().longValue())
.isEqualTo(snapshot1.timestampMillis());
assertThat(initialResult.splits()).isEmpty();
ContinuousEnumerationResult secondResult = splitPlanner.planSplits(initialResult.toPosition());
assertThat(secondResult.fromPosition().snapshotId().longValue())
.isEqualTo(snapshot1.snapshotId());
assertThat(secondResult.fromPosition().snapshotTimestampMs().longValue())
.isEqualTo(snapshot1.timestampMillis());
assertThat(secondResult.toPosition().snapshotId().longValue())
.isEqualTo(snapshot2.snapshotId());
assertThat(secondResult.toPosition().snapshotTimestampMs().longValue())
.isEqualTo(snapshot2.timestampMillis());
IcebergSourceSplit split = Iterables.getOnlyElement(secondResult.splits());
assertThat(split.task().files()).hasSize(1);
Set<String> discoveredFiles =
split.task().files().stream()
.map(fileScanTask -> fileScanTask.file().path().toString())
.collect(Collectors.toSet());
// should discover dataFile2 appended in snapshot2
Set<String> expectedFiles = ImmutableSet.of(dataFile2.path().toString());
assertThat(discoveredFiles).containsExactlyElementsOf(expectedFiles);
IcebergEnumeratorPosition lastPosition = secondResult.toPosition();
for (int i = 0; i < 3; ++i) {
lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
}
} |
@Override
public String getConfig(String key, String group, long timeout) throws IllegalStateException {
try {
long nacosTimeout = timeout < 0 ? getDefaultTimeout() : timeout;
if (StringUtils.isEmpty(group)) {
group = DEFAULT_GROUP;
}
return configService.getConfig(key, group, nacosTimeout);
} catch (NacosException e) {
logger.error(CONFIG_ERROR_NACOS, "", "", e.getMessage(), e);
}
return null;
} | @Test
void testGetConfig() throws Exception {
put("org.apache.dubbo.nacos.testService.configurators", "hello");
Thread.sleep(200);
put("dubbo.properties", "test", "aaa=bbb");
Thread.sleep(200);
put("org.apache.dubbo.demo.DemoService:1.0.0.test:xxxx.configurators", "helloworld");
Thread.sleep(200);
Assertions.assertEquals(
"hello",
config.getConfig(
"org.apache.dubbo.nacos.testService.configurators", DynamicConfiguration.DEFAULT_GROUP));
Assertions.assertEquals("aaa=bbb", config.getConfig("dubbo.properties", "test"));
Assertions.assertEquals(
"helloworld",
config.getConfig(
"org.apache.dubbo.demo.DemoService:1.0.0.test:xxxx.configurators",
DynamicConfiguration.DEFAULT_GROUP));
} |
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuffer buf = new StringBuffer();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case FORMAT_MODIFIER_STATE:
handleFormatModifierState(c, tokenList, buf);
break;
case OPTION_STATE:
processOption(c, tokenList, buf);
break;
case KEYWORD_STATE:
handleKeywordState(c, tokenList, buf);
break;
case RIGHT_PARENTHESIS_STATE:
handleRightParenthesisState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addValuedToken(Token.LITERAL, buf, tokenList);
break;
case KEYWORD_STATE:
tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
break;
case RIGHT_PARENTHESIS_STATE:
tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
break;
case FORMAT_MODIFIER_STATE:
case OPTION_STATE:
throw new ScanException("Unexpected end of pattern string");
}
return tokenList;
} | @Test
public void testOptions() throws ScanException {
{
List<Token> tl = new TokenStream("%x{t}").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
List<String> ol = new ArrayList<String>();
ol.add("t");
witness.add(new Token(Token.OPTION, ol));
assertEquals(witness, tl);
}
{
List<Token> tl = new TokenStream("%x{t,y}").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
List<String> ol = new ArrayList<String>();
ol.add("t");
ol.add("y");
witness.add(new Token(Token.OPTION, ol));
assertEquals(witness, tl);
}
{
List<Token> tl = new TokenStream("%x{\"hello world.\", \"12y \"}").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
List<String> ol = new ArrayList<String>();
ol.add("hello world.");
ol.add("12y ");
witness.add(new Token(Token.OPTION, ol));
assertEquals(witness, tl);
}
{
List<Token> tl = new TokenStream("%x{'opt}'}").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
List<String> ol = new ArrayList<String>();
ol.add("opt}");
witness.add(new Token(Token.OPTION, ol));
assertEquals(witness, tl);
}
} |
public static String trimEnd( final String source, char c ) {
if ( source == null ) {
return null;
}
int index = source.length();
while ( index > 0 && source.charAt( index - 1 ) == c ) {
index--;
}
return source.substring( 0, index );
} | @Test
public void testTrimEnd_Many() {
assertEquals( "/file/path", StringUtil.trimEnd( "/file/path///", '/' ) );
} |
@Override
public boolean hasPrivileges(final String database) {
return databases.contains(AuthorityConstants.PRIVILEGE_WILDCARD) || databases.contains(database);
} | @Test
void assertHasNotPrivileges() {
assertFalse(new DatabasePermittedPrivileges(Collections.singleton("foo_db")).hasPrivileges("bar_db"));
} |
public static int[] generateRandomNumber(int begin, int end, int size) {
// the seed values can be generated arbitrarily, but they must not repeat
final int[] seed = ArrayUtil.range(begin, end);
return generateRandomNumber(begin, end, size, seed);
} | @Test
public void generateRandomNumberTest2(){
// check the boundary
final int[] ints = NumberUtil.generateRandomNumber(1, 8, 7);
assertEquals(7, ints.length);
final Set<?> set = Convert.convert(Set.class, ints);
assertEquals(7, set.size());
} |
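generateRandomNumber's seed-based overload is not shown above. A generic way to draw distinct values from a seed array is a partial Fisher-Yates shuffle; this sketch illustrates the idea and is not necessarily Hutool's exact algorithm:

import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;

public class DistinctSampleSketch {
    static int[] sample(int[] seed, int size) {
        int[] pool = seed.clone();
        int[] out = new int[size];
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        for (int i = 0; i < size; i++) {
            int j = i + rnd.nextInt(pool.length - i); // pick from the unshuffled tail
            int tmp = pool[i]; pool[i] = pool[j]; pool[j] = tmp;
            out[i] = pool[i];                         // each element is taken at most once
        }
        return out;
    }

    public static void main(String[] args) {
        int[] seed = {1, 2, 3, 4, 5, 6, 7};  // ArrayUtil.range(1, 8)
        System.out.println(Arrays.toString(sample(seed, 7))); // 7 distinct values
    }
}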
public static ConfigurableResource parseResourceConfigValue(String value)
throws AllocationConfigurationException {
return parseResourceConfigValue(value, Long.MAX_VALUE);
} | @Test
public void testOnlyCPU() throws Exception {
String value = "1024vcores";
expectUnparsableResource(value);
parseResourceConfigValue(value);
} |
@Override
public void deleteFiles(Iterable<String> pathsToDelete) throws BulkDeletionFailureException {
AtomicInteger failureCount = new AtomicInteger(0);
Tasks.foreach(pathsToDelete)
.executeWith(executorService())
.retry(DELETE_RETRY_ATTEMPTS)
.stopRetryOn(FileNotFoundException.class)
.suppressFailureWhenFinished()
.onFailure(
(f, e) -> {
LOG.error("Failure during bulk delete on file: {} ", f, e);
failureCount.incrementAndGet();
})
.run(this::deleteFile);
if (failureCount.get() != 0) {
throw new BulkDeletionFailureException(failureCount.get());
}
} | @Test
public void testDeleteFiles() {
Path parent = new Path(tempDir.toURI());
List<Path> filesCreated = createRandomFiles(parent, 10);
hadoopFileIO.deleteFiles(
filesCreated.stream().map(Path::toString).collect(Collectors.toList()));
filesCreated.forEach(
file -> assertThat(hadoopFileIO.newInputFile(file.toString()).exists()).isFalse());
} |
@Override
public void processElement(final StreamRecord<T> element) throws Exception {
final T event = element.getValue();
final long previousTimestamp =
element.hasTimestamp() ? element.getTimestamp() : Long.MIN_VALUE;
final long newTimestamp = timestampAssigner.extractTimestamp(event, previousTimestamp);
element.setTimestamp(newTimestamp);
output.collect(element);
watermarkGenerator.onEvent(event, newTimestamp, wmOutput);
} | @Test
void periodicWatermarksOnlyEmitOnPeriodicEmitStreamMode() throws Exception {
OneInputStreamOperatorTestHarness<Long, Long> testHarness =
createTestHarness(
WatermarkStrategy.forGenerator((ctx) -> new PeriodicWatermarkGenerator())
.withTimestampAssigner((ctx) -> new LongExtractor()));
testHarness.processElement(new StreamRecord<>(2L, 1));
assertThat(pollNextStreamRecord(testHarness)).is(matching(streamRecord(2L, 2L)));
assertThat(testHarness.getOutput()).isEmpty();
} |
public void validatePositionsIfNeeded() {
Map<TopicPartition, SubscriptionState.FetchPosition> partitionsToValidate =
offsetFetcherUtils.getPartitionsToValidate();
validatePositionsAsync(partitionsToValidate);
} | @Test
public void testOffsetValidationHandlesSeekWithInflightOffsetForLeaderRequest() {
buildFetcher();
assignFromUser(singleton(tp0));
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put(tp0.topic(), 4);
final int epochOne = 1;
final Optional<Integer> epochOneOpt = Optional.of(epochOne);
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWithIds("dummy", 1,
Collections.emptyMap(), partitionCounts, tp -> epochOne, topicIds), false, 0L);
// Offset validation requires OffsetForLeaderEpoch request v3 or higher
Node node = metadata.fetch().nodes().get(0);
apiVersions.update(node.idString(), NodeApiVersions.create());
Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(metadata.currentLeader(tp0).leader, epochOneOpt);
subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0, epochOneOpt, leaderAndEpoch));
offsetFetcher.validatePositionsIfNeeded();
consumerClient.poll(time.timer(Duration.ZERO));
assertTrue(subscriptions.awaitingValidation(tp0));
assertTrue(client.hasInFlightRequests());
// While the OffsetForLeaderEpoch request is in-flight, we seek to a different offset.
subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(5, epochOneOpt, leaderAndEpoch));
assertTrue(subscriptions.awaitingValidation(tp0));
client.respond(
offsetsForLeaderEpochRequestMatcher(tp0),
prepareOffsetsForLeaderEpochResponse(tp0, 0, 0L));
consumerClient.poll(time.timer(Duration.ZERO));
// The response should be ignored since we were validating a different position.
assertTrue(subscriptions.awaitingValidation(tp0));
} |
public static long convertVersion(String version) throws IncompatibleVersionException {
if (StringUtils.isBlank(version)) {
throw new IllegalArgumentException("The version must not be blank.");
}
String[] parts = StringUtils.split(version, '.');
int size = parts.length;
if (size > MAX_VERSION_DOT + 1) {
throw new IncompatibleVersionException("incompatible version format:" + version);
}
long result = 0L;
int i = 1;
size = MAX_VERSION_DOT + 1;
for (String part : parts) {
if (StringUtils.isNumeric(part)) {
result += calculatePartValue(part, size, i);
} else {
String[] subParts = StringUtils.split(part, '-');
if (StringUtils.isNumeric(subParts[0])) {
result += calculatePartValue(subParts[0], size, i);
}
}
i++;
}
return result;
} | @Test
public void testConvertVersion() {
// case: success
Assertions.assertDoesNotThrow(() -> {
long v = Version.convertVersion(Version.getCurrent());
Assertions.assertTrue(v > 0);
});
Assertions.assertDoesNotThrow(() -> {
long v = Version.convertVersion("1.7.0-SNAPSHOT");
Assertions.assertEquals(1070000, v);
});
Assertions.assertDoesNotThrow(() -> {
long v = Version.convertVersion("1.7.0");
Assertions.assertEquals(1070000, v);
});
Assertions.assertDoesNotThrow(() -> {
long v = Version.convertVersion("1.7.0-native-rc1-SNAPSHOT");
Assertions.assertEquals(1070000, v);
});
Assertions.assertDoesNotThrow(() -> {
long v = Version.convertVersion("1.7.0-native-rc1");
Assertions.assertEquals(1070000, v);
});
// case: fail
Assertions.assertThrows(IllegalArgumentException.class, () -> {
Version.convertVersion(null);
});
Assertions.assertThrows(IllegalArgumentException.class, () -> {
Version.convertVersion(" ");
});
Assertions.assertThrows(IncompatibleVersionException.class, () -> {
Version.convertVersion("1.7.0.native.rc1-SNAPSHOT");
});
Assertions.assertThrows(IncompatibleVersionException.class, () -> {
Version.convertVersion("1.7.0.native.rc1");
});
} |
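The weights behind the conversion are implied by the test: each of up to four dot-separated numeric parts occupies two decimal digits, so "1.7.0" becomes 1*10^6 + 7*10^4 + 0*10^2 = 1070000. A hypothetical standalone reconstruction (calculatePartValue and MAX_VERSION_DOT are not shown, so the weights are inferred, and the real method's format validation is omitted):

public class VersionEncodingSketch {
    // Two decimal digits per slot, four slots: inferred from "1.7.0" -> 1_070_000.
    private static final long[] WEIGHTS = {1_000_000L, 10_000L, 100L, 1L};

    static long encode(String version) {
        String[] parts = version.split("\\.");
        long result = 0L;
        for (int slot = 0; slot < parts.length && slot < WEIGHTS.length; slot++) {
            // "0-SNAPSHOT" or "0-native-rc1" contributes only its numeric prefix
            String numeric = parts[slot].split("-")[0];
            if (!numeric.isEmpty() && numeric.chars().allMatch(Character::isDigit)) {
                result += Long.parseLong(numeric) * WEIGHTS[slot];
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(encode("1.7.0"));            // 1070000
        System.out.println(encode("1.7.0-native-rc1")); // 1070000
    }
}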
public static int modPowerOfTwo(int a, int b) {
return a & (b - 1);
} | @Test
public void testModPowerOfTwo() {
int[] aParams = new int[]{
0,
1,
Integer.MAX_VALUE / 2,
Integer.MAX_VALUE,
};
int[] bParams = new int[]{
1,
2,
1024,
powerOfTwo(10),
powerOfTwo(20),
};
for (int a : aParams) {
for (int b : bParams) {
assertEquals(a % b, QuickMath.modPowerOfTwo(a, b));
}
}
} |
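Why the bitmask equals the modulo: for b == 2^k, b - 1 sets exactly the k low bits, and those bits of a are the remainder of dividing by 2^k. The identity holds for non-negative a, which is what the test samples:

public class ModPowerOfTwoDemo {
    static int modPowerOfTwo(int a, int b) {
        return a & (b - 1);
    }

    public static void main(String[] args) {
        int[] as = {0, 7, 1024, Integer.MAX_VALUE};
        int[] bs = {1, 2, 16, 1 << 20};
        for (int a : as) {
            for (int b : bs) {
                // For negative a the two differ, because % in Java can return negatives.
                if (a % b != modPowerOfTwo(a, b)) {
                    throw new AssertionError(a + " % " + b);
                }
            }
        }
        System.out.println("a & (b - 1) matched a % b for all sampled non-negative values");
    }
}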
static Map<Integer, Schema.Field> mapFieldPositions(CSVFormat format, Schema schema) {
List<String> header = Arrays.asList(format.getHeader());
Map<Integer, Schema.Field> indexToFieldMap = new HashMap<>();
for (Schema.Field field : schema.getFields()) {
int index = getIndex(header, field);
if (index >= 0) {
indexToFieldMap.put(index, field);
}
}
return indexToFieldMap;
} | @Test
public void givenNonNullableHeaderAndSchemaFieldMismatch_throws() {
Schema schema =
Schema.builder()
.addStringField("another_string")
.addInt32Field("an_integer")
.addStringField("a_string")
.build();
IllegalArgumentException e =
assertThrows(
IllegalArgumentException.class,
() ->
CsvIOParseHelpers.mapFieldPositions(
csvFormat().withHeader("an_integer", "a_string"), schema));
assertEquals(
"header does not contain required class org.apache.beam.sdk.schemas.Schema field: "
+ schema.getField("another_string").getName(),
e.getMessage());
} |
public boolean initWithCommittedOffsetsIfNeeded(Timer timer) {
final Set<TopicPartition> initializingPartitions = subscriptions.initializingPartitions();
final Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(initializingPartitions, timer);
// "offsets" will be null if the offset fetch requests did not receive responses within the given timeout
if (offsets == null)
return false;
refreshCommittedOffsets(offsets, this.metadata, this.subscriptions);
return true;
} | @Test
public void testRefreshOffset() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
subscriptions.assignFromUser(singleton(t1p));
client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L));
coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
assertTrue(subscriptions.hasAllFetchPositions());
assertEquals(100L, subscriptions.position(t1p).offset);
} |
@Override
public CompletableFuture<ConsumeMessageDirectlyResult> consumeMessageDirectly(String address,
ConsumeMessageDirectlyResultRequestHeader requestHeader, long timeoutMillis) {
CompletableFuture<ConsumeMessageDirectlyResult> future = new CompletableFuture<>();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CONSUME_MESSAGE_DIRECTLY, requestHeader);
remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
if (response.getCode() == ResponseCode.SUCCESS) {
ConsumeMessageDirectlyResult info = ConsumeMessageDirectlyResult.decode(response.getBody(), ConsumeMessageDirectlyResult.class);
future.complete(info);
} else {
log.warn("consumeMessageDirectly getResponseCommand failed, {} {}", response.getCode(), response.getRemark());
future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
}
});
return future;
} | @Test
public void assertConsumeMessageDirectlyWithSuccess() throws Exception {
ConsumeMessageDirectlyResult responseBody = new ConsumeMessageDirectlyResult();
setResponseSuccess(RemotingSerializable.encode(responseBody));
ConsumeMessageDirectlyResultRequestHeader requestHeader = mock(ConsumeMessageDirectlyResultRequestHeader.class);
CompletableFuture<ConsumeMessageDirectlyResult> actual = mqClientAdminImpl.consumeMessageDirectly(defaultBrokerAddr, requestHeader, defaultTimeout);
ConsumeMessageDirectlyResult result = actual.get();
assertNotNull(result);
assertTrue(result.isAutoCommit());
} |
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
final String dateTime = dateFormat.format(new Date(clock.getTime()));
printWithBanner(dateTime, '=');
output.println();
if (!gauges.isEmpty()) {
printWithBanner("-- Gauges", '-');
for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
output.println(entry.getKey());
printGauge(entry.getValue());
}
output.println();
}
if (!counters.isEmpty()) {
printWithBanner("-- Counters", '-');
for (Map.Entry<String, Counter> entry : counters.entrySet()) {
output.println(entry.getKey());
printCounter(entry);
}
output.println();
}
if (!histograms.isEmpty()) {
printWithBanner("-- Histograms", '-');
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
output.println(entry.getKey());
printHistogram(entry.getValue());
}
output.println();
}
if (!meters.isEmpty()) {
printWithBanner("-- Meters", '-');
for (Map.Entry<String, Meter> entry : meters.entrySet()) {
output.println(entry.getKey());
printMeter(entry.getValue());
}
output.println();
}
if (!timers.isEmpty()) {
printWithBanner("-- Timers", '-');
for (Map.Entry<String, Timer> entry : timers.entrySet()) {
output.println(entry.getKey());
printTimer(entry.getValue());
}
output.println();
}
output.println();
output.flush();
} | @Test
public void reportsHistogramValues() throws Exception {
final Histogram histogram = mock(Histogram.class);
when(histogram.getCount()).thenReturn(1L);
final Snapshot snapshot = mock(Snapshot.class);
when(snapshot.getMax()).thenReturn(2L);
when(snapshot.getMean()).thenReturn(3.0);
when(snapshot.getMin()).thenReturn(4L);
when(snapshot.getStdDev()).thenReturn(5.0);
when(snapshot.getMedian()).thenReturn(6.0);
when(snapshot.get75thPercentile()).thenReturn(7.0);
when(snapshot.get95thPercentile()).thenReturn(8.0);
when(snapshot.get98thPercentile()).thenReturn(9.0);
when(snapshot.get99thPercentile()).thenReturn(10.0);
when(snapshot.get999thPercentile()).thenReturn(11.0);
when(histogram.getSnapshot()).thenReturn(snapshot);
reporter.report(map(),
map(),
map("test.histogram", histogram),
map(),
map());
assertThat(consoleOutput())
.isEqualTo(lines(
dateHeader,
"",
"-- Histograms ------------------------------------------------------------------",
"test.histogram",
" count = 1",
" min = 4",
" max = 2",
" mean = 3.00",
" stddev = 5.00",
" median = 6.00",
" 75% <= 7.00",
" 95% <= 8.00",
" 98% <= 9.00",
" 99% <= 10.00",
" 99.9% <= 11.00",
"",
""
));
} |
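For context, a minimal wiring sketch for the report(...) method above, assuming the standard Dropwizard Metrics builder API (ConsoleReporter.forRegistry); the class, registry, and metric names here are illustrative, not part of the dataset pair:

import java.util.concurrent.TimeUnit;
import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricRegistry;

public class ReporterSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS)
                .build();
        registry.histogram("demo.histogram").update(42); // gives the "-- Histograms" section something to print
        reporter.report(); // one-shot call into report(...); reporter.start(10, TimeUnit.SECONDS) would schedule it instead
    }
}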
@Override
public ConfigErrors errors() {
return errors;
} | @Test
public void shouldValidatePipelineLabelWithBrokenTruncationSyntax2() {
String labelFormat = "pipeline-${COUNT}-${git[7]}-alpha";
PipelineConfig pipelineConfig = createAndValidatePipelineLabel(labelFormat);
String expectedLabelTemplate = "Invalid label 'pipeline-${COUNT}-${git[7]}-alpha'.";
assertThat(pipelineConfig.errors().on(PipelineConfig.LABEL_TEMPLATE), startsWith(expectedLabelTemplate));
} |
@Override
public String toString() {
return major + "." + minor;
} | @Test
public void toStringTest() {
assertEquals("3.8", Version.of(3, 8).toString());
} |
@Override
public PageData<WidgetsBundle> findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter widgetsBundleFilter, PageLink pageLink) {
return findTenantWidgetsBundlesByTenantIds(Arrays.asList(widgetsBundleFilter.getTenantId().getId(), NULL_UUID), widgetsBundleFilter, pageLink);
} | @Test
public void testOrderInFindAllWidgetsBundlesByTenantIdFullSearch() {
UUID tenantId1 = Uuids.timeBased();
for (int i = 0; i < 10; i++) {
createWidgetsBundle(TenantId.fromUUID(tenantId1), "WB1_" + i, "WB1_" + (10-i), i % 2 == 1 ? null : (int)(Math.random() * 1000));
createWidgetsBundle(TenantId.SYS_TENANT_ID, "WB_SYS_" + i, "WB_SYS_" + (10-i), i % 2 == 0 ? null : (int)(Math.random() * 1000));
}
widgetsBundles = widgetsBundleDao.find(TenantId.SYS_TENANT_ID).stream().sorted((o1, o2) -> {
int result = 0;
if (o1.getOrder() != null && o2.getOrder() != null) {
result = o1.getOrder() - o2.getOrder();
} else if (o1.getOrder() == null && o2.getOrder() != null) {
result = 1;
} else if (o1.getOrder() != null) {
result = -1;
}
if (result == 0) {
result = o1.getTitle().compareTo(o2.getTitle());
}
return result;
}).collect(Collectors.toList());
assertEquals(20, widgetsBundles.size());
PageLink pageLink = new PageLink(100, 0, "", new SortOrder("title"));
PageData<WidgetsBundle> widgetsBundlesData = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.fromUUID(tenantId1)), pageLink);
assertEquals(20, widgetsBundlesData.getData().size());
assertEquals(widgetsBundles, widgetsBundlesData.getData());
} |
public static NotificationDispatcherMetadata newMetadata() {
return METADATA;
} | @Test
public void qgChange_notification_is_enable_at_project_level() {
NotificationDispatcherMetadata metadata = QGChangeNotificationHandler.newMetadata();
assertThat(metadata.getProperty(PER_PROJECT_NOTIFICATION)).isEqualTo("true");
} |
@Override
public boolean apply(Collection<Member> members) {
if (members.size() < minimumClusterSize) {
return false;
}
int count = 0;
long now = currentTimeMillis();
for (Member member : members) {
if (!isAlivePerIcmp(member)) {
continue;
}
if (member.localMember()) {
count++;
continue;
}
// apply and onHeartbeat are never executed concurrently
Long latestTimestamp = latestHeartbeatPerMember.get(member);
if (latestTimestamp == null) {
continue;
}
if ((now - latestTimestamp) < heartbeatToleranceMillis) {
count++;
}
}
return count >= minimumClusterSize;
} | @Test
public void testRecentlyActiveSplitBrainProtectionFunction_splitBrainProtectionPresent_allMembersRecentlyActive() {
splitBrainProtectionFunction = new RecentlyActiveSplitBrainProtectionFunction(splitBrainProtectionSize, 10000);
// heartbeat each second for all members for 5 seconds
heartbeat(5, 1000);
assertTrue(splitBrainProtectionFunction.apply(Arrays.asList(members)));
} |
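A minimal usage sketch for the function above; the onHeartbeat(Member, long) callback is assumed from the HeartbeatAware contract that the in-code comment references, and the import paths, cluster size, and tolerance values are illustrative assumptions:

import java.util.Arrays;
import com.hazelcast.cluster.Member;

static boolean hasQuorum(Member[] members) {
    RecentlyActiveSplitBrainProtectionFunction fn =
            new RecentlyActiveSplitBrainProtectionFunction(3, 10_000);
    long now = System.currentTimeMillis();
    for (Member member : members) {
        fn.onHeartbeat(member, now); // assumed HeartbeatAware callback; per the comment, never concurrent with apply()
    }
    return fn.apply(Arrays.asList(members));
}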
public static URL parseURL(String address, Map<String, String> defaults) {
if (StringUtils.isEmpty(address)) {
throw new IllegalArgumentException("Address is not allowed to be empty, please re-enter.");
}
String url;
if (address.contains("://") || address.contains(URL_PARAM_STARTING_SYMBOL)) {
url = address;
} else {
String[] addresses = COMMA_SPLIT_PATTERN.split(address);
url = addresses[0];
if (addresses.length > 1) {
StringBuilder backup = new StringBuilder();
for (int i = 1; i < addresses.length; i++) {
if (i > 1) {
backup.append(',');
}
backup.append(addresses[i]);
}
url += URL_PARAM_STARTING_SYMBOL + RemotingConstants.BACKUP_KEY + "=" + backup.toString();
}
}
String defaultProtocol = defaults == null ? null : defaults.get(PROTOCOL_KEY);
if (StringUtils.isEmpty(defaultProtocol)) {
defaultProtocol = DUBBO_PROTOCOL;
}
String defaultUsername = defaults == null ? null : defaults.get(USERNAME_KEY);
String defaultPassword = defaults == null ? null : defaults.get(PASSWORD_KEY);
int defaultPort = StringUtils.parseInteger(defaults == null ? null : defaults.get(PORT_KEY));
String defaultPath = defaults == null ? null : defaults.get(PATH_KEY);
Map<String, String> defaultParameters = defaults == null ? null : new HashMap<>(defaults);
if (defaultParameters != null) {
defaultParameters.remove(PROTOCOL_KEY);
defaultParameters.remove(USERNAME_KEY);
defaultParameters.remove(PASSWORD_KEY);
defaultParameters.remove(HOST_KEY);
defaultParameters.remove(PORT_KEY);
defaultParameters.remove(PATH_KEY);
}
URL u = URL.cacheableValueOf(url);
boolean changed = false;
String protocol = u.getProtocol();
String username = u.getUsername();
String password = u.getPassword();
String host = u.getHost();
int port = u.getPort();
String path = u.getPath();
Map<String, String> parameters = new HashMap<>(u.getParameters());
if (StringUtils.isEmpty(protocol)) {
changed = true;
protocol = defaultProtocol;
}
if (StringUtils.isEmpty(username) && StringUtils.isNotEmpty(defaultUsername)) {
changed = true;
username = defaultUsername;
}
if (StringUtils.isEmpty(password) && StringUtils.isNotEmpty(defaultPassword)) {
changed = true;
password = defaultPassword;
}
/*if (u.isAnyHost() || u.isLocalHost()) {
changed = true;
host = NetUtils.getLocalHost();
}*/
if (port <= 0) {
if (defaultPort > 0) {
changed = true;
port = defaultPort;
} else {
changed = true;
port = 9090;
}
}
if (StringUtils.isEmpty(path)) {
if (StringUtils.isNotEmpty(defaultPath)) {
changed = true;
path = defaultPath;
}
}
if (defaultParameters != null && defaultParameters.size() > 0) {
for (Map.Entry<String, String> entry : defaultParameters.entrySet()) {
String key = entry.getKey();
String defaultValue = entry.getValue();
if (StringUtils.isNotEmpty(defaultValue)) {
String value = parameters.get(key);
if (StringUtils.isEmpty(value)) {
changed = true;
parameters.put(key, defaultValue);
}
}
}
}
if (changed) {
u = new ServiceConfigURL(protocol, username, password, host, port, path, parameters);
}
return u;
} | @Test
void testParseUrl() {
String address = "remote://root:alibaba@127.0.0.1:9090/dubbo.test.api";
URL url = UrlUtils.parseURL(address, null);
assertEquals(localAddress + ":9090", url.getAddress());
assertEquals("root", url.getUsername());
assertEquals("alibaba", url.getPassword());
assertEquals("dubbo.test.api", url.getPath());
assertEquals(9090, url.getPort());
assertEquals("remote", url.getProtocol());
} |
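A sketch of how the defaults map back-fills missing URL parts in parseURL; the literal keys below ("protocol", "port") are assumed to be the values of PROTOCOL_KEY and PORT_KEY, and "backup" is assumed to be the value of RemotingConstants.BACKUP_KEY:

import java.util.HashMap;
import java.util.Map;

static void parseUrlSketch() {
    Map<String, String> defaults = new HashMap<>();
    defaults.put("protocol", "zookeeper"); // assumed PROTOCOL_KEY
    defaults.put("port", "2181");          // assumed PORT_KEY

    // "10.20.153.10" carries no protocol or port, so both come from defaults:
    URL url = UrlUtils.parseURL("10.20.153.10", defaults);
    // url.getProtocol() -> "zookeeper", url.getPort() -> 2181

    // A comma-separated address keeps the first entry and folds the rest into a backup parameter:
    URL clustered = UrlUtils.parseURL("10.20.153.10,10.20.153.11", defaults);
    // clustered.getParameter("backup") -> "10.20.153.11" (assumed BACKUP_KEY)
}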
public void collectRequestStats(HttpRequestInfo req) {
// ipv4/ipv6 tracking
String clientIp;
final String xForwardedFor = req.getHeaders().getFirst(X_FORWARDED_FOR_HEADER);
if (xForwardedFor == null) {
clientIp = req.getClientIp();
} else {
clientIp = extractClientIpFromXForwardedFor(xForwardedFor);
}
final boolean isIPv6 = clientIp != null && isIPv6(clientIp);
final String ipVersionKey = isIPv6 ? "ipv6" : "ipv4";
incrementNamedCountingMonitor(ipVersionKey, ipVersionCounterMap);
// host header
String host = req.getHeaders().getFirst(HOST_HEADER);
if (host != null) {
int colonIdx;
if (isIPv6) {
// an ipv6 host might be a raw IP with 7+ colons
colonIdx = host.lastIndexOf(":");
} else {
// strips port from host
colonIdx = host.indexOf(":");
}
if (colonIdx > -1) {
host = host.substring(0, colonIdx);
}
incrementNamedCountingMonitor(hostKey(host), this.hostCounterMap);
}
// http vs. https
String protocol = req.getHeaders().getFirst(X_FORWARDED_PROTO_HEADER);
if (protocol == null) {
protocol = req.getScheme();
}
incrementNamedCountingMonitor(protocolKey(protocol), this.protocolCounterMap);
} | @Test
void testCollectRequestStats() {
final String host = "api.netflix.com";
final String proto = "https";
final HttpRequestInfo req = Mockito.mock(HttpRequestInfo.class);
Headers headers = new Headers();
when(req.getHeaders()).thenReturn(headers);
headers.set(StatsManager.HOST_HEADER, host);
headers.set(StatsManager.X_FORWARDED_PROTO_HEADER, proto);
when(req.getClientIp()).thenReturn("127.0.0.1");
final StatsManager sm = StatsManager.getManager();
sm.collectRequestStats(req);
final NamedCountingMonitor hostMonitor = sm.getHostMonitor(host);
assertNotNull(hostMonitor, "hostMonitor should not be null");
final NamedCountingMonitor protoMonitor = sm.getProtocolMonitor(proto);
assertNotNull(protoMonitor, "protoMonitor should not be null");
assertEquals(1, hostMonitor.getCount());
assertEquals(1, protoMonitor.getCount());
} |
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteCodegen(Long tableId) {
// Validate that the table definition exists
if (codegenTableMapper.selectById(tableId) == null) {
throw exception(CODEGEN_TABLE_NOT_EXISTS);
}
// Delete the table definition
codegenTableMapper.deleteById(tableId);
// Delete the column field definitions
codegenColumnMapper.deleteListByTableId(tableId);
} | @Test
public void testDeleteCodegen_notExists() {
assertServiceException(() -> codegenService.deleteCodegen(randomLongId()),
CODEGEN_TABLE_NOT_EXISTS);
} |