focal_method | test_case |
---|---|
public double calculateDensity(Graph graph, boolean isGraphDirected) {
double result;
double edgesCount = graph.getEdgeCount();
double nodesCount = graph.getNodeCount();
double multiplier = 1;
if (!isGraphDirected) {
multiplier = 2;
}
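// Density = multiplier * edges / (nodes * (nodes - 1)); undirected graphs use multiplier 2 because each edge covers two ordered node pairs.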
result = (multiplier * edgesCount) / (nodesCount * nodesCount - nodesCount);
return result;
} | @Test
public void testSelfLoopNodeDensity() {
GraphModel graphModel = GraphModel.Factory.newInstance();
UndirectedGraph undirectedGraph = graphModel.getUndirectedGraph();
Node currentNode = graphModel.factory().newNode("0");
undirectedGraph.addNode(currentNode);
Edge currentEdge = graphModel.factory().newEdge(currentNode, currentNode, false);
undirectedGraph.addEdge(currentEdge);
Graph graph = graphModel.getGraph();
GraphDensity d = new GraphDensity();
double density = d.calculateDensity(graph, false);
assertEquals(Double.POSITIVE_INFINITY, density, 0.0);
} |
public CsvRow nextRow() throws IORuntimeException {
List<String> currentFields;
int fieldCount;
while (false == finished) {
currentFields = readLine();
fieldCount = currentFields.size();
if (fieldCount < 1) {
// An empty List means the end of the input was reached
break;
}
// Validate the read range
if (lineNo < config.beginLineNo) {
// The start line has not been reached yet; keep reading
continue;
}
if (lineNo > config.endLineNo) {
// Past the end line; stop reading
break;
}
// Skip empty rows
if (config.skipEmptyRows && fieldCount == 1 && currentFields.get(0).isEmpty()) {
// [""] represents an empty row
continue;
}
// Check that every row has the same field count
if (config.errorOnDifferentFieldCount) {
if (firstLineFieldCount < 0) {
firstLineFieldCount = fieldCount;
} else if (fieldCount != firstLineFieldCount) {
throw new IORuntimeException(String.format("Line %d has %d fields, but first line has %d fields", lineNo, fieldCount, firstLineFieldCount));
}
}
// Record the maximum field count
if (fieldCount > maxFieldCount) {
maxFieldCount = fieldCount;
}
// Initialize the header
if (lineNo == config.headerLineNo && null == header) {
initHeader(currentFields);
// Once consumed as the header row, skip this line; the next line becomes the first data row
continue;
}
return new CsvRow(lineNo, null == header ? null : header.headerMap, currentFields);
}
return null;
} | @Test
public void parseEscapeTest(){
// https://datatracker.ietf.org/doc/html/rfc4180#section-2
// Rule 7: a double quote inside a quoted field is escaped by doubling it
StringReader reader = StrUtil.getReader("\"b\"\"bb\"");
CsvParser parser = new CsvParser(reader, null);
CsvRow row = parser.nextRow();
assertNotNull(row);
assertEquals(1, row.size());
assertEquals("b\"bb", row.get(0));
} |
public ConvertedTime getConvertedTime(long duration) {
Set<Seconds> keys = RULES.keySet();
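// Assumes RULES iterates its thresholds in ascending order; the first threshold covering the duration wins.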
for (Seconds seconds : keys) {
if (duration <= seconds.getSeconds()) {
return RULES.get(seconds).getConvertedTime(duration);
}
}
return new TimeConverter.OverTwoYears().getConvertedTime(duration);
} | @Test
public void testShouldReturnNotAvailableWhenInputDateIsNull() throws Exception {
assertEquals(TimeConverter.ConvertedTime.NOT_AVAILABLE, timeConverter.getConvertedTime((Date) null));
} |
@Override
public void filterConsumer(Exchange exchange, WebServiceMessage response) {
if (exchange != null) {
AttachmentMessage responseMessage = exchange.getMessage(AttachmentMessage.class);
processHeaderAndAttachments(responseMessage, response);
}
} | @Test
public void removeCamelInternalHeaderAttributes() throws Exception {
exchange.getOut().getHeaders().put(SpringWebserviceConstants.SPRING_WS_SOAP_ACTION, "mustBeRemoved");
exchange.getOut().getHeaders().put(SpringWebserviceConstants.SPRING_WS_ADDRESSING_ACTION, "mustBeRemoved");
exchange.getOut().getHeaders().put(SpringWebserviceConstants.SPRING_WS_ADDRESSING_PRODUCER_FAULT_TO, "mustBeRemoved");
exchange.getOut().getHeaders().put(SpringWebserviceConstants.SPRING_WS_ADDRESSING_PRODUCER_REPLY_TO, "mustBeRemoved");
exchange.getOut().getHeaders().put(SpringWebserviceConstants.SPRING_WS_ADDRESSING_CONSUMER_FAULT_ACTION,
"mustBeRemoved");
exchange.getOut().getHeaders().put(SpringWebserviceConstants.SPRING_WS_ADDRESSING_CONSUMER_OUTPUT_ACTION,
"mustBeRemoved");
exchange.getOut().getHeaders().put(SpringWebserviceConstants.SPRING_WS_ENDPOINT_URI, "mustBeRemoved");
exchange.getOut().getHeaders().put("breadcrumbId", "mustBeRemoved");
filter.filterConsumer(exchange, message);
Assertions.assertThat(message.getAttachments()).isEmpty();
Assertions.assertThat(message.getSoapHeader().examineAllHeaderElements()).isEmpty();
Assertions.assertThat(message.getSoapHeader().getAllAttributes()).isEmpty();
} |
void activate(long newNextWriteOffset) {
if (active()) {
throw new RuntimeException("Can't activate already active OffsetControlManager.");
}
if (newNextWriteOffset < 0) {
throw new RuntimeException("Invalid negative newNextWriteOffset " +
newNextWriteOffset + ".");
}
// Before switching to active, create an in-memory snapshot at the last committed
// offset. This is required because the active controller assumes that there is always
// an in-memory snapshot at the last committed offset.
snapshotRegistry.idempotentCreateSnapshot(lastStableOffset);
this.nextWriteOffset = newNextWriteOffset;
metrics.setActive(true);
} | @Test
public void testActivate() {
OffsetControlManager offsetControl = new OffsetControlManager.Builder().build();
offsetControl.activate(1000L);
assertEquals(1000L, offsetControl.nextWriteOffset());
assertTrue(offsetControl.active());
assertTrue(offsetControl.metrics().active());
assertEquals(Collections.singletonList(-1L), offsetControl.snapshotRegistry().epochsList());
} |
@Override
int numBuffered(final TopicPartition partition) {
final RecordQueue recordQueue = partitionQueues.get(partition);
if (recordQueue == null) {
throw new IllegalStateException("Partition " + partition + " not found.");
}
return recordQueue.size();
} | @Test
public void shouldThrowIllegalStateExceptionUponNumBufferedIfPartitionUnknown() {
final PartitionGroup group = getBasicGroup();
final IllegalStateException exception = assertThrows(
IllegalStateException.class,
() -> group.numBuffered(unknownPartition));
assertThat(errMessage, equalTo(exception.getMessage()));
} |
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
final String storeName = storeQueryParameters.storeName();
final QueryableStoreType<T> queryableStoreType = storeQueryParameters.queryableStoreType();
final List<T> globalStore = globalStoreProvider.stores(storeName, queryableStoreType);
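// A matching global store takes precedence over local stores of the same name.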
if (!globalStore.isEmpty()) {
return queryableStoreType.create(globalStoreProvider, storeName);
}
return queryableStoreType.create(
new WrappingStoreProvider(storeProviders.values(), storeQueryParameters),
storeName
);
} | @Test
public void shouldReturnKVStoreWhenItExists() {
assertNotNull(storeProvider.getStore(StoreQueryParameters.fromNameAndType(keyValueStore, QueryableStoreTypes.keyValueStore())));
} |
@Override
public <T> @Nullable Schema schemaFor(TypeDescriptor<T> typeDescriptor) {
checkForDynamicType(typeDescriptor);
return ProtoSchemaTranslator.getSchema((Class<Message>) typeDescriptor.getRawType());
} | @Test
public void testNestedSchema() {
Schema schema = new ProtoMessageSchema().schemaFor(TypeDescriptor.of(Nested.class));
assertEquals(NESTED_SCHEMA, schema);
} |
public List<Tuple2<JobID, BlobKey>> checkLimit(long size) {
checkArgument(size >= 0);
synchronized (lock) {
List<Tuple2<JobID, BlobKey>> blobsToDelete = new ArrayList<>();
long current = total;
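// Greedily mark cached blobs for eviction, in the map's iteration order, until the requested size fits under the limit.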
for (Map.Entry<Tuple2<JobID, BlobKey>, Long> entry : caches.entrySet()) {
if (current + size > sizeLimit) {
blobsToDelete.add(entry.getKey());
current -= entry.getValue();
}
}
return blobsToDelete;
}
} | @Test
void testCheckLimitForEmptyBlob() {
List<Tuple2<JobID, BlobKey>> keys = tracker.checkLimit(0L);
assertThat(keys).isEmpty();
} |
public static Sensor getInvocationSensor(
final Metrics metrics,
final String sensorName,
final String groupName,
final String functionDescription
) {
final Sensor sensor = metrics.sensor(sensorName);
if (sensor.hasMetrics()) {
return sensor;
}
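// First time this sensor is seen: register its avg/max/count/rate metrics.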
final BiFunction<String, String, MetricName> metricNamer = (suffix, descPattern) -> {
final String description = String.format(descPattern, functionDescription);
return metrics.metricName(sensorName + "-" + suffix, groupName, description);
};
sensor.add(
metricNamer.apply("avg", AVG_DESC),
new Avg()
);
sensor.add(
metricNamer.apply("max", MAX_DESC),
new Max()
);
sensor.add(
metricNamer.apply("count", COUNT_DESC),
new WindowedCount()
);
sensor.add(
metricNamer.apply("rate", RATE_DESC),
new Rate(TimeUnit.SECONDS, new WindowedCount())
);
return sensor;
} | @Test
public void shouldRegisterRateMetric() {
// Given:
when(metrics.metricName(SENSOR_NAME + "-rate", GROUP_NAME, description(RATE_DESC)))
.thenReturn(specificMetricName);
// When:
FunctionMetrics
.getInvocationSensor(metrics, SENSOR_NAME, GROUP_NAME, FUNC_NAME);
// Then:
verify(sensor).add(eq(specificMetricName), isA(Rate.class));
} |
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) {
return validate(klass, options, false);
} | @Test
public void testValidationOnOverriddenMethods() throws Exception {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(
"Missing required value for "
+ "[public abstract java.lang.String org.apache.beam."
+ "sdk.options.PipelineOptionsValidatorTest$Required.getObject(), \"Fake Description\"].");
SubClassValidation required = PipelineOptionsFactory.as(SubClassValidation.class);
PipelineOptionsValidator.validate(Required.class, required);
} |
@Override
public void subscribe(String serviceName, EventListener listener) throws NacosException {
subscribe(serviceName, new ArrayList<>(), listener);
} | @Test
public void testSubscribe5() throws NacosException {
String serviceName = "service1";
String groupName = "group1";
EventListener listener = event -> {
};
//when
client.subscribe(serviceName, groupName, NamingSelectorFactory.HEALTHY_SELECTOR, listener);
NamingSelectorWrapper wrapper = new NamingSelectorWrapper(serviceName, groupName, Constants.NULL,
NamingSelectorFactory.HEALTHY_SELECTOR, listener);
//then
verify(changeNotifier, times(1)).registerListener(groupName, serviceName, wrapper);
verify(proxy, times(1)).subscribe(serviceName, groupName, Constants.NULL);
} |
@Override
public String parseProperty(String key, String value, PropertiesLookup properties) {
log.trace("Parsing property '{}={}'", key, value);
if (value != null) {
initEncryptor();
Matcher matcher = PATTERN.matcher(value);
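// Replace each encrypted token (group 0) in the value with the decryption of its payload (group 1).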
while (matcher.find()) {
if (log.isTraceEnabled()) {
log.trace("Decrypting part '{}'", matcher.group(0));
}
String decrypted = encryptor.decrypt(matcher.group(1));
value = value.replace(matcher.group(0), decrypted);
}
}
return value;
} | @Test
public void testDecryptsMultiplePartsOfPartiallyEncryptedProperty() {
StringBuilder propertyValue = new StringBuilder();
StringBuilder expected = new StringBuilder();
for (int i = 0; i < 100; i++) {
propertyValue.append(format("param%s=%s%s%s()&", i,
JASYPT_PREFIX_TOKEN, encryptor.encrypt("tiger" + i), JASYPT_SUFFIX_TOKEN));
expected.append(format("param%s=tiger%s()&", i, i));
}
String result = jasyptPropertiesParser.parseProperty(KEY, propertyValue.toString(), null);
assertThat(result, is(expected.toString()));
} |
public void initializeResources(Herder herder) {
this.herder = herder;
super.initializeResources();
} | @Test
public void testLoggerEndpointWithDefaults() throws IOException {
Map<String, String> configMap = new HashMap<>(baseServerProps());
final String logger = "a.b.c.s.W";
final String loggingLevel = "INFO";
final long lastModified = 789052637671L;
doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId();
doReturn(plugins).when(herder).plugins();
expectEmptyRestExtensions();
doReturn(Collections.emptyList()).when(herder).setWorkerLoggerLevel(logger, loggingLevel);
doReturn(Collections.singletonMap(logger, new LoggerLevel(loggingLevel, lastModified))).when(herder).allLoggerLevels();
server = new ConnectRestServer(null, restClient, configMap);
server.initializeServer();
server.initializeResources(herder);
ObjectMapper mapper = new ObjectMapper();
URI serverUrl = server.advertisedUrl();
executePut(serverUrl, "/admin/loggers/" + logger, "{\"level\": \"" + loggingLevel + "\"}");
String responseStr = executeGet(serverUrl, "/admin/loggers");
Map<String, Object> expectedLogger = new HashMap<>();
expectedLogger.put("level", loggingLevel);
expectedLogger.put("last_modified", lastModified);
Map<String, Map<String, Object>> expectedLoggers = Collections.singletonMap(logger, expectedLogger);
Map<String, Map<String, Object>> actualLoggers = mapper.readValue(responseStr, new TypeReference<Map<String, Map<String, Object>>>() { });
assertEquals(expectedLoggers, actualLoggers);
} |
public static BsonTimestamp decodeTimestamp(BsonDocument resumeToken) {
BsonValue bsonValue =
Objects.requireNonNull(resumeToken, "Missing ResumeToken.").get(DATA_FIELD);
final byte[] keyStringBytes;
// Resume Tokens format: https://www.mongodb.com/docs/manual/changeStreams/#resume-tokens
if (bsonValue.isBinary()) { // BinData
keyStringBytes = bsonValue.asBinary().getData();
} else if (bsonValue.isString()) { // Hex-encoded string (v0 or v1)
keyStringBytes = hexToUint8Array(bsonValue.asString().getValue());
} else {
throw new IllegalArgumentException(
"Unknown resume token format: " + resumeToken.toJson());
}
ByteBuffer buffer = ByteBuffer.wrap(keyStringBytes).order(ByteOrder.BIG_ENDIAN);
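// The first KeyString byte is a type tag; a timestamp payload follows as two big-endian ints (seconds, then increment).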
int kType = buffer.get() & 0xff;
if (kType != K_TIMESTAMP) {
throw new IllegalArgumentException("Unknown keyType of timestamp: " + kType);
}
int t = buffer.getInt();
int i = buffer.getInt();
return new BsonTimestamp(t, i);
} | @Test
public void testDecodeHexFormatV1() {
BsonDocument resumeToken =
BsonDocument.parse(
"{\"_data\": \"82612E8513000000012B022C0100296E5A1004A5093ABB38FE4B9EA67F01BB1A96D812463C5F6964003C5F5F5F78000004\"}");
BsonTimestamp expected = new BsonTimestamp(1630438675, 1);
BsonTimestamp actual = ResumeTokenUtils.decodeTimestamp(resumeToken);
assertEquals(expected, actual);
} |
@Override
public List<Integer> embed(String text, Context context) {
throw new UnsupportedOperationException("This embedder only supports embed with tensor type");
} | @Test
public void testCachingInt() {
int initialEmbeddingsDone = runtime.embeddingsDone;
var context = new Embedder.Context("schema.indexing");
var input = "This is a test string to embed";
var t1 = (MixedTensor) embedder.embed(input, context, TensorType.fromSpec("tensor<int8>(dt{},x[8])"));
assertEquals(initialEmbeddingsDone + 1, runtime.embeddingsDone);
var t2 = (MixedTensor) embedder.embed(input, context, TensorType.fromSpec("tensor<int8>(dt{},x[4])"));
assertEquals("Cached value was used", initialEmbeddingsDone + 1, runtime.embeddingsDone);
assertNotEquals(t1, t2);
for (int token = 0; token < 7; token++) {
for (int dim = 0; dim < 4; dim++) { // the first four dimensions should be equal
assertEquals(t1.get(TensorAddress.of(token,dim)), t2.get(TensorAddress.of(token,dim)), 1e-6);
}
}
// t2 only has 4 dimensions so this should be out of bounds which returns 0
assertEquals(0, t2.get(TensorAddress.of(0,4)), 1e-6);
input = "This is a different test string to embed";
embedder.embed(input, context, TensorType.fromSpec("tensor<float>(dt{},x[8])"));
assertEquals(initialEmbeddingsDone + 2, runtime.embeddingsDone);
} |
public Map<String, Object> getContext(Map<String, Object> modelMap, Class<? extends SparkController> controller, String viewName) {
Map<String, Object> context = new HashMap<>(modelMap);
context.put("currentGoCDVersion", CurrentGoCDVersion.getInstance().getGocdDistVersion());
context.put("railsAssetsService", railsAssetsService);
context.put("webpackAssetsService", webpackAssetsService);
context.put("securityService", securityService);
context.put("maintenanceModeService", maintenanceModeService);
context.put("currentUser", SessionUtils.currentUsername());
context.put("controllerName", humanizedControllerName(controller));
context.put("viewName", viewName);
context.put("currentVersion", CurrentGoCDVersion.getInstance());
context.put("toggles", Toggles.class);
context.put("goUpdate", versionInfoService.getGoUpdate());
context.put("goUpdateCheckEnabled", versionInfoService.isGOUpdateCheckEnabled());
context.put("serverTimezoneUTCOffset", TimeZone.getDefault().getOffset(new Date().getTime()));
context.put("spaRefreshInterval", SystemEnvironment.goSpaRefreshInterval());
context.put("spaTimeout", SystemEnvironment.goSpaTimeout());
context.put("showAnalyticsDashboard", showAnalyticsDashboard());
context.put("devMode", !new SystemEnvironment().useCompressedJs());
context.put("serverSiteUrls", GSON.toJson(serverConfigService.getServerSiteUrls()));
return context;
} | @Test
void shouldNotShowAnalyticsDashboardWhenUserIsNotAdmin() {
Map<String, Object> modelMap = new HashMap<>();
when(securityService.isUserAdmin(any(Username.class))).thenReturn(false);
CombinedPluginInfo combinedPluginInfo = new CombinedPluginInfo(analyticsPluginInfo());
when(pluginInfoFinder.allPluginInfos(PluginConstants.ANALYTICS_EXTENSION)).thenReturn(List.of(combinedPluginInfo));
Map<String, Object> context = initialContextProvider.getContext(modelMap, dummySparkController.getClass(), "viewName");
assertThat(context.get("showAnalyticsDashboard")).isEqualTo(false);
} |
@Override
public int removeAllCounted(Collection<? extends V> c) {
return get(removeAllCountedAsync(c));
} | @Test
public void testRemoveAllCounted() {
RSet<Integer> set = redisson.getSet("list", IntegerCodec.INSTANCE);
set.add(0);
set.add(1);
set.add(2);
set.add(3);
assertThat(set.removeAllCounted(Arrays.asList(1, 2, 3, 4, 5))).isEqualTo(3);
} |
private void announceBacklog(NetworkSequenceViewReader reader, int backlog) {
checkArgument(backlog > 0, "Backlog must be positive.");
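// Announce the sender's backlog to the downstream channel so it can allocate credits (credit-based flow control).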
NettyMessage.BacklogAnnouncement announcement =
new NettyMessage.BacklogAnnouncement(backlog, reader.getReceiverId());
ctx.channel()
.writeAndFlush(announcement)
.addListener(
(ChannelFutureListener)
future -> {
if (!future.isSuccess()) {
onChannelFutureFailure(future);
}
});
} | @Test
void testAnnounceBacklog() throws Exception {
PipelinedSubpartition subpartition =
PipelinedSubpartitionTest.createPipelinedSubpartition();
subpartition.add(createEventBufferConsumer(4096, Buffer.DataType.DATA_BUFFER));
subpartition.add(createEventBufferConsumer(4096, Buffer.DataType.DATA_BUFFER));
PipelinedSubpartitionView view =
subpartition.createReadView(new NoOpBufferAvailablityListener());
ResultPartition partition =
TestingResultPartition.newBuilder()
.setCreateSubpartitionViewFunction((index, listener) -> view)
.build();
PartitionRequestQueue queue = new PartitionRequestQueue();
InputChannelID receiverId = new InputChannelID();
CreditBasedSequenceNumberingViewReader reader =
new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
EmbeddedChannel channel = new EmbeddedChannel(queue);
reader.notifySubpartitionsCreated(partition, new ResultSubpartitionIndexSet(0));
queue.notifyReaderCreated(reader);
reader.notifyDataAvailable(view);
channel.runPendingTasks();
Object data = channel.readOutbound();
assertThat(data).isInstanceOf(NettyMessage.BacklogAnnouncement.class);
NettyMessage.BacklogAnnouncement announcement = (NettyMessage.BacklogAnnouncement) data;
assertThat(announcement.receiverId).isEqualTo(receiverId);
assertThat(announcement.backlog).isEqualTo(subpartition.getBuffersInBacklogUnsafe());
subpartition.release();
reader.notifyDataAvailable(view);
channel.runPendingTasks();
assertThat((Object) channel.readOutbound()).isNotNull();
} |
public static String getNativeDataTypeSimpleName( ValueMetaInterface v ) {
try {
return v.getType() != ValueMetaInterface.TYPE_BINARY ? v.getNativeDataTypeClass().getSimpleName() : "Binary";
} catch ( KettleValueException e ) {
LogChannelInterface log = new LogChannel( v );
log.logDebug( BaseMessages.getString( PKG, "FieldHelper.Log.UnknownNativeDataTypeSimpleName" ) );
return "Object";
}
} | @Test
public void getNativeDataTypeSimpleName_Unknown() throws Exception {
KettleValueException e = new KettleValueException();
ValueMetaInterface v = mock( ValueMetaInterface.class );
doThrow( e ).when( v ).getNativeDataTypeClass();
assertEquals( "Object", FieldHelper.getNativeDataTypeSimpleName( v ) );
} |
public Node parse() throws ScanException {
return E();
} | @Test
public void testCompositeFormatting() throws Exception {
Parser<Object> p = new Parser<>("hello%5(XYZ)");
Node t = p.parse();
Node witness = new Node(Node.LITERAL, "hello");
CompositeNode composite = new CompositeNode(BARE);
composite.setFormatInfo(new FormatInfo(5, Integer.MAX_VALUE));
Node child = new Node(Node.LITERAL, "XYZ");
composite.setChildNode(child);
witness.next = composite;
Assertions.assertEquals(witness, t);
} |
public static StringBuilder print_json_diff(LogBuffer buffer, long len, String columnName, int columnIndex,
String charsetName) {
return print_json_diff(buffer, len, columnName, columnIndex, Charset.forName(charsetName));
} | @Test
public void print_json_diffInputNotNullZeroNotNullZeroNotNullOutputIllegalArgumentException2()
throws InvocationTargetException {
// Arrange
final LogBuffer buffer = new LogBuffer();
buffer.position = 15;
buffer.semival = 0;
final byte[] myByteArray = { (byte) 1, (byte) 1, (byte) 0, (byte) 0, (byte) 1, (byte) 1, (byte) 1, (byte) 1,
(byte) 1, (byte) 1, (byte) 0, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 0, (byte) 1, (byte) 1,
(byte) 0, (byte) 0, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 0,
(byte) 1, (byte) 1 };
buffer.buffer = myByteArray;
buffer.limit = -1_215_751_986;
buffer.origin = 1_215_752_002;
final long len = 0L;
final String columnName = "foo";
final int columnIndex = 0;
final String charsetName = "gbk";
try {
// Act
thrown.expect(IllegalArgumentException.class);
JsonDiffConversion.print_json_diff(buffer, len, columnName, columnIndex, charsetName);
} catch (IllegalArgumentException ex) {
// Assert side effects
Assert.assertNotNull(buffer);
Assert.assertEquals(16, buffer.position);
Assert.assertEquals(0, buffer.semival);
Assert.assertArrayEquals(new byte[] { (byte) 1, (byte) 1, (byte) 0, (byte) 0, (byte) 1, (byte) 1, (byte) 1,
(byte) 1, (byte) 1, (byte) 1, (byte) 0, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 0, (byte) 1,
(byte) 1, (byte) 0, (byte) 0, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 1, (byte) 1,
(byte) 0, (byte) 1, (byte) 1 }, buffer.buffer);
Assert.assertEquals(-1_215_751_986, buffer.limit);
Assert.assertEquals(1_215_752_002, buffer.origin);
throw ex;
}
} |
IpcPublication getSharedIpcPublication(final long streamId)
{
return findSharedIpcPublication(ipcPublications, streamId);
} | @Test
void shouldBeAbleToAddAndRemoveIpcPublication()
{
final long idAdd = driverProxy.addPublication(CHANNEL_IPC, STREAM_ID_1);
driverProxy.removePublication(idAdd);
doWorkUntil(() -> nanoClock.nanoTime() >= CLIENT_LIVENESS_TIMEOUT_NS);
final IpcPublication ipcPublication = driverConductor.getSharedIpcPublication(STREAM_ID_1);
assertNull(ipcPublication);
} |
public boolean isEmptyConfig() {
return !isNotEmptyConfig();
} | @Test
public void testIsEmptyConfig() {
RequestHandle handle = new RequestHandle();
handle.setHeader(handle.new ShenyuRequestHeader());
handle.setParameter(handle.new ShenyuRequestParameter());
handle.setCookie(handle.new ShenyuCookie());
assertThat(handle.isEmptyConfig(), is(true));
} |
public static boolean isApprovedCommentCounter(Counter counter) {
String sceneValue = counter.getId().getTag(SCENE);
if (StringUtils.isBlank(sceneValue)) {
return false;
}
return APPROVED_COMMENT_SCENE.equals(sceneValue);
} | @Test
void isApprovedCommentCounter() {
MeterRegistry meterRegistry = new SimpleMeterRegistry();
Counter approvedCommentCounter =
MeterUtils.approvedCommentCounter(meterRegistry, "posts.content.halo.run/fake-post");
assertThat(MeterUtils.isApprovedCommentCounter(approvedCommentCounter)).isTrue();
assertThat(MeterUtils.isVisitCounter(approvedCommentCounter)).isFalse();
} |
@Override
public void deleteSocialClient(Long id) {
// Validate that the client exists
validateSocialClientExists(id);
// Delete it
socialClientMapper.deleteById(id);
} | @Test
public void testDeleteSocialClient_notExists() {
// Prepare parameters
Long id = randomLongId();
// Invoke and assert the exception
assertServiceException(() -> socialClientService.deleteSocialClient(id), SOCIAL_CLIENT_NOT_EXISTS);
} |
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
if (statsEnabled) {
stats.log(deviceStateServiceMsg);
}
stateService.onQueueMsg(deviceStateServiceMsg, callback);
} | @Test
public void givenStatsEnabled_whenForwardingActivityMsgToStateService_thenStatsAreRecorded() {
// GIVEN
ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "stats", statsMock);
ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "statsEnabled", true);
var activityMsg = TransportProtos.DeviceActivityProto.newBuilder()
.setTenantIdMSB(tenantId.getId().getMostSignificantBits())
.setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
.setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
.setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
.setLastActivityTime(time)
.build();
doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(activityMsg, tbCallbackMock);
// WHEN
defaultTbCoreConsumerServiceMock.forwardToStateService(activityMsg, tbCallbackMock);
// THEN
then(statsMock).should().log(activityMsg);
} |
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
} | @Test
public void shouldChooseSpecificOverVarArgsAtBeginning() {
// Given:
givenFunctions(
function(EXPECTED, -1, STRING, STRING, STRING, STRING, INT),
function(OTHER, 0, STRING_VARARGS, STRING, STRING, STRING, INT)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(
SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.INTEGER)
));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
} |
@Udf(schema = "ARRAY<STRUCT<K STRING, V BOOLEAN>>")
public List<Struct> entriesBoolean(
@UdfParameter(description = "The map to create entries from") final Map<String, Boolean> map,
@UdfParameter(description = "If true then the resulting entries are sorted by key")
final boolean sorted
) {
return entries(map, BOOLEAN_STRUCT_SCHEMA, sorted);
} | @Test
public void shouldReturnNullListForNullMapBoolean() {
assertNull(entriesUdf.entriesBoolean(null, false));
} |
public void checkForUpgradeAndExtraProperties() throws IOException {
if (upgradesEnabled()) {
checkForUpgradeAndExtraProperties(systemEnvironment.getAgentMd5(), systemEnvironment.getGivenAgentLauncherMd5(),
systemEnvironment.getAgentPluginsMd5(), systemEnvironment.getTfsImplMd5());
} else {
LOGGER.debug("[Agent Upgrade] Skipping check as there is no wrapping launcher to relaunch the agent JVM...");
}
} | @Test
void checkForUpgradeShouldKillAgentIfLauncherMD5doesNotMatch() {
when(systemEnvironment.getAgentMd5()).thenReturn("not-changing");
expectHeaderValue(SystemEnvironment.AGENT_CONTENT_MD5_HEADER, "not-changing");
when(systemEnvironment.getGivenAgentLauncherMd5()).thenReturn("old-launcher-md5");
expectHeaderValue(SystemEnvironment.AGENT_LAUNCHER_CONTENT_MD5_HEADER, "new-launcher-md5");
RuntimeException toBeThrown = new RuntimeException("Boo!");
doThrow(toBeThrown).when(jvmExitter).jvmExit(anyString(), anyString(), anyString());
try {
agentUpgradeService.checkForUpgradeAndExtraProperties();
fail("should have done jvm exit");
} catch (Exception e) {
assertThat(toBeThrown).isSameAs(e);
}
verify(jvmExitter).jvmExit("launcher", "old-launcher-md5", "new-launcher-md5");
} |
public void alterResource(AlterResourceStmt stmt) throws DdlException {
this.writeLock();
try {
// check if the target resource exists.
String name = stmt.getResourceName();
Resource resource = this.getResource(name);
if (resource == null) {
throw new DdlException("Resource(" + name + ") does not exist");
}
// 1. alter the resource properties
// 2. clear the cache
// 3. update the edit log
if (resource instanceof HiveResource) {
((HiveResource) resource).alterProperties(stmt.getProperties());
} else if (resource instanceof HudiResource) {
((HudiResource) resource).alterProperties(stmt.getProperties());
} else if (resource instanceof IcebergResource) {
((IcebergResource) resource).alterProperties(stmt.getProperties());
} else {
throw new DdlException("Alter resource statement only support external hive/hudi/iceberg now");
}
if (resource.needMappingCatalog()) {
String type = resource.getType().name().toLowerCase(Locale.ROOT);
String catalogName = getResourceMappingCatalogName(resource.getName(), type);
DropCatalogStmt dropCatalogStmt = new DropCatalogStmt(catalogName);
GlobalStateMgr.getCurrentState().getCatalogMgr().dropCatalog(dropCatalogStmt);
Map<String, String> properties = Maps.newHashMap(stmt.getProperties());
properties.put("type", type);
String uriInProperties = stmt.getProperties().get(HIVE_METASTORE_URIS);
String uris = uriInProperties == null ? resource.getHiveMetastoreURIs() : uriInProperties;
properties.put(HIVE_METASTORE_URIS, uris);
GlobalStateMgr.getCurrentState().getCatalogMgr().createCatalog(type, catalogName, "mapping catalog", properties);
}
GlobalStateMgr.getCurrentState().getEditLog().logCreateResource(resource);
} finally {
this.writeUnLock();
}
} | @Test(expected = DdlException.class)
public void testAllowAlterHiveResourceOnly(@Injectable BrokerMgr brokerMgr, @Injectable EditLog editLog,
@Mocked GlobalStateMgr globalStateMgr)
throws UserException {
ResourceMgr mgr = new ResourceMgr();
// add spark resource
addSparkResource(mgr, brokerMgr, editLog, globalStateMgr);
// alter spark resource
Map<String, String> properties = new HashMap<>();
properties.put("broker", "broker2");
AlterResourceStmt stmt = new AlterResourceStmt(name, properties);
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, connectContext);
mgr.alterResource(stmt);
} |
public static boolean equalIncreasingByteArray(int len, byte[] arr) {
return equalIncreasingByteArray(0, len, arr);
} | @Test
public void equalIncreasingByteArray() {
class TestCase {
boolean mExpected;
byte[] mArray;
int mLength;
int mStart;
public TestCase(boolean expected, byte[] array, int length, int start) {
mExpected = expected;
mArray = array;
mLength = length;
mStart = start;
}
}
ArrayList<TestCase> testCases = new ArrayList<>();
testCases.add(new TestCase(false, null, 0, 0));
testCases.add(new TestCase(true, new byte[] {}, 0, 0));
testCases.add(new TestCase(false, new byte[] {1}, 0, 0));
testCases.add(new TestCase(true, new byte[] {}, 0, 3));
testCases.add(new TestCase(false, new byte[] {1}, 0, 3));
testCases.add(new TestCase(true, new byte[] {0}, 1, 0));
testCases.add(new TestCase(false, new byte[] {1}, 1, 0));
testCases.add(new TestCase(true, new byte[] {0, 1, 2}, 3, 0));
testCases.add(new TestCase(false, new byte[] {0, 1, 2, (byte) 0xFF}, 3, 0));
testCases.add(new TestCase(false, new byte[] {1, 2, 3}, 3, 0));
testCases.add(new TestCase(true, new byte[] {3}, 1, 3));
testCases.add(new TestCase(false, new byte[] {2}, 1, 3));
testCases.add(new TestCase(true, new byte[] {3, 4, 5}, 3, 3));
testCases.add(new TestCase(false, new byte[] {3, 4, 5, (byte) 0xFF}, 3, 3));
testCases.add(new TestCase(false, new byte[] {2, 3, 4}, 3, 3));
for (TestCase testCase : testCases) {
boolean result = BufferUtils.equalIncreasingByteArray(testCase.mStart, testCase.mLength,
testCase.mArray);
assertEquals(testCase.mExpected, result);
}
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testAbaloneQuantile() {
test(Loss.quantile(0.5), "abalone", Abalone.formula, Abalone.train, 2.2958);
} |
public static RedissonClient create() {
Config config = new Config();
config.useSingleServer()
.setAddress("redis://127.0.0.1:6379");
return create(config);
} | @Test
public void testMasterSlaveConnectionFail2() {
Assertions.assertThrows(RedisConnectionException.class, () -> {
Config config = new Config();
config.useMasterSlaveServers()
.setMasterAddress("redis://gadfgdfgdsfg:1111")
.addSlaveAddress("redis://asdfasdfsdfaasdf:1111");
Redisson.create(config);
Thread.sleep(1500);
});
} |
@JsonCreator
public static DataSize parse(CharSequence size) {
return parse(size, DataSizeUnit.BYTES);
} | @Test
void parseCaseInsensitive() {
assertThat(DataSize.parse("1b")).isEqualTo(DataSize.parse("1B"));
} |
public static String getDomain(String url) {
ProviderInfo providerInfo = ProviderHelper.toProviderInfo(url);
return providerInfo.getHost();
} | @Test
public void testGetDomain() {
assertEquals("alipay.com", getDomain("bolt://alipay.com:80?a=b"));
assertEquals("alipay.com", getDomain("alipay.com:80?a=b"));
assertEquals("alipay.com", getDomain("bolt://alipay.com:80"));
assertEquals("alipay.com", getDomain("alipay.com:80"));
assertEquals("alipay", getDomain("bolt://alipay?a=b"));
assertEquals("alipay", getDomain("alipay"));
assertEquals("sofagw-pool", getDomain("sofagw-pool"));
assertEquals("1.1.1.1", getDomain("bolt://1.1.1.1:80?a=b"));
assertEquals("1.1.1.1", getDomain("1.1.1.1:80?a=b"));
assertEquals("1.1.1.1", getDomain("bolt://1.1.1.1:80"));
assertEquals("1.1.1.1", getDomain("1.1.1.1:80"));
assertEquals("1.1.1.1", getDomain("1.1.1.1"));
} |
protected final void ensureCapacity(final int index, final int length)
{
if (index < 0 || length < 0)
{
throw new IndexOutOfBoundsException("negative value: index=" + index + " length=" + length);
}
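// Compute the required end position in long arithmetic so that index + length cannot overflow int.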
final long resultingPosition = index + (long)length;
if (resultingPosition > capacity)
{
if (resultingPosition > MAX_ARRAY_LENGTH)
{
throw new IndexOutOfBoundsException(
"index=" + index + " length=" + length + " maxCapacity=" + MAX_ARRAY_LENGTH);
}
final int newCapacity = calculateExpansion(capacity, resultingPosition);
byteArray = Arrays.copyOf(byteArray, newCapacity);
capacity = newCapacity;
}
} | @Test
void ensureCapacityIsANoOpIfExistingCapacityIsEnough()
{
final int index = 1;
final int capacity = 5;
final ExpandableArrayBuffer buffer = new ExpandableArrayBuffer(capacity);
buffer.ensureCapacity(index, capacity - index);
assertEquals(capacity, buffer.capacity());
} |
public void resizeRecordsMap(Map<Key, Record> recordsMap, int size) {
int numRecordsToEvict = recordsMap.size() - size;
if (numRecordsToEvict <= 0) {
return;
}
if (numRecordsToEvict <= size) {
// Fewer records to evict than retain, make a heap of records to evict
IntermediateRecord[] recordsToEvict =
getTopRecordsHeap(recordsMap, numRecordsToEvict, _intermediateRecordComparator);
for (IntermediateRecord recordToEvict : recordsToEvict) {
recordsMap.remove(recordToEvict._key);
}
} else {
// Fewer records to retain than evict, make a heap of records to retain
IntermediateRecord[] recordsToRetain =
getTopRecordsHeap(recordsMap, size, _intermediateRecordComparator.reversed());
recordsMap.clear();
for (IntermediateRecord recordToRetain : recordsToRetain) {
recordsMap.put(recordToRetain._key, recordToRetain._record);
}
}
} | @Test
public void testResizeRecordsMap() {
// Test resize algorithm with numRecordsToEvict < trimToSize.
// TotalRecords=5; trimToSize=3; numRecordsToEvict=2
// d1 asc
TableResizer tableResizer =
new TableResizer(DATA_SCHEMA, QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d1"));
Map<Key, Record> recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(0))); // a, b
assertTrue(recordsMap.containsKey(_keys.get(1)));
// d1 desc
tableResizer =
new TableResizer(DATA_SCHEMA, QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d1 DESC"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(2))); // c, c, c
assertTrue(recordsMap.containsKey(_keys.get(3)));
assertTrue(recordsMap.containsKey(_keys.get(4)));
// d1 asc, d3 desc (tie breaking with 2nd comparator)
tableResizer =
new TableResizer(DATA_SCHEMA, QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d1, d3 DESC"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(0))); // a, b, c (300)
assertTrue(recordsMap.containsKey(_keys.get(1)));
assertTrue(recordsMap.containsKey(_keys.get(4)));
// d1 asc, sum(m1) desc, max(m2) desc
tableResizer = new TableResizer(DATA_SCHEMA,
QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d1, SUM(m1) DESC, max(m2) DESC"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(0))); // a, b, c (30, 300)
assertTrue(recordsMap.containsKey(_keys.get(1)));
assertTrue(recordsMap.containsKey(_keys.get(2)));
// avg(m4) asc (object type)
tableResizer =
new TableResizer(DATA_SCHEMA, QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "AVG(m4)"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(4))); // 2, 3, 3.33
assertTrue(recordsMap.containsKey(_keys.get(3)));
assertTrue(recordsMap.containsKey(_keys.get(1)));
// distinctcount(m3) desc, d1 asc (non-comparable intermediate result)
tableResizer = new TableResizer(DATA_SCHEMA,
QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "DISTINCTCOUNT(m3) DESC, d1"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(4))); // 4, 3, 2 (b)
assertTrue(recordsMap.containsKey(_keys.get(3)));
assertTrue(recordsMap.containsKey(_keys.get(1)));
// d2 + d3 asc (post-aggregation)
tableResizer =
new TableResizer(DATA_SCHEMA, QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d2 + d3"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(0))); // 11, 12, 54
assertTrue(recordsMap.containsKey(_keys.get(1)));
assertTrue(recordsMap.containsKey(_keys.get(3)));
// sum(m1) * d3 desc (post-aggregation)
tableResizer = new TableResizer(DATA_SCHEMA,
QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "SUM(m1) * d3 DESC"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(3))); // 120, 100, 90
assertTrue(recordsMap.containsKey(_keys.get(4)));
assertTrue(recordsMap.containsKey(_keys.get(2)));
// d2 / (distinctcount(m3) + 1) asc, d1 desc (post-aggregation)
tableResizer = new TableResizer(DATA_SCHEMA,
QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d2 / (DISTINCTCOUNT(m3) + 1), d1 DESC"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, TRIM_TO_SIZE);
assertEquals(recordsMap.size(), TRIM_TO_SIZE);
assertTrue(recordsMap.containsKey(_keys.get(1))); // 3.33, 12.5, 5
assertTrue(recordsMap.containsKey(_keys.get(0)));
assertTrue(recordsMap.containsKey(_keys.get(3)));
// Test resize algorithm with numRecordsToEvict > trimToSize.
// TotalRecords=5; trimToSize=2; numRecordsToEvict=3
int trimToSize = 2;
// d1 asc
tableResizer =
new TableResizer(DATA_SCHEMA, QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d1"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, trimToSize);
assertEquals(recordsMap.size(), trimToSize);
assertTrue(recordsMap.containsKey(_keys.get(0))); // a, b
assertTrue(recordsMap.containsKey(_keys.get(1)));
// avg(m4) asc (object type)
tableResizer =
new TableResizer(DATA_SCHEMA, QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "AVG(m4)"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, trimToSize);
assertEquals(recordsMap.size(), trimToSize);
assertTrue(recordsMap.containsKey(_keys.get(4))); // 2, 3
assertTrue(recordsMap.containsKey(_keys.get(3)));
// distinctcount(m3) desc, d1 asc (non-comparable intermediate result)
tableResizer = new TableResizer(DATA_SCHEMA,
QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "DISTINCTCOUNT(m3) DESC, d1"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, trimToSize);
assertEquals(recordsMap.size(), trimToSize);
assertTrue(recordsMap.containsKey(_keys.get(4))); // 4, 3
assertTrue(recordsMap.containsKey(_keys.get(3)));
// d2 / (distinctcount(m3) + 1) asc, d1 desc (post-aggregation)
tableResizer = new TableResizer(DATA_SCHEMA,
QueryContextConverterUtils.getQueryContext(QUERY_PREFIX + "d2 / (DISTINCTCOUNT(m3) + 1), d1 DESC"));
recordsMap = new HashMap<>(_recordsMap);
tableResizer.resizeRecordsMap(recordsMap, trimToSize);
assertEquals(recordsMap.size(), trimToSize);
assertTrue(recordsMap.containsKey(_keys.get(1))); // 3.33, 12.5
assertTrue(recordsMap.containsKey(_keys.get(0)));
} |
@Override
public int run(String[] args) throws Exception {
YarnConfiguration yarnConf = getConf() == null ?
new YarnConfiguration() : new YarnConfiguration(getConf());
boolean isFederationEnabled = yarnConf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
if (args.length < 1 || !isFederationEnabled) {
printUsage(CMD_EMPTY);
return EXIT_ERROR;
}
String cmd = args[0];
if (CMD_HELP.equals(cmd)) {
if (args.length > 1) {
printUsage(args[1]);
} else {
printHelp();
}
return EXIT_SUCCESS;
} else if (CMD_SUBCLUSTER.equals(cmd)) {
return handleSubCluster(args);
} else if (CMD_POLICY.equals(cmd)) {
return handlePolicy(args);
} else if (CMD_APPLICATION.equals(cmd)) {
return handleApplication(args);
} else {
System.out.println("No related commands found.");
printHelp();
}
return EXIT_SUCCESS;
} | @Test
public void testSavePolicy() throws Exception {
PrintStream oldOutPrintStream = System.out;
ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(dataOut));
oldOutPrintStream.println(dataOut);
String[] args = {"-policy", "-s", "root.a;SC-1:0.1,SC-2:0.9;SC-1:0.7,SC-2:0.3;1.0"};
assertEquals(0, rmAdminCLI.run(args));
args = new String[]{"-policy", "-save", "root.a;SC-1:0.1,SC-2:0.9;SC-1:0.7,SC-2:0.3;1.0"};
assertEquals(0, rmAdminCLI.run(args));
} |
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
String groupId,
String memberId,
int memberEpoch,
String instanceId,
String rackId,
int rebalanceTimeoutMs,
String clientId,
String clientHost,
List<String> subscribedTopicNames,
String assignorName,
List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
final long currentTimeMs = time.milliseconds();
final List<CoordinatorRecord> records = new ArrayList<>();
// Get or create the consumer group.
boolean createIfNotExists = memberEpoch == 0;
final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
throwIfConsumerGroupIsFull(group, memberId);
// Get or create the member.
if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
final ConsumerGroupMember member;
if (instanceId == null) {
member = getOrMaybeSubscribeDynamicConsumerGroupMember(
group,
memberId,
memberEpoch,
ownedTopicPartitions,
createIfNotExists,
false
);
} else {
member = getOrMaybeSubscribeStaticConsumerGroupMember(
group,
memberId,
memberEpoch,
instanceId,
ownedTopicPartitions,
createIfNotExists,
false,
records
);
}
// 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
// record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
// changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
// record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
// changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
.maybeUpdateInstanceId(Optional.ofNullable(instanceId))
.maybeUpdateRackId(Optional.ofNullable(rackId))
.maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
.maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
.maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
.setClientId(clientId)
.setClientHost(clientHost)
.setClassicMemberMetadata(null)
.build();
boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
groupId,
member,
updatedMember,
records
);
int groupEpoch = group.groupEpoch();
Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
SubscriptionType subscriptionType = group.subscriptionType();
if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
// The subscription metadata is updated in two cases:
// 1) The member has updated its subscriptions;
// 2) The refresh deadline has been reached.
subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
subscriptionMetadata = group.computeSubscriptionMetadata(
subscribedTopicNamesMap,
metadataImage.topics(),
metadataImage.cluster()
);
int numMembers = group.numMembers();
if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
numMembers++;
}
subscriptionType = ModernGroup.subscriptionType(
subscribedTopicNamesMap,
numMembers
);
if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
log.info("[GroupId {}] Computed new subscription metadata: {}.",
groupId, subscriptionMetadata);
bumpGroupEpoch = true;
records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
}
if (bumpGroupEpoch) {
groupEpoch += 1;
records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
}
group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
}
// 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
// the existing and the new target assignment is persisted to the partition.
final int targetAssignmentEpoch;
final Assignment targetAssignment;
if (groupEpoch > group.assignmentEpoch()) {
targetAssignment = updateTargetAssignment(
group,
groupEpoch,
member,
updatedMember,
subscriptionMetadata,
subscriptionType,
records
);
targetAssignmentEpoch = groupEpoch;
} else {
targetAssignmentEpoch = group.assignmentEpoch();
targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
}
// 3. Reconcile the member's assignment with the target assignment if the member is not
// fully reconciled yet.
updatedMember = maybeReconcile(
groupId,
updatedMember,
group::currentPartitionEpoch,
targetAssignmentEpoch,
targetAssignment,
ownedTopicPartitions,
records
);
scheduleConsumerGroupSessionTimeout(groupId, memberId);
// Prepare the response.
ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
.setMemberId(updatedMember.memberId())
.setMemberEpoch(updatedMember.memberEpoch())
.setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));
// The assignment is only provided in the following cases:
// 1. The member sent a full request. It does so when joining or rejoining the group with zero
// as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
// (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
// as those must be set in a full request.
// 2. The member's assignment has been updated.
boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
}
return new CoordinatorResult<>(records, response);
} | @Test
public void testUnknownMemberIdJoinsConsumerGroup() {
String groupId = "fooup";
// Use a static member id as it makes the test easier.
String memberId = Uuid.randomUuid().toString();
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor()))
.build();
// A first member joins to create the group.
context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId)
.setMemberEpoch(0)
.setServerAssignor(NoOpPartitionAssignor.NAME)
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setTopicPartitions(Collections.emptyList()));
// The second member is rejected because the member id is unknown and
// the member epoch is not zero.
assertThrows(UnknownMemberIdException.class, () ->
context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(Uuid.randomUuid().toString())
.setMemberEpoch(1)
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setTopicPartitions(Collections.emptyList())));
} |
public ParsedQuery parse(final String query) throws ParseException {
final TokenCollectingQueryParser parser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER);
parser.setSplitOnWhitespace(true);
parser.setAllowLeadingWildcard(allowLeadingWildcard);
final Query parsed = parser.parse(query);
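// Collect the lexer tokens, then visit the parsed query to gather the individual terms.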
final ParsedQuery.Builder builder = ParsedQuery.builder().query(query);
builder.tokensBuilder().addAll(parser.getTokens());
final TermCollectingQueryVisitor visitor = new TermCollectingQueryVisitor(ANALYZER, parser.getTokenLookup());
parsed.visit(visitor);
builder.termsBuilder().addAll(visitor.getParsedTerms());
return builder.build();
} | @Test
void testOrQuery() throws ParseException {
final ParsedQuery query = parser.parse("unknown_field:(x OR y)");
assertThat(query.terms().size()).isEqualTo(2);
assertThat(query.terms())
.extracting(ParsedTerm::field)
.containsOnly("unknown_field");
} |
@Override
public Rule getByUuid(String uuid) {
ensureInitialized();
Rule rule = rulesByUuid.get(uuid);
checkArgument(rule != null, "Can not find rule for uuid %s. This rule does not exist in DB", uuid);
return rule;
} | @Test
public void first_call_to_getById_triggers_call_to_db_and_any_subsequent_get_or_find_call_does_not() {
underTest.getByUuid(AB_RULE.getUuid());
verify(ruleDao, times(1)).selectAll(any(DbSession.class));
verifyNoMethodCallTriggersCallToDB();
} |
@Override
public void validatePostList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return;
}
// Fetch the post records
List<PostDO> posts = postMapper.selectBatchIds(ids);
Map<Long, PostDO> postMap = convertMap(posts, PostDO::getId);
// Validate each id
ids.forEach(id -> {
PostDO post = postMap.get(id);
if (post == null) {
throw exception(POST_NOT_FOUND);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(post.getStatus())) {
throw exception(POST_NOT_ENABLE, post.getName());
}
});
} | @Test
public void testValidatePostList_notEnable() {
// Mock data
PostDO postDO = randomPostDO().setStatus(CommonStatusEnum.DISABLE.getStatus());
postMapper.insert(postDO);
// Prepare parameters
List<Long> ids = singletonList(postDO.getId());
// Invoke and assert the exception
assertServiceException(() -> postService.validatePostList(ids), POST_NOT_ENABLE,
postDO.getName());
} |
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
CruiseConfig configForEdit;
CruiseConfig config;
LOGGER.debug("[Config Save] Loading config holder");
configForEdit = deserializeConfig(content);
if (callback != null) callback.call(configForEdit);
config = preprocessAndValidate(configForEdit);
return new GoConfigHolder(config, configForEdit);
} | @Test
void shouldNotAllowEmptyViewForPerforce() {
String p4XML = Objects.requireNonNull(this.getClass().getResource("/data/p4-cruise-config-empty-view.xml")).getFile();
assertThatThrownBy(() -> xmlLoader.loadConfigHolder(loadWithMigration(p4XML)))
.as("Should not accept p4 section with empty view.")
.hasMessageContaining("P4 view cannot be empty.");
} |
protected void mergeAndRevive(ConsumeReviveObj consumeReviveObj) throws Throwable {
ArrayList<PopCheckPoint> sortList = consumeReviveObj.genSortList();
POP_LOGGER.info("reviveQueueId={}, ck listSize={}", queueId, sortList.size());
if (sortList.size() != 0) {
POP_LOGGER.info("reviveQueueId={}, 1st ck, startOffset={}, reviveOffset={}; last ck, startOffset={}, reviveOffset={}", queueId, sortList.get(0).getStartOffset(),
sortList.get(0).getReviveOffset(), sortList.get(sortList.size() - 1).getStartOffset(), sortList.get(sortList.size() - 1).getReviveOffset());
}
long newOffset = consumeReviveObj.oldOffset;
for (PopCheckPoint popCheckPoint : sortList) {
if (!shouldRunPopRevive) {
POP_LOGGER.info("slave skip ck process, revive topic={}, reviveQueueId={}", reviveTopic, queueId);
break;
}
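// Stop once the remaining checkpoints are too recent to be safely revived in this pass.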
if (consumeReviveObj.endTime - popCheckPoint.getReviveTime() <= (PopAckConstants.ackTimeInterval + PopAckConstants.SECOND)) {
break;
}
// check normal topic, skip ck , if normal topic is not exist
String normalTopic = KeyBuilder.parseNormalTopic(popCheckPoint.getTopic(), popCheckPoint.getCId());
if (brokerController.getTopicConfigManager().selectTopicConfig(normalTopic) == null) {
POP_LOGGER.warn("reviveQueueId={}, can not get normal topic {}, then continue", queueId, popCheckPoint.getTopic());
newOffset = popCheckPoint.getReviveOffset();
continue;
}
if (null == brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(popCheckPoint.getCId())) {
POP_LOGGER.warn("reviveQueueId={}, can not get cid {}, then continue", queueId, popCheckPoint.getCId());
newOffset = popCheckPoint.getReviveOffset();
continue;
}
while (inflightReviveRequestMap.size() > 3) {
waitForRunning(100);
Pair<Long, Boolean> pair = inflightReviveRequestMap.firstEntry().getValue();
if (!pair.getObject2() && System.currentTimeMillis() - pair.getObject1() > 1000 * 30) {
PopCheckPoint oldCK = inflightReviveRequestMap.firstKey();
rePutCK(oldCK, pair);
inflightReviveRequestMap.remove(oldCK);
POP_LOGGER.warn("stay too long, remove from reviveRequestMap, {}, {}, {}, {}", popCheckPoint.getTopic(),
popCheckPoint.getBrokerName(), popCheckPoint.getQueueId(), popCheckPoint.getStartOffset());
}
}
reviveMsgFromCk(popCheckPoint);
newOffset = popCheckPoint.getReviveOffset();
}
if (newOffset > consumeReviveObj.oldOffset) {
if (!shouldRunPopRevive) {
POP_LOGGER.info("slave skip commit, revive topic={}, reviveQueueId={}", reviveTopic, queueId);
return;
}
this.brokerController.getConsumerOffsetManager().commitOffset(PopAckConstants.LOCAL_HOST, PopAckConstants.REVIVE_GROUP, reviveTopic, queueId, newOffset);
}
reviveOffset = newOffset;
consumeReviveObj.newOffset = newOffset;
} | @Test
public void testReviveMsgFromCk_messageNotFound_noRetry() throws Throwable {
PopCheckPoint ck = buildPopCheckPoint(0, 0, 0);
PopReviveService.ConsumeReviveObj reviveObj = new PopReviveService.ConsumeReviveObj();
reviveObj.map.put("", ck);
reviveObj.endTime = System.currentTimeMillis();
when(escapeBridge.getMessageAsync(anyString(), anyLong(), anyInt(), anyString(), anyBoolean()))
.thenReturn(CompletableFuture.completedFuture(Triple.of(null, "", false)));
popReviveService.mergeAndRevive(reviveObj);
verify(escapeBridge, times(0)).putMessageToSpecificQueue(any(MessageExtBrokerInner.class)); // write retry
verify(messageStore, times(0)).putMessage(any(MessageExtBrokerInner.class)); // rewrite CK
} |
public MessageListener messageListener(MessageListener messageListener, boolean addConsumerSpan) {
if (messageListener instanceof TracingMessageListener) return messageListener;
return new TracingMessageListener(messageListener, this, addConsumerSpan);
} | @Test void messageListener_doesntDoubleWrap() {
MessageListener wrapped = jmsTracing.messageListener(mock(MessageListener.class), false);
assertThat(jmsTracing.messageListener(wrapped, false))
.isSameAs(wrapped);
} |
public static URepeated create(CharSequence identifier, UExpression expression) {
return new AutoValue_URepeated(identifier.toString(), expression);
} | @Test
public void serialization() {
SerializableTester.reserializeAndAssert(URepeated.create("foo", UFreeIdent.create("foo")));
} |
public Publisher<K> keyIterator() {
return keyIterator(null);
} | @Test
public void testKeyIterator() {
RMapRx<Integer, Integer> map = redisson.getMap("simple");
sync(map.put(1, 0));
sync(map.put(3, 5));
sync(map.put(4, 6));
sync(map.put(7, 8));
List<Integer> keys = new ArrayList<Integer>(Arrays.asList(1, 3, 4, 7));
for (Iterator<Integer> iterator = toIterator(map.keyIterator()); iterator.hasNext();) {
Integer value = iterator.next();
if (!keys.remove(value)) {
Assertions.fail();
}
}
Assertions.assertEquals(0, keys.size());
} |
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyWithElementsThatThrowWhenYouCallHashCodeFailureTooMany() {
HashCodeThrower one = new HashCodeThrower();
HashCodeThrower two = new HashCodeThrower();
expectFailureWhenTestingThat(asList(one, two)).containsExactly(one);
} |
boolean sendRecords() {
int processed = 0;
recordBatch(toSend.size());
final SourceRecordWriteCounter counter =
toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
for (final SourceRecord preTransformRecord : toSend) {
ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord);
final SourceRecord record = transformationChain.apply(context, preTransformRecord);
final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record);
if (producerRecord == null || context.failed()) {
counter.skipRecord();
recordDropped(preTransformRecord);
processed++;
continue;
}
log.trace("{} Appending record to the topic {} with key {}, value {}", this, record.topic(), record.key(), record.value());
Optional<SubmittedRecords.SubmittedRecord> submittedRecord = prepareToSendRecord(preTransformRecord, producerRecord);
try {
final String topic = producerRecord.topic();
maybeCreateTopic(topic);
producer.send(
producerRecord,
(recordMetadata, e) -> {
if (e != null) {
if (producerClosed) {
log.trace("{} failed to send record to {}; this is expected as the producer has already been closed", AbstractWorkerSourceTask.this, topic, e);
} else {
log.error("{} failed to send record to {}: ", AbstractWorkerSourceTask.this, topic, e);
}
log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord);
producerSendFailed(context, false, producerRecord, preTransformRecord, e);
if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) {
counter.skipRecord();
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
}
} else {
counter.completeRecord();
log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
AbstractWorkerSourceTask.this,
recordMetadata.topic(), recordMetadata.partition(),
recordMetadata.offset());
recordSent(preTransformRecord, producerRecord, recordMetadata);
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
if (topicTrackingEnabled) {
recordActiveTopic(producerRecord.topic());
}
}
});
// Note that this will cause retries to take place within a transaction
} catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ",
this, producerRecord.topic(), producerRecord.partition(), e);
toSend = toSend.subList(processed, toSend.size());
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop);
counter.retryRemaining();
return false;
} catch (ConnectException e) {
log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ",
this, producerRecord.topic(), producerRecord.partition(), e);
log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e);
throw e;
} catch (KafkaException e) {
producerSendFailed(context, true, producerRecord, preTransformRecord, e);
}
processed++;
recordDispatched(preTransformRecord);
}
toSend = null;
batchDispatched();
return true;
} | @Test
public void testSendRecordsConvertsData() {
createWorkerTask();
// Can just use the same record for key and value
List<SourceRecord> records = Collections.singletonList(
new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD)
);
expectSendRecord(emptyHeaders());
expectApplyTransformationChain();
expectTopicCreation(TOPIC);
workerTask.toSend = records;
workerTask.sendRecords();
ArgumentCaptor<ProducerRecord<byte[], byte[]>> sent = verifySendRecord();
assertArrayEquals(SERIALIZED_KEY, sent.getValue().key());
assertArrayEquals(SERIALIZED_RECORD, sent.getValue().value());
verifyTaskGetTopic();
verifyTopicCreation();
} |
@Override
public void write(int b) throws IOException
{
checkClosed();
if (chunkSize - currentBufferPointer <= 0)
{
expandBuffer();
}
currentBuffer.put((byte) b);
currentBufferPointer++;
pointer++;
if (pointer > size)
{
size = pointer;
}
} | @Test
void testAlreadyClose() throws IOException
{
try (RandomAccess randomAccessReadWrite = new RandomAccessReadWriteBuffer())
{
byte[] bytes = new byte[RandomAccessReadBuffer.DEFAULT_CHUNK_SIZE_4KB];
randomAccessReadWrite.write(bytes);
randomAccessReadWrite.close();
assertThrows(IOException.class, () -> randomAccessReadWrite.seek(0));
}
} |
@Override
public <VOut> KStream<K, VOut> processValues(
final FixedKeyProcessorSupplier<? super K, ? super V, VOut> processorSupplier,
final String... stateStoreNames
) {
return processValues(
processorSupplier,
Named.as(builder.newProcessorName(PROCESSVALUES_NAME)),
stateStoreNames
);
} | @Test
public void shouldNotAllowNullStoreNameOnProcessValues() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.processValues(fixedKeyProcessorSupplier, (String) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't be null"));
} |
@Override
public Properties processor(String config) throws IOException {
Properties properties = new Properties();
try (Reader reader = new InputStreamReader(new ByteArrayInputStream(config.getBytes()), StandardCharsets.UTF_8)) {
properties.load(reader);
}
return properties;
} | @Test
void processor() throws IOException {
String properties = "registry.type=file\n" +
"registry.file.name=file-test-pro.conf";
Properties processor = new ProcessorProperties().processor(properties);
Assertions.assertEquals("file", processor.get("registry.type"));
// keys that do not exist return null
Assertions.assertNull(processor.get("registry"));
Assertions.assertNull(processor.get("null"));
} |
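
A minimal JDK-only sketch of the same load-from-string pattern (class name here is illustrative, not part of the project above). Properties.load accepts a Reader directly, so the byte-stream round trip in the focal method could also be replaced with a StringReader:

import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;

public class PropertiesFromStringDemo {
    public static void main(String[] args) throws IOException {
        String config = "registry.type=file\nregistry.file.name=file-test-pro.conf";
        Properties properties = new Properties();
        // Properties.load accepts a Reader, so no charset juggling is required
        try (StringReader reader = new StringReader(config)) {
            properties.load(reader);
        }
        System.out.println(properties.getProperty("registry.type")); // file
    }
}
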
public Optional<AlluxioURI> getRootUfsUri() {
return Optional.ofNullable(mRootUfsUri);
} | @Test
public void testGetRootUfsUri() {
mJCommander.parse("-m", "/tmp/fuse-mp", "-u", "scheme://host/path");
assertEquals(Optional.of(new AlluxioURI("scheme://host/path")), mOptions.getRootUfsUri());
} |
@Override
public void process(Exchange exchange) throws Exception {
final SchematronProcessor schematronProcessor = SchematronProcessorFactory.newSchematronEngine(endpoint.getRules());
final Object payload = exchange.getIn().getBody();
final String report;
if (payload instanceof Source) {
LOG.debug("Applying schematron validation on payload: {}", payload);
report = schematronProcessor.validate((Source) payload);
} else if (payload instanceof String) {
LOG.debug("Applying schematron validation on payload: {}", payload);
report = schematronProcessor.validate((String) payload);
} else {
String stringPayload = exchange.getIn().getBody(String.class);
LOG.debug("Applying schematron validation on payload: {}", stringPayload);
report = schematronProcessor.validate(stringPayload);
}
LOG.debug("Schematron validation report \n {}", report);
String status = getValidationStatus(report);
LOG.info("Schematron validation status : {}", status);
setValidationReport(exchange, report, status);
} | @Test
public void testProcessValidXML() throws Exception {
Exchange exc = new DefaultExchange(context, ExchangePattern.InOut);
exc.getIn().setBody(ClassLoader.getSystemResourceAsStream("xml/article-1.xml"));
// process xml payload
producer.process(exc);
// assert
assertEquals(Constants.SUCCESS, exc.getMessage().getHeader(Constants.VALIDATION_STATUS));
} |
@Override
public SofaResponse invoke(FilterInvoker invoker, SofaRequest request) throws SofaRpcException {
// consumer side; on the provider side loadTest is always false
SofaTracerSpan currentSpan = SofaTraceContextHolder.getSofaTraceContext().getCurrentSpan();
boolean loadTest = TracerUtils.isLoadTest(currentSpan);
if (loadTest) {
RpcInvokeContext.getContext().addCustomHeader(HEAD_KEY_TRAFFIC_TYPE.name(), PRESSURE);
}
// provider side; on the consumer side metadata == null
Metadata metadata = TracingContextKey.getKeyMetadata().get();
if (metadata != null) {
String s = metadata.get(HEAD_KEY_TRAFFIC_TYPE);
if (PRESSURE.equals(s)) {
currentSpan.getSofaTracerSpanContext().setBizBaggageItem(MARK, T);
}
}
return invoker.invoke(request);
} | @Test
public void testConsumerPressure() {
//consumer side
SofaTracerSpan currentSpan = SofaTraceContextHolder.getSofaTraceContext().getCurrentSpan();
Map<String, String> bizBaggage = currentSpan.getSofaTracerSpanContext().getBizBaggage();
bizBaggage.put("mark", "T");
Assert.assertTrue(TracerUtils.isLoadTest(currentSpan));
PressureMarkTransformFilter filter = new PressureMarkTransformFilter();
filter.invoke(invoker, request);
Assert.assertEquals(PRESSURE, invoker.getMetaHolder().get(HEAD_KEY_TRAFFIC_TYPE.name()));
} |
public static Future<Integer> authTlsHash(SecretOperator secretOperations, String namespace, KafkaClientAuthentication auth, List<CertSecretSource> certSecretSources) {
Future<Integer> tlsFuture;
if (certSecretSources == null || certSecretSources.isEmpty()) {
tlsFuture = Future.succeededFuture(0);
} else {
// get all TLS trusted certs, compute hash from each of them, sum hashes
tlsFuture = Future.join(certSecretSources.stream().map(certSecretSource ->
getCertificateAsync(secretOperations, namespace, certSecretSource)
.compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList()))
.compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
}
if (auth == null) {
return tlsFuture;
} else {
// compute hash from Auth
if (auth instanceof KafkaClientAuthenticationScram) {
// only passwordSecret can be changed
return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
.compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
} else if (auth instanceof KafkaClientAuthenticationPlain) {
// only passwordSecret can be changed
return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
.compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
} else if (auth instanceof KafkaClientAuthenticationTls) {
// custom cert can be used (and changed)
return ((KafkaClientAuthenticationTls) auth).getCertificateAndKey() == null ? tlsFuture :
tlsFuture.compose(tlsHash -> getCertificateAndKeyAsync(secretOperations, namespace, (KafkaClientAuthenticationTls) auth)
.compose(crtAndKey -> Future.succeededFuture(crtAndKey.certAsBase64String().hashCode() + crtAndKey.keyAsBase64String().hashCode() + tlsHash)));
} else if (auth instanceof KafkaClientAuthenticationOAuth) {
List<Future<Integer>> futureList = ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates() == null ?
new ArrayList<>() : ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates().stream().map(certSecretSource ->
getCertificateAsync(secretOperations, namespace, certSecretSource)
.compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList());
futureList.add(tlsFuture);
futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getAccessToken()));
futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getClientSecret()));
futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getRefreshToken()));
return Future.join(futureList)
.compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
} else {
// unknown Auth type
return tlsFuture;
}
}
} | @Test
void getHashPatternNotMatching(VertxTestContext context) {
String namespace = "ns";
CertSecretSource cert1 = new CertSecretSourceBuilder()
.withSecretName("cert-secret")
.withPattern("*.pem")
.build();
Secret secret = new SecretBuilder()
.withData(Map.of("ca.crt", "value", "ca2.crt", "value2"))
.build();
SecretOperator secretOps = mock(SecretOperator.class);
when(secretOps.getAsync(eq(namespace), eq("cert-secret"))).thenReturn(Future.succeededFuture(secret));
Checkpoint async = context.checkpoint();
VertxUtil.authTlsHash(secretOps, "ns", null, singletonList(cert1)).onComplete(context.succeeding(res -> {
assertThat(res, is(0));
async.flag();
}));
} |
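
The per-certificate hashes above are combined by summation, which makes the aggregate independent of the completion order of the async secret lookups. A self-contained sketch of that idea (the certificate strings are made up):

import java.util.List;

public class HashSumDemo {
    public static void main(String[] args) {
        List<String> certs = List.of("cert-a-pem", "cert-b-pem");
        // Summation is commutative, so the aggregate does not depend on
        // the order in which the individual certificates were fetched
        int aggregate = certs.stream().mapToInt(String::hashCode).sum();
        System.out.println(aggregate);
    }
}
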
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void negativeFloatLiteral() {
String inputExpression = "-10.5";
BaseNode number = parse( inputExpression );
assertThat( number).isInstanceOf(SignedUnaryNode.class);
assertThat( number.getResultType()).isEqualTo(BuiltInType.NUMBER);
assertLocation( inputExpression, number );
SignedUnaryNode sun = (SignedUnaryNode) number;
assertThat( sun.getSign()).isEqualTo(SignedUnaryNode.Sign.NEGATIVE);
assertThat( sun.getExpression()).isInstanceOf(NumberNode.class);
assertThat( sun.getExpression().getText()).isEqualTo("10.5");
} |
@Udf
public <T extends Comparable<? super T>> List<T> arraySortWithDirection(@UdfParameter(
description = "The array to sort") final List<T> input,
@UdfParameter(
description = "Marks the end of the series (inclusive)") final String direction) {
if (input == null || direction == null) {
return null;
}
if (SORT_DIRECTION_ASC.contains(direction.toUpperCase())) {
input.sort(nullsLast(naturalOrder()));
} else if (SORT_DIRECTION_DESC.contains(direction.toUpperCase())) {
input.sort(nullsLast(Collections.reverseOrder()));
} else {
return null;
}
return input;
} | @Test
public void shouldSortIntsAscending() {
final List<Integer> input = Arrays.asList(1, 3, -2);
final List<Integer> output = udf.arraySortWithDirection(input, "ascEnDing");
assertThat(output, contains(-2, 1, 3));
} |
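
The null handling in the UDF comes from Comparator.nullsLast, which keeps null elements at the tail in both directions. A JDK-only sketch of the same comparators (no ksqlDB classes required):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static java.util.Comparator.naturalOrder;
import static java.util.Comparator.nullsLast;

public class NullsLastSortDemo {
    public static void main(String[] args) {
        List<Integer> asc = Arrays.asList(1, null, 3, -2);
        asc.sort(nullsLast(naturalOrder()));
        System.out.println(asc); // [-2, 1, 3, null]

        List<Integer> desc = Arrays.asList(1, null, 3, -2);
        desc.sort(nullsLast(Collections.reverseOrder()));
        System.out.println(desc); // [3, 1, -2, null]
    }
}
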
public static void saveInstanceInfo(RequestInfo requestInfo) {
String key = requestInfo.getHost() + RemovalConstants.CONNECTOR + requestInfo.getPort();
InstanceInfo info = INSTANCE_MAP.computeIfAbsent(key, value -> {
InstanceInfo instanceInfo = new InstanceInfo();
instanceInfo.setHost(requestInfo.getHost());
instanceInfo.setPort(requestInfo.getPort());
return instanceInfo;
});
if (!requestInfo.isSuccess()) {
info.getRequestFailNum().getAndIncrement();
}
info.getRequestNum().getAndIncrement();
info.setLastInvokeTime(requestInfo.getRequestTime());
} | @Test
public void saveInstanceInfo() {
long time = System.currentTimeMillis();
for (int i = 0; i < NUM; i++) {
RequestInfo requestInfo = new RequestInfo();
requestInfo.setHost(HOST);
requestInfo.setPort(PORT);
requestInfo.setSuccess(true);
requestInfo.setRequestTime(time);
InstanceCache.saveInstanceInfo(requestInfo);
}
for (int i = 0; i < NUM; i++) {
RequestInfo requestInfo = new RequestInfo();
requestInfo.setHost(HOST);
requestInfo.setPort(PORT);
requestInfo.setSuccess(false);
requestInfo.setRequestTime(time);
InstanceCache.saveInstanceInfo(requestInfo);
}
Assert.assertTrue(InstanceCache.INSTANCE_MAP.containsKey(KEY));
InstanceInfo instanceInfo = InstanceCache.INSTANCE_MAP.get(KEY);
Assert.assertEquals(instanceInfo.getHost(), HOST);
Assert.assertEquals(instanceInfo.getPort(), PORT);
Assert.assertEquals(instanceInfo.getRequestNum().get(), NUM * 2);
Assert.assertEquals(instanceInfo.getLastInvokeTime(), time);
} |
@VisibleForTesting
public void validateConfigKeyUnique(Long id, String key) {
ConfigDO config = configMapper.selectByKey(key);
if (config == null) {
return;
}
// if id is null, there is no need to check whether the existing config has the same id
if (id == null) {
throw exception(CONFIG_KEY_DUPLICATE);
}
if (!config.getId().equals(id)) {
throw exception(CONFIG_KEY_DUPLICATE);
}
} | @Test
public void testValidateConfigKeyUnique_success() {
// invoke; expect success
configService.validateConfigKeyUnique(randomLongId(), randomString());
} |
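
The branching above encodes one rule: a key collision is fatal unless the match is the very row being updated. A minimal in-memory sketch of the same rule (names and exception type are illustrative):

import java.util.Map;

public class UniqueKeyDemo {
    static void validateUnique(Map<String, Long> keyToId, Long id, String key) {
        Long existingId = keyToId.get(key);
        if (existingId == null) {
            return; // key unused, nothing to check
        }
        // creating (id == null) or updating a different row both collide
        if (id == null || !existingId.equals(id)) {
            throw new IllegalStateException("duplicate config key: " + key);
        }
    }

    public static void main(String[] args) {
        Map<String, Long> db = Map.of("app.name", 1L);
        validateUnique(db, 1L, "app.name"); // ok: updating the same row
        validateUnique(db, null, "other");  // ok: key not taken
        try {
            validateUnique(db, 2L, "app.name"); // collides with row 1
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}
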
protected synchronized void doRestartConnectorAndTasks(RestartRequest request) {
String connectorName = request.connectorName();
Optional<RestartPlan> maybePlan = buildRestartPlan(request);
if (!maybePlan.isPresent()) {
log.debug("Skipping restart of connector '{}' since no status is available: {}", connectorName, request);
return;
}
RestartPlan plan = maybePlan.get();
log.info("Executing {}", plan);
// If requested, stop the connector and any tasks, marking each as restarting
final ExtendedAssignment currentAssignments = assignment;
final Collection<ConnectorTaskId> assignedIdsToRestart = plan.taskIdsToRestart()
.stream()
.filter(taskId -> currentAssignments.tasks().contains(taskId))
.collect(Collectors.toList());
final boolean restartConnector = plan.shouldRestartConnector() && currentAssignments.connectors().contains(connectorName);
final boolean restartTasks = !assignedIdsToRestart.isEmpty();
if (restartConnector) {
String stageDescription = "stopping to-be-restarted connector " + connectorName;
try (TickThreadStage stage = new TickThreadStage(stageDescription)) {
worker.stopAndAwaitConnector(connectorName);
}
onRestart(connectorName);
}
if (restartTasks) {
String stageDescription = "stopping " + assignedIdsToRestart.size() + " to-be-restarted tasks for connector " + connectorName;
// Stop the tasks and mark as restarting
try (TickThreadStage stage = new TickThreadStage(stageDescription)) {
worker.stopAndAwaitTasks(assignedIdsToRestart);
}
assignedIdsToRestart.forEach(this::onRestart);
}
// Now restart the connector and tasks
if (restartConnector) {
try {
startConnector(connectorName, (error, targetState) -> {
if (error == null) {
log.info("Connector '{}' restart successful", connectorName);
} else {
log.error("Connector '{}' restart failed", connectorName, error);
}
});
} catch (Throwable t) {
log.error("Connector '{}' restart failed", connectorName, t);
}
}
if (restartTasks) {
log.debug("Restarting {} of {} tasks for {}", assignedIdsToRestart.size(), plan.totalTaskCount(), request);
assignedIdsToRestart.forEach(taskId -> {
try {
if (startTask(taskId)) {
log.info("Task '{}' restart successful", taskId);
} else {
log.error("Task '{}' restart failed", taskId);
}
} catch (Throwable t) {
log.error("Task '{}' restart failed", taskId, t);
}
});
log.debug("Restarted {} of {} tasks for {} as requested", assignedIdsToRestart.size(), plan.totalTaskCount(), request);
}
log.info("Completed {}", plan);
} | @Test
public void testDoRestartConnectorAndTasksEmptyPlan() {
RestartRequest restartRequest = new RestartRequest(CONN1, false, true);
doReturn(Optional.empty()).when(herder).buildRestartPlan(restartRequest);
herder.doRestartConnectorAndTasks(restartRequest);
verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore);
} |
public static String stripTrailingSlash(String path) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(path), "path must not be null or empty");
String result = path;
while (!result.endsWith("://") && result.endsWith("/")) {
result = result.substring(0, result.length() - 1);
}
return result;
} | @Test
void testDoNotStripTrailingSlashForRootPath() {
String rootPath = "blobstore://";
assertThat(LocationUtil.stripTrailingSlash(rootPath))
.as("Should be root path")
.isEqualTo(rootPath);
} |
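
A usage sketch, assuming Iceberg's org.apache.iceberg.util.LocationUtil is on the classpath; only a bare scheme root ending in :// keeps its slashes:

import org.apache.iceberg.util.LocationUtil;

public class StripTrailingSlashDemo {
    public static void main(String[] args) {
        System.out.println(LocationUtil.stripTrailingSlash("s3://bucket/path///")); // s3://bucket/path
        System.out.println(LocationUtil.stripTrailingSlash("blobstore://"));        // blobstore:// (root unchanged)
    }
}
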
public FEELFnResult<BigDecimal> invoke(@ParameterName( "list" ) List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
FEELFnResult<BigDecimal> s = sum.invoke( list );
Function<FEELEvent, FEELFnResult<BigDecimal>> ifLeft = event ->
FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "unable to sum the elements which is required to calculate the mean"));
Function<BigDecimal, FEELFnResult<BigDecimal>> ifRight = (sum) -> {
try {
return FEELFnResult.ofResult( sum.divide( BigDecimal.valueOf( list.size() ), MathContext.DECIMAL128 ) );
} catch (Exception e) {
return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to perform division to calculate the mean", e) );
}
};
return s.cata(ifLeft, ifRight);
} | @Test
void invokeArrayEmpty() {
FunctionTestUtil.assertResultError(meanFunction.invoke(new Object[]{}), InvalidParametersEvent.class);
} |
public static Combine.CombineFn<Boolean, ?, Long> combineFn() {
return new CountIfFn();
} | @Test
public void testReturnsAccumulatorUnchangedForNullInput() {
Combine.CombineFn countIfFn = CountIf.combineFn();
long[] accumulator = (long[]) countIfFn.addInput(countIfFn.createAccumulator(), null);
assertEquals(0L, accumulator[0]);
} |
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) {// Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
} | @Test
public void testSplitStringStringTrueDoubledSplitChar() throws Exception {
assertThat(JOrphanUtils.split("a;;b;;;;;;d;;e;;;;f", ";;", true),
CoreMatchers.equalTo(new String[]{"a", "b", "d", "e", "f"}));
} |
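
A usage sketch of the truncate flag, assuming JMeter's org.apache.jorphan.util.JOrphanUtils is available: with truncate=true doubled delimiters collapse and leading/trailing delimiters are dropped, while truncate=false preserves the empty elements.

import java.util.Arrays;

import org.apache.jorphan.util.JOrphanUtils;

public class SplitDemo {
    public static void main(String[] args) {
        // Doubled and trailing ";;" are removed before splitting
        System.out.println(Arrays.toString(JOrphanUtils.split("a;;b;;;;d;;", ";;", true)));
        // -> [a, b, d]
        // Without truncation the empty elements survive
        System.out.println(Arrays.toString(JOrphanUtils.split("a;;b;;;;d;;", ";;", false)));
        // -> [a, b, , d, ]
    }
}
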
public boolean containsShardingTable(final Collection<String> logicTableNames) {
for (String each : logicTableNames) {
if (isShardingTable(each)) {
return true;
}
}
return false;
} | @Test
void assertContainsShardingTable() {
assertTrue(createMaximumShardingRule().containsShardingTable(Collections.singleton("logic_table")));
} |
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) {// Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
} | @Test
public void testSplitSSSSingleDelimiterWithDefaultValue() {
// Test non-empty parameters
assertThat(JOrphanUtils.split("a,bc,,", ",", "?"), CoreMatchers.equalTo(new String[]{"a", "bc", "?", "?"}));
} |
@Nonnull
public BatchSource<T> build() {
requireNonNull(clientFn, "clientFn must be set");
requireNonNull(searchRequestFn, "searchRequestFn must be set");
requireNonNull(mapToItemFn, "mapToItemFn must be set");
ElasticSourceConfiguration<T> configuration = new ElasticSourceConfiguration<>(
restHighLevelClientFn(clientFn),
searchRequestFn, optionsFn, mapToItemFn, slicing, coLocatedReading,
scrollKeepAlive, retries
);
ElasticSourcePMetaSupplier<T> metaSupplier = new ElasticSourcePMetaSupplier<>(configuration);
return Sources.batchFromProcessor(DEFAULT_NAME, metaSupplier);
} | @Test
public void when_createElasticSourceUsingBuilder_then_sourceHasCorrectName() {
BatchSource<Object> source = builderWithRequiredParams()
.build();
assertThat(source.name()).isEqualTo("elasticSource");
} |
public void add(Task task) {
tasks.put('/' + task.getName(), task);
TaskExecutor taskExecutor = new TaskExecutor(task);
try {
final Method executeMethod = task.getClass().getMethod("execute",
Map.class, PrintWriter.class);
if (executeMethod.isAnnotationPresent(Timed.class)) {
final Timed annotation = executeMethod.getAnnotation(Timed.class);
final String name = chooseName(annotation.name(),
annotation.absolute(),
task);
taskExecutor = new TimedTask(taskExecutor, metricRegistry.timer(name));
}
if (executeMethod.isAnnotationPresent(Metered.class)) {
final Metered annotation = executeMethod.getAnnotation(Metered.class);
final String name = chooseName(annotation.name(),
annotation.absolute(),
task);
taskExecutor = new MeteredTask(taskExecutor, metricRegistry.meter(name));
}
if (executeMethod.isAnnotationPresent(ExceptionMetered.class)) {
final ExceptionMetered annotation = executeMethod.getAnnotation(ExceptionMetered.class);
final String name = chooseName(annotation.name(),
annotation.absolute(),
task,
ExceptionMetered.DEFAULT_NAME_SUFFIX);
taskExecutor = new ExceptionMeteredTask(taskExecutor, metricRegistry.meter(name), annotation.cause());
}
} catch (NoSuchMethodException ignored) {
}
taskExecutors.put(task, taskExecutor);
} | @Test
void testDoNotPrintStackTrackWhenDisabled() throws Exception {
final TaskConfiguration taskConfiguration = new TaskConfiguration();
taskConfiguration.setPrintStackTraceOnError(false);
final TaskServlet servlet = new TaskServlet(metricRegistry, taskConfiguration);
servlet.add(gc);
final ServletInputStream bodyStream = new TestServletInputStream(
new ByteArrayInputStream(new byte[0]));
when(request.getMethod()).thenReturn("POST");
when(request.getPathInfo()).thenReturn("/gc");
when(request.getParameterNames()).thenReturn(Collections.emptyEnumeration());
when(request.getInputStream()).thenReturn(bodyStream);
final StringWriter stringWriter = new StringWriter();
final PrintWriter output = new PrintWriter(stringWriter, true);
when(response.getWriter()).thenReturn(output);
doThrow(new RuntimeException("whoops")).when(gc).execute(any(), any());
servlet.service(request, response);
assertThat(stringWriter.toString().trim()).isEqualTo("whoops");
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testKin8nmQuantile() {
test(Loss.quantile(0.5), "kin8nm", Kin8nm.formula, Kin8nm.data, 0.1814);
} |
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof ShowStatement) {
return Optional.of(new PostgreSQLShowVariableExecutor((ShowStatement) sqlStatement));
}
return Optional.empty();
} | @Test
void assertCreateWithSelectNonPgCatalog() {
SelectStatementContext selectStatementContext = mock(SelectStatementContext.class);
when(selectStatementContext.getSqlStatement()).thenReturn(new PostgreSQLSelectStatement());
assertThat(new PostgreSQLAdminExecutorCreator().create(selectStatementContext, "select 1", "", Collections.emptyList()), is(Optional.empty()));
} |
public void resolveFields(SearchContext searchContext, String indexMapping) throws StarRocksConnectorException {
JSONObject jsonObject = new JSONObject(indexMapping);
// the indexName use alias takes the first mapping
Iterator<String> keys = jsonObject.keys();
String docKey = keys.next();
JSONObject docData = jsonObject.optJSONObject(docKey);
JSONObject mappings = docData.optJSONObject("mappings");
JSONObject rootSchema = mappings.optJSONObject(searchContext.type());
JSONObject properties;
// In Elasticsearch 7.x, types were removed from the ES mapping; the default type is `_doc`
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/removal-of-types.html
// From Elasticsearch 8.x, specifying types in requests is no longer supported,
// and the include_type_name parameter is removed
// https://www.elastic.co/guide/en/elasticsearch/reference/7.17/removal-of-types.html
if (rootSchema == null) {
// 1. before 7.0, if the `type` does not exist in the index, rootSchema is null;
// the `properties == null` check below then throws the exception
// 2. from 8.x onwards, type is removed from mappings
properties = mappings.optJSONObject("properties");
} else {
properties = rootSchema.optJSONObject("properties");
}
if (properties == null) {
throw new StarRocksConnectorException("index[" + searchContext.sourceIndex() + "] type[" + searchContext.type() +
"] mapping not found for the ES Cluster");
}
for (Column col : searchContext.columns()) {
String colName = col.getName();
// if a column exists in the StarRocks table but is not found in the ES mapping, we choose to ignore it
if (!properties.has(colName)) {
continue;
}
JSONObject fieldObject = properties.optJSONObject(colName);
resolveKeywordFields(searchContext, fieldObject, colName);
resolveDocValuesFields(searchContext, fieldObject, colName);
}
} | @Test
public void testExtractFieldsNormal() throws Exception {
MappingPhase mappingPhase = new MappingPhase(null);
// ES version < 7.0
EsTable esTableBefore7X = fakeEsTable("fake", "test", "doc", columns);
SearchContext searchContext = new SearchContext(esTableBefore7X);
mappingPhase.resolveFields(searchContext, loadJsonFromFile("data/es/test_index_mapping.json"));
assertEquals("k3.keyword", searchContext.fetchFieldsContext().get("k3"));
assertEquals("k3.keyword", searchContext.docValueFieldsContext().get("k3"));
assertEquals("k1", searchContext.docValueFieldsContext().get("k1"));
assertEquals("k2", searchContext.docValueFieldsContext().get("k2"));
// ES version >= 7.0
EsTable esTableAfter7X = fakeEsTable("fake", "test", "_doc", columns);
SearchContext searchContext1 = new SearchContext(esTableAfter7X);
mappingPhase.resolveFields(searchContext1, loadJsonFromFile("data/es/test_index_mapping_after_7x.json"));
assertEquals("k3.keyword", searchContext1.fetchFieldsContext().get("k3"));
assertEquals("k3.keyword", searchContext1.docValueFieldsContext().get("k3"));
assertEquals("k1", searchContext1.docValueFieldsContext().get("k1"));
assertEquals("k2", searchContext1.docValueFieldsContext().get("k2"));
} |
@NonNull
@Override
public FileName toProviderFileName( @NonNull ConnectionFileName pvfsFileName, @NonNull T details )
throws KettleException {
StringBuilder providerUriBuilder = new StringBuilder();
appendProviderUriConnectionRoot( providerUriBuilder, details );
// Examples:
// providerUriBuilder: "hcp://domain.my:443/root/path" | "local:///C:/root/path" | "s3://"
// getPath(): "/folder/sub-folder" | "/"
appendProviderUriRestPath( providerUriBuilder, pvfsFileName.getPath(), details );
// Examples: "hcp://domain.my:443/root/path/folder/sub-folder" | "s3://folder/sub-folder"
// Preserve file type information.
if ( pvfsFileName.getType().hasChildren() ) {
providerUriBuilder.append( SEPARATOR );
}
return parseUri( providerUriBuilder.toString() );
} | @Test
public void testToProviderFileNameHandlesConnectionsWithDomainAndRootPath() throws Exception {
mockDetailsWithDomain( details1, "my-domain:8080" );
mockDetailsWithRootPath( details1, "my/root/path" );
ConnectionFileName pvfsFileName = mockPvfsFileNameWithPath( "/rest/path" );
FileName providerFileName = transformer.toProviderFileName( pvfsFileName, details1 );
assertEquals( "scheme1://my-domain:8080/my/root/path/rest/path", providerFileName.getURI() );
// Should do provider uri normalization.
verify( kettleVFS, times( 1 ) ).resolveURI( any() );
} |
public String namespace(Namespace ns) {
return SLASH.join("v1", prefix, "namespaces", RESTUtil.encodeNamespace(ns));
} | @Test
public void testNamespaceWithSlash() {
Namespace ns = Namespace.of("n/s");
assertThat(withPrefix.namespace(ns)).isEqualTo("v1/ws/catalog/namespaces/n%2Fs");
assertThat(withoutPrefix.namespace(ns)).isEqualTo("v1/namespaces/n%2Fs");
} |
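
The %2F in the expected paths is the percent-encoded / of the namespace level, which keeps it distinct from a real path separator. A JDK-only sketch of that encoding (URLEncoder stands in here for RESTUtil's own helper):

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class NamespaceEncodingDemo {
    public static void main(String[] args) {
        // A '/' inside a namespace level must not be read as a path separator
        String encoded = URLEncoder.encode("n/s", StandardCharsets.UTF_8);
        System.out.println("v1/namespaces/" + encoded); // v1/namespaces/n%2Fs
    }
}
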
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
} | @Test
public void checkKafkaJbodEphemeralStorageSingleBroker() {
Kafka kafka = new KafkaBuilder(KAFKA)
.editSpec()
.editKafka()
.withConfig(Map.of(
// We want to avoid unrelated warnings
KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 1,
KafkaConfiguration.MIN_INSYNC_REPLICAS, 1
))
.endKafka()
.endSpec()
.build();
KafkaNodePool singleNode = new KafkaNodePoolBuilder(POOL_A)
.editSpec()
.withReplicas(1)
.withStorage(
new JbodStorageBuilder().withVolumes(
new EphemeralStorageBuilder().withId(1).build(),
new EphemeralStorageBuilder().withId(2).build()
).build())
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, singleNode), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(1));
Condition warning = warnings.get(0);
assertThat(warning.getReason(), is("KafkaStorage"));
assertThat(warning.getStatus(), is("True"));
assertThat(warning.getMessage(), is("A Kafka cluster with a single broker node and ephemeral storage will lose topic messages after any restart or rolling update."));
} |
public int add(Object o) {
HollowTypeMapper typeMapper = getTypeMapper(o.getClass(), null, null);
return typeMapper.write(o);
} | @Test
public void testIntPreassignedOrdinal() {
HollowObjectMapper mapper = new HollowObjectMapper(writeStateEngine);
TypeWithIntAssignedOrdinal o = new TypeWithIntAssignedOrdinal();
o.__assigned_ordinal = 1;
mapper.add(o);
// int fields are ignored
Assert.assertEquals(1, o.__assigned_ordinal);
} |
@Override
public void trace(String msg) {
logger.trace(msg);
} | @Test
void testMarkerTrace() {
jobRunrDashboardLogger.trace(marker, "trace");
verify(slfLogger).trace(marker, "trace");
} |
public String publish(TopicName topic, Map<String, String> attributes, ByteString data)
throws PubsubResourceManagerException {
checkIsUsable();
if (!createdTopics.contains(topic)) {
throw new IllegalArgumentException(
"Can not publish to a topic not managed by this instance.");
}
LOG.info("Publishing message with {} bytes to topic '{}'", data.size(), topic);
PubsubMessage pubsubMessage =
PubsubMessage.newBuilder().putAllAttributes(attributes).setData(data).build();
try {
Publisher publisher = publisherFactory.createPublisher(topic);
String messageId = publisher.publish(pubsubMessage).get();
LOG.info("Message published with id '{}'", messageId);
publisher.shutdown();
return messageId;
} catch (Exception e) {
throw new PubsubResourceManagerException("Error publishing message to Pubsub", e);
}
} | @Test
public void testPublishMessageUnmanagedTopicShouldFail() {
Map<String, String> attributes = ImmutableMap.of("key1", "value1");
ByteString data = ByteString.copyFromUtf8("valid message");
IllegalArgumentException exception =
assertThrows(
IllegalArgumentException.class,
() -> testManager.publish(TOPIC_REFERENCE, attributes, data));
assertThat(exception).hasMessageThat().contains("topic not managed");
} |
public void sendCouponNewsletter() {
try {
// Retrieve the list of contacts from the "weekly-coupons-newsletter" contact list
// snippet-start:[sesv2.java2.newsletter.ListContacts]
ListContactsRequest contactListRequest = ListContactsRequest.builder()
.contactListName(CONTACT_LIST_NAME)
.build();
List<String> contactEmails;
try {
ListContactsResponse contactListResponse = sesClient.listContacts(contactListRequest);
contactEmails = contactListResponse.contacts().stream()
.map(Contact::emailAddress)
.toList();
} catch (Exception e) {
// TODO: Remove when listContacts's GET body issue is resolved.
contactEmails = this.contacts;
}
// snippet-end:[sesv2.java2.newsletter.ListContacts]
// Send an email using the "weekly-coupons" template to each contact in the list
// snippet-start:[sesv2.java2.newsletter.SendEmail.template]
String coupons = Files.readString(Paths.get("resources/coupon_newsletter/sample_coupons.json"));
for (String emailAddress : contactEmails) {
SendEmailRequest newsletterRequest = SendEmailRequest.builder()
.destination(Destination.builder().toAddresses(emailAddress).build())
.content(EmailContent.builder()
.template(Template.builder()
.templateName(TEMPLATE_NAME)
.templateData(coupons)
.build())
.build())
.fromEmailAddress(this.verifiedEmail)
.listManagementOptions(ListManagementOptions.builder()
.contactListName(CONTACT_LIST_NAME)
.build())
.build();
SendEmailResponse newsletterResponse = sesClient.sendEmail(newsletterRequest);
System.out.println("Newsletter sent to " + emailAddress + ": " + newsletterResponse.messageId());
}
// snippet-end:[sesv2.java2.newsletter.SendEmail.template]
} catch (NotFoundException e) {
// If the contact list does not exist, fail the workflow and inform the user
System.err.println("The contact list is missing. Please create the contact list and try again.");
} catch (AccountSuspendedException e) {
// If the account is suspended, fail the workflow and inform the user
System.err.println("Your account is suspended. Please resolve the issue and try again.");
} catch (MailFromDomainNotVerifiedException e) {
// If the sending domain is not verified, fail the workflow and inform the user
System.err.println("The sending domain is not verified. Please verify your domain and try again.");
throw e;
} catch (MessageRejectedException e) {
// If the message is rejected due to invalid content, fail the workflow and
// inform the user
System.err.println("The message content is invalid. Please check your template and try again.");
throw e;
} catch (SendingPausedException e) {
// If sending is paused, fail the workflow and inform the user
System.err.println("Sending is currently paused for your account. Please resolve the issue and try again.");
throw e;
} catch (Exception e) {
System.err.println("Error occurred while sending the newsletter: " + e.getMessage());
e.printStackTrace();
}
} | @Test
public void test_sendCouponNewsletter_error_messageRejected() {
// Mock the necessary AWS SDK calls and responses
CreateEmailTemplateResponse templateResponse = CreateEmailTemplateResponse.builder().build();
when(sesClient.createEmailTemplate(any(CreateEmailTemplateRequest.class))).thenReturn(templateResponse);
ListContactsResponse contactListResponse = ListContactsResponse.builder()
.contacts(Contact.builder().emailAddress("user@example.com").build())
.build();
when(sesClient.listContacts(any(ListContactsRequest.class))).thenReturn(
contactListResponse);
when(sesClient.sendEmail(any(SendEmailRequest.class))).thenThrow(
MessageRejectedException.class);
try {
scenario.sendCouponNewsletter();
} catch (Exception e) {
}
String errorOutput = errContent.toString();
assertThat(errorOutput,
containsString("The message content is invalid. Please check your template and try again."));
} |
public static void loggedMute(CheckedRunnable runnable) {
try {
runnable.run();
} catch (Exception e) {
e.printStackTrace();
}
} | @Test
void loggedMuteShouldLogExceptionTraceBeforeSwallowingIt() {
var stream = new ByteArrayOutputStream();
System.setErr(new PrintStream(stream));
Mute.loggedMute(this::methodThrowingException);
assertTrue(stream.toString().contains(MESSAGE));
} |
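
A usage sketch: loggedMute prints the stack trace and swallows the checked exception, so the caller needs no try/catch (the Mute/CheckedRunnable utility shown above is assumed to be on the classpath):

public class MuteDemo {
    public static void main(String[] args) {
        // The checked IOException is logged to stderr and swallowed;
        // execution continues normally afterwards
        Mute.loggedMute(() -> {
            throw new java.io.IOException("disk unavailable");
        });
        System.out.println("still running");
    }
}
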
public static RecordBatchingStateRestoreCallback adapt(final StateRestoreCallback restoreCallback) {
Objects.requireNonNull(restoreCallback, "stateRestoreCallback must not be null");
if (restoreCallback instanceof RecordBatchingStateRestoreCallback) {
return (RecordBatchingStateRestoreCallback) restoreCallback;
} else if (restoreCallback instanceof BatchingStateRestoreCallback) {
return records -> {
final List<KeyValue<byte[], byte[]>> keyValues = new ArrayList<>();
for (final ConsumerRecord<byte[], byte[]> record : records) {
keyValues.add(new KeyValue<>(record.key(), record.value()));
}
((BatchingStateRestoreCallback) restoreCallback).restoreAll(keyValues);
};
} else {
return records -> {
for (final ConsumerRecord<byte[], byte[]> record : records) {
restoreCallback.restore(record.key(), record.value());
}
};
}
} | @Test
public void shouldPassRecordsThrough() {
final ArrayList<ConsumerRecord<byte[], byte[]>> actual = new ArrayList<>();
final RecordBatchingStateRestoreCallback callback = actual::addAll;
final RecordBatchingStateRestoreCallback adapted = adapt(callback);
final byte[] key1 = {1};
final byte[] value1 = {2};
final byte[] key2 = {3};
final byte[] value2 = {4};
final List<ConsumerRecord<byte[], byte[]>> recordList = asList(
new ConsumerRecord<>("topic1", 0, 0L, key1, value1),
new ConsumerRecord<>("topic2", 1, 1L, key2, value2)
);
adapted.restoreBatch(recordList);
validate(actual, recordList);
} |
@Override
public RFuture<Boolean> removeAsync(Object o) {
String name = getRawName(o);
return commandExecutor.writeAsync(name, codec, RedisCommands.SREM_SINGLE, name, encode(o));
} | @Test
public void testRemoveAsync() throws InterruptedException, ExecutionException {
RSet<Integer> set = redisson.getSet("simple");
set.add(1);
set.add(3);
set.add(7);
Assertions.assertTrue(set.removeAsync(1).get());
Assertions.assertFalse(set.contains(1));
assertThat(set).containsOnly(3, 7);
Assertions.assertFalse(set.removeAsync(1).get());
assertThat(set).containsOnly(3, 7);
set.removeAsync(3).get();
Assertions.assertFalse(set.contains(3));
assertThat(set).contains(7);
} |
@Override
public final long readLong() throws EOFException {
final long l = readLong(pos);
pos += LONG_SIZE_IN_BYTES;
return l;
} | @Test
public void testReadLongForPositionByteOrder() throws Exception {
long readLong1 = in.readLong(0, LITTLE_ENDIAN);
long readLong2 = in.readLong(2, BIG_ENDIAN);
long longB1 = Bits.readLong(INIT_DATA, 0, false);
long longB2 = Bits.readLong(INIT_DATA, 2, true);
assertEquals(longB1, readLong1);
assertEquals(longB2, readLong2);
} |
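
A JDK-only sketch of the endianness the test exercises: the same eight bytes decode to different long values depending on byte order (ByteBuffer stands in for the Bits helper):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class EndianDemo {
    public static void main(String[] args) {
        byte[] data = {0, 0, 0, 0, 0, 0, 0, 1};
        long big = ByteBuffer.wrap(data).order(ByteOrder.BIG_ENDIAN).getLong();
        long little = ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).getLong();
        System.out.println(big);    // 1
        System.out.println(little); // 72057594037927936, i.e. 1L << 56
    }
}
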
@Override
public void closeRewardActivity(Long id) {
// verify the activity exists
RewardActivityDO dbRewardActivity = validateRewardActivityExists(id);
if (dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) { // an already closed activity cannot be closed again
throw exception(REWARD_ACTIVITY_CLOSE_FAIL_STATUS_CLOSED);
}
if (dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.END.getStatus())) { // an activity that has already ended cannot be closed
throw exception(REWARD_ACTIVITY_CLOSE_FAIL_STATUS_END);
}
// update
RewardActivityDO updateObj = new RewardActivityDO().setId(id).setStatus(PromotionActivityStatusEnum.CLOSE.getStatus());
rewardActivityMapper.updateById(updateObj);
} | @Test
public void testCloseRewardActivity() {
// mock data
RewardActivityDO dbRewardActivity = randomPojo(RewardActivityDO.class, o -> o.setStatus(PromotionActivityStatusEnum.WAIT.getStatus()));
rewardActivityMapper.insert(dbRewardActivity); // @Sql: insert an existing record first
// prepare parameters
Long id = dbRewardActivity.getId();
// invoke
rewardActivityService.closeRewardActivity(id);
// verify the status
RewardActivityDO rewardActivity = rewardActivityMapper.selectById(id);
assertEquals(rewardActivity.getStatus(), PromotionActivityStatusEnum.CLOSE.getStatus());
} |
@Override
public boolean trySetComparator(Comparator<? super V> comparator) {
String className = comparator.getClass().getName();
String comparatorSign = className + ":" + calcClassSign(className);
Boolean res = get(commandExecutor.writeAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.SETNX, getComparatorKeyName(), comparatorSign));
if (res) {
this.comparator = comparator;
}
return res;
} | @Test
public void testTrySetComparator() {
RPriorityQueue<Integer> set = redisson.getPriorityQueue("set");
boolean setRes = set.trySetComparator(Collections.reverseOrder());
Assertions.assertTrue(setRes);
Assertions.assertTrue(set.add(1));
Assertions.assertTrue(set.add(2));
Assertions.assertTrue(set.add(3));
Assertions.assertTrue(set.add(4));
Assertions.assertTrue(set.add(5));
assertThat(set).containsExactly(5, 4, 3, 2, 1);
boolean setRes2 = set.trySetComparator(Collections.reverseOrder(Collections.reverseOrder()));
Assertions.assertFalse(setRes2);
assertThat(set).containsExactly(5, 4, 3, 2, 1);
set.clear();
boolean setRes3 = set.trySetComparator(Collections.reverseOrder(Collections.reverseOrder()));
Assertions.assertTrue(setRes3);
set.add(3);
set.add(1);
set.add(2);
assertThat(set).containsExactly(1, 2, 3);
} |
public void initialize() {
if ((!(loggerFactory instanceof LoggerContext))) {
System.err.println("Unable to initialize logback. It seems that slf4j is bound to an unexpected backend " + loggerFactory.getClass().getName());
return;
}
File logbackFile = new File(configDir, childLogbackConfigFile);
if (logbackFile.exists()) {
System.err.println("Using logback configuration from file " + logbackFile);
configureWith(logbackFile);
} else {
System.err.println("Could not find file `" + logbackFile + "'. Attempting to load from classpath.");
String resourcePath = "config/" + childLogbackConfigFile;
URL resource = getClass().getClassLoader().getResource(resourcePath);
if (resource == null) {
System.err.println("Could not find classpath resource `" + resourcePath + "'. Falling back to using a default logback configuration that writes to stdout.");
configureDefaultLogging();
} else {
System.err.println("Using classpath resource `" + resource + "'.");
configureWith(resource);
}
}
} | @Test
public void shouldUseDefaultConfigFromClasspathIfUserSpecifiedConfigFileIsNotFound() {
final URL[] initializeFromPropertyResource = {null};
LogConfigurator logConfigurator = new LogConfigurator("xxx", "logging-test-logback.xml") {
@Override
protected void configureWith(URL resource) {
initializeFromPropertyResource[0] = resource;
}
};
logConfigurator.initialize();
URL expectedResource = getClass().getClassLoader().getResource("config/logging-test-logback.xml");
assertThat(initializeFromPropertyResource[0], equalTo(expectedResource));
assertThat(stderr.toString(), containsString("Using classpath resource `" + expectedResource + "'"));
assertThat(stdout.toString(), is(""));
} |
public UsernamePasswordAuthenticationToken getAuthentication(final String token) {
final Jws<Claims> claimsJws = Jwts.parser()
.verifyWith(tokenConfigurationParameter.getPublicKey())
.build()
.parseSignedClaims(token);
final JwsHeader jwsHeader = claimsJws.getHeader();
final Claims payload = claimsJws.getPayload();
final Jwt jwt = new org.springframework.security.oauth2.jwt.Jwt(
token,
payload.getIssuedAt().toInstant(),
payload.getExpiration().toInstant(),
Map.of(
TokenClaims.TYP.getValue(), jwsHeader.getType(),
TokenClaims.ALGORITHM.getValue(), jwsHeader.getAlgorithm()
),
payload
);
final UserType userType = UserType.valueOf(payload.get(TokenClaims.USER_TYPE.getValue()).toString());
final List<SimpleGrantedAuthority> authorities = new ArrayList<>();
authorities.add(new SimpleGrantedAuthority(userType.name()));
return UsernamePasswordAuthenticationToken
.authenticated(jwt, null, authorities);
} | @Test
void givenToken_whenGetAuthentication_thenReturnAuthentication() {
// Given
String token = Jwts.builder()
.claim(TokenClaims.USER_ID.getValue(), "12345")
.claim(TokenClaims.USER_TYPE.getValue(), "ADMIN")
.issuedAt(new Date())
.expiration(new Date(System.currentTimeMillis() + 86400000L)) // 1 day expiration
.setHeaderParam(Header.TYPE, "JWT") // Add type information
.signWith(keyPair.getPrivate())
.compact();
final Jws<Claims> claimsJws = Jwts.parser()
.verifyWith(keyPair.getPublic())
.build()
.parseSignedClaims(token);
final JwsHeader jwsHeader = claimsJws.getHeader();
final Claims payload = claimsJws.getBody();
// Handle potential null values for jwsHeader
String tokenType = jwsHeader.getType() != null ? jwsHeader.getType() : "";
String algorithm = jwsHeader.getAlgorithm() != null ? jwsHeader.getAlgorithm() : "";
// Verify the created Jwt object
final org.springframework.security.oauth2.jwt.Jwt jwt = new org.springframework.security.oauth2.jwt.Jwt(
token,
payload.getIssuedAt().toInstant(),
payload.getExpiration().toInstant(),
Map.of(
TokenClaims.TYP.getValue(), tokenType,
TokenClaims.ALGORITHM.getValue(), algorithm
),
payload
);
final UserType userType = UserType.valueOf(payload.get(TokenClaims.USER_TYPE.getValue()).toString());
final List<SimpleGrantedAuthority> authorities = new ArrayList<>();
authorities.add(new SimpleGrantedAuthority(userType.name()));
// When
UsernamePasswordAuthenticationToken authentication = tokenService.getAuthentication(token);
// Then
assertThat(authentication).isNotNull();
assertThat(authentication.getAuthorities()).containsExactly(new SimpleGrantedAuthority("ADMIN"));
assertThat(authentication.getPrincipal()).isEqualTo(jwt);
} |
public ReliableTopicConfig setReadBatchSize(int readBatchSize) {
this.readBatchSize = checkPositive("readBatchSize", readBatchSize);
return this;
} | @Test(expected = IllegalArgumentException.class)
public void setReadBatchSize_whenNegative() {
ReliableTopicConfig config = new ReliableTopicConfig("foo");
config.setReadBatchSize(-1);
} |
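
A usage sketch of the positive-only contract, assuming Hazelcast's com.hazelcast.config.ReliableTopicConfig: checkPositive rejects zero as well as negative values.

import com.hazelcast.config.ReliableTopicConfig;

public class ReadBatchSizeDemo {
    public static void main(String[] args) {
        ReliableTopicConfig config = new ReliableTopicConfig("foo");
        config.setReadBatchSize(25); // fine: strictly positive
        try {
            config.setReadBatchSize(0); // zero is rejected too, not just negatives
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
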
public PutMessageResult putMessageToSpecificQueue(MessageExtBrokerInner messageExt) {
BrokerController masterBroker = this.brokerController.peekMasterBroker();
if (masterBroker != null) {
return masterBroker.getMessageStore().putMessage(messageExt);
} else if (this.brokerController.getBrokerConfig().isEnableSlaveActingMaster()
&& this.brokerController.getBrokerConfig().isEnableRemoteEscape()) {
try {
messageExt.setWaitStoreMsgOK(false);
final TopicPublishInfo topicPublishInfo = this.brokerController.getTopicRouteInfoManager().tryToFindTopicPublishInfo(messageExt.getTopic());
List<MessageQueue> mqs = topicPublishInfo.getMessageQueueList();
if (null == mqs || mqs.isEmpty()) {
return new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true);
}
String id = messageExt.getTopic() + messageExt.getStoreHost();
final int index = Math.floorMod(id.hashCode(), mqs.size());
MessageQueue mq = mqs.get(index);
messageExt.setQueueId(mq.getQueueId());
String brokerNameToSend = mq.getBrokerName();
String brokerAddrToSend = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInPublish(brokerNameToSend);
final SendResult sendResult = this.brokerController.getBrokerOuterAPI().sendMessageToSpecificBroker(
brokerAddrToSend, brokerNameToSend,
messageExt, this.getProducerGroup(messageExt), SEND_TIMEOUT);
return transformSendResult2PutResult(sendResult);
} catch (Exception e) {
LOG.error("sendMessageInFailover to remote failed", e);
return new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true);
}
} else {
LOG.warn("Put message to specific queue failed, enableSlaveActingMaster={}, enableRemoteEscape={}.",
this.brokerController.getBrokerConfig().isEnableSlaveActingMaster(), this.brokerController.getBrokerConfig().isEnableRemoteEscape());
return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
}
} | @Test
public void putMessageToSpecificQueueTest() {
// masterBroker is null
final PutMessageResult result1 = escapeBridge.putMessageToSpecificQueue(messageExtBrokerInner);
assert result1 != null;
assert PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL.equals(result1.getPutMessageStatus());
// masterBroker is not null
when(brokerController.peekMasterBroker()).thenReturn(brokerController);
Assertions.assertThatCode(() -> escapeBridge.putMessageToSpecificQueue(messageExtBrokerInner)).doesNotThrowAnyException();
} |
@Override
public T add(K name, V value) {
throw new UnsupportedOperationException("read only");
} | @Test
public void testAddStringValuesIterable() {
assertThrows(UnsupportedOperationException.class, new Executable() {
@Override
public void execute() {
HEADERS.add("name", Arrays.asList("value1", "value2"));
}
});
} |
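
The same fail-on-mutation pattern is available in the JDK's immutable collections; a minimal sketch for comparison:

import java.util.Map;

public class ReadOnlyDemo {
    public static void main(String[] args) {
        Map<String, String> headers = Map.of("name", "value"); // immutable view
        try {
            headers.put("other", "x"); // mutators throw, mirroring the read-only headers above
        } catch (UnsupportedOperationException e) {
            System.out.println("read only");
        }
    }
}
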
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
// reset retryStartTime for a new host
retryStartTime = 0;
// Get completed maps on 'host'
List<TaskAttemptID> maps = scheduler.getMapsForHost(host);
// Sanity check to catch hosts with only 'OBSOLETE' maps,
// especially at the tail of large jobs
if (maps.size() == 0) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
}
// List of maps to be fetched yet
Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);
// Construct the url and connect
URL url = getMapOutputURL(host, maps);
DataInputStream input = null;
try {
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
// Loop through available map-outputs and fetch them
// On any error, faildTasks is not null and we exit
// after putting back the remaining maps to the
// yet_to_be_fetched list and marking the failed tasks.
TaskAttemptID[] failedTasks = null;
while (!remaining.isEmpty() && failedTasks == null) {
try {
failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
} catch (IOException e) {
IOUtils.cleanupWithLogger(LOG, input);
// Setup connection again if disconnected by NM
connection.disconnect();
// Get map output from remaining tasks only.
url = getMapOutputURL(host, remaining);
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
}
}
if(failedTasks != null && failedTasks.length > 0) {
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
scheduler.hostFailed(host.getHostName());
for(TaskAttemptID left: failedTasks) {
scheduler.copyFailed(left, host, true, false);
}
}
// Sanity check
if (failedTasks == null && !remaining.isEmpty()) {
throw new IOException("server didn't return all expected map outputs: "
+ remaining.size() + " left.");
}
input.close();
input = null;
} finally {
if (input != null) {
IOUtils.cleanupWithLogger(LOG, input);
input = null;
}
for (TaskAttemptID left : remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
}
} | @Test
public void testCopyFromHostExtraBytes() throws Exception {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(
SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 14, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bout);
IFileOutputStream ios = new IFileOutputStream(dos);
header.write(dos);
ios.write("MAPDATA123".getBytes());
ios.finish();
ShuffleHeader header2 = new ShuffleHeader(map2ID.toString(), 14, 10, 1);
IFileOutputStream ios2 = new IFileOutputStream(dos);
header2.write(dos);
ios2.write("MAPDATA456".getBytes());
ios2.finish();
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
// 8 < 10 therefore there appear to be extra bytes in the IFileInputStream
IFileWrappedMapOutput<Text,Text> mapOut = new InMemoryMapOutput<Text, Text>(
job, map1ID, mm, 8, null, true);
IFileWrappedMapOutput<Text,Text> mapOut2 = new InMemoryMapOutput<Text, Text>(
job, map2ID, mm, 10, null, true );
when(mm.reserve(eq(map1ID), anyLong(), anyInt())).thenReturn(mapOut);
when(mm.reserve(eq(map2ID), anyLong(), anyInt())).thenReturn(mapOut2);
underTest.copyFromHost(host);
verify(allErrs).increment(1);
verify(ss).copyFailed(map1ID, host, true, false);
verify(ss, never()).copyFailed(map2ID, host, true, false);
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
} |
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String db2Type = typeDefine.getDataType().toUpperCase();
switch (db2Type) {
case DB2_BOOLEAN:
builder.sourceType(DB2_BOOLEAN);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case DB2_SMALLINT:
builder.sourceType(DB2_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case DB2_INT:
case DB2_INTEGER:
builder.sourceType(DB2_INT);
builder.dataType(BasicType.INT_TYPE);
break;
case DB2_BIGINT:
builder.sourceType(DB2_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case DB2_REAL:
builder.sourceType(DB2_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case DB2_DOUBLE:
builder.sourceType(DB2_DOUBLE);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
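// DECFLOAT has no exact SeaTunnel counterpart; mapping it to DOUBLE may lose precision for extreme values.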
case DB2_DECFLOAT:
builder.sourceType(DB2_DECFLOAT);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DB2_DECIMAL:
builder.sourceType(
String.format(
"%s(%s,%s)",
DB2_DECIMAL, typeDefine.getPrecision(), typeDefine.getScale()));
builder.dataType(
new DecimalType(
Math.toIntExact(typeDefine.getPrecision()), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case DB2_CHARACTER:
case DB2_CHAR:
builder.sourceType(String.format("%s(%d)", DB2_CHAR, typeDefine.getLength()));
// For char/varchar this length is in bytes
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_VARCHAR:
builder.sourceType(String.format("%s(%d)", DB2_VARCHAR, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_LONG_VARCHAR:
builder.sourceType(DB2_LONG_VARCHAR);
// default length is 32700
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_CLOB:
builder.sourceType(String.format("%s(%d)", DB2_CLOB, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
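// GRAPHIC/VARGRAPHIC/DBCLOB lengths count double-byte characters; charTo4ByteLength sizes them conservatively at 4 bytes per character.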
case DB2_GRAPHIC:
builder.sourceType(String.format("%s(%d)", DB2_GRAPHIC, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_VARGRAPHIC:
builder.sourceType(String.format("%s(%d)", DB2_VARGRAPHIC, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_DBCLOB:
builder.sourceType(String.format("%s(%d)", DB2_DBCLOB, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_XML:
builder.sourceType(DB2_XML);
builder.columnLength((long) Integer.MAX_VALUE);
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_BINARY:
builder.sourceType(String.format("%s(%d)", DB2_BINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_VARBINARY:
builder.sourceType(String.format("%s(%d)", DB2_VARBINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_BLOB:
builder.sourceType(String.format("%s(%d)", DB2_BLOB, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_DATE:
builder.sourceType(DB2_DATE);
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case DB2_TIME:
builder.sourceType(DB2_TIME);
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
break;
case DB2_TIMESTAMP:
builder.sourceType(String.format("%s(%d)", DB2_TIMESTAMP, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.DB_2, db2Type, typeDefine.getName());
}
return builder.build();
} | @Test
public void testConvertTime() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder().name("test").columnType("TIME").dataType("TIME").build();
Column column = DB2TypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(LocalTimeType.LOCAL_TIME_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
} |
public static boolean isWebdavMethod(String method) {
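// Null-safe: anything matching the precompiled WEBDAV_METHOD_PATTERN is treated as a WebDAV verb.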
return method != null && WEBDAV_METHOD_PATTERN.matcher(method).matches();
} | @Test
public void testIsWebdavMethod() {
for (String method : VALID_METHODS) {
Assertions.assertTrue(HttpWebdav.isWebdavMethod(method), method + " is a HttpWebdav method");
}
for (String method : INVALID_METHODS) {
Assertions.assertFalse(HttpWebdav.isWebdavMethod(method), method + " is not a HttpWebdav method");
}
} |
@Override
protected Object createBody() {
if (command instanceof MessageRequest) {
MessageRequest msgRequest = (MessageRequest) command;
byte[] shortMessage = msgRequest.getShortMessage();
if (shortMessage == null || shortMessage.length == 0) {
return null;
}
Alphabet alphabet = Alphabet.parseDataCoding(msgRequest.getDataCoding());
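// An 8-bit alphabet indicates a binary payload; hand back the raw bytes undecoded.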
if (SmppUtils.is8Bit(alphabet)) {
return shortMessage;
}
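// Prefer the charset carried on the exchange; fall back to the configured encoding when it is missing or unsupported.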
String encoding = ExchangeHelper.getCharsetName(getExchange(), false);
if (ObjectHelper.isEmpty(encoding) || !Charset.isSupported(encoding)) {
encoding = configuration.getEncoding();
}
try {
return new String(shortMessage, encoding);
} catch (UnsupportedEncodingException e) {
LOG.info("Unsupported encoding \"{}\". Using system default encoding.", encoding);
}
return new String(shortMessage);
}
return null;
} | @Test
public void createBodyShouldReturnNullIfTheCommandIsNotAMessageRequest() {
AlertNotification command = new AlertNotification();
message = new SmppMessage(camelContext, command, new SmppConfiguration());
assertNull(message.createBody());
} |
public void validateTopic(Resource topic) {
validateTopic(topic.getName());
} | @Test
public void testValidateTopic() {
assertThrows(GrpcProxyException.class, () -> grpcValidator.validateTopic(""));
assertThrows(GrpcProxyException.class, () -> grpcValidator.validateTopic("rmq_sys_xxxx"));
grpcValidator.validateTopic("topicName");
} |
@Override
public void deleteProjectProperty(DbSession session, String key, String projectUuid, String projectKey,
String projectName, String qualifier) {
// do nothing
} | @Test
public void deleteProjectProperty1() {
underTest.deleteProjectProperty(dbSession, null, null, null, null, null);
assertNoInteraction();
} |
@VisibleForTesting
SocketAddress getTcpServerLocalAddress() {
return tcpChannel.localAddress();
} | @Test(timeout = 10000)
public void testIdle() throws InterruptedException, IOException {
Socket s = new Socket();
try {
s.connect(pm.getTcpServerLocalAddress());
int i = 0;
while (!s.isConnected() && i < RETRY_TIMES) {
++i;
Thread.sleep(SHORT_TIMEOUT_MILLISECONDS);
}
Assert.assertTrue("Failed to connect to the server", s.isConnected()
&& i < RETRY_TIMES);
int b = s.getInputStream().read();
Assert.assertTrue("The server failed to disconnect", b == -1);
} finally {
s.close();
}
} |
@Nonnull
WanPublisherState getWanPublisherState(byte id) {
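// The byte id mirrors the serialized WanPublisherState value; unknown ids fall back to the configured default state.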
switch (id) {
case 0:
return WanPublisherState.REPLICATING;
case 1:
return WanPublisherState.PAUSED;
case 2:
return WanPublisherState.STOPPED;
default:
return WanBatchPublisherConfig.DEFAULT_INITIAL_PUBLISHER_STATE;
}
} | @Test
public void testGetWanPublisherState() {
assertEquals(WanPublisherState.REPLICATING, transformer.getWanPublisherState((byte) 0));
assertEquals(WanPublisherState.PAUSED, transformer.getWanPublisherState((byte) 1));
assertEquals(WanPublisherState.STOPPED, transformer.getWanPublisherState((byte) 2));
assertEquals(WanBatchPublisherConfig.DEFAULT_INITIAL_PUBLISHER_STATE, transformer.getWanPublisherState((byte) 3));
} |
@Override
public MapSettings setProperty(String key, String value) {
return (MapSettings) super.setProperty(key, value);
} | @Test
public void setProperty_methods_trims_value() {
Settings underTest = new MapSettings();
Random random = new Random();
String blankBefore = blank(random);
String blankAfter = blank(random);
String key = randomAlphanumeric(3);
String value = randomAlphanumeric(3);
underTest.setProperty(key, blankBefore + value + blankAfter);
assertThat(underTest.getString(key)).isEqualTo(value);
} |