focal_method | test_case
---|---
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
// stop in reverse order of start
Exception firstException = null;
List<Service> services = getServices();
for (int i = numOfServicesStarted - 1; i >= 0; i--) {
Service service = services.get(i);
if (LOG.isDebugEnabled()) {
LOG.debug("Stopping service #" + i + ": " + service);
}
STATE state = service.getServiceState();
//depending on the stop policy
if (state == STATE.STARTED
|| (!stopOnlyStartedServices && state == STATE.INITED)) {
Exception ex = ServiceOperations.stopQuietly(LOG, service);
if (ex != null && firstException == null) {
firstException = ex;
}
}
}
//after stopping all services, rethrow the first exception raised
if (firstException != null) {
throw ServiceStateException.convert(firstException);
}
} | @Test(timeout = 10000)
public void testAddInitedChildInStart() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService child = new BreakableService();
child.init(new Configuration());
parent.init(new Configuration());
parent.start();
AddSiblingService.addChildToService(parent, child);
assertInState(STATE.INITED, child);
parent.stop();
assertInState(STATE.STOPPED, child);
} |
@Override
public void upgrade() {
if (clusterConfigService.get(MigrationCompleted.class) != null) {
LOG.debug("Migration already completed.");
return;
}
final Set<String> viewIds = new HashSet<>();
final FindIterable<Document> documents = viewsCollection.find();
boolean viewMigrated;
for (final Document view : documents) {
viewMigrated = false;
final Document states = view.get("state", Document.class);
for (Map.Entry<String, Object> obj : states.entrySet()) {
final Document state = (Document) obj.getValue();
if (state.get("widgets") instanceof List) {
@SuppressWarnings("unchecked")
final List<Document> widgets = (List) state.get("widgets");
for (final Document widget : widgets) {
final String type = widget.getString("type");
if (type.equals("aggregation")) {
final Document config = widget.get("config", Document.class);
final Document formatSettings = config.get("formatting_settings", Document.class);
if (formatSettings == null) {
continue;
}
final Object charColorsObj = formatSettings.get("chart_colors");
if (charColorsObj == null) {
continue;
}
viewMigrated = true;
@SuppressWarnings({"unchecked", "rawtypes"})
final Map<String, String> chartColors =
(Map) charColorsObj;
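// convert the legacy field-name -> color map into a list of {field_name, chart_color} documents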
List<Document> chartColorSettings = chartColors.entrySet().stream().map(entry -> {
final Document chartColorFieldSetting = new Document();
chartColorFieldSetting.put("field_name", entry.getKey());
chartColorFieldSetting.put("chart_color", entry.getValue());
return chartColorFieldSetting;
}).collect(Collectors.toList());
formatSettings.put("chart_colors", chartColorSettings);
config.put("formatting_settings", formatSettings);
widget.put("config", config);
}
}
if (viewMigrated) {
state.put("widgets", widgets);
}
}
}
if (viewMigrated) {
viewsCollection.updateOne(new BasicDBObject("_id", view.getObjectId("_id")), new Document("$set", view));
final String viewId = view.getObjectId("_id").toString();
viewIds.add(viewId);
}
}
LOG.info("Migration completed. {} views where migrated.", viewIds.size());
clusterConfigService.write(V20190127111728_MigrateWidgetFormatSettings.MigrationCompleted.create(
viewIds.size(), viewIds));
} | @Test
@MongoDBFixtures("V20190127111728_MigrateWidgetFormatSettings.json")
public void testMigrationWithOneChartColorMapping() {
final BasicDBObject dbQuery1 = new BasicDBObject();
dbQuery1.put("_id", new ObjectId("5e2ee372b22d7970576b2eb3"));
final MongoCollection<Document> collection = mongoDB.mongoConnection()
.getMongoDatabase()
.getCollection("views");
migration.upgrade();
final FindIterable<Document> views = collection.find(dbQuery1);
final Document view1 = views.first();
@SuppressWarnings("unchecked")
final List<Document> widgets1 = (List) view1.get("state", Document.class).get("2c67cc0f-c62e-47c1-8b70-e3198925e6bc", Document.class).get("widgets");
assertThat(widgets1.size()).isEqualTo(2);
Set<Document> aggregationWidgets = widgets1.stream().filter(w -> w.getString("type")
.equals("aggregation")).collect(Collectors.toSet());
assertThat(aggregationWidgets.size()).isEqualTo(1);
final Document aggregationWidget = aggregationWidgets.iterator().next();
final Document config = aggregationWidget.get("config", Document.class);
final Document formattingSettings = config.get("formatting_settings", Document.class);
@SuppressWarnings("unchecked")
final List<Document> chartColors = (List) formattingSettings.get("chart_colors", List.class);
assertThat(chartColors.size()).isEqualTo(1);
final Document chartColor = chartColors.get(0);
assertThat(chartColor.getString("field_name")).isEqualTo("count()");
assertThat(chartColor.getString("chart_color")).isEqualTo("#e91e63");
} |
boolean shouldReplicateTopic(String topic) {
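// replicate topics accepted by the topic filter, plus heartbeats topics; never internal topics or replication cycles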
return (topicFilter.shouldReplicateTopic(topic) || replicationPolicy.isHeartbeatsTopic(topic))
&& !replicationPolicy.isInternalTopic(topic) && !isCycle(topic);
} | @Test
public void testReplicatesHeartbeatsByDefault() {
MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"),
new DefaultReplicationPolicy(), new DefaultTopicFilter(), new DefaultConfigPropertyFilter());
assertTrue(connector.shouldReplicateTopic("heartbeats"), "should replicate heartbeats");
assertTrue(connector.shouldReplicateTopic("us-west.heartbeats"), "should replicate upstream heartbeats");
} |
public static AuditActor system(@Nonnull NodeId nodeId) {
return new AutoValue_AuditActor(URN_GRAYLOG_NODE + requireNonNull(nodeId, "nodeId must not be null").getNodeId());
} | @Test(expected = NullPointerException.class)
public void testNullSystem() {
AuditActor.system(null);
} |
public static Set<Integer> getProjectedIds(Schema schema) {
return ImmutableSet.copyOf(getIdsInternal(schema.asStruct(), true));
} | @Test
public void testGetProjectedIds() {
Schema schema =
new Schema(
Lists.newArrayList(
required(10, "a", Types.IntegerType.get()),
required(11, "A", Types.IntegerType.get()),
required(35, "emptyStruct", Types.StructType.of()),
required(
12,
"someStruct",
Types.StructType.of(
required(13, "b", Types.IntegerType.get()),
required(14, "B", Types.IntegerType.get()),
required(
15,
"anotherStruct",
Types.StructType.of(
required(16, "c", Types.IntegerType.get()),
required(17, "C", Types.IntegerType.get())))))));
Set<Integer> expectedIds = Sets.newHashSet(10, 11, 35, 12, 13, 14, 15, 16, 17);
Set<Integer> actualIds = TypeUtil.getProjectedIds(schema);
assertThat(actualIds).isEqualTo(expectedIds);
} |
public CaseInsensitiveMap() {
this(DEFAULT_INITIAL_CAPACITY);
} | @Test
public void caseInsensitiveMapTest() {
CaseInsensitiveMap<String, String> map = new CaseInsensitiveMap<>();
map.put("aAA", "OK");
assertEquals("OK", map.get("aaa"));
assertEquals("OK", map.get("AAA"));
} |
public static Gson instance() {
return SingletonHolder.INSTANCE;
} | @Test
void rejectsSerializationOfDESCipherProvider() {
final DESCipherProvider dcp = new DESCipherProvider(new TempSystemEnvironment());
try {
final IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () ->
Serialization.instance().toJson(dcp));
assertEquals(format("Refusing to serialize a %s instance and leak security details!", DESCipherProvider.class.getName()), e.getMessage());
} finally {
dcp.removeCachedKey();
}
} |
@Subscribe
public void onVarbitChanged(VarbitChanged varbitChanged)
{
if (varbitChanged.getVarbitId() == Varbits.WINTERTODT_TIMER)
{
int timeToNotify = config.roundNotification();
// Sometimes wt var updates are sent to players even after leaving wt.
// So only notify if in wt or after just having left.
if (timeToNotify > 0 && (isInWintertodt || needRoundNotif))
{
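// convert timer units to seconds (value * 30 / 50 = value * 0.6)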
int timeInSeconds = varbitChanged.getValue() * 30 / 50;
int prevTimeInSeconds = previousTimerValue * 30 / 50;
log.debug("Seconds left until round start: {}", timeInSeconds);
if (prevTimeInSeconds > timeToNotify && timeInSeconds <= timeToNotify)
{
notifier.notify("Wintertodt round is about to start");
needRoundNotif = false;
}
}
previousTimerValue = varbitChanged.getValue();
}
} | @Test
public void matchStartingNotification_shouldNotNotify_whenNoneOptionSelected()
{
when(config.roundNotification()).thenReturn(5);
VarbitChanged varbitChanged = new VarbitChanged();
varbitChanged.setVarbitId(Varbits.WINTERTODT_TIMER);
wintertodtPlugin.onVarbitChanged(varbitChanged);
verify(notifier, times(0)).notify("Wintertodt round is about to start");
} |
static WebSocketServerHandshaker getHandshaker(Channel channel) {
return channel.attr(HANDSHAKER_ATTR_KEY).get();
} | @Test
public void testCheckInvalidWebSocketPath() {
HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1)
.method(HttpMethod.GET)
.uri("/testabc")
.key(HttpHeaderNames.SEC_WEBSOCKET_KEY)
.connection("Upgrade")
.upgrade(HttpHeaderValues.WEBSOCKET)
.version13()
.build();
WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/test")
.checkStartsWith(true)
.build();
EmbeddedChannel ch = new EmbeddedChannel(
new WebSocketServerProtocolHandler(config),
new HttpRequestDecoder(),
new HttpResponseEncoder(),
new MockOutboundHandler());
ch.writeInbound(httpRequest);
ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class);
assertNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel()));
} |
private static Slice decompressZstd(Slice input, int uncompressedSize)
{
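// allocate a buffer of exactly the known uncompressed size and wrap it without copying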
byte[] buffer = new byte[uncompressedSize];
decompress(new ZstdDecompressor(), input, 0, input.length(), buffer, 0);
return wrappedBuffer(buffer);
} | @Test
public void testDecompressZSTD()
throws IOException
{
performTest(ZSTD, 0);
performTest(ZSTD, 1);
performTest(ZSTD, 100);
performTest(ZSTD, 256);
performTest(ZSTD, 512);
performTest(ZSTD, 1024);
} |
@Override
public byte convertToByte(CharSequence value) {
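// fast path: a one-character AsciiString already holds the raw byte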
if (value instanceof AsciiString && value.length() == 1) {
return ((AsciiString) value).byteAt(0);
}
return Byte.parseByte(value.toString());
} | @Test
public void testByteFromEmptyAsciiString() {
assertThrows(NumberFormatException.class, new Executable() {
@Override
public void execute() {
converter.convertToByte(AsciiString.EMPTY_STRING);
}
});
} |
public List<Unstructured> load() {
List<Unstructured> unstructuredList = new ArrayList<>();
process((properties, map) -> {
Unstructured unstructured = JsonUtils.mapToObject(map, Unstructured.class);
unstructuredList.add(unstructured);
});
return unstructuredList;
} | @Test
void loadTest() {
Resource[] resources = yamlResources.toArray(Resource[]::new);
YamlUnstructuredLoader yamlUnstructuredLoader = new YamlUnstructuredLoader(resources);
List<Unstructured> unstructuredList = yamlUnstructuredLoader.load();
assertThat(unstructuredList).isNotNull();
assertThat(unstructuredList).hasSize(3);
assertThat(JsonUtils.objectToJson(unstructuredList)).isEqualToIgnoringWhitespace("""
[
{
"apiVersion": "v1alpha1",
"kind": "Fake",
"metadata": {
"name": "test1"
},
"hello": {
"world": "halo"
}
},
{
"apiVersion": "v1alpha1",
"kind": "Fake",
"metadata": {
"name": "test2"
},
"hello": {
"world": "haha"
}
},
{
"apiVersion": "v1alpha1",
"kind": "Fake",
"metadata": {
"name": "test2"
},
"hello": {
"world": "bang"
}
}
]
""");
}
@Test
void loadIgnore() {
InMemoryResource resource = new InMemoryResource(notSpecYaml);
YamlUnstructuredLoader yamlUnstructuredLoader = new YamlUnstructuredLoader(resource);
List<Unstructured> unstructuredList = yamlUnstructuredLoader.load();
assertThat(unstructuredList).isEmpty();
}
} |
@Override
public MethodConfig build() {
MethodConfig methodConfig = new MethodConfig();
super.build(methodConfig);
methodConfig.setArguments(arguments);
methodConfig.setDeprecated(deprecated);
methodConfig.setExecutes(executes);
methodConfig.setName(name);
methodConfig.setOninvoke(oninvoke);
methodConfig.setOninvokeMethod(oninvokeMethod);
methodConfig.setOnreturn(onreturn);
methodConfig.setOnreturnMethod(onreturnMethod);
methodConfig.setOnthrow(onthrow);
methodConfig.setOnthrowMethod(onthrowMethod);
methodConfig.setReturn(isReturn);
methodConfig.setService(service);
methodConfig.setServiceId(serviceId);
methodConfig.setSticky(sticky);
methodConfig.setReliable(reliable);
methodConfig.setStat(stat);
methodConfig.setRetry(retry);
return methodConfig;
} | @Test
void build() {
ArgumentConfig argument = new ArgumentConfig();
MethodBuilder builder = MethodBuilder.newBuilder();
builder.name("name")
.stat(1)
.retry(true)
.reliable(false)
.executes(2)
.deprecated(true)
.sticky(false)
.isReturn(true)
.oninvoke("on-invoke-object")
.oninvokeMethod("on-invoke-method")
.service("service")
.onreturn("on-return-object")
.onreturnMethod("on-return-method")
.serviceId("serviceId")
.onthrow("on-throw-object")
.onthrowMethod("on-throw-method")
.addArgument(argument);
MethodConfig config = builder.build();
MethodConfig config2 = builder.build();
Assertions.assertTrue(config.isRetry());
Assertions.assertFalse(config.isReliable());
Assertions.assertTrue(config.getDeprecated());
Assertions.assertFalse(config.getSticky());
Assertions.assertTrue(config.isReturn());
Assertions.assertEquals(1, config.getStat());
Assertions.assertEquals(2, config.getExecutes());
Assertions.assertEquals("on-invoke-object", config.getOninvoke());
Assertions.assertEquals("on-invoke-method", config.getOninvokeMethod());
Assertions.assertEquals("on-return-object", config.getOnreturn());
Assertions.assertEquals("on-return-method", config.getOnreturnMethod());
Assertions.assertEquals("on-throw-object", config.getOnthrow());
Assertions.assertEquals("on-throw-method", config.getOnthrowMethod());
Assertions.assertEquals("name", config.getName());
Assertions.assertEquals("service", config.getService());
Assertions.assertEquals("serviceId", config.getServiceId());
Assertions.assertNotSame(config, config2);
} |
@Override
public Num calculate(BarSeries series, Position position) {
return series.one();
} | @Test
public void calculateWithTwoPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
Trade.buyAt(3, series), Trade.sellAt(5, series));
AnalysisCriterion buyAndHold = getCriterion();
assertNumEquals(2, buyAndHold.calculate(series, tradingRecord));
} |
public Filter parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
if (!filterExpression.contains(FIELD_AND_VALUE_SEPARATOR)) {
throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
}
final String[] split = filterExpression.split(FIELD_AND_VALUE_SEPARATOR, 2);
final String fieldPart = split[0];
if (fieldPart == null || fieldPart.isEmpty()) {
throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
}
final String valuePart = split[1];
if (valuePart == null || valuePart.isEmpty()) {
throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
}
final EntityAttribute attributeMetaData = getAttributeMetaData(attributes, fieldPart);
final SearchQueryField.Type fieldType = attributeMetaData.type();
if (isRangeValueExpression(valuePart, fieldType)) {
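// a leading separator leaves the lower bound open, a trailing one leaves the upper bound open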
if (valuePart.startsWith(RANGE_VALUES_SEPARATOR)) {
return new RangeFilter(attributeMetaData.id(),
null,
extractValue(fieldType, valuePart.substring(RANGE_VALUES_SEPARATOR.length()))
);
} else if (valuePart.endsWith(RANGE_VALUES_SEPARATOR)) {
return new RangeFilter(attributeMetaData.id(),
extractValue(fieldType, valuePart.substring(0, valuePart.length() - RANGE_VALUES_SEPARATOR.length())),
null
);
} else {
final String[] ranges = valuePart.split(RANGE_VALUES_SEPARATOR);
return new RangeFilter(attributeMetaData.id(),
extractValue(fieldType, ranges[0]),
extractValue(fieldType, ranges[1])
);
}
} else {
return new SingleValueFilter(attributeMetaData.id(), extractValue(fieldType, valuePart));
}
} | @Test
void parsesFilterExpressionCorrectlyForStringType() {
assertEquals(new SingleValueFilter("owner", "baldwin"),
toTest.parseSingleExpression("owner:baldwin",
List.of(EntityAttribute.builder()
.id("owner")
.title("Owner")
.filterable(true)
.build())
));
} |
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
defaultMQAdminExt.start();
String group = commandLine.getOptionValue('g').trim();
ConsumerConnection cc = commandLine.hasOption('b')
? defaultMQAdminExt.examineConsumerConnectionInfo(group, commandLine.getOptionValue('b').trim())
: defaultMQAdminExt.examineConsumerConnectionInfo(group);
System.out.printf("%-36s %-22s %-10s %s%n", "#ClientId", "#ClientAddr", "#Language", "#Version");
for (Connection conn : cc.getConnectionSet()) {
System.out.printf("%-36s %-22s %-10s %s%n",
conn.getClientId(),
conn.getClientAddr(),
conn.getLanguage(),
MQVersion.getVersionDesc(conn.getVersion())
);
}
System.out.printf("%nBelow is subscription:\n");
Iterator<Entry<String, SubscriptionData>> it = cc.getSubscriptionTable().entrySet().iterator();
System.out.printf("%-20s %s%n", "#Topic", "#SubExpression");
while (it.hasNext()) {
Entry<String, SubscriptionData> entry = it.next();
SubscriptionData sd = entry.getValue();
System.out.printf("%-20s %s%n",
sd.getTopic(),
sd.getSubString()
);
}
System.out.printf("\n");
System.out.printf("ConsumeType: %s%n", cc.getConsumeType());
System.out.printf("MessageModel: %s%n", cc.getMessageModel());
System.out.printf("ConsumeFromWhere: %s%n", cc.getConsumeFromWhere());
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
} | @Test
public void testExecute() throws SubCommandException {
ConsumerConnectionSubCommand cmd = new ConsumerConnectionSubCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-g default-consumer-group", "-b localhost:" + brokerMocker.listenPort()};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
} |
@Override
public SnowflakeTableMetadata loadTableMetadata(SnowflakeIdentifier tableIdentifier) {
Preconditions.checkArgument(
tableIdentifier.type() == SnowflakeIdentifier.Type.TABLE,
"loadTableMetadata requires a TABLE identifier, got '%s'",
tableIdentifier);
SnowflakeTableMetadata tableMeta;
try {
final String finalQuery = "SELECT SYSTEM$GET_ICEBERG_TABLE_INFORMATION(?) AS METADATA";
tableMeta =
connectionPool.run(
conn ->
queryHarness.query(
conn,
finalQuery,
TABLE_METADATA_RESULT_SET_HANDLER,
tableIdentifier.toIdentifierString()));
} catch (SQLException e) {
throw snowflakeExceptionToIcebergException(
tableIdentifier,
e,
String.format("Failed to get table metadata for '%s'", tableIdentifier));
} catch (InterruptedException e) {
throw new UncheckedInterruptedException(
e, "Interrupted while getting table metadata for '%s'", tableIdentifier);
}
return tableMeta;
} | @SuppressWarnings("unchecked")
@Test
public void testGetS3TableMetadata() throws SQLException {
when(mockResultSet.next()).thenReturn(true);
when(mockResultSet.getString("METADATA"))
.thenReturn(
"{\"metadataLocation\":\"s3://tab1/metadata/v3.metadata.json\",\"status\":\"success\"}");
SnowflakeTableMetadata actualMetadata =
snowflakeClient.loadTableMetadata(
SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_1", "TABLE_1"));
verify(mockQueryHarness)
.query(
eq(mockConnection),
eq("SELECT SYSTEM$GET_ICEBERG_TABLE_INFORMATION(?) AS METADATA"),
any(JdbcSnowflakeClient.ResultSetParser.class),
eq("DB_1.SCHEMA_1.TABLE_1"));
SnowflakeTableMetadata expectedMetadata =
new SnowflakeTableMetadata(
"s3://tab1/metadata/v3.metadata.json",
"s3://tab1/metadata/v3.metadata.json",
"success",
null);
assertThat(actualMetadata).isEqualTo(expectedMetadata);
} |
@Override
public PageResult<AdminUserDO> getUserPage(UserPageReqVO reqVO) {
return userMapper.selectPage(reqVO, getDeptCondition(reqVO.getDeptId()));
} | @Test
public void testGetUserPage() {
// mock data
AdminUserDO dbUser = initGetUserPageData();
// prepare parameters
UserPageReqVO reqVO = new UserPageReqVO();
reqVO.setUsername("tu");
reqVO.setMobile("1560");
reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
reqVO.setCreateTime(buildBetweenTime(2020, 12, 1, 2020, 12, 24));
reqVO.setDeptId(1L); // here, 1L is the parent department of 2L
// mock the method
List<DeptDO> deptList = newArrayList(randomPojo(DeptDO.class, o -> o.setId(2L)));
when(deptService.getChildDeptList(eq(reqVO.getDeptId()))).thenReturn(deptList);
// invoke
PageResult<AdminUserDO> pageResult = userService.getUserPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbUser, pageResult.getList().get(0));
} |
public void fillMaxSpeed(Graph graph, EncodingManager em) {
// In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info,
// but now we have it and can fill in the country-dependent max_speed value where missing.
EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL);
} | @Test
public void testRoundabout() {
ReaderWay way = new ReaderWay(0L);
way.setTag("country", Country.CRI);
way.setTag("highway", "primary");
EdgeIteratorState edge = createEdge(way).set(urbanDensity, CITY);
calc.fillMaxSpeed(graph, em);
assertEquals(50, edge.get(maxSpeedEnc), 1);
way = new ReaderWay(0L);
way.setTag("country", Country.CRI);
way.setTag("highway", "primary");
way.setTag("junction", "roundabout");
edge = createEdge(way).set(urbanDensity, CITY);
calc.fillMaxSpeed(graph, em);
assertEquals(30, edge.get(maxSpeedEnc), 1);
} |
@Override
public AuthorizationPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
Capabilities capabilities = capabilities(descriptor.id());
PluggableInstanceSettings authConfigSettings = authConfigSettings(descriptor.id());
PluggableInstanceSettings roleSettings = roleSettings(descriptor.id(), capabilities);
Image image = image(descriptor.id());
return new AuthorizationPluginInfo(descriptor, authConfigSettings, roleSettings, image, capabilities);
} | @Test
public void shouldNotHaveRoleSettingsInPluginInfoIfPluginCannotAuthorize() {
GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
Capabilities capabilities = new Capabilities(SupportedAuthType.Password, true, false, false);
when(extension.getCapabilities(descriptor.id())).thenReturn(capabilities);
AuthorizationPluginInfo pluginInfo = new AuthorizationPluginInfoBuilder(extension).pluginInfoFor(descriptor);
assertNull(pluginInfo.getRoleSettings());
} |
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
final boolean exists = Files.exists(session.toPath(file), LinkOption.NOFOLLOW_LINKS);
if(exists) {
if(Files.isSymbolicLink(session.toPath(file))) {
return true;
}
if(!file.isRoot()) {
try {
if(!StringUtils.equals(session.toPath(file).toFile().getCanonicalFile().getName(), file.getName())) {
return false;
}
}
catch(IOException e) {
log.warn(String.format("Failure obtaining canonical file reference for %s", file));
}
}
}
return exists;
} | @Test
public void testFindNotFound() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
assertFalse(new LocalFindFeature(session).find(new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.file))));
session.close();
} |
public static String toHexString(byte[] src) {
return toHexString(src, 0, src.length);
} | @Test
public void testToHexString() {
assertThat(toHexString(new byte[] { 0 }), is("0"));
assertThat(toHexString(new byte[] { 1 }), is("1"));
assertThat(toHexString(new byte[] { 0, 0 }), is("0"));
assertThat(toHexString(new byte[] { 1, 0 }), is("100"));
assertThat(toHexString(EmptyArrays.EMPTY_BYTES), is(""));
} |
public Connector createConnector(Props props) {
Connector connector = new Connector(HTTP_PROTOCOL);
connector.setURIEncoding("UTF-8");
connector.setProperty("address", props.value(WEB_HOST.getKey(), "0.0.0.0"));
connector.setProperty("socket.soReuseAddress", "true");
// See Tomcat configuration reference: https://tomcat.apache.org/tomcat-9.0-doc/config/http.html
connector.setProperty("relaxedQueryChars", "\"<>[\\]^`{|}");
connector.setProperty("maxHttpHeaderSize", String.valueOf(MAX_HTTP_HEADER_SIZE_BYTES));
connector.setMaxPostSize(MAX_POST_SIZE);
configurePort(connector, props);
configurePool(props, connector);
configureCompression(connector);
return connector;
} | @Test
public void createConnector_shouldUseHardcodedPropertiesWhereNeeded() {
Props props = getEmptyProps();
Connector connector = tomcatHttpConnectorFactory.createConnector(props);
// General properties
assertThat(connector.getURIEncoding()).isEqualTo("UTF-8");
assertThat(connector.getProperty("socket.soReuseAddress")).isEqualTo("true");
assertThat(connector.getProperty("relaxedQueryChars")).isEqualTo("\"<>[\\]^`{|}");
assertThat(connector.getProperty("maxHttpHeaderSize")).isEqualTo(49152);
assertThat(connector.getMaxPostSize()).isEqualTo(-1);
// Compression properties
assertThat(connector.getProperty("compression")).isEqualTo("on");
assertThat(connector.getProperty("compressionMinSize")).isEqualTo(1024);
assertThat(connector.getProperty("compressibleMimeType")).isEqualTo("text/html,text/xml,text/plain,text/css,application/json,application/javascript,text/javascript");
} |
@Override
public void execute( RunConfiguration runConfiguration, ExecutionConfiguration executionConfiguration,
AbstractMeta meta, VariableSpace variableSpace, Repository repository ) throws KettleException {
DefaultRunConfiguration defaultRunConfiguration = (DefaultRunConfiguration) runConfiguration;
if ( executionConfiguration instanceof TransExecutionConfiguration ) {
configureTransExecution( (TransExecutionConfiguration) executionConfiguration, defaultRunConfiguration,
variableSpace, meta, repository );
}
if ( executionConfiguration instanceof JobExecutionConfiguration ) {
configureJobExecution( (JobExecutionConfiguration) executionConfiguration, defaultRunConfiguration, variableSpace,
meta, repository );
}
variableSpace.setVariable( "engine", null );
variableSpace.setVariable( "engine.remote", null );
variableSpace.setVariable( "engine.scheme", null );
variableSpace.setVariable( "engine.url", null );
} | @Test
public void testExecutePentahoJob() throws Exception {
DefaultRunConfiguration defaultRunConfiguration = new DefaultRunConfiguration();
defaultRunConfiguration.setName( "Default Configuration" );
defaultRunConfiguration.setLocal( false );
defaultRunConfiguration.setPentaho( true );
defaultRunConfiguration.setRemote( false );
JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration();
defaultRunConfigurationExecutor
.execute( defaultRunConfiguration, jobExecutionConfiguration, abstractMeta, variableSpace, null );
assertFalse( jobExecutionConfiguration.isExecutingLocally() );
assertFalse( jobExecutionConfiguration.isExecutingRemotely() );
} |
@Override
public <U> ParSeqBasedCompletionStage<Void> thenAcceptBoth(CompletionStage<? extends U> other,
BiConsumer<? super T, ? super U> action)
{
Task<U> that = getOrGenerateTaskFromStage(other);
return nextStageByComposingTask(
Task.par(_task, that).flatMap("thenAcceptBoth", (t, u) -> Task.action(() -> action.accept(t, u))));
} | @Test public void testThenAcceptBoth() throws Exception {
CompletionStage<String> completionStage1 = createTestStage(TESTVALUE1);
CompletionStage<String> completionStage2 = createTestStage(TESTVALUE2);
BiConsumer<String, String> consumer = mock(BiConsumer.class);
finish(completionStage1.thenAcceptBoth(completionStage2, consumer));
verify(consumer).accept(TESTVALUE1, TESTVALUE2);
} |
public void execute() {
if (indexesAreEnabled()) {
stream(indexers)
.forEach(this::indexUninitializedTypes);
}
} | @Test
public void index_if_not_initialized() {
doReturn(false).when(metadataIndex).getInitialized(TYPE_FAKE);
underTest.execute();
verify(indexer).getIndexTypes();
verify(indexer).indexOnStartup(Mockito.eq(ImmutableSet.of(TYPE_FAKE)));
} |
public void remove(K key) {
checkState(
!isClosed,
"Multimap user state is no longer usable because it is closed for %s",
keysStateRequest.getStateKey());
Object keyStructuralValue = mapKeyCoder.structuralValue(key);
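// drop any locally buffered additions for this key; persisted values only need a pending remove if state was not already cleared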
pendingAdds.remove(keyStructuralValue);
if (!isCleared) {
pendingRemoves.put(keyStructuralValue, key);
}
} | @Test
public void testRemove() throws Exception {
FakeBeamFnStateClient fakeClient =
new FakeBeamFnStateClient(
ImmutableMap.of(
createMultimapKeyStateKey(),
KV.of(ByteArrayCoder.of(), singletonList(A1)),
createMultimapValueStateKey(A1),
KV.of(StringUtf8Coder.of(), asList("V1", "V2"))));
MultimapUserState<byte[], String> userState =
new MultimapUserState<>(
Caches.noop(),
fakeClient,
"instructionId",
createMultimapKeyStateKey(),
ByteArrayCoder.of(),
StringUtf8Coder.of());
Iterable<String> initValues = userState.get(A1);
userState.put(A1, "V3");
userState.remove(A1);
assertArrayEquals(new String[] {"V1", "V2"}, Iterables.toArray(initValues, String.class));
assertThat(userState.keys(), is(emptyIterable()));
userState.asyncClose();
assertThrows(IllegalStateException.class, () -> userState.remove(A1));
} |
@VisibleForTesting
Integer convertSmsTemplateAuditStatus(int templateStatus) {
switch (templateStatus) {
case 1: return SmsTemplateAuditStatusEnum.CHECKING.getStatus();
case 0: return SmsTemplateAuditStatusEnum.SUCCESS.getStatus();
case -1: return SmsTemplateAuditStatusEnum.FAIL.getStatus();
default: throw new IllegalArgumentException(String.format("未知审核状态(%d)", templateStatus));
}
} | @Test
public void testConvertSmsTemplateAuditStatus() {
assertEquals(SmsTemplateAuditStatusEnum.SUCCESS.getStatus(),
smsClient.convertSmsTemplateAuditStatus(0));
assertEquals(SmsTemplateAuditStatusEnum.CHECKING.getStatus(),
smsClient.convertSmsTemplateAuditStatus(1));
assertEquals(SmsTemplateAuditStatusEnum.FAIL.getStatus(),
smsClient.convertSmsTemplateAuditStatus(-1));
assertThrows(IllegalArgumentException.class, () -> smsClient.convertSmsTemplateAuditStatus(3),
"未知审核状态(3)");
} |
public static NodeBadge glyph(String gid) {
return new NodeBadge(Status.INFO, true, nonNull(gid), null);
} | @Test
public void glyphWarn() {
badge = NodeBadge.glyph(Status.WARN, GID);
checkFields(badge, Status.WARN, true, GID, null);
} |
@Override
public int run(String[] args) throws Exception {
try {
webServiceClient = WebServiceClient.getWebServiceClient().createClient();
return runCommand(args);
} finally {
if (yarnClient != null) {
yarnClient.close();
}
if (webServiceClient != null) {
webServiceClient.destroy();
}
}
} | @Test (timeout = 15000)
public void testFetchFinishedApplictionLogs() throws Exception {
String remoteLogRootDir = "target/logs/";
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
conf
.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir);
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
FileSystem fs = FileSystem.get(conf);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId1 =
ApplicationAttemptId.newInstance(appId, 1);
ApplicationAttemptId appAttemptId2 =
ApplicationAttemptId.newInstance(appId, 2);
ContainerId containerId0 = ContainerId.newContainerId(appAttemptId1, 0);
ContainerId containerId1 = ContainerId.newContainerId(appAttemptId1, 1);
ContainerId containerId2 = ContainerId.newContainerId(appAttemptId1, 2);
ContainerId containerId3 = ContainerId.newContainerId(appAttemptId2, 3);
ContainerId containerId4 = ContainerId.newContainerId(appAttemptId2, 4);
final NodeId nodeId = NodeId.newInstance("localhost", 1234);
final NodeId badNodeId = NodeId.newInstance("badhost", 5678);
// create local logs
String rootLogDir = "target/LocalLogs";
Path rootLogDirPath = new Path(rootLogDir);
if (fs.exists(rootLogDirPath)) {
fs.delete(rootLogDirPath, true);
}
assertTrue(fs.mkdirs(rootLogDirPath));
Path appLogsDir = new Path(rootLogDirPath, appId.toString());
if (fs.exists(appLogsDir)) {
fs.delete(appLogsDir, true);
}
assertTrue(fs.mkdirs(appLogsDir));
List<String> rootLogDirs = Arrays.asList(rootLogDir);
List<String> logTypes = new ArrayList<String>();
logTypes.add("syslog");
// create container logs in localLogDir
createContainerLogInLocalDir(appLogsDir, containerId1, fs, logTypes,
ImmutableList.of("empty"));
createContainerLogInLocalDir(appLogsDir, containerId2, fs, logTypes,
Collections.emptyList());
// create two logs for container3 in localLogDir
logTypes.add("stdout");
logTypes.add("stdout1234");
createContainerLogInLocalDir(appLogsDir, containerId3, fs, logTypes,
Collections.emptyList());
Path path =
new Path(remoteLogRootDir + ugi.getShortUserName()
+ "/bucket-logs-tfile/0001/application_0_0001");
if (fs.exists(path)) {
fs.delete(path, true);
}
assertTrue(fs.mkdirs(path));
// upload container logs into remote directory
// the first two logs are empty. When we try to read the first two logs,
// we will hit an EOF exception, but it will not impact the other logs.
// Other logs should be read successfully.
uploadEmptyContainerLogIntoRemoteDir(ugi, conf, rootLogDirs, nodeId,
containerId0, path, fs);
uploadEmptyContainerLogIntoRemoteDir(ugi, conf, rootLogDirs, nodeId,
containerId1, path, fs);
uploadContainerLogIntoRemoteDir(ugi, conf, rootLogDirs, nodeId,
containerId1, path, fs);
uploadContainerLogIntoRemoteDir(ugi, conf, rootLogDirs, nodeId,
containerId2, path, fs);
uploadContainerLogIntoRemoteDir(ugi, conf, rootLogDirs, nodeId,
containerId3, path, fs);
uploadTruncatedTFileIntoRemoteDir(ugi, conf, badNodeId,
containerId4, fs);
YarnClient mockYarnClient =
createMockYarnClient(
YarnApplicationState.FINISHED, ugi.getShortUserName());
LogsCLI cli = new LogsCLIForTest(mockYarnClient) {
@Override
public ContainerReport getContainerReport(String containerIdStr)
throws YarnException, IOException {
ContainerReport mockReport = mock(ContainerReport.class);
doReturn(nodeId).when(mockReport).getAssignedNode();
doReturn("http://localhost:2345").when(mockReport).getNodeHttpAddress();
return mockReport;
}
};
cli.setConf(conf);
int exitCode = cli.run(new String[] { "-applicationId", appId.toString() });
LOG.info(sysOutStream.toString());
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertTrue(sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
// Check fetching data for application attempt with applicationId defined
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-applicationAttemptId", appAttemptId1.toString()});
LOG.info(sysOutStream.toString());
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertTrue(sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
// Check fetching data for application attempt without applicationId defined
exitCode = cli.run(new String[] {
"-applicationAttemptId", appAttemptId1.toString()});
LOG.info(sysOutStream.toString());
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertTrue(sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-log_files_pattern", ".*"});
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertTrue(sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-log_files", "*"});
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertTrue(sysOutStream.toString().contains(
createEmptyLog("empty")));
int fullSize = sysOutStream.toByteArray().length;
sysOutStream.reset();
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-log_files", "stdout"});
assertTrue(exitCode == 0);
assertFalse(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertFalse(sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
// Check backward compatibility for -logFiles
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-logFiles", "stdout"});
assertTrue("Failed with -logFiles", exitCode == 0);
assertFalse("Failed with -logFiles", sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertFalse("Failed with -logFiles", sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertFalse("Failed with -logFiles", sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue("Failed with -logFiles", sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertFalse("Failed with -logFiles", sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertFalse("Failed with -logFiles", sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
// Check -log_files supersedes -logFiles
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-log_files", "stdout", "-logFiles", "syslog"});
assertTrue("Failed with -logFiles and -log_files", exitCode == 0);
assertFalse("Failed with -logFiles and -log_files",
sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertFalse("Failed with -logFiles and -log_files",
sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertFalse("Failed with -logFiles and -log_files",
sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue("Failed with -logFiles and -log_files",
sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertFalse("Failed with -logFiles and -log_files",
sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertFalse("Failed with -logFiles and -log_files",
sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-log_files_pattern", "std*"});
assertTrue(exitCode == 0);
assertFalse(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId2, "syslog")));
assertFalse(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout1234")));
assertFalse(sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-log_files", "123"});
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"Can not find any log file matching the pattern: [123] "
+ "for the application: " + appId.toString()));
sysErrStream.reset();
// specify a byte count larger than the actual file size;
// we should get the full logs
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-log_files", "*", "-size", "10000" });
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toByteArray().length == fullSize);
sysOutStream.reset();
// uploaded two logs for container1. The first log is empty.
// The second one is not empty.
// We can still successfully read logs for container1.
exitCode =
cli.run(new String[] { "-applicationId", appId.toString(),
"-nodeAddress", nodeId.toString(), "-containerId",
containerId1.toString() });
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId1, "syslog")));
assertTrue(sysOutStream.toString().contains("LogLastModifiedTime"));
assertTrue(!sysOutStream.toString().contains(
"Logs for container " + containerId1.toString()
+ " are not present in this log-file."));
sysOutStream.reset();
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-containerId", containerId3.toString(), "-log_files", "123" });
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"Can not find any log file matching the pattern: [123] "
+ "for the container: " + containerId3
+ " within the application: " + appId.toString()));
sysErrStream.reset();
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-containerId", containerId3.toString(), "-log_files", "stdout" });
assertTrue(exitCode == 0);
int fullContextSize = sysOutStream.toByteArray().length;
String fullContext = sysOutStream.toString();
sysOutStream.reset();
String logMessage = logMessage(containerId3, "stdout");
int fileContentSize = logMessage.getBytes().length;
StringBuilder sb = new StringBuilder();
String endOfFile = "End of LogType:stdout";
sb.append("\n" + endOfFile + "\n");
sb.append(StringUtils.repeat("*", endOfFile.length() + 50)
+ "\n\n");
int tailContentSize = sb.toString().length();
// specify how many bytes we should get from the logs
// specify a positive number; it gets the first n bytes from the
// container log
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-containerId", containerId3.toString(), "-log_files", "stdout",
"-size", "5"});
assertTrue(exitCode == 0);
Assert.assertEquals(new String(logMessage.getBytes(), 0, 5),
new String(sysOutStream.toByteArray(),
(fullContextSize - fileContentSize - tailContentSize), 5));
sysOutStream.reset();
// specify how many bytes we should get from an empty log
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-containerId", containerId1.toString(), "-log_files", "empty",
"-size", "5"});
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
createEmptyLog("empty")));
sysOutStream.reset();
// specify a negative number; it gets the last n bytes from the
// container log
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-containerId", containerId3.toString(), "-log_files", "stdout",
"-size", "-5"});
assertTrue(exitCode == 0);
Assert.assertEquals(new String(logMessage.getBytes(),
logMessage.getBytes().length - 5, 5),
new String(sysOutStream.toByteArray(),
(fullContextSize - fileContentSize - tailContentSize), 5));
sysOutStream.reset();
long negative = (fullContextSize + 1000) * (-1);
exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
"-containerId", containerId3.toString(), "-log_files", "stdout",
"-size", Long.toString(negative)});
assertTrue(exitCode == 0);
Assert.assertEquals(fullContext, sysOutStream.toString());
sysOutStream.reset();
// Uploaded the empty log for container0.
// We should see the message showing the log for container0
// is not present.
exitCode =
cli.run(new String[] { "-applicationId", appId.toString(),
"-nodeAddress", nodeId.toString(), "-containerId",
containerId0.toString() });
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"Can not find any log file matching the pattern"));
sysErrStream.reset();
// uploaded two logs for container3. The first log is named syslog.
// The second one is named stdout.
exitCode =
cli.run(new String[] { "-applicationId", appId.toString(),
"-nodeAddress", nodeId.toString(), "-containerId",
containerId3.toString() });
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
sysOutStream.reset();
// set the -log_files option to stdout
// should only print the log named stdout
exitCode =
cli.run(new String[] { "-applicationId", appId.toString(),
"-nodeAddress", nodeId.toString(), "-containerId",
containerId3.toString() , "-log_files", "stdout"});
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertTrue(!sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
sysOutStream.reset();
YarnClient mockYarnClientWithException =
createMockYarnClientWithException();
cli = new LogsCLIForTest(mockYarnClientWithException);
cli.setConf(conf);
exitCode =
cli.run(new String[] { "-applicationId", appId.toString(),
"-containerId", containerId3.toString() });
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertTrue(sysOutStream.toString().contains(
containerId3 + " on " + LogAggregationUtils.getNodeString(nodeId)));
sysOutStream.reset();
// The same should also work without the applicationId
exitCode =
cli.run(new String[] { "-containerId", containerId3.toString() });
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "syslog")));
assertTrue(sysOutStream.toString().contains(
logMessage(containerId3, "stdout")));
assertTrue(sysOutStream.toString().contains(
containerId3 + " on " + LogAggregationUtils.getNodeString(nodeId)));
sysOutStream.reset();
exitCode = cli.run(new String[] { "-containerId", "invalid_container" });
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"Invalid ContainerId specified"));
sysErrStream.reset();
// Uploaded the empty log for container4. We should see a message
// showing the log for container4 is not present.
exitCode =
cli.run(new String[] {"-applicationId", appId.toString(),
"-nodeAddress", badNodeId.toString(), "-containerId",
containerId4.toString()});
assertTrue(exitCode == -1);
assertTrue(sysErrStream.toString().contains(
"Can not find any log file matching the pattern"));
sysErrStream.reset();
fs.delete(new Path(remoteLogRootDir), true);
fs.delete(new Path(rootLogDir), true);
} |
void start(Iterable<ShardCheckpoint> checkpoints) {
LOG.info(
"Pool {} - starting for stream {} consumer {}. Checkpoints = {}",
poolId,
read.getStreamName(),
consumerArn,
checkpoints);
for (ShardCheckpoint shardCheckpoint : checkpoints) {
checkState(
!state.containsKey(shardCheckpoint.getShardId()),
"Duplicate shard id %s",
shardCheckpoint.getShardId());
ShardState shardState =
new ShardState(
initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory);
state.put(shardCheckpoint.getShardId(), shardState);
}
} | @Test
public void poolReSubscribesAndReadsRecordsAfterCheckPoint() throws Exception {
kinesis = new EFOStubbedKinesisAsyncClient(10);
kinesis.stubSubscribeToShard("shard-000", eventWithRecords(3));
kinesis.stubSubscribeToShard("shard-001", eventWithRecords(11, 3));
KinesisReaderCheckpoint initialCheckpoint =
new KinesisReaderCheckpoint(
ImmutableList.of(
afterCheckpoint("shard-000", "0"), afterCheckpoint("shard-001", "11")));
pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis);
pool.start(initialCheckpoint);
PoolAssertion.assertPool(pool)
.givesCheckPointedRecords(
ShardAssertion.shard("shard-000")
.gives(KinesisRecordView.generate("shard-000", 1, 2))
.withLastCheckpointSequenceNumber(2),
ShardAssertion.shard("shard-001")
.gives(KinesisRecordView.generate("shard-001", 12, 2))
.withLastCheckpointSequenceNumber(13));
assertThat(kinesis.subscribeRequestsSeen())
.containsExactlyInAnyOrder(
subscribeAtSeqNumber("shard-000", "0"),
subscribeAtSeqNumber("shard-001", "11"),
subscribeAfterSeqNumber("shard-000", "2"),
subscribeAfterSeqNumber("shard-001", "13"));
} |
public BackgroundJobServerConfiguration andServerTimeoutPollIntervalMultiplicand(int multiplicand) {
if (multiplicand < 4) throw new IllegalArgumentException("The smallest possible ServerTimeoutPollIntervalMultiplicand is 4 (4 is also the default)");
this.serverTimeoutPollIntervalMultiplicand = multiplicand;
return this;
} | @Test
void isServerTimeoutMultiplicandIsSmallerThan4AnExceptionIsThrown() {
assertThatThrownBy(() -> backgroundJobServerConfiguration.andServerTimeoutPollIntervalMultiplicand(3))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("The smallest possible ServerTimeoutPollIntervalMultiplicand is 4 (4 is also the default)");
} |
public <T> T convert(String property, Class<T> targetClass) {
final AbstractPropertyConverter<?> converter = converterRegistry.get(targetClass);
if (converter == null) {
throw new MissingFormatArgumentException("converter not found, can't convert from String to " + targetClass.getCanonicalName());
}
return (T) converter.convert(property);
} | @Test
void testConvertBooleanIllegal() {
assertThrows(IllegalArgumentException.class, () -> {
compositeConverter.convert("aaa", Boolean.class);
});
} |
public static String rangeKey(long value) {
return encodeBase62(value, true);
} | @Test
public void testRangeKey() {
Assert.assertEquals("44C92", IdHelper.rangeKey(1000000L));
Assert.assertEquals("71l9Zo9o", IdHelper.rangeKey(100000000000L));
Assert.assertTrue(IdHelper.rangeKey(1000000L).compareTo(IdHelper.rangeKey(100000000000L)) < 0);
} |
@Override
public MaskRuleConfiguration findRuleConfiguration(final ShardingSphereDatabase database) {
return database.getRuleMetaData().findSingleRule(MaskRule.class)
.map(optional -> getConfiguration(optional.getConfiguration())).orElseGet(() -> new MaskRuleConfiguration(new LinkedList<>(), new LinkedHashMap<>()));
} | @Test
void assertFindRuleConfigurationWhenRuleDoesNotExist() {
ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
when(database.getRuleMetaData().findSingleRule(MaskRule.class)).thenReturn(Optional.empty());
assertTrue(new MaskAlgorithmChangedProcessor().findRuleConfiguration(database).getMaskAlgorithms().isEmpty());
} |
public static Set<String> findKeywordsFromCrashReport(String crashReport) {
Matcher matcher = CRASH_REPORT_STACK_TRACE_PATTERN.matcher(crashReport);
Set<String> result = new HashSet<>();
if (matcher.find()) {
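// keep only the package segments of each frame (drop the class and method names), skipping blacklisted packages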
for (String line : matcher.group("stacktrace").split("\\n")) {
Matcher lineMatcher = STACK_TRACE_LINE_PATTERN.matcher(line);
if (lineMatcher.find()) {
String[] method = lineMatcher.group("method").split("\\.");
for (int i = 0; i < method.length - 2; i++) {
if (PACKAGE_KEYWORD_BLACK_LIST.contains(method[i])) {
continue;
}
result.add(method[i]);
}
Matcher moduleMatcher = STACK_TRACE_LINE_MODULE_PATTERN.matcher(line);
if (moduleMatcher.find()) {
for (String module : moduleMatcher.group("tokens").split(",")) {
String[] split = module.split(":");
if (split.length >= 2 && "xf".equals(split[0])) {
if (PACKAGE_KEYWORD_BLACK_LIST.contains(split[1])) {
continue;
}
result.add(split[1]);
}
}
}
}
}
}
return result;
} | @Test
public void mapletree() throws IOException {
assertEquals(
new HashSet<>(Arrays.asList("MapleTree", "bamboo", "uraniummc", "ecru")),
CrashReportAnalyzer.findKeywordsFromCrashReport(loadLog("/crash-report/mod/mapletree.txt")));
} |
@Override
public void onChange(List<JobRunrMetadata> metadataList) {
if (this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList == null || this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList.size() != metadataList.size()) {
problems.removeProblemsOfType(PollIntervalInSecondsTimeBoxIsTooSmallProblem.PROBLEM_TYPE);
if (!metadataList.isEmpty() && !problems.containsProblemOfType(CpuAllocationIrregularityProblem.PROBLEM_TYPE)) {
problems.addProblem(new PollIntervalInSecondsTimeBoxIsTooSmallProblem(metadataList));
}
this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList = metadataList;
}
} | @Test
void ifPollIntervalInSecondsTimeBoxIsTooSmallIsDeletedThenProblemIsRemoved() {
final JobRunrMetadata jobRunrMetadata = new JobRunrMetadata(PollIntervalInSecondsTimeBoxIsTooSmallNotification.class.getSimpleName(), "BackgroundJobServer " + UUID.randomUUID(), "23");
pollIntervalInSecondsTimeBoxIsTooSmallProblemHandler.onChange(asList(jobRunrMetadata));
reset(problems);
pollIntervalInSecondsTimeBoxIsTooSmallProblemHandler.onChange(emptyList());
verify(problems).removeProblemsOfType(PollIntervalInSecondsTimeBoxIsTooSmallProblem.PROBLEM_TYPE);
verify(problems, never()).addProblem(any());
} |
Record deserialize(Object data) {
return (Record) fieldDeserializer.value(data);
} | @Test
public void testDeserializeEverySupportedType() {
assumeThat(HiveVersion.min(HiveVersion.HIVE_3))
.as("No test yet for Hive3 (Date/Timestamp creation)")
.isFalse();
Deserializer deserializer =
new Deserializer.Builder()
.schema(HiveIcebergTestUtils.FULL_SCHEMA)
.writerInspector(
(StructObjectInspector)
IcebergObjectInspector.create(HiveIcebergTestUtils.FULL_SCHEMA))
.sourceInspector(HiveIcebergTestUtils.FULL_SCHEMA_OBJECT_INSPECTOR)
.build();
Record expected = HiveIcebergTestUtils.getTestRecord();
Record actual = deserializer.deserialize(HiveIcebergTestUtils.valuesForTestRecord(expected));
HiveIcebergTestUtils.assertEquals(expected, actual);
} |
public ParseTree getRootNode() {
return parseTree.getChild(0);
} | @Test
void assertGetRootNode() {
ParseTree parseTree = mock(ParseTree.class);
when(parseTree.getChild(0)).thenReturn(parseTree);
assertThat(new ParseASTNode(parseTree, mock(CommonTokenStream.class)).getRootNode(), is(parseTree));
} |
public void add(long member) {
assert member >= 0;
int prefix = (int) (member >>> Integer.SIZE);
if (prefix == lastPrefix) {
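// fast path: same high 32 bits as the previous add, so reuse the cached storage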
Storage32 newStorage = lastStorage.add((int) member);
if (newStorage != lastStorage) {
// storage was upgraded
lastStorage = newStorage;
storages.set(prefix, newStorage);
}
} else {
lastPrefix = prefix;
Storage32 storage = storages.get(prefix);
if (storage == null) {
Storage32 createdStorage = new ArrayStorage32((int) member);
lastStorage = createdStorage;
storages.set(prefix, createdStorage);
} else {
Storage32 newStorage = storage.add((int) member);
if (newStorage == storage) {
lastStorage = storage;
} else {
// storage was upgraded
lastStorage = newStorage;
storages.set(prefix, newStorage);
}
}
}
} | @Test
public void testAdd() {
// try empty set
verify();
// at the beginning
for (long i = 0; i < ARRAY_STORAGE_32_MAX_SIZE / 2; ++i) {
set(i);
verify();
set(i);
verify();
}
// offset
for (long i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SIZE; ++i) {
set(i);
verify();
set(i);
verify();
}
// clear everything we have added
for (long i = 0; i < ARRAY_STORAGE_32_MAX_SIZE / 2; ++i) {
clear(i);
verify();
}
for (long i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SIZE; ++i) {
clear(i);
verify();
}
// test empty again
verify();
// try gaps
for (long i = 0; i < 1000; ++i) {
set(i * i);
verify();
set(i * i);
verify();
}
// try larger gaps
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
set(i * i);
verify();
set(i * i);
verify();
}
// try some edge cases
for (long i = 0; i <= 2; ++i) {
set(i);
verify();
}
for (long i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
set(i);
verify();
}
for (long i = Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
set(i);
verify();
}
for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 2; --i) {
set(i);
verify();
}
} |
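As a standalone sketch of the arithmetic add(long) relies on (the member value is arbitrary, not taken from the test): each 64-bit member is split into a 32-bit prefix that selects a Storage32 bucket and a 32-bit suffix that is actually stored.
long member = 4_294_967_298L;                   // == (1L << 32) + 2
int prefix = (int) (member >>> Integer.SIZE);   // 1 -> index into storages
int suffix = (int) member;                      // 2 -> value handed to Storage32.add
// Members sharing the same upper 32 bits hit the same storage, which is why
// add(long) caches lastPrefix/lastStorage for runs of nearby inserts.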
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String hanaType = typeDefine.getDataType().toUpperCase();
if (typeDefine.getColumnType().endsWith(" ARRAY")) {
typeDefine.setColumnType(typeDefine.getColumnType().replace(" ARRAY", ""));
typeDefine.setDataType(removeColumnSizeIfNeed(typeDefine.getColumnType()));
Column arrayColumn = convert(typeDefine);
SeaTunnelDataType<?> newType;
switch (arrayColumn.getDataType().getSqlType()) {
case STRING:
newType = ArrayType.STRING_ARRAY_TYPE;
break;
case BOOLEAN:
newType = ArrayType.BOOLEAN_ARRAY_TYPE;
break;
case TINYINT:
newType = ArrayType.BYTE_ARRAY_TYPE;
break;
case SMALLINT:
newType = ArrayType.SHORT_ARRAY_TYPE;
break;
case INT:
newType = ArrayType.INT_ARRAY_TYPE;
break;
case BIGINT:
newType = ArrayType.LONG_ARRAY_TYPE;
break;
case FLOAT:
newType = ArrayType.FLOAT_ARRAY_TYPE;
break;
case DOUBLE:
newType = ArrayType.DOUBLE_ARRAY_TYPE;
break;
case DATE:
newType = ArrayType.LOCAL_DATE_ARRAY_TYPE;
break;
case TIME:
newType = ArrayType.LOCAL_TIME_ARRAY_TYPE;
break;
case TIMESTAMP:
newType = ArrayType.LOCAL_DATE_TIME_ARRAY_TYPE;
break;
default:
throw CommonError.unsupportedDataType(
"SeaTunnel",
arrayColumn.getDataType().getSqlType().toString(),
typeDefine.getName());
}
return new PhysicalColumn(
arrayColumn.getName(),
newType,
arrayColumn.getColumnLength(),
arrayColumn.getScale(),
arrayColumn.isNullable(),
arrayColumn.getDefaultValue(),
arrayColumn.getComment(),
arrayColumn.getSourceType() + " ARRAY",
arrayColumn.getOptions());
}
switch (hanaType) {
case HANA_BINARY:
case HANA_VARBINARY:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
if (typeDefine.getLength() == null || typeDefine.getLength() == 0) {
builder.columnLength(MAX_BINARY_LENGTH);
} else {
builder.columnLength(typeDefine.getLength());
}
break;
case HANA_BOOLEAN:
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case HANA_VARCHAR:
case HANA_ALPHANUM:
case HANA_CLOB:
case HANA_NCLOB:
case HANA_TEXT:
case HANA_BINTEXT:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() == 0) {
builder.columnLength(MAX_LOB_LENGTH);
} else {
builder.columnLength(typeDefine.getLength());
}
break;
case HANA_NVARCHAR:
case HANA_SHORTTEXT:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
break;
case HANA_DATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case HANA_TIME:
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(0);
break;
case HANA_SECONDDATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(0);
break;
case HANA_TIMESTAMP:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
if (typeDefine.getScale() == null) {
builder.scale(TIMESTAMP_DEFAULT_SCALE);
} else {
builder.scale(typeDefine.getScale());
}
break;
case HANA_BLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case HANA_TINYINT:
case HANA_SMALLINT:
builder.dataType(BasicType.SHORT_TYPE);
break;
case HANA_INTEGER:
builder.dataType(BasicType.INT_TYPE);
break;
case HANA_BIGINT:
builder.dataType(BasicType.LONG_TYPE);
break;
case HANA_DECIMAL:
Integer scale = typeDefine.getScale();
long precision =
typeDefine.getLength() != null
? typeDefine.getLength().intValue()
: MAX_PRECISION - 4;
if (scale == null) {
builder.dataType(new DecimalType((int) precision, MAX_SCALE));
builder.columnLength(precision);
builder.scale(MAX_SCALE);
} else if (scale < 0) {
int newPrecision = (int) (precision - scale);
if (newPrecision == 1) {
builder.dataType(BasicType.SHORT_TYPE);
} else if (newPrecision <= 9) {
builder.dataType(BasicType.INT_TYPE);
} else if (newPrecision <= 18) {
builder.dataType(BasicType.LONG_TYPE);
} else if (newPrecision < 38) {
builder.dataType(new DecimalType(newPrecision, 0));
builder.columnLength((long) newPrecision);
} else {
builder.dataType(new DecimalType(DEFAULT_PRECISION, 0));
builder.columnLength((long) DEFAULT_PRECISION);
}
} else {
builder.dataType(new DecimalType((int) precision, scale));
builder.columnLength(precision);
builder.scale(scale);
}
break;
case HANA_SMALLDECIMAL:
if (typeDefine.getPrecision() == null) {
builder.dataType(new DecimalType(DEFAULT_PRECISION, MAX_SMALL_DECIMAL_SCALE));
builder.columnLength((long) DEFAULT_PRECISION);
builder.scale(MAX_SMALL_DECIMAL_SCALE);
} else {
builder.dataType(
new DecimalType(
typeDefine.getPrecision().intValue(), MAX_SMALL_DECIMAL_SCALE));
builder.columnLength(typeDefine.getPrecision());
builder.scale(MAX_SMALL_DECIMAL_SCALE);
}
break;
case HANA_REAL:
builder.dataType(BasicType.FLOAT_TYPE);
break;
case HANA_DOUBLE:
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case HANA_ST_POINT:
case HANA_ST_GEOMETRY:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.SAP_HANA, hanaType, typeDefine.getName());
}
return builder.build();
} | @Test
public void testConvertDecimal() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("DECIMAL")
.dataType("DECIMAL")
.build();
Column column = SapHanaTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(new DecimalType(34, 6176), column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
BasicTypeDefine<Object> typeDefine2 =
BasicTypeDefine.builder()
.name("test")
.columnType("DECIMAL")
.dataType("DECIMAL")
.precision(10L)
.length(10L)
.scale(5)
.build();
Column column2 = SapHanaTypeConverter.INSTANCE.convert(typeDefine2);
Assertions.assertEquals(typeDefine2.getName(), column2.getName());
Assertions.assertEquals(new DecimalType(10, 5), column2.getDataType());
Assertions.assertEquals(typeDefine2.getColumnType(), column2.getSourceType());
BasicTypeDefine<Object> typeDefine3 =
BasicTypeDefine.builder()
.name("test")
.columnType("DECIMAL")
.dataType("DECIMAL")
.precision(10L)
.length(10L)
.scale(0)
.build();
Column column3 = SapHanaTypeConverter.INSTANCE.convert(typeDefine3);
Assertions.assertEquals(typeDefine3.getName(), column3.getName());
Assertions.assertEquals(new DecimalType(10, 0), column3.getDataType());
Assertions.assertEquals(typeDefine3.getColumnType(), column3.getSourceType());
} |
public static NetworkPolicyPeer createPeer(Map<String, String> podSelector, LabelSelector namespaceSelector) {
return new NetworkPolicyPeerBuilder()
.withNewPodSelector()
.withMatchLabels(podSelector)
.endPodSelector()
.withNamespaceSelector(namespaceSelector)
.build();
} | @Test
public void testCreatePeerWithPodLabels() {
NetworkPolicyPeer peer = NetworkPolicyUtils.createPeer(Map.of("labelKey", "labelValue"));
assertThat(peer.getNamespaceSelector(), is(nullValue()));
assertThat(peer.getPodSelector().getMatchLabels(), is(Map.of("labelKey", "labelValue")));
} |
public static String decodeMessage(String raw) {
if (StringUtils.isEmpty(raw)) {
return "";
}
return QueryStringDecoder.decodeComponent(raw);
} | @Test
void decodeMessage() {
String message = "😯";
Assertions.assertEquals(message, TriRpcStatus.decodeMessage(TriRpcStatus.encodeMessage(message)));
Assertions.assertTrue(TriRpcStatus.decodeMessage("").isEmpty());
Assertions.assertTrue(TriRpcStatus.decodeMessage(null).isEmpty());
} |
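decodeComponent percent-decodes UTF-8 byte sequences; below is a JDK-only sketch of the same round trip. URLEncoder/URLDecoder are stand-ins chosen for illustration, not the calls the method above actually makes:
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

String encoded = URLEncoder.encode("😯", StandardCharsets.UTF_8); // "%F0%9F%98%AF"
String decoded = URLDecoder.decode(encoded, StandardCharsets.UTF_8);
// decoded.equals("😯") -- the four-byte UTF-8 sequence survives the round trip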
@Override
public void executeSystemTask(WorkflowSystemTask systemTask, String taskId, int callbackTime) {
try {
Task task = executionDAOFacade.getTaskById(taskId);
if (task == null) {
LOG.error("TaskId: {} could not be found while executing SystemTask", taskId);
return;
}
LOG.debug("Task: {} fetched from execution DAO for taskId: {}", task, taskId);
String queueName = QueueUtils.getQueueName(task);
if (task.getStatus().isTerminal()) {
// Tune the SystemTaskWorkerCoordinator's queues - if the queue size is very big this can
// happen!
LOG.info("Task {}/{} was already completed.", task.getTaskType(), task.getTaskId());
queueDAO.remove(queueName, task.getTaskId());
return;
}
String workflowId = task.getWorkflowInstanceId();
Workflow workflow = executionDAOFacade.getWorkflowById(workflowId, true);
if (task.getStartTime() == 0) {
task.setStartTime(System.currentTimeMillis());
executionDAOFacade.updateTask(task);
Monitors.recordQueueWaitTime(task.getTaskDefName(), task.getQueueWaitTime());
}
if (workflow.getStatus().isTerminal()) {
LOG.info(
"Workflow {} has been completed for {}/{}",
workflow.getWorkflowId(),
systemTask.getName(),
task.getTaskId());
if (!task.getStatus().isTerminal()) {
task.setStatus(CANCELED);
}
executionDAOFacade.updateTask(task);
queueDAO.remove(queueName, task.getTaskId());
return;
}
LOG.debug("Executing {}/{}-{}", task.getTaskType(), task.getTaskId(), task.getStatus());
if (task.getStatus() == SCHEDULED || !systemTask.isAsyncComplete(task)) {
task.setPollCount(task.getPollCount() + 1);
// removed poll count DB update here
}
deciderService.populateTaskData(task);
// Stop polling for asyncComplete system tasks that are not in SCHEDULED state
if (systemTask.isAsyncComplete(task) && task.getStatus() != SCHEDULED) {
queueDAO.remove(QueueUtils.getQueueName(task), task.getTaskId());
return;
}
taskRunner.runMaestroTask(this, workflow, task, systemTask);
if (!task.getStatus().isTerminal()) {
task.setCallbackAfterSeconds(callbackTime);
try {
configureCallbackInterval(task); // overwrite if needed
} catch (Exception e) {
LOG.error(
"Error configuring callback interval for task [{}]. Please investigate it",
task.getTaskId(),
e);
}
}
updateTask(new TaskResult(task));
LOG.debug(
"Done Executing {}/{}-{} output={}",
task.getTaskType(),
task.getTaskId(),
task.getStatus(),
task.getOutputData());
} catch (Exception e) {
Monitors.error("MaestroWorkflowExecutor", "executeSystemTask");
LOG.error("Error executing system task - {}, with id: {}", systemTask, taskId, e);
}
} | @Test
public void testExecuteSystemTaskThrowExceptionDuringConfigureCallbackInterval() {
String workflowId = "workflow-id";
String taskId = "task-id-1";
Task maestroTask = new Task();
maestroTask.setTaskType(Constants.MAESTRO_TASK_NAME);
maestroTask.setReferenceTaskName("maestroTask");
maestroTask.setWorkflowInstanceId(workflowId);
maestroTask.setScheduledTime(System.currentTimeMillis());
maestroTask.setTaskId(taskId);
maestroTask.setStatus(Task.Status.IN_PROGRESS);
maestroTask.setStartTime(123);
maestroTask.setCallbackAfterSeconds(0);
maestroTask.setOutputData(Collections.singletonMap(Constants.STEP_RUNTIME_SUMMARY_FIELD, null));
Workflow workflow = new Workflow();
workflow.setWorkflowId(workflowId);
workflow.setStatus(Workflow.WorkflowStatus.RUNNING);
when(executionDAOFacade.getTaskById(anyString())).thenReturn(maestroTask);
when(executionDAOFacade.getWorkflowById(anyString(), anyBoolean())).thenReturn(workflow);
task2.setShouldThrow(false);
maestroWorkflowExecutor.executeSystemTask(task2, taskId, 30);
assertEquals(Task.Status.IN_PROGRESS, maestroTask.getStatus());
assertEquals(1, maestroTask.getPollCount());
verify(executionDAOFacade, times(0)).updateTask(any());
// fall back to input of executeSystemTask
assertEquals(30, maestroTask.getCallbackAfterSeconds());
} |
public static Date toDate(Object value, Date defaultValue) {
return convertQuietly(Date.class, value, defaultValue);
} | @Test
public void toDateTest() {
assertThrows(DateException.class, () -> {
// 默认转换失败报错而不是返回null
Convert.convert(Date.class, "aaaa");
});
} |
public static String jaasConfig(String moduleName, Map<String, String> options) {
StringJoiner joiner = new StringJoiner(" ");
for (Entry<String, String> entry : options.entrySet()) {
String key = Objects.requireNonNull(entry.getKey());
String value = Objects.requireNonNull(entry.getValue());
if (key.contains("=") || key.contains(";")) {
throw new IllegalArgumentException("Keys must not contain '=' or ';'");
}
if (moduleName.isEmpty() || moduleName.contains(";") || moduleName.contains("=")) {
throw new IllegalArgumentException("module name must be not empty and must not contain '=' or ';'");
} else {
joiner.add(key + "=\"" + value + "\"");
}
}
return moduleName + " required " + joiner + ";";
} | @Test
public void testValueContainsEqualSign() {
Map<String, String> options = new HashMap<>();
options.put("key1", "value=1");
String moduleName = "Module";
String expected = "Module required key1=\"value=1\";";
assertEquals(expected, AuthenticationUtils.jaasConfig(moduleName, options));
} |
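A usage sketch for jaasConfig; the module name and option keys below are hypothetical, and a LinkedHashMap keeps the rendered option order deterministic:
Map<String, String> options = new LinkedHashMap<>();
options.put("username", "alice");
options.put("password", "secret");
String jaas = AuthenticationUtils.jaasConfig("PlainLoginModule", options);
// jaas == "PlainLoginModule required username=\"alice\" password=\"secret\";"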
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
final Statement statement,
final KsqlExecutionContext ksqlEngine,
final KsqlConfig ksqlConfig,
final Map<String, Object> overrides
) {
if (!isPushV2Enabled(ksqlConfig, overrides)) {
return false;
}
if (!(statement instanceof Query)) {
return false;
}
final Query query = (Query) statement;
final SourceFinder sourceFinder = new SourceFinder();
sourceFinder.process(query.getFrom(), null);
// It will be present if it's not a join, which we don't handle
if (!sourceFinder.getSourceName().isPresent()) {
return false;
}
// Find all of the writers to this particular source.
final SourceName sourceName = sourceFinder.getSourceName().get();
final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
// See if the config or override have set the stream to be "latest"
final boolean isLatest = isLatest(ksqlConfig, overrides);
// Cannot be a pull query, i.e. must be a push
return !query.isPullQuery()
// Group by is not supported
&& !query.getGroupBy().isPresent()
// Windowing is not supported
&& !query.getWindow().isPresent()
// Having clause is not supported
&& !query.getHaving().isPresent()
// Partition by is not supported
&& !query.getPartitionBy().isPresent()
// There must be an EMIT CHANGES clause
&& (query.getRefinement().isPresent()
&& query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
// Must be reading from "latest"
&& isLatest
// We only handle a single sink source at the moment from a CTAS/CSAS
&& upstreamQueries.size() == 1
// ROWPARTITION and ROWOFFSET are not currently supported in SPQs
&& !containsDisallowedColumns(query);
} | @Test
public void shouldNotMakeQueryWithRowpartitionInSelectClauseScalablePush() {
try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
// Given:
expectIsSPQ(SystemColumns.ROWPARTITION_NAME, columnExtractor);
// When:
final boolean isScalablePush = ScalablePushUtil.isScalablePushQuery(
query,
ksqlEngine,
ksqlConfig,
overrides
);
// Then:
assert(!isScalablePush);
}
} |
public static List<String> normalizeColumns(List<Object> columns) {
return columns.stream()
.map(TableDataUtils::normalizeColumn)
.collect(Collectors.toList());
} | @Test
void testColumns() {
assertEquals(Arrays.asList("hello world", "hello world"),
TableDataUtils.normalizeColumns(new Object[]{"hello\tworld", "hello\nworld"}));
assertEquals(Arrays.asList("hello world", "null"),
TableDataUtils.normalizeColumns(new String[]{"hello\tworld", null}));
} |
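normalizeColumn itself is not shown; a sketch consistent with the assertions above (an assumption, not the actual source) flattens tabs and newlines to spaces and renders null as the string "null":
static String normalizeColumn(Object column) {
    // String.valueOf((Object) null) yields "null", matching the second assertion
    return String.valueOf(column).replace("\r\n", " ").replace("\n", " ").replace("\t", " ");
}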
@Override
public byte[] serialize(MySqlSplit split) throws IOException {
if (split.isSnapshotSplit()) {
final MySqlSnapshotSplit snapshotSplit = split.asSnapshotSplit();
// optimization: the splits lazily cache their own serialized form
if (snapshotSplit.serializedFormCache != null) {
return snapshotSplit.serializedFormCache;
}
final DataOutputSerializer out = SERIALIZER_CACHE.get();
out.writeInt(SNAPSHOT_SPLIT_FLAG);
out.writeUTF(quote(snapshotSplit.getTableId()));
out.writeUTF(snapshotSplit.splitId());
out.writeUTF(snapshotSplit.getSplitKeyType().asSerializableString());
final Object[] splitStart = snapshotSplit.getSplitStart();
final Object[] splitEnd = snapshotSplit.getSplitEnd();
// rowToSerializedString handles the null case
out.writeUTF(rowToSerializedString(splitStart));
out.writeUTF(rowToSerializedString(splitEnd));
writeBinlogPosition(snapshotSplit.getHighWatermark(), out);
writeTableSchemas(snapshotSplit.getTableSchemas(), out);
final byte[] result = out.getCopyOfBuffer();
out.clear();
// optimization: cache the serialized form, so we avoid the byte work during repeated
// serialization
snapshotSplit.serializedFormCache = result;
return result;
} else {
final MySqlBinlogSplit binlogSplit = split.asBinlogSplit();
// optimization: the splits lazily cache their own serialized form
if (binlogSplit.serializedFormCache != null) {
return binlogSplit.serializedFormCache;
}
final DataOutputSerializer out = SERIALIZER_CACHE.get();
out.writeInt(BINLOG_SPLIT_FLAG);
out.writeUTF(binlogSplit.splitId());
out.writeUTF("");
writeBinlogPosition(binlogSplit.getStartingOffset(), out);
writeBinlogPosition(binlogSplit.getEndingOffset(), out);
writeFinishedSplitsInfo(binlogSplit.getFinishedSnapshotSplitInfos(), out);
writeTableSchemas(binlogSplit.getTableSchemas(), out);
out.writeInt(binlogSplit.getTotalFinishedSplitSize());
out.writeBoolean(binlogSplit.isSuspended());
final byte[] result = out.getCopyOfBuffer();
out.clear();
// optimization: cache the serialized form, so we avoid the byte work during repeated
// serialization
binlogSplit.serializedFormCache = result;
return result;
}
} | @Test
public void testRepeatedSerializationCache() throws Exception {
final MySqlSplit split =
new MySqlSnapshotSplit(
TableId.parse("test_db.test_table"),
"test_db.test_table-0",
new RowType(
Collections.singletonList(
new RowType.RowField("id", new BigIntType()))),
null,
new Object[] {99L},
null,
new HashMap<>());
final byte[] ser1 = MySqlSplitSerializer.INSTANCE.serialize(split);
final byte[] ser2 = MySqlSplitSerializer.INSTANCE.serialize(split);
assertSame(ser1, ser2);
} |
@VisibleForTesting
static boolean isReaperThreadRunning() {
synchronized (REAPER_THREAD_LOCK) {
return null != REAPER_THREAD && REAPER_THREAD.isAlive();
}
} | @Test
void testReaperThreadSpawnAndStop() throws Exception {
assertThat(SafetyNetCloseableRegistry.isReaperThreadRunning()).isFalse();
try (SafetyNetCloseableRegistry ignored = new SafetyNetCloseableRegistry()) {
assertThat(SafetyNetCloseableRegistry.isReaperThreadRunning()).isTrue();
try (SafetyNetCloseableRegistry ignored2 = new SafetyNetCloseableRegistry()) {
assertThat(SafetyNetCloseableRegistry.isReaperThreadRunning()).isTrue();
}
assertThat(SafetyNetCloseableRegistry.isReaperThreadRunning()).isTrue();
}
assertThat(SafetyNetCloseableRegistry.isReaperThreadRunning()).isFalse();
} |
public static NormalKey createFromSpec(String spec) {
if (spec == null || !spec.contains(":")) {
throw new IllegalArgumentException("Invalid spec format");
}
String[] parts = spec.split(":", 2);
if (parts.length != 2) {
throw new IllegalArgumentException("Invalid spec format");
}
String algorithmName = parts[0];
String base64Key = parts[1];
EncryptionAlgorithmPB algorithm;
if (algorithmName.equalsIgnoreCase("AES_128")) {
algorithm = EncryptionAlgorithmPB.AES_128;
} else {
throw new IllegalArgumentException("Unsupported algorithm: " + algorithmName);
}
byte[] plainKey;
try {
plainKey = Base64.getDecoder().decode(base64Key);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Invalid Base64 key", e);
}
if (plainKey.length != 16) {
throw new IllegalArgumentException("Invalid key length " + plainKey.length * 8);
}
return new NormalKey(algorithm, plainKey, null);
} | @Test
public void testCreateFromSpec_InvalidSpecFormat() {
assertThrows(IllegalArgumentException.class, () -> {
NormalKey.createFromSpec("invalid_spec_format");
});
} |
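A valid spec pairs a supported algorithm name with a Base64-encoded 16-byte key, separated by a colon. A hedged usage sketch (the key bytes are arbitrary):
byte[] raw = new byte[16];                          // 128-bit key; all zeros for illustration
String spec = "AES_128:" + Base64.getEncoder().encodeToString(raw);
NormalKey key = NormalKey.createFromSpec(spec);     // parses algorithm and key material
// An unknown algorithm, or a key that is not exactly 16 bytes, also throws IllegalArgumentException.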
public abstract boolean passes(T object); | @Test
public void testPasses() {
String keep = "keep";
String fail = "fail";
assertTrue("String contained keep - but passes returned false.", TEST_FILTER.passes(keep));
assertFalse("String contained fail - but passes returned true.", TEST_FILTER.passes(fail));
} |
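TEST_FILTER is not shown in this pair; one implementation consistent with the two assertions rejects strings containing "fail". The class name Filter and the rule itself are assumptions:
Filter<String> TEST_FILTER = new Filter<String>() {
    @Override
    public boolean passes(String value) {
        return !value.contains("fail");
    }
};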
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
} | @Test
public void shouldParseArrayContainingMap() {
SchemaAndValue schemaAndValue = Values.parseString("[{}]");
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
assertEquals(Type.MAP, schemaAndValue.schema().valueSchema().type());
} |
public synchronized <K, V> KStream<K, V> stream(final String topic) {
return stream(Collections.singleton(topic));
} | @Test
public void shouldAllowStreamsFromSameTopic() {
builder.stream("topic");
builder.stream("topic");
assertBuildDoesNotThrow(builder);
} |
@Override
public Collection<V> values() {
return wrapperMap.values();
} | @Test
public void testValues() {
CharSequenceMap<String> map = CharSequenceMap.create();
map.put("key1", "value1");
map.put("key2", "value2");
assertThat(map.values()).containsAll(ImmutableList.of("value1", "value2"));
} |
public static Integer parseRestBindPortFromWebInterfaceUrl(String webInterfaceUrl) {
if (webInterfaceUrl != null) {
final int lastColon = webInterfaceUrl.lastIndexOf(':');
if (lastColon == -1) {
return -1;
} else {
try {
return Integer.parseInt(webInterfaceUrl.substring(lastColon + 1));
} catch (NumberFormatException e) {
return -1;
}
}
} else {
return -1;
}
} | @Test
void testParseRestBindPortFromWebInterfaceUrlWithInvalidPort() {
assertThat(ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl("localhost:port1"))
.isEqualTo(-1);
} |
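A behavior sketch for the parser (inputs are illustrative): everything after the last ':' must parse as an integer; otherwise, or when there is no colon or the URL is null, the result is -1.
ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl("http://host:8081"); // 8081
ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl("localhost");        // -1 (no colon)
ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl(null);               // -1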
@GetMapping("")
@RequiresPermissions("system:manager:list")
public ShenyuAdminResult queryDashboardUsers(final String userName,
@RequestParam @NotNull(message = "currentPage not null") final Integer currentPage,
@RequestParam @NotNull(message = "pageSize not null") final Integer pageSize) {
CommonPager<DashboardUserVO> commonPager = dashboardUserService.listByPage(new DashboardUserQuery(userName,
new PageParameter(currentPage, pageSize)));
if (CollectionUtils.isNotEmpty(commonPager.getDataList())) {
return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, commonPager);
} else {
return ShenyuAdminResult.error(ShenyuResultMessage.DASHBOARD_QUERY_ERROR);
}
} | @Test
public void queryDashboardUsers() throws Exception {
final CommonPager<DashboardUserVO> commonPager = new CommonPager<>(new PageParameter(),
Collections.singletonList(dashboardUserVO));
given(dashboardUserService.listByPage(any())).willReturn(commonPager);
final String url = "/dashboardUser?currentPage=1&pageSize=12";
mockMvc.perform(get(url))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
.andReturn();
final CommonPager<DashboardUserVO> commonPagerError = new CommonPager<>(new PageParameter(),
Collections.emptyList());
given(dashboardUserService.listByPage(any())).willReturn(commonPagerError);
mockMvc.perform(get(url))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.DASHBOARD_QUERY_ERROR)))
.andReturn();
} |
public DirectoryEntry lookUp(
File workingDirectory, JimfsPath path, Set<? super LinkOption> options) throws IOException {
checkNotNull(path);
checkNotNull(options);
DirectoryEntry result = lookUp(workingDirectory, path, options, 0);
if (result == null) {
// an intermediate file in the path did not exist or was not a directory
throw new NoSuchFileException(path.toString());
}
return result;
} | @Test
public void testLookup_absolute_finalSymlink() throws IOException {
assertExists(lookup("/work/four/five"), "/", "foo");
assertExists(lookup("/work/four/six"), "work", "one");
} |
@Override
public T addTimeMillis(K name, long value) {
throw new UnsupportedOperationException("read only");
} | @Test
public void testAddTimeMillis() {
assertThrows(UnsupportedOperationException.class, new Executable() {
@Override
public void execute() {
HEADERS.addTimeMillis("name", 0);
}
});
} |
public int size()
{
return sha1.digest().length;
} | @Test
public void testSize()
{
byte[] buf = new byte[1024];
Arrays.fill(buf, (byte) 0xAA);
ZDigest digest = new ZDigest();
digest.update(buf);
int size = digest.size();
assertThat(size, is(20));
} |
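The expected value 20 is simply SHA-1's digest length in bytes. A JDK-only illustration; java.security.MessageDigest stands in for the sha1 field above, whose exact type is not shown:
import java.security.MessageDigest;

MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); // declares NoSuchAlgorithmException
sha1.update(new byte[1024]);
int size = sha1.digest().length; // 20 bytes == 160 bits
// Caveat: MessageDigest.digest() resets the digest state, so a size() built this
// way would restart any in-progress hash.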
@Override
public String convertDestination(ProtocolConverter converter, Destination d) {
if (d == null) {
return null;
}
ActiveMQDestination activeMQDestination = (ActiveMQDestination)d;
String physicalName = activeMQDestination.getPhysicalName();
String rc = converter.getCreatedTempDestinationName(activeMQDestination);
if( rc!=null ) {
return rc;
}
StringBuilder buffer = new StringBuilder();
if (activeMQDestination.isQueue()) {
if (activeMQDestination.isTemporary()) {
buffer.append("/remote-temp-queue/");
} else {
buffer.append("/queue/");
}
} else {
if (activeMQDestination.isTemporary()) {
buffer.append("/remote-temp-topic/");
} else {
buffer.append("/topic/");
}
}
buffer.append(physicalName);
return buffer.toString();
} | @Test(timeout = 10000)
public void testConvertQueue() throws Exception {
ActiveMQDestination destination = translator.convertDestination(converter, "/queue/test", false);
assertFalse(destination.isComposite());
assertEquals("test", destination.getPhysicalName());
assertEquals(ActiveMQDestination.QUEUE_TYPE, destination.getDestinationType());
} |
public static IOException maybeExtractIOException(
String path,
Throwable thrown,
String message) {
if (thrown == null) {
return null;
}
// walk down the chain of exceptions to find the innermost.
Throwable cause = getInnermostThrowable(thrown.getCause(), thrown);
// see if this is an http channel exception
HttpChannelEOFException channelException =
maybeExtractChannelException(path, message, cause);
if (channelException != null) {
return channelException;
}
// not a channel exception, not an IOE.
if (!(cause instanceof IOException)) {
return null;
}
// the cause can be extracted to an IOE.
// rather than just return it, we try to preserve the stack trace
// of the outer exception.
// as a new instance is created through reflection, the
// class of the returned instance will be that of the innermost,
// unless no suitable constructor is available.
final IOException ioe = (IOException) cause;
return wrapWithInnerIOE(path, message, thrown, ioe);
} | @Test
public void testNoRouteToHostExceptionExtraction() throws Throwable {
intercept(NoRouteToHostException.class, "top",
() -> {
throw maybeExtractIOException("p2",
sdkException("top",
sdkException("middle",
new NoRouteToHostException("bottom"))), null);
});
} |
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
if ( val == null ) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( TypeUtil.formatValue(val, false) );
}
} | @Test
void invokeBigDecimal() {
FunctionTestUtil.assertResult(stringFunction.invoke(BigDecimal.valueOf(10.7)), "10.7");
} |
@Override
public void debug(String msg) {
logger.debug(msg);
} | @Test
void testMarkerDebugWithFormat() {
jobRunrDashboardLogger.debug(marker, "Debug with {}", "format");
verify(slfLogger).debug(marker, "Debug with {}", "format");
} |
@Override
public SearchResult<String> searchWorkflows(
String workflowName, String status, int start, int count, List<String> options) {
return withMetricLogError(
() ->
getSearchIds(
GET_WORKFLOW_INSTANCE_IDS_STATEMENT_TEMPLATE,
workflowName,
status,
start,
count,
options),
"searchWorkflows",
"Failed searching workflows by workflow name {} with status {}",
workflowName,
status);
} | @Test
public void searchWorkflowsTest() {
SearchResult<String> result = dao.searchWorkflows(TEST_WORKFLOW_NAME, "RUNNING", 0, 10, null);
assertEquals(1, result.getTotalHits());
assertEquals(TEST_WORKFLOW_ID, result.getResults().get(0));
result = dao.searchWorkflows(TEST_WORKFLOW_ID, "RUNNING", 1, 10, null);
assertEquals(0, result.getTotalHits());
} |
public static boolean isCaseSensitiveCustomerId(final String customerId) {
return NEW_CUSTOMER_CASE_SENSISTIVE_PATTERN.matcher(customerId).matches();
} | @Test
public void testCaseInsensitiveNewCustomerIds() {
for (String validValue : CustomerIdExamples.VALID_CASE_INSENSISTIVE_NEW_CUSTOMER_IDS) {
assertFalse(validValue + " is case-sensitive customer ID.",
BaseSupportConfig.isCaseSensitiveCustomerId(validValue));
}
} |
public void commit() throws IOException {
if (completed) return;
completed = true;
// Move this object out of scope first, before saving; otherwise the save() method will do nothing.
pop();
saveable.save();
} | @Test
public void nestedBulkChange() throws Exception {
Point pt = new Point();
Point pt2 = new Point();
BulkChange bc1 = new BulkChange(pt);
try {
BulkChange bc2 = new BulkChange(pt2);
try {
BulkChange bc3 = new BulkChange(pt);
try {
pt.set(0, 0);
} finally {
bc3.commit();
}
} finally {
bc2.commit();
}
pt.set(0, 0);
} finally {
bc1.commit();
}
assertEquals(1, pt.saveCount);
} |
public void mergeExistingOpenIssue(DefaultIssue raw, DefaultIssue base) {
Preconditions.checkArgument(raw.isFromExternalRuleEngine() != (raw.type() == null), "At this stage issue type should be set for and only for external issues");
Rule rule = ruleRepository.getByKey(raw.ruleKey());
raw.setKey(base.key());
raw.setNew(false);
if (base.isChanged()) {
// In case issue was moved from module or folder to the root project
raw.setChanged(true);
}
setType(raw, rule);
setCleanCodeAttribute(raw, rule);
copyFields(raw, base);
base.changes().forEach(raw::addChange);
if (base.manualSeverity()) {
raw.setManualSeverity(true);
raw.setSeverity(base.severity());
} else {
updater.setPastSeverity(raw, base.severity(), changeContext);
}
// set component/module related fields from base in case current component has been moved
// (in which case base issue belongs to original file and raw issue to component)
raw.setComponentUuid(base.componentUuid());
raw.setComponentKey(base.componentKey());
// fields coming from raw
updater.setPastLine(raw, base.getLine());
updater.setPastLocations(raw, base.getLocations());
updater.setRuleDescriptionContextKey(raw, base.getRuleDescriptionContextKey().orElse(null));
updater.setPastMessage(raw, base.getMessage(), base.getMessageFormattings(), changeContext);
updater.setPastGap(raw, base.gap(), changeContext);
updater.setPastEffort(raw, base.effort(), changeContext);
updater.setCodeVariants(raw, requireNonNull(base.codeVariants()), changeContext);
updater.setImpacts(raw, base.impacts(), changeContext);
updater.setCleanCodeAttribute(raw, base.getCleanCodeAttribute(), changeContext);
updater.setPrioritizedRule(raw, base.isPrioritizedRule(), changeContext);
} | @Test
public void mergeExistingOpenIssue() {
DefaultIssue raw = new DefaultIssue()
.setNew(true)
.setKey("RAW_KEY")
.setRuleKey(XOO_X1)
.setRuleDescriptionContextKey("spring")
.setCleanCodeAttribute(CleanCodeAttribute.IDENTIFIABLE)
.setCodeVariants(Set.of("foo", "bar"))
.addImpact(SoftwareQuality.MAINTAINABILITY, Severity.HIGH)
.setCreationDate(parseDate("2015-10-01"))
.setUpdateDate(parseDate("2015-10-02"))
.setCloseDate(parseDate("2015-10-03"));
DbIssues.Locations issueLocations = DbIssues.Locations.newBuilder()
.setTextRange(DbCommons.TextRange.newBuilder()
.setStartLine(10)
.setEndLine(12)
.build())
.build();
DbIssues.MessageFormattings messageFormattings = DbIssues.MessageFormattings.newBuilder()
.addMessageFormatting(DbIssues.MessageFormatting
.newBuilder()
.setStart(13)
.setEnd(17)
.setType(DbIssues.MessageFormattingType.CODE)
.build())
.build();
DefaultIssue base = new DefaultIssue()
.setKey("BASE_KEY")
.setCreationDate(parseDate("2015-01-01"))
.setUpdateDate(parseDate("2015-01-02"))
.setCleanCodeAttribute(CleanCodeAttribute.FOCUSED)
.setResolution(RESOLUTION_FALSE_POSITIVE)
.setStatus(STATUS_RESOLVED)
.setSeverity(BLOCKER)
.setAssigneeUuid("base assignee uuid")
.setAssigneeLogin("base assignee login")
.setAuthorLogin("base author")
.setTags(newArrayList("base tag"))
.setOnDisabledRule(true)
.setSelectedAt(1000L)
.setLine(10)
.setMessage("message with code")
.setMessageFormattings(messageFormattings)
.setGap(15d)
.setRuleDescriptionContextKey("hibernate")
.setCodeVariants(Set.of("donut"))
.addImpact(SoftwareQuality.RELIABILITY, Severity.LOW)
.setEffort(Duration.create(15L))
.setManualSeverity(false)
.setLocations(issueLocations)
.addChange(new FieldDiffs().setDiff("foo", "bar", "donut"))
.addChange(new FieldDiffs().setDiff("file", "A", "B"));
when(debtCalculator.calculate(raw)).thenReturn(DEFAULT_DURATION);
underTest.mergeExistingOpenIssue(raw, base);
assertThat(raw.isNew()).isFalse();
assertThat(raw.key()).isEqualTo("BASE_KEY");
assertThat(raw.creationDate()).isEqualTo(base.creationDate());
assertThat(raw.updateDate()).isEqualTo(base.updateDate());
assertThat(raw.resolution()).isEqualTo(RESOLUTION_FALSE_POSITIVE);
assertThat(raw.status()).isEqualTo(STATUS_RESOLVED);
assertThat(raw.assignee()).isEqualTo("base assignee uuid");
assertThat(raw.assigneeLogin()).isEqualTo("base assignee login");
assertThat(raw.authorLogin()).isEqualTo("base author");
assertThat(raw.tags()).containsOnly("base tag");
assertThat(raw.codeVariants()).containsOnly("foo", "bar");
assertThat(raw.effort()).isEqualTo(DEFAULT_DURATION);
assertThat(raw.isOnDisabledRule()).isTrue();
assertThat(raw.selectedAt()).isEqualTo(1000L);
assertThat(raw.isChanged()).isFalse();
assertThat(raw.changes()).hasSize(2);
assertThat(raw.changes().get(0).diffs())
.containsOnly(entry("foo", new FieldDiffs.Diff<>("bar", "donut")));
assertThat(raw.changes().get(1).diffs())
.containsOnly(entry("file", new FieldDiffs.Diff<>("A", "B")));
assertThat(raw.impacts())
.containsEntry(SoftwareQuality.MAINTAINABILITY, Severity.HIGH);
verify(updater).setPastSeverity(raw, BLOCKER, issueChangeContext);
verify(updater).setPastLine(raw, 10);
verify(updater).setRuleDescriptionContextKey(raw, "hibernate");
verify(updater).setCodeVariants(raw, Set.of("donut"), issueChangeContext);
verify(updater).setPastMessage(raw, "message with code", messageFormattings, issueChangeContext);
verify(updater).setPastEffort(raw, Duration.create(15L), issueChangeContext);
verify(updater).setPastLocations(raw, issueLocations);
verify(updater).setCleanCodeAttribute(raw, CleanCodeAttribute.FOCUSED, issueChangeContext);
} |
@Override
public CompletionStage<Void> setAsync(K key, V value) {
return cache.putAsync(key, value);
} | @Test
public void testSetAsync() throws Exception {
cache.put(42, "oldValue");
Future<Void> future = adapter.setAsync(42, "newValue").toCompletableFuture();
Void oldValue = future.get();
assertNull(oldValue);
assertEquals("newValue", cache.get(42));
} |
static void process(int maxMessages, MessageFormatter formatter, ConsumerWrapper consumer, PrintStream output, boolean skipMessageOnError) {
while (messageCount < maxMessages || maxMessages == -1) {
ConsumerRecord<byte[], byte[]> msg;
try {
msg = consumer.receive();
} catch (WakeupException we) {
LOG.trace("Caught WakeupException because consumer is shutdown, ignore and terminate.");
// Consumer will be closed
return;
} catch (Throwable t) {
LOG.error("Error processing message, terminating consumer process: ", t);
// Consumer will be closed
return;
}
messageCount += 1;
try {
formatter.writeTo(new ConsumerRecord<>(msg.topic(), msg.partition(), msg.offset(), msg.timestamp(), msg.timestampType(),
0, 0, msg.key(), msg.value(), msg.headers(), Optional.empty()), output);
} catch (Throwable t) {
if (skipMessageOnError) {
LOG.error("Error processing message, skipping this message: ", t);
} else {
// Consumer will be closed
throw t;
}
}
if (checkErr(output)) {
// Consumer will be closed
return;
}
}
} | @Test
public void shouldResetUnConsumedOffsetsBeforeExit() throws IOException {
String topic = "test";
int maxMessages = 123;
int totalMessages = 700;
long startOffset = 0L;
MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
TopicPartition tp1 = new TopicPartition(topic, 0);
TopicPartition tp2 = new TopicPartition(topic, 1);
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", topic,
"--timeout-ms", "1000"
};
ConsoleConsumer.ConsumerWrapper consumer = new ConsoleConsumer.ConsumerWrapper(
new ConsoleConsumerOptions(args),
mockConsumer
);
mockConsumer.rebalance(Arrays.asList(tp1, tp2));
Map<TopicPartition, Long> offsets = new HashMap<>();
offsets.put(tp1, startOffset);
offsets.put(tp2, startOffset);
mockConsumer.updateBeginningOffsets(offsets);
for (int i = 0; i < totalMessages; i++) {
// add all records, each partition should have half of `totalMessages`
mockConsumer.addRecord(new ConsumerRecord<>(topic, i % 2, i / 2, "key".getBytes(), "value".getBytes()));
}
MessageFormatter formatter = mock(MessageFormatter.class);
ConsoleConsumer.process(maxMessages, formatter, consumer, System.out, false);
assertEquals(totalMessages, mockConsumer.position(tp1) + mockConsumer.position(tp2));
consumer.resetUnconsumedOffsets();
assertEquals(maxMessages, mockConsumer.position(tp1) + mockConsumer.position(tp2));
verify(formatter, times(maxMessages)).writeTo(any(), any());
consumer.cleanup();
} |
@Override
public ChannelFuture writeRstStream(
ChannelHandlerContext ctx, int streamId, long errorCode, ChannelPromise promise) {
ChannelPromise newPromise = handleOutstandingControlFrames(ctx, promise);
if (newPromise == null) {
return promise;
}
return super.writeRstStream(ctx, streamId, errorCode, newPromise);
} | @Test
public void testLimitRst() {
assertFalse(encoder.writeRstStream(ctx, 1, CANCEL.code(), newPromise()).isDone());
// The second write is always marked as success by our mock, which means it will also not be queued and so
// does not count toward the number of queued frames.
assertTrue(encoder.writeRstStream(ctx, 1, CANCEL.code(), newPromise()).isSuccess());
assertFalse(encoder.writeRstStream(ctx, 1, CANCEL.code(), newPromise()).isDone());
verifyFlushAndClose(0, false);
assertFalse(encoder.writeRstStream(ctx, 1, CANCEL.code(), newPromise()).isDone());
assertFalse(encoder.writeRstStream(ctx, 1, CANCEL.code(), newPromise()).isDone());
verifyFlushAndClose(1, true);
} |
@Override
public void shutdown(long awaitTerminateMillis) {
this.stopped = true;
this.scheduledExecutorService.shutdown();
ThreadUtils.shutdownGracefully(this.consumeExecutor, awaitTerminateMillis, TimeUnit.MILLISECONDS);
if (MessageModel.CLUSTERING.equals(this.defaultMQPushConsumerImpl.messageModel())) {
this.unlockAllMessageQueues();
}
} | @Test
public void testShutdown() throws IllegalAccessException {
popService.shutdown(3000L);
Field scheduledExecutorServiceField = FieldUtils.getDeclaredField(popService.getClass(), "scheduledExecutorService", true);
Field consumeExecutorField = FieldUtils.getDeclaredField(popService.getClass(), "consumeExecutor", true);
ScheduledExecutorService scheduledExecutorService = (ScheduledExecutorService) scheduledExecutorServiceField.get(popService);
ThreadPoolExecutor consumeExecutor = (ThreadPoolExecutor) consumeExecutorField.get(popService);
assertTrue(scheduledExecutorService.isShutdown());
assertTrue(scheduledExecutorService.isTerminated());
assertTrue(consumeExecutor.isShutdown());
assertTrue(consumeExecutor.isTerminated());
} |
public synchronized void insertJobEntryDatabase( ObjectId id_job, ObjectId id_jobentry, ObjectId id_database ) throws KettleException {
// First check if the relationship is already there.
// There is no need to store it twice!
RowMetaAndData check = getJobEntryDatabase( id_jobentry );
if ( check.getInteger( 0 ) == null ) {
RowMetaAndData table = new RowMetaAndData();
table.addValue( new ValueMetaInteger(
KettleDatabaseRepository.FIELD_JOBENTRY_DATABASE_ID_JOB ), id_job );
table.addValue(
new ValueMetaInteger(
KettleDatabaseRepository.FIELD_JOBENTRY_DATABASE_ID_JOBENTRY ),
id_jobentry );
table.addValue(
new ValueMetaInteger(
KettleDatabaseRepository.FIELD_JOBENTRY_DATABASE_ID_DATABASE ),
id_database );
connectionDelegate.insertTableRow( KettleDatabaseRepository.TABLE_R_JOBENTRY_DATABASE, table );
}
} | @Test
public void testInsertJobEntryDatabase() throws KettleException {
doReturn( getNullIntegerRow() ).when( repo.connectionDelegate ).getOneRow(
anyString(), anyString(), any( ObjectId.class ) );
ArgumentCaptor<String> argumentTableName = ArgumentCaptor.forClass( String.class );
ArgumentCaptor<RowMetaAndData> argumentTableData = ArgumentCaptor.forClass( RowMetaAndData.class );
doNothing().when( repo.connectionDelegate ).insertTableRow( argumentTableName.capture(), argumentTableData.capture() );
repo.insertJobEntryDatabase( new LongObjectId( 234 ), new LongObjectId( 345 ), new LongObjectId( 456 ) );
RowMetaAndData insertRecord = argumentTableData.getValue();
assertEquals( KettleDatabaseRepository.TABLE_R_JOBENTRY_DATABASE, argumentTableName.getValue() );
assertEquals( 3, insertRecord.size() );
assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 0 ).getType() );
assertEquals( KettleDatabaseRepository.FIELD_JOBENTRY_DATABASE_ID_JOB, insertRecord.getValueMeta( 0 ).getName() );
assertEquals( Long.valueOf( 234 ), insertRecord.getInteger( 0 ) );
assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 1 ).getType() );
assertEquals( KettleDatabaseRepository.FIELD_JOBENTRY_DATABASE_ID_JOBENTRY, insertRecord.getValueMeta( 1 ).getName() );
assertEquals( Long.valueOf( 345 ), insertRecord.getInteger( 1 ) );
assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 2 ).getType() );
assertEquals( KettleDatabaseRepository.FIELD_JOBENTRY_DATABASE_ID_DATABASE, insertRecord.getValueMeta( 2 ).getName() );
assertEquals( Long.valueOf( 456 ), insertRecord.getInteger( 2 ) );
} |
public static String[] parseUri(String uri) {
return doParseUri(uri, false);
} | @Test
public void testParseEmptyQuery() {
String[] out1 = CamelURIParser.parseUri("file:relative");
assertEquals("file", out1[0]);
assertEquals("relative", out1[1]);
assertNull(out1[2]);
String[] out2 = CamelURIParser.parseUri("file:relative?");
assertEquals("file", out2[0]);
assertEquals("relative", out2[1]);
assertNull(out2[2]);
} |
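Judging from the assertions above, parseUri returns a {scheme, path, query} triple, with a missing or empty query reported as null. A hedged usage sketch:
String[] parts = CamelURIParser.parseUri("timer:foo?period=5000");
// parts[0] == "timer", parts[1] == "foo", parts[2] == "period=5000"
// "file:relative" and "file:relative?" both leave parts[2] null, as tested above.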
@Override
public PageResult<TenantDO> getTenantPage(TenantPageReqVO pageReqVO) {
return tenantMapper.selectPage(pageReqVO);
} | @Test
public void testGetTenantPage() {
// mock data
TenantDO dbTenant = randomPojo(TenantDO.class, o -> { // the record the query below should find
o.setName("芋道源码");
o.setContactName("芋艿");
o.setContactMobile("15601691300");
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
o.setCreateTime(buildTime(2020, 12, 12));
});
tenantMapper.insert(dbTenant);
// test: name does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setName(randomString())));
// test: contactName does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setContactName(randomString())));
// test: contactMobile does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setContactMobile(randomString())));
// test: status does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
// test: createTime does not match
tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setCreateTime(buildTime(2021, 12, 12))));
// prepare parameters
TenantPageReqVO reqVO = new TenantPageReqVO();
reqVO.setName("芋道");
reqVO.setContactName("艿");
reqVO.setContactMobile("1560");
reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
reqVO.setCreateTime(buildBetweenTime(2020, 12, 1, 2020, 12, 24));
// invoke
PageResult<TenantDO> pageResult = tenantService.getTenantPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbTenant, pageResult.getList().get(0));
} |
public void removeTemplateNamed(CaseInsensitiveString name) {
PipelineTemplateConfig toBeRemoved = null;
for (PipelineTemplateConfig templateConfig : this) {
if (templateConfig.matches(name)) {
toBeRemoved = templateConfig;
}
}
this.remove(toBeRemoved);
} | @Test
public void shouldIgnoreTryingToRemoveNonExistentTemplate() {
TemplatesConfig templates = new TemplatesConfig(template("template1"), template("template2"));
templates.removeTemplateNamed(new CaseInsensitiveString("sachin"));
assertThat(templates.size(), is(2));
} |
@Override
public void deleteDataSourceConfig(Long id) {
// validate that the config exists
validateDataSourceConfigExists(id);
// delete it
dataSourceConfigMapper.deleteById(id);
} | @Test
public void testDeleteDataSourceConfig_notExists() {
// prepare parameters
Long id = randomLongId();
// invoke and assert the exception
assertServiceException(() -> dataSourceConfigService.deleteDataSourceConfig(id), DATA_SOURCE_CONFIG_NOT_EXISTS);
} |
@Override
public ServerLoaderInfoResponse handle(ServerLoaderInfoRequest request, RequestMeta meta) throws NacosException {
ServerLoaderInfoResponse serverLoaderInfoResponse = new ServerLoaderInfoResponse();
serverLoaderInfoResponse.putMetricsValue("conCount", String.valueOf(connectionManager.currentClientsCount()));
Map<String, String> filter = new HashMap<>(2);
filter.put(RemoteConstants.LABEL_SOURCE, RemoteConstants.LABEL_SOURCE_SDK);
serverLoaderInfoResponse
.putMetricsValue("sdkConCount", String.valueOf(connectionManager.currentClientsCount(filter)));
serverLoaderInfoResponse.putMetricsValue("load", String.valueOf(EnvUtil.getLoad()));
serverLoaderInfoResponse.putMetricsValue("cpu", String.valueOf(EnvUtil.getCpu()));
return serverLoaderInfoResponse;
} | @Test
void testHandle() {
Mockito.when(connectionManager.currentClientsCount()).thenReturn(1);
Mockito.when(connectionManager.currentClientsCount(Mockito.any())).thenReturn(1);
ServerLoaderInfoRequest request = new ServerLoaderInfoRequest();
RequestMeta meta = new RequestMeta();
try {
ServerLoaderInfoResponse response = handler.handle(request, meta);
String sdkConCount = response.getMetricsValue("sdkConCount");
assertEquals("1", sdkConCount);
} catch (NacosException e) {
e.printStackTrace();
fail(e.getMessage());
}
} |
@Override
public boolean hasFooter() {
switch (super.getVersion()) {
case DEFAULT_VERSION:
return false;
case 1:
return true;
default:
return false;
}
} | @Test
public void testHasFooter() {
assertFalse(verDefault.hasFooter());
assertTrue(verCurrent.hasFooter());
HoodieLogFormatVersion verNew =
new HoodieLogFormatVersion(HoodieLogFormat.CURRENT_VERSION + 1);
assertFalse(verNew.hasFooter());
} |
@Override
public void fulfillFinishedTaskStatus(Map<OperatorID, OperatorState> operatorStates) {
if (!mayHaveFinishedTasks) {
return;
}
Map<JobVertexID, ExecutionJobVertex> partlyFinishedVertex = new HashMap<>();
for (Execution task : finishedTasks) {
JobVertexID jobVertexId = task.getVertex().getJobvertexId();
if (!fullyFinishedOrFinishedOnRestoreVertices.containsKey(jobVertexId)) {
partlyFinishedVertex.put(jobVertexId, task.getVertex().getJobVertex());
}
}
checkNoPartlyFinishedVertexUsedUnionListState(partlyFinishedVertex, operatorStates);
checkNoPartlyOperatorsFinishedVertexUsedUnionListState(
partlyFinishedVertex, operatorStates);
fulfillFullyFinishedOrFinishedOnRestoreOperatorStates(operatorStates);
fulfillSubtaskStateForPartiallyFinishedOperators(operatorStates);
} | @Test
void testFulfillFullyFinishedStatesWithCoordinator() throws Exception {
JobVertexID finishedJobVertexID = new JobVertexID();
OperatorID finishedOperatorID = new OperatorID();
ExecutionGraph executionGraph =
new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
.addJobVertex(
finishedJobVertexID,
1,
256,
Collections.singletonList(
OperatorIDPair.generatedIDOnly(finishedOperatorID)),
true)
.build(EXECUTOR_EXTENSION.getExecutor());
executionGraph
.getJobVertex(finishedJobVertexID)
.getTaskVertices()[0]
.getCurrentExecutionAttempt()
.markFinished();
CheckpointPlan checkpointPlan = createCheckpointPlan(executionGraph);
Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
OperatorState operatorState = new OperatorState(finishedOperatorID, 1, 256);
operatorState.setCoordinatorState(new TestingStreamStateHandle());
operatorStates.put(finishedOperatorID, operatorState);
checkpointPlan.fulfillFinishedTaskStatus(operatorStates);
assertThat(operatorStates).hasSize(1);
assertThat(operatorStates.get(finishedOperatorID).isFullyFinished()).isTrue();
} |
public void registerUrl( String urlString ) {
if ( urlString == null || addedAllClusters ) {
return; // We got no URL, or all clusters were already added, so there is nothing to do.
}
if ( urlString.startsWith( VARIABLE_START ) ) {
addAllClusters();
}
Pattern r = Pattern.compile( URL_PATTERN );
Matcher m = r.matcher( urlString );
if ( m.find() ) {
String protocol = m.group( PARSE_URL_SCHEME );
String clusterName = m.group( PARSE_URL_AUTHORITY );
if ( "hc".equals( protocol ) ) {
if ( clusterName.startsWith( VARIABLE_START ) ) {
addAllClusters();
}
addClusterToMeta( clusterName );
}
}
} | @Test
public void testRegisterUrlNotNc() throws Exception {
namedClusterEmbedManager.registerUrl( "hdfs://" + CLUSTER1_NAME + "/dir1/dir2" );
verify( mockMetaStoreFactory, never() ).saveElement( any() );
} |
public static List<String> buildList(Object propertyValue, String listSeparator)
{
List<String> valueList = new ArrayList<>();
if (propertyValue != null)
{
if (propertyValue instanceof List<?>)
{
@SuppressWarnings("unchecked")
List<String> list = (List<String>)propertyValue;
valueList.addAll(list);
}
else
{
// list was expressed as a String in the config
String propertyValueString = (String)propertyValue;
if (listSeparator == null)
{
throw new IllegalArgumentException("The separator cannot be null!");
}
for (String value: propertyValueString.split(listSeparator))
{
if (!value.isEmpty())
{
valueList.add(value.trim());
}
}
}
}
return valueList;
} | @Test
public void testStringObject()
{
List<String> actualList = ConfigValueExtractor.buildList("foo, bar, baz", ",");
List<String> expectedList = Arrays.asList(new String[]{"foo", "bar", "baz"});
Assert.assertEquals(expectedList, actualList);
} |
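A usage sketch covering both input shapes buildList accepts (values are illustrative):
// String input: split on the separator, skip empty segments, trim each value.
List<String> fromString = ConfigValueExtractor.buildList("a; b;;c", ";"); // ["a", "b", "c"]
// List input: copied through unchanged; the separator is never consulted.
List<String> fromList = ConfigValueExtractor.buildList(Arrays.asList("x", "y"), null); // ["x", "y"]
// A String input combined with a null separator throws IllegalArgumentException.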
public static List<SourceToTargetMapping> getCurrentMappings( List<String> sourceFields, List<String> targetFields, List<MappingValueRename> mappingValues ) {
List<SourceToTargetMapping> sourceToTargetMapping = new ArrayList<>( );
if ( sourceFields == null || targetFields == null || mappingValues == null ) {
return sourceToTargetMapping;
}
if ( !mappingValues.isEmpty() ) {
for ( MappingValueRename mappingValue : mappingValues ) {
String source = mappingValue.getSourceValueName();
String target = mappingValue.getTargetValueName();
int sourceIndex = sourceFields.indexOf( source );
int targetIndex = targetFields.indexOf( target );
sourceToTargetMapping.add( new SourceToTargetMapping( sourceIndex, targetIndex ) );
}
}
return sourceToTargetMapping;
} | @Test
public void getCurrentMapping() {
List<SourceToTargetMapping> currentMapping = MappingUtil.getCurrentMappings( sourceFields, targetFields, mappingValues );
assertEquals( 2, currentMapping.size() );
assertEquals( currentMapping.get( 0 ).getSourcePosition(), sourceFields.indexOf( "source1" ) );
assertEquals( currentMapping.get( 0 ).getTargetPosition(), targetFields.indexOf( "target2" ) );
assertEquals( currentMapping.get( 1 ).getSourcePosition(), sourceFields.indexOf( "source3" ) );
assertEquals( currentMapping.get( 1 ).getTargetPosition(), targetFields.indexOf( "target1" ) );
} |
public static boolean isAwsHostname(final String hostname) {
return isAwsHostname(hostname, true);
} | @Test
public void testAwsHostnames() {
assertFalse(S3Session.isAwsHostname("play.min.io"));
assertTrue(S3Session.isAwsHostname("test-eu-west-3-cyberduck.s3.amazonaws.com"));
assertTrue(S3Session.isAwsHostname("s3.dualstack.eu-west-3.amazonaws.com"));
assertTrue(S3Session.isAwsHostname("test-eu-west-3-cyberduck.s3.dualstack.eu-west-3.amazonaws.com"));
assertTrue(S3Session.isAwsHostname("s3.amazonaws.com"));
assertTrue(S3Session.isAwsHostname("s3.amazonaws.com.cn"));
assertFalse(S3Session.isAwsHostname("s3.amazonaws.com.cn", false));
assertTrue(S3Session.isAwsHostname("s3.cn-north-1.amazonaws.com.cn"));
assertFalse(S3Session.isAwsHostname("s3.cn-north-1.amazonaws.com.cn", false));
assertTrue(S3Session.isAwsHostname("vpce-0971cacd1f2.s3.eu-west-1.vpce.amazonaws.com"));
} |
@Override
public void unlock() {
unlockInner(locks);
} | @Test
public void testLockSuccess2() throws IOException, InterruptedException {
RedisProcess redis1 = redisTestMultilockInstance();
RedisProcess redis2 = redisTestMultilockInstance();
RedissonClient client1 = createClient(redis1.getRedisServerAddressAndPort());
RedissonClient client2 = createClient(redis2.getRedisServerAddressAndPort());
RLock lock1 = client1.getLock("lock1");
RLock lock2 = client1.getLock("lock2");
RLock lock3 = client2.getLock("lock3");
Thread t1 = new Thread() {
public void run() {
lock2.lock();
};
};
t1.start();
t1.join();
RedissonMultiLock lock = new RedissonRedLock(lock1, lock2, lock3);
assertThat(lock.tryLock(500, 5000, TimeUnit.MILLISECONDS)).isTrue();
Thread.sleep(3000);
lock.unlock();
client1.shutdown();
client2.shutdown();
assertThat(redis1.stop()).isEqualTo(0);
assertThat(redis2.stop()).isEqualTo(0);
} |
public ElasticAgentInformationDTO getElasticAgentInformationDTO(ElasticAgentInformation elasticAgentInformation) {
return elasticAgentInformationConverterV5.toDTO(elasticAgentInformation);
} | @Test
public void shouldGetRequestBodyForMigrateCall_withNewConfig() throws CryptoException {
ConfigurationProperty property1 = new ConfigurationProperty(new ConfigurationKey("key"), new ConfigurationValue("value"));
ConfigurationProperty property2 = new ConfigurationProperty(new ConfigurationKey("key2"), new EncryptedConfigurationValue(new GoCipher().encrypt("password")));
Configuration configuration = new Configuration();
configuration.add(property1);
configuration.add(property2);
Map<String, String> pluginSettings = configuration.getConfigurationAsMap(true);
List<ClusterProfile> clusterProfiles = new ArrayList<>();
clusterProfiles.add(new ClusterProfile("cluster_profile_id", "plugin_id", new ConfigurationProperty(new ConfigurationKey("some_key"), new ConfigurationValue("some_value")), new ConfigurationProperty(new ConfigurationKey("some_key2"), new EncryptedConfigurationValue(new GoCipher().encrypt("some_value2")))));
List<ElasticProfile> elasticAgentProfiles = new ArrayList<>();
elasticAgentProfiles.add(new ElasticProfile("profile_id", "cluster_profile_id", new ConfigurationProperty(new ConfigurationKey("some_key"), new ConfigurationValue("some_value")), new ConfigurationProperty(new ConfigurationKey("some_key2"), new EncryptedConfigurationValue(new GoCipher().encrypt("some_value2")))));
ElasticAgentInformation elasticAgentInformation = new ElasticAgentInformation(pluginSettings, clusterProfiles, elasticAgentProfiles);
ElasticAgentInformationDTO elasticAgentInformationDTO = new ElasticAgentExtensionConverterV5().getElasticAgentInformationDTO(elasticAgentInformation);
String requestBody = elasticAgentInformationDTO.toJSON().toString();
String expectedRequestBody = "{" +
" \"plugin_settings\":{" +
" \"key2\":\"password\", " +
" \"key\":\"value\"" +
" }," +
" \"cluster_profiles\":[" +
" {" +
" \"id\":\"cluster_profile_id\"," +
" \"plugin_id\":\"plugin_id\"," +
" \"properties\":{" +
" \"some_key\":\"some_value\"," +
" \"some_key2\":\"some_value2\"" +
" }" +
" }" +
" ]," +
" \"elastic_agent_profiles\":[" +
" {" +
" \"id\":\"profile_id\"," +
" \"plugin_id\":\"plugin_id\"," +
" \"cluster_profile_id\":\"cluster_profile_id\"," +
" \"properties\":{" +
" \"some_key\":\"some_value\", " +
" \"some_key2\":\"some_value2\"" +
" }" +
" }" +
" ]" +
"}\n";
assertThatJson(expectedRequestBody).isEqualTo(requestBody);
} |
public String getQueueName(final Exchange exchange) {
return getOption(QueueExchangeHeaders::getQueueNameFromHeaders, configuration::getQueueName, exchange);
} | @Test
public void testIfCorrectOptionsReturnedCorrectly() {
final QueueConfiguration configuration = new QueueConfiguration();
// first case: when exchange is set
final Exchange exchange = new DefaultExchange(context);
final QueueConfigurationOptionsProxy configurationOptionsProxy = new QueueConfigurationOptionsProxy(configuration);
exchange.getIn().setHeader(QueueConstants.QUEUE_NAME, "testQueueExchange");
configuration.setQueueName("testQueueConfig");
assertEquals("testQueueExchange", configurationOptionsProxy.getQueueName(exchange));
// second case: exchange header is empty
exchange.getIn().setHeader(QueueConstants.QUEUE_NAME, null);
assertEquals("testQueueConfig", configurationOptionsProxy.getQueueName(exchange));
// third case: exchange is null
assertEquals("testQueueConfig", configurationOptionsProxy.getQueueName(null));
// fourth case: no option is set at all
configuration.setQueueName(null);
assertNull(configurationOptionsProxy.getQueueName(exchange));
} |
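The proxy's getOption helper is not shown above; this is a minimal sketch consistent with the four cases in the test (header wins, then configuration, possibly null), assuming java.util.Optional and java.util.function — the real Camel implementation may differ.
private <R> R getOption(final Function<Exchange, R> exchangeFn, final Supplier<R> fallbackFn, final Exchange exchange) {
    // Prefer the value extracted from the exchange (e.g. a header); if the
    // exchange is null or the extractor yields null, fall back to the
    // configured value, which may itself be null (fourth case in the test).
    return Optional.ofNullable(exchange)
            .map(exchangeFn)
            .orElseGet(fallbackFn);
}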
public static <T> T[] checkNonEmpty(T[] array, String name) {
// Reuse checkNotNull for the null check; it avoids String concatenation.
if (checkNotNull(array, name).length == 0) {
throw new IllegalArgumentException("Param '" + name + "' must not be empty");
}
return array;
} | @Test
public void testCheckNonEmptyTString() {
Exception actualEx = null;
try {
ObjectUtil.checkNonEmpty((Object[]) NULL_OBJECT, NULL_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof NullPointerException, TEST_RESULT_EXTYPE_NOK);
actualEx = null;
try {
ObjectUtil.checkNonEmpty((Object[]) NON_NULL_FILLED_OBJECT_ARRAY, NON_NULL_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNull(actualEx, TEST_RESULT_NULLEX_NOK);
actualEx = null;
try {
ObjectUtil.checkNonEmpty((Object[]) NON_NULL_EMPTY_OBJECT_ARRAY, NON_NULL_EMPTY_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
} |
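checkNonEmpty delegates its null handling to checkNotNull, which is why the first test case expects a NullPointerException rather than an IllegalArgumentException; a sketch of that helper, consistent with Netty's ObjectUtil conventions:
public static <T> T checkNotNull(T arg, String text) {
    // Throw with the parameter name as-is; the message is not concatenated.
    if (arg == null) {
        throw new NullPointerException(text);
    }
    return arg;
}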
public static Set<X509Certificate> filterValid( X509Certificate... certificates )
{
final Set<X509Certificate> results = new HashSet<>();
if (certificates != null)
{
for ( X509Certificate certificate : certificates )
{
if ( certificate == null )
{
continue;
}
try
{
certificate.checkValidity();
}
catch ( CertificateExpiredException | CertificateNotYetValidException e )
{
// Not yet or no longer valid. Don't include in result.
continue;
}
results.add( certificate );
}
}
return results;
} | @Test
public void testFilterValidWithMixOfValidityAndDuplicates() throws Exception
{
// Setup fixture.
final X509Certificate validA = KeystoreTestUtils.generateValidCertificate().getCertificate();
final X509Certificate validB = KeystoreTestUtils.generateValidCertificate().getCertificate();
final X509Certificate invalid = KeystoreTestUtils.generateExpiredCertificate().getCertificate();
final Collection<X509Certificate> input = new ArrayList<>();
input.add( validA );
input.add( validA );
input.add( validB );
input.add( invalid );
// Execute system under test.
final Collection<X509Certificate> result = CertificateUtils.filterValid( input );
// Verify results.
assertEquals( 2, result.size() );
assertTrue( result.contains( validA ) );
assertTrue( result.contains( validB ) );
} |
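The test passes a Collection where the focal method takes varargs, so a companion overload is presumably in play; a hypothetical sketch that delegates to the varargs variant:
public static Set<X509Certificate> filterValid( Collection<X509Certificate> certificates )
{
    // Hypothetical overload assumed by the test above; treats null input as
    // an empty result and delegates to the varargs method.
    if ( certificates == null )
    {
        return new HashSet<>();
    }
    return filterValid( certificates.toArray( new X509Certificate[ 0 ] ) );
}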
public void isAbsent() {
if (actual == null) {
failWithActual(simpleFact("expected absent optional"));
} else if (actual.isPresent()) {
failWithoutActual(
simpleFact("expected to be absent"), fact("but was present with value", actual.get()));
}
} | @Test
public void isAbsent() {
assertThat(Optional.absent()).isAbsent();
} |
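An illustrative counterpart for the failing direction — a present optional trips the second branch of isAbsent(). Catching AssertionError directly is just to keep the sketch self-contained; real Truth tests would typically use ExpectFailure instead.
@Test
public void isAbsentFailingCase() {
    try {
        assertThat(Optional.of("value")).isAbsent();
        fail("Expected an assertion failure for a present optional");
    } catch (AssertionError expected) {
        // expected: "expected to be absent ... but was present with value"
    }
}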
@Override
public boolean match(Message msg, StreamRule rule) {
Double msgVal = getDouble(msg.getField(rule.getField()));
if (msgVal == null) {
return false;
}
Double ruleVal = getDouble(rule.getValue());
if (ruleVal == null) {
return false;
}
return rule.getInverted() ^ (msgVal > ruleVal);
} | @Test
public void testMissedDoubleMatch() {
StreamRule rule = getSampleRule();
rule.setValue("25");
Message msg = getSampleMessage();
msg.addField("something", "12.4");
StreamRuleMatcher matcher = getMatcher(rule);
assertFalse(matcher.match(msg, rule));
} |
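The `rule.getInverted() ^ ...` expression is the subtle part of match(); a hypothetical companion case showing how inversion flips the outcome, assuming the rule fixture exposes setInverted:
@Test
public void testInvertedDoubleMatch() {
    // Same rule and message as above, but inverted: 12.4 > 25 is false,
    // so XOR with inverted=true yields a match.
    StreamRule rule = getSampleRule();
    rule.setValue("25");
    rule.setInverted(true);
    Message msg = getSampleMessage();
    msg.addField("something", "12.4");
    StreamRuleMatcher matcher = getMatcher(rule);
    assertTrue(matcher.match(msg, rule));
}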
public static boolean matches(MetricsFilter filter, MetricKey key) {
if (filter == null) {
return true;
}
@Nullable String stepName = key.stepName();
if (stepName == null) {
if (!filter.steps().isEmpty()) {
// The filter specifies steps, but the metric is not associated with a step.
return false;
}
} else if (!matchesScope(stepName, filter.steps())) {
// The filter specifies steps that differ from the metric's step
return false;
}
// The filter's steps match the metric's step.
return matchesName(key.metricName(), filter.names());
} | @Test
public void testMatchCompositeStepNameFilters() {
// MetricsFilter with a Class-namespace + name filter + step filter.
// Successful match.
assertTrue(
MetricFiltering.matches(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.named(MetricFilteringTest.class, "myMetricName"))
.addStep("myStep")
.build(),
MetricKey.create(
"myBigStep/myStep", MetricName.named(MetricFilteringTest.class, "myMetricName"))));
// Unsuccessful match.
assertFalse(
MetricFiltering.matches(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.named(MetricFilteringTest.class, "myMetricName"))
.addStep("myOtherStep")
.build(),
MetricKey.create(
"myOtherStepNoMatch/myStep",
MetricName.named(MetricFilteringTest.class, "myMetricName"))));
} |
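A degenerate case worth pinning down from the first branch of matches(): a null filter matches any key. A quick illustrative check (namespace and names arbitrary):
@Test
public void testNullFilterMatchesEverything() {
    // Grounded in the method above: no filter means no constraint.
    assertTrue(
        MetricFiltering.matches(
            null, MetricKey.create("anyStep", MetricName.named("anyNamespace", "anyName"))));
}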
public void onStatsPersistMsg(StatsPersistMsg msg) {
if (msg.isEmpty()) {
return;
}
systemContext.getEventService().saveAsync(StatisticsEvent.builder()
.tenantId(msg.getTenantId())
.entityId(msg.getEntityId().getId())
.serviceId(systemContext.getServiceInfoProvider().getServiceId())
.messagesProcessed(msg.getMessagesProcessed())
.errorsOccurred(msg.getErrorsOccurred())
.build()
);
} | @Test
void givenNonEmptyStatsMessage_whenOnStatsPersistMsg_thenSaveEvent() {
statsActor.onStatsPersistMsg(new StatsPersistMsg(0, 1, TenantId.SYS_TENANT_ID, TenantId.SYS_TENANT_ID));
verify(eventService, times(1)).saveAsync(any(Event.class));
statsActor.onStatsPersistMsg(new StatsPersistMsg(1, 0, TenantId.SYS_TENANT_ID, TenantId.SYS_TENANT_ID));
verify(eventService, times(2)).saveAsync(any(Event.class));
statsActor.onStatsPersistMsg(new StatsPersistMsg(1, 1, TenantId.SYS_TENANT_ID, TenantId.SYS_TENANT_ID));
verify(eventService, times(3)).saveAsync(any(Event.class));
} |
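The test relies on wiring that is not shown; a hypothetical setup sketch in which the system context, event service, and service-info provider are mocked. Everything here beyond what the focal method reads is an assumption.
@BeforeEach
void setUp() {
    // Hypothetical wiring: the focal method only needs the event service and
    // a service id from the system context.
    systemContext = mock(ActorSystemContext.class);
    eventService = mock(EventService.class);
    serviceInfoProvider = mock(TbServiceInfoProvider.class);
    when(systemContext.getEventService()).thenReturn(eventService);
    when(systemContext.getServiceInfoProvider()).thenReturn(serviceInfoProvider);
    when(serviceInfoProvider.getServiceId()).thenReturn("test-service");
    // statsActor construction omitted: its constructor shape is not shown above.
}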
public RequestHandler getByRequestType(String requestType) {
return registryHandlers.get(requestType);
} | @Test
void testGetByRequestType() {
assertNotNull(registry.getByRequestType(HealthCheckRequest.class.getSimpleName()));
} |
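A hypothetical sketch of the fixture the test assumes — a registry populated with a health-check handler keyed by the request type's simple name. The registration call is a placeholder, not the library's actual API, which may discover handlers differently (e.g. via component scanning).
@BeforeEach
void setUp() {
    registry = new RequestHandlerRegistry();
    // Placeholder registration keyed by the request type's simple name.
    registry.register(HealthCheckRequest.class.getSimpleName(), new HealthCheckRequestHandler());
}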
public boolean consume(TokenQueue tokenQueue, List<Statement> statements) {
Token nextToken = tokenQueue.peek();
while (nextToken != null) {
boolean channelConsumed = false;
for (StatementChannel channel : channels) {
if (channel.consume(tokenQueue, statements)) {
channelConsumed = true;
break;
}
}
if (!channelConsumed) {
throw new IllegalStateException("None of the statement channel has been able to consume token: " + nextToken);
}
nextToken = tokenQueue.peek();
}
return true;
} | @Test
public void shouldConsume() {
TokenMatcher tokenMatcher = mock(TokenMatcher.class);
when(tokenMatcher.matchToken(any(TokenQueue.class), anyList())).thenReturn(true);
StatementChannel channel = StatementChannel.create(tokenMatcher);
StatementChannelDisptacher dispatcher = new StatementChannelDisptacher(asList(channel));
TokenQueue tokenQueue = mock(TokenQueue.class);
when(tokenQueue.peek()).thenReturn(new Token("a", 1, 0)).thenReturn(null);
List<Statement> statements = mock(List.class);
assertThat(dispatcher.consume(tokenQueue, statements), is(true));
verify(tokenQueue, times(2)).peek();
verifyNoMoreInteractions(tokenQueue);
verifyNoMoreInteractions(statements);
} |
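A complementary sketch for the failure path: if no channel consumes the pending token, consume() throws. This assumes StatementChannel.consume returns false when its matcher rejects the token.
@Test
public void shouldFailWhenNoChannelConsumes() {
    TokenMatcher tokenMatcher = mock(TokenMatcher.class);
    when(tokenMatcher.matchToken(any(TokenQueue.class), anyList())).thenReturn(false);
    StatementChannel channel = StatementChannel.create(tokenMatcher);
    StatementChannelDisptacher dispatcher = new StatementChannelDisptacher(asList(channel));
    TokenQueue tokenQueue = mock(TokenQueue.class);
    when(tokenQueue.peek()).thenReturn(new Token("a", 1, 0));
    List<Statement> statements = mock(List.class);
    try {
        dispatcher.consume(tokenQueue, statements);
        fail("Expected IllegalStateException");
    } catch (IllegalStateException e) {
        // expected: no channel was able to consume the token
    }
}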
public T multiply(BigDecimal multiplier) {
return create(value.multiply(multiplier));
} | @Test
void testMultiply() {
final Resource resource = new TestResource(0.3);
final BigDecimal by = BigDecimal.valueOf(0.2);
assertTestResourceValueEquals(0.06, resource.multiply(by));
} |
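The TestResource fixture and value assertion are not shown; a hypothetical sketch of the assertion, assuming Resource exposes its BigDecimal via getValue():
private static void assertTestResourceValueEquals(final double expected, final Resource actual) {
    // Hypothetical helper: compare on the numeric value with a small delta to
    // sidestep BigDecimal scale differences (0.06 vs 0.060).
    assertEquals(expected, actual.getValue().doubleValue(), 1e-9);
}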
@Override
public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final String location = new StoregateWriteFeature(session, fileid).start(file, status);
final MultipartOutputStream proxy = new MultipartOutputStream(location, file, status);
return new HttpResponseOutputStream<File>(new MemorySegementingOutputStream(proxy,
new HostPreferences(session.getHost()).getInteger("storegate.upload.multipart.chunksize")),
new StoregateAttributesFinderFeature(session, fileid), status) {
@Override
public File getStatus() {
return proxy.getResult();
}
};
} | @Test
public void testWriteSingleByte() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final byte[] content = RandomUtils.nextBytes(1);
final TransferStatus status = new TransferStatus().withLength(content.length);
final Path test = new Path(room, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final StoregateMultipartWriteFeature writer = new StoregateMultipartWriteFeature(session, nodeid);
final HttpResponseOutputStream<File> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
final String version = out.getStatus().getId();
assertNotNull(version);
assertTrue(new DefaultFindFeature(session).find(test));
new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
static public boolean areOnSameFileStore(File a, File b) throws RolloverFailure {
if (!a.exists()) {
throw new IllegalArgumentException("File [" + a + "] does not exist.");
}
if (!b.exists()) {
throw new IllegalArgumentException("File [" + b + "] does not exist.");
}
// Compare the underlying file stores via the java.nio.file API.
try {
Path pathA = a.toPath();
Path pathB = b.toPath();
FileStore fileStoreA = Files.getFileStore(pathA);
FileStore fileStoreB = Files.getFileStore(pathB);
return fileStoreA.equals(fileStoreB);
} catch (Exception e) {
throw new RolloverFailure("Failed to check file store equality for [" + a + "] and [" + b + "]", e);
}
} | @Test
public void filesOnSameFolderShouldBeOnTheSameFileStore() throws RolloverFailure, IOException {
if (!EnvUtil.isJDK7OrHigher())
return;
File parent = new File(pathPrefix);
File file = new File(pathPrefix + "filesOnSameFolderShouldBeOnTheSameFileStore");
FileUtil.createMissingParentDirectories(file);
file.createNewFile();
assertTrue(FileStoreUtil.areOnSameFileStore(parent, file));
} |
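A complementary sketch for the guard clauses: a file that does not exist should be rejected up front, before any file-store lookup (same JDK7 guard as above):
@Test
public void missingFileShouldBeRejected() {
    if (!EnvUtil.isJDK7OrHigher())
        return;
    File parent = new File(pathPrefix);
    File missing = new File(pathPrefix + "doesNotExistAnywhere");
    try {
        FileStoreUtil.areOnSameFileStore(parent, missing);
        fail("Expected IllegalArgumentException for a missing file");
    } catch (IllegalArgumentException expected) {
        // expected: at least one of the two files does not exist
    }
}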
@Override
public Optional<RawFlag> fetch(FlagId id, FetchVector vector) {
return sources.stream()
.map(source -> source.fetch(id, vector))
.filter(Optional::isPresent)
.map(Optional::get)
.findFirst();
} | @Test
void test() {
FlagSource source1 = mock(FlagSource.class);
FlagSource source2 = mock(FlagSource.class);
OrderedFlagSource orderedSource = new OrderedFlagSource(source1, source2);
FlagId id = new FlagId("id");
FetchVector vector = new FetchVector();
when(source1.fetch(any(), any())).thenReturn(Optional.empty());
when(source2.fetch(any(), any())).thenReturn(Optional.empty());
assertFalse(orderedSource.fetch(id, vector).isPresent());
verify(source1, times(1)).fetch(any(), any());
verify(source2, times(1)).fetch(any(), any());
RawFlag rawFlag = mock(RawFlag.class);
when(source1.fetch(any(), any())).thenReturn(Optional.empty());
when(source2.fetch(any(), any())).thenReturn(Optional.of(rawFlag));
assertEquals(orderedSource.fetch(id, vector), Optional.of(rawFlag));
verify(source1, times(2)).fetch(any(), any());
verify(source2, times(2)).fetch(any(), any());
when(source1.fetch(any(), any())).thenReturn(Optional.of(rawFlag));
when(source2.fetch(any(), any())).thenReturn(Optional.empty());
assertEquals(orderedSource.fetch(id, vector), Optional.of(rawFlag));
verify(source1, times(3)).fetch(any(), any());
// Not invoked as source1 provided raw flag
verify(source2, times(2)).fetch(any(), any());
} |
public void addTimeline(TimelineEvent event) {
timeline.add(event);
} | @Test
public void testAddTimeline() {
WorkflowRuntimeSummary summary = new WorkflowRuntimeSummary();
TimelineEvent event = TimelineLogEvent.info("hello world");
summary.addTimeline(event);
assertEquals(Collections.singletonList(event), summary.getTimeline().getTimelineEvents());
} |