focal_method | test_case |
---|---|
public static Map<String, String[]> getQueryMap(String query) {
Map<String, String[]> map = new HashMap<>();
String[] params = query.split(PARAM_CONCATENATE);
for (String param : params) {
String[] paramSplit = param.split("=");
if (paramSplit.length == 0) {
continue; // We found no key-/value-pair, so continue on the next param
}
String name = decodeQuery(paramSplit[0]);
// hack for SOAP request (generally)
if (name.trim().startsWith("<?")) { // $NON-NLS-1$
map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
return map;
}
// the post payload is not key=value
if((param.startsWith("=") && paramSplit.length == 1) || paramSplit.length > 2) {
map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
return map;
}
String value = "";
if(paramSplit.length>1) {
value = decodeQuery(paramSplit[1]);
}
String[] known = map.get(name);
if(known == null) {
known = new String[] {value};
}
else {
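// grow the existing value array by one and append the new value at the end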
String[] tmp = new String[known.length+1];
tmp[tmp.length-1] = value;
System.arraycopy(known, 0, tmp, 0, known.length);
known = tmp;
}
map.put(name, known);
}
return map;
} | @Test
void testGetQueryMapBug54055() {
String query = "param2=15¶m1=12¶m3=bu4m1KzFvsozCnR4lra0%2Be69YzpnRcF09nDjc3VJvl8%3D";
Map<String, String[]> params = RequestViewHTTP.getQueryMap(query);
Assertions.assertNotNull(params);
Assertions.assertEquals(3, params.size());
String[] param1 = params.get("param1");
Assertions.assertNotNull(param1);
Assertions.assertEquals(1, param1.length);
Assertions.assertEquals("12", param1[0]);
String[] param2 = params.get("param2");
Assertions.assertNotNull(param2);
Assertions.assertEquals(1, param2.length);
Assertions.assertEquals("15", param2[0]);
String[] param3 = params.get("param3");
Assertions.assertNotNull(param3);
Assertions.assertEquals(1, param3.length);
Assertions.assertEquals("bu4m1KzFvsozCnR4lra0+e69YzpnRcF09nDjc3VJvl8=", param3[0]);
} |
public static Set<String> getDependencyTree(final byte[] jarBytes) {
Set<String> dependencies = new HashSet<>();
try (InputStream inputStream = new ByteArrayInputStream(jarBytes);
ZipInputStream zipInputStream = new ZipInputStream(inputStream)) {
ZipEntry entry;
while ((entry = zipInputStream.getNextEntry()) != null) {
if (entry.getName().endsWith(".class")) {
ClassNode classNode = new ClassNode(Opcodes.ASM7);
ClassReader classReader = new ClassReader(zipInputStream);
classReader.accept(classNode, 0);
addDependencies(classNode.superName, dependencies);
for (String interfaceName : classNode.interfaces) {
addDependencies(interfaceName, dependencies);
}
for (FieldNode fieldNode : classNode.fields) {
addDependencies(Type.getType(fieldNode.desc).getClassName(), dependencies);
}
for (MethodNode methodNode : classNode.methods) {
addDependencies(Type.getReturnType(methodNode.desc).getClassName(), dependencies);
for (Type argumentType : Type.getArgumentTypes(methodNode.desc)) {
addDependencies(argumentType.getClassName(), dependencies);
}
}
}
}
return dependencies;
} catch (Exception e) {
LOG.error("get dependency tree error", e);
throw new ShenyuException(AdminConstants.THE_PLUGIN_JAR_FILE_IS_NOT_CORRECT_OR_EXCEEDS_16_MB);
}
} | @Test
public void testException() {
assertThrowsExactly(ShenyuException.class, () -> JarDependencyUtils.getDependencyTree(null));
} |
public static String serializeRecordToJsonExpandingValue(ObjectMapper mapper, Record<GenericObject> record,
boolean flatten)
throws JsonProcessingException {
JsonRecord jsonRecord = new JsonRecord();
GenericObject value = record.getValue();
if (value != null) {
jsonRecord.setPayload(toJsonSerializable(record.getSchema(), value.getNativeObject()));
}
record.getKey().ifPresent(jsonRecord::setKey);
record.getTopicName().ifPresent(jsonRecord::setTopicName);
record.getEventTime().ifPresent(jsonRecord::setEventTime);
record.getProperties().forEach(jsonRecord::addProperty);
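// either flatten the record into dot-separated top-level keys or serialize it as nested JSON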
if (flatten) {
JsonNode jsonNode = mapper.convertValue(jsonRecord, JsonNode.class);
return JsonFlattener.flatten(new JacksonJsonValue(jsonNode));
} else {
return mapper.writeValueAsString(jsonRecord);
}
} | @Test(dataProvider = "schemaType")
public void testSerializeRecordToJsonExpandingValue(SchemaType schemaType) throws Exception {
RecordSchemaBuilder valueSchemaBuilder = org.apache.pulsar.client.api.schema.SchemaBuilder.record("value");
valueSchemaBuilder.field("c").type(SchemaType.STRING).optional().defaultValue(null);
valueSchemaBuilder.field("d").type(SchemaType.INT32).optional().defaultValue(null);
RecordSchemaBuilder udtSchemaBuilder = SchemaBuilder.record("type1");
udtSchemaBuilder.field("a").type(SchemaType.STRING).optional().defaultValue(null);
udtSchemaBuilder.field("b").type(SchemaType.BOOLEAN).optional().defaultValue(null);
udtSchemaBuilder.field("d").type(SchemaType.DOUBLE).optional().defaultValue(null);
udtSchemaBuilder.field("f").type(SchemaType.FLOAT).optional().defaultValue(null);
udtSchemaBuilder.field("i").type(SchemaType.INT32).optional().defaultValue(null);
udtSchemaBuilder.field("l").type(SchemaType.INT64).optional().defaultValue(null);
GenericSchema<GenericRecord> udtGenericSchema = Schema.generic(udtSchemaBuilder.build(schemaType));
valueSchemaBuilder.field("e", udtGenericSchema).type(schemaType).optional().defaultValue(null);
GenericSchema<GenericRecord> valueSchema = Schema.generic(valueSchemaBuilder.build(schemaType));
GenericRecord valueGenericRecord = valueSchema.newRecordBuilder()
.set("c", "1")
.set("d", 1)
.set("e", udtGenericSchema.newRecordBuilder()
.set("a", "a")
.set("b", true)
.set("d", 1.0)
.set("f", 1.0f)
.set("i", 1)
.set("l", 10L)
.build())
.build();
Map<String, String> properties = new HashMap<>();
properties.put("prop-key", "prop-value");
Record<GenericObject> genericObjectRecord = new Record<GenericObject>() {
@Override
public Optional<String> getTopicName() {
return Optional.of("data-ks1.table1");
}
@Override
public org.apache.pulsar.client.api.Schema getSchema() {
return valueSchema;
}
@Override
public Optional<String> getKey() {
return Optional.of("message-key");
}
@Override
public GenericObject getValue() {
return valueGenericRecord;
}
@Override
public Map<String, String> getProperties() {
return properties;
}
@Override
public Optional<Long> getEventTime() {
return Optional.of(1648502845803L);
}
};
ObjectMapper objectMapper = new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL);
String json = Utils.serializeRecordToJsonExpandingValue(objectMapper, genericObjectRecord, false);
assertEquals(json, "{\"topicName\":\"data-ks1.table1\",\"key\":\"message-key\",\"payload\":{\"c\":\"1\","
+ "\"d\":1,\"e\":{\"a\":\"a\",\"b\":true,\"d\":1.0,\"f\":1.0,\"i\":1,\"l\":10}},"
+ "\"properties\":{\"prop-key\":\"prop-value\"},\"eventTime\":1648502845803}");
} |
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof ShowFunctionStatusStatement) {
return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowProcedureStatusStatement) {
return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowTablesStatement) {
return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType()));
}
return Optional.empty();
} | @Test
void assertCreateWithMySQLShowProcessListStatement() {
when(sqlStatementContext.getSqlStatement()).thenReturn(new MySQLShowProcessListStatement(false));
Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "", "", Collections.emptyList());
assertTrue(actual.isPresent());
assertThat(actual.get(), instanceOf(ShowProcessListExecutor.class));
} |
static ApiError validateQuotaKeyValue(
Map<String, ConfigDef.ConfigKey> validKeys,
String key,
double value
) {
// Ensure we have an allowed quota key
ConfigDef.ConfigKey configKey = validKeys.get(key);
if (configKey == null) {
return new ApiError(Errors.INVALID_REQUEST, "Invalid configuration key " + key);
}
if (value <= 0.0) {
return new ApiError(Errors.INVALID_REQUEST, "Quota " + key + " must be greater than 0");
}
// Ensure the quota value is valid
switch (configKey.type()) {
case DOUBLE:
return ApiError.NONE;
case SHORT:
if (value > Short.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for a SHORT.");
}
return getErrorForIntegralQuotaValue(value, key);
case INT:
if (value > Integer.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for an INT.");
}
return getErrorForIntegralQuotaValue(value, key);
case LONG: {
if (value > Long.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for a LONG.");
}
return getErrorForIntegralQuotaValue(value, key);
}
default:
return new ApiError(Errors.UNKNOWN_SERVER_ERROR,
"Unexpected config type " + configKey.type() + " should be Long or Double");
}
} | @Test
public void testValidateQuotaKeyValueForConsumerByteRateTooLarge() {
assertEquals(new ApiError(Errors.INVALID_REQUEST,
"Proposed value for consumer_byte_rate is too large for a LONG."),
ClientQuotaControlManager.validateQuotaKeyValue(
VALID_CLIENT_ID_QUOTA_KEYS, "consumer_byte_rate", 36893488147419103232.4));
} |
@Override
public boolean applyFilterToCamelHeaders(String headerName, Object headerValue, Exchange exchange) {
boolean answer = super.applyFilterToCamelHeaders(headerName, headerValue, exchange);
// using rest producer then headers are mapping to uri and query parameters using {key} syntax
// if there is a match to an existing Camel Message header, then we should filter (=true) this
// header as its already been mapped by the RestProducer from camel-core, and we do not want
// the header to be included as an HTTP header also (eg as a duplicate value)
if (!answer) {
if (templateUri != null) {
String token = "{" + headerName + "}";
if (templateUri.contains(token)) {
answer = true;
}
}
if (!answer && queryParameters != null) {
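// look for the header as a query template token, in plain ({key}, {key?}) and percent-encoded forms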
String[] tokens = new String[4];
tokens[0] = "={" + headerName + "}";
tokens[1] = "={" + headerName + "?}";
tokens[2] = "=%7B" + headerName + "%7D";
tokens[3] = "=%7B" + headerName + "%3F%7D";
for (String token : tokens) {
if (queryParameters.contains(token)) {
answer = true;
break;
}
}
}
}
return answer;
} | @Test
public void shouldDecideOnApplyingHeaderFilterToTemplateTokensUnencoded() {
final HttpRestHeaderFilterStrategy strategy = new HttpRestHeaderFilterStrategy(
"{uriToken1}{uriToken2}",
"q1={queryToken1}&q2={queryToken2?}&");
assertTrue(strategy.applyFilterToCamelHeaders("uriToken1", "value", NOT_USED));
assertTrue(strategy.applyFilterToCamelHeaders("uriToken2", "value", NOT_USED));
assertTrue(strategy.applyFilterToCamelHeaders("queryToken1", "value", NOT_USED));
assertTrue(strategy.applyFilterToCamelHeaders("queryToken2", "value", NOT_USED));
assertFalse(strategy.applyFilterToCamelHeaders("unknown", "value", NOT_USED));
} |
public String xml(String text) {
if (text == null || text.isEmpty()) {
return text;
}
return StringEscapeUtils.escapeXml11(text);
} | @Test
public void testXml() {
EscapeTool instance = new EscapeTool();
String text = null;
String expResult = null;
String result = instance.xml(text);
assertEquals(expResult, result);
text = "";
expResult = "";
result = instance.xml(text);
assertEquals(expResult, result);
text = "<div>";
expResult = "<div>";
result = instance.xml(text);
assertEquals(expResult, result);
} |
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
} | @Test(description = "Parameter examples ordering")
public void testTicket3587() {
Reader reader = new Reader(new OpenAPI());
OpenAPI openAPI = reader.read(Ticket3587Resource.class);
String yaml = "openapi: 3.0.1\n"
+ "paths:\n"
+ " /test/test:\n"
+ " get:\n"
+ " operationId: parameterExamplesOrderingTest\n"
+ " parameters:\n"
+ " - in: query\n"
+ " schema:\n"
+ " type: string\n"
+ " examples:\n"
+ " Example One:\n"
+ " description: Example One\n"
+ " Example Two:\n"
+ " description: Example Two\n"
+ " Example Three:\n"
+ " description: Example Three\n"
+ " - in: query\n"
+ " schema:\n"
+ " type: string\n"
+ " examples:\n"
+ " Example Three:\n"
+ " description: Example Three\n"
+ " Example Two:\n"
+ " description: Example Two\n"
+ " Example One:\n"
+ " description: Example One\n"
+ " responses:\n"
+ " default:\n"
+ " description: default response\n"
+ " content:\n"
+ " '*/*': {}";
SerializationMatchers.assertEqualsToYamlExact(openAPI, yaml);
} |
public static ReduceByKey.CombineFunctionWithIdentity<Integer> ofInts() {
return SUMS_OF_INT;
} | @Test
public void testSumOfInts() {
assertEquals(6, (int) apply(Stream.of(1, 2, 3), Sums.ofInts()));
} |
public static OSClient getConnectedClient(OpenstackNode osNode) {
OpenstackAuth auth = osNode.keystoneConfig().authentication();
String endpoint = buildEndpoint(osNode);
Perspective perspective = auth.perspective();
Config config = getSslConfig();
try {
if (endpoint.contains(KEYSTONE_V2)) {
IOSClientBuilder.V2 builder = OSFactory.builderV2()
.endpoint(endpoint)
.tenantName(auth.project())
.credentials(auth.username(), auth.password())
.withConfig(config);
if (perspective != null) {
builder.perspective(getFacing(perspective));
}
return builder.authenticate();
} else if (endpoint.contains(KEYSTONE_V3)) {
Identifier project = Identifier.byName(auth.project());
Identifier domain = Identifier.byName(DOMAIN_DEFAULT);
IOSClientBuilder.V3 builder = OSFactory.builderV3()
.endpoint(endpoint)
.credentials(auth.username(), auth.password(), domain)
.scopeToProject(project, domain)
.withConfig(config);
if (perspective != null) {
builder.perspective(getFacing(perspective));
}
return builder.authenticate();
} else {
log.warn("Unrecognized keystone version type");
return null;
}
} catch (AuthenticationException e) {
log.error("Authentication failed due to {}", e);
return null;
}
} | @Ignore
@Test
public void testGetConnectedClient() {
OpenstackNode.Builder osNodeBuilderV2 = DefaultOpenstackNode.builder();
OpenstackAuth.Builder osNodeAuthBuilderV2 = DefaultOpenstackAuth.builder()
.version("v2.0")
.protocol(OpenstackAuth.Protocol.HTTP)
.project("admin")
.username("admin")
.password("password")
.perspective(OpenstackAuth.Perspective.PUBLIC);
String endpointV2 = "1.1.1.1:35357/v2.0";
KeystoneConfig keystoneConfigV2 = DefaultKeystoneConfig.builder()
.authentication(osNodeAuthBuilderV2.build())
.endpoint(endpointV2)
.build();
openstackControlNodeV2 = osNodeBuilderV2.hostname("controllerv2")
.type(OpenstackNode.NodeType.CONTROLLER)
.managementIp(IpAddress.valueOf("1.1.1.1"))
.keystoneConfig(keystoneConfigV2)
.state(NodeState.COMPLETE)
.build();
OpenstackNode.Builder osNodeBuilderV3 = DefaultOpenstackNode.builder();
OpenstackAuth.Builder osNodeAuthBuilderV3 = DefaultOpenstackAuth.builder()
.version("v2")
.protocol(OpenstackAuth.Protocol.HTTP)
.project("admin")
.username("admin")
.password("password")
.perspective(OpenstackAuth.Perspective.PUBLIC);
String endpointV3 = "2.2.2.2:80/v3";
KeystoneConfig keystoneConfigV3 = DefaultKeystoneConfig.builder()
.authentication(osNodeAuthBuilderV3.build())
.endpoint(endpointV3)
.build();
openstackControlNodeV3 = osNodeBuilderV3.hostname("controllerv3")
.type(OpenstackNode.NodeType.CONTROLLER)
.managementIp(IpAddress.valueOf("2.2.2.2"))
.keystoneConfig(keystoneConfigV3)
.state(NodeState.COMPLETE)
.build();
getConnectedClient(openstackControlNodeV2);
getConnectedClient(openstackControlNodeV3);
} |
@VisibleForTesting
static List<HivePartition> getPartitionsSample(List<HivePartition> partitions, int sampleSize)
{
checkArgument(sampleSize > 0, "sampleSize is expected to be greater than zero");
if (partitions.size() <= sampleSize) {
return partitions;
}
List<HivePartition> result = new ArrayList<>();
int samplesLeft = sampleSize;
HivePartition min = partitions.get(0);
HivePartition max = partitions.get(0);
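// single pass to find the partitions with the smallest and largest partition ids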
for (HivePartition partition : partitions) {
if (partition.getPartitionId().compareTo(min.getPartitionId()) < 0) {
min = partition;
}
else if (partition.getPartitionId().compareTo(max.getPartitionId()) > 0) {
max = partition;
}
}
result.add(min);
samplesLeft--;
if (samplesLeft > 0) {
result.add(max);
samplesLeft--;
}
if (samplesLeft > 0) {
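// fill the remaining slots deterministically, ordered by the murmur3 hash of each partition name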
HashFunction hashFunction = murmur3_128();
Comparator<Map.Entry<HivePartition, Long>> hashComparator = Comparator
.<Map.Entry<HivePartition, Long>, Long>comparing(Map.Entry::getValue)
.thenComparing(entry -> entry.getKey().getPartitionId().getPartitionName());
partitions.stream()
.filter(partition -> !result.contains(partition))
.map(partition -> immutableEntry(partition, hashFunction.hashUnencodedChars(partition.getPartitionId().getPartitionName()).asLong()))
.sorted(hashComparator)
.limit(samplesLeft)
.forEachOrdered(entry -> result.add(entry.getKey()));
}
return unmodifiableList(result);
} | @Test
public void testGetPartitionsSample()
{
HivePartition p1 = partition("p1=string1/p2=1234");
HivePartition p2 = partition("p1=string2/p2=2345");
HivePartition p3 = partition("p1=string3/p2=3456");
HivePartition p4 = partition("p1=string4/p2=4567");
HivePartition p5 = partition("p1=string5/p2=5678");
assertEquals(getPartitionsSample(ImmutableList.of(p1), 1), ImmutableList.of(p1));
assertEquals(getPartitionsSample(ImmutableList.of(p1), 2), ImmutableList.of(p1));
assertEquals(getPartitionsSample(ImmutableList.of(p1, p2), 2), ImmutableList.of(p1, p2));
assertEquals(getPartitionsSample(ImmutableList.of(p1, p2, p3), 2), ImmutableList.of(p1, p3));
assertEquals(getPartitionsSample(ImmutableList.of(p1, p2, p3, p4), 1), getPartitionsSample(ImmutableList.of(p1, p2, p3, p4), 1));
assertEquals(getPartitionsSample(ImmutableList.of(p1, p2, p3, p4), 3), getPartitionsSample(ImmutableList.of(p1, p2, p3, p4), 3));
assertEquals(getPartitionsSample(ImmutableList.of(p1, p2, p3, p4, p5), 3), ImmutableList.of(p1, p5, p4));
} |
@ExecuteOn(TaskExecutors.IO)
@Post(consumes = MediaType.APPLICATION_YAML)
@Operation(tags = {"Flows"}, summary = "Create a flow from yaml source")
public HttpResponse<FlowWithSource> create(
@Parameter(description = "The flow") @Body String flow
) throws ConstraintViolationException {
Flow flowParsed = yamlFlowParser.parse(flow, Flow.class);
return HttpResponse.ok(doCreate(flowParsed, flow));
} | @Test
void updateFlowMultilineJson() {
String flowId = IdUtils.create();
Flow flow = generateFlowWithFlowable(flowId, "io.kestra.unittest", "\n \n a \nb\nc");
Flow result = client.toBlocking().retrieve(POST("/api/v1/flows", flow), Flow.class);
assertThat(result.getId(), is(flow.getId()));
FlowWithSource withSource = client.toBlocking().retrieve(GET("/api/v1/flows/" + flow.getNamespace() + "/" + flow.getId() + "?source=true").contentType(MediaType.APPLICATION_YAML), FlowWithSource.class);
assertThat(withSource.getId(), is(flow.getId()));
assertThat(withSource.getSource(), containsString("format: |2-"));
} |
void placeOrder(Order order) {
sendShippingRequest(order);
} | @Test
void testPlaceOrderWithoutDatabaseAndExceptions() throws Exception {
long paymentTime = timeLimits.paymentTime();
long queueTaskTime = timeLimits.queueTaskTime();
long messageTime = timeLimits.messageTime();
long employeeTime = timeLimits.employeeTime();
long queueTime = timeLimits.queueTime();
for (double d = 0.1; d < 2; d = d + 0.1) {
paymentTime *= d;
queueTaskTime *= d;
messageTime *= d;
employeeTime *= d;
queueTime *= d;
for (Exception e : exceptionList) {
Commander c = buildCommanderObjectWithoutDB(true, true, e);
var order = new Order(new User("K", null), "pen", 1f);
for (Order.MessageSent ms : Order.MessageSent.values()) {
c.placeOrder(order);
assertFalse(StringUtils.isBlank(order.id));
}
c = buildCommanderObjectWithoutDB(true, false, e);
order = new Order(new User("K", null), "pen", 1f);
for (Order.MessageSent ms : Order.MessageSent.values()) {
c.placeOrder(order);
assertFalse(StringUtils.isBlank(order.id));
}
c = buildCommanderObjectWithoutDB(false, false, e);
order = new Order(new User("K", null), "pen", 1f);
for (Order.MessageSent ms : Order.MessageSent.values()) {
c.placeOrder(order);
assertFalse(StringUtils.isBlank(order.id));
}
c = buildCommanderObjectWithoutDB(false, true, e);
order = new Order(new User("K", null), "pen", 1f);
for (Order.MessageSent ms : Order.MessageSent.values()) {
c.placeOrder(order);
assertFalse(StringUtils.isBlank(order.id));
}
}
}
} |
public static String beanToString(Object o) {
if (o == null) {
return null;
}
Field[] fields = o.getClass().getDeclaredFields();
StringBuilder buffer = new StringBuilder();
buffer.append("[");
for (Field field : fields) {
Object val = null;
try {
val = ReflectionUtil.getFieldValue(o, field);
} catch (RuntimeException e) {
LOGGER.warn("get field value failed", e);
}
if (val != null) {
buffer.append(field.getName()).append("=").append(val).append(", ");
}
}
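// strip the trailing ", " appended after the last field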
if (buffer.length() > 2) {
buffer.delete(buffer.length() - 2, buffer.length());
}
buffer.append("]");
return buffer.toString();
} | @Test
public void testBeanToString() {
BranchDO branchDO = new BranchDO("xid123123", 123L, 1, 2.2, new Date());
Assertions.assertNotNull(BeanUtils.beanToString(branchDO));
// null object
Assertions.assertNull(BeanUtils.beanToString(null));
// buffer length < 2
Assertions.assertNotNull(BeanUtils.beanToString(new Object()));
// null val
Assertions.assertNotNull(BeanUtils.beanToString(new BranchDO(null, null, null, null, null)));
} |
Dependency newDependency(MavenProject prj) {
final File pom = new File(prj.getBasedir(), "pom.xml");
if (pom.isFile()) {
getLog().debug("Adding virtual dependency from pom.xml");
return new Dependency(pom, true);
} else if (prj.getFile().isFile()) {
getLog().debug("Adding virtual dependency from file");
return new Dependency(prj.getFile(), true);
} else {
return new Dependency(true);
}
} | @Test
public void should_newDependency_get_default_virtual_dependency() {
// Given
BaseDependencyCheckMojo instance = new BaseDependencyCheckMojoImpl();
new MockUp<MavenProject>() {
@Mock
public File getBasedir() {
return new File("src/test/resources/dir_without_pom");
}
@Mock
public File getFile() {
return new File("src/test/resources/dir_without_pom");
}
};
// When
String output = instance.newDependency(project).getFileName();
// Then
assertNull(output);
} |
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
map.put(
MetricCollectors.RESOURCE_LABEL_PREFIX
+ StreamsConfig.APPLICATION_ID_CONFIG,
applicationId
);
// Streams client metrics aren't used in Confluent deployment
possiblyConfigureConfluentTelemetry(map);
return Collections.unmodifiableMap(map);
} | @Test
public void shouldNotSetDeserializationExceptionHandlerWhenFailOnDeserializationErrorTrue() {
final KsqlConfig ksqlConfig = new KsqlConfig(Collections.singletonMap(KsqlConfig.FAIL_ON_DESERIALIZATION_ERROR_CONFIG, true));
final Object result = ksqlConfig.getKsqlStreamConfigProps().get(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG);
assertThat(result, nullValue());
} |
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof Evidence)) {
return false;
}
if (this == obj) {
return true;
}
final Evidence o = (Evidence) obj;
return new EqualsBuilder()
.append(this.source == null ? null : this.source.toLowerCase(), o.source == null ? null : o.source.toLowerCase())
.append(this.name == null ? null : this.name.toLowerCase(), o.name == null ? null : o.name.toLowerCase())
.append(this.value == null ? null : this.value.toLowerCase(), o.value == null ? null : o.value.toLowerCase())
.append(this.confidence, o.getConfidence())
.append(this.fromHint, o.isFromHint())
.build();
} | @Test
public void testEquals() {
Evidence that0 = new Evidence("file", "name", "guice-3.0", Confidence.HIGHEST);
Evidence that1 = new Evidence("jar", "package name", "dependency", Confidence.HIGHEST);
Evidence that2 = new Evidence("jar", "package name", "google", Confidence.HIGHEST);
Evidence that3 = new Evidence("jar", "package name", "guice", Confidence.HIGHEST);
Evidence that4 = new Evidence("jar", "package name", "inject", Confidence.HIGHEST);
Evidence that5 = new Evidence("jar", "package name", "inject", Confidence.LOW);
Evidence that6 = new Evidence("jar", "package name", "internal", Confidence.LOW);
Evidence that7 = new Evidence("manifest", "Bundle-Description", "Guice is a lightweight dependency injection framework for Java 5 and above", Confidence.MEDIUM);
Evidence that8 = new Evidence("Manifest", "Implementation-Title", "Spring Framework", Confidence.HIGH);
Evidence instance = new Evidence("Manifest", "Implementation-Title", "Spring Framework", Confidence.HIGH);
assertFalse(instance.equals(that0));
assertFalse(instance.equals(that1));
assertFalse(instance.equals(that2));
assertFalse(instance.equals(that3));
assertFalse(instance.equals(that4));
assertFalse(instance.equals(that5));
assertFalse(instance.equals(that6));
assertFalse(instance.equals(that7));
assertTrue(instance.equals(that8));
} |
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName, final int connectionOffset, final int connectionSize,
final ConnectionMode connectionMode) throws SQLException {
return getConnections0(databaseName, dataSourceName, connectionOffset, connectionSize, connectionMode);
} | @Test
void assertGetConnectionsWhenAllInCache() throws SQLException {
Connection expected = databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.MEMORY_STRICTLY).get(0);
List<Connection> actual = databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.CONNECTION_STRICTLY);
assertThat(actual.size(), is(1));
assertThat(actual.get(0), is(expected));
} |
@ConstantFunction(name = "bitand", argTypes = {BIGINT, BIGINT}, returnType = BIGINT)
public static ConstantOperator bitandBigint(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createBigint(first.getBigint() & second.getBigint());
} | @Test
public void bitandBigint() {
assertEquals(100, ScalarOperatorFunctions.bitandBigint(O_BI_100, O_BI_100).getBigint());
} |
public void register(Operation operation) {
Map<Long, Operation> callIds = liveOperations.computeIfAbsent(operation.getCallerAddress(),
(key) -> new ConcurrentHashMap<>());
if (callIds.putIfAbsent(operation.getCallId(), operation) != null) {
throw new IllegalStateException("Duplicate operation during registration of operation=" + operation);
}
} | @Test
public void when_registerDuplicateCallId_then_exception() throws UnknownHostException {
AsyncJobOperation operation = createOperation("1.2.3.4", 1234, 2222L);
r.register(operation);
// this should not fail
r.register(createOperation("1.2.3.4", 1234, 2223L));
// adding a duplicate, expecting failure
assertThrows(IllegalStateException.class, () -> r.register(operation));
} |
@Override
public Object getSmallintValue(final ResultSet resultSet, final int columnIndex) throws SQLException {
return resultSet.getShort(columnIndex);
} | @Test
void assertGetSmallintValue() throws SQLException {
when(resultSet.getShort(1)).thenReturn((short) 0);
assertThat(dialectResultSetMapper.getSmallintValue(resultSet, 1), is((short) 0));
} |
public static boolean isValidDnsNameOrWildcard(String name) {
return name.length() <= 255
&& (DNS_NAME.matcher(name).matches()
|| (name.startsWith("*.") && DNS_NAME.matcher(name.substring(2)).matches()));
} | @Test
public void testDnsNames() {
assertThat(IpAndDnsValidation.isValidDnsNameOrWildcard("example"), is(true));
assertThat(IpAndDnsValidation.isValidDnsNameOrWildcard("example.com"), is(true));
assertThat(IpAndDnsValidation.isValidDnsNameOrWildcard("example:com"), is(false));
assertThat(IpAndDnsValidation.isValidDnsNameOrWildcard("veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylongexample.com"), is(false));
assertThat(IpAndDnsValidation.isValidDnsNameOrWildcard("example.com."), is(true));
assertThat(IpAndDnsValidation.isValidDnsNameOrWildcard("example.com.."), is(false));
assertThat(IpAndDnsValidation.isValidDnsNameOrWildcard("*.example.com."), is(true));
} |
public static String extractMulti(Pattern pattern, CharSequence content, String template) {
if (null == content || null == pattern || null == template) {
return null;
}
// extract the group numbers referenced in the template
final TreeSet<Integer> varNums = new TreeSet<>((o1, o2) -> ObjectUtil.compare(o2, o1));
final Matcher matcherForTemplate = PatternPool.GROUP_VAR.matcher(template);
while (matcherForTemplate.find()) {
varNums.add(Integer.parseInt(matcherForTemplate.group(1)));
}
final Matcher matcher = pattern.matcher(content);
if (matcher.find()) {
for (Integer group : varNums) {
template = template.replace("$" + group, matcher.group(group));
}
return template;
}
return null;
} | @Test
public void extractMultiTest2() {
// extract multiple groups and concatenate them
final String resultExtractMulti = ReUtil.extractMulti("(\\w)(\\w)(\\w)(\\w)(\\w)(\\w)(\\w)(\\w)(\\w)(\\w)", content, "$1-$2-$3-$4-$5-$6-$7-$8-$9-$10");
assertEquals("Z-Z-Z-a-a-a-b-b-b-c", resultExtractMulti);
} |
public static Object get(Object object, int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
}
if (object instanceof Map) {
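// for a Map, the index addresses the entry-set iteration order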
Map map = (Map) object;
Iterator iterator = map.entrySet().iterator();
return get(iterator, index);
} else if (object instanceof List) {
return ((List) object).get(index);
} else if (object instanceof Object[]) {
return ((Object[]) object)[index];
} else if (object instanceof Iterator) {
Iterator it = (Iterator) object;
while (it.hasNext()) {
index--;
if (index == -1) {
return it.next();
} else {
it.next();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object instanceof Collection) {
Iterator iterator = ((Collection) object).iterator();
return get(iterator, index);
} else if (object instanceof Enumeration) {
Enumeration it = (Enumeration) object;
while (it.hasMoreElements()) {
index--;
if (index == -1) {
return it.nextElement();
} else {
it.nextElement();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object == null) {
throw new IllegalArgumentException("Unsupported object type: null");
} else {
try {
return Array.get(object, index);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
}
}
} | @Test
void testGetEnumeration3() {
Vector<Object> vector = new Vector<>();
vector.add("1");
vector.add("2");
assertEquals("1", CollectionUtils.get(vector.elements(), 0));
assertEquals("2", CollectionUtils.get(vector.elements(), 1));
} |
public Object getAsJavaType( String valueName, Class<?> destinationType, InjectionTypeConverter converter )
throws KettleValueException {
int idx = rowMeta.indexOfValue( valueName );
if ( idx < 0 ) {
throw new KettleValueException( "Unknown column '" + valueName + "'" );
}
ValueMetaInterface metaType = rowMeta.getValueMeta( idx );
// find by source value type
switch ( metaType.getType() ) {
case ValueMetaInterface.TYPE_STRING:
String vs = rowMeta.getString( data, idx );
return getStringAsJavaType( vs, destinationType, converter );
case ValueMetaInterface.TYPE_BOOLEAN:
Boolean vb = rowMeta.getBoolean( data, idx );
if ( String.class.isAssignableFrom( destinationType ) ) {
return converter.boolean2string( vb );
} else if ( int.class.isAssignableFrom( destinationType ) ) {
return converter.boolean2intPrimitive( vb );
} else if ( Integer.class.isAssignableFrom( destinationType ) ) {
return converter.boolean2integer( vb );
} else if ( long.class.isAssignableFrom( destinationType ) ) {
return converter.boolean2longPrimitive( vb );
} else if ( Long.class.isAssignableFrom( destinationType ) ) {
return converter.boolean2long( vb );
} else if ( boolean.class.isAssignableFrom( destinationType ) ) {
return converter.boolean2booleanPrimitive( vb );
} else if ( Boolean.class.isAssignableFrom( destinationType ) ) {
return converter.boolean2boolean( vb );
} else if ( destinationType.isEnum() ) {
return converter.boolean2enum( destinationType, vb );
} else {
throw new RuntimeException( "Wrong value conversion to " + destinationType );
}
case ValueMetaInterface.TYPE_INTEGER:
Long vi = rowMeta.getInteger( data, idx );
if ( String.class.isAssignableFrom( destinationType ) ) {
return converter.integer2string( vi );
} else if ( int.class.isAssignableFrom( destinationType ) ) {
return converter.integer2intPrimitive( vi );
} else if ( Integer.class.isAssignableFrom( destinationType ) ) {
return converter.integer2integer( vi );
} else if ( long.class.isAssignableFrom( destinationType ) ) {
return converter.integer2longPrimitive( vi );
} else if ( Long.class.isAssignableFrom( destinationType ) ) {
return converter.integer2long( vi );
} else if ( boolean.class.isAssignableFrom( destinationType ) ) {
return converter.integer2booleanPrimitive( vi );
} else if ( Boolean.class.isAssignableFrom( destinationType ) ) {
return converter.integer2boolean( vi );
} else if ( destinationType.isEnum() ) {
return converter.integer2enum( destinationType, vi );
} else {
throw new RuntimeException( "Wrong value conversion to " + destinationType );
}
case ValueMetaInterface.TYPE_NUMBER:
Double vn = rowMeta.getNumber( data, idx );
if ( String.class.isAssignableFrom( destinationType ) ) {
return converter.number2string( vn );
} else if ( int.class.isAssignableFrom( destinationType ) ) {
return converter.number2intPrimitive( vn );
} else if ( Integer.class.isAssignableFrom( destinationType ) ) {
return converter.number2integer( vn );
} else if ( long.class.isAssignableFrom( destinationType ) ) {
return converter.number2longPrimitive( vn );
} else if ( Long.class.isAssignableFrom( destinationType ) ) {
return converter.number2long( vn );
} else if ( boolean.class.isAssignableFrom( destinationType ) ) {
return converter.number2booleanPrimitive( vn );
} else if ( Boolean.class.isAssignableFrom( destinationType ) ) {
return converter.number2boolean( vn );
} else if ( destinationType.isEnum() ) {
return converter.number2enum( destinationType, vn );
} else {
throw new RuntimeException( "Wrong value conversion to " + destinationType );
}
}
throw new KettleValueException( "Unknown conversion from " + metaType.getTypeDesc() + " into " + destinationType );
} | @Test
public void testBooleanConversion() throws Exception {
row = new RowMetaAndData( rowsMeta, null, true, null );
assertEquals( true, row.getAsJavaType( "bool", boolean.class, converter ) );
assertEquals( true, row.getAsJavaType( "bool", Boolean.class, converter ) );
assertEquals( 1, row.getAsJavaType( "bool", int.class, converter ) );
assertEquals( 1, row.getAsJavaType( "bool", Integer.class, converter ) );
assertEquals( 1L, row.getAsJavaType( "bool", long.class, converter ) );
assertEquals( 1L, row.getAsJavaType( "bool", Long.class, converter ) );
assertEquals( "Y", row.getAsJavaType( "bool", String.class, converter ) );
row = new RowMetaAndData( rowsMeta, null, false, null );
assertEquals( false, row.getAsJavaType( "bool", boolean.class, converter ) );
assertEquals( false, row.getAsJavaType( "bool", Boolean.class, converter ) );
assertEquals( 0, row.getAsJavaType( "bool", int.class, converter ) );
assertEquals( 0, row.getAsJavaType( "bool", Integer.class, converter ) );
assertEquals( 0L, row.getAsJavaType( "bool", long.class, converter ) );
assertEquals( 0L, row.getAsJavaType( "bool", Long.class, converter ) );
assertEquals( "N", row.getAsJavaType( "bool", String.class, converter ) );
row = new RowMetaAndData( rowsMeta, null, null, null );
assertEquals( null, row.getAsJavaType( "bool", String.class, converter ) );
assertEquals( null, row.getAsJavaType( "bool", Integer.class, converter ) );
assertEquals( null, row.getAsJavaType( "bool", Long.class, converter ) );
assertEquals( null, row.getAsJavaType( "bool", Boolean.class, converter ) );
} |
@JSONField(serialize = false, deserialize = false)
public RetryPolicy getRetryPolicy() {
if (GroupRetryPolicyType.EXPONENTIAL.equals(type)) {
if (exponentialRetryPolicy == null) {
return DEFAULT_RETRY_POLICY;
}
return exponentialRetryPolicy;
} else if (GroupRetryPolicyType.CUSTOMIZED.equals(type)) {
if (customizedRetryPolicy == null) {
return DEFAULT_RETRY_POLICY;
}
return customizedRetryPolicy;
} else {
return DEFAULT_RETRY_POLICY;
}
} | @Test
public void testGetRetryPolicy() {
GroupRetryPolicy groupRetryPolicy = new GroupRetryPolicy();
RetryPolicy retryPolicy = groupRetryPolicy.getRetryPolicy();
assertThat(retryPolicy).isInstanceOf(CustomizedRetryPolicy.class);
groupRetryPolicy.setType(GroupRetryPolicyType.EXPONENTIAL);
retryPolicy = groupRetryPolicy.getRetryPolicy();
assertThat(retryPolicy).isInstanceOf(CustomizedRetryPolicy.class);
groupRetryPolicy.setType(GroupRetryPolicyType.CUSTOMIZED);
groupRetryPolicy.setCustomizedRetryPolicy(new CustomizedRetryPolicy());
retryPolicy = groupRetryPolicy.getRetryPolicy();
assertThat(retryPolicy).isInstanceOf(CustomizedRetryPolicy.class);
groupRetryPolicy.setType(GroupRetryPolicyType.EXPONENTIAL);
groupRetryPolicy.setExponentialRetryPolicy(new ExponentialRetryPolicy());
retryPolicy = groupRetryPolicy.getRetryPolicy();
assertThat(retryPolicy).isInstanceOf(ExponentialRetryPolicy.class);
groupRetryPolicy.setType(null);
retryPolicy = groupRetryPolicy.getRetryPolicy();
assertThat(retryPolicy).isInstanceOf(CustomizedRetryPolicy.class);
} |
public synchronized void resetOffset(String topic, String group, Map<MessageQueue, Long> offsetTable) {
DefaultMQPushConsumerImpl consumer = null;
try {
MQConsumerInner impl = this.consumerTable.get(group);
if (impl instanceof DefaultMQPushConsumerImpl) {
consumer = (DefaultMQPushConsumerImpl) impl;
} else {
log.info("[reset-offset] consumer dose not exist. group={}", group);
return;
}
consumer.suspend();
ConcurrentMap<MessageQueue, ProcessQueue> processQueueTable = consumer.getRebalanceImpl().getProcessQueueTable();
for (Map.Entry<MessageQueue, ProcessQueue> entry : processQueueTable.entrySet()) {
MessageQueue mq = entry.getKey();
if (topic.equals(mq.getTopic()) && offsetTable.containsKey(mq)) {
ProcessQueue pq = entry.getValue();
pq.setDropped(true);
pq.clear();
}
}
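// pause so in-flight pull tasks can observe the dropped queues before offsets are updated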
try {
TimeUnit.SECONDS.sleep(10);
} catch (InterruptedException ignored) {
}
Iterator<MessageQueue> iterator = processQueueTable.keySet().iterator();
while (iterator.hasNext()) {
MessageQueue mq = iterator.next();
Long offset = offsetTable.get(mq);
if (topic.equals(mq.getTopic()) && offset != null) {
try {
consumer.updateConsumeOffset(mq, offset);
consumer.getRebalanceImpl().removeUnnecessaryMessageQueue(mq, processQueueTable.get(mq));
iterator.remove();
} catch (Exception e) {
log.warn("reset offset failed. group={}, {}", group, mq, e);
}
}
}
} finally {
if (consumer != null) {
consumer.resume();
}
}
} | @Test
public void testResetOffset() throws IllegalAccessException {
topicRouteTable.put(topic, createTopicRouteData());
brokerAddrTable.put(defaultBroker, createBrokerAddrMap());
consumerTable.put(group, createMQConsumerInner());
Map<MessageQueue, Long> offsetTable = new HashMap<>();
offsetTable.put(createMessageQueue(), 0L);
mqClientInstance.resetOffset(topic, group, offsetTable);
Field consumerTableField = FieldUtils.getDeclaredField(mqClientInstance.getClass(), "consumerTable", true);
ConcurrentMap<String, MQConsumerInner> consumerTable = (ConcurrentMap<String, MQConsumerInner>) consumerTableField.get(mqClientInstance);
DefaultMQPushConsumerImpl consumer = (DefaultMQPushConsumerImpl) consumerTable.get(group);
verify(consumer).suspend();
verify(consumer).resume();
verify(consumer, times(1))
.updateConsumeOffset(
any(MessageQueue.class),
eq(0L));
} |
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) {
invokeCompletedOffsetCommitCallbacks();
if (offsets.isEmpty()) {
// We guarantee that the callbacks for all commitAsync() will be invoked when
// commitSync() completes, even if the user tries to commit empty offsets.
return invokePendingAsyncCommits(timer);
}
long attempts = 0L;
do {
if (coordinatorUnknownAndUnreadySync(timer)) {
return false;
}
RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
client.poll(future, timer);
// We may have had in-flight offset commits when the synchronous commit began. If so, ensure that
// the corresponding callbacks are invoked prior to returning in order to preserve the order that
// the offset commits were applied.
invokeCompletedOffsetCommitCallbacks();
if (future.succeeded()) {
if (interceptors != null)
interceptors.onCommit(offsets);
return true;
}
if (future.failed() && !future.isRetriable())
throw future.exception();
timer.sleep(retryBackoff.backoff(attempts++));
} while (timer.notExpired());
return false;
} | @Test
public void shouldLoseAllOwnedPartitionsBeforeRejoiningAfterDroppingOutOfTheGroup() {
final List<TopicPartition> partitions = singletonList(t1p);
try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"), true)) {
final Time realTime = Time.SYSTEM;
coordinator.ensureActiveGroup();
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(
singletonMap(t1p, new OffsetAndMetadata(100L)),
time.timer(Long.MAX_VALUE)));
int generationId = 42;
String memberId = "consumer-42";
client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.UNKNOWN_MEMBER_ID));
boolean res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
assertFalse(res);
assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation());
assertEquals("", coordinator.generation().memberId);
res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
assertFalse(res);
}
Collection<TopicPartition> lost = getLost(partitions);
assertEquals(lost.isEmpty() ? 0 : 1, rebalanceListener.lostCount);
assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost);
} |
public void openFile() {
openFile( false );
} | @Test
public void testLoadLastUsedTransLocalNoFilenameAtStartup() throws Exception {
String repositoryName = null;
String fileName = null;
setLoadLastUsedJobLocalWithRepository( false, repositoryName, null, fileName, true, true );
verify( spoon, never() ).openFile( anyString(), anyBoolean() );
} |
@Override
public List<AdminUserDO> getUserList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return Collections.emptyList();
}
return userMapper.selectBatchIds(ids);
} | @Test
public void testGetUserList() {
// mock data
AdminUserDO user = randomAdminUserDO();
userMapper.insert(user);
// insert a record whose id does not match
userMapper.insert(randomAdminUserDO());
// prepare parameters
Collection<Long> ids = singleton(user.getId());
// invoke
List<AdminUserDO> result = userService.getUserList(ids);
// assert
assertEquals(1, result.size());
assertEquals(user, result.get(0));
} |
protected static void configureMulticastSocket(MulticastSocket multicastSocket, Address bindAddress,
HazelcastProperties hzProperties, MulticastConfig multicastConfig, ILogger logger)
throws SocketException, IOException, UnknownHostException {
multicastSocket.setReuseAddress(true);
// bind to receive interface
multicastSocket.bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
multicastSocket.setTimeToLive(multicastConfig.getMulticastTimeToLive());
try {
boolean loopbackBind = bindAddress.getInetAddress().isLoopbackAddress();
Boolean loopbackModeEnabled = multicastConfig.getLoopbackModeEnabled();
if (loopbackModeEnabled != null) {
// setting loopbackmode is just a hint - and the argument means "disable"!
// to check the real value we call getLoopbackMode() (and again - return value means "disabled")
multicastSocket.setLoopbackMode(!loopbackModeEnabled);
}
// If LoopBack mode is not enabled (i.e. getLoopbackMode return true) and bind address is a loopback one,
// then print a warning
if (loopbackBind && multicastSocket.getLoopbackMode()) {
logger.warning("Hazelcast is bound to " + bindAddress.getHost() + " and loop-back mode is "
+ "disabled. This could cause multicast auto-discovery issues "
+ "and render it unable to work. Check your network connectivity, try to enable the "
+ "loopback mode and/or force -Djava.net.preferIPv4Stack=true on your JVM.");
}
// warning: before modifying lines below, take a look at these links:
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4417033
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6402758
// https://github.com/hazelcast/hazelcast/pull/19251#issuecomment-891375270
boolean callSetInterface = OS.isMac() || !loopbackBind;
String propSetInterface = hzProperties.getString(ClusterProperty.MULTICAST_SOCKET_SET_INTERFACE);
if (propSetInterface != null) {
callSetInterface = Boolean.parseBoolean(propSetInterface);
}
if (callSetInterface) {
multicastSocket.setInterface(bindAddress.getInetAddress());
}
} catch (Exception e) {
logger.warning(e);
}
multicastSocket.setReceiveBufferSize(SOCKET_BUFFER_SIZE);
multicastSocket.setSendBufferSize(SOCKET_BUFFER_SIZE);
String multicastGroup = hzProperties.getString(ClusterProperty.MULTICAST_GROUP);
if (multicastGroup == null) {
multicastGroup = multicastConfig.getMulticastGroup();
}
multicastConfig.setMulticastGroup(multicastGroup);
multicastSocket.joinGroup(InetAddress.getByName(multicastGroup));
multicastSocket.setSoTimeout(SOCKET_TIMEOUT);
} | @Test
public void testSetInterfaceDefaultWhenLoopback() throws Exception {
Config config = createConfig(null);
MulticastConfig multicastConfig = config.getNetworkConfig().getJoin().getMulticastConfig();
multicastConfig.setLoopbackModeEnabled(true);
MulticastSocket multicastSocket = mock(MulticastSocket.class);
Address address = new Address("127.0.0.1", 5701);
HazelcastProperties hzProperties = new HazelcastProperties(config);
MulticastService.configureMulticastSocket(multicastSocket, address, hzProperties, multicastConfig, mock(ILogger.class));
verify(multicastSocket).setLoopbackMode(false);
// https://github.com/hazelcast/hazelcast/pull/19251#issuecomment-891375270
if (OS.isMac()) {
verify(multicastSocket).setInterface(address.getInetAddress());
} else {
verify(multicastSocket, never()).setInterface(any());
}
} |
@Nullable
public static Resource getJsBundleResource(PluginManager pluginManager, String pluginName,
String bundleName) {
Assert.hasText(pluginName, "The pluginName must not be blank");
Assert.hasText(bundleName, "Bundle name must not be blank");
DefaultResourceLoader resourceLoader = getResourceLoader(pluginManager, pluginName);
if (resourceLoader == null) {
return null;
}
String path = PathUtils.combinePath(CONSOLE_BUNDLE_LOCATION, bundleName);
String simplifyPath = StringUtils.cleanPath(path);
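// reject bundle names that would escape the console bundle directory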
FileUtils.checkDirectoryTraversal("/" + CONSOLE_BUNDLE_LOCATION, simplifyPath);
Resource resource = resourceLoader.getResource(simplifyPath);
return resource.exists() ? resource : null;
} | @Test
void getJsBundleResource() {
Resource jsBundleResource =
BundleResourceUtils.getJsBundleResource(pluginManager, "fake-plugin", "main.js");
assertThat(jsBundleResource).isNotNull();
assertThat(jsBundleResource.exists()).isTrue();
jsBundleResource =
BundleResourceUtils.getJsBundleResource(pluginManager, "fake-plugin", "test.js");
assertThat(jsBundleResource).isNull();
jsBundleResource =
BundleResourceUtils.getJsBundleResource(pluginManager, "nothing-plugin", "main.js");
assertThat(jsBundleResource).isNull();
assertThatThrownBy(() -> {
BundleResourceUtils.getJsBundleResource(pluginManager, "fake-plugin",
"../test/main.js");
}).isInstanceOf(AccessDeniedException.class);
} |
@Override
public Num calculate(BarSeries series, Position position) {
return position.hasLoss() ? series.one() : series.zero();
} | @Test
public void calculateWithTwoShortPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(1, series),
Trade.sellAt(3, series), Trade.buyAt(5, series));
assertNumEquals(2, getCriterion().calculate(series, tradingRecord));
} |
public static String toUnderlineName(String s) {
if (s == null) {
return null;
}
StringBuilder sb = new StringBuilder();
boolean upperCase = false;
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
boolean nextUpperCase = true;
if (i < (s.length() - 1)) {
nextUpperCase = Character.isUpperCase(s.charAt(i + 1));
}
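// insert a separator when an upper-case run starts, or before the last capital of a run followed by lower case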
if (Character.isUpperCase(c)) {
if (!upperCase || !nextUpperCase) {
if (i > 0) sb.append(SEPARATOR);
}
upperCase = true;
} else {
upperCase = false;
}
sb.append(Character.toLowerCase(c));
}
return sb.toString();
} | @Test
public void testToUnderlineName(){
String a = "userName";
Assert.assertEquals("user_name", StringKit.toUnderlineName(a));
} |
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) {
return aggregate(initializer, Materialized.with(null, null));
} | @Test
public void timeWindowAggregateOverlappingWindowsTest() {
final KTable<Windowed<String>, String> customers = groupedStream.cogroup(MockAggregator.TOSTRING_ADDER)
.windowedBy(TimeWindows.of(ofMillis(500L)).advanceBy(ofMillis(200L))).aggregate(
MockInitializer.STRING_INIT, Materialized.with(Serdes.String(), Serdes.String()));
customers.toStream().to(OUTPUT);
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
final TestInputTopic<String, String> testInputTopic = driver.createInputTopic(
TOPIC, new StringSerializer(), new StringSerializer());
final TestOutputTopic<Windowed<String>, String> testOutputTopic = driver.createOutputTopic(
OUTPUT, new TimeWindowedDeserializer<>(new StringDeserializer(), WINDOW_SIZE), new StringDeserializer());
testInputTopic.pipeInput("k1", "A", 0);
testInputTopic.pipeInput("k2", "A", 0);
testInputTopic.pipeInput("k1", "B", 250);
testInputTopic.pipeInput("k2", "B", 250);
testInputTopic.pipeInput("k2", "A", 500L);
testInputTopic.pipeInput("k1", "A", 500L);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+A", 0);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+A", 0);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+A+B", 250);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+B", 250);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+A+B", 250);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+B", 250);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+B+A", 500);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+A", 500);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+B+A", 500);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+A", 500);
}
} |
@Override
public CompletableFuture<QueryMessageResult> queryMessageAsync(
String topic, String key, int maxCount, long begin, long end) {
long topicId;
try {
TopicMetadata topicMetadata = metadataStore.getTopic(topic);
if (topicMetadata == null) {
log.info("MessageFetcher#queryMessageAsync, topic metadata not found, topic={}", topic);
return CompletableFuture.completedFuture(new QueryMessageResult());
}
topicId = topicMetadata.getTopicId();
} catch (Exception e) {
log.error("MessageFetcher#queryMessageAsync, get topic id failed, topic={}", topic, e);
return CompletableFuture.completedFuture(new QueryMessageResult());
}
CompletableFuture<List<IndexItem>> future = indexService.queryAsync(topic, key, maxCount, begin, end);
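// resolve each matching index entry to its commit-log buffer, up to maxCount results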
return future.thenCompose(indexItemList -> {
List<CompletableFuture<SelectMappedBufferResult>> futureList = new ArrayList<>(maxCount);
for (IndexItem indexItem : indexItemList) {
if (topicId != indexItem.getTopicId()) {
continue;
}
FlatMessageFile flatFile =
flatFileStore.getFlatFile(new MessageQueue(topic, brokerName, indexItem.getQueueId()));
if (flatFile == null) {
continue;
}
CompletableFuture<SelectMappedBufferResult> getMessageFuture = flatFile
.getCommitLogAsync(indexItem.getOffset(), indexItem.getSize())
.thenApply(messageBuffer -> new SelectMappedBufferResult(
indexItem.getOffset(), messageBuffer, indexItem.getSize(), null));
futureList.add(getMessageFuture);
if (futureList.size() >= maxCount) {
break;
}
}
return CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).thenApply(v -> {
QueryMessageResult result = new QueryMessageResult();
futureList.forEach(f -> f.thenAccept(result::addMessage));
return result;
});
}).whenComplete((result, throwable) -> {
if (result != null) {
log.info("MessageFetcher#queryMessageAsync, " +
"query result={}, topic={}, topicId={}, key={}, maxCount={}, timestamp={}-{}",
result.getMessageBufferList().size(), topic, topicId, key, maxCount, begin, end);
}
});
} | @Test
public void testQueryMessageAsync() throws Exception {
this.getMessageFromTieredStoreTest();
mq = dispatcherTest.mq;
messageStore = dispatcherTest.messageStore;
storeConfig = dispatcherTest.storeConfig;
QueryMessageResult queryMessageResult = fetcher.queryMessageAsync(
mq.getTopic(), "uk", 32, 0L, System.currentTimeMillis()).join();
Assert.assertEquals(32, queryMessageResult.getMessageBufferList().size());
queryMessageResult = fetcher.queryMessageAsync(
mq.getTopic(), "uk", 120, 0L, System.currentTimeMillis()).join();
Assert.assertEquals(100, queryMessageResult.getMessageBufferList().size());
} |
@EventListener
void startup(StartupEvent event) {
if (configuration.getBackgroundJobServer().isEnabled()) {
backgroundJobServer.get().start();
}
if (configuration.getDashboard().isEnabled()) {
dashboardWebServer.get().start();
}
} | @Test
void onStartOptionalsAreNotCalledToBootstrapIfNotConfigured() {
when(backgroundJobServerConfiguration.isEnabled()).thenReturn(false);
when(dashboardConfiguration.isEnabled()).thenReturn(false);
jobRunrStarter.startup(null);
verifyNoInteractions(backgroundJobServer);
verifyNoInteractions(dashboardWebServer);
} |
@Override
public Component createComponent(Entry entry) {
final Object existingComponent = entryAccessor.getComponent(entry);
if (existingComponent != null)
return (Component) existingComponent;
final AFreeplaneAction action = entryAccessor.getAction(entry);
final JComponent component;
if(action != null){
AbstractButton actionComponent;
if (action.isSelectable()) {
actionComponent = new JAutoToggleButton(action);
IconReplacer.replaceByImageIcon(entry, actionComponent, entryAccessor);
}
else if(entry.builders().contains("bigIcon")) {
actionComponent = new JBigButton(action);
Icon icon = actionComponent.getIcon();
Icon scaledIcon = IconFactory.getInstance().getScaledIcon(icon, new Quantity<LengthUnit>(icon.getIconHeight() * 2, LengthUnit.px));
actionComponent.setIcon(FreeplaneIconFactory.toImageIcon(scaledIcon));
}
else {
actionComponent = new JButton(action);
IconReplacer.replaceByImageIcon(entry, actionComponent, entryAccessor);
}
component = actionComponent;
component.setName(action.getKey());
}
else if(entry.builders().contains("separator")){
component = new Separator();
}
else if(entry.builders().contains("panel")){
component = new JUnitPanel();
}
else if(entry.builders().contains("dropdownMenu")){
String textKey = (String) entry.getAttribute("text");
String text = textKey != null ? TextUtils.getText(textKey) + "..." : "...";
String iconKey = (String) entry.getAttribute("icon");
Icon icon = ResourceController.getResourceController().getIcon(iconKey != null ? iconKey : "arrowDown.icon");
String tooltipKey = (String) entry.getAttribute("tooltip");
JButtonWithDropdownMenu buttonWithMenu = new JButtonWithDropdownMenu(text, icon);
IconReplacer.replaceByScaledImageIcon(buttonWithMenu);
if(textKey != null)
TranslatedElement.TEXT.setKey(buttonWithMenu, textKey);
TranslatedElementFactory.createTooltip(buttonWithMenu, tooltipKey);
entry.children()
.stream()
.map(entryAccessor::getAction)
.filter(x -> x != null)
.forEach(buttonWithMenu::addMenuAction);
component = buttonWithMenu;
}
else
component = null;
return component;
} | @Test
public void testName() throws Exception {
ResourceAccessor resourceAccessorMock = mock(ResourceAccessor.class);
final ToolbarComponentProvider toolbarComponentProvider = new ToolbarComponentProvider(resourceAccessorMock);
final Entry entry = new Entry();
final EntryAccessor entryAccessor = new EntryAccessor();
final Component testComponent = new JPanel();
entryAccessor.setComponent(entry, testComponent);
assertThat(toolbarComponentProvider.createComponent(entry), equalTo(testComponent));
} |
public Long getHttpExpiresTime(final String pHttpExpiresHeader) {
if (pHttpExpiresHeader != null && pHttpExpiresHeader.length() > 0) {
try {
final Date dateExpires = Configuration.getInstance().getHttpHeaderDateTimeFormat().parse(pHttpExpiresHeader);
return dateExpires.getTime();
} catch (final Exception ex) {
if (Configuration.getInstance().isDebugMapTileDownloader())
Log.d(IMapView.LOGTAG, "Unable to parse expiration tag for tile, server returned " + pHttpExpiresHeader, ex);
}
}
return null;
} | @Test
public void testGetHttpExpiresTime() {
final TileSourcePolicy tileSourcePolicy = new TileSourcePolicy();
for (final String string : mExpiresStringOK) {
Assert.assertEquals(mExpiresValue, (long) tileSourcePolicy.getHttpExpiresTime(string));
}
for (final String string : mExpiresStringKO) {
Assert.assertNull(tileSourcePolicy.getHttpExpiresTime(string));
}
} |
public static Date strToDate(String dateStr) throws ParseException {
return strToDate(dateStr, DATE_FORMAT_TIME);
} | @Test
public void strToDate() throws Exception {
long d0 = 0L;
long d1 = 1501127802000L; // 2017-07-27 11:56:42 +8
long d2 = 1501127835000L; // 2017-07-27 11:57:15 +8
TimeZone timeZone = TimeZone.getDefault();
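// strToDate parses in the default time zone, so the expected Dates are built
// by shifting each epoch value back by the local UTC offset.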
Date date0 = new Date(d0 - timeZone.getOffset(d0));
Date date1 = new Date(d1 - timeZone.getOffset(d1));
Date date2 = new Date(d2 - timeZone.getOffset(d2));
String s0 = "1970-01-01 00:00:00";
String s1 = "2017-07-27 03:56:42";
String s2 = "2017-07-27 03:57:15";
Assert.assertEquals(DateUtils.strToDate(s0).getTime(), date0.getTime());
Assert.assertEquals(DateUtils.strToDate(s1).getTime(), date1.getTime());
Assert.assertEquals(DateUtils.strToDate(s2).getTime(), date2.getTime());
} |
public static long calculateTotalFlinkMemoryFromComponents(Configuration config) {
Preconditions.checkArgument(config.contains(TaskManagerOptions.TASK_HEAP_MEMORY));
Preconditions.checkArgument(config.contains(TaskManagerOptions.TASK_OFF_HEAP_MEMORY));
Preconditions.checkArgument(config.contains(TaskManagerOptions.NETWORK_MEMORY_MAX));
Preconditions.checkArgument(config.contains(TaskManagerOptions.NETWORK_MEMORY_MIN));
Preconditions.checkArgument(config.contains(TaskManagerOptions.MANAGED_MEMORY_SIZE));
Preconditions.checkArgument(config.contains(TaskManagerOptions.FRAMEWORK_HEAP_MEMORY));
Preconditions.checkArgument(config.contains(TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY));
Preconditions.checkArgument(
config.get(TaskManagerOptions.NETWORK_MEMORY_MAX)
.equals(config.get(TaskManagerOptions.NETWORK_MEMORY_MIN)));
return config.get(TaskManagerOptions.TASK_HEAP_MEMORY)
.add(config.get(TaskManagerOptions.TASK_OFF_HEAP_MEMORY))
.add(config.get(TaskManagerOptions.NETWORK_MEMORY_MAX))
.add(config.get(TaskManagerOptions.MANAGED_MEMORY_SIZE))
.add(config.get(TaskManagerOptions.FRAMEWORK_HEAP_MEMORY))
.add(config.get(TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY))
.getBytes();
} | @Test
void testCalculateTotalFlinkMemoryWithMissingFactors() {
Configuration config = new Configuration();
config.set(TaskManagerOptions.FRAMEWORK_HEAP_MEMORY, new MemorySize(1));
config.set(TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY, new MemorySize(3));
config.set(TaskManagerOptions.TASK_OFF_HEAP_MEMORY, new MemorySize(4));
config.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, new MemorySize(7));
assertThatThrownBy(
() ->
TaskExecutorResourceUtils.calculateTotalFlinkMemoryFromComponents(
config))
.isInstanceOf(IllegalArgumentException.class);
} |
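For reference, a minimal happy-path sketch, assuming the same Configuration/TaskManagerOptions API the test above uses; once every component is set (and network min equals max), the method returns the plain byte sum of the six components:

Configuration config = new Configuration();
config.set(TaskManagerOptions.TASK_HEAP_MEMORY, new MemorySize(10));
config.set(TaskManagerOptions.TASK_OFF_HEAP_MEMORY, new MemorySize(4));
config.set(TaskManagerOptions.NETWORK_MEMORY_MIN, new MemorySize(8));
config.set(TaskManagerOptions.NETWORK_MEMORY_MAX, new MemorySize(8)); // must equal min
config.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, new MemorySize(7));
config.set(TaskManagerOptions.FRAMEWORK_HEAP_MEMORY, new MemorySize(1));
config.set(TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY, new MemorySize(3));
// total = 10 + 4 + 8 + 7 + 1 + 3 = 33 bytes
long total = TaskExecutorResourceUtils.calculateTotalFlinkMemoryFromComponents(config);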
public ProcessingNodesState calculateProcessingState(TimeRange timeRange) {
final DateTime updateThresholdTimestamp = clock.nowUTC().minus(updateThreshold.toMilliseconds());
try (DBCursor<ProcessingStatusDto> statusCursor = db.find(activeNodes(updateThresholdTimestamp))) {
if (!statusCursor.hasNext()) {
return ProcessingNodesState.NONE_ACTIVE;
}
int activeNodes = 0;
int idleNodes = 0;
while (statusCursor.hasNext()) {
activeNodes++;
ProcessingStatusDto nodeProcessingStatus = statusCursor.next();
DateTime lastIndexedMessage = nodeProcessingStatus.receiveTimes().postIndexing();
// If node is behind and is busy, it is overloaded.
if (lastIndexedMessage.isBefore(timeRange.getTo()) && isBusy(nodeProcessingStatus)) {
return ProcessingNodesState.SOME_OVERLOADED;
}
// If a node did not index a message that is at least at the start of the time range,
// we consider it idle.
if (lastIndexedMessage.isBefore(timeRange.getFrom())) {
idleNodes++;
}
}
// Only if all nodes are idle, we stop the processing.
if (activeNodes == idleNodes) {
return ProcessingNodesState.ALL_IDLE;
}
}
// If none of the above checks return, we can assume that some nodes have already indexed the given time range.
return ProcessingNodesState.SOME_UP_TO_DATE;
} | @Test
@MongoDBFixtures("processing-status-idle-nodes.json")
public void processingStateIdleNodesWhereLastMessageWithinTimeRange() {
when(clock.nowUTC()).thenReturn(DateTime.parse("2019-01-01T04:00:00.000Z"));
when(updateThreshold.toMilliseconds()).thenReturn(Duration.hours(1).toMilliseconds());
TimeRange timeRange = AbsoluteRange.create("2019-01-01T02:00:00.000Z", "2019-01-01T03:00:00.000Z");
assertThat(dbService.calculateProcessingState(timeRange)).isEqualTo(ProcessingNodesState.SOME_UP_TO_DATE);
} |
public static boolean httpRequestWasMade() {
return getFakeHttpLayer().hasRequestInfos();
} | @Test
public void httpRequestWasMade_returnsFalseIfNoRequestMatchingGivenRuleWasMade()
throws IOException, HttpException {
makeRequest("http://example.com");
assertFalse(FakeHttp.httpRequestWasMade("http://example.org"));
} |
public static ByteArrayOutputStream getPayload(MultipartPayload multipartPayload) throws IOException {
final ByteArrayOutputStream os = new ByteArrayOutputStream();
final String preamble = multipartPayload.getPreamble();
if (preamble != null) {
os.write((preamble + "\r\n").getBytes());
}
final List<BodyPartPayload> bodyParts = multipartPayload.getBodyParts();
if (!bodyParts.isEmpty()) {
final String boundary = multipartPayload.getBoundary();
final byte[] startBoundary = ("--" + boundary + "\r\n").getBytes();
for (BodyPartPayload bodyPart : bodyParts) {
os.write(startBoundary);
final Map<String, String> bodyPartHeaders = bodyPart.getHeaders();
if (bodyPartHeaders != null) {
for (Map.Entry<String, String> header : bodyPartHeaders.entrySet()) {
os.write((header.getKey() + ": " + header.getValue() + "\r\n").getBytes());
}
}
os.write("\r\n".getBytes());
if (bodyPart instanceof MultipartPayload) {
getPayload((MultipartPayload) bodyPart).writeTo(os);
} else if (bodyPart instanceof ByteArrayBodyPartPayload) {
final ByteArrayBodyPartPayload byteArrayBodyPart = (ByteArrayBodyPartPayload) bodyPart;
os.write(byteArrayBodyPart.getPayload(), byteArrayBodyPart.getOff(), byteArrayBodyPart.getLen());
} else {
throw new AssertionError(bodyPart.getClass());
}
os.write("\r\n".getBytes()); //CRLF for the next (starting or closing) boundary
}
os.write(("--" + boundary + "--").getBytes());
final String epilogue = multipartPayload.getEpilogue();
if (epilogue != null) {
os.write(("\r\n" + epilogue).getBytes());
}
}
return os;
} | @Test
public void testFileByteArrayBodyPartPayloadMultipartPayload() throws IOException {
final MultipartPayload mP = new MultipartPayload("testFileByteArrayBodyPartPayloadMultipartPayload boundary");
mP.addBodyPart(new FileByteArrayBodyPartPayload("fileContent".getBytes(), "name", "filename.ext"));
final StringBuilder headersString = new StringBuilder();
for (Map.Entry<String, String> header : mP.getHeaders().entrySet()) {
headersString.append(header.getKey())
.append(": ")
.append(header.getValue())
.append("\r\n");
}
assertEquals("Content-Type: multipart/form-data; "
+ "boundary=\"testFileByteArrayBodyPartPayloadMultipartPayload boundary\"\r\n",
headersString.toString());
assertEquals("--testFileByteArrayBodyPartPayloadMultipartPayload boundary\r\n"
+ "Content-Disposition: form-data; name=\"name\"; filename=\"filename.ext\"\r\n"
+ "\r\n"
+ "fileContent"
+ "\r\n--testFileByteArrayBodyPartPayloadMultipartPayload boundary--",
MultipartUtils.getPayload(mP).toString());
} |
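Because getPayload() recurses into nested MultipartPayload body parts (the instanceof branch above), multipart structures can be nested arbitrarily. A minimal sketch, assuming MultipartPayload is itself a BodyPartPayload and that ByteArrayBodyPartPayload offers a plain byte[] constructor:

MultipartPayload outer = new MultipartPayload("outer-boundary");
MultipartPayload inner = new MultipartPayload("inner-boundary");
inner.addBodyPart(new ByteArrayBodyPartPayload("part content".getBytes()));
outer.addBodyPart(inner); // serialized recursively by getPayload()
byte[] wire = MultipartUtils.getPayload(outer).toByteArray();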
@Override
public boolean shouldWait() {
RingbufferContainer ringbuffer = getRingBufferContainerOrNull();
if (ringbuffer == null) {
return true;
}
if (ringbuffer.isTooLargeSequence(sequence) || ringbuffer.isStaleSequence(sequence)) {
//no need to wait, let the operation continue and fail in beforeRun
return false;
}
// wait only when the sequence is exactly one past the tail, i.e. the item has not been written yet
return sequence == ringbuffer.tailSequence() + 1;
} | @Test
public void whenOneAfterTailAndBufferEmpty() {
ReadOneOperation op = getReadOneOperation(ringbuffer.tailSequence() + 1);
// the buffer is empty, so the requested sequence is not readable yet and we must wait
boolean shouldWait = op.shouldWait();
assertTrue(shouldWait);
} |
public ParseResult parse(File file) throws IOException, SchemaParseException {
return parse(file, null);
} | @Test
void testParseURI() throws IOException {
Path tempFile = Files.createTempFile("TestSchemaParser", null);
Charset charset = UTF_CHARSETS[(int) Math.floor(UTF_CHARSETS.length * Math.random())];
Files.write(tempFile, singletonList(SCHEMA_JSON), charset);
Schema schema = new SchemaParser().parse(tempFile.toUri(), null).mainSchema();
assertEquals(SCHEMA_REAL, schema);
} |
@Async
@EventListener(ReplyCreatedEvent.class)
public void onNewReply(ReplyCreatedEvent event) {
Reply reply = event.getReply();
var commentName = reply.getSpec().getCommentName();
client.fetch(Comment.class, commentName)
.ifPresent(comment -> newReplyReasonPublisher.publishReasonBy(reply, comment));
} | @Test
void onNewReplyTest() {
var reply = mock(Reply.class);
var spec = mock(Reply.ReplySpec.class);
when(reply.getSpec()).thenReturn(spec);
when(spec.getCommentName()).thenReturn("fake-comment");
var spyReasonPublisher = spy(reasonPublisher);
var comment = mock(Comment.class);
when(client.fetch(eq(Comment.class), eq("fake-comment")))
.thenReturn(Optional.of(comment));
var event = new ReplyCreatedEvent(this, reply);
spyReasonPublisher.onNewReply(event);
verify(newReplyReasonPublisher).publishReasonBy(eq(reply), eq(comment));
verify(spec).getCommentName();
verify(client).fetch(eq(Comment.class), eq("fake-comment"));
} |
public StepInstanceActionResponse terminate(
WorkflowInstance instance,
String stepId,
User user,
Actions.StepInstanceAction action,
boolean blocking) {
validateStepId(instance, stepId, action);
StepInstance stepInstance =
stepInstanceDao.getStepInstance(
instance.getWorkflowId(),
instance.getWorkflowInstanceId(),
instance.getWorkflowRunId(),
stepId,
Constants.LATEST_INSTANCE_RUN);
if (!stepInstance.getRuntimeState().getStatus().shouldWakeup()) {
throw new MaestroInvalidStatusException(
"Cannot manually %s the step %s as it is in a terminal state [%s]",
action.name(), stepInstance.getIdentity(), stepInstance.getRuntimeState().getStatus());
}
// prepare payload and then add it to db
StepAction stepAction =
StepAction.createTerminate(
action, stepInstance, user, "manual step instance API call", false);
saveAction(stepInstance, stepAction);
if (blocking) {
long startTime = System.currentTimeMillis();
while (System.currentTimeMillis() - startTime < ACTION_TIMEOUT) {
StepRuntimeState state =
stepInstanceDao.getStepInstanceRuntimeState(
stepInstance.getWorkflowId(),
stepInstance.getWorkflowInstanceId(),
stepInstance.getWorkflowRunId(),
stepInstance.getStepId(),
Constants.LATEST_INSTANCE_RUN);
if (!state.getStatus().shouldWakeup()) {
return createActionResponseFrom(stepInstance, state, stepAction.toTimelineEvent());
}
TimeUtils.sleep(CHECK_INTERVAL);
}
throw new MaestroTimeoutException(
"%s action for the step %s is timed out. No retry is needed and maestro will eventually complete the action.",
action.name(), stepInstance.getIdentity());
} else {
return createActionResponseFrom(stepInstance, null, stepAction.toTimelineEvent());
}
} | @Test
public void testInvalidTerminate() {
AssertHelper.assertThrows(
"Cannot manually terminate the step",
MaestroBadRequestException.class,
"Cannot manually STOP the step [not-existing] because the latest workflow run",
() -> actionDao.terminate(instance, "not-existing", user, STOP, false));
AssertHelper.assertThrows(
"Cannot manually terminate the step",
MaestroNotFoundException.class,
"step instance [job.2][LATEST] not found (either not created or deleted)",
() -> actionDao.terminate(instance, "job.2", user, KILL, false));
stepInstance.getRuntimeState().setStatus(StepInstance.Status.FATALLY_FAILED);
stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true);
AssertHelper.assertThrows(
"Cannot manually restart the step",
MaestroInvalidStatusException.class,
"Cannot manually SKIP the step [sample-dag-test-3][1][1][job1] as it is in a terminal state [FATALLY_FAILED]",
() -> actionDao.terminate(instance, "job1", user, SKIP, false));
stepInstance.getRuntimeState().setStatus(StepInstance.Status.RUNNING);
stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true);
actionDao.terminate(instance, "job1", user, KILL, false);
AssertHelper.assertThrows(
"Cannot manually terminate the step",
MaestroResourceConflictException.class,
"There is an ongoing action for this step [sample-dag-test-3][1][1][job1]",
() -> actionDao.terminate(instance, "job1", user, STOP, false));
} |
public void add(CSQueue queue) {
String fullName = queue.getQueuePath();
String shortName = queue.getQueueShortName();
try {
modificationLock.writeLock().lock();
fullNameQueues.put(fullName, queue);
getMap.put(fullName, queue);
//we only update short queue name ambiguity for non root queues
if (!shortName.equals(CapacitySchedulerConfiguration.ROOT)) {
//getting or creating the ambiguity set for the current queue
Set<String> fullNamesSet =
this.shortNameToLongNames.getOrDefault(shortName, new HashSet<>());
//adding the full name to the queue
fullNamesSet.add(fullName);
this.shortNameToLongNames.put(shortName, fullNamesSet);
}
//updating the getMap references for the queue
updateGetMapForShortName(shortName);
} finally {
modificationLock.writeLock().unlock();
}
} | @Test
public void testAmbiguousMapping() throws IOException {
CSQueueStore store = new CSQueueStore();
//root.main
CSQueue main = createParentQueue("main", root);
//root.main.A
CSQueue mainA = createParentQueue("A", main);
//root.main.A.C
CSQueue mainAC = createLeafQueue("C", mainA);
//root.main.A.D
CSQueue mainAD = createParentQueue("D", mainA);
//root.main.A.D.E
CSQueue mainADE = createLeafQueue("E", mainAD);
//root.main.A.D.F
CSQueue mainADF = createLeafQueue("F", mainAD);
//root.main.B
CSQueue mainB = createParentQueue("B", main);
//root.main.B.C
CSQueue mainBC = createLeafQueue("C", mainB);
//root.main.B.D
CSQueue mainBD = createParentQueue("D", mainB);
//root.main.B.D.E
CSQueue mainBDE = createLeafQueue("E", mainBD);
//root.main.B.D.G
CSQueue mainBDG = createLeafQueue("G", mainBD);
store.add(main);
store.add(mainA);
store.add(mainAC);
store.add(mainAD);
store.add(mainADE);
store.add(mainADF);
store.add(mainB);
store.add(mainBC);
store.add(mainBD);
store.add(mainBDE);
store.add(mainBDG);
assertAccessibleByAllNames(store, main);
assertAccessibleByAllNames(store, mainA);
assertAccessibleByAllNames(store, mainB);
assertAccessibleByAllNames(store, mainADF);
assertAccessibleByAllNames(store, mainBDG);
assertAmbiguous(store, mainAC);
assertAmbiguous(store, mainAD);
assertAmbiguous(store, mainADE);
assertAmbiguous(store, mainBC);
assertAmbiguous(store, mainBD);
assertAmbiguous(store, mainBDE);
} |
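The practical effect of the ambiguity set: a short name resolves only while it maps to exactly one queue; ambiguous short names fall back to full-path lookup. A sketch under the assumption that CSQueueStore exposes a get(String) accessor over the internal getMap (the accessor name is an assumption):

CSQueueStore store = new CSQueueStore();
store.add(mainADF); // full name root.main.A.D.F, short name F
store.add(mainAC);  // full name root.main.A.C, short name C
store.add(mainBC);  // full name root.main.B.C -> short name C is now ambiguous
// store.get("F")             -> mainADF (short name still unique)
// store.get("C")             -> null (ambiguous short name)
// store.get("root.main.A.C") -> mainAC (full names always resolve)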
@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
NodeStatus remoteNodeStatus = request.getNodeStatus();
/**
* Here is the node heartbeat sequence...
* 1. Check if it's a valid (i.e. not excluded) node
* 2. Check if it's a registered node
* 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
* 4. Send healthStatus to RMNode
* 5. Update node's labels if distributed Node Labels configuration is enabled
* 6. Sync back node resources loaded from dynamic-resources.xml or updated
*    through the AdminService
* 7. Send container queuing limits back to the node
* 8. Update node-to-attributes mapping in RMNodeAttributeManager
*/
NodeId nodeId = remoteNodeStatus.getNodeId();
// 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
// in decommissioning.
if (!this.nodesListManager.isValidNode(nodeId.getHost())
&& !isNodeInDecommissioning(nodeId)) {
String message =
"Disallowed NodeManager nodeId: " + nodeId + " hostname: "
+ nodeId.getHost();
LOG.info(message);
return YarnServerBuilderUtils.newNodeHeartbeatResponse(
NodeAction.SHUTDOWN, message);
}
// 2. Check if it's a registered node
RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
if (rmNode == null) {
/* node does not exist */
String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
LOG.info(message);
return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
message);
}
// Send ping
this.nmLivelinessMonitor.receivedPing(nodeId);
this.decommissioningWatcher.update(rmNode, remoteNodeStatus);
// 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
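// A duplicate heartbeat still carries the responseId of the previous round,
// so advancing it with getNextResponseId() reproduces the id of the last
// response we sent; in that case the cached response is simply replayed.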
if (getNextResponseId(
remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse
.getResponseId()) {
LOG.info("Received duplicate heartbeat from node "
+ rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId());
return lastNodeHeartbeatResponse;
} else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse
.getResponseId()) {
String message =
"Too far behind rm response id:"
+ lastNodeHeartbeatResponse.getResponseId() + " nm response id:"
+ remoteNodeStatus.getResponseId();
LOG.info(message);
// TODO: Just sending reboot is not enough. Think more.
this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
message);
}
// Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
if (rmNode.getState() == NodeState.DECOMMISSIONING &&
decommissioningWatcher.checkReadyToBeDecommissioned(
rmNode.getNodeID())) {
String message = "DECOMMISSIONING " + nodeId +
" is ready to be decommissioned";
LOG.info(message);
this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
this.nmLivelinessMonitor.unregister(nodeId);
return YarnServerBuilderUtils.newNodeHeartbeatResponse(
NodeAction.SHUTDOWN, message);
}
if (timelineServiceV2Enabled) {
// Check & update collectors info from request.
updateAppCollectorsMap(request);
}
// Heartbeat response
long newInterval = nextHeartBeatInterval;
if (heartBeatIntervalScalingEnable) {
newInterval = rmNode.calculateHeartBeatInterval(
nextHeartBeatInterval, heartBeatIntervalMin,
heartBeatIntervalMax, heartBeatIntervalSpeedupFactor,
heartBeatIntervalSlowdownFactor);
}
NodeHeartbeatResponse nodeHeartBeatResponse =
YarnServerBuilderUtils.newNodeHeartbeatResponse(
getNextResponseId(lastNodeHeartbeatResponse.getResponseId()),
NodeAction.NORMAL, null, null, null, null, newInterval);
rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);
populateKeys(request, nodeHeartBeatResponse);
populateTokenSequenceNo(request, nodeHeartBeatResponse);
if (timelineServiceV2Enabled) {
// Return collectors' map that NM needs to know
setAppCollectorsMapToResponse(rmNode.getRunningApps(),
nodeHeartBeatResponse);
}
// 4. Send status to RMNode, saving the latest response.
RMNodeStatusEvent nodeStatusEvent =
new RMNodeStatusEvent(nodeId, remoteNodeStatus);
if (request.getLogAggregationReportsForApps() != null
&& !request.getLogAggregationReportsForApps().isEmpty()) {
nodeStatusEvent.setLogAggregationReportsForApps(request
.getLogAggregationReportsForApps());
}
this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);
// 5. Update node's labels to RM's NodeLabelManager.
if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
try {
updateNodeLabelsFromNMReport(
NodeLabelsUtils.convertToStringSet(request.getNodeLabels()),
nodeId);
nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
} catch (IOException ex) {
//ensure the error message is captured and sent across in response
nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
}
}
// 6. check if node's capacity is load from dynamic-resources.xml
// if so, send updated resource back to NM.
String nid = nodeId.toString();
Resource capability = loadNodeResourceFromDRConfiguration(nid);
// sync back with new resource if not null.
if (capability != null) {
nodeHeartBeatResponse.setResource(capability);
}
// Check if we got an event (AdminService) that updated the resources
if (rmNode.isUpdatedCapability()) {
nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
rmNode.resetUpdatedCapability();
}
// 7. Send Container Queuing Limits back to the Node. This will be used by
// the node to truncate the number of Containers queued for execution.
if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
nodeHeartBeatResponse.setContainerQueuingLimit(
this.rmContext.getNodeManagerQueueLimitCalculator()
.createContainerQueuingLimit());
}
// 8. Get node's attributes and update node-to-attributes mapping
// in RMNodeAttributeManager.
if (request.getNodeAttributes() != null) {
try {
// update node attributes if necessary then update heartbeat response
updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
} catch (IOException ex) {
//ensure the error message is captured and sent across in response
String errorMsg =
nodeHeartBeatResponse.getDiagnosticsMessage() == null ?
ex.getMessage() :
nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex
.getMessage();
nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
}
}
return nodeHeartBeatResponse;
} | @Test
public void testDecommissionWithIncludeHosts() throws Exception {
writeToHostsFile("localhost", "host1", "host2");
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile
.getAbsolutePath());
rm = new MockRM(conf);
rm.start();
MockNM nm1 = rm.registerNode("host1:1234", 5120);
MockNM nm2 = rm.registerNode("host2:5678", 10240);
MockNM nm3 = rm.registerNode("localhost:4433", 1024);
ClusterMetrics metrics = ClusterMetrics.getMetrics();
assert(metrics != null);
int metricCount = metrics.getNumDecommisionedNMs();
NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat = nm2.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat = nm3.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
// To test that IPs also work
String ip = NetUtils.normalizeHostName("localhost");
writeToHostsFile("host1", ip);
rm.getNodesListManager().refreshNodes(conf);
checkShutdownNMCount(rm, ++metricCount);
nodeHeartbeat = nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
Assert
.assertEquals(1, ClusterMetrics.getMetrics().getNumShutdownNMs());
nodeHeartbeat = nm2.nodeHeartbeat(true);
Assert.assertTrue("Node is not decommisioned.", NodeAction.SHUTDOWN
.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat = nm3.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
Assert.assertEquals(metricCount, ClusterMetrics.getMetrics()
.getNumShutdownNMs());
rm.stop();
} |
public Set<EntityDescriptor> resolveEntities(Collection<EntityDescriptor> unresolvedEntities) {
final MutableGraph<EntityDescriptor> dependencyGraph = GraphBuilder.directed()
.allowsSelfLoops(false)
.nodeOrder(ElementOrder.insertion())
.build();
unresolvedEntities.forEach(dependencyGraph::addNode);
final HashSet<EntityDescriptor> resolvedEntities = new HashSet<>();
final MutableGraph<EntityDescriptor> finalDependencyGraph = resolveDependencyGraph(dependencyGraph, resolvedEntities);
LOG.debug("Final dependency graph: {}", finalDependencyGraph);
return finalDependencyGraph.nodes();
} | @Test
public void resolveEntitiesWithNoDependencies() throws NotFoundException {
final StreamMock streamMock = new StreamMock(ImmutableMap.of(
"_id", "stream-1234",
StreamImpl.FIELD_TITLE, "Stream Title"
));
when(streamService.load("stream-1234")).thenReturn(streamMock);
final ImmutableSet<EntityDescriptor> unresolvedEntities = ImmutableSet.of(
EntityDescriptor.create("stream-1234", ModelTypes.STREAM_V1)
);
final Set<EntityDescriptor> resolvedEntities = contentPackService.resolveEntities(unresolvedEntities);
assertThat(resolvedEntities).containsOnly(EntityDescriptor.create("stream-1234", ModelTypes.STREAM_V1));
} |
@Override
public TableConfig apply(PinotHelixResourceManager pinotHelixResourceManager,
TableConfig tableConfig, Schema schema, Map<String, String> extraProperties) {
IndexingConfig initialIndexingConfig = tableConfig.getIndexingConfig();
initialIndexingConfig.setInvertedIndexColumns(schema.getDimensionNames());
initialIndexingConfig.setNoDictionaryColumns(schema.getMetricNames());
return tableConfig;
} | @Test
public void testTuner() {
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE)
.setTableName("test").setTunerConfigList(Arrays.asList(_tunerConfig)).build();
TableConfigTunerRegistry.init(Arrays.asList(DEFAULT_TABLE_CONFIG_TUNER_PACKAGES));
TableConfigTuner tuner = TableConfigTunerRegistry.getTuner(TUNER_NAME);
TableConfig result = tuner.apply(null, tableConfig, _schema, Collections.emptyMap());
IndexingConfig newConfig = result.getIndexingConfig();
List<String> invertedIndexColumns = newConfig.getInvertedIndexColumns();
Assert.assertTrue(invertedIndexColumns.size() == 2);
for (int i = 0; i < DIMENSION_COLUMNS.length; i++) {
Assert.assertTrue(invertedIndexColumns.contains(DIMENSION_COLUMNS[i]));
}
List<String> noDictionaryColumns = newConfig.getNoDictionaryColumns();
Assert.assertTrue(noDictionaryColumns.size() == 1);
Assert.assertEquals(noDictionaryColumns.get(0), METRIC_COLUMNS[0]);
} |
@Around(SYNC_UPDATE_CONFIG_ALL)
public Object aroundSyncUpdateConfigAll(ProceedingJoinPoint pjp, HttpServletRequest request,
HttpServletResponse response, String dataId, String group, String content, String appName, String srcUser,
String tenant, String tag) throws Throwable {
if (!PropertyUtil.isManageCapacity()) {
return pjp.proceed();
}
LOGGER.info("[capacityManagement] aroundSyncUpdateConfigAll");
String betaIps = request.getHeader("betaIps");
if (StringUtils.isBlank(betaIps)) {
if (StringUtils.isBlank(tag)) {
// do capacity management limitation check for writing or updating config_info table.
if (configInfoPersistService.findConfigInfo(dataId, group, tenant) == null) {
// Write operation.
return do4Insert(pjp, request, response, group, tenant, content);
}
// Update operation.
return do4Update(pjp, request, response, dataId, group, tenant, content);
}
}
return pjp.proceed();
} | @Test
void testAroundSyncUpdateConfigAllForInsertAspect() throws Throwable {
//test with insert
//condition:
// 1. has tenant: true
// 2. capacity limit check: false
when(PropertyUtil.isManageCapacity()).thenReturn(false);
MockHttpServletRequest mockHttpServletRequest = new MockHttpServletRequest();
MockHttpServletResponse mockHttpServletResponse = new MockHttpServletResponse();
String localMockResult = (String) capacityManagementAspect.aroundSyncUpdateConfigAll(proceedingJoinPoint, mockHttpServletRequest,
mockHttpServletResponse, mockDataId, mockGroup, mockContent, null, null, mockTenant, null);
Mockito.verify(proceedingJoinPoint, Mockito.times(1)).proceed();
Mockito.verify(configInfoPersistService, Mockito.times(0)).findConfigInfo(any(), any(), any());
assert localMockResult.equals(mockProceedingJoinPointResult);
} |
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
Objects.requireNonNull(pred, "pred cannot be null");
Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new StatisticsFilter(columns));
} | @Test
public void testLtEq() {
assertTrue(canDrop(ltEq(intColumn, 9), columnMetas));
assertFalse(canDrop(ltEq(intColumn, 10), columnMetas));
assertFalse(canDrop(ltEq(intColumn, 100), columnMetas));
assertFalse(canDrop(ltEq(intColumn, 101), columnMetas));
assertTrue(canDrop(ltEq(intColumn, 0), nullColumnMetas));
assertTrue(canDrop(ltEq(intColumn, 7), nullColumnMetas));
assertTrue(canDrop(ltEq(missingColumn, fromString("any")), columnMetas));
assertFalse(canDrop(ltEq(intColumn, -1), missingMinMaxColumnMetas));
assertFalse(canDrop(ltEq(doubleColumn, -0.1), missingMinMaxColumnMetas));
} |
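The assertions pin down the fixture: ltEq(intColumn, 9) can be dropped while ltEq(intColumn, 10) cannot, so the chunk's min must be 10. A worked reading of the pruning rule (drop only when the statistics prove no value can match):

// intColumn chunk with min = 10 (implied by the assertions above):
//   ltEq(intColumn, 9)  -> every value >= 10 > 9       -> drop
//   ltEq(intColumn, 10) -> min itself satisfies <= 10  -> keep
// all-null chunk: no real values exist, so any ltEq can be dropped
// missing column: no values at all, same as all-null   -> drop
// chunk without min/max statistics: nothing is proven  -> keep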
public static TypeBuilder<Schema> builder() {
return new TypeBuilder<>(new SchemaCompletion(), new NameContext());
} | @Test
void testLong() {
Schema.Type type = Schema.Type.LONG;
Schema simple = SchemaBuilder.builder().longType();
Schema expected = primitive(type, simple);
Schema built1 = SchemaBuilder.builder().longBuilder().prop("p", "v").endLong();
assertEquals(expected, built1);
} |
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
return new StreamingJobGraphGenerator(
Thread.currentThread().getContextClassLoader(),
streamGraph,
null,
Runnable::run)
.createJobGraph();
} | @Test
void testIntermediateDataSetReuse() {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setBufferTimeout(-1);
DataStream<Integer> source = env.fromData(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
// these two vertices can reuse the same intermediate dataset
source.rebalance().sinkTo(new DiscardingSink<>()).setParallelism(2).name("sink1");
source.rebalance().sinkTo(new DiscardingSink<>()).setParallelism(2).name("sink2");
// this can not reuse the same intermediate dataset because of different parallelism
source.rebalance().sinkTo(new DiscardingSink<>()).setParallelism(3);
// this can not reuse the same intermediate dataset because of different partitioner
source.broadcast().sinkTo(new DiscardingSink<>()).setParallelism(2);
// these two vertices can not reuse the same intermediate dataset because of the pipelined
// edge
source.forward().sinkTo(new DiscardingSink<>()).setParallelism(1).disableChaining();
source.forward().sinkTo(new DiscardingSink<>()).setParallelism(1).disableChaining();
DataStream<Integer> mapStream = source.forward().map(value -> value).setParallelism(1);
// these two vertices can reuse the same intermediate dataset
mapStream.broadcast().sinkTo(new DiscardingSink<>()).setParallelism(2).name("sink3");
mapStream.broadcast().sinkTo(new DiscardingSink<>()).setParallelism(2).name("sink4");
StreamGraph streamGraph = env.getStreamGraph();
streamGraph.setGlobalStreamExchangeMode(GlobalStreamExchangeMode.FORWARD_EDGES_PIPELINED);
JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
assertThat(vertices).hasSize(9);
JobVertex sourceVertex = vertices.get(0);
List<IntermediateDataSetID> producedDataSet =
sourceVertex.getProducedDataSets().stream()
.map(IntermediateDataSet::getId)
.collect(Collectors.toList());
assertThat(producedDataSet).hasSize(6);
JobVertex sinkVertex1 = checkNotNull(findJobVertexWithName(vertices, "sink1"));
JobVertex sinkVertex2 = checkNotNull(findJobVertexWithName(vertices, "sink2"));
JobVertex sinkVertex3 = checkNotNull(findJobVertexWithName(vertices, "sink3"));
JobVertex sinkVertex4 = checkNotNull(findJobVertexWithName(vertices, "sink4"));
assertThat(sinkVertex2.getInputs().get(0).getSource().getId())
.isEqualTo(sinkVertex1.getInputs().get(0).getSource().getId());
assertThat(sinkVertex4.getInputs().get(0).getSource().getId())
.isEqualTo(sinkVertex3.getInputs().get(0).getSource().getId());
assertThat(sinkVertex3.getInputs().get(0).getSource().getId())
.isNotEqualTo(sinkVertex1.getInputs().get(0).getSource().getId());
StreamConfig streamConfig = new StreamConfig(sourceVertex.getConfiguration());
List<IntermediateDataSetID> nonChainedOutputs =
streamConfig.getOperatorNonChainedOutputs(getClass().getClassLoader()).stream()
.map(NonChainedOutput::getDataSetId)
.collect(Collectors.toList());
assertThat(nonChainedOutputs).hasSize(5);
assertThat(nonChainedOutputs)
.doesNotContain(sinkVertex3.getInputs().get(0).getSource().getId());
List<IntermediateDataSetID> streamOutputsInOrder =
streamConfig.getVertexNonChainedOutputs(getClass().getClassLoader()).stream()
.map(NonChainedOutput::getDataSetId)
.collect(Collectors.toList());
assertThat(streamOutputsInOrder).hasSize(6);
assertThat(streamOutputsInOrder).isEqualTo(producedDataSet);
} |
@Override
public void setEventPublisher(EventPublisher publisher) {
publisher.registerHandlerFor(Envelope.class, this::write);
} | @Test
void writes_index_html() throws Throwable {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
HtmlFormatter formatter = new HtmlFormatter(bytes);
EventBus bus = new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID);
formatter.setEventPublisher(bus);
TestRunStarted testRunStarted = new TestRunStarted(new Timestamp(10L, 0L));
bus.send(Envelope.of(testRunStarted));
TestRunFinished testRunFinished = new TestRunFinished(null, true, new Timestamp(15L, 0L), null);
bus.send(Envelope.of(testRunFinished));
assertThat(bytes, bytes(containsString("" +
"window.CUCUMBER_MESSAGES = [" +
"{\"testRunStarted\":{\"timestamp\":{\"seconds\":10,\"nanos\":0}}}," +
"{\"testRunFinished\":{\"success\":true,\"timestamp\":{\"seconds\":15,\"nanos\":0}}}" +
"];\n")));
} |
@Override
public boolean isOffsetExpired(OffsetAndMetadata offset, long currentTimestampMs, long offsetsRetentionMs) {
if (offset.expireTimestampMs.isPresent()) {
// Older versions with explicit expire_timestamp field => old expiration semantics is used
return currentTimestampMs >= offset.expireTimestampMs.getAsLong();
} else {
// Current version with no per partition retention
return currentTimestampMs - baseTimestamp.apply(offset) >= offsetsRetentionMs;
}
} | @Test
public void testIsOffsetExpired() {
long currentTimestamp = 1500L;
long commitTimestamp = 500L;
OptionalLong expireTimestampMs = OptionalLong.of(1500);
long offsetsRetentionMs = 500L;
OffsetExpirationConditionImpl condition = new OffsetExpirationConditionImpl(__ -> commitTimestamp);
OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(
100,
OptionalInt.of(1),
"metadata",
commitTimestamp,
expireTimestampMs
);
// Test when expire timestamp exists (older versions with per partition retention)
// 1. Current timestamp >= expire timestamp => should expire
assertTrue(condition.isOffsetExpired(offsetAndMetadata, currentTimestamp, offsetsRetentionMs));
// 2. Current timestamp < expire timestamp => should not expire
currentTimestamp = 499;
assertFalse(condition.isOffsetExpired(offsetAndMetadata, currentTimestamp, offsetsRetentionMs));
// Test when expire timestamp does not exist (current version with no per partition retention)
offsetAndMetadata = new OffsetAndMetadata(
100,
OptionalInt.of(1),
"metadata",
commitTimestamp,
OptionalLong.empty()
);
// 3. Current timestamp - base timestamp >= offsets retention => should expire
currentTimestamp = 1000L;
assertTrue(condition.isOffsetExpired(offsetAndMetadata, currentTimestamp, offsetsRetentionMs));
// 4. Current timestamp - base timestamp < offsets retention => should not expire
currentTimestamp = 999L;
assertFalse(condition.isOffsetExpired(offsetAndMetadata, currentTimestamp, offsetsRetentionMs));
} |
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(HANA_BOOLEAN);
builder.dataType(HANA_BOOLEAN);
builder.length(2L);
break;
case TINYINT:
builder.columnType(HANA_TINYINT);
builder.dataType(HANA_TINYINT);
break;
case SMALLINT:
builder.columnType(HANA_SMALLINT);
builder.dataType(HANA_SMALLINT);
break;
case INT:
builder.columnType(HANA_INTEGER);
builder.dataType(HANA_INTEGER);
break;
case BIGINT:
builder.columnType(HANA_BIGINT);
builder.dataType(HANA_BIGINT);
break;
case FLOAT:
builder.columnType(HANA_REAL);
builder.dataType(HANA_REAL);
break;
case DOUBLE:
builder.columnType(HANA_DOUBLE);
builder.dataType(HANA_DOUBLE);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", HANA_DECIMAL, precision, scale));
builder.dataType(HANA_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
builder.columnType(HANA_BLOB);
builder.dataType(HANA_BLOB);
break;
case STRING:
if (column.getColumnLength() == null
|| column.getColumnLength() <= MAX_NVARCHAR_LENGTH) {
builder.columnType(HANA_NVARCHAR);
builder.dataType(HANA_NVARCHAR);
builder.length(
column.getColumnLength() == null
? MAX_NVARCHAR_LENGTH
: column.getColumnLength());
} else {
builder.columnType(HANA_CLOB);
builder.dataType(HANA_CLOB);
}
break;
case DATE:
builder.columnType(HANA_DATE);
builder.dataType(HANA_DATE);
break;
case TIME:
builder.columnType(HANA_TIME);
builder.dataType(HANA_TIME);
break;
case TIMESTAMP:
if (column.getScale() == null || column.getScale() <= 0) {
builder.columnType(HANA_SECONDDATE);
builder.dataType(HANA_SECONDDATE);
} else {
int timestampScale = column.getScale();
if (column.getScale() > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(HANA_TIMESTAMP);
builder.dataType(HANA_TIMESTAMP);
builder.scale(timestampScale);
}
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.SAP_HANA,
column.getDataType().getSqlType().name(),
column.getName());
}
BasicTypeDefine typeDefine = builder.build();
typeDefine.setColumnType(
appendColumnSizeIfNeed(
typeDefine.getColumnType(), typeDefine.getLength(), typeDefine.getScale()));
return typeDefine;
} | @Test
public void testReconvertByte() {
Column column = PhysicalColumn.builder().name("test").dataType(BasicType.BYTE_TYPE).build();
BasicTypeDefine typeDefine = SapHanaTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(SapHanaTypeConverter.HANA_TINYINT, typeDefine.getColumnType());
Assertions.assertEquals(SapHanaTypeConverter.HANA_TINYINT, typeDefine.getDataType());
} |
public NetworkClient.InFlightRequest completeLastSent(String node) {
NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollFirst();
inFlightRequestCount.decrementAndGet();
return inFlightRequest;
} | @Test
public void testCompleteLastSent() {
int correlationId1 = addRequest(dest);
int correlationId2 = addRequest(dest);
assertEquals(2, inFlightRequests.count());
assertEquals(correlationId2, inFlightRequests.completeLastSent(dest).header.correlationId());
assertEquals(1, inFlightRequests.count());
assertEquals(correlationId1, inFlightRequests.completeLastSent(dest).header.correlationId());
assertEquals(0, inFlightRequests.count());
} |
@Override
public AwsProxyResponse handle(Throwable ex) {
if (ex instanceof ErrorResponse) {
return new AwsProxyResponse(((ErrorResponse) ex).getStatusCode().value(),
HEADERS, getErrorJson(ex.getMessage()));
} else {
return super.handle(ex);
}
} | @Test
void noHandlerFoundExceptionResultsIn404() {
AwsProxyResponse response = new SpringAwsProxyExceptionHandler().
handle(new NoHandlerFoundException(HttpMethod.GET.name(), "https://atesturl",
HttpHeaders.EMPTY));
assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatusCode());
} |
public static boolean isValidCidr(String cidr) {
return isValidIPv4Cidr(cidr) || isValidIPv6Cidr(cidr);
} | @Test
public void isValidCidr() {
String ipv4Cidr = "192.168.1.0/24";
String ipv6Cidr = "2001:0db8:1234:5678::/64";
String invalidCidr = "192.168.1.0";
assert IPAddressUtils.isValidCidr(ipv4Cidr);
assert IPAddressUtils.isValidCidr(ipv6Cidr);
assert !IPAddressUtils.isValidCidr(invalidCidr);
} |
public static boolean isEmpty(final Object[] array) {
return array == null || array.length == 0;
} | @Test
void isEmpty() {
assertTrue(ArrayUtils.isEmpty(null));
assertTrue(ArrayUtils.isEmpty(new Object[0]));
assertFalse(ArrayUtils.isEmpty(new Object[] {"abc"}));
} |
@Override
public String toString() {
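// Note: field values are interpolated verbatim and are not JSON-escaped;
// a double quote inside any field would produce invalid JSON.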
return "{\"username\" : " + (_username == null ? null : "\"" + _username + "\"")
+ ",\"text\" : " + (_text == null ? null : "\"" + _text + "\"")
+ ",\"icon_emoji\" : " + (_iconEmoji == null ? null : "\"" + _iconEmoji + "\"")
+ ",\"channel\" : " + (_channel == null ? null : "\"" + _channel + "\"") + "}";
} | @Test
public void testSlackMessageJsonFormat() {
String expectedJson = "{\"username\" : \"userA\",\"text\" : \"cc alert\",\"icon_emoji\" : \":information_source:"
+ "\",\"channel\" : \"#cc-alerts\"}";
assertEquals(expectedJson, new SlackMessage("userA", "cc alert", ":information_source:", "#cc-alerts").toString());
} |
public Timeslot getValidatedTimeslot(LocalTime other) {
if (!isContainedWithin(other)) {
throw new MomoException(ScheduleErrorCode.INVALID_SCHEDULE_TIMESLOT);
}
return Timeslot.from(other);
} | @DisplayName("Returns the matching timeslot when the given time is contained within the timeslot interval.")
@Test
void successfulWhenContainingIntervalFully() {
TimeslotInterval timeslotInterval = new TimeslotInterval(Timeslot.TIME_1000, Timeslot.TIME_1800);
LocalTime other = Timeslot.TIME_1200.startTime();
Timeslot timeslot = timeslotInterval.getValidatedTimeslot(other);
assertThat(timeslot.startTime()).isEqualTo(other);
} |
@Override
public Path move(final Path file, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
try {
final BrickApiClient client = new BrickApiClient(session);
if(status.isExists()) {
if(!new CaseInsensitivePathPredicate(file).test(target)) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s to be replaced with %s", target, file));
}
new BrickDeleteFeature(session).delete(Collections.singletonList(target), callback, delete);
}
}
final FileActionEntity entity = new FileActionsApi(client)
.move(new MovePathBody().destination(StringUtils.removeStart(target.getAbsolute(), String.valueOf(Path.DELIMITER))),
StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)));
if(entity.getFileMigrationId() != null) {
this.poll(client, entity);
}
return target.withAttributes(file.attributes());
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Cannot rename {0}", e, file);
}
} | @Test(expected = NotfoundException.class)
public void testMoveNotFound() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new BrickMoveFeature(session).move(test, new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
} |
@Override
public List<UsbSerialPort> getPorts() {
return mPorts;
} | @Test
public void compositeRndisDevice() throws Exception {
UsbDeviceConnection usbDeviceConnection = mock(UsbDeviceConnection.class);
UsbDevice usbDevice = mock(UsbDevice.class);
UsbInterface rndisControlInterface = mock(UsbInterface.class);
UsbInterface rndisDataInterface = mock(UsbInterface.class);
UsbInterface controlInterface = mock(UsbInterface.class);
UsbInterface dataInterface = mock(UsbInterface.class);
UsbEndpoint controlEndpoint = mock(UsbEndpoint.class);
UsbEndpoint readEndpoint = mock(UsbEndpoint.class);
UsbEndpoint writeEndpoint = mock(UsbEndpoint.class);
// the device has multiple USB_CLASS_CDC_DATA interfaces => pick the correct one via the Interface Association Descriptor (IAD)
when(usbDeviceConnection.getRawDescriptors()).thenReturn(HexDump.hexStringToByteArray(
"12 01 00 02 EF 02 01 40 FE CA 02 40 00 01 01 02 03 01\n" +
"09 02 8D 00 04 01 00 80 32\n" +
"08 0B 00 02 E0 01 03 00\n" +
"09 04 00 00 01 E0 01 03 04\n" +
"05 24 00 10 01\n" +
"05 24 01 00 01\n" +
"04 24 02 00\n" +
"05 24 06 00 01\n" +
"07 05 81 03 08 00 01\n" +
"09 04 01 00 02 0A 00 00 00\n" +
"07 05 82 02 40 00 00\n" +
"07 05 02 02 40 00 00\n" +
"08 0B 02 02 02 02 00 00\n" +
"09 04 02 00 01 02 02 00 04\n" +
"05 24 00 20 01\n" +
"05 24 01 00 03\n" +
"04 24 02 02\n" +
"05 24 06 02 03\n" +
"07 05 83 03 08 00 10\n" +
"09 04 03 00 02 0A 00 00 00\n" +
"07 05 04 02 40 00 00\n" +
"07 05 84 02 40 00 00"));
when(usbDeviceConnection.claimInterface(controlInterface,true)).thenReturn(true);
when(usbDeviceConnection.claimInterface(dataInterface,true)).thenReturn(true);
when(usbDevice.getInterfaceCount()).thenReturn(4);
when(usbDevice.getInterface(0)).thenReturn(rndisControlInterface);
when(usbDevice.getInterface(1)).thenReturn(rndisDataInterface);
when(usbDevice.getInterface(2)).thenReturn(controlInterface);
when(usbDevice.getInterface(3)).thenReturn(dataInterface);
when(rndisControlInterface.getId()).thenReturn(0);
when(rndisControlInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_WIRELESS_CONTROLLER);
when(rndisControlInterface.getInterfaceSubclass()).thenReturn(1);
when(rndisControlInterface.getInterfaceProtocol()).thenReturn(3);
when(rndisDataInterface.getId()).thenReturn(1);
when(rndisDataInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_CDC_DATA);
when(controlInterface.getId()).thenReturn(2);
when(controlInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_COMM);
when(controlInterface.getInterfaceSubclass()).thenReturn(USB_SUBCLASS_ACM);
when(dataInterface.getId()).thenReturn(3);
when(dataInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_CDC_DATA);
when(controlInterface.getEndpointCount()).thenReturn(1);
when(controlInterface.getEndpoint(0)).thenReturn(controlEndpoint);
when(dataInterface.getEndpointCount()).thenReturn(2);
when(dataInterface.getEndpoint(0)).thenReturn(writeEndpoint);
when(dataInterface.getEndpoint(1)).thenReturn(readEndpoint);
when(controlEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN);
when(controlEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_INT);
when(readEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN);
when(readEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK);
when(writeEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_OUT);
when(writeEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK);
CdcAcmSerialDriver driver = new CdcAcmSerialDriver(usbDevice);
CdcAcmSerialDriver.CdcAcmSerialPort port = (CdcAcmSerialDriver.CdcAcmSerialPort) driver.getPorts().get(0);
port.mConnection = usbDeviceConnection;
port.openInt();
assertEquals(readEndpoint, port.mReadEndpoint);
assertEquals(writeEndpoint, port.mWriteEndpoint);
} |
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
} | @Test
public void shouldUpdateStatementText() {
// Given:
givenStatement("CREATE STREAM x AS SELECT * FROM SOURCE;");
// When:
final ConfiguredStatement<?> result = injector.inject(statement, builder);
// Then:
assertThat(result.getMaskedStatementText(),
equalTo(
"CREATE STREAM X WITH (CLEANUP_POLICY='delete', KAFKA_TOPIC='name', PARTITIONS=1, REPLICAS=1, RETENTION_MS=100) AS SELECT *"
+ "\nFROM SOURCE SOURCE\n"
+ "EMIT CHANGES;"));
} |
@Override
public TimeLimiter timeLimiter(final String name) {
return timeLimiter(name, getDefaultConfig(), emptyMap());
} | @Test
public void timeLimiterNewWithNullConfigSupplier() {
exception.expect(NullPointerException.class);
exception.expectMessage("Supplier must not be null");
TimeLimiterRegistry registry = new InMemoryTimeLimiterRegistry(config);
registry.timeLimiter("name", (Supplier<TimeLimiterConfig>) null);
} |
public Result resolve(List<PluginDescriptor> plugins) {
// create graphs
dependenciesGraph = new DirectedGraph<>();
dependentsGraph = new DirectedGraph<>();
// populate graphs
Map<String, PluginDescriptor> pluginByIds = new HashMap<>();
for (PluginDescriptor plugin : plugins) {
addPlugin(plugin);
pluginByIds.put(plugin.getPluginId(), plugin);
}
log.debug("Graph: {}", dependenciesGraph);
// get a sorted list of dependencies
List<String> sortedPlugins = dependenciesGraph.reverseTopologicalSort();
log.debug("Plugins order: {}", sortedPlugins);
// create the result object
Result result = new Result(sortedPlugins);
resolved = true;
if (sortedPlugins != null) { // no cyclic dependency
// detect not found dependencies
for (String pluginId : sortedPlugins) {
if (!pluginByIds.containsKey(pluginId)) {
result.addNotFoundDependency(pluginId);
}
}
}
// check dependencies versions
for (PluginDescriptor plugin : plugins) {
String pluginId = plugin.getPluginId();
String existingVersion = plugin.getVersion();
List<String> dependents = getDependents(pluginId);
while (!dependents.isEmpty()) {
String dependentId = dependents.remove(0);
PluginDescriptor dependent = pluginByIds.get(dependentId);
String requiredVersion = getDependencyVersionSupport(dependent, pluginId);
boolean ok = checkDependencyVersion(requiredVersion, existingVersion);
if (!ok) {
result.addWrongDependencyVersion(new WrongDependencyVersion(pluginId, dependentId, existingVersion, requiredVersion));
}
}
}
return result;
} | @Test
void notFoundDependencies() {
PluginDescriptor pd1 = new DefaultPluginDescriptor()
.setPluginId("p1")
.setDependencies("p2, p3");
List<PluginDescriptor> plugins = new ArrayList<>();
plugins.add(pd1);
DependencyResolver.Result result = resolver.resolve(plugins);
assertFalse(result.getNotFoundDependencies().isEmpty());
assertEquals(Arrays.asList("p2", "p3"), result.getNotFoundDependencies());
} |
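For contrast, a resolution that succeeds. A minimal sketch, assuming Result exposes the sorted id list through a getSortedPlugins() getter (the getter name is an assumption; the list is what the Result constructor receives):

PluginDescriptor p2 = new DefaultPluginDescriptor().setPluginId("p2");
PluginDescriptor p1 = new DefaultPluginDescriptor()
        .setPluginId("p1")
        .setDependencies("p2");
DependencyResolver.Result ok = resolver.resolve(Arrays.asList(p1, p2));
// ok.getNotFoundDependencies() is empty; the reverse topological sort puts
// dependencies before dependents, so the expected order is [p2, p1]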
int getStrength(long previousDuration, long currentDuration, int strength) {
if (isPreviousDurationCloserToGoal(previousDuration, currentDuration)) {
return strength - 1;
} else {
return strength;
}
} | @Test
void getStrengthShouldReturn4IfStrengthIs4() {
// given
int currentStrength = 4;
// when
int actual = bcCryptWorkFactorService.getStrength(0, 0, currentStrength);
// then
assertThat(actual).isEqualTo(4);
} |
@Override
public void hasAllRequiredFields() {
if (!actual.isInitialized()) {
failWithoutActual(
simpleFact("expected to have all required fields set"),
fact("but was missing", actual.findInitializationErrors()),
fact("proto was", actualCustomStringRepresentationForProtoPackageMembersToCall()));
}
} | @Test
public void testHasAllRequiredFields() {
// Proto 3 doesn't have required fields.
if (isProto3()) {
return;
}
expectThat(parsePartial("")).hasAllRequiredFields();
expectThat(parsePartial("o_required_string_message: { required_string: \"foo\" }"))
.hasAllRequiredFields();
expectFailureWhenTesting()
.that(parsePartial("o_required_string_message: {}"))
.hasAllRequiredFields();
expectThatFailure()
.factKeys()
.containsExactly(
"expected to have all required fields set", "but was missing", "proto was");
expectThatFailure()
.factValue("but was missing")
.isEqualTo("[o_required_string_message.required_string]");
expectFailureWhenTesting()
.that(parsePartial("r_required_string_message: {} r_required_string_message: {}"))
.hasAllRequiredFields();
expectThatFailure()
.factKeys()
.containsExactly(
"expected to have all required fields set", "but was missing", "proto was");
expectThatFailure()
.factValue("but was missing")
.contains("r_required_string_message[0].required_string");
expectThatFailure()
.factValue("but was missing")
.contains("r_required_string_message[1].required_string");
} |
public String createNote(String notePath,
AuthenticationInfo subject) throws IOException {
return createNote(notePath, interpreterSettingManager.getDefaultInterpreterSetting().getName(),
subject);
} | @Test
void testSchedulePoolUsage() throws InterruptedException, IOException {
final int timeout = 30;
final String everySecondCron = "* * * * * ?";
// each run starts a new JVM and the job takes roughly 5 seconds
final CountDownLatch jobsToExecuteCount = new CountDownLatch(5);
final String noteId = notebook.createNote("note1", anonymous);
executeNewParagraphByCron(noteId, everySecondCron);
afterStatusChangedListener = new StatusChangedListener() {
@Override
public void onStatusChanged(Job<?> job, Status before, Status after) {
if (after == Status.FINISHED) {
jobsToExecuteCount.countDown();
}
}
};
assertTrue(jobsToExecuteCount.await(timeout, TimeUnit.SECONDS));
terminateScheduledNote(noteId);
afterStatusChangedListener = null;
} |
static BsonTimestamp startAtTimestamp(Map<String, String> options) {
String startAtValue = options.get(START_AT_OPTION);
if (isNullOrEmpty(startAtValue)) {
throw QueryException.error("startAt property is required for MongoDB stream. " + POSSIBLE_VALUES);
}
if ("now".equalsIgnoreCase(startAtValue)) {
return MongoUtilities.bsonTimestampFromTimeMillis(System.currentTimeMillis());
} else {
try {
return MongoUtilities.bsonTimestampFromTimeMillis(Long.parseLong(startAtValue));
} catch (NumberFormatException e) {
try {
return MongoUtilities.bsonTimestampFromTimeMillis(Instant.parse(startAtValue).toEpochMilli());
} catch (DateTimeParseException ex) {
throw QueryException.error("Invalid startAt value: '" + startAtValue + "'. " + POSSIBLE_VALUES);
}
}
}
} | @Test
public void throws_at_invalid_dateTimeString() {
// given
long time = System.currentTimeMillis();
LocalDateTime timeDate = LocalDateTime.ofEpochSecond(time / 1000, 0, UTC);
String dateAsString = timeDate.format(DateTimeFormatter.ISO_DATE_TIME) + "BLABLABLA";
// when
QueryException queryException = assertThrows(QueryException.class, () ->
Options.startAtTimestamp(ImmutableMap.of(Options.START_AT_OPTION, dateAsString)));
// then
assertThat(queryException).hasMessageContaining("Invalid startAt value:");
} |
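The option accepts three spellings, tried in order. A short sketch of the accepted inputs, mirroring the Options API used in the test:

// 1. the literal "now" -> current wall-clock time
Options.startAtTimestamp(ImmutableMap.of(Options.START_AT_OPTION, "now"));
// 2. epoch milliseconds
Options.startAtTimestamp(ImmutableMap.of(Options.START_AT_OPTION, "1700000000000"));
// 3. an ISO-8601 instant (the same moment as above)
Options.startAtTimestamp(ImmutableMap.of(Options.START_AT_OPTION, "2023-11-14T22:13:20Z"));
// anything else throws QueryException listing the possible values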
public static Map<String, String> getStringMap(String property, JsonNode node) {
Preconditions.checkArgument(node.has(property), "Cannot parse missing map: %s", property);
JsonNode pNode = node.get(property);
Preconditions.checkArgument(
pNode != null && !pNode.isNull() && pNode.isObject(),
"Cannot parse string map from non-object value: %s: %s",
property,
pNode);
ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
Iterator<String> fields = pNode.fieldNames();
while (fields.hasNext()) {
String field = fields.next();
builder.put(field, getString(field, pNode));
}
return builder.build();
} | @Test
public void getStringMap() throws JsonProcessingException {
assertThatThrownBy(() -> JsonUtil.getStringMap("items", JsonUtil.mapper().readTree("{}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing map: items");
assertThatThrownBy(
() -> JsonUtil.getStringMap("items", JsonUtil.mapper().readTree("{\"items\": null}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse string map from non-object value: items: null");
assertThatThrownBy(
() ->
JsonUtil.getStringMap(
"items", JsonUtil.mapper().readTree("{\"items\": {\"a\":\"23\", \"b\":45}}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse to a string value: b: 45");
Map<String, String> items = ImmutableMap.of("a", "23", "b", "45");
assertThat(
JsonUtil.getStringMap(
"items", JsonUtil.mapper().readTree("{\"items\": {\"a\":\"23\", \"b\":\"45\"}}")))
.isEqualTo(items);
String json =
JsonUtil.generate(
gen -> {
gen.writeStartObject();
JsonUtil.writeStringMap("items", items, gen);
gen.writeEndObject();
},
false);
assertThat(JsonUtil.getStringMap("items", JsonUtil.mapper().readTree(json))).isEqualTo(items);
} |
public NumericIndicator dividedBy(Indicator<Num> other) {
return NumericIndicator.of(BinaryOperation.quotient(this, other));
} | @Test
public void dividedBy() {
final NumericIndicator numericIndicator = NumericIndicator.of(cp1);
final NumericIndicator staticOp = numericIndicator.dividedBy(5);
assertNumEquals(1 / 5.0, staticOp.getValue(0));
assertNumEquals(9 / 5.0, staticOp.getValue(8));
final NumericIndicator zeroOp = numericIndicator.dividedBy(0);
assertNumEquals(NaN.NaN, zeroOp.getValue(0));
assertNumEquals(NaN.NaN, zeroOp.getValue(8));
final NumericIndicator dynamicOp = numericIndicator.dividedBy(ema);
assertNumEquals(cp1.getValue(0).dividedBy(ema.getValue(0)), dynamicOp.getValue(0));
assertNumEquals(cp1.getValue(8).dividedBy(ema.getValue(8)), dynamicOp.getValue(8));
} |
@Override
public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
List<ResourceRequest> oppResourceReqs,
ApplicationAttemptId applicationAttemptId,
OpportunisticContainerContext opportContext, long rmIdentifier,
String appSubmitter) throws YarnException {
// Update black list.
updateBlacklist(blackList, opportContext);
// Add OPPORTUNISTIC requests to the outstanding ones.
opportContext.addToOutstandingReqs(oppResourceReqs);
Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
Set<String> allocatedNodes = new HashSet<>();
List<Container> allocatedContainers = new ArrayList<>();
// Satisfy the outstanding OPPORTUNISTIC requests.
boolean continueLoop = true;
while (continueLoop) {
continueLoop = false;
List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
for (SchedulerRequestKey schedulerKey :
opportContext.getOutstandingOpReqs().descendingKeySet()) {
// Allocated containers :
// Key = Requested Capability,
// Value = List of Containers of given cap (the actual container size
// might be different than what is requested, which is why
// we need the requested capability (key) to match against
// the outstanding reqs)
int remAllocs = -1;
int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
if (maxAllocationsPerAMHeartbeat > 0) {
remAllocs =
maxAllocationsPerAMHeartbeat - allocatedContainers.size()
- getTotalAllocations(allocations);
if (remAllocs <= 0) {
LOG.info("Not allocating more containers as we have reached max "
+ "allocations per AM heartbeat {}",
maxAllocationsPerAMHeartbeat);
break;
}
}
Map<Resource, List<Allocation>> allocation = allocate(
rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
if (allocation.size() > 0) {
allocations.add(allocation);
continueLoop = true;
}
}
matchAllocation(allocations, allocatedContainers, opportContext);
}
return allocatedContainers;
} | @Test
public void testRoundRobinRackLocalAllocation() throws Exception {
ResourceBlacklistRequest blacklistRequest =
ResourceBlacklistRequest.newInstance(
new ArrayList<>(), new ArrayList<>());
List<ResourceRequest> reqs =
Arrays.asList(
ResourceRequest.newBuilder().allocationRequestId(1)
.priority(PRIORITY_NORMAL)
.resourceName("/r1")
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build(),
ResourceRequest.newBuilder().allocationRequestId(1)
.priority(PRIORITY_NORMAL)
.resourceName("h1")
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build(),
ResourceRequest.newBuilder().allocationRequestId(1)
.priority(PRIORITY_NORMAL)
.resourceName(ResourceRequest.ANY)
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build(),
ResourceRequest.newBuilder().allocationRequestId(2)
.priority(PRIORITY_NORMAL)
.resourceName("/r1")
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build(),
ResourceRequest.newBuilder().allocationRequestId(2)
.priority(PRIORITY_NORMAL)
.resourceName("h1")
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build(),
ResourceRequest.newBuilder().allocationRequestId(2)
.priority(PRIORITY_NORMAL)
.resourceName(ResourceRequest.ANY)
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build());
ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0L, 1), 1);
oppCntxt.updateNodeList(
Arrays.asList(
RemoteNode.newInstance(
NodeId.newInstance("h3", 1234), "h3:1234", "/r2"),
RemoteNode.newInstance(
NodeId.newInstance("h2", 1234), "h2:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h5", 1234), "h5:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h4", 1234), "h4:1234", "/r2")));
List<Container> containers = allocator.allocateContainers(
blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser");
Set<String> allocatedHosts = new HashSet<>();
for (Container c : containers) {
allocatedHosts.add(c.getNodeHttpAddress());
}
LOG.info("Containers: {}", containers);
Assert.assertTrue(allocatedHosts.contains("h2:1234"));
Assert.assertTrue(allocatedHosts.contains("h5:1234"));
Assert.assertFalse(allocatedHosts.contains("h3:1234"));
Assert.assertFalse(allocatedHosts.contains("h4:1234"));
Assert.assertEquals(2, containers.size());
} |
public static String extractFromURIPattern(String paramsRuleString, String pattern, String realURI) {
Map<String, String> criteriaMap = new TreeMap<>();
pattern = sanitizeURLForRegExp(pattern);
realURI = sanitizeURLForRegExp(realURI);
// Build a pattern for extracting parts from pattern and a pattern for extracting values
// from realURI. Supporting both {id} and :id.
String partsPattern = null;
String valuesPattern = null;
if (pattern.indexOf("/{") != -1) {
partsPattern = pattern.replaceAll(CURLY_PART_PATTERN, CURLY_PART_EXTRACTION_PATTERN);
valuesPattern = pattern.replaceAll(CURLY_PART_PATTERN, "(.+)");
} else {
partsPattern = pattern.replaceAll("(:[^:^/]+)", "\\:(.+)");
valuesPattern = pattern.replaceAll("(:[^:^/]+)", "(.+)");
}
if (pattern.contains("$")) {
partsPattern = partsPattern.replace("$", "\\$");
valuesPattern = valuesPattern.replace("$", "\\$");
}
Pattern partsP = Pattern.compile(partsPattern);
Matcher partsM = partsP.matcher(pattern);
Pattern valuesP = Pattern.compile(valuesPattern);
Matcher valuesM = valuesP.matcher(realURI);
// Rule string can be a URI_ELEMENT rule and contain ?? elements.
// We must remove them before parsing the URI parts.
if (paramsRuleString.contains("??")) {
paramsRuleString = paramsRuleString.split("\\?\\?")[0];
}
final var paramsRule = Arrays.stream(paramsRuleString.split("&&")).map(String::trim).distinct()
.collect(Collectors.toUnmodifiableSet());
// Both should match and have the same group count.
if (valuesM.matches() && partsM.matches() && valuesM.groupCount() == partsM.groupCount()) {
for (int i = 1; i < partsM.groupCount() + 1; i++) {
final String paramName = partsM.group(i);
final String paramValue = valuesM.group(i);
if (paramsRule.contains(paramName)) {
criteriaMap.put(paramName, paramValue);
}
}
}
// Just appends sorted entries, separating them with /.
StringBuilder result = new StringBuilder();
for (Map.Entry<String, String> criteria : criteriaMap.entrySet()) {
result.append("/").append(criteria.getKey()).append("=").append(criteria.getValue());
}
return result.toString();
} | @Test
void testExtractFromURIPatternUnsorted() {
// Check with parts not sorted in natural order.
String requestPath = "/deployment/byComponent/1.2/myComp";
String operationName = "/deployment/byComponent/{version}/{component}";
String paramRule = "version && component";
// Dispatch string parts are sorted.
String dispatchCriteria = DispatchCriteriaHelper.extractFromURIPattern(paramRule, operationName, requestPath);
assertEquals("/component=myComp/version=1.2", dispatchCriteria);
} |
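As a hedged usage sketch (reusing only the method shown above and assuming DispatchCriteriaHelper is on the classpath), both template styles produce the same sorted criteria string:

public class DispatchCriteriaSketch {
    public static void main(String[] args) {
        String rule = "version && component";
        // Hypothetical driver exercising both the {id} and :id template styles.
        String curly = DispatchCriteriaHelper.extractFromURIPattern(
                rule, "/deployment/byComponent/{version}/{component}", "/deployment/byComponent/1.2/myComp");
        String colon = DispatchCriteriaHelper.extractFromURIPattern(
                rule, "/deployment/byComponent/:version/:component", "/deployment/byComponent/1.2/myComp");
        // Both print "/component=myComp/version=1.2": the TreeMap sorts keys alphabetically.
        System.out.println(curly + " " + colon);
    }
}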
public static void removeUnavailableStepsFromMapping( Map<TargetStepAttribute, SourceStepField> targetMap,
Set<SourceStepField> unavailableSourceSteps, Set<TargetStepAttribute> unavailableTargetSteps ) {
Iterator<Entry<TargetStepAttribute, SourceStepField>> targetMapIterator = targetMap.entrySet().iterator();
while ( targetMapIterator.hasNext() ) {
Entry<TargetStepAttribute, SourceStepField> entry = targetMapIterator.next();
SourceStepField currentSourceStepField = entry.getValue();
TargetStepAttribute currentTargetStepAttribute = entry.getKey();
if ( unavailableSourceSteps.contains( currentSourceStepField ) || unavailableTargetSteps.contains(
currentTargetStepAttribute ) ) {
targetMapIterator.remove();
}
}
} | @Test
public void removeUnavailableStepsFromMapping_unavailable_source_target_step() {
TargetStepAttribute unavailableTargetStep = new TargetStepAttribute( UNAVAILABLE_STEP, TEST_ATTR_VALUE, false );
SourceStepField unavailableSourceStep = new SourceStepField( UNAVAILABLE_STEP, TEST_FIELD );
Map<TargetStepAttribute, SourceStepField> targetMap = new HashMap<TargetStepAttribute, SourceStepField>();
targetMap.put( unavailableTargetStep, unavailableSourceStep );
Set<TargetStepAttribute> unavailableTargetSteps = Collections.singleton( UNAVAILABLE_TARGET_STEP );
Set<SourceStepField> unavailableSourceSteps = Collections.singleton( UNAVAILABLE_SOURCE_STEP );
MetaInject.removeUnavailableStepsFromMapping( targetMap, unavailableSourceSteps, unavailableTargetSteps );
assertTrue( targetMap.isEmpty() );
} |
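The explicit Iterator above is what makes in-place removal safe during traversal; a minimal standalone sketch of the same pattern, with hypothetical data:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class IteratorRemoveSketch {
    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>(Map.of("keep", "a", "drop", "b"));
        Iterator<Map.Entry<String, String>> it = map.entrySet().iterator();
        while (it.hasNext()) {
            if ("drop".equals(it.next().getKey())) {
                it.remove(); // calling map.remove() here instead would risk ConcurrentModificationException
            }
        }
        System.out.println(map); // {keep=a}
    }
}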
public ContentInfo verify(ContentInfo signedMessage, Date date) {
final SignedData signedData = SignedData.getInstance(signedMessage.getContent());
final X509Certificate cert = certificate(signedData);
certificateVerifier.verify(cert, date);
final X500Name name = X500Name.getInstance(cert.getIssuerX500Principal().getEncoded());
try {
final CMSSignedData cms = new CMSSignedData(signedMessage);
cms.verifySignatures(signerId -> {
if (!name.equals(signerId.getIssuer())) {
throw new VerificationException("Issuer does not match certificate");
}
if (!cert.getSerialNumber().equals(signerId.getSerialNumber())) {
throw new VerificationException("Serial number does not match certificate");
}
return new JcaSignerInfoVerifierBuilder(digestProvider).setProvider(bcProvider).build(cert);
});
} catch (CMSException e) {
throw new VerificationException("Could not verify CMS", e);
}
return signedData.getEncapContentInfo();
} | @Test
public void verifyValidRvig2014Cms() throws Exception {
final ContentInfo signedMessage = ContentInfo.getInstance(fixture("rvig2014"));
final ContentInfo message = new CmsVerifier(new CertificateVerifier.None()).verify(signedMessage);
assertEquals(LdsSecurityObject.OID, message.getContentType().getId());
assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", Hex.toHexString(
DigestUtils.digest("SHA1").digest(((ASN1OctetString) message.getContent()).getOctets())
));
} |
protected File initRootProjectWorkDir(File baseDir, Map<String, String> rootProperties) {
String workDir = rootProperties.get(CoreProperties.WORKING_DIRECTORY);
if (StringUtils.isBlank(workDir)) {
return new File(baseDir, CoreProperties.WORKING_DIRECTORY_DEFAULT_VALUE);
}
File customWorkDir = new File(workDir);
if (customWorkDir.isAbsolute()) {
return customWorkDir;
}
return new File(baseDir, customWorkDir.getPath());
} | @Test
public void shouldInitRootWorkDirWithCustomAbsoluteFolder() {
Map<String, String> props = singletonMap("sonar.working.directory", new File("src").getAbsolutePath());
ProjectReactorBuilder builder = new ProjectReactorBuilder(new ScannerProperties(props),
mock(AnalysisWarnings.class));
File baseDir = new File("target/tmp/baseDir");
File workDir = builder.initRootProjectWorkDir(baseDir, props);
assertThat(workDir).isEqualTo(new File("src").getAbsoluteFile());
} |
public void addValueProviders(final String segmentName,
final RocksDB db,
final Cache cache,
final Statistics statistics) {
if (storeToValueProviders.isEmpty()) {
logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
} else if (storeToValueProviders.containsKey(segmentName)) {
throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
" has been already added. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
}
verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
} | @Test
public void shouldThrowIfCacheToAddIsSameAsOnlyOneOfMultipleCaches() {
recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
recorder.addValueProviders(SEGMENT_STORE_NAME_2, dbToAdd2, cacheToAdd2, statisticsToAdd2);
final Throwable exception = assertThrows(
IllegalStateException.class,
() -> recorder.addValueProviders(SEGMENT_STORE_NAME_3, dbToAdd3, cacheToAdd1, statisticsToAdd3)
);
assertThat(
exception.getMessage(),
is("Caches for store " + STORE_NAME + " of task " + TASK_ID1 +
" are either not all distinct or do not all refer to the same cache. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues")
);
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
try {
if(containerService.isContainer(file)) {
final PathAttributes attributes = new PathAttributes();
final CloudBlobContainer container = session.getClient().getContainerReference(containerService.getContainer(file).getName());
container.downloadAttributes(null, null, context);
final BlobContainerProperties properties = container.getProperties();
attributes.setETag(properties.getEtag());
attributes.setModificationDate(properties.getLastModified().getTime());
return attributes;
}
if(file.isFile() || file.isPlaceholder()) {
try {
final CloudBlob blob = session.getClient().getContainerReference(containerService.getContainer(file).getName())
.getBlobReferenceFromServer(containerService.getKey(file));
final BlobRequestOptions options = new BlobRequestOptions();
blob.downloadAttributes(AccessCondition.generateEmptyCondition(), options, context);
return this.toAttributes(blob);
}
catch(StorageException e) {
switch(e.getHttpStatusCode()) {
case HttpStatus.SC_NOT_FOUND:
if(file.isPlaceholder()) {
// Ignore failure and look for common prefix
break;
}
default:
throw e;
}
}
}
// Check for common prefix
try {
new AzureObjectListService(session, context).list(file, new CancellingListProgressListener());
return PathAttributes.EMPTY;
}
catch(ListCanceledException l) {
// Found common prefix
return PathAttributes.EMPTY;
}
}
catch(StorageException e) {
throw new AzureExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
catch(URISyntaxException e) {
throw new NotfoundException(e.getMessage(), e);
}
} | @Test
public void testFind() throws Exception {
final Path container = new Path("cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new AzureTouchFeature(session, null).touch(test, new TransferStatus());
final AzureAttributesFinderFeature f = new AzureAttributesFinderFeature(session, null);
final PathAttributes attributes = f.find(test);
assertEquals(0L, attributes.getSize());
assertNotNull(attributes.getETag());
new AzureDeleteFeature(session, null).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public final void setStrictness(Strictness strictness) {
Objects.requireNonNull(strictness);
this.strictness = strictness;
} | @Test
public void testEscapeCharacterQuoteInStrictMode() {
String json = "\"\\'\"";
JsonReader reader = new JsonReader(reader(json));
reader.setStrictness(Strictness.STRICT);
IOException expected = assertThrows(IOException.class, reader::nextString);
assertThat(expected)
.hasMessageThat()
.startsWith("Invalid escaped character \"'\" in strict mode");
} |
@Override
public void reset() throws IOException {
createDirectory(PATH_DATA.getKey());
createDirectory(PATH_WEB.getKey());
createDirectory(PATH_LOGS.getKey());
File tempDir = createOrCleanTempDirectory(PATH_TEMP.getKey());
try (AllProcessesCommands allProcessesCommands = new AllProcessesCommands(tempDir)) {
allProcessesCommands.clean();
}
} | @Test
public void reset_cleans_the_sharedmemory_file() throws IOException {
assertThat(tempDir.mkdir()).isTrue();
try (AllProcessesCommands commands = new AllProcessesCommands(tempDir)) {
for (int i = 0; i < MAX_PROCESSES; i++) {
commands.create(i).setUp();
}
underTest.reset();
for (int i = 0; i < MAX_PROCESSES; i++) {
assertThat(commands.create(i).isUp()).isFalse();
}
}
} |
@Override
public int get(PageId pageId, int pageOffset, int bytesToRead, ReadTargetBuffer target,
boolean isTemporary) throws IOException, PageNotFoundException {
Callable<Integer> callable = () ->
mPageStore.get(pageId, pageOffset, bytesToRead, target, isTemporary);
try {
return mTimeLimter.callWithTimeout(callable, mTimeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
// Task got cancelled by others, interrupt the current thread
// and then throw a runtime ex to make the higher level stop.
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (TimeoutException e) {
Metrics.STORE_GET_TIMEOUT.inc();
throw new IOException(e);
} catch (RejectedExecutionException e) {
Metrics.STORE_THREADS_REJECTED.inc();
throw new IOException(e);
} catch (Throwable t) {
Throwables.propagateIfPossible(t, IOException.class, PageNotFoundException.class);
throw new IOException(t);
}
} | @Test
public void get() throws Exception {
mPageStore.put(PAGE_ID, PAGE);
assertEquals(PAGE.length,
mTimeBoundPageStore.get(PAGE_ID, 0, PAGE.length, new ByteArrayTargetBuffer(mBuf, 0)));
assertArrayEquals(PAGE, mBuf);
} |
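The surrounding class is not shown; assuming mTimeLimter is a Guava TimeLimiter, a hedged sketch of how such a limiter is typically constructed and driven:

import com.google.common.util.concurrent.SimpleTimeLimiter;
import com.google.common.util.concurrent.TimeLimiter;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class TimeLimiterSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newCachedThreadPool();
        TimeLimiter limiter = SimpleTimeLimiter.create(pool); // assumed construction
        // callWithTimeout runs the Callable on the pool and cancels it past the deadline,
        // mirroring the TimeoutException branch of get() above.
        Integer result = limiter.callWithTimeout(() -> 42, 100, TimeUnit.MILLISECONDS);
        System.out.println(result); // 42
        pool.shutdown();
    }
}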
public void load() {
Set<CoreExtension> coreExtensions = serviceLoaderWrapper.load(getClass().getClassLoader());
ensureNoDuplicateName(coreExtensions);
coreExtensionRepository.setLoadedCoreExtensions(coreExtensions);
if (!coreExtensions.isEmpty()) {
LOG.info("Loaded core extensions: {}", coreExtensions.stream().map(CoreExtension::getName).collect(Collectors.joining(", ")));
}
} | @Test
public void load_fails_with_ISE_if_multiple_core_extensions_declare_same_names() {
Set<CoreExtension> coreExtensions = ImmutableSet.of(newCoreExtension("a"), newCoreExtension("a"), newCoreExtension("b"), newCoreExtension("b"));
when(serviceLoaderWrapper.load(any())).thenReturn(coreExtensions);
assertThatThrownBy(() -> underTest.load())
.isInstanceOf(IllegalStateException.class)
.hasMessage("Multiple core extensions declare the following names: a, b");
} |
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf,
Configuration yarnSiteConfig, boolean drfUsed,
boolean enableAsyncScheduler, boolean userPercentage,
FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER,
CapacityScheduler.class.getCanonicalName());
if (conf.getBoolean(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
int interval = conf.getInt(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
yarnSiteConfig.setInt(PREFIX +
"schedule-asynchronously.scheduling-interval-ms", interval);
}
// This should always be true to trigger the CS
// auto queue refresh.
yarnSiteConfig.setBoolean(
YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
preemptionEnabled = true;
String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
int waitTimeBeforeKill = conf.getInt(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
waitTimeBeforeKill);
long waitBeforeNextStarvationCheck = conf.getLong(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
yarnSiteConfig.setLong(
CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
waitBeforeNextStarvationCheck);
} else {
if (preemptionMode ==
FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
}
}
// For auto created queue's auto deletion.
if (!userPercentage) {
String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
// Set the expired for deletion interval to 10s, consistent with fs.
yarnSiteConfig.setInt(CapacitySchedulerConfiguration.
AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
}
if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
} else {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
}
// Enable auto CS conf refresh.
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
addMonitorPolicy(QueueConfigurationAutoRefreshPolicy
.class.getCanonicalName(), yarnSiteConfig));
int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
maxAssign);
}
float localityThresholdNode = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
if (localityThresholdNode !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
localityThresholdNode);
}
float localityThresholdRack = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
if (localityThresholdRack !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
yarnSiteConfig.setFloat(
CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
localityThresholdRack);
}
if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
sizeBasedWeight = true;
}
if (drfUsed) {
yarnSiteConfig.set(
CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
DominantResourceCalculator.class.getCanonicalName());
}
if (enableAsyncScheduler) {
yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
}
} | @Test
public void testSitePreemptionConversion() {
yarnConfig.setBoolean(FairSchedulerConfiguration.PREEMPTION, true);
yarnConfig.setInt(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 123);
yarnConfig.setInt(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
321);
converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false,
false, false, null);
assertTrue("Preemption enabled",
yarnConvertedConfig.getBoolean(
YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
false));
assertEquals("Wait time before kill", 123,
yarnConvertedConfig.getInt(
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
-1));
assertEquals("Starvation check wait time", 321,
yarnConvertedConfig.getInt(
CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
-1));
assertFalse("Observe_only should be false",
yarnConvertedConfig.getBoolean(CapacitySchedulerConfiguration.
PREEMPTION_OBSERVE_ONLY, false));
assertTrue("Should contain ProportionalCapacityPreemptionPolicy.",
yarnConvertedConfig.
get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES).
contains(ProportionalCapacityPreemptionPolicy.
class.getCanonicalName()));
} |
@Nonnull
public static <T extends Throwable> T cloneExceptionWithFixedAsyncStackTrace(@Nonnull T original) {
StackTraceElement[] fixedStackTrace = getFixedStackTrace(original, Thread.currentThread().getStackTrace());
Class<? extends Throwable> exceptionClass = original.getClass();
Throwable clone = tryCreateExceptionWithMessageAndCause(exceptionClass,
original.getMessage(), original.getCause());
if (clone != null) {
clone.setStackTrace(fixedStackTrace);
return (T) clone;
}
return original;
} | @Test
public void testCloneExceptionWithFixedAsyncStackTrace_whenCannotConstructSource_then_returnWithoutCloning() {
IOException expectedException = new IOException();
NoPublicConstructorException result = ExceptionUtil.cloneExceptionWithFixedAsyncStackTrace(
new NoPublicConstructorException(expectedException));
assertEquals(NoPublicConstructorException.class, result.getClass());
assertEquals(expectedException, result.getCause());
assertNoAsyncTrace(result);
} |
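tryCreateExceptionWithMessageAndCause is not shown; a hedged guess at its reflective shape, returning null when no suitable public constructor exists (the fallback the test above relies on):

import java.lang.reflect.Constructor;

final class ReflectiveCloneSketch {
    // Hypothetical stand-in for the unshown helper; the real one may try more
    // constructor shapes. null signals "cannot clone", so the original is returned.
    static Throwable tryCreate(Class<? extends Throwable> type, String message, Throwable cause) {
        try {
            Constructor<? extends Throwable> ctor = type.getConstructor(String.class, Throwable.class);
            return ctor.newInstance(message, cause);
        } catch (ReflectiveOperationException e) {
            return null;
        }
    }
}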
public static String humanReadableByteCount(double bytes) {
if (bytes < 1024) {
return String.format("%.1f B", bytes);
}
int exp = (int) (Math.log(bytes) / Math.log(1024));
String pre = "KMGTPE".charAt(exp - 1) + "";
return String.format("%.1f %sB", bytes / Math.pow(1024, exp), pre);
} | @Test
public void testHumanReadableByteCount() {
assertEquals("0.0 B", NumericUtils.humanReadableByteCount(0));
assertEquals("27.0 B", NumericUtils.humanReadableByteCount(27));
assertEquals("1023.0 B", NumericUtils.humanReadableByteCount(1023));
assertEquals("1.0 KB", NumericUtils.humanReadableByteCount(1024));
assertEquals("108.0 KB", NumericUtils.humanReadableByteCount(110592));
assertEquals("27.0 GB", NumericUtils.humanReadableByteCount(28991029248L));
assertEquals("1.7 TB", NumericUtils.humanReadableByteCount(1855425871872L));
assertEquals("8.0 EB", NumericUtils.humanReadableByteCount(9223372036854775807L));
} |
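A worked instance of the exponent arithmetic, using the 1024-based units the method assumes:

public class ByteCountArithmeticSketch {
    public static void main(String[] args) {
        // 28991029248 bytes = 27 * 2^30, so exp = (int) (log(28991029248) / log(1024)) = 3,
        // the prefix is "KMGTPE".charAt(3 - 1) = 'G', and 28991029248 / 1024^3 = 27.0 -> "27.0 GB".
        double bytes = 28991029248.0;
        int exp = (int) (Math.log(bytes) / Math.log(1024)); // 3
        System.out.printf("%.1f %sB%n", bytes / Math.pow(1024, exp), "KMGTPE".charAt(exp - 1));
    }
}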
public QueryObjectBundle rewriteQuery(@Language("SQL") String query, QueryConfiguration queryConfiguration, ClusterType clusterType)
{
return rewriteQuery(query, queryConfiguration, clusterType, false);
} | @Test
public void testRewriteFunctionCalls()
{
VerifierConfig verifierConfig = new VerifierConfig().setFunctionSubstitutes(
"/approx_distinct(x)/count(x)/," +
"/approx_percentile(x,array[0.9])/repeat(avg(x),cast(cardinality(array[0.9]) as integer))/," +
"/approx_percentile(x,_)/avg(x)/," +
"/arbitrary(x)/min(x)/," +
"/array_agg(x)/if(typeof(arbitrary(x))='integer', array_sort(array_agg(x)), array_agg(x))/," +
"/current_timestamp/timestamp '2023-01-01 00:00:00 UTC'/," +
"/first_value(x)/if(min(x) is not null, min(x), max(x))/," +
"/max_by(x,_)/max(x)/," +
"/map_agg(x,y)/transform_values(multimap_agg(x,y),(k,v)->array_max(v))/," +
"/min_by(x,_)/min(x)/," +
"/now()/date_trunc('day',now())/," +
"/rand()/1/," +
"/row_number() over (partition by x order by y)/row_number() over (partition by y)/");
QueryRewriter queryRewriter = getQueryRewriter(new QueryRewriteConfig(), verifierConfig);
// Test rewriting nested function calls.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" TRIM(ARBITRARY(b))\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" TRIM(MIN(b))\n" +
"FROM test_table");
// Test rewriting with nested function calls.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" MAP_AGG(a,b)\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" TRANSFORM_VALUES(MULTIMAP_AGG(a,b),(k,v)->ARRAY_MAX(v))\n" +
"FROM test_table");
// Test rewriting with literal.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT RAND()",
CONFIGURATION, CONTROL).getQuery(),
"SELECT 1");
// Test rewriting with if expression.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" ARRAY_AGG(DISTINCT a)\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" IF(TYPEOF(ARBITRARY(DISTINCT a))='integer', ARRAY_SORT(ARRAY_AGG(DISTINCT a)), ARRAY_AGG(DISTINCT a))\n" +
"FROM test_table");
// Test rewriting CurrentTime function.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" TO_UNIXTIME(CURRENT_TIMESTAMP)\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" TO_UNIXTIME(TIMESTAMP '2023-01-01 00:00:00 UTC')\n" +
"FROM test_table");
// Test rewriting NOW function.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" TO_UNIXTIME(NOW())\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" TO_UNIXTIME(DATE_TRUNC('day',NOW()))\n" +
"FROM test_table");
// Test rewriting columns in Join.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT *\n" +
"FROM test_table x\n" +
"JOIN (\n" +
" SELECT\n" +
" b,\n" +
" APPROX_PERCENTILE(a, 0.5) AS a\n" +
" FROM test_table\n" +
" GROUP BY\n" +
" 1\n" +
") y\n" +
" ON (x.b = y.b)",
CONFIGURATION, CONTROL).getQuery(),
"SELECT *\n" +
"FROM test_table x\n" +
"JOIN (\n" +
" SELECT\n" +
" b,\n" +
" AVG(a) AS a\n" +
" FROM test_table\n" +
" GROUP BY\n" +
" 1\n" +
") y\n" +
" ON (x.b = y.b)");
// Test rewriting columns in SubqueryExpression.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT a, b\n" +
"FROM test_table\n" +
"WHERE a IN (\n" +
" SELECT\n" +
" ARBITRARY(a)\n" +
" FROM test_table\n" +
")",
CONFIGURATION, CONTROL).getQuery(),
"SELECT a, b\n" +
"FROM test_table\n" +
"WHERE a IN (\n" +
" SELECT\n" +
" MIN(a)\n" +
" FROM test_table\n" +
")");
// Test rewriting columns in TableSubquery.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT num\n" +
"FROM (\n" +
" SELECT\n" +
" APPROX_DISTINCT(b) AS num\n" +
" FROM test_table\n" +
") x",
CONFIGURATION, CONTROL).getQuery(),
"SELECT num\n" +
"FROM (\n" +
" SELECT\n" +
" COUNT(b) AS num\n" +
" FROM test_table\n" +
") x");
// Test rewriting columns in With.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"WITH x AS (\n" +
" SELECT\n" +
" MAX_BY(a, b) AS a\n" +
" FROM test_table\n" +
")\n" +
"SELECT\n" +
" a\n" +
"FROM x", CONFIGURATION, CONTROL).getQuery(),
"WITH x AS (\n" +
" SELECT\n" +
" MAX(a) AS a\n" +
" FROM test_table\n" +
")\n" +
"SELECT\n" +
" a\n" +
"FROM x");
// Test rewriting columns in Union.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" ARBITRARY(a)\n" +
"FROM (\n" +
" SELECT \n" +
" MIN_BY(a, b) AS a\n" +
" FROM test_table\n" +
"\n" +
" UNION ALL\n" +
"\n" +
" SELECT \n" +
" MIN_BY(a, b) AS a\n" +
" FROM test_table\n" +
") x",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" MIN(a)\n" +
"FROM (\n" +
" SELECT \n" +
" MIN(a) AS a\n" +
" FROM test_table\n" +
"\n" +
" UNION ALL\n" +
"\n" +
" SELECT \n" +
" MIN(a) AS a\n" +
" FROM test_table\n" +
") x");
// Test rewriting window functions with partition and order derived from the original.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" FIRST_VALUE(a) OVER (\n" +
" PARTITION BY b\n" +
" )\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" IF(\n" +
" MIN(a) OVER (\n" +
" PARTITION BY\n" +
" b\n" +
" ) IS NOT NULL,\n" +
" MIN(a) OVER (\n" +
" PARTITION BY\n" +
" b\n" +
" ),\n" +
" MAX(a) OVER (\n" +
" PARTITION BY\n" +
" b\n" +
" )\n" +
" )\n" +
"FROM test_table");
// Test rewriting window functions with partition and order resolving.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" ROW_NUMBER() OVER (\n" +
" PARTITION BY a\n" +
" ORDER BY b DESC\n" +
" )\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" ROW_NUMBER() OVER (\n" +
" PARTITION BY b\n" +
" ORDER BY b DESC\n" +
" )\n" +
"FROM test_table");
// Test mapping of multiple substitutions with match precedence and matching of literal arguments.
assertCreateTableAs(
queryRewriter.rewriteQuery(
"SELECT\n" +
" APPROX_PERCENTILE(a, 0.95),\n" +
" APPROX_PERCENTILE(a, ARRAY[0.5, 0.9])\n" +
"FROM test_table",
CONFIGURATION, CONTROL).getQuery(),
"SELECT\n" +
" AVG(a),\n" +
" REPEAT(AVG(a), CAST(CARDINALITY(ARRAY[0.5, 0.9]) AS INTEGER))\n" +
"FROM test_table");
} |
@Override
public void execute(Exchange exchange) throws SmppException {
QuerySm querySm = createQuerySm(exchange);
if (log.isDebugEnabled()) {
log.debug("Querying for a short message for exchange id '{}' and message id '{}'...",
exchange.getExchangeId(), querySm.getMessageId());
}
QuerySmResult querySmResult;
try {
querySmResult = session.queryShortMessage(
querySm.getMessageId(),
TypeOfNumber.valueOf(querySm.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(querySm.getSourceAddrNpi()),
querySm.getSourceAddr());
} catch (Exception e) {
throw new SmppException(e);
}
if (log.isDebugEnabled()) {
log.debug("Query for a short message for exchange id '{}' and message id '{}'",
exchange.getExchangeId(), querySm.getMessageId());
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, querySm.getMessageId());
message.setHeader(SmppConstants.ERROR, querySmResult.getErrorCode());
message.setHeader(SmppConstants.FINAL_DATE, SmppUtils.string2Date(querySmResult.getFinalDate()));
message.setHeader(SmppConstants.MESSAGE_STATE, querySmResult.getMessageState().name());
} | @Test
public void execute() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "QuerySm");
exchange.getIn().setHeader(SmppConstants.ID, "1");
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
when(session.queryShortMessage("1", TypeOfNumber.NATIONAL, NumberingPlanIndicator.NATIONAL, "1818"))
.thenReturn(new QuerySmResult("-300101010000004+", MessageState.DELIVERED, (byte) 0));
command.execute(exchange);
assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
assertEquals("DELIVERED", exchange.getMessage().getHeader(SmppConstants.MESSAGE_STATE));
assertEquals((byte) 0, exchange.getMessage().getHeader(SmppConstants.ERROR));
assertNotNull(exchange.getMessage().getHeader(SmppConstants.FINAL_DATE));
} |
@Override
public V getAndSet(V newValue) {
return get(getAndSetAsync(newValue));
} | @Test
public void testGetAndSet() {
RJsonBucket<TestType> al = redisson.getJsonBucket("test", new JacksonCodec<>(TestType.class));
TestType t = new TestType();
t.setName("name1");
al.set(t);
NestedType nt = new NestedType();
nt.setValue(123);
nt.setValues(Arrays.asList("t1", "t2"));
al.set("$.type", nt);
NestedType nt2 = new NestedType();
nt2.setValue(124);
nt2.setValues(Arrays.asList("t4", "t3"));
NestedType cv = al.getAndSet(new JacksonCodec<>(NestedType.class), "type", nt2);
assertThat(cv.getValue()).isEqualTo(nt.getValue());
assertThat(cv.getValues()).isEqualTo(nt.getValues());
NestedType nt3 = al.get(new JacksonCodec<>(NestedType.class), "type");
assertThat(nt3.getValue()).isEqualTo(nt2.getValue());
assertThat(nt3.getValues()).isEqualTo(nt2.getValues());
} |
public static void assertThatClassIsImmutable(Class<?> clazz) {
final ImmutableClassChecker checker = new ImmutableClassChecker();
if (!checker.isImmutableClass(clazz, false)) {
final Description toDescription = new StringDescription();
final Description mismatchDescription = new StringDescription();
checker.describeTo(toDescription);
checker.describeMismatch(mismatchDescription);
final String reason =
"\n" +
"Expected: is \"" + toDescription.toString() + "\"\n" +
" but : was \"" + mismatchDescription.toString() + "\"";
throw new AssertionError(reason);
}
} | @Test
public void testNotFinalPrivateMember() throws Exception {
boolean gotException = false;
try {
assertThatClassIsImmutable(NotFinalPrivateMember.class);
} catch (AssertionError assertion) {
assertThat(assertion.getMessage(),
containsString("a field named 'x' that is not final"));
gotException = true;
}
assertThat(gotException, is(true));
} |
static int majorVersion(final String javaSpecVersion) {
final String[] components = javaSpecVersion.split("\\.");
final int[] version = new int[components.length];
for (int i = 0; i < components.length; i++) {
version[i] = Integer.parseInt(components[i]);
}
if (version[0] == 1) {
assert version[1] >= 6;
return version[1];
} else {
return version[0];
}
} | @Test
public void testMajorVersion() {
assertEquals(6, PlatformDependent0.majorVersion("1.6"));
assertEquals(7, PlatformDependent0.majorVersion("1.7"));
assertEquals(8, PlatformDependent0.majorVersion("1.8"));
assertEquals(8, PlatformDependent0.majorVersion("8"));
assertEquals(9, PlatformDependent0.majorVersion("1.9")); // early version of JDK 9 before Project Verona
assertEquals(9, PlatformDependent0.majorVersion("9"));
} |
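For experimenting outside Netty, a minimal standalone copy of the parsing rule; the class name is hypothetical:

public class JavaVersionSketch {
    static int majorVersion(String spec) {
        String[] parts = spec.split("\\.");
        int first = Integer.parseInt(parts[0]);
        // "1.x" strings (Java 8 and earlier) carry the major version in the second component.
        return first == 1 ? Integer.parseInt(parts[1]) : first;
    }
    public static void main(String[] args) {
        System.out.println(majorVersion("1.8")); // 8
        System.out.println(majorVersion("11"));  // 11
    }
}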
public static ExternalSorter create(Options options) {
return options.getSorterType() == Options.SorterType.HADOOP
? HadoopExternalSorter.create(options)
: NativeExternalSorter.create(options);
} | @Test
public void testAddAfterSort() throws Exception {
SorterTestUtils.testAddAfterSort(
ExternalSorter.create(
new ExternalSorter.Options()
.setTempLocation(getTmpLocation().toString())
.setSorterType(sorterType)),
thrown);
fail();
} |
public static Map<String, FileWriteSchemaTransformFormatProvider> loadProviders() {
return Providers.loadProviders(FileWriteSchemaTransformFormatProvider.class);
} | @Test
public void loadProviders() {
Map<String, FileWriteSchemaTransformFormatProvider> formatProviderMap =
FileWriteSchemaTransformFormatProviders.loadProviders();
Set<String> keys = formatProviderMap.keySet();
assertEquals(ImmutableSet.of(AVRO, CSV, JSON, PARQUET, XML), keys);
} |
private int refreshAdminAcls(String subClusterId) throws IOException, YarnException {
// Refresh the admin acls
ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
RefreshAdminAclsRequest request =
recordFactory.newRecordInstance(RefreshAdminAclsRequest.class);
if (StringUtils.isNotBlank(subClusterId)) {
request.setSubClusterId(subClusterId);
}
adminProtocol.refreshAdminAcls(request);
return 0;
} | @Test
public void testRefreshAdminAcls() throws Exception {
String[] args = { "-refreshAdminAcls" };
assertEquals(0, rmAdminCLI.run(args));
verify(admin).refreshAdminAcls(any(RefreshAdminAclsRequest.class));
} |
public Connection getConnection() throws SQLException, SystemException, RollbackException {
if (CONTAINER_DATASOURCE_NAMES.contains(dataSource.getClass().getSimpleName())) {
return dataSource.getConnection();
}
Transaction transaction = xaTransactionManagerProvider.getTransactionManager().getTransaction();
if (!enlistedTransactions.get().containsKey(transaction)) {
Connection connection = dataSource.getConnection();
XAConnection xaConnection = xaConnectionWrapper.wrap(xaDataSource, connection);
transaction.enlistResource(new SingleXAResource(resourceName, xaConnection.getXAResource()));
transaction.registerSynchronization(new Synchronization() {
@Override
public void beforeCompletion() {
enlistedTransactions.get().remove(transaction);
}
@Override
public void afterCompletion(final int status) {
enlistedTransactions.get().clear();
}
});
enlistedTransactions.get().put(transaction, connection);
}
return enlistedTransactions.get().get(transaction);
} | @Test
void assertGetAtomikosConnection() throws SQLException, RollbackException, SystemException {
DataSource dataSource = DataSourceUtils.build(AtomikosDataSourceBean.class, TypedSPILoader.getService(DatabaseType.class, "H2"), "ds1");
XATransactionDataSource transactionDataSource = new XATransactionDataSource(TypedSPILoader.getService(DatabaseType.class, "H2"), "ds1", dataSource, xaTransactionManagerProvider);
try (Connection ignored = transactionDataSource.getConnection()) {
verify(xaTransactionManagerProvider, times(0)).getTransactionManager();
}
} |
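enlistedTransactions is not shown; a hedged sketch of the thread-local per-transaction cache that getConnection() implies, so repeated calls inside one transaction reuse the already-enlisted connection:

import java.sql.Connection;
import java.util.HashMap;
import java.util.Map;
import javax.transaction.Transaction;

class EnlistmentCacheSketch {
    // Assumed shape of the unshown field: one Connection cached per Transaction, per thread.
    final ThreadLocal<Map<Transaction, Connection>> enlistedTransactions =
            ThreadLocal.withInitial(HashMap::new);
}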
public static ProducingResult createProducingResult(
ResolvedSchema inputSchema, @Nullable Schema declaredSchema) {
// no schema has been declared by the user,
// the schema will be entirely derived from the input
if (declaredSchema == null) {
// go through data type to erase time attributes
final DataType physicalDataType = inputSchema.toSourceRowDataType();
final Schema schema = Schema.newBuilder().fromRowDataType(physicalDataType).build();
return new ProducingResult(null, schema, null);
}
final List<UnresolvedColumn> declaredColumns = declaredSchema.getColumns();
// the declared schema does not contain physical information,
// thus, it only replaces physical columns with metadata rowtime or adds a primary key
if (declaredColumns.stream().noneMatch(SchemaTranslator::isPhysical)) {
// go through data type to erase time attributes
final DataType sourceDataType = inputSchema.toSourceRowDataType();
final DataType physicalDataType =
patchDataTypeWithoutMetadataRowtime(sourceDataType, declaredColumns);
final Schema.Builder builder = Schema.newBuilder();
builder.fromRowDataType(physicalDataType);
builder.fromSchema(declaredSchema);
return new ProducingResult(null, builder.build(), null);
}
return new ProducingResult(null, declaredSchema, null);
} | @Test
void testOutputToNoSchema() {
final ResolvedSchema tableSchema =
ResolvedSchema.of(
Column.physical("id", BIGINT()),
Column.metadata("rowtime", TIMESTAMP_LTZ(3), null, false),
Column.physical("name", STRING()));
final ProducingResult result = SchemaTranslator.createProducingResult(tableSchema, null);
assertThat(result.getProjections()).isEmpty();
assertThat(result.getSchema())
.isEqualTo(
Schema.newBuilder()
.column("id", BIGINT())
.column("rowtime", TIMESTAMP_LTZ(3)) // becomes physical
.column("name", STRING())
.build());
assertThat(result.getPhysicalDataType()).isEmpty();
} |