focal_method | test_case |
---|---|
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
final AttributedList<Path> children = new AttributedList<>();
if(replies.isEmpty()) {
return children;
}
// At least one entry successfully parsed
boolean success = false;
for(String line : replies) {
final Map<String, Map<String, String>> file = this.parseFacts(line);
if(null == file) {
log.error(String.format("Error parsing line %s", line));
continue;
}
for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
final String name = f.getKey();
// size -- Size in octets
// modify -- Last modification time
// create -- Creation time
// type -- Entry type
// unique -- Unique id of file/directory
// perm -- File permissions, whether read, write, execute is allowed for the login id.
// lang -- Language of the file name per IANA [11] registry.
// media-type -- MIME media-type of file contents per IANA registry.
// charset -- Character set per IANA registry (if not UTF-8)
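// Example MLSD reply line (illustrative): "Type=file;Size=1024;Modify=20120131123456; readme.txt"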
final Map<String, String> facts = f.getValue();
if(!facts.containsKey("type")) {
log.error(String.format("No type fact in line %s", line));
continue;
}
final Path parsed;
if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
}
else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
}
else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
// Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
final String[] type = facts.get("type").split(":");
if(type.length == 2) {
final String target = type[1];
if(target.startsWith(String.valueOf(Path.DELIMITER))) {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
}
else {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
}
}
else {
log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
continue;
}
}
else {
log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
continue;
}
if(!success) {
if(parsed.isDirectory() && directory.getName().equals(name)) {
log.warn(String.format("Possibly bogus response line %s", line));
}
else {
success = true;
}
}
if(name.equals(".") || name.equals("..")) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip %s", name));
}
continue;
}
if(facts.containsKey("size")) {
parsed.attributes().setSize(Long.parseLong(facts.get("size")));
}
if(facts.containsKey("unix.uid")) {
parsed.attributes().setOwner(facts.get("unix.uid"));
}
if(facts.containsKey("unix.owner")) {
parsed.attributes().setOwner(facts.get("unix.owner"));
}
if(facts.containsKey("unix.gid")) {
parsed.attributes().setGroup(facts.get("unix.gid"));
}
if(facts.containsKey("unix.group")) {
parsed.attributes().setGroup(facts.get("unix.group"));
}
if(facts.containsKey("unix.mode")) {
parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
}
else if(facts.containsKey("perm")) {
if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
Permission.Action user = Permission.Action.none;
final String flags = facts.get("perm");
if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
// RETR command may be applied to that object
// Listing commands, LIST, NLST, and MLSD may be applied
user = user.or(Permission.Action.read);
}
if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
user = user.or(Permission.Action.write);
}
if(StringUtils.contains(flags, 'e')) {
// CWD command naming the object should succeed
user = user.or(Permission.Action.execute);
if(parsed.isDirectory()) {
user = user.or(Permission.Action.read);
}
}
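// e.g. (illustrative) a directory entry with perm="el" maps to user actions read+execute; a file with perm="rw" maps to read+write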
final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
parsed.attributes().setPermission(permission);
}
}
if(facts.containsKey("modify")) {
// Time values are always represented in UTC
parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
}
if(facts.containsKey("create")) {
// Time values are always represented in UTC
parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
}
children.add(parsed);
}
}
if(!success) {
throw new FTPInvalidListException(children);
}
return children;
} | @Test
public void testEmptyDir() throws Exception {
Path path = new Path(
"/www", EnumSet.of(Path.Type.directory));
String[] replies = new String[]{};
final AttributedList<Path> children = new FTPMlsdListResponseReader().read(path, Arrays.asList(replies));
assertEquals(0, children.size());
} |
public void executor(final ConfigGroupEnum type, final String json, final String eventType) {
ENUM_MAP.get(type).handle(json, eventType);
} | @Test
public void testPluginMyselfExecutor() {
String json = getJson();
websocketDataHandler.executor(ConfigGroupEnum.PLUGIN, json, DataEventTypeEnum.MYSELF.name());
List<PluginData> pluginDataList = new PluginDataHandler(pluginDataSubscriber).convert(json);
Mockito.verify(pluginDataSubscriber).refreshPluginDataSelf(pluginDataList);
} |
public static <Req extends RpcRequest> Matcher<Req> serviceEquals(String service) {
if (service == null) throw new NullPointerException("service == null");
if (service.isEmpty()) throw new NullPointerException("service is empty");
return new RpcServiceEquals<Req>(service);
} | @Test void serviceEquals_unmatched_mixedCase() {
when(request.service()).thenReturn("grpc.health.v1.Health");
assertThat(serviceEquals("grpc.health.v1.health").matches(request)).isFalse();
} |
public EtlStatus getEtlJobStatus(SparkLoadAppHandle handle, String appId, long loadJobId, String etlOutputPath,
SparkResource resource, BrokerDesc brokerDesc) throws UserException {
EtlStatus status = new EtlStatus();
Preconditions.checkState(appId != null && !appId.isEmpty());
if (resource.isYarnMaster()) {
// prepare yarn config
String configDir = resource.prepareYarnConfig();
// yarn client path
String yarnClient = resource.getYarnClientPath();
// command: yarn --config configDir application -status appId
String yarnStatusCmd = String.format(YARN_STATUS_CMD, yarnClient, configDir, appId);
LOG.info(yarnStatusCmd);
String[] envp = {"LC_ALL=" + Config.locale, "JAVA_HOME=" + System.getProperty("java.home")};
CommandResult result = Util.executeCommand(yarnStatusCmd, envp, EXEC_CMD_TIMEOUT_MS);
if (result.getReturnCode() != 0) {
String stderr = result.getStderr();
// case: the application does not exist
if (stderr != null && stderr.contains("doesn't exist in RM")) {
LOG.warn("spark application not found. spark app id: {}, load job id: {}, stderr: {}",
appId, loadJobId, stderr);
status.setState(TEtlState.CANCELLED);
status.setFailMsg("spark application not found");
return status;
}
LOG.warn("yarn application status failed. spark app id: {}, load job id: {}, timeout: {}" +
", return code: {}, stderr: {}, stdout: {}",
appId, loadJobId, EXEC_CMD_TIMEOUT_MS, result.getReturnCode(), stderr, result.getStdout());
throw new LoadException("yarn application status failed. error: " + stderr);
}
ApplicationReport report = new YarnApplicationReport(result.getStdout()).getReport();
LOG.info("yarn application -status {}. load job id: {}, output: {}, report: {}",
appId, loadJobId, result.getStdout(), report);
YarnApplicationState state = report.getYarnApplicationState();
FinalApplicationStatus faStatus = report.getFinalApplicationStatus();
status.setState(fromYarnState(state, faStatus));
if (status.getState() == TEtlState.CANCELLED) {
if (state == YarnApplicationState.FINISHED) {
status.setFailMsg("spark app state: " + faStatus.toString());
} else {
status.setFailMsg("yarn app state: " + state.toString());
}
}
status.setTrackingUrl(handle.getUrl() != null ? handle.getUrl() : report.getTrackingUrl());
status.setProgress((int) (report.getProgress() * 100));
} else {
// state from handle
if (handle == null) {
status.setFailMsg("spark app handle is null");
status.setState(TEtlState.CANCELLED);
return status;
}
State state = handle.getState();
status.setState(fromSparkState(state));
if (status.getState() == TEtlState.CANCELLED) {
status.setFailMsg("spark app state: " + state.toString());
}
LOG.info("spark app id: {}, load job id: {}, app state: {}", appId, loadJobId, state);
}
if (status.getState() == TEtlState.FINISHED || status.getState() == TEtlState.CANCELLED) {
// get dpp result
String dppResultFilePath = EtlJobConfig.getDppResultFilePath(etlOutputPath);
try {
byte[] data;
if (brokerDesc.hasBroker()) {
data = BrokerUtil.readFile(dppResultFilePath, brokerDesc);
} else {
data = HdfsUtil.readFile(dppResultFilePath, brokerDesc);
}
String dppResultStr = new String(data, StandardCharsets.UTF_8);
DppResult dppResult = new Gson().fromJson(dppResultStr, DppResult.class);
if (dppResult != null) {
status.setDppResult(dppResult);
if (status.getState() == TEtlState.CANCELLED && !Strings.isNullOrEmpty(dppResult.failedReason)) {
status.setFailMsg(dppResult.failedReason);
}
}
} catch (UserException | JsonSyntaxException e) {
LOG.warn("read broker file failed. path: {}", dppResultFilePath, e);
}
}
return status;
} | @Test(expected = TimeoutException.class)
public void testGetEtlJobStatusTimeout(@Mocked BrokerUtil brokerUtil, @Mocked Util util,
@Mocked SparkYarnConfigFiles sparkYarnConfigFiles,
@Mocked SparkLoadAppHandle handle)
throws IOException, UserException {
new Expectations() {
{
sparkYarnConfigFiles.prepare();
sparkYarnConfigFiles.getConfigDir();
result = "./yarn_config";
Util.executeCommand(anyString, (String[]) any, anyLong);
minTimes = 0;
result = new TimeoutException("get spark etl job status timeout");
}
};
SparkResource resource = new SparkResource(resourceName);
Map<String, String> sparkConfigs = resource.getSparkConfigs();
sparkConfigs.put("spark.master", "yarn");
sparkConfigs.put("spark.submit.deployMode", "cluster");
sparkConfigs.put("spark.hadoop.yarn.resourcemanager.address", "127.0.0.1:9999");
new Expectations(resource) {
{
resource.getYarnClientPath();
result = Config.yarn_client_path;
}
};
BrokerDesc brokerDesc = new BrokerDesc(broker, Maps.newHashMap());
SparkEtlJobHandler handler = new SparkEtlJobHandler();
handler.getEtlJobStatus(handle, appId, loadJobId, etlOutputPath, resource, brokerDesc);
} |
File putIfAbsent(String userId, boolean saveToDisk) throws IOException {
String idKey = getIdStrategy().keyFor(userId);
String directoryName = idToDirectoryNameMap.get(idKey);
File directory = null;
if (directoryName == null) {
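// Double-checked locking: re-read the map inside the lock so only one thread creates the user directory and records the mapping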
synchronized (this) {
directoryName = idToDirectoryNameMap.get(idKey);
if (directoryName == null) {
directory = createDirectoryForNewUser(userId);
directoryName = directory.getName();
idToDirectoryNameMap.put(idKey, directoryName);
if (saveToDisk) {
save();
}
}
}
}
return directory == null ? new File(usersDirectory, directoryName) : directory;
} | @Test
public void testDirectoryFormatBasic() throws IOException {
UserIdMapper mapper = createUserIdMapper(IdStrategy.CASE_INSENSITIVE);
String user1 = "1user";
File directory1 = mapper.putIfAbsent(user1, true);
assertThat(directory1.getName(), startsWith(user1 + '_'));
} |
public void fillMaxSpeed(Graph graph, EncodingManager em) {
// In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info,
// but now we do, so we can fill in the country-dependent max_speed value where it is missing.
EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL);
} | @Test
public void testFwdBwd() {
ReaderWay way = new ReaderWay(0L);
way.setTag("country", Country.DEU);
way.setTag("highway", "primary");
way.setTag("maxspeed:forward", "50");
way.setTag("maxspeed:backward", "70");
EdgeIteratorState edge = createEdge(way);
calc.fillMaxSpeed(graph, em);
// internal max speed must be ignored as library currently ignores forward/backward
assertEquals(50, edge.get(maxSpeedEnc), 1);
assertEquals(70, edge.getReverse(maxSpeedEnc), 1);
} |
@Override
public Set<Interface> getInterfaces() {
return interfaces.values()
.stream()
.flatMap(set -> set.stream())
.collect(collectingAndThen(toSet(), ImmutableSet::copyOf));
} | @Test
public void testRemoveInterface() throws Exception {
ConnectPoint cp = createConnectPoint(1);
NetworkConfigEvent event = new NetworkConfigEvent(
NetworkConfigEvent.Type.CONFIG_REMOVED, cp, CONFIG_CLASS);
assertEquals(NUM_INTERFACES, interfaceManager.getInterfaces().size());
// Send in a config event removing an interface config
listener.event(event);
assertEquals(NUM_INTERFACES - 1, interfaceManager.getInterfaces().size());
} |
@Override
public void execute(Context context) {
long count = 0;
try (StreamWriter<ProjectDump.Rule> writer = dumpWriter.newStreamWriter(DumpElement.RULES)) {
ProjectDump.Rule.Builder ruleBuilder = ProjectDump.Rule.newBuilder();
for (Rule rule : ruleRepository.getAll()) {
ProjectDump.Rule ruleMessage = toRuleMessage(ruleBuilder, rule);
writer.write(ruleMessage);
count++;
}
LoggerFactory.getLogger(getClass()).debug("{} rules exported", count);
} catch (Exception e) {
throw new IllegalStateException(format("Rule Export failed after processing %d rules successfully", count), e);
}
} | @Test
public void excuse_throws_ISE_exception_with_number_of_successfully_exported_rules() {
ruleRepository.add("A").add("B").add("C")
// will cause NPE
.addNull();
assertThatThrownBy(() -> underTest.execute(new TestComputationStepContext()))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Rule Export failed after processing 3 rules successfully");
} |
public String generateKeyStorePassword() {
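// 16 characters restricted to ASCII letters and digits, drawn from the srand source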
return RandomStringUtils.random(16, 0, 0, true, true, null, srand);
} | @Test
void testGenerateKeyStorePassword() throws Exception {
// We can't possibly test every possible string, but we can at least verify
// a few things about a few of the generated strings as a sanity check
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
Set<String> passwords = new HashSet<>();
for (int i = 0; i < 5; i++) {
String password = proxyCA.generateKeyStorePassword();
assertEquals(16, password.length());
for (char c : password.toCharArray()) {
assertFalse(c < ' ', "Found character '" + c + "' in password '"
+ password + "' which is outside of the expected range");
assertFalse(c > 'z', "Found character '" + c + "' in password '"
+ password + "' which is outside of the expected range");
}
assertFalse(passwords.contains(password),
"Password " + password
+ " was generated twice, which is _extremely_ unlikely"
+ " and shouldn't practically happen: " + passwords);
passwords.add(password);
}
} |
public JetSqlRow project(Object key, Object value) {
return project(key, null, value, null);
} | @Test
@SuppressWarnings("unchecked")
public void when_filteredByPredicate_then_returnsNull() {
KvRowProjector projector = new KvRowProjector(
new QueryPath[]{QueryPath.KEY_PATH, QueryPath.VALUE_PATH},
new QueryDataType[]{INT, INT},
new IdentityTarget(),
new IdentityTarget(),
(Expression<Boolean>) ConstantExpression.create(Boolean.FALSE, BOOLEAN),
emptyList(),
mock(ExpressionEvalContext.class)
);
JetSqlRow row = projector.project(1, 8);
assertThat(row).isNull();
} |
@PublicAPI(usage = ACCESS)
public JavaPackage getPackage(String packageName) {
return getValue(tryGetPackage(packageName), "This package does not contain any sub package '%s'", packageName);
} | @Test
public void function_GET_CLASSES() {
JavaPackage defaultPackage = importDefaultPackage(Object.class, String.class, Collection.class);
Iterable<JavaClass> classes = GET_CLASSES.apply(defaultPackage.getPackage("java.lang"));
assertThatTypes(classes).contain(Object.class, String.class);
for (JavaClass javaClass : classes) {
assertThat(javaClass.getPackageName()).startsWith("java.lang");
}
} |
public T getAndRemove(Predicate<T> preCondition) {
Iterator<T> iterator = deque.iterator();
for (int i = 0; iterator.hasNext(); i++) {
T next = iterator.next();
if (preCondition.test(next)) {
if (i < numPriorityElements) {
numPriorityElements--;
}
iterator.remove();
return next;
}
}
throw new NoSuchElementException();
} | @Test
void testGetAndRemove() {
final PrioritizedDeque<Integer> deque = new PrioritizedDeque<>();
deque.add(0);
deque.add(1);
deque.add(2);
deque.add(1);
deque.add(3);
assertThat(deque.getAndRemove(v -> v == 1).intValue()).isOne();
assertThat(deque.asUnmodifiableCollection()).containsExactly(0, 2, 1, 3);
assertThat(deque.getAndRemove(v -> v == 1).intValue()).isOne();
assertThat(deque.asUnmodifiableCollection()).containsExactly(0, 2, 3);
try {
int removed = deque.getAndRemove(v -> v == 1);
fail(
String.format(
"This should not happen. Item [%s] was removed, but it shouldn't be found",
removed));
} catch (NoSuchElementException ex) {
// expected
}
} |
public RuntimeOptionsBuilder parse(String... args) {
return parse(Arrays.asList(args));
} | @Test
void throws_runtime_exception_on_malformed_tag_expression() {
RuntimeException e = assertThrows(RuntimeException.class, () -> {
RuntimeOptions options = parser
.parse("--tags", ")")
.build();
});
} |
public boolean shouldVerboseLog() {
return verboseLog;
} | @Test
public void testVerboseLog() {
final DistCpOptions.Builder builder = new DistCpOptions.Builder(
Collections.singletonList(new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"));
Assert.assertFalse(builder.build().shouldVerboseLog());
try {
builder.withVerboseLog(true).build();
fail("-v should fail if -log option is not specified");
} catch (IllegalArgumentException e) {
assertExceptionContains("-v is valid only with -log option", e);
}
final Path logPath = new Path("hdfs://localhost:8020/logs");
builder.withLogPath(logPath).withVerboseLog(true);
Assert.assertTrue(builder.build().shouldVerboseLog());
} |
@Override
public Graph<Entity> resolveForInstallation(Entity entity,
Map<String, ValueReference> parameters,
Map<EntityDescriptor, Entity> entities) {
if (entity instanceof EntityV1) {
return resolveForInstallationV1((EntityV1) entity, parameters, entities);
} else {
throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
}
} | @Test
@MongoDBFixtures("InputFacadeTest.json")
public void resolveForInstallationLookupTable() throws NotFoundException {
when(lookupuptableBuilder.lookupTable("whois")).thenReturn(lookupuptableBuilder);
when(lookupuptableBuilder.lookupTable("tor-exit-node-list")).thenReturn(lookupuptableBuilder);
when(lookupuptableBuilder.build()).thenReturn(lookupTable);
when(lookupTableService.newBuilder()).thenReturn(lookupuptableBuilder);
when(lookupTableService.hasTable("whois")).thenReturn(true);
when(lookupTableService.hasTable("tor-exit-node-list")).thenReturn(true);
final Input input = inputService.find("5ae2eb0a3d27464477f0fd8b");
final Map<String, Object> lookupTableConfig = new HashedMap(1);
lookupTableConfig.put("lookup_table_name", "tor-exit-node-list");
final ConverterEntity converterEntity = ConverterEntity.create(
ValueReference.of(Converter.Type.LOOKUP_TABLE.name()), ReferenceMapUtils.toReferenceMap(lookupTableConfig));
final List<ConverterEntity> converterEntities = new ArrayList<>(1);
converterEntities.add(converterEntity);
final InputWithExtractors inputWithExtractors = InputWithExtractors.create(input, inputService.getExtractors(input));
final LookupTableExtractor extractor = (LookupTableExtractor) inputWithExtractors.extractors().iterator().next();
final ExtractorEntity extractorEntity = ExtractorEntity.create(
ValueReference.of(extractor.getTitle()),
ValueReference.of(extractor.getType()),
ValueReference.of(extractor.getCursorStrategy()),
ValueReference.of(extractor.getTargetField()),
ValueReference.of(extractor.getSourceField()),
ReferenceMapUtils.toReferenceMap(extractor.getExtractorConfig()),
converterEntities,
ValueReference.of(extractor.getConditionType()),
ValueReference.of(extractor.getConditionValue()),
ValueReference.of(extractor.getOrder())
);
List<ExtractorEntity> extractors = new ArrayList<>();
extractors.add(extractorEntity);
InputEntity inputEntity = InputEntity.create(
ValueReference.of(input.getTitle()),
ReferenceMapUtils.toReferenceMap(input.getConfiguration()),
Collections.emptyMap(),
ValueReference.of(input.getType()),
ValueReference.of(input.isGlobal()),
extractors);
final Entity entity = EntityV1.builder()
.id(ModelId.of(input.getId()))
.type(ModelTypes.INPUT_V1)
.data(objectMapper.convertValue(inputEntity, JsonNode.class))
.build();
final LookupTableEntity whoIsEntity = LookupTableEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("whois"),
ValueReference.of("title"),
ValueReference.of("description"),
ValueReference.of("cache_name"),
ValueReference.of("dataadapter_name"),
ValueReference.of("default_single_value"),
ValueReference.of("BOOLEAN"),
ValueReference.of("default_multi_value"),
ValueReference.of("BOOLEAN")
);
final LookupTableEntity torNodeEntity = LookupTableEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("tor-exit-node-list"),
ValueReference.of("title"),
ValueReference.of("description"),
ValueReference.of("cache_name"),
ValueReference.of("dataadapter_name"),
ValueReference.of("default_single_value"),
ValueReference.of("BOOLEAN"),
ValueReference.of("default_multi_value"),
ValueReference.of("BOOLEAN")
);
final Entity expectedWhoIsEntity = EntityV1.builder()
.id(ModelId.of("dead-beef"))
.data(objectMapper.convertValue(whoIsEntity, JsonNode.class))
.type(ModelTypes.LOOKUP_TABLE_V1)
.build();
final Entity expectedTorEntity = EntityV1.builder()
.id(ModelId.of("dead-feed"))
.data(objectMapper.convertValue(torNodeEntity, JsonNode.class))
.type(ModelTypes.LOOKUP_TABLE_V1)
.build();
final EntityDescriptor whoisDescriptor = expectedWhoIsEntity.toEntityDescriptor();
final EntityDescriptor torDescriptor = expectedTorEntity.toEntityDescriptor();
final Map<EntityDescriptor, Entity> entityDescriptorEntityMap = new HashMap<>(2);
entityDescriptorEntityMap.put(whoisDescriptor, expectedWhoIsEntity);
entityDescriptorEntityMap.put(torDescriptor, expectedTorEntity);
Graph<Entity> graph = facade.resolveForInstallation(entity, Collections.emptyMap(), entityDescriptorEntityMap);
assertThat(graph.nodes()).contains(expectedWhoIsEntity);
assertThat(graph.nodes()).contains(expectedTorEntity);
} |
@JsonProperty("status")
public Status status() {
if (indices.isEmpty() || indices.stream().allMatch(i -> i.status() == Status.NOT_STARTED)) {
return Status.NOT_STARTED;
} else if (indices.stream().allMatch(RemoteReindexIndex::isCompleted)) {
// all are now completed, either finished or errored
if (indices.stream().anyMatch(i -> i.status() == Status.ERROR)) {
return Status.ERROR;
} else {
return Status.FINISHED;
}
} else {
return Status.RUNNING;
}
} | @Test
void testStatusFinished() {
final RemoteReindexMigration migration = withIndices(
index("one", RemoteReindexingMigrationAdapter.Status.FINISHED),
index("two", RemoteReindexingMigrationAdapter.Status.FINISHED)
);
Assertions.assertThat(migration.status()).isEqualTo(RemoteReindexingMigrationAdapter.Status.FINISHED);
} |
@Override
public boolean hasGlobalAdminRole(String username) {
return roleService.hasGlobalAdminRole(username);
} | @Test
void testHasGlobalAdminRole4() {
NacosUser nacosUser = new NacosUser("nacos");
nacosUser.setGlobalAdmin(false);
when(roleService.hasGlobalAdminRole(anyString())).thenReturn(true);
boolean hasGlobalAdminRole = abstractAuthenticationManager.hasGlobalAdminRole(nacosUser);
assertTrue(hasGlobalAdminRole);
} |
@Override
public int getColumnLength(final Object value) {
throw new UnsupportedSQLOperationException("PostgreSQLFloat4ArrayBinaryProtocolValue.getColumnLength()");
} | @Test
void assertGetColumnLength() {
assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().getColumnLength("val"));
} |
void validateLogLevelConfigs(Collection<AlterableConfig> ops) {
ops.forEach(op -> {
String loggerName = op.name();
switch (OpType.forId(op.configOperation())) {
case SET:
validateLoggerNameExists(loggerName);
String logLevel = op.value();
if (!LogLevelConfig.VALID_LOG_LEVELS.contains(logLevel)) {
throw new InvalidConfigurationException("Cannot set the log level of " +
loggerName + " to " + logLevel + " as it is not a supported log level. " +
"Valid log levels are " + VALID_LOG_LEVELS_STRING);
}
break;
case DELETE:
validateLoggerNameExists(loggerName);
if (loggerName.equals(Log4jController.ROOT_LOGGER())) {
throw new InvalidRequestException("Removing the log level of the " +
Log4jController.ROOT_LOGGER() + " logger is not allowed");
}
break;
case APPEND:
throw new InvalidRequestException(OpType.APPEND +
" operation is not allowed for the " + BROKER_LOGGER + " resource");
case SUBTRACT:
throw new InvalidRequestException(OpType.SUBTRACT +
" operation is not allowed for the " + BROKER_LOGGER + " resource");
default:
throw new InvalidRequestException("Unknown operation type " +
(int) op.configOperation() + " is not allowed for the " +
BROKER_LOGGER + " resource");
}
});
} | @Test
public void testValidateDeleteLogLevelConfig() {
MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
setName(LOG.getName()).
setConfigOperation(OpType.DELETE.id()).
setValue("")));
} |
@Override
public void start() {
try {
createAndStartGrpcServer();
} catch (final IOException e) {
throw new IllegalStateException("Failed to start the grpc server", e);
}
} | @Test
void testNoGraceShutdown() {
// The server takes 5 seconds to shut down
final TestServer server = new TestServer(5000);
when(this.factory.createServer()).thenReturn(server);
// But we won't wait
final GrpcServerLifecycle lifecycle = new GrpcServerLifecycle(this.factory, ZERO, this.eventPublisher);
lifecycle.start();
verify(this.eventPublisher).publishEvent(ArgumentMatchers.any(GrpcServerStartedEvent.class));
assertFalse(server.isShutdown());
assertFalse(server.isTerminated());
// So the shutdown should complete near instantly
assertTimeoutPreemptively(ofMillis(100), (Executable) lifecycle::stop);
verify(this.eventPublisher).publishEvent(ArgumentMatchers.any(GrpcServerShutdownEvent.class));
verify(this.eventPublisher).publishEvent(ArgumentMatchers.any(GrpcServerTerminatedEvent.class));
assertTrue(server.isShutdown());
assertTrue(server.isTerminated());
} |
@AroundInvoke
public Object intercept(InvocationContext context) throws Exception { // NOPMD
// this method is called by the EJB container thanks to the AroundInvoke annotation
if (DISABLED || !EJB_COUNTER.isDisplayed()) {
return context.proceed();
}
// name identifying the request
final String requestName = getRequestName(context);
boolean systemError = false;
try {
EJB_COUNTER.bindContextIncludingCpu(requestName);
return context.proceed();
} catch (final Error e) {
// we catch Error to capture system errors,
// but not Exception, which are generally functional (business) errors
systemError = true;
throw e;
} finally {
// record the request in the statistics
EJB_COUNTER.addRequestForCurrentContext(systemError);
}
} | @Test
public void testMonitoringTarget() throws Exception {
final Counter ejbCounter = MonitoringProxy.getEjbCounter();
ejbCounter.clear();
final MonitoringTargetInterceptor interceptor = new MonitoringTargetInterceptor();
ejbCounter.setDisplayed(true);
interceptor.intercept(new InvokeContext(false));
assertSame("requestsCount", 1, ejbCounter.getRequestsCount());
} |
@Override
public long getLastUpdateTime() {
return record.getLastUpdateTime();
} | @Test
public void test_getLastUpdateTime() {
assertEquals(0, view.getLastUpdateTime());
} |
@POST
@Path("register")
@Produces(MediaType.APPLICATION_JSON)
public Response register(Application app) {
try {
if (app.getName()==null) {
throw new IOException("Application name can not be empty.");
}
if (app.getOrganization()==null) {
throw new IOException("Application organization can not be empty.");
}
if (app.getDescription()==null) {
throw new IOException("Application description can not be empty.");
}
AppCatalogSolrClient sc = new AppCatalogSolrClient();
sc.register(app);
} catch (IOException e) {
return Response.status(Status.BAD_REQUEST).entity(e.getMessage()).build();
}
return Response.status(Status.ACCEPTED).build();
} | @Test
void testRegister() throws Exception {
AppStoreController ac = Mockito.mock(AppStoreController.class);
Application app = new Application();
app.setName("jenkins");
app.setOrganization("jenkins.org");
app.setDescription("This is a description");
app.setIcon("/css/img/feather.png");
Response expected = Response.ok().build();
when(ac.register(app)).thenReturn(Response.ok().build());
final Response actual = ac.register(app);
assertEquals(expected.getStatus(), actual.getStatus());
} |
public List<ColumnMatchResult<?>> getMismatchedColumns(List<Column> columns, ChecksumResult controlChecksum, ChecksumResult testChecksum)
{
return columns.stream()
.flatMap(column -> columnValidators.get(column.getCategory()).get().validate(column, controlChecksum, testChecksum).stream())
.filter(columnMatchResult -> !columnMatchResult.isMatched())
.collect(toImmutableList());
} | @Test
public void testFloatingPoint()
{
List<Column> columns = ImmutableList.of(DOUBLE_COLUMN, REAL_COLUMN);
ChecksumResult controlChecksum = new ChecksumResult(
5,
ImmutableMap.<String, Object>builder()
.putAll(FLOATING_POINT_COUNTS)
.put("double$sum", 1.0)
.put("real$sum", 1.0)
.build());
// Matched
ChecksumResult testChecksum = new ChecksumResult(
5,
ImmutableMap.<String, Object>builder()
.putAll(FLOATING_POINT_COUNTS)
.put("double$sum", 1 + RELATIVE_ERROR_MARGIN)
.put("real$sum", 1 - RELATIVE_ERROR_MARGIN + RELATIVE_ERROR_MARGIN * RELATIVE_ERROR_MARGIN)
.build());
assertTrue(checksumValidator.getMismatchedColumns(columns, controlChecksum, testChecksum).isEmpty());
// Mismatched
testChecksum = new ChecksumResult(
5,
ImmutableMap.<String, Object>builder()
.put("double$sum", 1.0)
.put("double$nan_count", 0L)
.put("double$pos_inf_count", 3L)
.put("double$neg_inf_count", 4L)
.put("real$sum", 1.0)
.put("real$nan_count", 2L)
.put("real$pos_inf_count", 0L)
.put("real$neg_inf_count", 4L)
.build());
assertMismatchedColumns(columns, controlChecksum, testChecksum, DOUBLE_COLUMN, REAL_COLUMN);
testChecksum = new ChecksumResult(
5,
ImmutableMap.<String, Object>builder()
.put("double$sum", 1.0)
.put("double$nan_count", 2L)
.put("double$pos_inf_count", 3L)
.put("double$neg_inf_count", 0L)
.put("real$sum", 1 - RELATIVE_ERROR_MARGIN)
.put("real$nan_count", 2L)
.put("real$pos_inf_count", 3L)
.put("real$neg_inf_count", 4L)
.build());
List<ColumnMatchResult<?>> mismatchedColumns = assertMismatchedColumns(columns, controlChecksum, testChecksum, DOUBLE_COLUMN, REAL_COLUMN);
assertEquals(mismatchedColumns.get(1).getMessage(), Optional.of("relative error: 1.0000500025000149E-4"));
} |
@Override
public ResponseHeader execute() throws SQLException {
check(sqlStatement, connectionSession.getConnectionContext().getGrantee());
if (isDropCurrentDatabase(sqlStatement.getDatabaseName())) {
checkSupportedDropCurrentDatabase(connectionSession);
connectionSession.setCurrentDatabaseName(null);
}
if (ProxyContext.getInstance().databaseExists(sqlStatement.getDatabaseName())) {
ProxyContext.getInstance().getContextManager().getPersistServiceFacade().getMetaDataManagerPersistService().dropDatabase(sqlStatement.getDatabaseName());
}
return new UpdateResponseHeader(sqlStatement);
} | @Test
void assertExecuteDropNotExistDatabase() {
when(sqlStatement.getDatabaseName()).thenReturn("test_not_exist_db");
assertThrows(DatabaseDropNotExistsException.class, () -> handler.execute());
} |
public static <T> T getBean(Class<T> interfaceClass, Class typeClass) {
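// Generic beans are keyed as "<interface><type>", e.g. (illustrative) "java.util.List<java.lang.String>"; the stored value may be a single instance or an array of instances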
Object object = serviceMap.get(interfaceClass.getName() + "<" + typeClass.getName() + ">");
if(object == null) return null;
if(object instanceof Object[]) {
return (T)Array.get(object, 0);
} else {
return (T)object;
}
} | @Test
public void testInfo() {
Info info = SingletonServiceFactory.getBean(Info.class);
Assert.assertEquals("contact", info.getContact().getName());
Assert.assertEquals("license", info.getLicense().getName());
} |
public HttpResult getBinary(String url) throws IOException, NotModifiedException {
return getBinary(url, null, null);
} | @Test
void dataTimeout() {
Mockito.when(config.httpClient().responseTimeout()).thenReturn(Duration.ofMillis(500));
this.getter = new HttpGetter(config, Mockito.mock(CommaFeedVersion.class), Mockito.mock(MetricRegistry.class));
this.mockServerClient.when(HttpRequest.request().withMethod("GET"))
.respond(HttpResponse.response().withDelay(Delay.milliseconds(1000)));
Assertions.assertThrows(SocketTimeoutException.class, () -> getter.getBinary(this.feedUrl));
} |
public void check( List<CheckResultInterface> remarks, TransMeta transMeta, StepMeta stepMeta,
RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space,
Repository repository, IMetaStore metaStore ) {
super.check( remarks, transMeta, stepMeta, prev, input, output, info, space, repository, metaStore );
CheckResult cr;
// See if we get input...
if ( input != null && input.length > 0 ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoInputExpected" ), stepMeta );
} else {
cr =
new CheckResult( CheckResult.TYPE_RESULT_OK, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoInput" ), stepMeta );
}
remarks.add( cr );
// check return fields
if ( getInputFields().length == 0 ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoFields" ), stepMeta );
} else {
cr =
new CheckResult( CheckResult.TYPE_RESULT_OK, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.FieldsOk" ), stepMeta );
}
remarks.add( cr );
// check additional fields
if ( includeTargetURL() && Utils.isEmpty( getTargetURLField() ) ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoTargetURLField" ), stepMeta );
remarks.add( cr );
}
if ( includeSQL() && Utils.isEmpty( getSQLField() ) ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoSQLField" ), stepMeta );
remarks.add( cr );
}
if ( includeModule() && Utils.isEmpty( moduleField ) ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoModuleField" ), stepMeta );
remarks.add( cr );
}
if ( includeTimestamp() && Utils.isEmpty( getTimestampField() ) ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoTimestampField" ), stepMeta );
remarks.add( cr );
}
if ( includeRowNumber() && Utils.isEmpty( getRowNumberField() ) ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoRowNumberField" ), stepMeta );
remarks.add( cr );
}
if ( includeDeletionDate() && Utils.isEmpty( getDeletionDateField() ) ) {
cr =
new CheckResult( CheckResult.TYPE_RESULT_ERROR, BaseMessages.getString(
PKG, "SalesforceInputMeta.CheckResult.NoDeletionDateField" ), stepMeta );
remarks.add( cr );
}
} | @Test
public void testCheck() {
SalesforceInputMeta meta = new SalesforceInputMeta();
meta.setDefault();
List<CheckResultInterface> remarks = new ArrayList<CheckResultInterface>();
meta.check( remarks, null, null, null, null, null, null, null, null, null );
boolean hasError = false;
for ( CheckResultInterface cr : remarks ) {
if ( cr.getType() == CheckResult.TYPE_RESULT_ERROR ) {
hasError = true;
}
}
assertFalse( remarks.isEmpty() );
assertTrue( hasError );
remarks.clear();
meta.setDefault();
meta.setUsername( "user" );
meta.setInputFields( new SalesforceInputField[]{ new SalesforceInputField( "test" ) } );
meta.check( remarks, null, null, null, null, null, null, null, null, null );
hasError = false;
for ( CheckResultInterface cr : remarks ) {
if ( cr.getType() == CheckResult.TYPE_RESULT_ERROR ) {
hasError = true;
}
}
assertFalse( remarks.isEmpty() );
assertFalse( hasError );
remarks.clear();
meta.setDefault();
meta.setUsername( "user" );
meta.setIncludeDeletionDate( true );
meta.setIncludeModule( true );
meta.setIncludeRowNumber( true );
meta.setIncludeSQL( true );
meta.setIncludeTargetURL( true );
meta.setIncludeTimestamp( true );
meta.setInputFields( new SalesforceInputField[]{ new SalesforceInputField( "test" ) } );
meta.check( remarks, null, null, null, null, null, null, null, null, null );
hasError = false;
int errorCount = 0;
for ( CheckResultInterface cr : remarks ) {
if ( cr.getType() == CheckResult.TYPE_RESULT_ERROR ) {
hasError = true;
errorCount++;
}
}
assertFalse( remarks.isEmpty() );
assertTrue( hasError );
assertEquals( 6, errorCount );
remarks.clear();
meta.setDefault();
meta.setUsername( "user" );
meta.setIncludeDeletionDate( true );
meta.setDeletionDateField( "delDate" );
meta.setIncludeModule( true );
meta.setModuleField( "mod" );
meta.setIncludeRowNumber( true );
meta.setRowNumberField( "rownum" );
meta.setIncludeSQL( true );
meta.setSQLField( "theSQL" );
meta.setIncludeTargetURL( true );
meta.setTargetURLField( "theURL" );
meta.setIncludeTimestamp( true );
meta.setTimestampField( "ts_Field" );
meta.setInputFields( new SalesforceInputField[]{ new SalesforceInputField( "test" ) } );
meta.check( remarks, null, null, null, null, null, null, null, null, null );
hasError = false;
for ( CheckResultInterface cr : remarks ) {
if ( cr.getType() == CheckResult.TYPE_RESULT_ERROR ) {
hasError = true;
errorCount++;
}
}
assertFalse( remarks.isEmpty() );
assertFalse( hasError );
} |
@Override
public boolean isSecured(ApplicationId appId) {
SecurityInfo info = states.get(appId).value();
return info == null ? false : info.getState().equals(SECURED);
} | @Test
public void testIsSecured() {
SecurityInfo info = states.get(appId);
assertEquals(SECURED, info.getState());
} |
@Override
public PermissionTicket createTicket(ResourceSet resourceSet, Set<String> scopes) {
// check to ensure that the scopes requested are a subset of those in the resource set
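// e.g. (illustrative) requesting {read, write} against a resource set registered with only {read} is rejected below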
if (!scopeService.scopesMatch(resourceSet.getScopes(), scopes)) {
throw new InsufficientScopeException("Scopes of resource set are not enough for requested permission.");
}
Permission perm = new Permission();
perm.setResourceSet(resourceSet);
perm.setScopes(scopes);
PermissionTicket ticket = new PermissionTicket();
ticket.setPermission(perm);
ticket.setTicket(UUID.randomUUID().toString());
ticket.setExpiration(new Date(System.currentTimeMillis() + permissionExpirationSeconds * 1000L));
return repository.save(ticket);
} | @Test(expected = InsufficientScopeException.class)
public void testCreate_scopeMismatch() {
@SuppressWarnings("unused")
// try to get scopes outside of what we're allowed to do; this should throw an exception
PermissionTicket perm = permissionService.createTicket(rs1, scopes2);
} |
@Override
protected CompletableFuture<TaskManagerDetailsInfo> handleRequest(
@Nonnull HandlerRequest<EmptyRequestBody> request,
@Nonnull ResourceManagerGateway gateway)
throws RestHandlerException {
final ResourceID taskManagerResourceId =
request.getPathParameter(TaskManagerIdPathParameter.class);
CompletableFuture<TaskManagerInfoWithSlots> taskManagerInfoWithSlotsFuture =
gateway.requestTaskManagerDetailsInfo(taskManagerResourceId, timeout);
metricFetcher.update();
return taskManagerInfoWithSlotsFuture
.thenApply(
(taskManagerInfoWithSlots) -> {
final MetricStore.TaskManagerMetricStore tmMetrics =
metricStore.getTaskManagerMetricStore(
taskManagerResourceId.getResourceIdString());
final TaskManagerMetricsInfo taskManagerMetricsInfo;
if (tmMetrics != null) {
log.debug(
"Create metrics info for TaskManager {}.",
taskManagerResourceId.getStringWithMetadata());
taskManagerMetricsInfo = createTaskManagerMetricsInfo(tmMetrics);
} else {
log.debug(
"No metrics for TaskManager {}.",
taskManagerResourceId.getStringWithMetadata());
taskManagerMetricsInfo = TaskManagerMetricsInfo.empty();
}
return new TaskManagerDetailsInfo(
taskManagerInfoWithSlots, taskManagerMetricsInfo);
})
.exceptionally(
(Throwable throwable) -> {
final Throwable strippedThrowable =
ExceptionUtils.stripExecutionException(throwable);
if (strippedThrowable instanceof UnknownTaskExecutorException) {
throw new CompletionException(
new RestHandlerException(
"Could not find TaskExecutor "
+ taskManagerResourceId
+ '.',
HttpResponseStatus.NOT_FOUND,
strippedThrowable));
} else {
throw new CompletionException(strippedThrowable);
}
});
} | @Test
void testTaskManagerMetricsInfoExtraction()
throws RestHandlerException, ExecutionException, InterruptedException,
JsonProcessingException, HandlerRequestException {
initializeMetricStore(metricFetcher.getMetricStore());
resourceManagerGateway.setRequestTaskManagerDetailsInfoFunction(
taskManagerId ->
CompletableFuture.completedFuture(
new TaskManagerInfoWithSlots(
createEmptyTaskManagerInfo(), Collections.emptyList())));
HandlerRequest<EmptyRequestBody> request = createRequest();
TaskManagerDetailsInfo taskManagerDetailsInfo =
testInstance.handleRequest(request, resourceManagerGateway).get();
TaskManagerMetricsInfo actual = taskManagerDetailsInfo.getTaskManagerMetricsInfo();
TaskManagerMetricsInfo expected =
new TaskManagerMetricsInfo(
1L,
2L,
3L,
4L,
5L,
6L,
7L,
8L,
9L,
10L,
11L,
12L,
15L,
16L,
17L,
18L,
19L,
20L,
Collections.emptyList());
ObjectMapper objectMapper = JacksonMapperFactory.createObjectMapper();
String actualJson = objectMapper.writeValueAsString(actual);
String expectedJson = objectMapper.writeValueAsString(expected);
assertThat(actualJson).isEqualTo(expectedJson);
} |
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
URL url = invoker.getUrl();
String methodName = RpcUtils.getMethodName(invocation);
int max = invoker.getUrl().getMethodParameter(methodName, ACTIVES_KEY, 0);
final RpcStatus rpcStatus = RpcStatus.getStatus(invoker.getUrl(), RpcUtils.getMethodName(invocation));
if (!RpcStatus.beginCount(url, methodName, max)) {
long timeout = invoker.getUrl().getMethodParameter(RpcUtils.getMethodName(invocation), TIMEOUT_KEY, 0);
long start = System.currentTimeMillis();
long remain = timeout;
synchronized (rpcStatus) {
while (!RpcStatus.beginCount(url, methodName, max)) {
try {
rpcStatus.wait(remain);
} catch (InterruptedException e) {
// ignore
}
long elapsed = System.currentTimeMillis() - start;
remain = timeout - elapsed;
if (remain <= 0) {
throw new RpcException(
RpcException.LIMIT_EXCEEDED_EXCEPTION,
"Waiting concurrent invoke timeout in client-side for service: "
+ invoker.getInterface().getName()
+ ", method: " + RpcUtils.getMethodName(invocation) + ", elapsed: "
+ elapsed + ", timeout: " + timeout + ". concurrent invokes: "
+ rpcStatus.getActive()
+ ". max concurrent invoke limit: " + max);
}
}
}
}
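// the active count for this method has been incremented at this point; record the start time on the invocation before proceeding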
invocation.put(ACTIVE_LIMIT_FILTER_START_TIME, System.currentTimeMillis());
return invoker.invoke(invocation);
} | @Test
void testInvokeNoActives() {
URL url = URL.valueOf("test://test:11/test?accesslog=true&group=dubbo&version=1.1&actives=0");
Invoker<ActiveLimitFilterTest> invoker = new MyInvoker<ActiveLimitFilterTest>(url);
Invocation invocation = new MockInvocation();
activeLimitFilter.invoke(invoker, invocation);
} |
public Set<TaskId> standbyTasks() {
return unmodifiableSet(assignedStandbyTasks.taskIds());
} | @Test
public void shouldNotModifyStandbyView() {
final ClientState clientState = new ClientState(1);
final Set<TaskId> taskIds = clientState.standbyTasks();
assertThrows(UnsupportedOperationException.class, () -> taskIds.add(TASK_0_0));
assertThat(clientState, hasStandbyTasks(0));
} |
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
DoubleColumnStatsDataInspector columnStatsData = doubleInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DoubleColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DoubleColumnStatsMerger merger = new DoubleColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
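// e.g. (illustrative) highValue=7.0, lowValue=1.0, densityAvg=0.75 -> estimation = (long) (6.0 / 0.75) = 8, then clamped to [lowerBound, higherBound] below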
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDoubleStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
DoubleColumnStatsData aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DoubleColumnStatsDataInspector newData =
doubleInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDoubleStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(Math.max(aggregateData.getHighValue(),
newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDoubleStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDoubleStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDoubleStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateMultiStatsWhenAllAvailable() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
ColumnStatisticsData data1 = new ColStatsBuilder<>(double.class).numNulls(1).numDVs(3)
.low(1d).high(3d).hll(1, 2, 3).build();
ColumnStatisticsData data2 = new ColStatsBuilder<>(double.class).numNulls(2).numDVs(3)
.low(3d).high(5d).hll(3, 4, 5).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(double.class).numNulls(3).numDVs(2)
.low(6d).high(7d).hll(6, 7).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
DoubleColumnStatsAggregator aggregator = new DoubleColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
// the aggregation does not update the hll, only numDVs is updated; it keeps the first hll
// notice that numDVs is computed by using HLL, so it can detect that '3' appears twice
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(double.class).numNulls(6).numDVs(7)
.low(1d).high(7d).hll(1, 2, 3).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
} |
public static boolean isUUID(CharSequence value) {
return isMatchRegex(UUID, value) || isMatchRegex(UUID_SIMPLE, value);
} | @Test
public void isUUIDTest() {
assertTrue(Validator.isUUID(IdUtil.randomUUID()));
assertTrue(Validator.isUUID(IdUtil.fastSimpleUUID()));
assertTrue(Validator.isUUID(IdUtil.randomUUID().toUpperCase()));
assertTrue(Validator.isUUID(IdUtil.fastSimpleUUID().toUpperCase()));
} |
@Override
public void onChangeLogParsed(Run<?, ?> run, SCM scm, TaskListener listener, ChangeLogSet<?> changelog) throws Exception {
try {
JiraSite jiraSite = JiraSite.get(run.getParent());
if (jiraSite == null) {
return;
}
Collection<String> issueKeys = getIssueKeys(changelog, jiraSite.getIssuePattern());
if (issueKeys.isEmpty()) {
return;
}
String jql = constructJQLQuery(issueKeys);
JiraSession session = jiraSite.getSession();
if (session == null) {
return;
}
// Query for JIRA issues
List<Issue> issues = session.getIssuesFromJqlSearch(jql);
Set<JiraIssue> issuesFromJqlSearch = issues == null ? Collections.emptySet() :
issues.stream().map( JiraIssue::new ).collect( Collectors.toSet() );
// If there are no JIRA issues, do not update the actions
if (issuesFromJqlSearch.isEmpty()) {
return;
}
// Create or update the JiraBuildAction
JiraBuildAction action = run.getAction(JiraBuildAction.class);
if (action == null) {
run.addAction(new JiraBuildAction(run, issuesFromJqlSearch));
} else {
action.addIssues(issuesFromJqlSearch);
}
run.save();
} catch (Exception e ){ // we do not want to fail the build if an issue happen here
LOGGER.warn( "Failure executing Jira query to fetch issues. Skipping recording Jira issues.: {}", e.getMessage() );
// stack trace in debug mode
LOGGER.debug( e.getMessage(), e);
}
} | @Test
public void changeSetHasNoJiraIssue() throws Exception {
JiraSCMListener listener = new JiraSCMListener();
Job job = mock(Job.class);
Run run = mock(Run.class);
ChangeLogSet logSet = mock(ChangeLogSet.class);
final ChangeLogSet.Entry entry = mock(ChangeLogSet.Entry.class);
when(entry.getParent()).thenReturn(logSet);
when(logSet.getRun()).thenReturn(run);
when(run.getParent()).thenReturn(job);
when(entry.getMsg()).thenReturn("No jira ticket here");
ChangeLogSet<ChangeLogSet.Entry> set = new ChangeLogSet<ChangeLogSet.Entry>(run, null) {
@Override
public boolean isEmptySet() {
return false;
}
@Override
public Iterator<Entry> iterator() {
return Collections.singletonList(entry).iterator();
}
};
// Setup JIRA site
jiraSiteMockedStatic = mockStatic(JiraSite.class);
JiraSite site = mock(JiraSite.class);
when(site.getIssuePattern()).thenReturn(JiraSite.DEFAULT_ISSUE_PATTERN);
when(JiraSite.get(job)).thenReturn(site);
JiraBuildAction action = new JiraBuildAction(run, new HashSet());
when(run.getAction(JiraBuildAction.class)).thenReturn(action);
listener.onChangeLogParsed(run, null,null, set);
Assert.assertTrue(action.getIssues().isEmpty());
} |
ConsumerRecord<Object, Object> deserialize(final ProcessorContext<?, ?> processorContext,
final ConsumerRecord<byte[], byte[]> rawRecord) {
try {
return new ConsumerRecord<>(
rawRecord.topic(),
rawRecord.partition(),
rawRecord.offset(),
rawRecord.timestamp(),
TimestampType.CREATE_TIME,
rawRecord.serializedKeySize(),
rawRecord.serializedValueSize(),
sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()),
sourceNode.deserializeValue(rawRecord.topic(), rawRecord.headers(), rawRecord.value()),
rawRecord.headers(),
Optional.empty()
);
} catch (final RuntimeException deserializationException) {
handleDeserializationFailure(deserializationExceptionHandler, processorContext, deserializationException, rawRecord, log, droppedRecordsSensor, sourceNode().name());
return null; // 'handleDeserializationFailure' would either throw or swallow -- if we swallow we need to skip the record by returning 'null'
}
} | @Test
public void shouldFailWhenDeserializationFailsAndExceptionHandlerThrows() {
try (final Metrics metrics = new Metrics()) {
final RecordDeserializer recordDeserializer = new RecordDeserializer(
new TheSourceNode(
sourceNodeName,
true,
false,
"key",
"value"
),
new DeserializationExceptionHandlerMock(
null, // indicate to throw an exception
rawRecord,
sourceNodeName,
taskId
),
new LogContext(),
metrics.sensor("dropped-records")
);
final StreamsException exception = assertThrows(
StreamsException.class,
() -> recordDeserializer.deserialize(context, rawRecord)
);
assertEquals("Fatal user code error in deserialization error callback", exception.getMessage());
assertEquals("CRASH", exception.getCause().getMessage());
}
} |
private LinkKey(ConnectPoint src, ConnectPoint dst) {
this.src = checkNotNull(src);
this.dst = checkNotNull(dst);
} | @Test
public void testCompareEquals() {
LinkKey k1 = LinkKey.linkKey(SRC1, DST2);
LinkKey k2 = LinkKey.linkKey(SRC1, DST2);
assertThat(k1, is(equalTo(k2)));
} |
@Override
public void consume(Update update) {
super.consume(update);
} | @Test
void canProcessUpdatesWithoutUserInfo() {
Update update = mock(Update.class);
// At the moment, only poll updates carry no user information
when(update.hasPoll()).thenReturn(true);
bot.consume(update);
} |
public List<JournalReadEntry> readNext(long startOffset, long requestedMaximumCount) {
// Capture the log end offset early for the failure handling below. The end offset will change during the
// runtime of the retry loop because new messages are written to the journal. If we would use the changing
// end offset in the error handling while loop, we would skip valid messages.
final long logEndOffset = getLogEndOffset();
List<JournalReadEntry> messages = read(startOffset, requestedMaximumCount);
if (messages.isEmpty()) {
// If we got an empty result BUT we know that there are more messages in the log, we bump the readOffset
// by 1 and try to read again. We continue until we either get an non-empty result or we reached the
// end of the log.
// This can happen when a log segment is truncated at the end but later segments have valid messages again.
long failedReadOffset = startOffset;
long retryReadOffset = failedReadOffset + 1;
while (messages.isEmpty() && failedReadOffset < (logEndOffset - 1)) {
LOG.warn(
"Couldn't read any messages from offset <{}> but journal has more messages. Skipping and " +
"trying to read from offset <{}>",
failedReadOffset, retryReadOffset);
// Retry the read with an increased offset to skip corrupt segments
messages = read(retryReadOffset, requestedMaximumCount);
// Bump offsets in case we still read an empty result
failedReadOffset++;
retryReadOffset++;
}
}
return messages;
} | @Test
@Ignore
public void readNext() throws Exception {
final LocalKafkaJournal journal = new LocalKafkaJournal(journalDirectory.toPath(),
scheduler, Size.kilobytes(1L),
Duration.standardHours(1),
Size.kilobytes(10L),
Duration.standardDays(1),
1_000_000,
Duration.standardMinutes(1),
100,
new MetricRegistry(),
serverStatus);
final byte[] idBytes = "id".getBytes(UTF_8);
final byte[] messageBytes = "message".getBytes(UTF_8);
// High frequency reading with occasional writing.
// This doesn't trigger the bug reliably every time though.
IntStream.range(0, 1_000_000).parallel().forEach(
(i) -> {
if (i % 1000 == 0) {
journal.write(idBytes, messageBytes);
} else {
journal.read(1L).forEach(e -> journal.markJournalOffsetCommitted(e.getOffset()));
}
}
);
} |
public static <E> FilterIter<E> filtered(final Iterator<? extends E> iterator, final Filter<? super E> filter) {
return new FilterIter<>(iterator, filter);
} | @Test
public void filteredTest(){
final List<String> obj2 = ListUtil.toList("3");
final List<String> obj = ListUtil.toList("1", "3");
final FilterIter<String> filtered = IterUtil.filtered(obj.iterator(), obj2::contains);
assertEquals("3", filtered.next());
assertFalse(filtered.hasNext());
} |
public static UserAgent parse(String userAgentString) {
return UserAgentParser.parse(userAgentString);
} | @Test
public void parseWxworkMobileTest() {
final String uaString = "Mozilla/5.0 (Linux; Android 10; JSN-AL00 Build/HONORJSN-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.120 MQQBrowser/6.2 TBS/045710 Mobile Safari/537.36 wxwork/3.1.10 ColorScheme/Light MicroMessenger/7.0.1 NetType/WIFI Language/zh Lang/zh";
final UserAgent ua = UserAgentUtil.parse(uaString);
assertEquals("wxwork", ua.getBrowser().toString());
assertEquals("3.1.10", ua.getVersion());
assertEquals("Webkit", ua.getEngine().toString());
assertEquals("537.36", ua.getEngineVersion());
assertEquals("Android", ua.getOs().toString());
assertEquals("10", ua.getOsVersion());
assertEquals("Android", ua.getPlatform().toString());
assertTrue(ua.isMobile());
} |
@Override
public Set<EmailRecipient> findSubscribedEmailRecipients(String dispatcherKey, String projectKey,
SubscriberPermissionsOnProject subscriberPermissionsOnProject) {
verifyProjectKey(projectKey);
try (DbSession dbSession = dbClient.openSession(false)) {
Set<EmailSubscriberDto> emailSubscribers = dbClient.propertiesDao().findEmailSubscribersForNotification(
dbSession, dispatcherKey, EmailNotificationChannel.class.getSimpleName(), projectKey);
return keepAuthorizedEmailSubscribers(dbSession, projectKey, subscriberPermissionsOnProject, emailSubscribers);
}
} | @Test
public void findSubscribedEmailRecipients_with_logins_fails_with_NPE_if_projectKey_is_null() {
String dispatcherKey = randomAlphabetic(12);
assertThatThrownBy(() -> underTest.findSubscribedEmailRecipients(dispatcherKey, null, ImmutableSet.of(), ALL_MUST_HAVE_ROLE_USER))
.isInstanceOf(NullPointerException.class)
.hasMessage("projectKey is mandatory");
} |
@Override
public Set<K8sNode> nodes() {
return nodeStore.nodes();
} | @Test
public void testGetAllNodes() {
assertEquals(ERR_SIZE, 2, target.nodes().size());
assertTrue(ERR_NOT_FOUND, target.nodes().contains(MINION_2));
assertTrue(ERR_NOT_FOUND, target.nodes().contains(MINION_3));
} |
@Override
public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(
String groupId,
Map<TopicPartition, OffsetAndMetadata> offsets,
AlterConsumerGroupOffsetsOptions options
) {
SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> future =
AlterConsumerGroupOffsetsHandler.newFuture(groupId);
AlterConsumerGroupOffsetsHandler handler = new AlterConsumerGroupOffsetsHandler(groupId, offsets, logContext);
invokeDriver(handler, future, options.timeoutMs);
return new AlterConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)));
} | @Test
public void testAlterConsumerGroupOffsetsNonRetriableErrors() throws Exception {
// Non-retriable errors throw an exception
final TopicPartition tp1 = new TopicPartition("foo", 0);
final List<Errors> nonRetriableErrors = asList(
Errors.GROUP_AUTHORIZATION_FAILED, Errors.INVALID_GROUP_ID, Errors.GROUP_ID_NOT_FOUND, Errors.STALE_MEMBER_EPOCH);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
for (Errors error : nonRetriableErrors) {
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
env.kafkaClient().prepareResponse(prepareOffsetCommitResponse(tp1, error));
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
offsets.put(tp1, new OffsetAndMetadata(123L));
AlterConsumerGroupOffsetsResult errorResult = env.adminClient()
.alterConsumerGroupOffsets(GROUP_ID, offsets);
TestUtils.assertFutureError(errorResult.all(), error.exception().getClass());
TestUtils.assertFutureError(errorResult.partitionResult(tp1), error.exception().getClass());
}
}
} |
public boolean isExpired() {
return super.isExpired();
} | @Test
public void testAnonymous() {
Assert.assertNotNull(AuthenticationToken.ANONYMOUS);
Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName());
Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getName());
Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getType());
Assert.assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires());
Assert.assertFalse(AuthenticationToken.ANONYMOUS.isExpired());
} |
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) {
ProjectMeasuresQuery query = new ProjectMeasuresQuery();
Optional.ofNullable(projectUuids).ifPresent(query::setProjectUuids);
criteria.forEach(criterion -> processCriterion(criterion, query));
return query;
} | @Test
public void create_query_on_qualifier() {
ProjectMeasuresQuery query = newProjectMeasuresQuery(singletonList(Criterion.builder().setKey("qualifier").setOperator(EQ).setValue("APP").build()),
emptySet());
assertThat(query.getQualifiers().get()).containsOnly("APP");
} |
public static String[] splitString( String string, String separator ) {
/*
* 0123456 Example a;b;c;d --> new String[] { a, b, c, d }
*/
// System.out.println("splitString ["+path+"] using ["+separator+"]");
List<String> list = new ArrayList<>();
if ( string == null || string.length() == 0 ) {
return new String[] {};
}
int sepLen = separator.length();
int from = 0;
int end = string.length() - sepLen + 1;
for ( int i = from; i < end; i += sepLen ) {
if ( string.substring( i, i + sepLen ).equalsIgnoreCase( separator ) ) {
// OK, we found a separator, the string to add to the list
// is [from, i[
list.add( nullToEmpty( string.substring( from, i ) ) );
from = i + sepLen;
}
}
// Wait, if the string didn't end with a separator, we still have information at the end of the string...
// In our example that would be "d"...
if ( from + sepLen <= string.length() ) {
list.add( nullToEmpty( string.substring( from, string.length() ) ) );
}
return list.toArray( new String[list.size()] );
} | @Test
public void testSplitStringWithDelimiterAndEmptyEnclosure() {
String mask = "Hello%s world";
String[] chunks = {"Hello", " world"};
String stringToSplit = String.format( mask, DELIMITER1 );
String[] result = Const.splitString( stringToSplit, DELIMITER1, "" );
assertSplit( result, chunks );
} |
public static String[] tokenizeOnSpace(String s)
{
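// Split around each space using zero-width lookbehind/lookahead so the space characters are kept as separate tokens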
return Arrays.stream(s.split("(?<=" + StringUtil.PATTERN_SPACE + ")|(?=" + StringUtil.PATTERN_SPACE + ")"))
.toArray(String[]::new);
} | @Test
void testTokenizeOnSpace_onlySpacesWithText()
{
String[] result = StringUtil.tokenizeOnSpace(" a ");
assertArrayEquals(new String[] {" ", " ", "a", " ", " "}, result);
} |
public static double add(float v1, float v2) {
return add(Float.toString(v1), Float.toString(v2)).doubleValue();
} | @Test
public void addTest3() {
final float a = 3.15f;
final double b = 4.22;
final double result = NumberUtil.add(a, b, a, b).doubleValue();
assertEquals(14.74, result, 0);
} |
public static <E extends Throwable> void withContextClassLoader(
final ClassLoader cl, final ThrowingRunnable<E> r) throws E {
try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(cl)) {
r.run();
}
} | @Test
void testRunWithContextClassLoaderRunnable() {
final ClassLoader aPrioriContextClassLoader =
Thread.currentThread().getContextClassLoader();
try {
final ClassLoader original = new URLClassLoader(new URL[0]);
final ClassLoader temp = new URLClassLoader(new URL[0]);
// set the original context class loader
Thread.currentThread().setContextClassLoader(original);
LambdaUtil.withContextClassLoader(
temp, () -> assertThat(Thread.currentThread().getContextClassLoader()).isSameAs(temp));
// make sure the method restored the original context class loader
assertThat(Thread.currentThread().getContextClassLoader()).isSameAs(original);
} finally {
Thread.currentThread().setContextClassLoader(aPrioriContextClassLoader);
}
} |
@Override
public ConnectResponse<List<SimpleConnectorPluginInfo>> connectorPlugins() {
try {
LOG.debug("Issuing request to Kafka Connect at URI {} to list connector plugins", connectUri);
final ConnectResponse<List<SimpleConnectorPluginInfo>> connectResponse =
withRetries(() -> Request
.get(resolveUri(CONNECTOR_PLUGINS))
.setHeaders(requestHeaders)
.responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
.connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
.execute(httpClient)
.handleResponse(
createHandler(
HttpStatus.SC_OK,
new TypeReference<List<SimpleConnectorPluginInfo>>() {},
Function.identity())));
connectResponse.error()
.ifPresent(error -> LOG.warn("Could not list connector plugins: {}.", error));
return connectResponse;
} catch (final Exception e) {
throw new KsqlServerException(e);
}
} | @Test
public void testListPlugins() throws JsonProcessingException {
// Given:
MAPPER.writeValueAsString(ImmutableList.of(SAMPLE_PLUGIN));
WireMock.stubFor(
WireMock.get(WireMock.urlEqualTo(pathPrefix + "/connector-plugins"))
.withHeader(AUTHORIZATION.toString(), new EqualToPattern(AUTH_HEADER))
.withHeader(CUSTOM_HEADER_NAME, new EqualToPattern(CUSTOM_HEADER_VALUE))
.willReturn(WireMock.aResponse()
.withStatus(HttpStatus.SC_OK)
.withBody(MAPPER.writeValueAsString(ImmutableList.of(SAMPLE_PLUGIN))))
);
// When:
final ConnectResponse<List<SimpleConnectorPluginInfo>> response = client.connectorPlugins();
// Then:
assertThat(response.datum(), OptionalMatchers.of(is(ImmutableList.of(SAMPLE_PLUGIN))));
assertThat("Expected no error!", !response.error().isPresent());
} |
public double calculateDensity(Graph graph, boolean isGraphDirected) {
double result;
double edgesCount = graph.getEdgeCount();
double nodesCount = graph.getNodeCount();
double multiplier = 1;
if (!isGraphDirected) {
multiplier = 2;
}
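// density = multiplier * E / (N * (N - 1)); undirected graphs count each edge in both directions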
result = (multiplier * edgesCount) / (nodesCount * nodesCount - nodesCount);
return result;
} | @Test
public void testDirectedPathGraphDensity() {
GraphModel graphModel = GraphGenerator.generatePathDirectedGraph(2);
DirectedGraph graph = graphModel.getDirectedGraph();
GraphDensity d = new GraphDensity();
double density = d.calculateDensity(graph, true);
assertEquals(density, 0.5);
} |
public boolean isInfrastructure(ConnectPoint connectPoint) {
return infrastructurePoints.get().contains(connectPoint);
} | @Test
public void pointRelated() {
assertTrue("should be infrastructure point",
dt.isInfrastructure(new ConnectPoint(D1, P1)));
assertFalse("should not be infrastructure point",
dt.isInfrastructure(new ConnectPoint(D1, P2)));
} |
@Override
public synchronized boolean onActivity() {
if (!firstEventReceived) {
firstEventReceived = true;
return true;
}
return false;
} | @Test
public void testOnActivity_FirstCall() {
assertTrue(strategy.onActivity(), "First call of onActivity() should return true.");
} |
public static Compression.Algorithm getHFileCompressionAlgorithm(Map<String, String> paramsMap) {
String algoName = paramsMap.get(HFILE_COMPRESSION_ALGORITHM_NAME.key());
if (StringUtils.isNullOrEmpty(algoName)) {
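// No algorithm configured: fall back to GZ (gzip) compression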
return Compression.Algorithm.GZ;
}
return Compression.Algorithm.valueOf(algoName.toUpperCase());
} | @Test
public void testGetHFileCompressionAlgorithmWithEmptyString() {
assertEquals(Compression.Algorithm.GZ, getHFileCompressionAlgorithm(
Collections.singletonMap(HFILE_COMPRESSION_ALGORITHM_NAME.key(), "")));
} |
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testFetchErrorShouldClearPreferredReadReplica() {
buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
subscriptions.assignFromUser(singleton(tp0));
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
// Set preferred read replica to node=1
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
// Verify
Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(1, selected.id());
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
// Error - preferred read replica should be cleared. An actual error response will contain -1 as the
// preferred read replica. In the test we want to ensure that we are handling the error.
client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.EMPTY, Errors.NOT_LEADER_OR_FOLLOWER, -1L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(-1, selected.id());
} |
@Override
public LifecycleConfiguration getConfiguration(final Path file) throws BackgroundException {
final Path container = containerService.getContainer(file);
if(container.isRoot()) {
return LifecycleConfiguration.empty();
}
try {
final Storage.Buckets.Get request = session.getClient().buckets().get(container.getName());
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
final Bucket.Lifecycle status = request.execute().getLifecycle();
if(null != status) {
Integer transition = null;
Integer expiration = null;
for(Bucket.Lifecycle.Rule rule : status.getRule()) {
if("SetStorageClass".equals(rule.getAction().getType())) {
transition = rule.getCondition().getAge();
}
if("Delete".equals(rule.getAction().getType())) {
expiration = rule.getCondition().getAge();
}
}
return new LifecycleConfiguration(transition, expiration);
}
return LifecycleConfiguration.empty();
}
catch(IOException e) {
try {
throw new GoogleStorageExceptionMappingService().map("Failure to read attributes of {0}", e, container);
}
catch(AccessDeniedException | InteroperabilityException l) {
log.warn(String.format("Missing permission to read lifecycle configuration for %s %s", container, e.getMessage()));
return LifecycleConfiguration.empty();
}
}
} | @Test
public void testGetConfigurationAccessDenied() throws Exception {
assertEquals(LifecycleConfiguration.empty(), new GoogleStorageLifecycleFeature(session).getConfiguration(
new Path("bucket", EnumSet.of(Path.Type.directory))
));
} |
@Override
public boolean hasGlobalAdminRole(String username) {
return roleService.hasGlobalAdminRole(username);
} | @Test
void testHasGlobalAdminRole() {
when(roleService.hasGlobalAdminRole(anyString())).thenReturn(true);
boolean hasGlobalAdminRole = abstractAuthenticationManager.hasGlobalAdminRole("nacos");
assertTrue(hasGlobalAdminRole);
} |
@VisibleForTesting
public int getClusterNodesFailedRetrieved() {
return numGetClusterNodesFailedRetrieved.value();
} | @Test
public void testGetClusterNodesFailed() {
long totalBadBefore = metrics.getClusterNodesFailedRetrieved();
badSubCluster.getClusterNodes();
Assert.assertEquals(totalBadBefore + 1, metrics.getClusterNodesFailedRetrieved());
} |
public static Integer getFirstNumber(CharSequence StringWithNumber) {
return Convert.toInt(get(PatternPool.NUMBERS, StringWithNumber, 0), null);
} | @Test
public void getFirstNumberTest() {
// find the first matching number
final Integer resultGetFirstNumber = ReUtil.getFirstNumber(content);
assertEquals(Integer.valueOf(1234), resultGetFirstNumber);
} |
@VisibleForTesting
JobMeta filterPrivateDatabases( JobMeta jobMeta ) {
Set<String> privateDatabases = jobMeta.getPrivateDatabases();
if ( privateDatabases != null ) {
// keep only private transformation databases
for ( Iterator<DatabaseMeta> it = jobMeta.getDatabases().iterator(); it.hasNext(); ) {
DatabaseMeta databaseMeta = it.next();
String databaseName = databaseMeta.getName();
if ( !privateDatabases.contains( databaseName ) && !jobMeta.isDatabaseConnectionUsed( databaseMeta ) ) {
it.remove();
}
}
}
return jobMeta;
} | @Test
public void filterPrivateDatabasesWithOnePrivateDatabaseTest() {
IUnifiedRepository purMock = mock( IUnifiedRepository.class );
JobMeta jobMeta = new JobMeta( );
jobMeta.setDatabases( getDummyDatabases() );
Set<String> privateDatabases = new HashSet<>( );
privateDatabases.add( "database2" );
jobMeta.setPrivateDatabases( privateDatabases );
StreamToJobNodeConverter jobConverter = new StreamToJobNodeConverter( purMock );
assertEquals( 1, jobConverter.filterPrivateDatabases( jobMeta ).getDatabases().size() );
} |
@Override
public SymbolTable getSymbolTable(String symbolTableName)
{
try
{
SymbolTableMetadata metadata = _symbolTableNameHandler.extractMetadata(symbolTableName);
String serverNodeUri = metadata.getServerNodeUri();
String tableName = metadata.getSymbolTableName();
boolean isRemote = metadata.isRemote();
// Check if it's the default table name.
if (tableName.equals(_defaultResponseSymbolTableName))
{
return _defaultResponseSymbolTable;
}
// First check the cache.
SymbolTable symbolTable = _symbolTableNameToSymbolTableCache.getIfPresent(tableName);
if (symbolTable != null)
{
return symbolTable;
}
// If this is a local table, and we didn't find it in the cache, cry foul.
if (!isRemote)
{
throw new IllegalStateException("Unable to fetch symbol table with name: " + symbolTableName);
}
// Ok, we didn't find it in the cache, let's go query the service the table was served from.
URI symbolTableUri = new URI(serverNodeUri + "/" + RestLiSymbolTableRequestHandler.SYMBOL_TABLE_URI_PATH + "/" + tableName);
symbolTable = fetchRemoteSymbolTable(symbolTableUri, Collections.emptyMap(), false);
if (symbolTable != null)
{
// Cache the retrieved table.
_symbolTableNameToSymbolTableCache.put(tableName, symbolTable);
return symbolTable;
}
}
catch (URISyntaxException ex)
{
LOGGER.error("Failed to construct symbol table URI from symbol table name: " + symbolTableName, ex);
}
throw new IllegalStateException("Unable to fetch symbol table with name: " + symbolTableName);
} | @Test(expectedExceptions = IllegalStateException.class)
public void testGetRemoteSymbolTableFetchError()
{
RestResponseBuilder builder = new RestResponseBuilder();
builder.setStatus(404);
when(_client.restRequest(eq(new RestRequestBuilder(URI.create("https://OtherHost:100/service/symbolTable/Test--332004310")).build())))
.thenReturn(CompletableFuture.completedFuture(builder.build()));
_provider.getSymbolTable("https://OtherHost:100/service|Test--332004310");
} |
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
} | @Test
void assertGetBinaryProtocolValueWithMySQLTypeDatetime() {
assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.DATETIME), instanceOf(MySQLDateBinaryProtocolValue.class));
} |
public void start() {
LOG.info("Running process from " + executable.toAbsolutePath());
CommandLine cmdLine = new CommandLine(executable.toAbsolutePath().toString());
arguments.forEach(it -> cmdLine.addArgument(it, true));
try {
createExecutor().execute(cmdLine, environment.getEnv(), listener);
listener.onStart();
} catch (IOException e) {
throw new RuntimeException(e);
}
} | @Test
void testExitCode() throws ExecutionException, InterruptedException, TimeoutException {
final CompletableFuture<Integer> exitCodeFuture = new CompletableFuture<>();
final ProcessListener listener = new ProcessListener() {
@Override
public void onStart() {
}
@Override
public void onStdOut(String line) {
LOG.info("Stdout:" + line);
}
@Override
public void onStdErr(String line) {
LOG.info("Stderr:" + line);
}
@Override
public void onProcessComplete(int exitValue) {
exitCodeFuture.complete(exitValue);
}
@Override
public void onProcessFailed(ExecuteException e) {
exitCodeFuture.complete(e.getExitValue());
}
};
final CommandLineProcess process = new CommandLineProcess(binPath, List.of("143"), listener, new Environment(System.getenv()));
process.start();
final Integer exitCode = exitCodeFuture.get(10, TimeUnit.SECONDS);
Assertions.assertThat(exitCode).isEqualTo(143);
} |
@Override
public Num calculate(BarSeries series, Position position) {
if (position == null || !position.isClosed()) {
return series.zero();
}
Returns returns = new Returns(series, position, Returns.ReturnType.LOG);
return calculateVaR(returns, confidence);
} | @Test
public void calculateOnlyWithLossPositions() {
series = new MockBarSeries(numFunction, 100d, 95d, 100d, 80d, 85d, 70d);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(1, series),
Trade.buyAt(2, series), Trade.sellAt(5, series));
AnalysisCriterion varCriterion = getCriterion();
assertNumEquals(numOf(Math.log(80d / 100)), varCriterion.calculate(series, tradingRecord));
} |
protected void setMethod() {
boolean activateBody = RestMeta.isActiveBody( wMethod.getText() );
boolean activateParams = RestMeta.isActiveParameters( wMethod.getText() );
wlBody.setEnabled( activateBody );
wBody.setEnabled( activateBody );
wApplicationType.setEnabled( activateBody );
wlParameters.setEnabled( activateParams );
wParameters.setEnabled( activateParams );
wGet.setEnabled( activateParams );
wlMatrixParameters.setEnabled( activateParams );
wMatrixParameters.setEnabled( activateParams );
wMatrixGet.setEnabled( activateParams );
} | @Test
public void testSetMethod_GET() {
doReturn( RestMeta.HTTP_METHOD_GET ).when( method ).getText();
dialog.setMethod();
verify( bodyl, times( 1 ) ).setEnabled( false );
verify( body, times( 1 ) ).setEnabled( false );
verify( type, times( 1 ) ).setEnabled( false );
verify( paramsl, times( 1 ) ).setEnabled( false );
verify( params, times( 1 ) ).setEnabled( false );
verify( paramsb, times( 1 ) ).setEnabled( false );
verify( matrixl, times( 1 ) ).setEnabled( false );
verify( matrix, times( 1 ) ).setEnabled( false );
verify( matrixb, times( 1 ) ).setEnabled( false );
} |
public static void updateHadoopConfig(org.apache.hadoop.conf.Configuration hadoopConfig) {
LOG.info("Updating Hadoop configuration");
String providers = hadoopConfig.get(PROVIDER_CONFIG_NAME, "");
if (!providers.contains(DynamicTemporaryAWSCredentialsProvider.NAME)) {
if (providers.isEmpty()) {
LOG.debug("Setting provider");
providers = DynamicTemporaryAWSCredentialsProvider.NAME;
} else {
providers = DynamicTemporaryAWSCredentialsProvider.NAME + "," + providers;
LOG.debug("Prepending provider, new providers value: {}", providers);
}
hadoopConfig.set(PROVIDER_CONFIG_NAME, providers);
} else {
LOG.debug("Provider already exists");
}
if (!StringUtils.isNullOrWhitespaceOnly(region)) {
LOG.debug("Setting region");
hadoopConfig.set("fs.s3a.endpoint.region", region);
}
LOG.info("Updated Hadoop configuration successfully");
} | @Test
public void updateHadoopConfigShouldSetProviderWhenEmpty() {
org.apache.hadoop.conf.Configuration hadoopConfiguration =
new org.apache.hadoop.conf.Configuration();
hadoopConfiguration.set(PROVIDER_CONFIG_NAME, "");
AbstractS3DelegationTokenReceiver.updateHadoopConfig(hadoopConfiguration);
assertEquals(
DynamicTemporaryAWSCredentialsProvider.NAME,
hadoopConfiguration.get(PROVIDER_CONFIG_NAME));
} |
@NotNull
public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) {
// Fetch from the DB first, because the authorization code can only be used once.
// During social login, when no User is bound yet, binding requires a second login step, so the code needs to be usable twice
SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state);
if (socialUser != null) {
return socialUser;
}
// Fetch from the remote provider
AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state);
Assert.notNull(authUser, "Third-party user must not be null");
// Persist to the DB
socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid());
if (socialUser == null) {
socialUser = new SocialUserDO();
}
socialUser.setType(socialType).setCode(code).setState(state) // the code + state fields must be saved so they can be queried later
.setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken()).setRawTokenInfo((toJsonString(authUser.getToken())))
.setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar()).setRawUserInfo(toJsonString(authUser.getRawUserInfo()));
if (socialUser.getId() == null) {
socialUserMapper.insert(socialUser);
} else {
socialUserMapper.updateById(socialUser);
}
return socialUser;
} | @Test
public void testAuthSocialUser_notNull() {
// mock data
SocialUserDO socialUser = randomPojo(SocialUserDO.class,
o -> o.setType(SocialTypeEnum.GITEE.getType()).setCode("tudou").setState("yuanma"));
socialUserMapper.insert(socialUser);
// prepare parameters
Integer socialType = SocialTypeEnum.GITEE.getType();
Integer userType = randomEle(SocialTypeEnum.values()).getType();
String code = "tudou";
String state = "yuanma";
// invoke
SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state);
// assert
assertPojoEquals(socialUser, result);
} |
@Override
public Health health() {
if (registerCenterService == null) {
registerCenterService = PluginServiceManager.getPluginService(RegisterCenterService.class);
}
return Health.status(new Status(registerCenterService.getRegisterCenterStatus(), "Service Center is alive"))
.build();
} | @Test
public void health() {
try (final MockedStatic<PluginServiceManager> pluginServiceManagerMockedStatic = Mockito.mockStatic(PluginServiceManager.class)) {
pluginServiceManagerMockedStatic.when(() -> PluginServiceManager.getPluginService(RegisterCenterService.class))
.thenReturn(new TestRegistryService());
final ServiceCombHealthIndicator serviceCombHealthIndicator = new ServiceCombHealthIndicator();
final Health health = serviceCombHealthIndicator.health();
Assert.assertEquals(health.getStatus().getCode(), status);
}
} |
@Override
public Table getTable(String dbName, String tblName) {
return get(tableCache, OdpsTableName.of(dbName, tblName));
} | @Test
public void testGetTable() throws ExecutionException {
OdpsTable table = (OdpsTable) odpsMetadata.getTable("project", "tableName");
Assert.assertTrue(table.isOdpsTable());
Assert.assertEquals("tableName", table.getName());
Assert.assertEquals("project", table.getDbName());
Assert.assertFalse(table.isUnPartitioned());
Assert.assertEquals("c1", table.getColumn("c1").getName());
} |
private Collection<String> getDataSourceNames(final Collection<ShardingTableRuleConfiguration> tableRuleConfigs,
final Collection<ShardingAutoTableRuleConfiguration> autoTableRuleConfigs, final Collection<String> dataSourceNames) {
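// Without table rules, or when a table rule declares no actual data nodes, every configured data source name is used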
if (tableRuleConfigs.isEmpty() && autoTableRuleConfigs.isEmpty()) {
return dataSourceNames;
}
if (tableRuleConfigs.stream().map(ShardingTableRuleConfiguration::getActualDataNodes).anyMatch(each -> null == each || each.isEmpty())) {
return dataSourceNames;
}
Collection<String> result = new LinkedHashSet<>();
tableRuleConfigs.forEach(each -> result.addAll(getDataSourceNames(each)));
autoTableRuleConfigs.forEach(each -> result.addAll(getDataSourceNames(each)));
return result;
} | @Test
void assertGetDataSourceNamesWithoutShardingTablesAndShardingAutoTables() {
ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
ShardingRule shardingRule = new ShardingRule(shardingRuleConfig, createDataSources(), mock(ComputeNodeInstanceContext.class));
assertThat(shardingRule.getDataSourceNames(), is(new LinkedHashSet<>(Arrays.asList("ds_0", "ds_1", "resource0", "resource1"))));
} |
@Override
public Integer clusterGetSlotForKey(byte[] key) {
RFuture<Integer> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, key);
return syncFuture(f);
} | @Test
public void testClusterGetSlotForKey() {
Integer slot = connection.clusterGetSlotForKey("123".getBytes());
assertThat(slot).isNotNull();
} |
public static GaussianProcessRegression<double[]> fit(double[][] x, double[] y, Properties params) {
MercerKernel<double[]> kernel = MercerKernel.of(params.getProperty("smile.gaussian_process.kernel", "linear"));
double noise = Double.parseDouble(params.getProperty("smile.gaussian_process.noise", "1E-10"));
boolean normalize = Boolean.parseBoolean(params.getProperty("smile.gaussian_process.normalize", "true"));
double tol = Double.parseDouble(params.getProperty("smile.gaussian_process.tolerance", "1E-5"));
int maxIter = Integer.parseInt(params.getProperty("smile.gaussian_process.iterations", "0"));
return fit(x, y, kernel, noise, normalize, tol, maxIter);
} | @Test
public void testHPO() {
System.out.println("HPO longley");
MathEx.setSeed(19650218); // to get repeatable results.
double[][] longley = MathEx.clone(Longley.x);
MathEx.standardize(longley);
GaussianProcessRegression<double[]> model = GaussianProcessRegression.fit(longley, Longley.y, new GaussianKernel(8.0), 0.2, true, 1E-5, 500);
System.out.println(model);
assertEquals(-0.8996, model.L, 1E-4);
assertEquals(0.0137, model.noise, 1E-4);
MercerKernel<double[]> kernel = model.kernel;
double noise = model.noise;
RegressionMetrics metrics = LOOCV.regression(longley, Longley.y, (xi, yi) -> GaussianProcessRegression.fit(xi, yi, kernel, noise));
System.out.println(metrics);
assertEquals(1.7104, metrics.rmse, 1E-4);
} |
@Override
public int hashCode() {
int result = principalType != null ? principalType.hashCode() : 0;
result = 31 * result + (name != null ? name.hashCode() : 0);
return result;
} | @Test
public void testEqualsAndHashCode() {
String name = "KafkaUser";
KafkaPrincipal principal1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, name);
KafkaPrincipal principal2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, name);
assertEquals(principal1.hashCode(), principal2.hashCode());
assertEquals(principal1, principal2);
} |
public static Optional<String> getSystemProperty(String propertyName) {
return Optional.ofNullable(getSystemProperty(propertyName, null));
} | @Test
public void getSystemProperty_whenPropertyNotExists_returnsEmptyOptional() {
assertThat(TsunamiConfig.getSystemProperty(TEST_PROPERTY)).isEmpty();
} |
public long getExpireTimeInSeconds(String token) throws AccessException {
return NacosSignatureAlgorithm.getExpiredTimeInSeconds(token, key);
} | @Test
void testGetExpireTimeInSeconds() throws AccessException {
NacosJwtParser parser = new NacosJwtParser(encode("SecretKey012345678901234567SecretKey0123456789012345678901289012"));
String token = parser.jwtBuilder().setUserName("nacos").setExpiredTime(100L).compact();
long expiredTimeSeconds = parser.getExpireTimeInSeconds(token);
assertTrue(expiredTimeSeconds * 1000 - System.currentTimeMillis() > 0);
} |
public static <T> RuntimeTypeAdapterFactory<T> of(Class<T> baseType, String typeFieldName, boolean maintainType) {
return new RuntimeTypeAdapterFactory<>(baseType, typeFieldName, maintainType);
} | @Test
public void testNullTypeFieldName() {
assertThatThrownBy(() -> RuntimeTypeAdapterFactory.of(BillingInstrument.class, null))
.isInstanceOf(NullPointerException.class);
} |
@Override
public void handlerPlugin(final PluginData pluginData) {
super.getWasmExtern(HANDLER_PLUGIN_METHOD_NAME)
.ifPresent(handlerPlugin -> callWASI(pluginData, handlerPlugin));
} | @Test
public void handlerPluginTest() {
pluginDataHandler.handlerPlugin(pluginData);
testWasmPluginDataHandler.handlerPlugin(pluginData);
} |
public static FromEndOfWindow pastEndOfWindow() {
return new FromEndOfWindow();
} | @Test
public void testEarlyAndAtWatermark() throws Exception {
tester =
TriggerStateMachineTester.forTrigger(
AfterWatermarkStateMachine.pastEndOfWindow().withEarlyFirings(mockEarly),
FixedWindows.of(Duration.millis(100)));
injectElements(1);
IntervalWindow window = new IntervalWindow(new Instant(0), new Instant(100));
testRunningAsTrigger(mockEarly, window);
// Fire due to watermark
when(mockEarly.shouldFire(anyTriggerContext())).thenReturn(false);
tester.advanceInputWatermark(new Instant(100));
assertTrue(tester.shouldFire(window));
tester.fireIfShouldFire(window);
assertTrue(tester.isMarkedFinished(window));
} |
static <K, V> StateSerdes<K, V> prepareStoreSerde(final StateStoreContext context,
final String storeName,
final String changelogTopic,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final PrepareFunc<V> prepareValueSerdeFunc) {
return new StateSerdes<>(
changelogTopic,
prepareSerde(WrappingNullableUtils::prepareKeySerde, storeName, keySerde, new SerdeGetter(context), true, context.taskId()),
prepareSerde(prepareValueSerdeFunc, storeName, valueSerde, new SerdeGetter(context), false, context.taskId())
);
} | @Test
public void shouldThrowStreamsExceptionWithExplicitErrorMessageForStateStoreContext() {
final MockInternalNewProcessorContext<String, String> context = new MockInternalNewProcessorContext<>();
utilsMock.when(() -> WrappingNullableUtils.prepareValueSerde(any(), any())).thenThrow(new StreamsException(""));
final Throwable exception = assertThrows(StreamsException.class,
() -> StoreSerdeInitializer.prepareStoreSerde((StateStoreContext) context, "myStore", "topic",
new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde));
assertThat(exception.getMessage(), equalTo("Failed to initialize value serdes for store myStore"));
} |
public static Schema create(Type type) {
switch (type) {
case STRING:
return new StringSchema();
case BYTES:
return new BytesSchema();
case INT:
return new IntSchema();
case LONG:
return new LongSchema();
case FLOAT:
return new FloatSchema();
case DOUBLE:
return new DoubleSchema();
case BOOLEAN:
return new BooleanSchema();
case NULL:
return new NullSchema();
default:
throw new AvroRuntimeException("Can't create a: " + type);
}
} | @Test
void longDefaultValue() {
Schema.Field field = new Schema.Field("myField", Schema.create(Schema.Type.LONG), "doc", 1L);
assertTrue(field.hasDefaultValue());
assertEquals(1L, field.defaultVal());
assertEquals(1L, GenericData.get().getDefaultValue(field));
field = new Schema.Field("myField", Schema.create(Schema.Type.LONG), "doc", Long.MIN_VALUE);
assertTrue(field.hasDefaultValue());
assertEquals(Long.MIN_VALUE, field.defaultVal());
assertEquals(Long.MIN_VALUE, GenericData.get().getDefaultValue(field));
field = new Schema.Field("myField", Schema.create(Schema.Type.LONG), "doc", Long.MAX_VALUE);
assertTrue(field.hasDefaultValue());
assertEquals(Long.MAX_VALUE, field.defaultVal());
assertEquals(Long.MAX_VALUE, GenericData.get().getDefaultValue(field));
} |
public static CsvMapper createCsvMapper() {
final CsvMapper csvMapper = new CsvMapper();
registerModules(csvMapper);
return csvMapper;
} | @Test
void testCsvMapperOptionalSupportedEnabled() throws Exception {
final CsvMapper mapper =
JacksonMapperFactory.createCsvMapper()
// ensures symmetric read/write behavior for empty optionals/strings
// ensures: Optional.empty() --write--> "" --read--> Optional.empty()
// otherwise: Optional.empty() --write--> "" --read--> Optional("")
// we should consider enabling this by default, but it unfortunately
// also affects String parsing without Optionals (i.e., prior code)
.enable(CsvParser.Feature.EMPTY_STRING_AS_NULL);
final ObjectWriter writer = mapper.writerWithSchemaFor(TypeWithOptional.class);
assertThat(writer.writeValueAsString(new TypeWithOptional(Optional.of("value"))))
.isEqualTo("value\n");
assertThat(writer.writeValueAsString(new TypeWithOptional(Optional.empty())))
.isEqualTo("\n");
final ObjectReader reader = mapper.readerWithSchemaFor(TypeWithOptional.class);
assertThat(reader.readValue("value\n", TypeWithOptional.class).data).contains("value");
assertThat(reader.readValue("null\n", TypeWithOptional.class).data).contains("null");
assertThat(reader.readValue("\n", TypeWithOptional.class).data).isEmpty();
} |
public static boolean isPubKeyCompressed(byte[] encoded) {
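// SEC-encoded public keys: the compressed form is 33 bytes with a 0x02/0x03 prefix, the uncompressed form is 65 bytes with a 0x04 prefix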
if (encoded.length == 33 && (encoded[0] == 0x02 || encoded[0] == 0x03))
return true;
else if (encoded.length == 65 && encoded[0] == 0x04)
return false;
else
throw new IllegalArgumentException(ByteUtils.formatHex(encoded));
} | @Test(expected = IllegalArgumentException.class)
public void isPubKeyCompressed_tooShort() {
ECKey.isPubKeyCompressed(ByteUtils.parseHex("036d"));
} |
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
ScannerReport.LineCoverage reportCoverage = getNextLineCoverageIfMatchLine(lineBuilder.getLine());
if (reportCoverage != null) {
processCoverage(lineBuilder, reportCoverage);
coverage = null;
}
return Optional.empty();
} | @Test
public void set_coverage() {
CoverageLineReader computeCoverageLine = new CoverageLineReader(newArrayList(ScannerReport.LineCoverage.newBuilder()
.setLine(1)
.setConditions(10)
.setHits(true)
.setCoveredConditions(2)
.build()).iterator());
DbFileSources.Line.Builder lineBuilder = DbFileSources.Data.newBuilder().addLinesBuilder().setLine(1);
assertThat(computeCoverageLine.read(lineBuilder)).isEmpty();
assertThat(lineBuilder.getLineHits()).isOne();
assertThat(lineBuilder.getConditions()).isEqualTo(10);
assertThat(lineBuilder.getCoveredConditions()).isEqualTo(2);
} |
public void setPreservePathElements(boolean preservePathElements) {
this.preservePathElements = preservePathElements;
} | @Test
public void testTarWithPreservedPathElements() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:tar");
mock.expectedMessageCount(1);
mock.expectedHeaderReceived(FILE_NAME, "poem.txt.tar");
tar.setPreservePathElements(true);
template.sendBodyAndHeader("direct:tar", TEXT, FILE_NAME, "poems/poem.txt");
MockEndpoint.assertIsSatisfied(context);
Exchange exchange = mock.getReceivedExchanges().get(0);
final byte[] convertedArray = exchange.getIn().getBody(byte[].class);
Map<String, EntryMetadata> tarData = toEntries(convertedArray);
assertTrue(tarData.containsKey("poems/"));
assertTrue(tarData.containsKey("poems/poem.txt"));
EntryMetadata entryFileMetadata = tarData.get("poems/poem.txt");
assertEquals(TEXT.getBytes(StandardCharsets.UTF_8).length, entryFileMetadata.size);
assertFalse(entryFileMetadata.isDirectory);
EntryMetadata entryDirMetadata = tarData.get("poems/");
assertTrue(entryDirMetadata.isDirectory);
} |
@Override
public TimeSeriesEntry<V, L> pollLastEntry() {
return get(pollLastEntryAsync());
} | @Test
public void testPollLastEntry() {
RTimeSeries<String, String> t = redisson.getTimeSeries("test");
t.add(1, "10", "100");
t.add(2, "20");
t.add(3, "30");
TimeSeriesEntry<String, String> e = t.pollLastEntry();
assertThat(e).isEqualTo(new TimeSeriesEntry<>(3, "30"));
assertThat(t.size()).isEqualTo(2);
TimeSeriesEntry<String, String> ee = t.lastEntry();
assertThat(ee).isEqualTo(new TimeSeriesEntry<>(2, "20"));
} |
@SneakyThrows
@Override
public Integer call() throws Exception {
super.call();
PicocliRunner.call(App.class, "template", "namespace", "--help");
return 0;
} | @Test
void runWithNoParam() {
ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
try (ApplicationContext ctx = ApplicationContext.builder().deduceEnvironment(false).start()) {
String[] args = {};
Integer call = PicocliRunner.call(TemplateNamespaceCommand.class, ctx, args);
assertThat(call, is(0));
assertThat(out.toString(), containsString("Usage: kestra template namespace"));
}
} |
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
if (list == null) {
return FEELFnResult.ofResult(true);
}
boolean result = true;
for (final Object element : list) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
" a Boolean"));
} else {
if (element != null) {
result &= (Boolean) element;
}
}
}
return FEELFnResult.ofResult(result);
} | @Test
void invokeListParamReturnTrue() {
FunctionTestUtil.assertResult(nnAllFunction.invoke(Arrays.asList(Boolean.TRUE, Boolean.TRUE)), true);
} |
public static String describe(List<org.apache.iceberg.expressions.Expression> exprs) {
return exprs.stream().map(Spark3Util::describe).collect(Collectors.joining(", "));
} | @Test
public void testDescribeExpression() {
Expression refExpression = equal("id", 1);
assertThat(Spark3Util.describe(refExpression)).isEqualTo("id = 1");
Expression yearExpression = greaterThan(year("ts"), 10);
assertThat(Spark3Util.describe(yearExpression)).isEqualTo("year(ts) > 10");
Expression monthExpression = greaterThanOrEqual(month("ts"), 10);
assertThat(Spark3Util.describe(monthExpression)).isEqualTo("month(ts) >= 10");
Expression dayExpression = lessThan(day("ts"), 10);
assertThat(Spark3Util.describe(dayExpression)).isEqualTo("day(ts) < 10");
Expression hourExpression = lessThanOrEqual(hour("ts"), 10);
assertThat(Spark3Util.describe(hourExpression)).isEqualTo("hour(ts) <= 10");
Expression bucketExpression = in(bucket("id", 5), 3);
assertThat(Spark3Util.describe(bucketExpression)).isEqualTo("bucket[5](id) IN (3)");
Expression truncateExpression = notIn(truncate("name", 3), "abc");
assertThat(Spark3Util.describe(truncateExpression))
.isEqualTo("truncate[3](name) NOT IN ('abc')");
Expression andExpression = and(refExpression, yearExpression);
assertThat(Spark3Util.describe(andExpression)).isEqualTo("(id = 1 AND year(ts) > 10)");
} |
public static <T> TimeLimiterOperator<T> of(TimeLimiter timeLimiter) {
return new TimeLimiterOperator<>(timeLimiter);
} | @Test
public void otherErrorUsingMono() {
given(timeLimiter.getTimeLimiterConfig())
.willReturn(toConfig(Duration.ofMinutes(1)));
given(helloWorldService.returnHelloWorld())
.willThrow(new Error("BAM!"));
Mono<?> mono = Mono.fromCallable(helloWorldService::returnHelloWorld)
.transformDeferred(TimeLimiterOperator.of(timeLimiter));
StepVerifier.create(mono)
.expectError(Error.class)
.verify(Duration.ofMinutes(1));
then(timeLimiter).should()
.onError(any(Error.class));
} |
int compareSegmentedKeys(final Bytes cacheKey, final Bytes storeKey) {
final long storeSegmentId = segmentId(storeKey);
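// The cache key stores an 8-byte segment id prefix followed by the store key bytes, so compare the segment ids before the key bytes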
final long cacheSegmentId = ByteBuffer.wrap(cacheKey.get()).getLong();
final int segmentCompare = Long.compare(cacheSegmentId, storeSegmentId);
if (segmentCompare == 0) {
final byte[] cacheKeyBytes = cacheKey.get();
final byte[] storeKeyBytes = storeKey.get();
return Bytes.BYTES_LEXICO_COMPARATOR.compare(
cacheKeyBytes, SEGMENT_ID_BYTES, cacheKeyBytes.length - SEGMENT_ID_BYTES,
storeKeyBytes, 0, storeKeyBytes.length
);
} else {
return segmentCompare;
}
} | @Test
public void compareSegmentedKeys() {
assertThat(
"same key in same segment should be ranked the same",
cacheFunction.compareSegmentedKeys(
cacheFunction.cacheKey(THE_KEY),
THE_KEY
) == 0
);
final Bytes sameKeyInPriorSegment = WindowKeySchema.toStoreKeyBinary(new byte[]{0xA, 0xB, 0xC}, 1234, 42);
assertThat(
"same keys in different segments should be ordered according to segment",
cacheFunction.compareSegmentedKeys(
cacheFunction.cacheKey(sameKeyInPriorSegment),
THE_KEY
) < 0
);
assertThat(
"same keys in different segments should be ordered according to segment",
cacheFunction.compareSegmentedKeys(
cacheFunction.cacheKey(THE_KEY),
sameKeyInPriorSegment
) > 0
);
final Bytes lowerKeyInSameSegment = WindowKeySchema.toStoreKeyBinary(new byte[]{0xA, 0xB, 0xB}, TIMESTAMP - 1, 0);
assertThat(
"different keys in same segments should be ordered according to key",
cacheFunction.compareSegmentedKeys(
cacheFunction.cacheKey(THE_KEY),
lowerKeyInSameSegment
) > 0
);
assertThat(
"different keys in same segments should be ordered according to key",
cacheFunction.compareSegmentedKeys(
cacheFunction.cacheKey(lowerKeyInSameSegment),
THE_KEY
) < 0
);
} |
@Override
public Optional<IndexMetaData> revise(final String tableName, final IndexMetaData originalMetaData, final ShardingRule rule) {
if (shardingTable.getActualDataNodes().isEmpty()) {
return Optional.empty();
}
IndexMetaData result = new IndexMetaData(IndexMetaDataUtils.getLogicIndexName(originalMetaData.getName(), shardingTable.getActualDataNodes().iterator().next().getTableName()));
result.getColumns().addAll(originalMetaData.getColumns());
result.setUnique(originalMetaData.isUnique());
return Optional.of(result);
} | @Test
void assertReviseWhenActualDataNodeIsEmpty() {
shardingRule = createShardingRule();
ShardingTable shardingTable = mock(ShardingTable.class);
when(shardingTable.getActualDataNodes()).thenReturn(Collections.emptyList());
shardingIndexReviser = new ShardingIndexReviser(shardingTable);
IndexMetaData originalMetaData = new IndexMetaData("TEST_INDEX");
Optional<IndexMetaData> revisedMetaData = shardingIndexReviser.revise("TABLE_NAME_1", originalMetaData, shardingRule);
assertThat(revisedMetaData, is(Optional.empty()));
} |
public static OpenGaussErrorResponsePacket newInstance(final Exception cause) {
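// Use the server-provided error message when the cause wraps one; otherwise map the exception to a generic SQL error packet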
Optional<ServerErrorMessage> serverErrorMessage = findServerErrorMessage(cause);
return serverErrorMessage.map(OpenGaussErrorResponsePacket::new).orElseGet(() -> createErrorResponsePacket(SQLExceptionTransformEngine.toSQLException(cause, DATABASE_TYPE)));
} | @Test
void assertNewInstanceWithServerErrorMessage() {
String encodedMessage = "SFATAL\0C3D000\0Mdatabase \"test\" does not exist\0c-1\0Ddetail\0Hhint\0P1\0p2\0qinternal query\0Wwhere\0Ffile\0L3\0Rroutine\0a0.0.0.0:1";
PSQLException cause = new PSQLException(new ServerErrorMessage(encodedMessage));
OpenGaussErrorResponsePacket actual = OpenGaussErrorPacketFactory.newInstance(cause);
Map<Character, String> actualFields = getFieldsInPacket(actual);
assertThat(actualFields.size(), is(13));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_SEVERITY), is("FATAL"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_CODE), is("3D000"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_MESSAGE), is("database \"test\" does not exist"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_ERROR_CODE), is("-1"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_DETAIL), is("detail"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_HINT), is("hint"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_POSITION), is("1"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_INTERNAL_POSITION), is("2"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_INTERNAL_QUERY), is("internal query"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_WHERE), is("where"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_FILE), is("file"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_LINE), is("3"));
assertThat(actualFields.get(OpenGaussErrorResponsePacket.FIELD_TYPE_ROUTINE), is("routine"));
} |
public BlobConfiguration getConfiguration() {
return configuration;
} | @Test
void testCreateEndpointWithChangeFeedConfig() {
context.getRegistry().bind("creds", storageSharedKeyCredential());
context.getRegistry().bind("metadata", Collections.emptyMap());
context.getRegistry().bind("starttime",
OffsetDateTime.of(LocalDate.of(2021, 8, 4), LocalTime.of(11, 5), ZoneOffset.ofHours(0)));
context.getRegistry().bind("endtime",
OffsetDateTime.of(LocalDate.of(2021, 12, 4), LocalTime.of(11, 5), ZoneOffset.ofHours(0)));
final String uri = "azure-storage-blob://camelazure"
+ "?credentials=#creds"
+ "&credentialType=SHARED_KEY_CREDENTIAL"
+ "&operation=getChangeFeed"
+ "&changeFeedStartTime=#starttime"
+ "&changeFeedEndTime=#endtime";
final BlobEndpoint endpoint = (BlobEndpoint) context.getEndpoint(uri);
assertEquals("camelazure", endpoint.getConfiguration().getAccountName());
assertNull(endpoint.getConfiguration().getServiceClient());
assertNotNull(endpoint.getConfiguration().getCredentials());
assertEquals(BlobOperationsDefinition.getChangeFeed, endpoint.getConfiguration().getOperation());
assertEquals(OffsetDateTime.parse("2021-08-04T11:05Z"), endpoint.getConfiguration().getChangeFeedStartTime());
assertEquals(OffsetDateTime.parse("2021-12-04T11:05Z"), endpoint.getConfiguration().getChangeFeedEndTime());
} |
static void checkValidTableId(String idToCheck) {
if (idToCheck.length() < MIN_TABLE_ID_LENGTH) {
throw new IllegalArgumentException("Table ID " + idToCheck + " cannot be empty.");
}
if (idToCheck.length() > MAX_TABLE_ID_LENGTH) {
throw new IllegalArgumentException(
"Table ID "
+ idToCheck
+ " cannot be longer than "
+ MAX_TABLE_ID_LENGTH
+ " characters.");
}
if (ILLEGAL_TABLE_CHARS.matcher(idToCheck).find()) {
throw new IllegalArgumentException(
"Table ID "
+ idToCheck
+ " is not a valid ID. Only letters, numbers, hyphens, underscores and exclamation points are allowed.");
}
} | @Test
public void testCheckValidTableIdWhenIdContainsIllegalCharacter() {
assertThrows(IllegalArgumentException.class, () -> checkValidTableId("table-id%"));
} |
@Override
void execute() {
Set<String> hdfsRoots = getObjectStore().listFSRoots();
if (hdfsRoots != null) {
System.out.println("Listing FS Roots..");
for (String s : hdfsRoots) {
System.out.println(s);
}
} else {
System.err.println("Encountered error during listFSRoot");
}
} | @Test
public void testListFSRoot() throws Exception {
String fsRoot1 = "hdfs://abc.de";
String fsRoot2 = "hdfs://fgh.ji";
ObjectStore mockObjectStore = Mockito.mock(ObjectStore.class);
when(mockObjectStore.listFSRoots()).thenReturn(Sets.newHashSet(fsRoot1, fsRoot2));
OutputStream os = new ByteArrayOutputStream();
System.setOut(new PrintStream(os));
MetaToolTaskListFSRoot t = new MetaToolTaskListFSRoot();
t.setCommandLine(new HiveMetaToolCommandLine(new String[] {"-listFSRoot"}));
t.setObjectStore(mockObjectStore);
t.execute();
assertTrue(os.toString() + " doesn't contain " + fsRoot1, os.toString().contains(fsRoot1));
assertTrue(os.toString() + " doesn't contain " + fsRoot2, os.toString().contains(fsRoot2));
} |
static public boolean areOnSameFileStore(File a, File b) throws RolloverFailure {
if (!a.exists()) {
throw new IllegalArgumentException("File [" + a + "] does not exist.");
}
if (!b.exists()) {
throw new IllegalArgumentException("File [" + b + "] does not exist.");
}
// Implements the following by reflection
// Path pathA = a.toPath();
// Path pathB = b.toPath();
//
// FileStore fileStoreA = Files.getFileStore(pathA);
// FileStore fileStoreB = Files.getFileStore(pathB);
//
// return fileStoreA.equals(fileStoreB);
try {
Class<?> pathClass = Class.forName(PATH_CLASS_STR);
Class<?> filesClass = Class.forName(FILES_CLASS_STR);
Method toPath = File.class.getMethod("toPath");
Method getFileStoreMethod = filesClass.getMethod("getFileStore", pathClass);
Object pathA = toPath.invoke(a);
Object pathB = toPath.invoke(b);
Object fileStoreA = getFileStoreMethod.invoke(null, pathA);
Object fileStoreB = getFileStoreMethod.invoke(null, pathB);
return fileStoreA.equals(fileStoreB);
} catch (Exception e) {
throw new RolloverFailure("Failed to check file store equality for [" + a + "] and [" + b + "]", e);
}
} | @Ignore
@Test
public void manual_filesOnDifferentVolumesShouldBeDetectedAsSuch() throws RolloverFailure {
if(!EnvUtil.isJDK7OrHigher())
return;
// author's computer has two volumes
File c = new File("c:/tmp/");
File d = new File("d:/");
assertFalse(FileStoreUtil.areOnSameFileStore(c, d));
} |
@VisibleForTesting
void submit(long requestId, DispatchableSubPlan dispatchableSubPlan, long timeoutMs, Map<String, String> queryOptions)
throws Exception {
Deadline deadline = Deadline.after(timeoutMs, TimeUnit.MILLISECONDS);
// Serialize the stage plans in parallel
List<DispatchablePlanFragment> stagePlans = dispatchableSubPlan.getQueryStageList();
Set<QueryServerInstance> serverInstances = new HashSet<>();
// Ignore the reduce stage (stage 0)
int numStages = stagePlans.size() - 1;
List<CompletableFuture<StageInfo>> stageInfoFutures = new ArrayList<>(numStages);
for (int i = 0; i < numStages; i++) {
DispatchablePlanFragment stagePlan = stagePlans.get(i + 1);
serverInstances.addAll(stagePlan.getServerInstanceToWorkerIdMap().keySet());
stageInfoFutures.add(CompletableFuture.supplyAsync(() -> {
ByteString rootNode = PlanNodeSerializer.process(stagePlan.getPlanFragment().getFragmentRoot()).toByteString();
ByteString customProperty = QueryPlanSerDeUtils.toProtoProperties(stagePlan.getCustomProperties());
return new StageInfo(rootNode, customProperty);
}, _executorService));
}
List<StageInfo> stageInfos = new ArrayList<>(numStages);
try {
for (CompletableFuture<StageInfo> future : stageInfoFutures) {
stageInfos.add(future.get(deadline.timeRemaining(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS));
}
} finally {
for (CompletableFuture<?> future : stageInfoFutures) {
if (!future.isDone()) {
future.cancel(true);
}
}
}
Map<String, String> requestMetadata = new HashMap<>();
requestMetadata.put(CommonConstants.Query.Request.MetadataKeys.REQUEST_ID, Long.toString(requestId));
requestMetadata.put(CommonConstants.Broker.Request.QueryOptionKey.TIMEOUT_MS,
Long.toString(deadline.timeRemaining(TimeUnit.MILLISECONDS)));
requestMetadata.putAll(queryOptions);
ByteString protoRequestMetadata = QueryPlanSerDeUtils.toProtoProperties(requestMetadata);
// Submit the query plan to all servers in parallel
int numServers = serverInstances.size();
BlockingQueue<AsyncQueryDispatchResponse> dispatchCallbacks = new ArrayBlockingQueue<>(numServers);
for (QueryServerInstance serverInstance : serverInstances) {
_executorService.submit(() -> {
try {
Worker.QueryRequest.Builder requestBuilder = Worker.QueryRequest.newBuilder();
requestBuilder.setVersion(CommonConstants.MultiStageQueryRunner.PlanVersions.V1);
for (int i = 0; i < numStages; i++) {
int stageId = i + 1;
DispatchablePlanFragment stagePlan = stagePlans.get(stageId);
List<Integer> workerIds = stagePlan.getServerInstanceToWorkerIdMap().get(serverInstance);
if (workerIds != null) {
List<WorkerMetadata> stageWorkerMetadataList = stagePlan.getWorkerMetadataList();
List<WorkerMetadata> workerMetadataList = new ArrayList<>(workerIds.size());
for (int workerId : workerIds) {
workerMetadataList.add(stageWorkerMetadataList.get(workerId));
}
List<Worker.WorkerMetadata> protoWorkerMetadataList =
QueryPlanSerDeUtils.toProtoWorkerMetadataList(workerMetadataList);
StageInfo stageInfo = stageInfos.get(i);
Worker.StageMetadata stageMetadata =
Worker.StageMetadata.newBuilder().setStageId(stageId).addAllWorkerMetadata(protoWorkerMetadataList)
.setCustomProperty(stageInfo._customProperty).build();
requestBuilder.addStagePlan(
Worker.StagePlan.newBuilder().setRootNode(stageInfo._rootNode).setStageMetadata(stageMetadata)
.build());
}
}
requestBuilder.setMetadata(protoRequestMetadata);
getOrCreateDispatchClient(serverInstance).submit(requestBuilder.build(), serverInstance, deadline,
dispatchCallbacks::offer);
} catch (Throwable t) {
LOGGER.warn("Caught exception while dispatching query: {} to server: {}", requestId, serverInstance, t);
dispatchCallbacks.offer(new AsyncQueryDispatchResponse(serverInstance, null, t));
}
});
}
int numSuccessCalls = 0;
// TODO: Cancel all dispatched requests if one of the dispatch errors out or deadline is breached.
while (!deadline.isExpired() && numSuccessCalls < numServers) {
AsyncQueryDispatchResponse resp =
dispatchCallbacks.poll(deadline.timeRemaining(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
if (resp != null) {
if (resp.getThrowable() != null) {
throw new RuntimeException(
String.format("Error dispatching query: %d to server: %s", requestId, resp.getServerInstance()),
resp.getThrowable());
} else {
Worker.QueryResponse response = resp.getQueryResponse();
assert response != null;
if (response.containsMetadata(CommonConstants.Query.Response.ServerResponseStatus.STATUS_ERROR)) {
throw new RuntimeException(
String.format("Unable to execute query plan for request: %d on server: %s, ERROR: %s", requestId,
resp.getServerInstance(),
response.getMetadataOrDefault(CommonConstants.Query.Response.ServerResponseStatus.STATUS_ERROR,
"null")));
}
numSuccessCalls++;
}
}
}
if (deadline.isExpired()) {
throw new TimeoutException("Timed out waiting for response of async query-dispatch");
}
} | @Test
public void testQueryDispatcherThrowsWhenQueryServerTimesOut() {
String sql = "SELECT * FROM a WHERE col1 = 'foo'";
QueryServer failingQueryServer = _queryServerMap.values().iterator().next();
CountDownLatch neverClosingLatch = new CountDownLatch(1);
Mockito.doAnswer(invocationOnMock -> {
neverClosingLatch.await();
StreamObserver<Worker.QueryResponse> observer = invocationOnMock.getArgument(1);
observer.onCompleted();
return null;
}).when(failingQueryServer).submit(Mockito.any(), Mockito.any());
DispatchableSubPlan dispatchableSubPlan = _queryEnvironment.planQuery(sql);
try {
_queryDispatcher.submit(REQUEST_ID_GEN.getAndIncrement(), dispatchableSubPlan, 200L, Collections.emptyMap());
Assert.fail("Method call above should have failed");
} catch (Exception e) {
String message = e.getMessage();
Assert.assertTrue(
message.contains("Timed out waiting for response") || message.contains("Error dispatching query"));
}
neverClosingLatch.countDown();
Mockito.reset(failingQueryServer);
} |
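submit(...) above fans the per-server dispatch calls out to an executor and then drains one acknowledgement per server from a bounded queue, failing fast on the first error and giving up when the deadline lapses. A stripped-down sketch of that collect loop, using a plain nanosecond deadline and an Optional-wrapped error in place of the Pinot response types (all names here are hypothetical):

import java.util.Optional;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class DispatchCollectorSketch {
    // Waits for `expected` acknowledgements; an Optional carrying a Throwable marks a failed dispatch.
    public static void awaitAll(BlockingQueue<Optional<Throwable>> responses, int expected, long timeoutMs)
            throws InterruptedException, TimeoutException {
        long deadlineNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        int acknowledged = 0;
        while (acknowledged < expected) {
            long remainingNanos = deadlineNanos - System.nanoTime();
            if (remainingNanos <= 0) {
                throw new TimeoutException("Timed out waiting for " + (expected - acknowledged) + " dispatch responses");
            }
            Optional<Throwable> response = responses.poll(remainingNanos, TimeUnit.NANOSECONDS);
            if (response == null) {
                continue;                                        // poll timed out; the next iteration re-checks the deadline
            }
            if (response.isPresent()) {
                throw new RuntimeException("Dispatch failed", response.get());
            }
            acknowledged++;                                      // one more server acknowledged successfully
        }
    }
}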
@Override
public <K, V> Map<K, V> toMap(DataTable dataTable, Type keyType, Type valueType) {
requireNonNull(dataTable, "dataTable may not be null");
requireNonNull(keyType, "keyType may not be null");
requireNonNull(valueType, "valueType may not be null");
if (dataTable.isEmpty()) {
return emptyMap();
}
DataTable keyColumn = dataTable.columns(0, 1);
DataTable valueColumns = dataTable.columns(1);
String firstHeaderCell = keyColumn.cell(0, 0);
boolean firstHeaderCellIsBlank = firstHeaderCell == null || firstHeaderCell.isEmpty();
List<K> keys = convertEntryKeys(keyType, keyColumn, valueType, firstHeaderCellIsBlank);
if (valueColumns.isEmpty()) {
return createMap(keyType, keys, valueType, nCopies(keys.size(), null));
}
boolean keysImplyTableRowTransformer = keys.size() == dataTable.height() - 1;
List<V> values = convertEntryValues(valueColumns, keyType, valueType, keysImplyTableRowTransformer);
if (keys.size() != values.size()) {
throw keyValueMismatchException(firstHeaderCellIsBlank, keys.size(), keyType, values.size(), valueType);
}
return createMap(keyType, keys, valueType, values);
} | @Test
void to_map_of_unknown_value_type__throws_exception() {
DataTable table = parse("",
" | Annie M. G. Schmidt | 1911-03-20 |",
" | Roald Dahl | 1916-09-13 |",
" | Astrid Lindgren | 1907-11-14 |");
CucumberDataTableException exception = assertThrows(
CucumberDataTableException.class,
() -> converter.toMap(table, String.class, Date.class));
assertThat(exception.getMessage(), is("" +
"Can't convert DataTable to Map<java.lang.String, java.util.Date>.\n" +
"Please review these problems:\n" +
"\n" +
" - There was no table entry transformer registered for java.util.Date.\n" +
" Please consider registering a table entry transformer.\n" +
"\n" +
" - There was no table cell transformer registered for java.util.Date.\n" +
" Please consider registering a table cell transformer.\n" +
"\n" +
" - There was no default table cell transformer registered to transform java.util.Date.\n" +
" Please consider registering a default table cell transformer.\n" +
"\n" +
"Note: Usually solving one is enough"));
} |
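toMap above treats the first column as keys and the remaining columns as values, then zips the two lists after checking that their sizes line up. A hypothetical, Cucumber-free illustration of that split for the simplest two-column case (raw cells in, LinkedHashMap out, so the table in the test above would map "Annie M. G. Schmidt" to "1911-03-20"):

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class ColumnSplitSketch {
    // Maps the first cell of every row to the second cell, preserving row order.
    public static Map<String, String> twoColumnTableToMap(List<List<String>> rows) {
        Map<String, String> result = new LinkedHashMap<>();
        for (List<String> row : rows) {
            if (row.size() < 2) {
                throw new IllegalArgumentException("Expected at least two columns, got: " + row);
            }
            result.put(row.get(0), row.get(1));                  // key column -> first value column
        }
        return result;
    }
}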