focal_method (string, lengths 13-60.9k) | test_case (string, lengths 25-109k) |
---|---|
@Override
public Iterable<K> loadAllKeys() {
// If loadAllKeys property is disabled, don't load anything
if (!genericMapStoreProperties.loadAllKeys) {
return Collections.emptyList();
}
awaitSuccessfulInit();
String sql = queries.loadAllKeys();
SqlResult keysResult = sqlService.execute(sql);
// The contract for loadAllKeys says that if the iterator implements Closeable
// then it will be closed when the iteration is over
return () -> new MappingClosingIterator<>(
keysResult.iterator(),
(SqlRow row) -> row.getObject(genericMapStoreProperties.idColumn),
keysResult::close
);
} | @Test
public void givenRowAndIdColumn_whenLoadAllKeys_thenReturnKeys() {
ObjectSpec spec = objectProvider.createObject(mapName, true);
objectProvider.insertItems(spec, 1);
Properties properties = new Properties();
properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
properties.setProperty(ID_COLUMN_PROPERTY, "person-id");
mapLoader = createMapLoader(properties, hz);
List<Integer> ids = newArrayList(mapLoader.loadAllKeys());
assertThat(ids).contains(0);
} |
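A minimal caller-side sketch of the Closeable contract noted above (assumed consumer code using java.io.Closeable and java.util.Iterator, not Hazelcast internals):
static void consumeKeys(Iterable<Object> keys) throws IOException {
Iterator<Object> it = keys.iterator();
try {
while (it.hasNext()) {
System.out.println(it.next()); // consume the key
}
} finally {
if (it instanceof Closeable) {
((Closeable) it).close(); // honour the loadAllKeys contract for Closeable iterators
}
}
}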
public static CharSequence escapeCsv(CharSequence value) {
return escapeCsv(value, false);
} | @Test
public void escapeCsvWithSingleComma() {
CharSequence value = ",";
CharSequence expected = "\",\"";
// Compare as strings: escapeCsv may return a CharSequence implementation whose equals differs from String's.
assertEquals(expected.toString(), escapeCsv(value).toString());
} |
@Override
public Boolean authenticate(final Host bookmark, final LoginCallback callback, final CancelCallback cancel)
throws BackgroundException {
final Credentials credentials = bookmark.getCredentials();
if(StringUtils.isBlank(credentials.getPassword())) {
final Credentials input = callback.prompt(bookmark, credentials.getUsername(),
String.format("%s %s", LocaleFactory.localizedString("Login", "Login"), bookmark.getHostname()),
MessageFormat.format(LocaleFactory.localizedString(
"Login {0} with username and password", "Credentials"), BookmarkNameProvider.toString(bookmark)),
// Change of username or service not allowed
new LoginOptions(bookmark.getProtocol()).user(false));
if(input.isPublicKeyAuthentication()) {
credentials.setIdentity(input.getIdentity());
return new SFTPPublicKeyAuthentication(client).authenticate(bookmark, callback, cancel);
}
credentials.setSaved(input.isSaved());
credentials.setPassword(input.getPassword());
}
return this.authenticate(bookmark, credentials, callback, cancel);
} | @Test(expected = LoginFailureException.class)
public void testAuthenticateFailure() throws Exception {
// Reconnect
session.disconnect();
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.getHost().getCredentials().setPassword("p");
assertFalse(new SFTPPasswordAuthentication(session.getClient()).authenticate(session.getHost(), new DisabledLoginCallback(), new DisabledCancelCallback()));
} |
@Override
public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment,
InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) {
validateSegmentAssignmentStrategy(instancePartitions);
return SegmentAssignmentUtils.assignSegmentWithoutReplicaGroup(currentAssignment, instancePartitions, _replication);
} | @Test
public void testTableBalanced() {
Map<String, Map<String, String>> currentAssignment = new TreeMap<>();
for (String segmentName : SEGMENTS) {
List<String> instancesAssigned =
_segmentAssignment.assignSegment(segmentName, currentAssignment, _instancePartitionsMap);
currentAssignment
.put(segmentName, SegmentAssignmentUtils.getInstanceStateMap(instancesAssigned, SegmentStateModel.ONLINE));
}
// There should be 100 segments assigned
assertEquals(currentAssignment.size(), NUM_SEGMENTS);
// Each segment should have 3 replicas
for (Map<String, String> instanceStateMap : currentAssignment.values()) {
assertEquals(instanceStateMap.size(), NUM_REPLICAS);
}
// Each instance should have 30 segments assigned
int[] numSegmentsAssignedPerInstance =
SegmentAssignmentUtils.getNumSegmentsAssignedPerInstance(currentAssignment, INSTANCES);
int[] expectedNumSegmentsAssignedPerInstance = new int[NUM_INSTANCES];
int numSegmentsPerInstance = NUM_SEGMENTS * NUM_REPLICAS / NUM_INSTANCES;
Arrays.fill(expectedNumSegmentsAssignedPerInstance, numSegmentsPerInstance);
assertEquals(numSegmentsAssignedPerInstance, expectedNumSegmentsAssignedPerInstance);
// Current assignment should already be balanced
assertEquals(
_segmentAssignment.rebalanceTable(currentAssignment, _instancePartitionsMap, null, null, new RebalanceConfig()),
currentAssignment);
} |
public static KafkaPool fromCrd(
Reconciliation reconciliation,
Kafka kafka,
KafkaNodePool pool,
NodeIdAssignment idAssignment,
Storage oldStorage,
OwnerReference ownerReference,
SharedEnvironmentProvider sharedEnvironmentProvider
) {
ModelUtils.validateComputeResources(pool.getSpec().getResources(), "KafkaNodePool.spec.resources");
StorageUtils.validatePersistentStorage(pool.getSpec().getStorage(), "KafkaNodePool.spec.storage");
KafkaPool result = new KafkaPool(reconciliation, kafka, pool, componentName(kafka, pool), ownerReference, idAssignment, sharedEnvironmentProvider);
result.gcLoggingEnabled = isGcLoggingEnabled(kafka, pool);
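// Pool-level JVM options and resource requirements take precedence over the defaults from the Kafka custom resource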
result.jvmOptions = pool.getSpec().getJvmOptions() != null ? pool.getSpec().getJvmOptions() : kafka.getSpec().getKafka().getJvmOptions();
result.resources = pool.getSpec().getResources() != null ? pool.getSpec().getResources() : kafka.getSpec().getKafka().getResources();
result.processRoles = new HashSet<>(pool.getSpec().getRoles());
if (oldStorage != null) {
Storage newStorage = pool.getSpec().getStorage();
StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, idAssignment.current(), idAssignment.desired());
if (diff.issuesDetected()) {
LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " +
"changing the deleteClaim flag, " +
"changing the kraftMetadata flag (but only one one volume can be marked to store the KRaft metadata log at a time), " +
"adding volumes to Jbod storage or removing volumes from Jbod storage, " +
"each volume in Jbod storage should have an unique ID, " +
"changing overrides to nodes which do not exist yet, " +
"and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the KafkaNodePool resource {}/{} contains changes which are not allowed. As a " +
"result, all storage changes will be ignored. Use DEBUG level logging for more information " +
"about the detected changes.", pool.getMetadata().getNamespace(), pool.getMetadata().getName());
Condition warning = StatusUtils.buildWarningCondition("KafkaStorage",
"The desired Kafka storage configuration in the KafkaNodePool resource " + pool.getMetadata().getNamespace() + "/" + pool.getMetadata().getName() + " contains changes which are not allowed. As a " +
"result, all storage changes will be ignored. Use DEBUG level logging for more information " +
"about the detected changes.");
result.warningConditions.add(warning);
result.setStorage(oldStorage);
} else {
if (!VolumeUtils.kraftMetadataPath(oldStorage).equals(VolumeUtils.kraftMetadataPath(newStorage))) {
// The volume for the KRaft metadata log is changing. We should log it.
LOGGER.warnCr(reconciliation, "The KRaft metadata log for KafkaNodePool {}/{} will be moved from volume {} to volume {}.", pool.getMetadata().getNamespace(), pool.getMetadata().getName(), VolumeUtils.kraftMetadataPath(oldStorage), VolumeUtils.kraftMetadataPath(newStorage));
}
result.setStorage(newStorage);
}
} else {
result.setStorage(pool.getSpec().getStorage());
}
// Adds the warnings about unknown or deprecated fields
result.warningConditions.addAll(StatusUtils.validate(reconciliation, pool));
if (pool.getSpec().getTemplate() != null) {
KafkaNodePoolTemplate template = pool.getSpec().getTemplate();
result.templatePersistentVolumeClaims = template.getPersistentVolumeClaim();
result.templatePodSet = template.getPodSet();
result.templatePod = template.getPod();
result.templatePerBrokerService = template.getPerPodService();
result.templatePerBrokerRoute = template.getPerPodRoute();
result.templatePerBrokerIngress = template.getPerPodIngress();
result.templateContainer = template.getKafkaContainer();
result.templateInitContainer = template.getInitContainer();
} else if (kafka.getSpec().getKafka().getTemplate() != null) {
KafkaClusterTemplate template = kafka.getSpec().getKafka().getTemplate();
result.templatePersistentVolumeClaims = template.getPersistentVolumeClaim();
result.templatePodSet = template.getPodSet();
result.templatePod = template.getPod();
result.templatePerBrokerService = template.getPerPodService();
result.templatePerBrokerRoute = template.getPerPodRoute();
result.templatePerBrokerIngress = template.getPerPodIngress();
result.templateContainer = template.getKafkaContainer();
result.templateInitContainer = template.getInitContainer();
}
return result;
} | @Test
public void testKafkaPoolConfigureOptionsThroughPoolSpec() {
KafkaNodePool pool = new KafkaNodePoolBuilder(POOL)
.editSpec()
.withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("4"), "memory", new Quantity("16Gi"))).build())
.withNewJvmOptions()
.withGcLoggingEnabled()
.withXmx("4096m")
.endJvmOptions()
.withNewTemplate()
.withNewKafkaContainer()
.addToEnv(new ContainerEnvVarBuilder().withName("MY_ENV_VAR").withValue("my-env-var-value").build())
.endKafkaContainer()
.endTemplate()
.endSpec()
.build();
KafkaPool kp = KafkaPool.fromCrd(
Reconciliation.DUMMY_RECONCILIATION,
KAFKA,
pool,
new NodeIdAssignment(Set.of(10, 11, 13), Set.of(10, 11, 13), Set.of(), Set.of(), Set.of()),
new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build(),
OWNER_REFERENCE,
SHARED_ENV_PROVIDER
);
assertThat(kp, is(notNullValue()));
assertThat(kp.componentName, is(CLUSTER_NAME + "-pool"));
assertThat(kp.storage, is(new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build()));
assertThat(kp.resources.getRequests(), is(Map.of("cpu", new Quantity("4"), "memory", new Quantity("16Gi"))));
assertThat(kp.gcLoggingEnabled, is(true));
assertThat(kp.jvmOptions.getXmx(), is("4096m"));
assertThat(kp.templateContainer.getEnv(), is(List.of(new ContainerEnvVarBuilder().withName("MY_ENV_VAR").withValue("my-env-var-value").build())));
assertThat(kp.templateInitContainer, is(nullValue()));
assertThat(kp.templatePod, is(nullValue()));
assertThat(kp.templatePerBrokerIngress, is(nullValue()));
assertThat(kp.templatePodSet, is(nullValue()));
assertThat(kp.templatePerBrokerRoute, is(nullValue()));
assertThat(kp.templatePerBrokerService, is(nullValue()));
assertThat(kp.templatePersistentVolumeClaims, is(nullValue()));
} |
PartitionRegistration getPartition(Uuid topicId, int partitionId) {
TopicControlInfo topic = topics.get(topicId);
if (topic == null) {
return null;
}
return topic.parts.get(partitionId);
} | @Test
public void testEligibleLeaderReplicas_CleanElection() {
ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder()
.setIsElrEnabled(true)
.build();
ReplicationControlManager replicationControl = ctx.replicationControl;
ctx.registerBrokers(0, 1, 2, 3);
ctx.unfenceBrokers(0, 1, 2, 3);
CreatableTopicResult createTopicResult = ctx.createTestTopic("foo",
new int[][] {new int[] {0, 1, 2, 3}});
TopicIdPartition topicIdPartition = new TopicIdPartition(createTopicResult.topicId(), 0);
assertEquals(OptionalInt.of(0), ctx.currentLeader(topicIdPartition));
ctx.alterTopicConfig("foo", TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3");
ctx.fenceBrokers(Utils.mkSet(1, 2, 3));
PartitionRegistration partition = replicationControl.getPartition(topicIdPartition.topicId(), topicIdPartition.partitionId());
assertArrayEquals(new int[]{2, 3}, partition.elr, partition.toString());
assertArrayEquals(new int[]{}, partition.lastKnownElr, partition.toString());
ctx.unfenceBrokers(2);
ctx.fenceBrokers(Utils.mkSet(0, 1));
partition = replicationControl.getPartition(topicIdPartition.topicId(), topicIdPartition.partitionId());
assertArrayEquals(new int[]{0, 3}, partition.elr, partition.toString());
assertArrayEquals(new int[]{2}, partition.isr, partition.toString());
assertEquals(2, partition.leader, partition.toString());
assertArrayEquals(new int[]{}, partition.lastKnownElr, partition.toString());
} |
@Nonnull
@Override
public Sketch<IntegerSummary> getResult() {
return unionAll();
} | @Test
public void testAccumulatorWithSingleSketch() {
IntegerSketch input = new IntegerSketch(_lgK, IntegerSummary.Mode.Sum);
IntStream.range(0, 1000).forEach(i -> input.update(i, 1));
CompactSketch<IntegerSummary> sketch = input.compact();
TupleIntSketchAccumulator accumulator = new TupleIntSketchAccumulator(_setOps, _nominalEntries, 2);
accumulator.apply(sketch);
Assert.assertFalse(accumulator.isEmpty());
Assert.assertEquals(accumulator.getResult().getEstimate(), sketch.getEstimate());
} |
@Config("functions")
@ConfigDescription("A comma-separated list of ignored functions")
public IgnoredFunctionsMismatchResolverConfig setFunctions(String functions)
{
if (functions != null) {
this.functions = Splitter.on(",").trimResults().splitToList(functions).stream()
.map(catalog -> catalog.toLowerCase(ENGLISH))
.collect(toImmutableSet());
}
return this;
} | @Test
public void testDefault()
{
assertRecordedDefaults(recordDefaults(IgnoredFunctionsMismatchResolverConfig.class)
.setFunctions(null));
} |
public static Optional<KiePMMLModel> getFromCommonDataAndTransformationDictionaryAndModelWithSourcesCompiled(final CompilationDTO compilationDTO) {
logger.trace("getFromCommonDataAndTransformationDictionaryAndModelWithSourcesCompiled {}", compilationDTO);
final Function<ModelImplementationProvider<Model, KiePMMLModel>, KiePMMLModel> modelFunction =
implementation -> implementation.getKiePMMLModelWithSourcesCompiled(compilationDTO);
return getFromCommonDataAndTransformationDictionaryAndModelWithSourcesCommon(compilationDTO.getFields(),
compilationDTO.getModel(),
modelFunction);
} | @Test
void getFromCommonDataAndTransformationDictionaryAndModelWithSourcesCompiledWithProvider() throws Exception {
pmml = getPMMLWithMiningRandomTestModel();
MiningModel parentModel = (MiningModel) pmml.getModels().get(0);
Model model = parentModel.getSegmentation().getSegments().get(0).getModel();
final CommonCompilationDTO compilationDTO =
CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
pmml,
model,
new PMMLCompilationContextMock(), "fileName");
final Optional<KiePMMLModel> retrieved =
getFromCommonDataAndTransformationDictionaryAndModelWithSourcesCompiled(compilationDTO);
assertThat(retrieved).isNotNull().isPresent();
} |
public static Object extractValue(Object object, String attributeName, boolean failOnMissingAttribute) throws Exception {
return createGetter(object, attributeName, failOnMissingAttribute).getValue(object);
} | @Test
public void extractValue_whenIntermediateFieldIsInterfaceAndDoesNotContainField_thenThrowIllegalArgumentException()
throws Exception {
OuterObject object = new OuterObject();
try {
ReflectionHelper.extractValue(object, "emptyInterface.doesNotExist", true);
fail("Non-existing field has been ignored");
} catch (QueryException e) {
// The createGetter() method catches every Throwable and wraps it in a QueryException.
// I don't think that's the right thing to do, but I don't want to change this behaviour.
// Hence, I have to use try/catch in this test instead of just declaring
// IllegalArgumentException as the expected exception.
assertEquals(IllegalArgumentException.class, e.getCause().getClass());
}
} |
public static String stripSpaces(String str) {
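// Collapses each run of whitespace outside double quotes into a single space; whitespace inside quotes is kept as-is.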
StringBuilder ret = new StringBuilder();
boolean inQuotes = false;
boolean inSpaceSequence = false;
for (char c : str.toCharArray()) {
if (Character.isWhitespace(c)) {
if (inQuotes) {
ret.append(c);
continue;
}
if (!inSpaceSequence) {
// start of space sequence
inSpaceSequence = true;
ret.append(" ");
}
} else {
if (inSpaceSequence) {
inSpaceSequence = false;
}
if (c == '\"') {
inQuotes = !inQuotes;
}
ret.append(c);
}
}
return ret.toString();
} | @Test
public void testStripSpaces() {
assertEquals("a b", ConfigUtils.stripSpaces("a b"));
assertEquals("\"a b\"", ConfigUtils.stripSpaces("\"a b\""));
assertEquals("a b \"a b\"", ConfigUtils.stripSpaces("a b \"a b\""));
assertEquals("a b", ConfigUtils.stripSpaces("a b"));
} |
@Override
public Set<RuleDescriptionSectionDto> generateSections(RulesDefinition.Rule rule) {
return getDescriptionInHtml(rule)
.map(this::generateSections)
.orElse(emptySet());
} | @Test
public void parse_moved_noncompliant_code() {
when(rule.htmlDescription()).thenReturn(DESCRIPTION + RECOMMENTEDCODINGPRACTICE + NONCOMPLIANTCODE + SEE);
Set<RuleDescriptionSectionDto> results = generator.generateSections(rule);
Map<String, String> sectionKeyToContent = results.stream().collect(toMap(RuleDescriptionSectionDto::getKey, RuleDescriptionSectionDto::getContent));
assertThat(sectionKeyToContent).hasSize(4)
.containsEntry(DEFAULT_SECTION_KEY, rule.htmlDescription())
.containsEntry(ROOT_CAUSE_SECTION_KEY, DESCRIPTION)
.containsEntry(ASSESS_THE_PROBLEM_SECTION_KEY, NONCOMPLIANTCODE)
.containsEntry(HOW_TO_FIX_SECTION_KEY, RECOMMENTEDCODINGPRACTICE + SEE);
} |
@Override
public void moveTo(long position) throws IllegalArgumentException {
if (position < 0 || length() < position) {
throw new IllegalArgumentException("Position out of the bounds of the file!");
}
fp = position;
} | @Test
public void available() throws IOException {
int amount = 12;
ss.moveTo(text.length - amount);
assertEquals(amount, ss.availableExact());
} |
@CanIgnoreReturnValue
public Replacements add(Replacement replacement) {
return add(replacement, CoalescePolicy.REJECT);
} | @Test
public void zeroLengthRangeOverlaps() {
Replacements replacements = new Replacements();
replacements.add(Replacement.create(1, 1, "Something"));
Replacement around = Replacement.create(0, 2, "Around");
assertThrows(IllegalArgumentException.class, () -> replacements.add(around));
} |
@Override
public AppToken createAppToken(long appId, String privateKey) {
Algorithm algorithm = readApplicationPrivateKey(appId, privateKey);
LocalDateTime now = LocalDateTime.now(clock);
// Expiration period is configurable and could be greater if needed.
// See https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
LocalDateTime expiresAt = now.plus(AppToken.EXPIRATION_PERIOD_IN_MINUTES, ChronoUnit.MINUTES);
ZoneOffset offset = clock.getZone().getRules().getOffset(now);
Date nowDate = Date.from(now.toInstant(offset));
Date expiresAtDate = Date.from(expiresAt.toInstant(offset));
JWTCreator.Builder builder = JWT.create()
.withIssuer(String.valueOf(appId))
.withIssuedAt(nowDate)
.withExpiresAt(expiresAtDate);
return new AppToken(builder.sign(algorithm));
} | @Test
public void getApplicationJWTToken_throws_ISE_if_conf_is_not_complete() {
GithubAppConfiguration githubAppConfiguration = createAppConfiguration(false);
assertThatThrownBy(() -> underTest.createAppToken(githubAppConfiguration.getId(), githubAppConfiguration.getPrivateKey()))
.isInstanceOf(IllegalStateException.class);
} |
public ChannelFuture handshake(Channel channel, FullHttpRequest req) {
return handshake(channel, req, null, channel.newPromise());
} | @Test
public void testHandshakeForHttpRequestWithoutAggregator() {
EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder(), new HttpResponseEncoder());
WebSocketServerHandshaker serverHandshaker = newHandshaker("ws://example.com/chat",
"chat", WebSocketDecoderConfig.DEFAULT);
HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/chat");
request.headers()
.set(HttpHeaderNames.HOST, "example.com")
.set(HttpHeaderNames.ORIGIN, "example.com")
.set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET)
.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE)
.set(HttpHeaderNames.SEC_WEBSOCKET_KEY, "dGhlIHNhbXBsZSBub25jZQ==")
.set(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, "http://example.com")
.set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, "chat, superchat")
.set(HttpHeaderNames.SEC_WEBSOCKET_KEY1, "4 @1 46546xW%0l 1 5")
.set(HttpHeaderNames.SEC_WEBSOCKET_KEY2, "12998 5 Y3 1 .P00")
.set(HttpHeaderNames.WEBSOCKET_PROTOCOL, "chat, superchat")
.set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, webSocketVersion().toAsciiString());
ChannelFuture future = serverHandshaker.handshake(channel, request);
assertFalse(future.isDone());
assertNotNull(channel.pipeline().get("handshaker"));
if (webSocketVersion() != WebSocketVersion.V00) {
assertNull(channel.pipeline().get("httpAggregator"));
channel.writeInbound(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
assertNotNull(channel.pipeline().get("httpAggregator"));
channel.writeInbound(new DefaultLastHttpContent(
Unpooled.copiedBuffer("^n:ds[4U", CharsetUtil.US_ASCII)));
}
assertTrue(future.isDone());
assertNull(channel.pipeline().get("handshaker"));
ByteBuf byteBuf = channel.readOutbound();
assertFalse(channel.finish());
channel = new EmbeddedChannel(new HttpResponseDecoder());
assertTrue(channel.writeInbound(byteBuf));
HttpResponse response = channel.readInbound();
assertEquals(SWITCHING_PROTOCOLS, response.status());
assertTrue(response.headers().containsValue(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET, true));
LastHttpContent lastHttpContent = channel.readInbound();
if (webSocketVersion() != WebSocketVersion.V00) {
assertEquals(LastHttpContent.EMPTY_LAST_CONTENT, lastHttpContent);
} else {
assertEquals("8jKS'y:G*Co,Wxa-", lastHttpContent.content().toString(CharsetUtil.US_ASCII));
assertTrue(lastHttpContent.release());
}
assertFalse(channel.finish());
} |
static public int convert(ILoggingEvent event) {
Level level = event.getLevel();
switch (level.levelInt) {
case Level.ERROR_INT:
return SyslogConstants.ERROR_SEVERITY;
case Level.WARN_INT:
return SyslogConstants.WARNING_SEVERITY;
case Level.INFO_INT:
return SyslogConstants.INFO_SEVERITY;
case Level.DEBUG_INT:
case Level.TRACE_INT:
return SyslogConstants.DEBUG_SEVERITY;
default:
throw new IllegalArgumentException("Level " + level
+ " is not a valid level for a printing method");
}
} | @Test
public void smoke() {
assertEquals(SyslogConstants.DEBUG_SEVERITY, LevelToSyslogSeverity
.convert(createEventOfLevel(Level.TRACE)));
assertEquals(SyslogConstants.DEBUG_SEVERITY, LevelToSyslogSeverity
.convert(createEventOfLevel(Level.DEBUG)));
assertEquals(SyslogConstants.INFO_SEVERITY, LevelToSyslogSeverity
.convert(createEventOfLevel(Level.INFO)));
assertEquals(SyslogConstants.WARNING_SEVERITY, LevelToSyslogSeverity
.convert(createEventOfLevel(Level.WARN)));
assertEquals(SyslogConstants.ERROR_SEVERITY, LevelToSyslogSeverity
.convert(createEventOfLevel(Level.ERROR)));
} |
@Override
public HttpServletRequest readRequest(AwsProxyRequest request, SecurityContext securityContext, Context lambdaContext, ContainerConfig config)
throws InvalidRequestEventException {
// Expect the HTTP method and context to be populated. If they are not, we are handling an
// unsupported event type.
if (request.getHttpMethod() == null || request.getHttpMethod().equals("") || request.getRequestContext() == null) {
throw new InvalidRequestEventException(INVALID_REQUEST_ERROR);
}
request.setPath(stripBasePath(request.getPath(), config));
if (request.getMultiValueHeaders() != null && request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE) != null) {
String contentType = request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE);
// putSingle, as we always expect one and only one content type in a request.
request.getMultiValueHeaders().putSingle(HttpHeaders.CONTENT_TYPE, getContentTypeWithCharset(contentType, config));
}
AwsProxyHttpServletRequest servletRequest = new AwsProxyHttpServletRequest(request, lambdaContext, securityContext, config);
servletRequest.setServletContext(servletContext);
servletRequest.setAttribute(API_GATEWAY_CONTEXT_PROPERTY, request.getRequestContext());
servletRequest.setAttribute(API_GATEWAY_STAGE_VARS_PROPERTY, request.getStageVariables());
servletRequest.setAttribute(API_GATEWAY_EVENT_PROPERTY, request);
servletRequest.setAttribute(ALB_CONTEXT_PROPERTY, request.getRequestContext().getElb());
servletRequest.setAttribute(LAMBDA_CONTEXT_PROPERTY, lambdaContext);
servletRequest.setAttribute(JAX_SECURITY_CONTEXT_PROPERTY, securityContext);
return servletRequest;
} | @Test
void readRequest_invalidEventEmptyMethod_expectException() {
try {
AwsProxyRequest req = new AwsProxyRequestBuilder("/path", null).build();
reader.readRequest(req, null, null, ContainerConfig.defaultConfig());
fail("Expected InvalidRequestEventException");
} catch (InvalidRequestEventException e) {
assertEquals(AwsProxyHttpServletRequestReader.INVALID_REQUEST_ERROR, e.getMessage());
}
} |
public void register() {
if (StringUtils.isEmpty(RegisterContext.INSTANCE.getClientInfo().getServiceId())) {
LOGGER.warning("No service to register for nacos client...");
return;
}
String serviceId = RegisterContext.INSTANCE.getClientInfo().getServiceId();
String group = nacosRegisterConfig.getGroup();
instance = nacosServiceManager.buildNacosInstanceFromRegistration();
try {
NamingService namingService = nacosServiceManager.getNamingService();
namingService.registerInstance(serviceId, group, instance);
LOGGER.log(Level.INFO, String.format(Locale.ENGLISH, "registry success, group={%s},serviceId={%s},"
+ "instanceIp={%s},instancePort={%s} register finished", group, serviceId, instance.getIp(),
instance.getPort()));
} catch (NacosException e) {
LOGGER.log(Level.SEVERE, String.format(Locale.ENGLISH, "failed when registry service,serviceId={%s}",
serviceId), e);
}
} | @Test
public void testRegister() throws NacosException {
mockNamingService();
nacosClient.register();
Assert.assertNotNull(ReflectUtils.getFieldValue(nacosClient, "instance"));
} |
public FEELFnResult<List<Object>> invoke(@ParameterName("list") Object[] lists) {
if ( lists == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "lists", "cannot be null"));
}
final Set<Object> resultSet = new LinkedHashSet<>();
for ( final Object list : lists ) {
if ( list instanceof Collection ) {
resultSet.addAll((Collection) list);
} else {
resultSet.add(list);
}
}
// spec requires us to return a new list
return FEELFnResult.ofResult( new ArrayList<>(resultSet) );
} | @Test
void invokeEmptyArray() {
FunctionTestUtil.assertResultList(unionFunction.invoke(new Object[]{}), Collections.emptyList());
} |
public static boolean validateCSConfiguration(
final Configuration oldConfParam, final Configuration newConf,
final RMContext rmContext) throws IOException {
// ensure that the oldConf is deep copied
Configuration oldConf = new Configuration(oldConfParam);
QueueMetrics.setConfigurationValidation(oldConf, true);
QueueMetrics.setConfigurationValidation(newConf, true);
CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler();
CapacityScheduler newCs = new CapacityScheduler();
try {
//TODO: extract all the validation steps and replace reinitialize with
//the specific validation steps
newCs.setConf(oldConf);
newCs.setRMContext(rmContext);
newCs.init(oldConf);
newCs.addNodes(liveScheduler.getAllNodes());
newCs.reinitialize(newConf, rmContext, true);
return true;
} finally {
newCs.stop();
}
} | @Test
public void testValidateCSConfigInvalidCapacity() {
Configuration oldConfig = CapacitySchedulerConfigGeneratorForTest
.createBasicCSConfiguration();
Configuration newConfig = new Configuration(oldConfig);
newConfig
.set("yarn.scheduler.capacity.root.test1.capacity", "500");
RMContext rmContext = prepareRMContext();
try {
CapacitySchedulerConfigValidator
.validateCSConfiguration(oldConfig, newConfig, rmContext);
fail("Invalid capacity");
} catch (IOException e) {
Assert.assertTrue(e.getCause().getMessage()
.startsWith("Illegal capacity"));
}
} |
@Override
@CacheEvict(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#id")
public void deleteMailAccount(Long id) {
// verify the account exists
validateMailAccountExists(id);
// verify there is no mail template referencing this account
if (mailTemplateService.getMailTemplateCountByAccountId(id) > 0) {
throw exception(MAIL_ACCOUNT_RELATE_TEMPLATE_EXISTS);
}
// delete the account
mailAccountMapper.deleteById(id);
mailAccountMapper.deleteById(id);
} | @Test
public void testDeleteMailAccount_success() {
// mock data
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount);// @Sql: insert an existing record first
// prepare parameters
Long id = dbMailAccount.getId();
// mock the method (no associated templates)
when(mailTemplateService.getMailTemplateCountByAccountId(eq(id))).thenReturn(0L);
// invoke
mailAccountService.deleteMailAccount(id);
// verify the record no longer exists
assertNull(mailAccountMapper.selectById(id));
} |
@Override
public double score(int[] truth, int[] prediction) {
return of(truth, prediction, strategy);
} | @Test
public void testMacro() {
System.out.println("Macro-Precision");
int[] truth = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5
};
int[] prediction = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 2, 2, 3, 1, 3, 3, 3, 4, 5, 4, 4, 4, 4, 1, 5, 5
};
Precision instance = new Precision(Averaging.Macro);
double expResult = 0.8727;
double result = instance.score(truth, prediction);
assertEquals(expResult, result, 1E-4);
} |
public static KTableHolder<GenericKey> build(
final KGroupedStreamHolder groupedStream,
final StreamAggregate aggregate,
final RuntimeBuildContext buildContext,
final MaterializedFactory materializedFactory) {
return build(
groupedStream,
aggregate,
buildContext,
materializedFactory,
new AggregateParamsFactory()
);
} | @Test
public void shouldBuildSchemaCorrectlyForWindowedAggregate() {
// Given:
givenHoppingWindowedAggregate();
// When:
final KTableHolder<?> result = windowedAggregate.build(planBuilder, planInfo);
// Then:
assertThat(result.getSchema(), is(OUTPUT_SCHEMA));
} |
String getUrl() {
return "http://" + this.httpServer.getInetAddress().getHostAddress() + ":" + this.httpServer.getLocalPort();
} | @Test
public void action_is_matched_on_URL_with_parameters() throws IOException {
Response response = call(underTest.getUrl() + "/pompom?toto=2");
assertIsPomPomResponse(response);
} |
static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) {
long deltaMs = deadlineMs - now;
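// Clamp the delta into the int range so the narrowing cast below cannot overflow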
if (deltaMs > Integer.MAX_VALUE)
deltaMs = Integer.MAX_VALUE;
else if (deltaMs < Integer.MIN_VALUE)
deltaMs = Integer.MIN_VALUE;
return (int) deltaMs;
} | @Test
public void testCalcTimeoutMsRemainingAsInt() {
assertEquals(0, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1000));
assertEquals(100, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1100));
assertEquals(Integer.MAX_VALUE, KafkaAdminClient.calcTimeoutMsRemainingAsInt(0, Long.MAX_VALUE));
assertEquals(Integer.MIN_VALUE, KafkaAdminClient.calcTimeoutMsRemainingAsInt(Long.MAX_VALUE, 0));
} |
@Override
public Range<T> firstRange() {
return rangeSet.firstRange();
} | @Test
public void testFirstRange() {
set = new RangeSetWrapper<>(consumer, reverseConvert, managedCursor);
assertNull(set.firstRange());
set.addOpenClosed(0, 97, 0, 99);
assertEquals(set.firstRange(), Range.openClosed(new LongPair(0, 97), new LongPair(0, 99)));
assertEquals(set.size(), 1);
set.addOpenClosed(0, 98, 0, 105);
assertEquals(set.firstRange(), Range.openClosed(new LongPair(0, 97), new LongPair(0, 105)));
assertEquals(set.size(), 1);
set.addOpenClosed(0, 5, 0, 75);
assertEquals(set.firstRange(), Range.openClosed(new LongPair(0, 5), new LongPair(0, 75)));
assertEquals(set.size(), 2);
} |
@Override
public int deleteUndoLogByLogCreated(Date logCreated, int limitRows, Connection conn) throws SQLException {
return super.deleteUndoLogByLogCreated(logCreated, limitRows, conn);
} | @Test
public void testDeleteUndoLogByLogCreated() throws SQLException {
Assertions.assertEquals(0, undoLogManager.deleteUndoLogByLogCreated(new Date(), 3000, dataSource.getConnection()));
Assertions.assertDoesNotThrow(() -> undoLogManager.deleteUndoLogByLogCreated(new Date(), 3000, connectionProxy));
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testBank32nhLS() {
test(Loss.ls(), "bank32nh", Bank32nh.formula, Bank32nh.data, 0.0845);
} |
public double getLatitudeFromY01(final double pY01, boolean wrapEnabled) {
final double latitude = getLatitudeFromY01(wrapEnabled ? Clip(pY01, 0, 1) : pY01);
return wrapEnabled ? Clip(latitude, getMinLatitude(), getMaxLatitude()) : latitude;
} | @Test
public void testGetLatitudeFromY01() {
checkLatitude(tileSystem.getMaxLatitude(), tileSystem.getLatitudeFromY01(0, true));
checkLatitude(0, tileSystem.getLatitudeFromY01(0.5, true));
checkLatitude(tileSystem.getMinLatitude(), tileSystem.getLatitudeFromY01(1, true));
} |
public B group(String group) {
this.group = group;
return getThis();
} | @Test
void group() {
ServiceBuilder builder = new ServiceBuilder();
builder.group("group");
Assertions.assertEquals("group", builder.build().getGroup());
} |
public static <P> Builder<P> newBuilder() {
return new Builder<P>();
} | @Test void noRulesOk() {
ParameterizedSampler.<Boolean>newBuilder().build();
} |
@Override
public void upgrade() {
Optional<IndexSetTemplate> defaultIndexSetTemplate = indexSetDefaultTemplateService.getDefaultIndexSetTemplate();
if (defaultIndexSetTemplate.isEmpty()) {
IndexSetsDefaultConfiguration legacyDefaultConfig = clusterConfigService.get(IndexSetsDefaultConfiguration.class);
if (legacyDefaultConfig == null) {
saveDefaultTemplate(factory.create());
} else {
saveDefaultTemplate(createTemplateConfig(legacyDefaultConfig));
removeLegacyConfig();
}
} else {
LOG.debug("Migration already completed.");
}
} | @Test
void testDefaultConfigWithDataTieringAndUseLegacyRotation() throws JsonProcessingException {
when(clusterConfigService.get(IndexSetsDefaultConfiguration.class)).thenReturn(readLegacyConfig(CONFIG_USE_LEGACY_FALSE));
IndexSetTemplateConfig defaultConfiguration = readConfig(CONFIG_USE_LEGACY_FALSE);
underTest.upgrade();
verify(indexSetDefaultTemplateService).createAndSaveDefault(createTemplate(defaultConfiguration));
verify(clusterConfigService).remove(IndexSetsDefaultConfiguration.class);
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void between() {
String inputExpression = "x between 10+y and 3**z";
BaseNode between = parse( inputExpression );
assertThat( between).isInstanceOf(BetweenNode.class);
assertThat( between.getResultType()).isEqualTo(BuiltInType.BOOLEAN);
assertThat( between.getText()).isEqualTo(inputExpression);
BetweenNode btw = (BetweenNode) between;
assertThat( btw.getValue()).isInstanceOf(NameRefNode.class);
assertThat( btw.getValue().getText()).isEqualTo("x");
assertThat( btw.getStart()).isInstanceOf(InfixOpNode.class);
assertThat( btw.getStart().getText()).isEqualTo( "10+y");
assertThat( btw.getEnd()).isInstanceOf(InfixOpNode.class);
assertThat( btw.getEnd().getText()).isEqualTo( "3**z");
} |
public HttpResponseDecoderSpec parseHttpAfterConnectRequest(boolean parseHttpAfterConnectRequest) {
this.parseHttpAfterConnectRequest = parseHttpAfterConnectRequest;
return this;
} | @Test
void parseHttpAfterConnectRequest() {
checkDefaultParseHttpAfterConnectRequest(conf);
conf.parseHttpAfterConnectRequest(true);
assertThat(conf.parseHttpAfterConnectRequest).as("parse http after connect request").isTrue();
checkDefaultMaxInitialLineLength(conf);
checkDefaultMaxHeaderSize(conf);
checkDefaultMaxChunkSize(conf);
checkDefaultValidateHeaders(conf);
checkDefaultInitialBufferSize(conf);
checkDefaultAllowDuplicateContentLengths(conf);
checkDefaultFailOnMissingResponse(conf);
checkDefaultH2cMaxContentLength(conf);
} |
protected String decideSource(MappedMessage cef, RawMessage raw) {
// Try getting the host name from the CEF extension "deviceAddress"/"dvc"
final Map<String, Object> fields = cef.mappedExtensions();
if (fields != null && !fields.isEmpty()) {
final String deviceAddress = (String) fields.getOrDefault(CEFMapping.dvc.getFullName(), fields.get(CEFMapping.dvc.getKeyName()));
if (!isNullOrEmpty(deviceAddress)) {
return deviceAddress;
}
}
// Try getting the hostname from the CEF message metadata (e.g. syslog)
if (!isNullOrEmpty(cef.host())) {
return cef.host();
}
// Use raw message source information if we were not able to parse a source from the CEF extensions.
final ResolvableInetSocketAddress address = raw.getRemoteAddress();
final InetSocketAddress remoteAddress;
if (address == null) {
remoteAddress = null;
} else {
remoteAddress = address.getInetSocketAddress();
}
return remoteAddress == null ? "unknown" : remoteAddress.getAddress().toString();
} | @Test
public void decideSourceWithoutDeviceAddressReturnsRawMessageRemoteAddress() throws Exception {
final MappedMessage cefMessage = mock(MappedMessage.class);
when(cefMessage.mappedExtensions()).thenReturn(Collections.emptyMap());
final RawMessage rawMessage = new RawMessage(new byte[0], new InetSocketAddress("128.66.23.42", 12345));
// The hostname is unresolved, so we have to add the leading slash. Oh, Java...
assertEquals("/128.66.23.42", codec.decideSource(cefMessage, rawMessage));
} |
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
if ( val == null ) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( TypeUtil.formatValue(val, false) );
}
} | @Test
void invokeZonedDateTime() {
final ZonedDateTime zonedDateTime = ZonedDateTime.now();
FunctionTestUtil.assertResult(stringFunction.invoke(zonedDateTime),
DateAndTimeFunction.REGION_DATETIME_FORMATTER.format(zonedDateTime));
} |
public static IpPrefix valueOf(int address, int prefixLength) {
return new IpPrefix(IpAddress.valueOf(address), prefixLength);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfByteArrayNegativePrefixLengthIPv4() {
IpPrefix ipPrefix;
byte[] value;
value = new byte[] {1, 2, 3, 4};
ipPrefix = IpPrefix.valueOf(IpAddress.Version.INET, value, -1);
} |
static int determineOperatorReservoirSize(int operatorParallelism, int numPartitions) {
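// Split the over-sampled coordinator reservoir evenly across the operator subtasks, rounding up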
int coordinatorReservoirSize = determineCoordinatorReservoirSize(numPartitions);
int totalOperatorSamples = coordinatorReservoirSize * OPERATOR_OVER_SAMPLE_RATIO;
return (int) Math.ceil((double) totalOperatorSamples / operatorParallelism);
} | @Test
public void testOperatorReservoirSize() {
assertThat(SketchUtil.determineOperatorReservoirSize(5, 3))
.isEqualTo((10_002 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 5);
assertThat(SketchUtil.determineOperatorReservoirSize(123, 123))
.isEqualTo((123_00 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 123);
assertThat(SketchUtil.determineOperatorReservoirSize(256, 123))
.isEqualTo(
(int) Math.ceil((double) (123_00 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 256));
assertThat(SketchUtil.determineOperatorReservoirSize(5_120, 10_123))
.isEqualTo(
(int) Math.ceil((double) (992_054 * SketchUtil.OPERATOR_OVER_SAMPLE_RATIO) / 5_120));
} |
@Udf
public <T> String toJsonString(@UdfParameter final T input) {
return toJson(input);
} | @Test
public void shouldSerializeBytes() {
// When:
final String result = udf.toJsonString(ByteBuffer.allocate(4).putInt(1097151));
// Then:
assertEquals("\"ABC9vw==\"", result);
} |
public int getSequenceCount() {
return this.sequenceCount;
} | @Test
public void testGetSequenceCount() throws Exception {
assertEquals(4, buildChunk().getSequenceCount());
} |
public static String toHexStringWithPrefix(BigInteger value) {
return HEX_PREFIX + value.toString(16);
} | @Test
public void testToHexStringWithPrefix() {
assertEquals(Numeric.toHexStringWithPrefix(BigInteger.TEN), ("0xa"));
assertEquals(Numeric.toHexStringWithPrefix(BigInteger.valueOf(1024)), ("0x400"));
assertEquals(Numeric.toHexStringWithPrefix(BigInteger.valueOf(65)), ("0x41"));
assertEquals(Numeric.toHexStringWithPrefix(BigInteger.valueOf(0)), ("0x0"));
} |
public List<R> scanForResourcesUri(URI classpathResourceUri) {
requireNonNull(classpathResourceUri, "classpathResourceUri must not be null");
if (CLASSPATH_SCHEME.equals(classpathResourceUri.getScheme())) {
return scanForClasspathResource(resourceName(classpathResourceUri), NULL_FILTER);
}
return findResourcesForUri(classpathResourceUri, DEFAULT_PACKAGE_NAME, NULL_FILTER, createUriResource());
} | @Test
void scanForResourcesClasspathPackageUri() {
URI uri = URI.create("classpath:io/cucumber/core/resource");
List<URI> resources = resourceScanner.scanForResourcesUri(uri);
assertThat(resources, containsInAnyOrder(
URI.create("classpath:io/cucumber/core/resource/test/resource.txt"),
URI.create("classpath:io/cucumber/core/resource/test/other-resource.txt"),
URI.create("classpath:io/cucumber/core/resource/test/spaces%20in%20name%20resource.txt")));
} |
@Override
public long getMaxMigrationNumber() {
return steps.get(steps.size() - 1).getMigrationNumber();
} | @Test
public void getMaxMigrationNumber_returns_migration_of_last_step_in_constructor_list_argument() {
assertThat(underTest.getMaxMigrationNumber()).isEqualTo(8L);
assertThat(unorderedSteps.getMaxMigrationNumber()).isOne();
} |
public RuntimeOptionsBuilder parse(String... args) {
return parse(Arrays.asList(args));
} | @Test
void assigns_feature_paths() {
RuntimeOptions options = parser
.parse("somewhere_else")
.build();
assertThat(options.getFeaturePaths(), contains(new File("somewhere_else").toURI()));
} |
@Override
public boolean match(final String rule) {
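// Accepts rules of the form "zh|<min>-<max>", e.g. "zh|10-15"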
return rule.matches("^zh\\|\\d+-\\d+$");
} | @Test
public void match() {
assertTrue(generator.match("zh|10-15"));
assertFalse(generator.match("zh"));
assertFalse(generator.match("zh|"));
assertFalse(generator.match("zh|10.1-15"));
} |
@Override
public List<Document> get() {
try (var input = markdownResource.getInputStream()) {
Node node = parser.parseReader(new InputStreamReader(input));
DocumentVisitor documentVisitor = new DocumentVisitor(config);
node.accept(documentVisitor);
return documentVisitor.getDocuments();
}
catch (IOException e) {
throw new RuntimeException(e);
}
} | @Test
void testBlockquote() {
MarkdownDocumentReader reader = new MarkdownDocumentReader("classpath:/blockquote.md");
List<Document> documents = reader.get();
assertThat(documents).hasSize(2)
.extracting(Document::getMetadata, Document::getContent)
.containsOnly(tuple(Map.of(),
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur diam eros, laoreet sit amet cursus vitae, varius sed nisi. Cras sit amet quam quis velit commodo porta consectetur id nisi. Phasellus tincidunt pulvinar augue."),
tuple(Map.of("category", "blockquote"),
"Proin vel laoreet leo, sed luctus augue. Sed et ligula commodo, commodo lacus at, consequat turpis. Maecenas eget sapien odio. Maecenas urna lectus, pellentesque in accumsan aliquam, congue eu libero. Ut rhoncus nec justo a porttitor. Pellentesque auctor pharetra eros, viverra sodales lorem aliquet id. Curabitur semper nisi vel sem interdum suscipit."));
} |
public static byte[] decryptDES(byte[] data, byte[] key) {
return desTemplate(data, key, DES_Algorithm, DES_Transformation, false);
} | @Test
public void testDecryptDES() throws Exception {
TestCase.assertTrue(
Arrays.equals(
bytesDataDES,
EncryptKit.decryptDES(bytesResDES, bytesKeyDES)
)
);
TestCase.assertTrue(
Arrays.equals(
bytesDataDES,
EncryptKit.decryptHexStringDES(resDES, bytesKeyDES)
)
);
TestCase.assertTrue(
Arrays.equals(
bytesDataDES,
EncryptKit.decryptBase64DES(Base64.getEncoder().encode(bytesResDES), bytesKeyDES)
)
);
} |
public String abbreviate(String fqClassName) {
StringBuilder buf = new StringBuilder(targetLength);
if (fqClassName == null) {
throw new IllegalArgumentException("Class name may not be null");
}
int inLen = fqClassName.length();
if (inLen < targetLength) {
return fqClassName;
}
int[] dotIndexesArray = new int[ClassicConstants.MAX_DOTS];
// a.b.c contains 2 dots but 2+1 parts.
// see also http://jira.qos.ch/browse/LBCLASSIC-110
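// e.g. "com.logback.Foobar" contains 2 dots and 3 parts: "com", "logback", "Foobar"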
int[] lengthArray = new int[ClassicConstants.MAX_DOTS + 1];
int dotCount = computeDotIndexes(fqClassName, dotIndexesArray);
// System.out.println();
// System.out.println("Dot count for [" + className + "] is " + dotCount);
// if there are no dots then abbreviation is not possible
if (dotCount == 0) {
return fqClassName;
}
// printArray("dotArray: ", dotArray);
computeLengthArray(fqClassName, dotIndexesArray, lengthArray, dotCount);
// printArray("lengthArray: ", lengthArray);
for (int i = 0; i <= dotCount; i++) {
if (i == 0) {
buf.append(fqClassName.substring(0, lengthArray[i] - 1));
} else {
buf.append(fqClassName.substring(dotIndexesArray[i - 1],
dotIndexesArray[i - 1] + lengthArray[i]));
}
// System.out.println("i=" + i + ", buf=" + buf);
}
return buf.toString();
} | @Test
public void testXDot() {
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(21);
String name = "com.logback.wombat.alligator.Foobar";
assertEquals("c.l.w.a.Foobar", abbreviator.abbreviate(name));
}
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(22);
String name = "com.logback.wombat.alligator.Foobar";
assertEquals("c.l.w.alligator.Foobar", abbreviator.abbreviate(name));
}
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
String name = "com.logback.wombat.alligator.tomato.Foobar";
assertEquals("c.l.w.a.t.Foobar", abbreviator.abbreviate(name));
}
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(21);
String name = "com.logback.wombat.alligator.tomato.Foobar";
assertEquals("c.l.w.a.tomato.Foobar", abbreviator.abbreviate(name));
}
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(29);
String name = "com.logback.wombat.alligator.tomato.Foobar";
assertEquals("c.l.w.alligator.tomato.Foobar", abbreviator.abbreviate(name));
}
} |
public static List<String> getUnixGroups(String user) throws IOException {
String effectiveGroupsResult;
String allGroupsResult;
List<String> groups = new ArrayList<>();
try {
effectiveGroupsResult = ShellUtils.execCommand(
ShellUtils.getEffectiveGroupsForUserCommand(user));
allGroupsResult = ShellUtils.execCommand(
ShellUtils.getAllGroupsForUserCommand(user));
} catch (ExitCodeException e) {
// if we didn't get the groups, just return an empty list
LOG.warn("got exception trying to get groups for user {}: {}", user, e.toString());
return groups;
}
StringTokenizer tokenizer = new StringTokenizer(
effectiveGroupsResult, ShellUtils.TOKEN_SEPARATOR_REGEX);
while (tokenizer.hasMoreTokens()) {
groups.add(tokenizer.nextToken());
}
tokenizer = new StringTokenizer(allGroupsResult, ShellUtils.TOKEN_SEPARATOR_REGEX);
while (tokenizer.hasMoreTokens()) {
groups.add(tokenizer.nextToken());
}
return groups;
} | @Test
public void userGroup() throws Throwable {
String userName = "alluxio-user1";
String userEffectiveGroup1 = "alluxio-user1-effective-group1";
String userEffectiveGroup2 = "alluxio-user1-effective-group2";
String userAllGroup1 = "alluxio-user1-all-group1";
String userAllGroup2 = "alluxio-user1-all-group2";
List<String> userEffectiveGroups = new ArrayList<>();
List<String> userAllGroups = new ArrayList<>();
userEffectiveGroups.add(userEffectiveGroup1);
userEffectiveGroups.add(userEffectiveGroup2);
userAllGroups.add(userAllGroup1);
userAllGroups.add(userAllGroup2);
setupShellMocks(userName, userEffectiveGroups, userAllGroups);
List<String> groups = CommonUtils.getUnixGroups(userName);
assertNotNull(groups);
assertEquals(groups.size(), 4);
assertEquals(groups.get(0), userEffectiveGroup1);
assertEquals(groups.get(1), userEffectiveGroup2);
assertEquals(groups.get(2), userAllGroup1);
assertEquals(groups.get(3), userAllGroup2);
} |
@Override
public Collection<LocalDataQueryResultRow> getRows(final ExportStorageNodesStatement sqlStatement, final ContextManager contextManager) {
checkSQLStatement(contextManager.getMetaDataContexts().getMetaData(), sqlStatement);
String exportedData = generateExportData(contextManager.getMetaDataContexts().getMetaData(), sqlStatement);
if (sqlStatement.getFilePath().isPresent()) {
String filePath = sqlStatement.getFilePath().get();
ExportUtils.exportToFile(filePath, exportedData);
return Collections.singleton(new LocalDataQueryResultRow(contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId(), LocalDateTime.now(),
String.format("Successfully exported to:'%s'", filePath)));
}
return Collections.singleton(
new LocalDataQueryResultRow(contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId(), LocalDateTime.now(), exportedData));
} | @Test
void assertExecuteWithEmptyMetaData() {
ContextManager contextManager = mockEmptyContextManager();
when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
when(ProxyContext.getInstance().getAllDatabaseNames()).thenReturn(Collections.singleton("empty_metadata"));
ExportStorageNodesStatement sqlStatement = new ExportStorageNodesStatement(null, null);
Collection<LocalDataQueryResultRow> actual = new ExportStorageNodesExecutor().getRows(sqlStatement, contextManager);
assertThat(actual.size(), is(1));
LocalDataQueryResultRow row = actual.iterator().next();
assertThat(row.getCell(3), is("{\"storage_nodes\":{}}"));
} |
public String decrypt(String encryptedText) {
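// Only values of the form {algorithm}payload are decrypted; non-matching input or an unknown algorithm is returned unchanged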
Matcher matcher = ENCRYPTED_PATTERN.matcher(encryptedText);
if (matcher.matches()) {
Cipher cipher = ciphers.get(matcher.group(1).toLowerCase(Locale.ENGLISH));
if (cipher != null) {
return cipher.decrypt(matcher.group(2));
}
}
return encryptedText;
} | @Test
public void decrypt_unknown_algorithm() {
Encryption encryption = new Encryption(null);
assertThat(encryption.decrypt("{xxx}Zm9v")).isEqualTo("{xxx}Zm9v");
} |
public static int[] computePhysicalIndicesOrTimeAttributeMarkers(
TableSource<?> tableSource,
List<TableColumn> logicalColumns,
boolean streamMarkers,
Function<String, String> nameRemapping) {
Optional<String> proctimeAttribute = getProctimeAttribute(tableSource);
List<String> rowtimeAttributes = getRowtimeAttributes(tableSource);
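// Time attribute (rowtime/proctime) columns are virtual: physical indices are computed only for the remaining columns, while time attributes get marker values below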
List<TableColumn> columnsWithoutTimeAttributes =
logicalColumns.stream()
.filter(
col ->
!rowtimeAttributes.contains(col.getName())
&& proctimeAttribute
.map(attr -> !attr.equals(col.getName()))
.orElse(true))
.collect(Collectors.toList());
Map<TableColumn, Integer> columnsToPhysicalIndices =
TypeMappingUtils.computePhysicalIndices(
columnsWithoutTimeAttributes.stream(),
tableSource.getProducedDataType(),
nameRemapping);
return logicalColumns.stream()
.mapToInt(
logicalColumn -> {
if (proctimeAttribute
.map(attr -> attr.equals(logicalColumn.getName()))
.orElse(false)) {
verifyTimeAttributeType(logicalColumn, "Proctime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.PROCTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER;
}
} else if (rowtimeAttributes.contains(logicalColumn.getName())) {
verifyTimeAttributeType(logicalColumn, "Rowtime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.ROWTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER;
}
} else {
return columnsToPhysicalIndices.get(logicalColumn);
}
})
.toArray();
} | @Test
void testWrongLogicalTypeForProctimeAttribute() {
TestTableSource tableSource =
new TestTableSource(
DataTypes.BIGINT(), Collections.singletonList("rowtime"), "proctime");
assertThatThrownBy(
() ->
TypeMappingUtils.computePhysicalIndicesOrTimeAttributeMarkers(
tableSource,
TableSchema.builder()
.field("a", Types.LONG)
.field("rowtime", Types.SQL_TIMESTAMP)
.field("proctime", Types.SQL_TIME)
.build()
.getTableColumns(),
false,
Function.identity()))
.isInstanceOf(ValidationException.class)
.hasMessage(
"Proctime field 'proctime' has invalid type TIME(0). Proctime attributes "
+ "must be of a Timestamp family.");
} |
@Override
public TableEntryByTypeTransformer tableEntryByTypeTransformer() {
return transformer;
} | @Test
void transforms_with_correct_method_with_cell_transformer() throws Throwable {
Method method = JavaDefaultDataTableEntryTransformerDefinitionTest.class.getMethod(
"correct_method_with_cell_transformer", Map.class, Type.class, TableCellByTypeTransformer.class);
JavaDefaultDataTableEntryTransformerDefinition definition = new JavaDefaultDataTableEntryTransformerDefinition(
method, lookup);
assertThat(definition.tableEntryByTypeTransformer()
.transform(fromValue, String.class, cellTransformer),
is("key=value"));
} |
public boolean allSearchFiltersVisible() {
return hiddenSearchFiltersIDs.isEmpty();
} | @Test
void testAllSearchFiltersVisibleReturnsFalseOnNonEmptyHiddenFilters() {
toTest = new SearchFilterVisibilityCheckStatus(Collections.singletonList("There is a hidden one!"));
assertFalse(toTest.allSearchFiltersVisible());
assertFalse(toTest.allSearchFiltersVisible(null));
assertFalse(toTest.allSearchFiltersVisible(Collections.emptyList()));
} |
@Override
public Map<String, Map<String, String>> getAdditionalInformation() {
return Collections.singletonMap("values", values);
} | @Test
public void testGetValues() throws Exception {
Map<String,String> values = new HashMap<>();
values.put("foo", "bar");
values.put("baz", "lol");
final ListField list = new ListField("list", "The List", Collections.emptyList(), values, "Hello, this is a list", ConfigurationField.Optional.NOT_OPTIONAL);
assertThat(list.getAdditionalInformation().get("values")).isEqualTo(values);
} |
@Override
public List<String> readFilesWithRetries(Sleeper sleeper, BackOff backOff)
throws IOException, InterruptedException {
IOException lastException = null;
do {
try {
// Match inputPath which may contains glob
Collection<Metadata> files =
Iterables.getOnlyElement(FileSystems.match(Collections.singletonList(filePattern)))
.metadata();
LOG.debug("Found {} file(s) by matching the path: {}", files.size(), filePattern);
if (files.isEmpty() || !checkTotalNumOfFiles(files)) {
continue;
}
// Read data from file paths
return readLines(files);
} catch (IOException e) {
// Ignore and retry
lastException = e;
LOG.warn("Error in file reading. Ignore and retry.");
}
} while (BackOffUtils.next(sleeper, backOff));
// Failed after max retries
throw new IOException(
String.format("Unable to read file(s) after retrying %d times", MAX_READ_RETRIES),
lastException);
} | @Test
public void testReadWithRetriesFailsWhenRedundantFileLoaded() throws Exception {
tmpFolder.newFile("result-000-of-001");
tmpFolder.newFile("tmp-result-000-of-001");
NumberedShardedFile shardedFile = new NumberedShardedFile(filePattern);
thrown.expect(IOException.class);
thrown.expectMessage(
containsString(
"Unable to read file(s) after retrying " + NumberedShardedFile.MAX_READ_RETRIES));
shardedFile.readFilesWithRetries(fastClock, backOff);
} |
@Override
public void release(final String id) {
synchronized(lock) {
if(log.isDebugEnabled()) {
log.debug(String.format("Release sleep assertion %s", id));
}
this.releaseAssertion(id);
}
} | @Test
public void testRelease() {
final SleepPreventer s = new IOKitSleepPreventer();
final String lock = s.lock();
Assert.assertNotNull(lock);
s.release(lock);
} |
public boolean hasOnlineDir(Uuid dir) {
return DirectoryId.isOnline(dir, directories);
} | @Test
void testHasOnlineDir() {
BrokerRegistration registration = new BrokerRegistration.Builder().
setId(0).
setEpoch(0).
setIncarnationId(Uuid.fromString("m6CiJvfITZeKVC6UuhlZew")).
setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))).
setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))).
setRack(Optional.empty()).
setFenced(false).
setInControlledShutdown(false).
setDirectories(Arrays.asList(
Uuid.fromString("dir1G6EtuR1OTdAzFw1AFQ"),
Uuid.fromString("dir2gwpjTvKC7sMfcLNd8g"),
Uuid.fromString("dir3Ir8mQ0mMxfv93RITDA")
)).
build();
assertTrue(registration.hasOnlineDir(Uuid.fromString("dir1G6EtuR1OTdAzFw1AFQ")));
assertTrue(registration.hasOnlineDir(Uuid.fromString("dir2gwpjTvKC7sMfcLNd8g")));
assertTrue(registration.hasOnlineDir(Uuid.fromString("dir3Ir8mQ0mMxfv93RITDA")));
assertTrue(registration.hasOnlineDir(DirectoryId.UNASSIGNED));
assertTrue(registration.hasOnlineDir(DirectoryId.MIGRATING));
assertFalse(registration.hasOnlineDir(Uuid.fromString("sOwN7HH7S1maxpU1WzlzXg")));
assertFalse(registration.hasOnlineDir(DirectoryId.LOST));
} |
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide if to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
// results[0].target is null if this extractor cannot produce multiple fields; use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
} | @Test
public void testCursorStrategyCutIfBeginIndexIsDisabled() throws Exception {
final TestExtractor extractor = new TestExtractor.Builder()
.cursorStrategy(CUT)
.sourceField("msg")
.callback(new Callable<Result[]>() {
@Override
public Result[] call() throws Exception {
return new Result[]{
new Result("the", -1, 3)
};
}
})
.build();
final Message msg = createMessage("message");
msg.addField("msg", "the hello");
extractor.runExtractor(msg);
// If the begin index is -1, the source field should not be modified.
assertThat(msg.getField("msg")).isEqualTo("the hello");
} |
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
if ( val == null ) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( TypeUtil.formatValue(val, false) );
}
} | @Test
void invokeListEmpty() {
FunctionTestUtil.assertResult(stringFunction.invoke(Collections.emptyList()), "[ ]");
} |
public <T extends VFSConnectionDetails> boolean test( @NonNull ConnectionManager manager,
@NonNull T details,
@Nullable VFSConnectionTestOptions options )
throws KettleException {
if ( options == null ) {
options = new VFSConnectionTestOptions();
}
// The specified connection details may not yet be saved in the meta-store,
// but they still need a non-empty name in order to form a temporary PVFS URI.
if ( StringUtils.isEmpty( details.getName() ) ) {
return false;
}
VFSConnectionProvider<T> provider = getExistingProvider( manager, details );
if ( !provider.test( details ) ) {
return false;
}
if ( !details.isRootPathSupported() || options.isRootPathIgnored() ) {
return true;
}
String resolvedRootPath;
try {
resolvedRootPath = getResolvedRootPath( details );
} catch ( KettleException e ) {
// Invalid root path.
return false;
}
if ( resolvedRootPath == null ) {
return !details.isRootPathRequired();
}
// Ensure that root path exists and is a folder.
return isFolder( getConnectionRootProviderFileObject( manager, provider, details ) );
} | @Test
public void testTestReturnsTrueWhenRootPathInvalidAndOptionsToIgnoreRootPath() throws KettleException {
when( vfsConnectionDetails.getRootPath() ).thenReturn( "../invalid" );
assertTrue( vfsConnectionManagerHelper.test( connectionManager, vfsConnectionDetails, getTestOptionsRootPathIgnored() ) );
} |
public Future<KafkaVersionChange> reconcile() {
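// First fetch the broker pods, then detect the current and desired Kafka versions, and finally prepare the version change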
return getPods()
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
} | @Test
public void testUpgradeWithoutVersion(VertxTestContext context) {
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(null, VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), null),
mockRos(mockUniformPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version()))
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.metadataVersion(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()));
async.flag();
})));
} |
@SuppressWarnings("FutureReturnValueIgnored")
public void start() {
running.set(true);
configFetcher.start();
memoryMonitor.start();
streamingWorkerHarness.start();
sampler.start();
workerStatusReporter.start();
activeWorkRefresher.start();
} | @Test
public void testUnboundedSourcesDrain() throws Exception {
List<Integer> finalizeTracker = Lists.newArrayList();
TestCountingSource.setFinalizeTracker(finalizeTracker);
StreamingDataflowWorker worker =
makeWorker(
defaultWorkerParams()
.setInstructions(makeUnboundedSourcePipeline())
.publishCounters()
.build());
worker.start();
// Test new key.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \"computation\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \"0000000000000001\""
+ " sharding_key: 1"
+ " work_token: 2"
+ " cache_token: 3"
+ " }"
+ "}",
null));
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
Windmill.WorkItemCommitRequest commit = result.get(2L);
UnsignedLong finalizeId =
UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
assertThat(
removeDynamicFields(commit),
equalTo(
setMessagesMetadata(
PaneInfo.NO_FIRING,
CoderUtils.encodeToByteArray(
CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
Collections.singletonList(GlobalWindow.INSTANCE)),
parseCommitRequest(
"key: \"0000000000000001\" "
+ "sharding_key: 1 "
+ "work_token: 2 "
+ "cache_token: 3 "
+ "source_backlog_bytes: 7 "
+ "source_bytes_processed: 18 "
+ "output_messages {"
+ " destination_stream_id: \"out\""
+ " bundles {"
+ " key: \"0000000000000001\""
+ " messages {"
+ " timestamp: 0"
+ " data: \"0:0\""
+ " }"
+ " messages_ids: \"\""
+ " }"
+ "} "
+ "source_state_updates {"
+ " state: \"\000\""
+ " finalize_ids: "
+ finalizeId
+ "} "
+ "source_watermark: 1000"))
.build()));
// Test drain work item.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \"computation\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \"0000000000000001\""
+ " sharding_key: 1"
+ " work_token: 3"
+ " cache_token: 3"
+ " source_state {"
+ " only_finalize: true"
+ " finalize_ids: "
+ finalizeId
+ " }"
+ " }"
+ "}",
null));
result = server.waitForAndGetCommits(1);
commit = result.get(3L);
assertThat(
commit,
equalTo(
parseCommitRequest(
"key: \"0000000000000001\" "
+ "sharding_key: 1 "
+ "work_token: 3 "
+ "cache_token: 3 "
+ "source_state_updates {"
+ " only_finalize: true"
+ "} ")
.build()));
assertThat(finalizeTracker, contains(0));
} |
public double calculateDensity(Graph graph, boolean isGraphDirected) {
double result;
double edgesCount = graph.getEdgeCount();
double nodesCount = graph.getNodeCount();
double multiplier = 1;
if (!isGraphDirected) {
multiplier = 2;
}
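// density = multiplier * |E| / (|V| * (|V| - 1)); undirected graphs use multiplier 2 because each edge counts in both directions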
result = (multiplier * edgesCount) / (nodesCount * nodesCount - nodesCount);
return result;
} | @Test
public void testDirectedCompleteGraphDensity() {
GraphModel graphModel = GraphGenerator.generateCompleteDirectedGraph(5);
DirectedGraph graph = graphModel.getDirectedGraph();
GraphDensity d = new GraphDensity();
double density = d.calculateDensity(graph, true);
assertEquals(density, 1.0);
} |
public Usage metrics(boolean details) {
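// Collect usage metrics for the window starting at the beginning of the previous day up to now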
ZonedDateTime to = ZonedDateTime.now();
ZonedDateTime from = to
.toLocalDate()
.atStartOfDay(ZoneId.systemDefault())
.minusDays(1);
return metrics(details, from, to);
} | @Test
public void metrics() throws URISyntaxException {
ImmutableMap<String, Object> properties = ImmutableMap.of("kestra.server-type", ServerType.WEBSERVER.name());
try (ApplicationContext applicationContext = Helpers.applicationContext(properties).start()) {
CollectorService collectorService = applicationContext.getBean(CollectorService.class);
Usage metrics = collectorService.metrics(true);
assertThat(metrics.getUri(), is("https://mysuperhost.com/subpath"));
assertThat(metrics.getUuid(), notNullValue());
assertThat(metrics.getVersion(), notNullValue());
assertThat(metrics.getStartTime(), notNullValue());
assertThat(metrics.getEnvironments(), contains("test"));
assertThat(metrics.getStartTime(), notNullValue());
assertThat(metrics.getHost().getUuid(), notNullValue());
assertThat(metrics.getHost().getHardware().getLogicalProcessorCount(), notNullValue());
assertThat(metrics.getHost().getJvm().getName(), notNullValue());
assertThat(metrics.getHost().getOs().getFamily(), notNullValue());
assertThat(metrics.getConfigurations().getRepositoryType(), is("memory"));
assertThat(metrics.getConfigurations().getQueueType(), is("memory"));
assertThat(metrics.getExecutions(), notNullValue());
// 1 per hour
assertThat(metrics.getExecutions().getDailyExecutionsCount().size(), greaterThan(0));
// no task runs as it's an empty instance
assertThat(metrics.getExecutions().getDailyTaskRunsCount(), nullValue());
assertThat(metrics.getInstanceUuid(), is(TestSettingRepository.instanceUuid));
}
} |
public static AccessTokenRetriever create(Map<String, ?> configs, Map<String, Object> jaasConfig) {
return create(configs, null, jaasConfig);
} | @Test
public void testConfigureRefreshingFileAccessTokenRetriever() throws Exception {
String expected = "{}";
File tmpDir = createTempDir("access-token");
File accessTokenFile = createTempFile(tmpDir, "access-token-", ".json", expected);
Map<String, ?> configs = Collections.singletonMap(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString());
Map<String, Object> jaasConfig = Collections.emptyMap();
try (AccessTokenRetriever accessTokenRetriever = AccessTokenRetrieverFactory.create(configs, jaasConfig)) {
accessTokenRetriever.init();
assertEquals(expected, accessTokenRetriever.retrieve());
}
} |
public Cluster buildCluster() {
Set<String> internalTopics = new HashSet<>();
List<PartitionInfo> partitions = new ArrayList<>();
Map<String, Uuid> topicIds = new HashMap<>();
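// Only topics without errors contribute partitions; internal topics and non-default topic IDs are tracked separately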
for (TopicMetadata metadata : topicMetadata()) {
if (metadata.error == Errors.NONE) {
if (metadata.isInternal)
internalTopics.add(metadata.topic);
if (metadata.topicId() != null && !Uuid.ZERO_UUID.equals(metadata.topicId())) {
topicIds.put(metadata.topic, metadata.topicId());
}
for (PartitionMetadata partitionMetadata : metadata.partitionMetadata) {
partitions.add(toPartitionInfo(partitionMetadata, holder().brokers));
}
}
}
return new Cluster(data.clusterId(), brokers(), partitions, topicsByError(Errors.TOPIC_AUTHORIZATION_FAILED),
topicsByError(Errors.INVALID_TOPIC_EXCEPTION), internalTopics, controller(), topicIds);
} | @Test
void buildClusterTest() {
Uuid zeroUuid = new Uuid(0L, 0L);
Uuid randomUuid = Uuid.randomUuid();
MetadataResponseData.MetadataResponseTopic topicMetadata1 = new MetadataResponseData.MetadataResponseTopic()
.setName("topic1")
.setErrorCode(Errors.NONE.code())
.setPartitions(emptyList())
.setIsInternal(false);
MetadataResponseData.MetadataResponseTopic topicMetadata2 = new MetadataResponseData.MetadataResponseTopic()
.setName("topic2")
.setErrorCode(Errors.NONE.code())
.setTopicId(zeroUuid)
.setPartitions(emptyList())
.setIsInternal(false);
MetadataResponseData.MetadataResponseTopic topicMetadata3 = new MetadataResponseData.MetadataResponseTopic()
.setName("topic3")
.setErrorCode(Errors.NONE.code())
.setTopicId(randomUuid)
.setPartitions(emptyList())
.setIsInternal(false);
MetadataResponseData.MetadataResponseTopicCollection topics =
new MetadataResponseData.MetadataResponseTopicCollection();
topics.add(topicMetadata1);
topics.add(topicMetadata2);
topics.add(topicMetadata3);
MetadataResponse metadataResponse = new MetadataResponse(new MetadataResponseData().setTopics(topics),
ApiKeys.METADATA.latestVersion());
Cluster cluster = metadataResponse.buildCluster();
assertNull(cluster.topicName(Uuid.ZERO_UUID));
assertNull(cluster.topicName(zeroUuid));
assertEquals("topic3", cluster.topicName(randomUuid));
} |
public static void smooth(PointList geometry, double maxWindowSize) {
if (geometry.size() <= 2) {
// geometry consists only of tower nodes, there are no pillar nodes to be smoothed in between
return;
}
// calculate the distance between all points once here to avoid repeated calculation.
// for n nodes there are always n-1 edges
double[] distances = new double[geometry.size() - 1];
for (int i = 0; i <= geometry.size() - 2; i++) {
distances[i] = DistancePlaneProjection.DIST_PLANE.calcDist(
geometry.getLat(i), geometry.getLon(i),
geometry.getLat(i + 1), geometry.getLon(i + 1)
);
}
// map that will collect all smoothed elevation values, size is less by 2
// because elevation of start and end point (tower nodes) won't be touched
IntDoubleHashMap averagedElevations = new IntDoubleHashMap((geometry.size() - 1) * 4 / 3);
// iterate over every pillar node to smooth its elevation
// first and last points are left out as they are tower nodes
for (int i = 1; i <= geometry.size() - 2; i++) {
// first, determine the average window which could be smaller when close to pillar nodes
double searchDistance = maxWindowSize / 2.0;
double searchDistanceBack = 0.0;
for (int j = i - 1; j >= 0; j--) {
searchDistanceBack += distances[j];
if (searchDistanceBack > searchDistance) {
break;
}
}
// update search distance if pillar node is close to START tower node
searchDistance = Math.min(searchDistance, searchDistanceBack);
double searchDistanceForward = 0.0;
for (int j = i; j < geometry.size() - 1; j++) {
searchDistanceForward += distances[j];
if (searchDistanceForward > searchDistance) {
break;
}
}
// update search distance if pillar node is close to END tower node
searchDistance = Math.min(searchDistance, searchDistanceForward);
if (searchDistance <= 0.0) {
// there is nothing to smooth. this is an edge case where pillar nodes share exactly the same location
// as a tower node.
// by doing so we avoid (at least theoretically) a division by zero later in the function call
continue;
}
// area under elevation curve
double elevationArea = 0.0;
// first going again backwards
double distanceBack = 0.0;
for (int j = i - 1; j >= 0; j--) {
double dist = distances[j];
double searchDistLeft = searchDistance - distanceBack;
distanceBack += dist;
if (searchDistLeft < dist) {
// node lies outside averaging window
double elevationDelta = geometry.getEle(j) - geometry.getEle(j + 1);
double elevationAtSearchDistance = geometry.getEle(j + 1) + searchDistLeft / dist * elevationDelta;
elevationArea += searchDistLeft * (geometry.getEle(j + 1) + elevationAtSearchDistance) / 2.0;
break;
} else {
elevationArea += dist * (geometry.getEle(j + 1) + geometry.getEle(j)) / 2.0;
}
}
// now going forward
double distanceForward = 0.0;
for (int j = i; j < geometry.size() - 1; j++) {
double dist = distances[j];
double searchDistLeft = searchDistance - distanceForward;
distanceForward += dist;
if (searchDistLeft < dist) {
double elevationDelta = geometry.getEle(j + 1) - geometry.getEle(j);
double elevationAtSearchDistance = geometry.getEle(j) + searchDistLeft / dist * elevationDelta;
elevationArea += searchDistLeft * (geometry.getEle(j) + elevationAtSearchDistance) / 2.0;
break;
} else {
elevationArea += dist * (geometry.getEle(j + 1) + geometry.getEle(j)) / 2.0;
}
}
double elevationAverage = elevationArea / (searchDistance * 2);
averagedElevations.put(i, elevationAverage);
}
// after all pillar nodes got an averaged elevation, elevations are overwritten
averagedElevations.forEach((Consumer<IntDoubleCursor>) c -> geometry.setElevation(c.key, c.value));
} | @Test
public void testSparsePoints() {
PointList pl = new PointList(3, true);
pl.add(47.329730504970684, 10.156667197157475, 0);
pl.add(47.3298073615309, 10.15798541322701, 100);
pl.add(47.3316055451794, 10.158042110691866, 200);
EdgeElevationSmoothingMovingAverage.smooth(pl, 150.0);
assertEquals(3, pl.size());
assertEquals(0, pl.getEle(0), 0.000001);
assertEquals((62.5 * 75 + 118.75 * 75) / 150.0, pl.getEle(1), 0.5);
assertEquals(200, pl.getEle(2), 0.000001);
} |
@SuppressWarnings({"rawtypes", "unchecked"})
public SchemaMetaData revise(final SchemaMetaData originalMetaData) {
SchemaMetaData result = originalMetaData;
for (Entry<ShardingSphereRule, MetaDataReviseEntry> entry : OrderedSPILoader.getServices(MetaDataReviseEntry.class, rules).entrySet()) {
result = revise(result, entry.getKey(), entry.getValue());
}
return result;
} | @Test
void assertReviseWithMetaDataReviseEntry() {
SchemaMetaData schemaMetaData = new SchemaMetaData("expected", Collections.singletonList(createTableMetaData()));
SchemaMetaData actual = new SchemaMetaDataReviseEngine(
Collections.singleton(new FixtureGlobalRule()), new ConfigurationProperties(new Properties()), mock(DatabaseType.class), mock(DataSource.class)).revise(schemaMetaData);
assertThat(actual.getName(), is(schemaMetaData.getName()));
assertThat(actual.getTables(), is(schemaMetaData.getTables()));
} |
public static Optional<Page> createPartitionManifest(PartitionUpdate partitionUpdate)
{
// Manifest Page layout:
// fileName fileSize
// X X
// X X
// X X
// ....
PageBuilder manifestBuilder = new PageBuilder(ImmutableList.of(VARCHAR, BIGINT));
BlockBuilder fileNameBuilder = manifestBuilder.getBlockBuilder(0);
BlockBuilder fileSizeBuilder = manifestBuilder.getBlockBuilder(1);
for (FileWriteInfo fileWriteInfo : partitionUpdate.getFileWriteInfos()) {
if (!fileWriteInfo.getFileSize().isPresent()) {
return Optional.empty();
}
manifestBuilder.declarePosition();
VARCHAR.writeSlice(fileNameBuilder, utf8Slice(fileWriteInfo.getWriteFileName()));
BIGINT.writeLong(fileSizeBuilder, fileWriteInfo.getFileSize().get());
}
return Optional.of(manifestBuilder.build());
} | @Test
public void testCreatePartitionManifest()
{
PartitionUpdate partitionUpdate = new PartitionUpdate("testPartition", NEW, "/testDir", "/testDir", ImmutableList.of(new FileWriteInfo("testFileName", "testFileName", Optional.of(FILE_SIZE))), 100, 1024, 1024, false);
Optional<Page> manifestPage = createPartitionManifest(partitionUpdate);
assertTrue(manifestPage.isPresent());
assertEquals(manifestPage.get().getChannelCount(), 2);
assertEquals(manifestPage.get().getPositionCount(), 1);
} |
@Override
public KTableValueGetterSupplier<K, VOut> view() {
if (queryableName != null) {
return new KTableMaterializedValueGetterSupplier<>(queryableName);
}
return new KTableValueGetterSupplier<K, VOut>() {
final KTableValueGetterSupplier<K, V> parentValueGetterSupplier = parent.valueGetterSupplier();
public KTableValueGetter<K, VOut> get() {
return new KTableTransformValuesGetter(
parentValueGetterSupplier.get(),
transformerSupplier.get());
}
@Override
public String[] storeNames() {
return parentValueGetterSupplier.storeNames();
}
};
} | @Test
public void shouldGetQueryableStoreNameIfMaterialized() {
final KTableTransformValues<String, String, String> transformValues =
new KTableTransformValues<>(parent, new ExclamationValueTransformerSupplier(), QUERYABLE_NAME);
final String[] storeNames = transformValues.view().storeNames();
assertThat(storeNames, is(new String[]{QUERYABLE_NAME}));
} |
@Override
public void open() throws CatalogException {
if (this.client == null) {
try {
this.client = Hive.get(hiveConf).getMSC();
} catch (Exception e) {
throw new HoodieCatalogException("Failed to create hive metastore client", e);
}
LOG.info("Connected to Hive metastore");
}
if (!databaseExists(getDefaultDatabase())) {
LOG.info("{} does not exist, will be created.", getDefaultDatabase());
CatalogDatabase database = new CatalogDatabaseImpl(Collections.emptyMap(), "default database");
try {
createDatabase(getDefaultDatabase(), database, true);
} catch (DatabaseAlreadyExistException e) {
throw new HoodieCatalogException(getName(), e);
}
}
} | @Test
void testCreateTableWithoutPreCombineKey() throws TableAlreadyExistException, DatabaseNotExistException, IOException, TableNotExistException {
String db = "default";
hoodieCatalog = HoodieCatalogTestUtils.createHiveCatalog();
hoodieCatalog.open();
Map<String, String> options = new HashMap<>();
options.put(FactoryUtil.CONNECTOR.key(), "hudi");
TypedProperties props = createTableAndReturnTableProperties(options, new ObjectPath(db, "tmptb1"));
assertFalse(props.containsKey("hoodie.table.precombine.field"));
options.put(PRECOMBINE_FIELD.key(), "ts_3");
props = createTableAndReturnTableProperties(options, new ObjectPath(db, "tmptb2"));
assertTrue(props.containsKey("hoodie.table.precombine.field"));
assertEquals("ts_3", props.get("hoodie.table.precombine.field"));
} |
List<Endpoint> endpoints() {
try {
String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
} catch (RestClientException e) {
return handleKnownException(e);
}
} | @Test
public void endpointsByNamespaceWithPublicIpExposingMultiplePorts() throws JsonProcessingException {
// given
stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), podsListResponse());
stub(String.format("/api/v1/namespaces/%s/endpoints", NAMESPACE), endpointsListResponse());
stub(String.format("/api/v1/namespaces/%s/services/hazelcast-0", NAMESPACE),
serviceLbWithMultiplePorts("hazelcast-0", List.of(
servicePortWithName("hazelcast", 5701, 5701, 31916),
servicePortWithName("wan-port", 5710, 5710, 31926)), "35.232.226.200"));
stub(String.format("/api/v1/namespaces/%s/services/service-1", NAMESPACE),
serviceLbWithMultiplePorts("service-1", List.of(
servicePortWithName("hazelcast", 5701, 5701, 31917),
servicePortWithName("wan-port", 5701, 5701, 31916)), "35.232.226.201"));
// when
List<Endpoint> result = kubernetesClient.endpoints();
// then
assertThat(formatPrivate(result)).containsExactlyInAnyOrder(ready("192.168.0.25", 5701), ready("172.17.0.5", 5702));
assertThat(formatPublic(result)).containsExactlyInAnyOrder(ready("35.232.226.200", 5701), ready("35.232.226.201", 5701));
} |
public String replaceSecretInfo(String line) {
if (line == null) {
return null;
}
for (CommandArgument argument : arguments) {
line = argument.replaceSecretInfo(line);
}
for (SecretString secret : secrets) {
line = secret.replaceSecretInfo(line);
}
return line;
} | @Test
void shouldReplaceSecretInfoShouldNotFailForNull() {
ArrayList<CommandArgument> commands = new ArrayList<>();
commands.add(new PasswordArgument("foo"));
ArrayList<SecretString> secretStrings = new ArrayList<>();
secretStrings.add(new PasswordArgument("foo"));
ConsoleResult result = new ConsoleResult(10, new ArrayList<>(), new ArrayList<>(), commands, secretStrings);
assertThat(result.replaceSecretInfo(null)).isNull();
} |
public static boolean isListEqual(List<String> firstList, List<String> secondList) {
if (firstList == null && secondList == null) {
return true;
}
if (firstList == null || secondList == null) {
return false;
}
if (firstList == secondList) {
return true;
}
if (firstList.size() != secondList.size()) {
return false;
}
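// Same-sized lists are considered equal when each contains all elements of the other (order-insensitive)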
boolean flag1 = firstList.containsAll(secondList);
boolean flag2 = secondList.containsAll(firstList);
return flag1 && flag2;
} | @Test
void testIsListEqualForNotEquals() {
List<String> list1 = Arrays.asList("1", "2", "3");
List<String> list2 = Arrays.asList("1", "2", "3", "4");
List<String> list3 = Arrays.asList("1", "2", "3", "5");
assertFalse(CollectionUtils.isListEqual(list1, list2));
assertFalse(CollectionUtils.isListEqual(list2, list3));
} |
public static boolean isHostInNetworkCard(String host) {
try {
InetAddress addr = InetAddress.getByName(host);
return NetworkInterface.getByInetAddress(addr) != null;
} catch (Exception e) {
return false;
}
} | @Test
public void isHostInNetworkCard() throws Exception {
} |
public Schema mergeTables(
Map<FeatureOption, MergingStrategy> mergingStrategies,
Schema sourceSchema,
List<SqlNode> derivedColumns,
List<SqlWatermark> derivedWatermarkSpecs,
SqlTableConstraint derivedPrimaryKey) {
SchemaBuilder schemaBuilder =
new SchemaBuilder(
mergingStrategies,
sourceSchema,
(FlinkTypeFactory) validator.getTypeFactory(),
dataTypeFactory,
validator,
escapeExpression);
schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns);
schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey);
return schemaBuilder.build();
} | @Test
void mergeOverwritingMetadataColumnsDuplicate() {
Schema sourceSchema =
Schema.newBuilder()
.column("one", DataTypes.INT())
.columnByMetadata("two", DataTypes.INT())
.build();
List<SqlNode> derivedColumns =
Collections.singletonList(metadataColumn("two", DataTypes.BOOLEAN(), true));
Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies();
mergingStrategies.put(FeatureOption.METADATA, MergingStrategy.OVERWRITING);
Schema mergedSchema =
util.mergeTables(
mergingStrategies,
sourceSchema,
derivedColumns,
Collections.emptyList(),
null);
Schema expectedSchema =
Schema.newBuilder()
.column("one", DataTypes.INT())
.columnByMetadata("two", DataTypes.BOOLEAN(), true)
.build();
assertThat(mergedSchema).isEqualTo(expectedSchema);
} |
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
boolean satisfied = false;
// No trading history or no position opened, no gain
if (tradingRecord != null) {
Position currentPosition = tradingRecord.getCurrentPosition();
if (currentPosition.isOpened()) {
Num entryPrice = currentPosition.getEntry().getNetPrice();
Num currentPrice = referencePrice.getValue(index);
Num gainThreshold = stopGainThreshold.getValue(index);
if (currentPosition.getEntry().isBuy()) {
satisfied = currentPrice.isGreaterThanOrEqual(entryPrice.plus(gainThreshold));
} else {
satisfied = currentPrice.isLessThanOrEqual(entryPrice.minus(gainThreshold));
}
}
}
return satisfied;
} | @Test
public void testStopGainTriggeredOnLongPosition() {
TradingRecord tradingRecord = new BaseTradingRecord();
tradingRecord.enter(0, series.getBar(0).getClosePrice(), series.numOf(1));
AverageTrueRangeStopGainRule rule = new AverageTrueRangeStopGainRule(series, 3, 2.0);
assertFalse(rule.isSatisfied(1, tradingRecord)); // Price is still below stop gain
assertFalse(rule.isSatisfied(2, tradingRecord)); // Price is still below stop gain
// Simulate a price rise to trigger stop gain
series.addBar(series.getLastBar().getEndTime().plusDays(1), 16, 19, 15, 19, 1000);
assertTrue(rule.isSatisfied(5, tradingRecord)); // Stop gain should trigger now
} |
public static <T> Collection<T> union(Collection<T> coll1, Collection<T> coll2) {
if (isEmpty(coll1) && isEmpty(coll2)) {
return new ArrayList<>();
}
if (isEmpty(coll1)) {
return new ArrayList<>(coll2);
} else if (isEmpty(coll2)) {
return new ArrayList<>(coll1);
}
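// Multiset union: every distinct element appears max(count in coll1, count in coll2) times in the result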
final ArrayList<T> list = new ArrayList<>(Math.max(coll1.size(), coll2.size()));
final Map<T, Integer> map1 = countMap(coll1);
final Map<T, Integer> map2 = countMap(coll2);
final Set<T> elts = newHashSet(coll2);
elts.addAll(coll1);
int m;
for (T t : elts) {
m = Math.max(Convert.toInt(map1.get(t), 0), Convert.toInt(map2.get(t), 0));
for (int i = 0; i < m; i++) {
list.add(t);
}
}
return list;
} | @SuppressWarnings("ConstantValue")
@Test
public void unionNullTest() {
final List<String> list1 = new ArrayList<>();
final List<String> list2 = null;
final List<String> list3 = null;
final Collection<String> union = CollUtil.union(list1, list2, list3);
assertNotNull(union);
} |
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) {
MetaData.Builder metaData = new MetaData.Builder(sanitize, hostName, clock.getTime() / 1000, period)
.type(COLLECTD_TYPE_GAUGE);
try {
connect(sender);
for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
serializeGauge(metaData.plugin(entry.getKey()), entry.getValue());
}
for (Map.Entry<String, Counter> entry : counters.entrySet()) {
serializeCounter(metaData.plugin(entry.getKey()), entry.getValue());
}
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
serializeHistogram(metaData.plugin(entry.getKey()), entry.getValue());
}
for (Map.Entry<String, Meter> entry : meters.entrySet()) {
serializeMeter(metaData.plugin(entry.getKey()), entry.getValue());
}
for (Map.Entry<String, Timer> entry : timers.entrySet()) {
serializeTimer(metaData.plugin(entry.getKey()), entry.getValue());
}
} catch (IOException e) {
LOG.warn("Unable to report to Collectd", e);
} finally {
disconnect(sender);
}
} | @Test
public void reportsCounters() throws Exception {
Counter counter = mock(Counter.class);
when(counter.getCount()).thenReturn(42L);
reporter.report(
map(),
map("api.rest.requests.count", counter),
map(),
map(),
map());
assertThat(nextValues(receiver)).containsExactly(42d);
} |
public ProducerTableInfo getAllProducerInfo(final String addr, final long timeoutMillis)
throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException,
MQBrokerException {
GetAllProducerInfoRequestHeader requestHeader = new GetAllProducerInfoRequestHeader();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_PRODUCER_INFO, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return ProducerTableInfo.decode(response.getBody(), ProducerTableInfo.class);
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
} | @Test
public void assertGetAllProducerInfo() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
Map<String, List<ProducerInfo>> data = new HashMap<>();
data.put("key", Collections.emptyList());
ProducerTableInfo responseBody = new ProducerTableInfo(data);
setResponseBody(responseBody);
ProducerTableInfo actual = mqClientAPI.getAllProducerInfo(defaultBrokerAddr, defaultTimeout);
assertNotNull(actual);
assertEquals(1, actual.getData().size());
} |
public static String cutString(String value) {
byte[] bytes = value.getBytes(Helper.UTF_CS);
// See #2609 and test why we use a value < 255
return bytes.length > 250 ? new String(bytes, 0, 250, Helper.UTF_CS) : value;
} | @Test
public void testCutString() {
String s = cutString("Бухарестская улица (http://ru.wikipedia.org/wiki/" +
"%D0%91%D1%83%D1%85%D0%B0%D1%80%D0%B5%D1%81%D1%82%D1%81%D0%BA%D0%B0%D1%8F_%D1%83%D0%BB%D0%B8%D1%86%D0%B0_(%D0%A1%D0%B0%D0%BD%D0%BA%D1%82-%D0%9F%D0%B5%D1%82%D0%B5%D1%80%D0%B1%D1%83%D1%80%D0%B3))");
assertEquals(250, s.getBytes(UTF_CS).length);
} |
@Override
public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) {
if (stateManager.taskType() != TaskType.ACTIVE) {
throw new IllegalStateException("Tried to transition processor context to active but the state manager's " +
"type was " + stateManager.taskType());
}
this.streamTask = streamTask;
this.collector = recordCollector;
this.cache = newCache;
addAllFlushListenersToNewCache();
} | @Test
public void globalSessionStoreShouldBeReadOnly() {
foreachSetUp();
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
when(stateManager.getGlobalStore(anyString())).thenReturn(null);
final SessionStore<String, Long> sessionStore = mock(SessionStore.class);
when(stateManager.getGlobalStore("GlobalSessionStore")).thenAnswer(answer -> sessionStoreMock(sessionStore));
context = buildProcessorContextImpl(streamsConfig, stateManager);
final StreamTask task = mock(StreamTask.class);
context.transitionToActive(task, null, null);
mockProcessorNodeWithLocalKeyValueStore();
doTest("GlobalSessionStore", (Consumer<SessionStore<String, Long>>) store -> {
verifyStoreCannotBeInitializedOrClosed(store);
checkThrowsUnsupportedOperation(store::flush, "flush()");
checkThrowsUnsupportedOperation(() -> store.remove(null), "remove()");
checkThrowsUnsupportedOperation(() -> store.put(null, null), "put()");
assertEquals(iters.get(3), store.findSessions(KEY, 1L, 2L));
assertEquals(iters.get(4), store.findSessions(KEY, KEY, 1L, 2L));
assertEquals(iters.get(5), store.fetch(KEY));
assertEquals(iters.get(6), store.fetch(KEY, KEY));
});
} |
public Marshaller createMarshaller(Class<?> clazz) throws JAXBException {
Marshaller marshaller = getContext(clazz).createMarshaller();
setMarshallerProperties(marshaller);
if (marshallerEventHandler != null) {
marshaller.setEventHandler(marshallerEventHandler);
}
marshaller.setSchema(marshallerSchema);
return marshaller;
} | @Test
void buildsMarshallerWithSchemaLocationProperty() throws Exception {
JAXBContextFactory factory =
new JAXBContextFactory.Builder()
.withMarshallerSchemaLocation("http://apihost http://apihost/schema.xsd")
.build();
Marshaller marshaller = factory.createMarshaller(Object.class);
assertThat(marshaller.getProperty(Marshaller.JAXB_SCHEMA_LOCATION))
.isEqualTo("http://apihost http://apihost/schema.xsd");
} |
public static <T> SamplerFunction<T> deferDecision() {
return (SamplerFunction<T>) Constants.DEFER_DECISION;
} | @Test void deferDecision_returnsNull() {
assertThat(deferDecision().trySample(null)).isNull();
assertThat(deferDecision().trySample("1")).isNull();
} |
@Override
public V put(K key, V value, Duration ttl) {
return get(putAsync(key, value, ttl));
} | @Test
public void testContainsValueTTL() throws InterruptedException {
RMapCacheNative<SimpleKey, SimpleValue> map = redisson.getMapCacheNative("simple01");
Assertions.assertFalse(map.containsValue(new SimpleValue("34")));
map.put(new SimpleKey("33"), new SimpleValue("44"), Duration.ofSeconds(1));
Assertions.assertTrue(map.containsValue(new SimpleValue("44")));
Assertions.assertFalse(map.containsValue(new SimpleValue("34")));
Thread.sleep(1000);
Assertions.assertFalse(map.containsValue(new SimpleValue("44")));
map.destroy();
} |
public void transitionTo(ClassicGroupState groupState) {
assertValidTransition(groupState);
previousState = state;
state = groupState;
currentStateTimestamp = Optional.of(time.milliseconds());
metrics.onClassicGroupStateTransition(previousState, state);
} | @Test
public void testPreparingRebalanceToDeadTransition() {
group.transitionTo(PREPARING_REBALANCE);
group.transitionTo(DEAD);
assertState(group, DEAD);
} |
public static ClusterAllocationDiskSettings create(boolean enabled, String low, String high, String floodStage) {
if (!enabled) {
return ClusterAllocationDiskSettings.create(enabled, null);
}
return ClusterAllocationDiskSettings.create(enabled, createWatermarkSettings(low, high, floodStage));
} | @Test
public void createPercentageWatermarkSettingsWithoutFloodStage() throws Exception {
ClusterAllocationDiskSettings settings = ClusterAllocationDiskSettingsFactory.create(true, "65%", "75%", "");
assertThat(settings).isInstanceOf(ClusterAllocationDiskSettings.class);
assertThat(settings.ThresholdEnabled()).isTrue();
assertThat(settings.watermarkSettings()).isInstanceOf(PercentageWatermarkSettings.class);
assertThat(settings.watermarkSettings().type()).isEqualTo(WatermarkSettings.SettingsType.PERCENTAGE);
assertThat(settings.watermarkSettings().low()).isEqualTo(65D);
assertThat(settings.watermarkSettings().high()).isEqualTo(75D);
assertThat(settings.watermarkSettings().floodStage()).isNull();
} |
@Override
public ManagedChannel shutdownNow() {
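// Snapshot the used and cached channels under the lock, then shut them down outside of it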
ArrayList<ManagedChannel> channels = new ArrayList<>();
synchronized (this) {
shutdownStarted = true;
channels.addAll(usedChannels);
channels.addAll(channelCache);
}
for (ManagedChannel channel : channels) {
channel.shutdownNow();
}
return this;
} | @Test
public void testShutdownNow() throws Exception {
ManagedChannel mockChannel = mock(ManagedChannel.class);
when(channelSupplier.get()).thenReturn(mockChannel);
ClientCall<Object, Object> mockCall1 = mock(ClientCall.class);
ClientCall<Object, Object> mockCall2 = mock(ClientCall.class);
when(mockChannel.newCall(any(), any())).thenReturn(mockCall1, mockCall2);
IsolationChannel isolationChannel = IsolationChannel.create(channelSupplier);
ClientCall<Object, Object> call1 =
isolationChannel.newCall(methodDescriptor, CallOptions.DEFAULT);
call1.start(new NoopClientCall.NoopClientCallListener<>(), new Metadata());
ArgumentCaptor<Listener<Object>> captor1 = ArgumentCaptor.forClass(ClientCall.Listener.class);
verify(mockCall1).start(captor1.capture(), any());
when(mockChannel.shutdownNow()).thenReturn(mockChannel);
when(mockChannel.isShutdown()).thenReturn(false, true);
isolationChannel.shutdownNow();
assertFalse(isolationChannel.isShutdown());
ClientCall<Object, Object> call2 =
isolationChannel.newCall(methodDescriptor, CallOptions.DEFAULT);
call2.start(new NoopClientCall.NoopClientCallListener<>(), new Metadata());
ArgumentCaptor<Listener<Object>> captor2 = ArgumentCaptor.forClass(ClientCall.Listener.class);
verify(mockCall2).start(captor2.capture(), any());
captor1.getValue().onClose(Status.CANCELLED, new Metadata());
captor2.getValue().onClose(Status.CANCELLED, new Metadata());
assertTrue(isolationChannel.isShutdown());
verify(channelSupplier, times(1)).get();
verify(mockChannel, times(2)).newCall(any(), any());
verify(mockChannel, times(1)).shutdownNow();
verify(mockChannel, times(2)).isShutdown();
} |
@Restricted(NoExternalUse.class)
public static String extractPluginNameFromIconSrc(String iconSrc) {
if (iconSrc == null) {
return "";
}
if (!iconSrc.contains("plugin-")) {
return "";
}
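// iconSrc is a space-separated list of CSS classes; return the name that follows the "plugin-" prefix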
String[] arr = iconSrc.split(" ");
for (String element : arr) {
if (element.startsWith("plugin-")) {
return element.replaceFirst("plugin-", "");
}
}
return "";
} | @Test
public void extractPluginNameFromIconSrcWhichContainsPluginWordInThePluginName() {
String result = Functions.extractPluginNameFromIconSrc("symbol-padlock plugin-design-library-plugin");
assertThat(result, is(equalTo("design-library-plugin")));
} |
public boolean isEmpty() {
return CommonUtils.isEmpty(providerInfos);
} | @Test
public void isEmpty() throws Exception {
ProviderGroup pg = new ProviderGroup("xxx", null);
Assert.assertTrue(pg.isEmpty());
pg = new ProviderGroup("xxx", new ArrayList<ProviderInfo>());
Assert.assertTrue(pg.isEmpty());
pg = new ProviderGroup("xxx", Arrays.asList(ProviderHelper.toProviderInfo("127.0.0.1:12200")));
Assert.assertTrue(!pg.isEmpty());
} |
@Override
public GatewayFilter apply(Config config) {
return new GatewayFilter() {
final UriTemplate uriTemplate = new UriTemplate(config.prefix);
@Override
public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
boolean alreadyPrefixed = exchange.getAttributeOrDefault(GATEWAY_ALREADY_PREFIXED_ATTR, false);
if (alreadyPrefixed) {
return chain.filter(exchange);
}
exchange.getAttributes().put(GATEWAY_ALREADY_PREFIXED_ATTR, true);
ServerHttpRequest req = exchange.getRequest();
addOriginalRequestUrl(exchange, req.getURI());
Map<String, String> uriVariables = getUriTemplateVariables(exchange);
URI uri = uriTemplate.expand(uriVariables);
String newPath = uri.getRawPath() + req.getURI().getRawPath();
exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, uri);
ServerHttpRequest request = req.mutate().path(newPath).build();
if (log.isTraceEnabled()) {
log.trace("Prefixed URI with: " + config.prefix + " -> " + request.getURI());
}
return chain.filter(exchange.mutate().request(request).build());
}
@Override
public String toString() {
return filterToStringCreator(PrefixPathGatewayFilterFactory.this).append("prefix", config.getPrefix())
.toString();
}
};
} | @Test
public void toStringFormat() {
Config config = new Config();
config.setPrefix("myprefix");
GatewayFilter filter = new PrefixPathGatewayFilterFactory().apply(config);
assertThat(filter.toString()).contains("myprefix");
} |
@Override
public CompletableFuture<ListGroupsResponseData> listGroups(
RequestContext context,
ListGroupsRequestData request
) {
if (!isActive.get()) {
return CompletableFuture.completedFuture(new ListGroupsResponseData()
.setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
);
}
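// Schedule a read on every coordinator shard; a NotCoordinatorException from a shard is treated as an empty result rather than a failure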
final List<CompletableFuture<List<ListGroupsResponseData.ListedGroup>>> futures = FutureUtils.mapExceptionally(
runtime.scheduleReadAllOperation(
"list-groups",
(coordinator, lastCommittedOffset) -> coordinator.listGroups(
request.statesFilter(),
request.typesFilter(),
lastCommittedOffset
)
),
exception -> {
exception = Errors.maybeUnwrapException(exception);
if (exception instanceof NotCoordinatorException) {
return Collections.emptyList();
} else {
throw new CompletionException(exception);
}
}
);
return FutureUtils
.combineFutures(futures, ArrayList::new, List::addAll)
.thenApply(groups -> new ListGroupsResponseData().setGroups(groups))
.exceptionally(exception -> handleOperationException(
"list-groups",
request,
exception,
(error, __) -> new ListGroupsResponseData().setErrorCode(error.code())
));
} | @Test
public void testListGroups() throws ExecutionException, InterruptedException, TimeoutException {
CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
GroupCoordinatorService service = new GroupCoordinatorService(
new LogContext(),
createConfig(),
runtime,
new GroupCoordinatorMetrics(),
createConfigManager()
);
service.startup(() -> 3);
List<ListGroupsResponseData.ListedGroup> expectedResults = Arrays.asList(
new ListGroupsResponseData.ListedGroup()
.setGroupId("group0")
.setProtocolType("protocol1")
.setGroupState("Stable")
.setGroupType("classic"),
new ListGroupsResponseData.ListedGroup()
.setGroupId("group1")
.setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
.setGroupState("Empty")
.setGroupType("consumer"),
new ListGroupsResponseData.ListedGroup()
.setGroupId("group2")
.setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
.setGroupState("Dead")
.setGroupType("consumer")
);
when(runtime.scheduleReadAllOperation(
ArgumentMatchers.eq("list-groups"),
ArgumentMatchers.any()
)).thenReturn(Arrays.asList(
CompletableFuture.completedFuture(Collections.singletonList(expectedResults.get(0))),
CompletableFuture.completedFuture(Collections.singletonList(expectedResults.get(1))),
CompletableFuture.completedFuture(Collections.singletonList(expectedResults.get(2)))
));
CompletableFuture<ListGroupsResponseData> responseFuture = service.listGroups(
requestContext(ApiKeys.LIST_GROUPS),
new ListGroupsRequestData()
);
assertEquals(expectedResults, responseFuture.get(5, TimeUnit.SECONDS).groups());
} |
@Override
public ClientHttpResponse intercept(HttpRequest request, byte[] body, ClientHttpRequestExecution execution) throws IOException {
EnhancedPluginContext enhancedPluginContext = new EnhancedPluginContext();
EnhancedRequestContext enhancedRequestContext = EnhancedRequestContext.builder()
.httpHeaders(request.getHeaders())
.httpMethod(request.getMethod())
.url(request.getURI())
.build();
enhancedPluginContext.setRequest(enhancedRequestContext);
enhancedPluginContext.setOriginRequest(request);
enhancedPluginContext.setLocalServiceInstance(pluginRunner.getLocalServiceInstance());
enhancedPluginContext.setTargetServiceInstance((ServiceInstance) MetadataContextHolder.get()
.getLoadbalancerMetadata().get(LOAD_BALANCER_SERVICE_INSTANCE), request.getURI());
// Run pre enhanced plugins.
pluginRunner.run(EnhancedPluginType.Client.PRE, enhancedPluginContext);
long startMillis = System.currentTimeMillis();
try {
ClientHttpResponse response = execution.execute(request, body);
enhancedPluginContext.setDelay(System.currentTimeMillis() - startMillis);
EnhancedResponseContext enhancedResponseContext = EnhancedResponseContext.builder()
.httpStatus(response.getRawStatusCode())
.httpHeaders(response.getHeaders())
.build();
enhancedPluginContext.setResponse(enhancedResponseContext);
// Run post enhanced plugins.
pluginRunner.run(EnhancedPluginType.Client.POST, enhancedPluginContext);
return response;
}
catch (IOException e) {
enhancedPluginContext.setDelay(System.currentTimeMillis() - startMillis);
enhancedPluginContext.setThrowable(e);
// Run exception enhanced plugins.
pluginRunner.run(EnhancedPluginType.Client.EXCEPTION, enhancedPluginContext);
throw e;
}
finally {
// Run finally enhanced plugins.
pluginRunner.run(EnhancedPluginType.Client.FINALLY, enhancedPluginContext);
}
} | @Test
public void testRun() throws IOException, URISyntaxException {
ClientHttpResponse actualResult;
final byte[] inputBody = null;
URI uri = new URI("http://0.0.0.0/");
doReturn(uri).when(mockHttpRequest).getURI();
doReturn(HttpMethod.GET).when(mockHttpRequest).getMethod();
doReturn(mockHttpHeaders).when(mockHttpRequest).getHeaders();
doReturn(mockClientHttpResponse).when(mockClientHttpRequestExecution).execute(mockHttpRequest, inputBody);
EnhancedRestTemplateInterceptor reporter = new EnhancedRestTemplateInterceptor(new DefaultEnhancedPluginRunner(new ArrayList<>(), registration, null));
actualResult = reporter.intercept(mockHttpRequest, inputBody, mockClientHttpRequestExecution);
assertThat(actualResult).isEqualTo(mockClientHttpResponse);
actualResult = reporter.intercept(mockHttpRequest, inputBody, mockClientHttpRequestExecution);
assertThat(actualResult).isEqualTo(mockClientHttpResponse);
doThrow(new SocketTimeoutException()).when(mockClientHttpRequestExecution).execute(mockHttpRequest, inputBody);
assertThatThrownBy(() -> reporter.intercept(mockHttpRequest, inputBody, mockClientHttpRequestExecution)).isInstanceOf(SocketTimeoutException.class);
} |
@Override
public T peekLast()
{
if (_tail == null)
{
return null;
}
return _tail._value;
} | @Test
public void testEmptyPeekLast()
{
LinkedDeque<Object> q = new LinkedDeque<>();
Assert.assertNull(q.peekLast(), "peekLast on empty queue should return null");
} |
@Override
public Optional<DispatchEvent> build(final DataChangedEvent event) {
String instanceId = ComputeNode.getInstanceIdByComputeNode(event.getKey());
if (!Strings.isNullOrEmpty(instanceId)) {
Optional<DispatchEvent> result = createInstanceDispatchEvent(event, instanceId);
if (result.isPresent()) {
return result;
}
}
if (event.getKey().startsWith(ComputeNode.getShowProcessListTriggerNodePath())) {
return createReportLocalProcessesEvent(event);
}
if (event.getKey().startsWith(ComputeNode.getKillProcessTriggerNodePath())) {
return createKillLocalProcessEvent(event);
}
return Optional.empty();
} | @Test
void assertCreateEventWhenEnabled() {
Optional<DispatchEvent> actual = new ComputeNodeStateDispatchEventBuilder()
.build(new DataChangedEvent("/nodes/compute_nodes/status/foo_instance_id", "", Type.UPDATED));
assertTrue(actual.isPresent());
assertTrue(((ComputeNodeInstanceStateChangedEvent) actual.get()).getStatus().isEmpty());
assertThat(((ComputeNodeInstanceStateChangedEvent) actual.get()).getInstanceId(), is("foo_instance_id"));
} |
public int compare(boolean b1, boolean b2) {
throw new UnsupportedOperationException(
"compare(boolean, boolean) was called on a non-boolean comparator: " + toString());
} | @Test
public void testDoubleComparator() {
Double[] valuesInAscendingOrder = {
null,
Double.NEGATIVE_INFINITY,
-Double.MAX_VALUE,
-123456.7890123456789,
-Double.MIN_VALUE,
0.0,
Double.MIN_VALUE,
123456.7890123456789,
Double.MAX_VALUE,
Double.POSITIVE_INFINITY
};
for (int i = 0; i < valuesInAscendingOrder.length; ++i) {
for (int j = 0; j < valuesInAscendingOrder.length; ++j) {
Double vi = valuesInAscendingOrder[i];
Double vj = valuesInAscendingOrder[j];
int exp = i - j;
assertSignumEquals(vi, vj, exp, DOUBLE_COMPARATOR.compare(vi, vj));
if (vi != null && vj != null) {
assertSignumEquals(vi, vj, exp, DOUBLE_COMPARATOR.compare(vi.doubleValue(), vj.doubleValue()));
}
}
}
checkThrowingUnsupportedException(DOUBLE_COMPARATOR, Double.TYPE);
} |
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
StopWatch sw = new StopWatch().start();
FileStatus[] stats = listStatus(job);
// Save the number of input files for metrics/loadgen
job.setLong(NUM_INPUT_FILES, stats.length);
long totalSize = 0; // compute total size
boolean ignoreDirs = !job.getBoolean(INPUT_DIR_RECURSIVE, false)
&& job.getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
List<FileStatus> files = new ArrayList<>(stats.length);
for (FileStatus file: stats) { // check we have valid files
if (file.isDirectory()) {
if (!ignoreDirs) {
throw new IOException("Not a file: "+ file.getPath());
}
} else {
files.add(file);
totalSize += file.getLen();
}
}
long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);
// generate splits
ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
NetworkTopology clusterMap = new NetworkTopology();
for (FileStatus file: files) {
Path path = file.getPath();
long length = file.getLen();
if (length != 0) {
FileSystem fs = path.getFileSystem(job);
BlockLocation[] blkLocations;
if (file instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus) file).getBlockLocations();
} else {
blkLocations = fs.getFileBlockLocations(file, 0, length);
}
if (isSplitable(fs, path)) {
long blockSize = file.getBlockSize();
long splitSize = computeSplitSize(goalSize, minSize, blockSize);
long bytesRemaining = length;
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations,
length-bytesRemaining, splitSize, clusterMap);
splits.add(makeSplit(path, length-bytesRemaining, splitSize,
splitHosts[0], splitHosts[1]));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations, length
- bytesRemaining, bytesRemaining, clusterMap);
splits.add(makeSplit(path, length - bytesRemaining, bytesRemaining,
splitHosts[0], splitHosts[1]));
}
} else {
if (LOG.isDebugEnabled()) {
// Log only if the file is big enough to be split
if (length > Math.min(file.getBlockSize(), minSize)) {
LOG.debug("File is not splittable so no parallelization "
+ "is possible: " + file.getPath());
}
}
String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations,0,length,clusterMap);
splits.add(makeSplit(path, 0, length, splitHosts[0], splitHosts[1]));
}
} else {
//Create empty hosts array for zero length files
splits.add(makeSplit(path, 0, length, new String[0]));
}
}
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits.toArray(new FileSplit[splits.size()]);
} | @Test
public void testIgnoreDirs() throws Exception {
Configuration conf = getConfiguration();
conf.setBoolean(FileInputFormat.INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, true);
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, "test:///a1");
MockFileSystem mockFs = (MockFileSystem) new Path("test:///").getFileSystem(conf);
JobConf job = new JobConf(conf);
TextInputFormat fileInputFormat = new TextInputFormat();
fileInputFormat.configure(job);
InputSplit[] splits = fileInputFormat.getSplits(job, 1);
Assert.assertEquals("Input splits are not correct", 1, splits.length);
FileSystem.closeAll();
} |
@Override
public BlobDescriptor call() throws IOException, RegistryException {
EventHandlers eventHandlers = buildContext.getEventHandlers();
DescriptorDigest blobDigest = blobDescriptor.getDigest();
try (ProgressEventDispatcher progressEventDispatcher =
progressEventDispatcherFactory.create(
"pushing blob " + blobDigest, blobDescriptor.getSize());
TimerEventDispatcher ignored =
new TimerEventDispatcher(eventHandlers, DESCRIPTION + blobDescriptor);
ThrottledAccumulatingConsumer throttledProgressReporter =
new ThrottledAccumulatingConsumer(progressEventDispatcher::dispatchProgress)) {
// check if the BLOB is available
if (!forcePush && registryClient.checkBlob(blobDigest).isPresent()) {
eventHandlers.dispatch(
LogEvent.info(
"Skipping push; BLOB already exists on target registry : " + blobDescriptor));
return blobDescriptor;
}
// If base and target images are in the same registry, then use mount/from to try mounting the
// BLOB from the base image repository to the target image repository and possibly avoid
// having to push the BLOB. See
// https://docs.docker.com/registry/spec/api/#cross-repository-blob-mount for details.
String baseRegistry = buildContext.getBaseImageConfiguration().getImageRegistry();
String baseRepository = buildContext.getBaseImageConfiguration().getImageRepository();
String targetRegistry = buildContext.getTargetImageConfiguration().getImageRegistry();
String sourceRepository = targetRegistry.equals(baseRegistry) ? baseRepository : null;
registryClient.pushBlob(blobDigest, blob, sourceRepository, throttledProgressReporter);
return blobDescriptor;
}
} | @Test
public void testCall_doBlobCheckAndBlobDoesNotExist() throws IOException, RegistryException {
Mockito.when(registryClient.checkBlob(Mockito.any())).thenReturn(Optional.empty());
call(false);
Mockito.verify(registryClient).checkBlob(Mockito.any());
Mockito.verify(registryClient)
.pushBlob(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
} |