Columns: focal_method, test_case
public QueryBuilder createQueryFilter() {
  BoolQueryBuilder filter = boolQuery();
  // anyone
  filter.should(QueryBuilders.termQuery(FIELD_ALLOW_ANYONE, true));
  // users
  Optional.ofNullable(userSession.getUuid())
    .ifPresent(uuid -> filter.should(termQuery(FIELD_USER_IDS, uuid)));
  // groups
  userSession.getGroups()
    .stream()
    .map(GroupDto::getUuid)
    .forEach(groupUuid -> filter.should(termQuery(FIELD_GROUP_IDS, groupUuid)));
  return JoinQueryBuilders.hasParentQuery(
    TYPE_AUTHORIZATION,
    QueryBuilders.boolQuery().filter(filter),
    false);
}
@Test public void createQueryFilter_sets_filter_on_anyone_and_user_id_and_group_ids_if_user_is_logged_in_and_has_groups() { GroupDto group1 = GroupTesting.newGroupDto().setUuid("10"); GroupDto group2 = GroupTesting.newGroupDto().setUuid("11"); UserDto userDto = UserTesting.newUserDto(); userSession.logIn(userDto).setGroups(group1, group2); HasParentQueryBuilder filter = (HasParentQueryBuilder) underTest.createQueryFilter(); assertJson(filter.toString()).isSimilarTo("{" + " \"has_parent\": {" + " \"query\": {" + " \"bool\": {" + " \"filter\": [{" + " \"bool\": {" + " \"should\": [" + " {" + " \"term\": {" + " \"auth_allowAnyone\": {\"value\": true}" + " }" + " }," + " {" + " \"term\": {" + " \"auth_userIds\": {\"value\": \"" + userDto.getUuid() + "\"}" + " }" + " }," + " {" + " \"term\": {" + " \"auth_groupIds\": {\"value\": \"10\"}" + " }" + " }," + " {" + " \"term\": {" + " \"auth_groupIds\": {\"value\": \"11\"}" + " }" + " }" + " ]" + " }" + " }]" + " }" + " }," + " \"parent_type\": \"auth\"" + " }" + "}"); }
public void onEvent(ServiceInstancesChangedEvent event) { if (destroyed.get() || !accept(event) || isRetryAndExpired(event)) { return; } doOnEvent(event); }
@Test
@Order(12)
public void testInstanceWithoutRevision() {
  Set<String> serviceNames = new HashSet<>();
  serviceNames.add("app1");
  ServiceDiscovery serviceDiscovery = Mockito.mock(ServiceDiscovery.class);
  listener = new ServiceInstancesChangedListener(serviceNames, serviceDiscovery);
  ServiceInstancesChangedListener spyListener = Mockito.spy(listener);
  Mockito.doReturn(null).when(metadataService).getMetadataInfo(eq(null));
  ServiceInstancesChangedEvent event = new ServiceInstancesChangedEvent("app1", app1InstancesWithNoRevision);
  spyListener.onEvent(event);
  // notification succeeded
  assertTrue(true);
}
public boolean isEmpty() { return resources.isEmpty(); }
@Test void testIsEmpty() { final ResourceCounter empty = ResourceCounter.empty(); assertThat(empty.isEmpty()).isTrue(); }
@Override public RedisClusterNode clusterGetNodeForKey(byte[] key) { int slot = executorService.getConnectionManager().calcSlot(key); return clusterGetNodeForSlot(slot); }
@Test public void testClusterGetNodeForKey() { RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes()); assertThat(node).isNotNull(); }
@Override public String toString() { return "AfterPane.elementCountAtLeast(" + countElems + ")"; }
@Test public void testToString() { Trigger trigger = AfterPane.elementCountAtLeast(5); assertEquals("AfterPane.elementCountAtLeast(5)", trigger.toString()); }
public static Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodWithEmptyCollectionArguments(
    final MethodCallExpr methodExpression,
    final MvelCompilerContext mvelCompilerContext,
    final Optional<TypedExpression> scope,
    List<TypedExpression> arguments,
    List<Integer> emptyCollectionArgumentsIndexes) {
  Objects.requireNonNull(methodExpression, "MethodExpression parameter cannot be null as the method searches methods based on this expression!");
  Objects.requireNonNull(mvelCompilerContext, "MvelCompilerContext parameter cannot be null!");
  Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
  Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
  if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
    throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! "
        + "emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
        + "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
  } else {
    final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
    Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodResult =
        MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
    if (resolveMethodResult.a.isPresent()) {
      return resolveMethodResult;
    } else {
      // Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
      // This needs to go through all possible combinations.
      final int indexesListSize = emptyCollectionArgumentsIndexes.size();
      for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
        for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
          switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
          resolveMethodResult =
              MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
          if (resolveMethodResult.a.isPresent()) {
            modifyArgumentsBasedOnCoercedCollectionArguments(arguments, coercedArgumentsTypesList);
            return resolveMethodResult;
          }
          switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
        }
        switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
      }
      // No method found, return empty.
      return new Pair<>(Optional.empty(), scope);
    }
  }
}
@Test public void resolveMethodWithEmptyCollectionArgumentsCoerceList() { final MethodCallExpr methodExpression = new MethodCallExpr("setAddresses", new MapCreationLiteralExpression(null, NodeList.nodeList())); final List<TypedExpression> arguments = new ArrayList<>(); arguments.add(new MapExprT(new MapCreationLiteralExpression(null, NodeList.nodeList()))); final TypedExpression scope = new ObjectCreationExpressionT(Collections.emptyList(), Person.class); final Pair<Optional<Method>, Optional<TypedExpression>> resolvedMethodResult = MethodResolutionUtils.resolveMethodWithEmptyCollectionArguments( methodExpression, new MvelCompilerContext(null), Optional.of(scope), arguments, List.of(0)); Assertions.assertThat(resolvedMethodResult.a).isPresent(); Assertions.assertThat(getTypedExpressionsClasses(arguments)) .containsExactlyElementsOf(List.of(ListExprT.class)); }
@JsonIgnore public Map<WorkflowInstance.Status, List<Long>> flatten( Predicate<WorkflowInstance.Status> condition) { return info.entrySet().stream() .filter(e -> condition.test(e.getKey())) .collect( Collectors.toMap( Map.Entry::getKey, e -> { List<Long> instanceIds = new ArrayList<>(); for (Interval interval : e.getValue()) { for (long id = interval.start; id <= interval.end; ++id) { instanceIds.add(id); } } return instanceIds; })); }
@Test public void testFlatten() throws Exception { TestDetails testDetails = loadObject("fixtures/instances/sample-foreach-details.json", TestDetails.class); Map<WorkflowInstance.Status, List<Long>> flatten = testDetails.test1.flatten(e -> true); assertEquals(Collections.singletonList(1L), flatten.get(WorkflowInstance.Status.CREATED)); assertEquals(Arrays.asList(2L, 4L, 21L), flatten.get(WorkflowInstance.Status.IN_PROGRESS)); assertEquals(Collections.singletonList(5L), flatten.get(WorkflowInstance.Status.PAUSED)); assertEquals(Collections.singletonList(7L), flatten.get(WorkflowInstance.Status.TIMED_OUT)); assertEquals(Collections.singletonList(6L), flatten.get(WorkflowInstance.Status.STOPPED)); assertEquals( Arrays.asList(8L, 9L, 11L, 12L, 13L, 14L, 15L), flatten.get(WorkflowInstance.Status.FAILED)); assertEquals( Arrays.asList(10L, 16L, 17L, 18L, 19L, 20L, 22L), flatten.get(WorkflowInstance.Status.SUCCEEDED)); }
public boolean transitionToFailed(Throwable throwable) {
  // When the state enters FINISHING, the only thing remaining is to commit
  // the transaction. It should only be failed if the transaction commit fails.
  return transitionToFailed(throwable, currentState -> currentState != FINISHING && !currentState.isDone());
}
@Test public void testFailed() { QueryStateMachine stateMachine = createQueryStateMachine(); assertTrue(stateMachine.transitionToFailed(FAILED_CAUSE)); assertFinalState(stateMachine, FAILED, FAILED_CAUSE); }
public void changeLevel(LoggerLevel level) { Level logbackLevel = Level.toLevel(level.name()); database.enableSqlLogging(level == TRACE); helper.changeRoot(serverProcessLogging.getLogLevelConfig(), logbackLevel); LoggerFactory.getLogger(ServerLogging.class).info("Level of logs changed to {}", level); }
@Test public void changeLevel_fails_with_IAE_when_level_is_ERROR() { assertThatThrownBy(() -> underTest.changeLevel(ERROR)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("ERROR log level is not supported (allowed levels are [TRACE, DEBUG, INFO])"); }
@Override
public ImportResult importItem(
    UUID jobId,
    IdempotentImportExecutor idempotentImportExecutor,
    TokensAndUrlAuthData authData,
    MediaContainerResource resource)
    throws Exception {
  // Ensure credential is populated
  getOrCreateCredential(authData);
  logDebugJobStatus("%s before transmogrification", jobId, resource);
  // Make the data onedrive compatible
  resource.transmogrify(transmogrificationConfig);
  logDebugJobStatus("%s after transmogrification", jobId, resource);
  for (MediaAlbum album : resource.getAlbums()) {
    // Create a OneDrive folder and then save the id with the mapping data
    idempotentImportExecutor.executeAndSwallowIOExceptions(
        album.getId(), album.getName(), () -> createOneDriveFolder(album));
  }
  executeIdempotentImport(jobId, idempotentImportExecutor, resource.getVideos());
  executeIdempotentImport(jobId, idempotentImportExecutor, resource.getPhotos());
  return ImportResult.OK;
}
@Test public void testImportItemAllSuccess() throws Exception { List<MediaAlbum> albums = ImmutableList.of(new MediaAlbum("id1", "albumb1", "This is a fake albumb")); List<PhotoModel> photos = ImmutableList.of( new PhotoModel("Pic1", "http://fake.com/1.jpg", "A pic", "image/jpg", "p1", "id1", true), new PhotoModel( "Pic2", "https://fake.com/2.png", "fine art", "image/png", "p2", "id1", true)); when(jobStore.getStream(uuid, "http://fake.com/1.jpg")) .thenReturn(new InputStreamWrapper(new ByteArrayInputStream(new byte[CHUNK_SIZE]))); when(jobStore.getStream(uuid, "https://fake.com/2.png")) .thenReturn(new InputStreamWrapper(new ByteArrayInputStream(new byte[CHUNK_SIZE]))); MediaContainerResource data = new MediaContainerResource(albums, photos, null /*videos*/); Call call = mock(Call.class); doReturn(call).when(client).newCall( argThat((Request r) -> r.url().toString().equals( "https://www.baseurl.com/v1.0/me/drive/special/photos/children"))); Response response = mock(Response.class); ResponseBody body = mock(ResponseBody.class); when(body.bytes()) .thenReturn( ResponseBody.create(MediaType.parse("application/json"), "{\"id\": \"id1\"}").bytes()); when(body.string()) .thenReturn( ResponseBody.create(MediaType.parse("application/json"), "{\"id\": \"id1\"}").string()); when(response.code()).thenReturn(200); when(response.body()).thenReturn(body); when(call.execute()).thenReturn(response); Call call2 = mock(Call.class); doReturn(call2).when(client).newCall( argThat((Request r) -> r.url().toString().contains("createUploadSession"))); Response response2 = mock(Response.class); ResponseBody body2 = mock(ResponseBody.class); when(body2.bytes()) .thenReturn(ResponseBody .create(MediaType.parse("application/json"), "{\"uploadUrl\": \"https://scalia.com/link\"}") .bytes()); when(body2.string()) .thenReturn(ResponseBody .create(MediaType.parse("application/json"), "{\"uploadUrl\": \"https://scalia.com/link\"}") .string()); when(response2.code()).thenReturn(200); when(response2.body()).thenReturn(body2); when(call2.execute()).thenReturn(response2); Call call3 = mock(Call.class); doReturn(call3).when(client).newCall( argThat((Request r) -> r.url().toString().contains("scalia.com/link"))); Response response3 = mock(Response.class); ResponseBody body3 = mock(ResponseBody.class); when(body3.bytes()) .thenReturn(ResponseBody.create(MediaType.parse("application/json"), "{\"id\": \"rand1\"}") .bytes()); when(body3.string()) .thenReturn(ResponseBody.create(MediaType.parse("application/json"), "{\"id\": \"rand1\"}") .string()); when(response3.code()).thenReturn(200); when(response3.body()).thenReturn(body3); when(call3.execute()).thenReturn(response3); ImportResult result = importer.importItem(uuid, executor, authData, data); verify(client, times(5)).newCall(any()); assertThat(result).isEqualTo(ImportResult.OK); }
private EntityRelation doDelete(String strFromId, String strFromType, String strRelationType, String strRelationTypeGroup, String strToId, String strToType) throws ThingsboardException { checkParameter(FROM_ID, strFromId); checkParameter(FROM_TYPE, strFromType); checkParameter(RELATION_TYPE, strRelationType); checkParameter(TO_ID, strToId); checkParameter(TO_TYPE, strToType); EntityId fromId = EntityIdFactory.getByTypeAndId(strFromType, strFromId); EntityId toId = EntityIdFactory.getByTypeAndId(strToType, strToId); checkCanCreateRelation(fromId); checkCanCreateRelation(toId); RelationTypeGroup relationTypeGroup = parseRelationTypeGroup(strRelationTypeGroup, RelationTypeGroup.COMMON); EntityRelation relation = new EntityRelation(fromId, toId, strRelationType, relationTypeGroup); return tbEntityRelationService.delete(getTenantId(), getCurrentUser().getCustomerId(), relation, getCurrentUser()); }
@Test public void testDeleteRelationWithOtherToDeviceError() throws Exception { Device device = buildSimpleDevice("Test device 1"); EntityRelation relation = createFromRelation(mainDevice, device, "CONTAINS"); doPost("/api/relation", relation).andExpect(status().isOk()); Device device2 = buildSimpleDevice("Test device 2"); String url = String.format("/api/relation?fromId=%s&fromType=%s&relationType=%s&toId=%s&toType=%s", mainDevice.getUuidId(), EntityType.DEVICE, "CONTAINS", device2.getUuidId(), EntityType.DEVICE ); Mockito.reset(tbClusterService, auditLogService); doDelete(url) .andExpect(status().isNotFound()) .andExpect(statusReason(containsString(msgErrorNotFound))); testNotifyEntityNever(mainDevice.getId(), null); }
public PageListResponse<IndexSetFieldType> getIndexSetFieldTypesListPage( final String indexSetId, final String fieldNameQuery, final List<String> filters, final int page, final int perPage, final String sort, final Sorting.Direction order) { final List<IndexSetFieldType> filteredFields = getFilteredList(indexSetId, fieldNameQuery, filters, sort, order); final int total = filteredFields.size(); final List<IndexSetFieldType> retrievedPage = filteredFields.stream() .skip((long) Math.max(0, page - 1) * perPage) .limit(perPage) .toList(); return PageListResponse.create("", PaginatedList.PaginationInfo.create( total, retrievedPage.size(), page, perPage), total, sort, order.toString().toLowerCase(Locale.ROOT), retrievedPage, IndexSetFieldType.ATTRIBUTES, IndexSetFieldType.ENTITY_DEFAULTS); }
@Test void testReturnsEmptyPageIfCannotCreateIndexSetFromConfig() { IndexSetConfig indexSetConfig = mock(IndexSetConfig.class); doReturn(Optional.of(indexSetConfig)).when(indexSetService).get("I_am_strangely_broken!"); doReturn(new CustomFieldMappings()).when(indexSetConfig).customFieldMappings(); doReturn(null).when(indexSetFactory).create(indexSetConfig); final PageListResponse<IndexSetFieldType> response = toTest.getIndexSetFieldTypesListPage("I_am_strangely_broken!", "", List.of(), 0, 10, "index_set_id", Sorting.Direction.ASC); assertEquals(0, response.total()); assertTrue(response.elements().isEmpty()); verifyNoInteractions(indexFieldTypesService); }
@Override public int compareTo( MonetDbVersion mDbVersion ) { int result = majorVersion.compareTo( mDbVersion.majorVersion ); if ( result != 0 ) { return result; } result = minorVersion.compareTo( mDbVersion.minorVersion ); if ( result != 0 ) { return result; } return patchVersion.compareTo( mDbVersion.patchVersion ); }
@Test public void testCompareVersions_TheSame() throws Exception { String dbVersionA = "11.11.7"; String dbVersionB = "11.11.7"; assertEquals( 0, new MonetDbVersion( dbVersionA ).compareTo( new MonetDbVersion( dbVersionB ) ) ); }
public DirectoryEntry lookUp(
    File workingDirectory, JimfsPath path, Set<? super LinkOption> options) throws IOException {
  checkNotNull(path);
  checkNotNull(options);
  DirectoryEntry result = lookUp(workingDirectory, path, options, 0);
  if (result == null) {
    // an intermediate file in the path did not exist or was not a directory
    throw new NoSuchFileException(path.toString());
  }
  return result;
}
@Test public void testLookup_absolute_withDotDotsInPath() throws IOException { assertExists(lookup("/.."), "/", "/"); assertExists(lookup("/../../.."), "/", "/"); assertExists(lookup("/work/.."), "/", "/"); assertExists(lookup("/work/../work/one/two/../two/three"), "two", "three"); assertExists(lookup("/work/one/two/../../four/../one/two/three/../three"), "two", "three"); assertExists(lookup("/work/one/two/three/../../two/three/.."), "one", "two"); assertExists(lookup("/work/one/two/three/../../two/three/../.."), "work", "one"); }
@Override public void setRuntimeContext(RuntimeContext runtimeContext) { Preconditions.checkNotNull(runtimeContext); if (runtimeContext instanceof IterationRuntimeContext) { super.setRuntimeContext( new RichAsyncFunctionIterationRuntimeContext( (IterationRuntimeContext) runtimeContext)); } else { super.setRuntimeContext(new RichAsyncFunctionRuntimeContext(runtimeContext)); } }
@Test
void testRuntimeContext() {
  RichAsyncFunction<Integer, Integer> function =
      new RichAsyncFunction<Integer, Integer>() {
        private static final long serialVersionUID = 1707630162838967972L;

        @Override
        public void asyncInvoke(Integer input, ResultFuture<Integer> resultFuture) throws Exception {
          // no op
        }
      };

  final String taskName = "foobarTask";
  final OperatorMetricGroup metricGroup = UnregisteredMetricsGroup.createOperatorMetricGroup();
  final int numberOfParallelSubtasks = 43;
  final int indexOfSubtask = 42;
  final int attemptNumber = 1337;
  final String taskNameWithSubtask = "foobarTask (43/43)#1337";
  final Map<String, String> globalJobParameters = new HashMap<>();
  globalJobParameters.put("k1", "v1");
  final ClassLoader userCodeClassLoader = mock(ClassLoader.class);
  final boolean isObjectReused = true;

  RuntimeContext mockedRuntimeContext = mock(RuntimeContext.class);
  TaskInfo mockedTaskInfo = mock(TaskInfo.class);
  when(mockedTaskInfo.getTaskName()).thenReturn(taskName);
  when(mockedTaskInfo.getNumberOfParallelSubtasks()).thenReturn(numberOfParallelSubtasks);
  when(mockedTaskInfo.getIndexOfThisSubtask()).thenReturn(indexOfSubtask);
  when(mockedTaskInfo.getAttemptNumber()).thenReturn(attemptNumber);
  when(mockedTaskInfo.getTaskNameWithSubtasks()).thenReturn(taskNameWithSubtask);
  when(mockedRuntimeContext.getTaskInfo()).thenReturn(mockedTaskInfo);
  when(mockedRuntimeContext.getMetricGroup()).thenReturn(metricGroup);
  when(mockedRuntimeContext.getGlobalJobParameters()).thenReturn(globalJobParameters);
  when(mockedRuntimeContext.isObjectReuseEnabled()).thenReturn(isObjectReused);
  when(mockedRuntimeContext.getUserCodeClassLoader()).thenReturn(userCodeClassLoader);

  function.setRuntimeContext(mockedRuntimeContext);
  RuntimeContext runtimeContext = function.getRuntimeContext();

  assertThat(runtimeContext.getTaskInfo().getTaskName()).isEqualTo(taskName);
  assertThat(runtimeContext.getMetricGroup()).isEqualTo(metricGroup);
  assertThat(runtimeContext.getTaskInfo().getNumberOfParallelSubtasks())
      .isEqualTo(numberOfParallelSubtasks);
  assertThat(runtimeContext.getTaskInfo().getIndexOfThisSubtask()).isEqualTo(indexOfSubtask);
  assertThat(runtimeContext.getTaskInfo().getAttemptNumber()).isEqualTo(attemptNumber);
  assertThat(runtimeContext.getTaskInfo().getTaskNameWithSubtasks())
      .isEqualTo(taskNameWithSubtask);
  assertThat(runtimeContext.getGlobalJobParameters()).isEqualTo(globalJobParameters);
  assertThat(runtimeContext.isObjectReuseEnabled()).isEqualTo(isObjectReused);
  assertThat(runtimeContext.getUserCodeClassLoader()).isEqualTo(userCodeClassLoader);

  assertThatThrownBy(runtimeContext::getDistributedCache)
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(
          () -> runtimeContext.getState(new ValueStateDescriptor<>("foobar", Integer.class, 42)))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(
          () -> runtimeContext.getListState(new ListStateDescriptor<>("foobar", Integer.class)))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(
          () -> runtimeContext.getReducingState(
              new ReducingStateDescriptor<>(
                  "foobar",
                  new ReduceFunction<Integer>() {
                    private static final long serialVersionUID = 2136425961884441050L;

                    @Override
                    public Integer reduce(Integer value1, Integer value2) {
                      return value1;
                    }
                  },
                  Integer.class)))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(
          () -> runtimeContext.getAggregatingState(
              new AggregatingStateDescriptor<>(
                  "foobar",
                  new AggregateFunction<Integer, Integer, Integer>() {
                    @Override
                    public Integer createAccumulator() {
                      return null;
                    }

                    @Override
                    public Integer add(Integer value, Integer accumulator) {
                      return null;
                    }

                    @Override
                    public Integer getResult(Integer accumulator) {
                      return null;
                    }

                    @Override
                    public Integer merge(Integer a, Integer b) {
                      return null;
                    }
                  },
                  Integer.class)))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(
          () -> runtimeContext.getMapState(
              new MapStateDescriptor<>("foobar", Integer.class, String.class)))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(
          () -> runtimeContext.addAccumulator(
              "foobar",
              new Accumulator<Integer, Integer>() {
                private static final long serialVersionUID = -4673320336846482358L;

                @Override
                public void add(Integer value) {
                  // no op
                }

                @Override
                public Integer getLocalValue() {
                  return null;
                }

                @Override
                public void resetLocal() {}

                @Override
                public void merge(Accumulator<Integer, Integer> other) {}

                @Override
                public Accumulator<Integer, Integer> clone() {
                  return null;
                }
              }))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.getAccumulator("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.getIntCounter("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.getLongCounter("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.getDoubleCounter("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.getHistogram("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.getBroadcastVariable("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.hasBroadcastVariable("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(() -> runtimeContext.getBroadcastVariable("foobar"))
      .isInstanceOf(UnsupportedOperationException.class);
  assertThatThrownBy(
          () -> runtimeContext.getBroadcastVariableWithInitializer("foobar", data -> null))
      .isInstanceOf(UnsupportedOperationException.class);
}
@Override public Server build(Environment environment) { printBanner(environment.getName()); final ThreadPool threadPool = createThreadPool(environment.metrics()); final Server server = buildServer(environment.lifecycle(), threadPool); final Handler applicationHandler = createAppServlet(server, environment.jersey(), environment.getObjectMapper(), environment.getValidator(), environment.getApplicationContext(), environment.getJerseyServletContainer(), environment.metrics()); final Handler adminHandler = createAdminServlet(server, environment.getAdminContext(), environment.metrics(), environment.healthChecks(), environment.admin()); final RoutingHandler routingHandler = buildRoutingHandler(environment.metrics(), server, applicationHandler, adminHandler); final Handler gzipHandler = buildGzipHandler(routingHandler); server.setHandler(addStatsHandler(addRequestLog(server, gzipHandler, environment.getName()))); return server; }
@Test void configuresDumpBeforeExit() { http.setDumpBeforeStop(true); assertThat(http.build(environment).isDumpBeforeStop()).isTrue(); }
public static Field p(String fieldName) { return SELECT_ALL_FROM_SOURCES_ALL.where(fieldName); }
@Test void use_contains_instead_of_contains_equiv_when_input_size_is_1() { String q = Q.p("f1").containsEquiv(List.of("p1")) .build(); assertEquals("yql=select * from sources * where f1 contains \"p1\"", q); }
@Override public void startLeaderElection(LeaderContender contender) throws Exception { synchronized (lock) { Preconditions.checkState( leaderContender == null, "No LeaderContender should have been registered with this LeaderElection, yet."); this.leaderContender = contender; this.leaderContender.grantLeadership(sessionID); } }
@Test void testRevokeCallOnClose() throws Exception { final AtomicBoolean revokeLeadershipCalled = new AtomicBoolean(false); final TestingGenericLeaderContender contender = TestingGenericLeaderContender.newBuilder() .setRevokeLeadershipRunnable(() -> revokeLeadershipCalled.set(true)) .build(); try (final LeaderElection testInstance = new StandaloneLeaderElection(SESSION_ID)) { testInstance.startLeaderElection(contender); } assertThat(revokeLeadershipCalled).isTrue(); }
@Override public void add(Component component, Metric metric, Measure measure) { requireNonNull(component); checkValueTypeConsistency(metric, measure); Optional<Measure> existingMeasure = find(component, metric); if (existingMeasure.isPresent()) { throw new UnsupportedOperationException( format( "a measure can be set only once for a specific Component (key=%s), Metric (key=%s). Use update method", component.getKey(), metric.getKey())); } add(component, metric, measure, OverridePolicy.OVERRIDE); }
@Test public void add_throws_NPE_if_Component_argument_is_null() { assertThatThrownBy(() -> underTest.add(null, metric1, SOME_MEASURE)) .isInstanceOf(NullPointerException.class); }
@Override public String arguments() { ArrayList<String> args = new ArrayList<>(); if (buildFile != null) { args.add("-f \"" + FilenameUtils.separatorsToUnix(buildFile) + "\""); } if (target != null) { args.add(target); } return StringUtils.join(args, " "); }
@Test public void shouldContainBuildFileAndTargetWhenBothDefined() throws Exception { RakeTask rakeTask = new RakeTask(); rakeTask.setBuildFile("myrakefile.rb"); rakeTask.setTarget("db:migrate VERSION=0"); assertThat(rakeTask.arguments(), is("-f \"myrakefile.rb\" db:migrate VERSION=0")); }
public static SegmentAssignmentStrategy getSegmentAssignmentStrategy(HelixManager helixManager,
    TableConfig tableConfig, String assignmentType, InstancePartitions instancePartitions) {
  String assignmentStrategy = null;
  TableType currentTableType = tableConfig.getTableType();
  // TODO: Handle segment assignment strategy in future for CONSUMING segments in follow up PR
  // See https://github.com/apache/pinot/issues/9047
  // Accommodate new changes for assignment strategy
  Map<String, SegmentAssignmentConfig> segmentAssignmentConfigMap = tableConfig.getSegmentAssignmentConfigMap();
  if (tableConfig.isDimTable()) {
    // Segment Assignment Strategy for DIM tables
    Preconditions.checkState(currentTableType == TableType.OFFLINE,
        "All Servers Segment assignment Strategy is only applicable to Dim OfflineTables");
    SegmentAssignmentStrategy segmentAssignmentStrategy = new AllServersSegmentAssignmentStrategy();
    segmentAssignmentStrategy.init(helixManager, tableConfig);
    return segmentAssignmentStrategy;
  } else {
    // Try to determine segment assignment strategy from table config
    if (segmentAssignmentConfigMap != null) {
      SegmentAssignmentConfig segmentAssignmentConfig;
      // Use the pre defined segment assignment strategy
      segmentAssignmentConfig = segmentAssignmentConfigMap.get(assignmentType.toUpperCase());
      // Segment assignment config is only applicable to offline tables and completed segments of real time tables
      if (segmentAssignmentConfig != null) {
        assignmentStrategy = segmentAssignmentConfig.getAssignmentStrategy().toLowerCase();
      }
    }
  }
  // Use the existing information to determine segment assignment strategy
  SegmentAssignmentStrategy segmentAssignmentStrategy;
  if (assignmentStrategy == null) {
    // Calculate numReplicaGroups and numPartitions to determine segment assignment strategy
    Preconditions.checkState(instancePartitions != null,
        "Failed to find instance partitions for segment assignment strategy");
    int numReplicaGroups = instancePartitions.getNumReplicaGroups();
    int numPartitions = instancePartitions.getNumPartitions();
    if (numReplicaGroups == 1 && numPartitions == 1) {
      segmentAssignmentStrategy = new BalancedNumSegmentAssignmentStrategy();
    } else {
      segmentAssignmentStrategy = new ReplicaGroupSegmentAssignmentStrategy();
    }
  } else {
    // Set segment assignment strategy depending on strategy set in table config
    switch (assignmentStrategy) {
      case AssignmentStrategy.REPLICA_GROUP_SEGMENT_ASSIGNMENT_STRATEGY:
        segmentAssignmentStrategy = new ReplicaGroupSegmentAssignmentStrategy();
        break;
      case AssignmentStrategy.BALANCE_NUM_SEGMENT_ASSIGNMENT_STRATEGY:
      default:
        segmentAssignmentStrategy = new BalancedNumSegmentAssignmentStrategy();
        break;
    }
  }
  segmentAssignmentStrategy.init(helixManager, tableConfig);
  return segmentAssignmentStrategy;
}
@Test public void testSegmentAssignmentStrategyForDimTable() { TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).setIsDimTable(true).build(); SegmentAssignmentStrategy segmentAssignmentStrategy = SegmentAssignmentStrategyFactory .getSegmentAssignmentStrategy(null, tableConfig, InstancePartitionsType.OFFLINE.toString(), null); Assert.assertNotNull(segmentAssignmentStrategy); Assert.assertTrue(segmentAssignmentStrategy instanceof AllServersSegmentAssignmentStrategy); }
public static Schema schemaFromJavaBeanClass( TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) { return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier); }
@Test public void testNestedMap() { Schema schema = JavaBeanUtils.schemaFromJavaBeanClass( new TypeDescriptor<NestedMapBean>() {}, GetterTypeSupplier.INSTANCE); SchemaTestUtils.assertSchemaEquivalent(NESTED_MAP_BEAN_SCHEMA, schema); }
@Override public Job getJobById(UUID id) { try (final Connection conn = dataSource.getConnection()) { return jobTable(conn) .selectJobById(id) .orElseThrow(() -> new JobNotFoundException(id)); } catch (SQLException e) { throw new StorageException(e); } }
@Test void testGetJobById() throws SQLException { when(resultSet.next()).thenReturn(false); assertThatThrownBy(() -> jobStorageProvider.getJobById(randomUUID())).isInstanceOf(JobNotFoundException.class); }
@Override public Collection<Process> getProcessList() { String taskId = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()).toString().replace("-", ""); Collection<String> triggerPaths = getShowProcessListTriggerPaths(taskId); boolean isCompleted = false; try { triggerPaths.forEach(each -> repository.persist(each, "")); isCompleted = ProcessOperationLockRegistry.getInstance().waitUntilReleaseReady(taskId, () -> isReady(triggerPaths)); return getShowProcessListData(taskId); } finally { repository.delete(ProcessNode.getProcessIdPath(taskId)); if (!isCompleted) { triggerPaths.forEach(repository::delete); } } }
@Test void assertGetProcessList() { when(repository.getChildrenKeys(ComputeNode.getOnlineNodePath(InstanceType.JDBC))).thenReturn(Collections.emptyList()); when(repository.getChildrenKeys(ComputeNode.getOnlineNodePath(InstanceType.PROXY))).thenReturn(Collections.singletonList("abc")); when(repository.query(any())).thenReturn(null); processPersistService.getProcessList(); verify(repository).persist(any(), any()); }
public RuntimeOptionsBuilder parse(String... args) { return parse(Arrays.asList(args)); }
@Test void name_with_spaces_is_preserved() { RuntimeOptions options = parser .parse("--name", "some Name") .build(); Pattern actualPattern = options.getNameFilters().iterator().next(); assertThat(actualPattern.pattern(), is("some Name")); }
@Override public GroupingShuffleReaderIterator<K, V> iterator() throws IOException { ApplianceShuffleEntryReader entryReader = new ApplianceShuffleEntryReader( shuffleReaderConfig, executionContext, operationContext, true); initCounter(entryReader.getDatasetId()); return iterator(entryReader); }
@Test
public void testShuffleReadCounterMultipleExecutingSteps() throws Exception {
  PipelineOptions options = PipelineOptionsFactory.create();
  options
      .as(DataflowPipelineDebugOptions.class)
      .setExperiments(Lists.newArrayList(Experiment.IntertransformIO.getName()));
  BatchModeExecutionContext context = BatchModeExecutionContext.forTesting(options, "testStage");
  final int kFirstShard = 0;

  TestShuffleReader shuffleReader = new TestShuffleReader();
  final int kNumRecords = 10;
  for (int i = 0; i < kNumRecords; ++i) {
    byte[] key = CoderUtils.encodeToByteArray(BigEndianIntegerCoder.of(), i);
    shuffleReader.addEntry(
        newShuffleEntry(fabricatePosition(kFirstShard, key), key, EMPTY_BYTE_ARRAY, key));
  }

  TestShuffleReadCounterFactory shuffleReadCounterFactory = new TestShuffleReadCounterFactory();
  // Note that TestShuffleReader start/end positions are in the
  // space of keys not the positions (TODO: should probably always
  // use positions instead).
  String stop =
      encodeBase64URLSafeString(fabricatePosition(kNumRecords).getPosition().toByteArray());
  TestOperationContext operationContext = TestOperationContext.create();
  GroupingShuffleReader<Integer, Integer> groupingShuffleReader =
      new GroupingShuffleReader<>(
          options,
          null,
          null,
          stop,
          WindowedValue.getFullCoder(
              KvCoder.of(BigEndianIntegerCoder.of(), IterableCoder.of(BigEndianIntegerCoder.of())),
              IntervalWindow.getCoder()),
          context,
          operationContext,
          shuffleReadCounterFactory,
          false /* do not sort values */);

  assertFalse(shuffleReader.isClosed());
  try (GroupingShuffleReaderIterator<Integer, Integer> iter =
      groupingShuffleReader.iterator(shuffleReader)) {
    // Poke the iterator so we can test dynamic splitting.
    assertTrue(iter.start());
    int numRecordsReturned = 1; // including start() above.
    for (; iter.advance(); ++numRecordsReturned) {
      if (numRecordsReturned > 5) {
        setCurrentExecutionState(MOCK_ORIGINAL_NAME_FOR_EXECUTING_STEP2);
      }
      iter.getCurrent().getValue(); // ignored
    }
    assertEquals(kNumRecords, numRecordsReturned);
  }
  assertTrue(shuffleReader.isClosed());

  Map<String, Long> expectedReadBytesMap = new HashMap<>();
  expectedReadBytesMap.put(MOCK_ORIGINAL_NAME_FOR_EXECUTING_STEP1, 48L);
  expectedReadBytesMap.put(MOCK_ORIGINAL_NAME_FOR_EXECUTING_STEP2, 32L);
  expectShuffleReadCounterEquals(shuffleReadCounterFactory, expectedReadBytesMap);
}
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("eue.listing.chunksize")); }
@Test public void testListForSharedFolder() throws Exception { final EueResourceIdProvider fileid = new EueResourceIdProvider(session); final Path sourceFolder = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path folder2 = new EueDirectoryFeature(session, fileid).mkdir(new Path(sourceFolder, new AlphanumericRandomStringService().random(), EnumSet.of(directory)), new TransferStatus()); assertTrue(new EueFindFeature(session, fileid).find(folder2)); final ShareCreationResponseEntry shareCreationResponseEntry = createShare(fileid, folder2); final String shareName = shareCreationResponseEntry.getEntity().getName(); final PathAttributes attr = new EueListService(session, fileid).list(sourceFolder, new DisabledListProgressListener()).get(folder2).attributes(); assertNotNull(attr.getLink()); assertEquals(attr.getLink(), new EueShareUrlProvider(session.getHost(), session.userShares()).toUrl(folder2).find(DescriptiveUrl.Type.signed)); new EueDeleteFeature(session, fileid).delete(Collections.singletonList(sourceFolder), new DisabledPasswordCallback(), new Delete.DisabledCallback()); }
public static long estimateSize(StructType tableSchema, long totalRecords) { if (totalRecords == Long.MAX_VALUE) { return totalRecords; } long result; try { result = LongMath.checkedMultiply(tableSchema.defaultSize(), totalRecords); } catch (ArithmeticException e) { result = Long.MAX_VALUE; } return result; }
@Test public void testEstimateSize() throws IOException { long tableSize = SparkSchemaUtil.estimateSize(SparkSchemaUtil.convert(TEST_SCHEMA), 1); assertThat(tableSize).as("estimateSize matches with expected approximation").isEqualTo(24); }
@SuppressWarnings("unchecked") public static void validateResponse(HttpURLConnection conn, int expectedStatus) throws IOException { if (conn.getResponseCode() != expectedStatus) { Exception toThrow; InputStream es = null; try { es = conn.getErrorStream(); Map json = JsonSerialization.mapReader().readValue(es); json = (Map) json.get(ERROR_JSON); String exClass = (String) json.get(ERROR_CLASSNAME_JSON); String exMsg = (String) json.get(ERROR_MESSAGE_JSON); if (exClass != null) { try { ClassLoader cl = HttpExceptionUtils.class.getClassLoader(); Class klass = cl.loadClass(exClass); Preconditions.checkState(Exception.class.isAssignableFrom(klass), "Class [%s] is not a subclass of Exception", klass); MethodHandle methodHandle = PUBLIC_LOOKUP.findConstructor( klass, EXCEPTION_CONSTRUCTOR_TYPE); toThrow = (Exception) methodHandle.invoke(exMsg); } catch (Throwable t) { toThrow = new IOException(String.format( "HTTP status [%d], exception [%s], message [%s], URL [%s]", conn.getResponseCode(), exClass, exMsg, conn.getURL())); } } else { String msg = (exMsg != null) ? exMsg : conn.getResponseMessage(); toThrow = new IOException(String.format( "HTTP status [%d], message [%s], URL [%s]", conn.getResponseCode(), msg, conn.getURL())); } } catch (Exception ex) { toThrow = new IOException(String.format( "HTTP status [%d], message [%s], URL [%s], exception [%s]", conn.getResponseCode(), conn.getResponseMessage(), conn.getURL(), ex.toString()), ex); } finally { if (es != null) { try { es.close(); } catch (IOException ex) { //ignore } } } throwEx(toThrow); } }
@Test public void testValidateResponseJsonErrorUnknownException() throws Exception { Map<String, Object> json = new HashMap<String, Object>(); json.put(HttpExceptionUtils.ERROR_EXCEPTION_JSON, "FooException"); json.put(HttpExceptionUtils.ERROR_CLASSNAME_JSON, "foo.FooException"); json.put(HttpExceptionUtils.ERROR_MESSAGE_JSON, "EX"); Map<String, Object> response = new HashMap<String, Object>(); response.put(HttpExceptionUtils.ERROR_JSON, json); ObjectMapper jsonMapper = new ObjectMapper(); String msg = jsonMapper.writeValueAsString(response); InputStream is = new ByteArrayInputStream(msg.getBytes(StandardCharsets.UTF_8)); HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); Mockito.when(conn.getErrorStream()).thenReturn(is); Mockito.when(conn.getResponseMessage()).thenReturn("msg"); Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST); LambdaTestUtils.interceptAndValidateMessageContains(IOException.class, Arrays.asList(Integer.toString(HttpURLConnection.HTTP_BAD_REQUEST), "foo.FooException", "EX"), () -> HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED)); }
public MultiMap<Value, T, List<T>> get(final KeyDefinition keyDefinition) { return tree.get(keyDefinition); }
@Test void testUpdateAge() throws Exception { final MultiMapChangeHandler changeHandler = mock(MultiMapChangeHandler.class); map.get(AGE).addChangeListener(changeHandler); toni.setAge(10); final MultiMap<Value, Person, List<Person>> age = map.get(AGE); assertThat(age.get(new Value(20))).doesNotContain(toni); assertThat(age.get(new Value(10))).contains(toni); }
public String get(String key) { return properties.getProperty(key); }
@Test(expected = NullPointerException.class) public void testGet_whenKeyNull() { HazelcastProperties properties = new HazelcastProperties(config); properties.get(null); }
public static Map<String, Map<String, String>> rebalanceReplicaGroupBasedTable(
    Map<String, Map<String, String>> currentAssignment, InstancePartitions instancePartitions,
    Map<Integer, List<String>> instancePartitionIdToSegmentsMap) {
  Map<String, Map<String, String>> newAssignment = new TreeMap<>();
  for (Map.Entry<Integer, List<String>> entry : instancePartitionIdToSegmentsMap.entrySet()) {
    // Uniformly spray the segment partitions over the instance partitions
    int instancePartitionId = entry.getKey();
    List<String> segments = entry.getValue();
    rebalanceReplicaGroupBasedPartition(currentAssignment, instancePartitions, instancePartitionId, segments,
        newAssignment);
  }
  return newAssignment;
}
@Test
public void testRebalanceReplicaGroupBasedTable() {
  // Table is rebalanced on a per partition basis, so testing rebalancing one partition is enough
  int numSegments = 90;
  List<String> segments = SegmentAssignmentTestUtils.getNameList(SEGMENT_NAME_PREFIX, numSegments);
  Map<Integer, List<String>> partitionIdToSegmentsMap = Collections.singletonMap(0, segments);
  int numInstances = 9;
  List<String> instances = SegmentAssignmentTestUtils.getNameList(INSTANCE_NAME_PREFIX, numInstances);

  // {
  //   0_0=[instance_0, instance_1, instance_2],
  //   0_1=[instance_3, instance_4, instance_5],
  //   0_2=[instance_6, instance_7, instance_8]
  // }
  InstancePartitions instancePartitions = new InstancePartitions(null);
  int numInstancesPerReplicaGroup = numInstances / NUM_REPLICAS;
  int instanceIdToAdd = 0;
  for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) {
    List<String> instancesForReplicaGroup = new ArrayList<>(numInstancesPerReplicaGroup);
    for (int i = 0; i < numInstancesPerReplicaGroup; i++) {
      instancesForReplicaGroup.add(instances.get(instanceIdToAdd++));
    }
    instancePartitions.setInstances(0, replicaGroupId, instancesForReplicaGroup);
  }

  // Uniformly spray segments to the instances:
  // Replica-group 0: [instance_0, instance_1, instance_2],
  // Replica-group 1: [instance_3, instance_4, instance_5],
  // Replica-group 2: [instance_6, instance_7, instance_8]
  //   segment_0 segment_1 segment_2
  //   segment_3 segment_4 segment_5
  //   ...
  Map<String, Map<String, String>> currentAssignment = new TreeMap<>();
  for (int segmentId = 0; segmentId < numSegments; segmentId++) {
    List<String> instancesAssigned = new ArrayList<>(NUM_REPLICAS);
    for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) {
      int assignedInstanceId =
          segmentId % numInstancesPerReplicaGroup + replicaGroupId * numInstancesPerReplicaGroup;
      instancesAssigned.add(instances.get(assignedInstanceId));
    }
    currentAssignment.put(segments.get(segmentId),
        SegmentAssignmentUtils.getInstanceStateMap(instancesAssigned, SegmentStateModel.ONLINE));
  }

  // There should be 90 segments assigned
  assertEquals(currentAssignment.size(), numSegments);
  // Each segment should have 3 replicas
  for (Map<String, String> instanceStateMap : currentAssignment.values()) {
    assertEquals(instanceStateMap.size(), NUM_REPLICAS);
  }
  // Each instance should have 30 segments assigned
  int[] numSegmentsAssignedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsAssignedPerInstance(currentAssignment, instances);
  int[] expectedNumSegmentsAssignedPerInstance = new int[numInstances];
  int numSegmentsPerInstance = numSegments * NUM_REPLICAS / numInstances;
  Arrays.fill(expectedNumSegmentsAssignedPerInstance, numSegmentsPerInstance);
  assertEquals(numSegmentsAssignedPerInstance, expectedNumSegmentsAssignedPerInstance);
  // Current assignment should already be balanced
  assertEquals(SegmentAssignmentUtils
      .rebalanceReplicaGroupBasedTable(currentAssignment, instancePartitions, partitionIdToSegmentsMap),
      currentAssignment);

  // Replace instance_0 with instance_9, instance_4 with instance_10
  // {
  //   0_0=[instance_9, instance_1, instance_2],
  //   0_1=[instance_3, instance_10, instance_5],
  //   0_2=[instance_6, instance_7, instance_8]
  // }
  List<String> newInstances = new ArrayList<>(numInstances);
  List<String> newReplicaGroup0Instances = new ArrayList<>(instancePartitions.getInstances(0, 0));
  String newReplicaGroup0Instance = INSTANCE_NAME_PREFIX + 9;
  newReplicaGroup0Instances.set(0, newReplicaGroup0Instance);
  newInstances.addAll(newReplicaGroup0Instances);
  List<String> newReplicaGroup1Instances = new ArrayList<>(instancePartitions.getInstances(0, 1));
  String newReplicaGroup1Instance = INSTANCE_NAME_PREFIX + 10;
  newReplicaGroup1Instances.set(1, newReplicaGroup1Instance);
  newInstances.addAll(newReplicaGroup1Instances);
  List<String> newReplicaGroup2Instances = instancePartitions.getInstances(0, 2);
  newInstances.addAll(newReplicaGroup2Instances);
  InstancePartitions newInstancePartitions = new InstancePartitions(null);
  newInstancePartitions.setInstances(0, 0, newReplicaGroup0Instances);
  newInstancePartitions.setInstances(0, 1, newReplicaGroup1Instances);
  newInstancePartitions.setInstances(0, 2, newReplicaGroup2Instances);
  Map<String, Map<String, String>> newAssignment = SegmentAssignmentUtils
      .rebalanceReplicaGroupBasedTable(currentAssignment, newInstancePartitions, partitionIdToSegmentsMap);

  // There should be 90 segments assigned
  assertEquals(newAssignment.size(), numSegments);
  // Each segment should have 3 replicas
  for (Map<String, String> instanceStateMap : newAssignment.values()) {
    assertEquals(instanceStateMap.size(), NUM_REPLICAS);
  }
  // Each instance should have 30 segments assigned
  numSegmentsAssignedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsAssignedPerInstance(newAssignment, newInstances);
  assertEquals(numSegmentsAssignedPerInstance, expectedNumSegmentsAssignedPerInstance);
  // All segments on instance_0 should be moved to instance_9, all segments on instance_4 should be moved to
  // instance_10
  Map<String, Integer> numSegmentsToBeMovedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsToBeMovedPerInstance(currentAssignment, newAssignment);
  assertEquals(numSegmentsToBeMovedPerInstance.size(), 2);
  assertEquals((int) numSegmentsToBeMovedPerInstance.get(newReplicaGroup0Instance), numSegmentsPerInstance);
  assertEquals((int) numSegmentsToBeMovedPerInstance.get(newReplicaGroup1Instance), numSegmentsPerInstance);
  String oldReplicaGroup0Instance = INSTANCE_NAME_PREFIX + 0;
  String oldReplicaGroup1Instance = INSTANCE_NAME_PREFIX + 4;
  for (String segmentName : segments) {
    Map<String, String> oldInstanceStateMap = currentAssignment.get(segmentName);
    if (oldInstanceStateMap.containsKey(oldReplicaGroup0Instance)) {
      assertTrue(newAssignment.get(segmentName).containsKey(newReplicaGroup0Instance));
    }
    if (oldInstanceStateMap.containsKey(oldReplicaGroup1Instance)) {
      assertTrue(newAssignment.get(segmentName).containsKey(newReplicaGroup1Instance));
    }
  }

  // Remove 3 instances (1 from each replica-group)
  // {
  //   0_0=[instance_0, instance_1],
  //   0_1=[instance_3, instance_4],
  //   0_2=[instance_6, instance_7]
  // }
  int newNumInstances = numInstances - 3;
  int newNumInstancesPerReplicaGroup = newNumInstances / NUM_REPLICAS;
  newInstances = new ArrayList<>(newNumInstances);
  for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) {
    List<String> newInstancesForReplicaGroup =
        instancePartitions.getInstances(0, replicaGroupId).subList(0, newNumInstancesPerReplicaGroup);
    newInstancePartitions.setInstances(0, replicaGroupId, newInstancesForReplicaGroup);
    newInstances.addAll(newInstancesForReplicaGroup);
  }
  newAssignment = SegmentAssignmentUtils
      .rebalanceReplicaGroupBasedTable(currentAssignment, newInstancePartitions, partitionIdToSegmentsMap);

  // There should be 90 segments assigned
  assertEquals(newAssignment.size(), numSegments);
  // Each segment should have 3 replicas
  for (Map<String, String> instanceStateMap : newAssignment.values()) {
    assertEquals(instanceStateMap.size(), NUM_REPLICAS);
  }
  // Each instance should have 45 segments assigned
  numSegmentsAssignedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsAssignedPerInstance(newAssignment, newInstances);
  expectedNumSegmentsAssignedPerInstance = new int[newNumInstances];
  int newNumSegmentsPerInstance = numSegments * NUM_REPLICAS / newNumInstances;
  Arrays.fill(expectedNumSegmentsAssignedPerInstance, newNumSegmentsPerInstance);
  assertEquals(numSegmentsAssignedPerInstance, expectedNumSegmentsAssignedPerInstance);
  // Each instance should have 15 segments to be moved to it
  numSegmentsToBeMovedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsToBeMovedPerInstance(currentAssignment, newAssignment);
  assertEquals(numSegmentsToBeMovedPerInstance.size(), newNumInstances);
  for (String instanceName : newInstances) {
    assertEquals((int) numSegmentsToBeMovedPerInstance.get(instanceName),
        newNumSegmentsPerInstance - numSegmentsPerInstance);
  }

  // Add 6 instances (2 to each replica-group)
  // {
  //   0_0=[instance_0, instance_1, instance_2, instance_9, instance_10],
  //   0_1=[instance_3, instance_4, instance_5, instance_11, instance_12],
  //   0_2=[instance_6, instance_7, instance_8, instance_13, instance_14]
  // }
  newNumInstances = numInstances + 6;
  newNumInstancesPerReplicaGroup = newNumInstances / NUM_REPLICAS;
  newInstances = SegmentAssignmentTestUtils.getNameList(INSTANCE_NAME_PREFIX, newNumInstances);
  instanceIdToAdd = numInstances;
  for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) {
    List<String> newInstancesForReplicaGroup = new ArrayList<>(instancePartitions.getInstances(0, replicaGroupId));
    for (int i = 0; i < newNumInstancesPerReplicaGroup - numInstancesPerReplicaGroup; i++) {
      newInstancesForReplicaGroup.add(newInstances.get(instanceIdToAdd++));
    }
    newInstancePartitions.setInstances(0, replicaGroupId, newInstancesForReplicaGroup);
  }
  newAssignment = SegmentAssignmentUtils
      .rebalanceReplicaGroupBasedTable(currentAssignment, newInstancePartitions, partitionIdToSegmentsMap);

  // There should be 90 segments assigned
  assertEquals(newAssignment.size(), numSegments);
  // Each segment should have 3 replicas
  for (Map<String, String> instanceStateMap : newAssignment.values()) {
    assertEquals(instanceStateMap.size(), NUM_REPLICAS);
  }
  // Each instance should have 18 segments assigned
  numSegmentsAssignedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsAssignedPerInstance(newAssignment, newInstances);
  expectedNumSegmentsAssignedPerInstance = new int[newNumInstances];
  newNumSegmentsPerInstance = numSegments * NUM_REPLICAS / newNumInstances;
  Arrays.fill(expectedNumSegmentsAssignedPerInstance, newNumSegmentsPerInstance);
  assertEquals(numSegmentsAssignedPerInstance, expectedNumSegmentsAssignedPerInstance);
  // Each new added instance should have 18 segments to be moved to it
  numSegmentsToBeMovedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsToBeMovedPerInstance(currentAssignment, newAssignment);
  assertEquals(numSegmentsToBeMovedPerInstance.size(), 6);
  for (int instanceId = numInstances; instanceId < newNumInstances; instanceId++) {
    assertEquals((int) numSegmentsToBeMovedPerInstance.get(newInstances.get(instanceId)),
        newNumSegmentsPerInstance);
  }

  // Change all instances
  // {
  //   0_0=[i_0, i_1, i_2],
  //   0_1=[i_3, i_4, i_5],
  //   0_2=[i_6, i_7, i_8]
  // }
  newInstances = SegmentAssignmentTestUtils.getNameList("i_", numInstances);
  instanceIdToAdd = 0;
  for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) {
    List<String> instancesForReplicaGroup = new ArrayList<>(numInstancesPerReplicaGroup);
    for (int i = 0; i < numInstancesPerReplicaGroup; i++) {
      instancesForReplicaGroup.add(newInstances.get(instanceIdToAdd++));
    }
    newInstancePartitions.setInstances(0, replicaGroupId, instancesForReplicaGroup);
  }
  newAssignment = SegmentAssignmentUtils
      .rebalanceReplicaGroupBasedTable(currentAssignment, newInstancePartitions, partitionIdToSegmentsMap);

  // There should be 90 segments assigned
  assertEquals(newAssignment.size(), numSegments);
  // Each segment should have 3 replicas
  for (Map<String, String> instanceStateMap : newAssignment.values()) {
    assertEquals(instanceStateMap.size(), NUM_REPLICAS);
  }
  // Each instance should have 30 segments assigned
  numSegmentsAssignedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsAssignedPerInstance(newAssignment, newInstances);
  expectedNumSegmentsAssignedPerInstance = new int[numInstances];
  Arrays.fill(expectedNumSegmentsAssignedPerInstance, numSegmentsPerInstance);
  assertEquals(numSegmentsAssignedPerInstance, expectedNumSegmentsAssignedPerInstance);
  // Each instance should have 30 segments to be moved to it
  numSegmentsToBeMovedPerInstance =
      SegmentAssignmentUtils.getNumSegmentsToBeMovedPerInstance(currentAssignment, newAssignment);
  assertEquals(numSegmentsToBeMovedPerInstance.size(), numInstances);
  for (String instanceName : newInstances) {
    assertEquals((int) numSegmentsToBeMovedPerInstance.get(instanceName), numSegmentsPerInstance);
  }
}
@Override public <T> Future<T> submit(Callable<T> task) { final RunnableFuture<T> rf = new CompletableFutureTask<>(task); execute(rf); return rf; }
@Test public void submitRunnable_withResult() throws Exception { final int taskCount = 10; ManagedExecutorService executorService = newManagedExecutorService(1, taskCount); final String result = randomString(); Future[] futures = new Future[taskCount]; for (int i = 0; i < taskCount; i++) { futures[i] = executorService.submit(() -> { }, result); } checkAllDone(Arrays.asList(futures)); for (Future future : futures) { assertEquals(result, future.get()); } }
static AnnotatedClusterState generatedStateFrom(final Params params) { final ContentCluster cluster = params.cluster; final ClusterState workingState = ClusterState.emptyState(); final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>(); for (final NodeInfo nodeInfo : cluster.getNodeInfos()) { final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons); workingState.setNodeState(nodeInfo.getNode(), nodeState); } takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params); final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params); if (reasonToBeDown.isPresent()) { workingState.setClusterState(State.DOWN); } workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params)); return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons); }
@Test void cluster_not_down_if_more_than_min_count_of_distributors_are_available() { final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) .bringEntireClusterUp() .reportDistributorNodeState(0, State.DOWN); final ClusterStateGenerator.Params params = fixture.generatorParams().minDistributorNodesUp(2); final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); assertThat(state.toString(), equalTo("distributor:3 .0.s:d storage:3")); assertThat(state.getClusterStateReason(), equalTo(Optional.empty())); }
@Override public ServerConfiguration getServerConfiguration(String issuer) { return servers.get(issuer); }
@Test public void getClientConfiguration_noIssuer() { ServerConfiguration result = service.getServerConfiguration("www.badexample.net"); assertThat(result, is(nullValue())); }
public static void main(String[] args) {
  if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) {
    System.out.println(usage);
    return;
  }
  // Copy args, because CommandFormat mutates the list.
  List<String> argsList = new ArrayList<String>(Arrays.asList(args));
  CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar");
  try {
    cf.parse(argsList);
  } catch (UnknownOptionException e) {
    terminate(1, "unrecognized option");
    return;
  }
  String classPath = System.getProperty("java.class.path");
  if (cf.getOpt("-glob")) {
    // The classpath returned from the property has been globbed already.
    System.out.println(classPath);
  } else if (cf.getOpt("-jar")) {
    if (argsList.isEmpty() || argsList.get(0) == null || argsList.get(0).isEmpty()) {
      terminate(1, "-jar option requires path of jar file to write");
      return;
    }
    // Write the classpath into the manifest of a temporary jar file.
    Path workingDir = new Path(System.getProperty("user.dir"));
    final String tmpJarPath;
    try {
      tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir, System.getenv())[0];
    } catch (IOException e) {
      terminate(1, "I/O error creating jar: " + e.getMessage());
      return;
    }
    // Rename the temporary file to its final location.
    String jarPath = argsList.get(0);
    try {
      FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
    } catch (IOException e) {
      terminate(1, "I/O error renaming jar temporary file to path: " + e.getMessage());
      return;
    }
  }
}
@Test public void testGlob() { Classpath.main(new String[] { "--glob" }); String strOut = new String(stdout.toByteArray(), UTF8); assertEquals(System.getProperty("java.class.path"), strOut.trim()); assertTrue(stderr.toByteArray().length == 0); }
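A small companion sketch for the help path of the focal main method above: no args, -h, or --help should print the usage text and return. Since the usage string itself is not shown here, the sketch (reusing the stdout/stderr capture fixtures from the test above) only asserts that something was written to stdout.
@Test
public void testHelpPrintsUsage() {
    Classpath.main(new String[] { "-h" });
    String strOut = new String(stdout.toByteArray(), UTF8);
    // The usage text is printed; its exact content is not asserted here.
    assertFalse(strOut.trim().isEmpty());
    assertTrue(stderr.toByteArray().length == 0);
}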
public static <K, E, V> Collector<E, ImmutableSetMultimap.Builder<K, V>, ImmutableSetMultimap<K, V>> unorderedFlattenIndex( Function<? super E, K> keyFunction, Function<? super E, Stream<V>> valueFunction) { verifyKeyAndValueFunctions(keyFunction, valueFunction); BiConsumer<ImmutableSetMultimap.Builder<K, V>, E> accumulator = (map, element) -> { K key = requireNonNull(keyFunction.apply(element), KEY_FUNCTION_CANT_RETURN_NULL_MESSAGE); Stream<V> valueStream = requireNonNull(valueFunction.apply(element), VALUE_FUNCTION_CANT_RETURN_NULL_MESSAGE); valueStream.forEach(value -> map.put(key, value)); }; BinaryOperator<ImmutableSetMultimap.Builder<K, V>> merger = (m1, m2) -> { for (Map.Entry<K, V> entry : m2.build().entries()) { m1.put(entry.getKey(), entry.getValue()); } return m1; }; return Collector.of( ImmutableSetMultimap::builder, accumulator, merger, ImmutableSetMultimap.Builder::build); }
@Test public void unorderedFlattenIndex_with_valueFunction_parallel_stream() { SetMultimap<String, String> multimap = HUGE_LIST.parallelStream().collect(unorderedFlattenIndex(identity(), Stream::of)); assertThat(multimap.keySet()).isEqualTo(HUGE_SET); }
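The focal collector rejects null results from the value function via requireNonNull, so a hedged sketch of that contract (the message constant is not shown here, so only the exception type is asserted):
@Test
public void unorderedFlattenIndex_fails_fast_if_valueFunction_returns_null() {
    // The accumulator applies the value function to the first element and rejects the null stream.
    assertThatThrownBy(() -> Stream.of("a").collect(unorderedFlattenIndex(identity(), s -> null)))
        .isInstanceOf(NullPointerException.class);
}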
@Override public GroupAssignment assign( GroupSpec groupSpec, SubscribedTopicDescriber subscribedTopicDescriber ) throws PartitionAssignorException { if (groupSpec.memberIds().isEmpty()) { return new GroupAssignment(Collections.emptyMap()); } else if (groupSpec.subscriptionType() == SubscriptionType.HOMOGENEOUS) { return assignHomogeneousGroup(groupSpec, subscribedTopicDescriber); } else { return assignHeterogeneousGroup(groupSpec, subscribedTopicDescriber); } }
@Test public void testReassignmentWhenOnePartitionAddedForTwoMembersTwoTopics() { // Simulating adding a partition - originally T1 -> 3 Partitions and T2 -> 3 Partitions Map<Uuid, TopicMetadata> topicMetadata = new HashMap<>(); topicMetadata.put(topic1Uuid, new TopicMetadata( topic1Uuid, topic1Name, 4, Collections.emptyMap() )); topicMetadata.put(topic2Uuid, new TopicMetadata( topic2Uuid, topic2Name, 4, Collections.emptyMap() )); Map<String, MemberSubscriptionAndAssignmentImpl> members = new TreeMap<>(); members.put(memberA, new MemberSubscriptionAndAssignmentImpl( Optional.empty(), Optional.empty(), mkSet(topic1Uuid, topic2Uuid), new Assignment(mkAssignment( mkTopicAssignment(topic1Uuid, 0, 1), mkTopicAssignment(topic2Uuid, 0, 1) )) )); members.put(memberB, new MemberSubscriptionAndAssignmentImpl( Optional.empty(), Optional.empty(), mkSet(topic1Uuid, topic2Uuid), new Assignment(mkAssignment( mkTopicAssignment(topic1Uuid, 2), mkTopicAssignment(topic2Uuid, 2) )) )); GroupSpec groupSpec = new GroupSpecImpl( members, HOMOGENEOUS, invertedTargetAssignment(members) ); SubscribedTopicDescriberImpl subscribedTopicMetadata = new SubscribedTopicDescriberImpl(topicMetadata); GroupAssignment computedAssignment = assignor.assign( groupSpec, subscribedTopicMetadata ); Map<String, Map<Uuid, Set<Integer>>> expectedAssignment = new HashMap<>(); expectedAssignment.put(memberA, mkAssignment( mkTopicAssignment(topic1Uuid, 0, 1), mkTopicAssignment(topic2Uuid, 0, 1) )); expectedAssignment.put(memberB, mkAssignment( mkTopicAssignment(topic1Uuid, 2, 3), mkTopicAssignment(topic2Uuid, 2, 3) )); assertAssignment(expectedAssignment, computedAssignment); }
static boolean isValidIpEntity(String ip) { if (ip == null) return true; try { InetAddress.getByName(ip); return true; } catch (UnknownHostException e) { return false; } }
@Test public void testIsValidIpEntityWithNull() { assertTrue(ClientQuotaControlManager.isValidIpEntity(null)); }
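Two more cases implied by the focal method: a literal IPv4 address is parsed by InetAddress.getByName without any DNS lookup and is valid, while a name that cannot be resolved is not. The second case assumes the test environment cannot resolve the reserved .invalid name.
@Test
public void testIsValidIpEntityWithLiteralAndUnresolvableHost() {
    // A literal address is parsed directly, no resolution needed.
    assertTrue(ClientQuotaControlManager.isValidIpEntity("192.168.1.1"));
    // Not a valid literal, so getByName falls back to resolution, which fails for .invalid names.
    assertFalse(ClientQuotaControlManager.isValidIpEntity("example.invalid"));
}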
@SuppressWarnings("MethodLength") public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header) { messageHeaderDecoder.wrap(buffer, offset); final int templateId = messageHeaderDecoder.templateId(); final int schemaId = messageHeaderDecoder.schemaId(); if (schemaId != MessageHeaderDecoder.SCHEMA_ID) { if (listenerExtension != null) { listenerExtension.onExtensionMessage( messageHeaderDecoder.blockLength(), templateId, schemaId, messageHeaderDecoder.version(), buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, length - MessageHeaderDecoder.ENCODED_LENGTH); return; } throw new ClusterException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId); } switch (templateId) { case SessionMessageHeaderDecoder.TEMPLATE_ID: { sessionMessageHeaderDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = sessionMessageHeaderDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { listener.onMessage( sessionId, sessionMessageHeaderDecoder.timestamp(), buffer, offset + SESSION_HEADER_LENGTH, length - SESSION_HEADER_LENGTH, header); } break; } case SessionEventDecoder.TEMPLATE_ID: { sessionEventDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = sessionEventDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { listener.onSessionEvent( sessionEventDecoder.correlationId(), sessionId, sessionEventDecoder.leadershipTermId(), sessionEventDecoder.leaderMemberId(), sessionEventDecoder.code(), sessionEventDecoder.detail()); } break; } case NewLeaderEventDecoder.TEMPLATE_ID: { newLeaderEventDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = newLeaderEventDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { listener.onNewLeader( sessionId, newLeaderEventDecoder.leadershipTermId(), newLeaderEventDecoder.leaderMemberId(), newLeaderEventDecoder.ingressEndpoints()); } break; } case AdminResponseDecoder.TEMPLATE_ID: { adminResponseDecoder.wrap( buffer, offset + MessageHeaderDecoder.ENCODED_LENGTH, messageHeaderDecoder.blockLength(), messageHeaderDecoder.version()); final long sessionId = adminResponseDecoder.clusterSessionId(); if (sessionId == clusterSessionId) { final long correlationId = adminResponseDecoder.correlationId(); final AdminRequestType requestType = adminResponseDecoder.requestType(); final AdminResponseCode responseCode = adminResponseDecoder.responseCode(); final String message = adminResponseDecoder.message(); final int payloadOffset = adminResponseDecoder.offset() + AdminResponseDecoder.BLOCK_LENGTH + AdminResponseDecoder.messageHeaderLength() + message.length() + AdminResponseDecoder.payloadHeaderLength(); final int payloadLength = adminResponseDecoder.payloadLength(); listener.onAdminResponse( sessionId, correlationId, requestType, responseCode, message, buffer, payloadOffset, payloadLength); } break; } default: break; } }
@Test void onFragmentShouldDelegateToEgressListenerOnUnknownSchemaId() { final int schemaId = 17; final int templateId = 19; messageHeaderEncoder .wrap(buffer, 0) .schemaId(schemaId) .templateId(templateId); final EgressListenerExtension listenerExtension = mock(EgressListenerExtension.class); final Header header = new Header(0, 0); final EgressAdapter adapter = new EgressAdapter( mock(EgressListener.class), listenerExtension, 0, mock(Subscription.class), 3); adapter.onFragment(buffer, 0, MessageHeaderDecoder.ENCODED_LENGTH * 2, header); verify(listenerExtension).onExtensionMessage( anyInt(), eq(templateId), eq(schemaId), eq(0), eq(buffer), eq(MessageHeaderDecoder.ENCODED_LENGTH), eq(MessageHeaderDecoder.ENCODED_LENGTH)); verifyNoMoreInteractions(listenerExtension); }
@Override public void deleteSource(final SourceName sourceName, final boolean restoreInProgress) { synchronized (metaStoreLock) { dataSources.compute(sourceName, (ignored, sourceInfo) -> { if (sourceInfo == null) { throw new KsqlException(String.format("No data source with name %s exists.", sourceName.text())); } if (dropConstraints.containsKey(sourceName)) { final String references = dropConstraints.get(sourceName).stream().map(SourceName::text) .sorted().collect(Collectors.joining(", ")); // If this request is part of the metastore restoration process, then ignore any // constraints this source may have. This logic fixes a compatibility issue caused by // https://github.com/confluentinc/ksql/pull/6545, which makes the restoration to fail if // this source has another source referencing to it. if (restoreInProgress) { LOG.warn("The following streams and/or tables read from the '{}' source: [{}].\n" + "Ignoring DROP constraints when restoring the metastore. \n" + "Future CREATE statements that recreate this '{}' source may not have " + "DROP constraints if existing source references exist.", sourceName.text(), references); dropConstraints.remove(sourceName); } else { throw new KsqlReferentialIntegrityException(String.format( "Cannot drop %s.%n" + "The following streams and/or tables read from this source: [%s].%n" + "You need to drop them before dropping %s.", sourceName.text(), references, sourceName.text() )); } } // Remove drop constraints from the referenced sources sourceInfo.references.stream().forEach(ref -> dropConstraint(ref, sourceName)); LOG.info("Source {} deleted from the metastore", sourceName.text()); return null; }); } }
@Test public void shouldThrowOnRemoveUnknownSource() { // When: final Exception e = assertThrows( KsqlException.class, () -> metaStore.deleteSource(of("bob")) ); // Then: assertThat(e.getMessage(), containsString("No data source with name bob exists")); }
@Override public ColumnStatistics buildColumnStatistics() { Optional<BinaryStatistics> binaryStatistics = buildBinaryStatistics(); if (binaryStatistics.isPresent()) { verify(nonNullValueCount > 0); return new BinaryColumnStatistics(nonNullValueCount, null, rawSize, storageSize, binaryStatistics.get()); } return new ColumnStatistics(nonNullValueCount, null, rawSize, storageSize); }
@Test public void testBlockBinaryStatistics() { String alphabets = "abcdefghijklmnopqrstuvwxyz"; VariableWidthBlockBuilder blockBuilder = new VariableWidthBlockBuilder(null, alphabets.length(), alphabets.length()); Slice slice = utf8Slice(alphabets); for (int i = 0; i < slice.length(); i++) { VARBINARY.writeSlice(blockBuilder, slice, i, 1); } blockBuilder.appendNull(); BinaryStatisticsBuilder binaryStatisticsBuilder = new BinaryStatisticsBuilder(); binaryStatisticsBuilder.addBlock(VARBINARY, blockBuilder); BinaryStatistics binaryStatistics = binaryStatisticsBuilder.buildColumnStatistics().getBinaryStatistics(); assertEquals(binaryStatistics.getSum(), slice.length()); }
@Override public Optional<NativeEntity<EventDefinitionDto>> loadNativeEntity(NativeEntityDescriptor nativeEntityDescriptor) { final Optional<EventDefinitionDto> eventDefinition = eventDefinitionService.get(nativeEntityDescriptor.id().id()); return eventDefinition.map(eventDefinitionDto -> NativeEntity.create(nativeEntityDescriptor, eventDefinitionDto)); }
@Test @MongoDBFixtures("EventDefinitionFacadeTest.json") public void loadNativeEntity() { final NativeEntityDescriptor nativeEntityDescriptor = NativeEntityDescriptor .create(ModelId.of("content-pack-id"), ModelId.of("5d4032513d2746703d1467f6"), ModelTypes.EVENT_DEFINITION_V1, "title"); final Optional<NativeEntity<EventDefinitionDto>> optionalNativeEntity = facade.loadNativeEntity(nativeEntityDescriptor); assertThat(optionalNativeEntity).isPresent(); final NativeEntity<EventDefinitionDto> nativeEntity = optionalNativeEntity.get(); assertThat(nativeEntity.entity()).isNotNull(); final EventDefinitionDto eventDefinition = nativeEntity.entity(); assertThat(eventDefinition.id()).isEqualTo("5d4032513d2746703d1467f6"); }
@Udf(description = "Returns all substrings of the input that matches the given regex pattern") public List<String> regexpExtractAll( @UdfParameter(description = "The regex pattern") final String pattern, @UdfParameter(description = "The input string to apply regex on") final String input ) { return regexpExtractAll(pattern, input, 0); }
@Test public void shouldReturnSubstringWhenMatched() { assertThat(udf.regexpExtractAll("e.*", "test string"), contains("est string")); assertThat(udf.regexpExtractAll(".", "test"), contains("t", "e", "s", "t")); assertThat(udf.regexpExtractAll("[AEIOU].{4}", "usEr nAmE 1308"), contains("Er nA", "E 130")); }
@Nullable public byte[] getValue() { return mValue; }
@Test public void setValue_UINT8() { final MutableData data = new MutableData(new byte[1]); data.setValue(200, Data.FORMAT_UINT8, 0); assertArrayEquals(new byte[] { (byte) 0xC8 } , data.getValue()); }
public void unzip(ZipInputStream zipInputStream, File destDir) throws IOException { try(ZipInputStream zis = zipInputStream) { destDir.mkdirs(); ZipEntry zipEntry = zis.getNextEntry(); while (zipEntry != null) { extractTo(zipEntry, zis, destDir); zipEntry = zis.getNextEntry(); } } }
@Test void shouldThrowUpWhileTryingToUnzipIfAnyOfTheFilePathsInArchiveHasAPathContainingDotDotSlashPath() throws URISyntaxException, IOException { try { zipUtil.unzip(new File(getClass().getResource("/archive_traversal_attack.zip").toURI()), destDir); fail("archive_traversal_attack.zip is capable of causing archive traversal attack and hence should not be allowed."); } catch (IllegalPathException e) { assertThat(e.getMessage()).isEqualTo("File ../2.txt is outside extraction target directory"); } }
public static <T> Iterables<T> iterables() { return new Iterables<>(); }
@Test @Category(ValidatesRunner.class) public void testFlattenIterablesSets() { Set<String> linesSet = ImmutableSet.copyOf(LINES); PCollection<Set<String>> input = p.apply(Create.<Set<String>>of(linesSet).withCoder(SetCoder.of(StringUtf8Coder.of()))); PCollection<String> output = input.apply(Flatten.iterables()); PAssert.that(output).containsInAnyOrder(LINES_ARRAY); p.run(); }
private int deregisterSubCluster(String subClusterId) throws IOException, YarnException { PrintWriter writer = new PrintWriter(new OutputStreamWriter( System.out, StandardCharsets.UTF_8)); ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); DeregisterSubClusterRequest request = DeregisterSubClusterRequest.newInstance(subClusterId); DeregisterSubClusterResponse response = adminProtocol.deregisterSubCluster(request); FormattingCLIUtils formattingCLIUtils = new FormattingCLIUtils(DEREGISTER_SUBCLUSTER_TITLE) .addHeaders(DEREGISTER_SUBCLUSTER_HEADER); List<DeregisterSubClusters> deregisterSubClusters = response.getDeregisterSubClusters(); deregisterSubClusters.forEach(deregisterSubCluster -> { String responseSubClusterId = deregisterSubCluster.getSubClusterId(); String deregisterState = deregisterSubCluster.getDeregisterState(); String lastHeartBeatTime = deregisterSubCluster.getLastHeartBeatTime(); String info = deregisterSubCluster.getInformation(); String subClusterState = deregisterSubCluster.getSubClusterState(); formattingCLIUtils.addLine(responseSubClusterId, deregisterState, lastHeartBeatTime, info, subClusterState); }); writer.print(formattingCLIUtils.render()); writer.flush(); return EXIT_SUCCESS; }
@Test public void testDeregisterSubCluster() throws Exception { PrintStream oldOutPrintStream = System.out; ByteArrayOutputStream dataOut = new ByteArrayOutputStream(); System.setOut(new PrintStream(dataOut)); oldOutPrintStream.println(dataOut); String[] args = {"-deregisterSubCluster", "-sc", "SC-1"}; assertEquals(0, rmAdminCLI.run(args)); args = new String[]{"-deregisterSubCluster", "--subClusterId", "SC-1"}; assertEquals(0, rmAdminCLI.run(args)); }
public static String extractCharset(String line, String defaultValue) { if (line == null) { return defaultValue; } final String[] parts = line.split(" "); String charsetInfo = ""; for (var part : parts) { if (part.startsWith("charset")) { charsetInfo = part; break; } } final String charset = charsetInfo.replace("charset=", "").replace(";", ""); if (charset.isBlank()) { return defaultValue; } return charset; }
@DisplayName("null charset information") @Test void testExtractCharsetNull() { assertEquals("UTF-8", TelegramAsyncHandler.extractCharset(null, StandardCharsets.UTF_8.name())); }
public abstract HashMap<Integer, T_Sess> loadAllRawSessionsOf(OmemoDevice userDevice, BareJid contact) throws IOException;
@Test public void loadAllRawSessionsReturnsEmptyMapTest() throws IOException { HashMap<Integer, T_Sess> sessions = store.loadAllRawSessionsOf(alice, bob.getJid()); assertNotNull(sessions); assertEquals(0, sessions.size()); }
boolean isSignRequestsEnabled() { return configuration.getBoolean(SIGN_REQUESTS_ENABLED).orElse(false); }
@Test public void is_sign_requests_enabled() { settings.setProperty("sonar.auth.saml.signature.enabled", true); assertThat(underTest.isSignRequestsEnabled()).isTrue(); settings.setProperty("sonar.auth.saml.signature.enabled", false); assertThat(underTest.isSignRequestsEnabled()).isFalse(); }
public String kv2String(String k, Object v) { this.kvs.put(k, v == null ? "" : v); return toString(); }
@Test public void testKv2StringShouldPrintMessageAndAllKeyAndValuePairs() { String result = logMessage.kv2String("key", "value"); assertEquals("key=value", result); }
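Since the focal method maps a null value to the empty string, a null value should render as a bare "key=". This sketch assumes toString joins pairs exactly as in the test above.
@Test
public void testKv2StringShouldMapNullValueToEmptyString() {
    String result = logMessage.kv2String("key", null);
    assertEquals("key=", result);
}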
public static void removeMatching(Collection<String> values, String... patterns) { removeMatching(values, Arrays.asList(patterns)); }
@Test public void testRemoveMatchingWithMatchingPattern() throws Exception { Collection<String> values = stringToList("A"); StringCollectionUtil.removeMatching(values, "A"); assertTrue(values.isEmpty()); }
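The complementary case: a pattern that matches nothing leaves the collection untouched.
@Test
public void testRemoveMatchingWithNonMatchingPattern() throws Exception {
    Collection<String> values = stringToList("A");
    StringCollectionUtil.removeMatching(values, "B");
    // "B" matches nothing, so "A" survives.
    assertTrue(values.contains("A"));
}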
@Override public String decrypt(String encryptedText) { try { javax.crypto.Cipher cipher = javax.crypto.Cipher.getInstance(CRYPTO_ALGO); ByteBuffer byteBuffer = ByteBuffer.wrap(Base64.decodeBase64(StringUtils.trim(encryptedText))); byte[] iv = new byte[GCM_IV_LENGTH_IN_BYTES]; byteBuffer.get(iv); byte[] cipherText = new byte[byteBuffer.remaining()]; byteBuffer.get(cipherText); cipher.init(javax.crypto.Cipher.DECRYPT_MODE, loadSecretFile(), new GCMParameterSpec(GCM_TAG_LENGTH_IN_BITS, iv)); byte[] cipherData = cipher.doFinal(cipherText); return new String(cipherData, StandardCharsets.UTF_8); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new IllegalStateException(e); } }
@Test public void decrypt() throws Exception { AesGCMCipher cipher = new AesGCMCipher(pathToSecretKey()); String input1 = "this is a secret"; String input2 = "asdkfja;ksldjfowiaqueropijadfskncmnv/sdjflskjdflkjiqoeuwroiqu./qewirouasoidfhjaskldfhjkhckjnkiuoewiruoasdjkfalkufoiwueroijuqwoerjsdkjflweoiru"; assertThat(cipher.decrypt(cipher.encrypt(input1))).isEqualTo(input1); assertThat(cipher.decrypt(cipher.encrypt(input1))).isEqualTo(input1); assertThat(cipher.decrypt(cipher.encrypt(input2))).isEqualTo(input2); assertThat(cipher.decrypt(cipher.encrypt(input2))).isEqualTo(input2); }
public void createNewCodeDefinition(DbSession dbSession, String projectUuid, String mainBranchUuid, String defaultBranchName, String newCodeDefinitionType, @Nullable String newCodeDefinitionValue) { boolean isCommunityEdition = editionProvider.get().filter(EditionProvider.Edition.COMMUNITY::equals).isPresent(); NewCodePeriodType newCodePeriodType = parseNewCodeDefinitionType(newCodeDefinitionType); NewCodePeriodDto dto = new NewCodePeriodDto(); dto.setType(newCodePeriodType); dto.setProjectUuid(projectUuid); if (isCommunityEdition) { dto.setBranchUuid(mainBranchUuid); } getNewCodeDefinitionValueProjectCreation(newCodePeriodType, newCodeDefinitionValue, defaultBranchName).ifPresent(dto::setValue); if (!CaycUtils.isNewCodePeriodCompliant(dto.getType(), dto.getValue())) { throw new IllegalArgumentException("Failed to set the New Code Definition. The given value is not compatible with the Clean as You Code methodology. " + "Please refer to the documentation for compliant options."); } dbClient.newCodePeriodDao().insert(dbSession, dto); }
@Test public void createNewCodeDefinition_throw_IAE_if_previous_version_type_and_value_provided() { assertThatThrownBy(() -> newCodeDefinitionResolver.createNewCodeDefinition(dbSession, DEFAULT_PROJECT_ID, MAIN_BRANCH_UUID, MAIN_BRANCH, PREVIOUS_VERSION.name(), "10.2.3")) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("Unexpected value for newCodeDefinitionType 'PREVIOUS_VERSION'"); }
public void setContentType(byte[] contentType) { this.contentType = contentType; }
@Test public void testSetContentType() { restValue.setContentType(PAYLOAD); assertEquals(PAYLOAD, restValue.getContentType()); assertContains(restValue.toString(), "contentType='" + new String(PAYLOAD, StandardCharsets.UTF_8)); }
@Override public final void isEqualTo(@Nullable Object other) { if (Objects.equal(actual, other)) { return; } // Fail but with a more descriptive message: if (actual == null || !(other instanceof Map)) { super.isEqualTo(other); return; } containsEntriesInAnyOrder((Map<?, ?>) other, /* allowUnexpected= */ false); }
@Test public void isEqualToNotConsistentWithEquals() { TreeMap<String, Integer> actual = new TreeMap<>(CASE_INSENSITIVE_ORDER); TreeMap<String, Integer> expected = new TreeMap<>(CASE_INSENSITIVE_ORDER); actual.put("one", 1); expected.put("ONE", 1); /* * Our contract doesn't guarantee that the following test will pass. It *currently* does, * though, and if we change that behavior, we want this test to let us know. */ assertThat(actual).isEqualTo(expected); }
@Override public Serde<List<?>> getSerde( final PersistenceSchema schema, final Map<String, String> formatProperties, final KsqlConfig ksqlConfig, final Supplier<SchemaRegistryClient> srClientFactory, final boolean isKey) { FormatProperties.validateProperties(name(), formatProperties, getSupportedProperties()); SerdeUtils.throwOnUnsupportedFeatures(schema.features(), supportedFeatures()); if (!schema.columns().isEmpty()) { throw new KsqlException("The '" + NAME + "' format can only be used when no columns are defined. Got: " + schema.columns()); } return new KsqlVoidSerde<>(); }
@Test public void shouldThrowOnColumns() { // Given: when(schema.columns()).thenReturn(ImmutableList.of(column)); // When: final Exception e = assertThrows( KsqlException.class, () -> format.getSerde(schema, formatProps, ksqlConfig, srClientFactory, false) ); // Then: assertThat(e.getMessage(), is("The 'NONE' format can only be used when no columns are defined. Got: [column]")); }
@Override public void execute(Exchange exchange) throws SmppException { CancelSm cancelSm = createCancelSm(exchange); if (log.isDebugEnabled()) { log.debug("Canceling a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), cancelSm.getMessageId()); } try { session.cancelShortMessage( cancelSm.getServiceType(), cancelSm.getMessageId(), TypeOfNumber.valueOf(cancelSm.getSourceAddrTon()), NumberingPlanIndicator.valueOf(cancelSm.getSourceAddrNpi()), cancelSm.getSourceAddr(), TypeOfNumber.valueOf(cancelSm.getDestAddrTon()), NumberingPlanIndicator.valueOf(cancelSm.getDestAddrNpi()), cancelSm.getDestinationAddress()); } catch (Exception e) { throw new SmppException(e); } if (log.isDebugEnabled()) { log.debug("Canceled a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), cancelSm.getMessageId()); } Message message = ExchangeHelper.getResultMessage(exchange); message.setHeader(SmppConstants.ID, cancelSm.getMessageId()); }
@Test public void executeWithConfigurationData() throws Exception { Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut); exchange.getIn().setHeader(SmppConstants.COMMAND, "CancelSm"); exchange.getIn().setHeader(SmppConstants.ID, "1"); command.execute(exchange); verify(session).cancelShortMessage("", "1", TypeOfNumber.UNKNOWN, NumberingPlanIndicator.UNKNOWN, "1616", TypeOfNumber.UNKNOWN, NumberingPlanIndicator.UNKNOWN, "1717"); assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID)); }
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload, final ConnectionSession connectionSession) { switch (commandPacketType) { case COM_QUIT: return new MySQLComQuitPacket(); case COM_INIT_DB: return new MySQLComInitDbPacket(payload); case COM_FIELD_LIST: return new MySQLComFieldListPacket(payload); case COM_QUERY: return new MySQLComQueryPacket(payload); case COM_STMT_PREPARE: return new MySQLComStmtPreparePacket(payload); case COM_STMT_EXECUTE: MySQLServerPreparedStatement serverPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex())); return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount()); case COM_STMT_SEND_LONG_DATA: return new MySQLComStmtSendLongDataPacket(payload); case COM_STMT_RESET: return new MySQLComStmtResetPacket(payload); case COM_STMT_CLOSE: return new MySQLComStmtClosePacket(payload); case COM_SET_OPTION: return new MySQLComSetOptionPacket(payload); case COM_PING: return new MySQLComPingPacket(); case COM_RESET_CONNECTION: return new MySQLComResetConnectionPacket(); default: return new MySQLUnsupportedCommandPacket(commandPacketType); } }
@Test void assertNewInstanceWithComStmtFetchPacket() { assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_STMT_FETCH, payload, connectionSession), instanceOf(MySQLUnsupportedCommandPacket.class)); }
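The factory's simple branches can be pinned down the same way; two sketches grounded directly in the switch above:
@Test
void assertNewInstanceWithComPingPacket() {
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_PING, payload, connectionSession),
        instanceOf(MySQLComPingPacket.class));
}

@Test
void assertNewInstanceWithComQuitPacket() {
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_QUIT, payload, connectionSession),
        instanceOf(MySQLComQuitPacket.class));
}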
@Override public <T extends State> T state(StateNamespace namespace, StateTag<T> address) { return workItemState.get(namespace, address, StateContexts.nullContext()); }
@Test public void testMultimapEntriesAndKeysMergeLocalAdd() { final String tag = "multimap"; StateTag<MultimapState<byte[], Integer>> addr = StateTags.multimap(tag, ByteArrayCoder.of(), VarIntCoder.of()); MultimapState<byte[], Integer> multimapState = underTest.state(NAMESPACE, addr); final byte[] key1 = "key1".getBytes(StandardCharsets.UTF_8); final byte[] key2 = "key2".getBytes(StandardCharsets.UTF_8); final byte[] key3 = "key3".getBytes(StandardCharsets.UTF_8); SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> entriesFuture = SettableFuture.create(); when(mockReader.multimapFetchAllFuture( false, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of())) .thenReturn(entriesFuture); SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> keysFuture = SettableFuture.create(); when(mockReader.multimapFetchAllFuture( true, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of())) .thenReturn(keysFuture); ReadableState<Iterable<Map.Entry<byte[], Integer>>> entriesResult = multimapState.entries().readLater(); ReadableState<Iterable<byte[]>> keysResult = multimapState.keys().readLater(); waitAndSet( entriesFuture, Arrays.asList(multimapEntry(key1, 1, 2, 3), multimapEntry(key2, 2, 3, 4)), 30); waitAndSet(keysFuture, Arrays.asList(multimapEntry(key1), multimapEntry(key2)), 30); multimapState.put(key1, 7); multimapState.put(dup(key2), 8); multimapState.put(dup(key3), 8); Iterable<Map.Entry<byte[], Integer>> entries = entriesResult.read(); assertEquals(9, Iterables.size(entries)); assertThat( entries, Matchers.containsInAnyOrder( multimapEntryMatcher(key1, 1), multimapEntryMatcher(key1, 2), multimapEntryMatcher(key1, 3), multimapEntryMatcher(key1, 7), multimapEntryMatcher(key2, 4), multimapEntryMatcher(key2, 2), multimapEntryMatcher(key2, 3), multimapEntryMatcher(key2, 8), multimapEntryMatcher(key3, 8))); Iterable<byte[]> keys = keysResult.read(); assertEquals(3, Iterables.size(keys)); assertThat(keys, Matchers.containsInAnyOrder(key1, key2, key3)); }
public void prepareFieldNamesParameters( String[] parameters, String[] parameterFieldNames, String[] parameterValues, NamedParams namedParam, JobEntryTrans jobEntryTrans ) throws UnknownParamException { for ( int idx = 0; idx < parameters.length; idx++ ) { // Grab the parameter value set in the Trans job entry // Set fieldNameParameter only if exists and if it is not declared any staticValue( parameterValues array ) String thisValue = namedParam.getParameterValue( parameters[ idx ] ); // multiple executions on the same jobEntryTrans variableSpace need to be updated even for nulls or blank values. // so we have to ask if that same variable had a value before and if it had - and the new value is empty - // we should set it as a blank value instead of ignoring it. // NOTE: we should only replace it if we have a parameterFieldNames defined -> parameterFieldNames[ idx ] ) != null if ( !Utils.isEmpty( jobEntryTrans.getVariable( parameters[ idx ] ) ) && Utils.isEmpty( thisValue ) && idx < parameterFieldNames.length && !Utils.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { jobEntryTrans.setVariable( parameters[ idx ], "" ); } // Set value only if is not empty at namedParam and exists in parameterFieldNames if ( !Utils.isEmpty( thisValue ) && idx < parameterFieldNames.length ) { // If exists then ask if is not empty if ( !Utils.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { // If is not empty then we have to ask if it exists too in parameterValues array, since the values in // parameterValues prevail over parameterFieldNames if ( idx < parameterValues.length ) { // If is empty at parameterValues array, then we can finally add that variable with that value if ( Utils.isEmpty( Const.trim( parameterValues[ idx ] ) ) ) { jobEntryTrans.setVariable( parameters[ idx ], thisValue ); } } else { // Or if not in parameterValues then we can add that variable with that value too jobEntryTrans.setVariable( parameters[ idx ], thisValue ); } } } } }
@Test public void testPrepareFieldNamesParametersWithNulls() throws UnknownParamException { //NOTE: this only tests the prepareFieldNamesParameters function not all variable substitution logic // array of params String[] parameterNames = new String[7]; parameterNames[0] = "param1"; parameterNames[1] = "param2"; parameterNames[2] = "param3"; parameterNames[3] = "param4"; parameterNames[4] = "param5"; parameterNames[5] = "param6"; parameterNames[6] = "param7"; // array of fieldNames params String[] parameterFieldNames = new String[7]; parameterFieldNames[0] = null; parameterFieldNames[2] = "ValueParam3"; parameterFieldNames[3] = "FieldValueParam4"; parameterFieldNames[4] = "FieldValueParam5"; parameterFieldNames[6] = "FieldValueParam7"; // array of parameterValues params String[] parameterValues = new String[7]; parameterValues[1] = "ValueParam2"; parameterValues[3] = ""; parameterValues[4] = "StaticValueParam5"; parameterValues[5] = "StaticValueParam6"; JobEntryTrans jet = new JobEntryTrans(); VariableSpace variableSpace = new Variables(); jet.copyVariablesFrom( variableSpace ); jet.setVariable( "param6", "someDummyPreviousValue6" ); jet.setVariable( "param7", "someDummyPreviousValue7" ); //at this point StreamColumnNameParams are already inserted in namedParams NamedParams namedParam = Mockito.mock( NamedParamsDefault.class ); Mockito.doReturn( "value1" ).when( namedParam ).getParameterValue( "param1" ); Mockito.doReturn( "value2" ).when( namedParam ).getParameterValue( "param2" ); Mockito.doReturn( "value3" ).when( namedParam ).getParameterValue( "param3" ); Mockito.doReturn( "value4" ).when( namedParam ).getParameterValue( "param4" ); Mockito.doReturn( "value5" ).when( namedParam ).getParameterValue( "param5" ); Mockito.doReturn( "" ).when( namedParam ).getParameterValue( "param6" ); Mockito.doReturn( "" ).when( namedParam ).getParameterValue( "param7" ); jet.prepareFieldNamesParameters( parameterNames, parameterFieldNames, parameterValues, namedParam, jet ); // "param1" has parameterFieldName value = null and no parameterValues defined so it should be null Assert.assertEquals( null, jet.getVariable( "param1" ) ); // "param2" has only parameterValues defined and no parameterFieldName value so it should be null Assert.assertEquals( null, jet.getVariable( "param2" ) ); // "param3" has only the parameterFieldName defined so it should return the mocked value Assert.assertEquals( "value3", jet.getVariable( "param3" ) ); // "param4" has parameterFieldName and also an empty parameterValues defined so it should return the mocked value Assert.assertEquals( "value4", jet.getVariable( "param4" ) ); // "param5" has parameterFieldName and also parameterValues defined with a not empty value so it should return null Assert.assertEquals( null, jet.getVariable( "param5" ) ); // "param6" only has a parameterValues defined with a not empty value and has a previous value on it ( someDummyPreviousValue6 ) // so it should keep "someDummyPreviousValue6" since there is no parameterFieldNames definition Assert.assertEquals( "someDummyPreviousValue6", jet.getVariable( "param6" ) ); // "param7" only has a parameterFieldNames defined and has a previous value on it ( someDummyPreviousValue7 ) // so it should update to the new value mocked = "" even it is a blank value - PDI-18227 Assert.assertEquals( "", jet.getVariable( "param7" ) ); }
@PostMapping("/refresh-token") public CustomResponse<TokenResponse> refreshToken(@RequestBody @Valid final TokenRefreshRequest tokenRefreshRequest) { return refreshTokenService.refreshToken(tokenRefreshRequest); }
@Test void refreshToken_ValidRequest_ReturnsTokenResponse() throws Exception { // Given TokenRefreshRequest tokenRefreshRequest = TokenRefreshRequest.builder() .refreshToken("validRefreshToken") .build(); TokenResponse tokenResponse = TokenResponse.builder() .accessToken("newAccessToken") .accessTokenExpiresAt(System.currentTimeMillis() + 3600) .refreshToken("newRefreshToken") .build(); CustomResponse<TokenResponse> expectedResponse = CustomResponse.successOf(tokenResponse); // When when(refreshTokenService.refreshToken(any(TokenRefreshRequest.class))).thenReturn(expectedResponse); // Then mockMvc.perform(post("/api/v1/authentication/users/refresh-token") .contentType(MediaType.APPLICATION_JSON) .content(objectMapper.writeValueAsString(tokenRefreshRequest))) .andExpect(status().isOk()) .andExpect(jsonPath("$.isSuccess").value(true)) .andExpect(jsonPath("$.httpStatus").value("OK")) .andExpect(jsonPath("$.response.accessToken").value("newAccessToken")); verify(refreshTokenService, times(1)).refreshToken(any(TokenRefreshRequest.class)); }
static void process(int maxMessages, MessageFormatter formatter, ConsumerWrapper consumer, PrintStream output, boolean skipMessageOnError) { while (messageCount < maxMessages || maxMessages == -1) { ConsumerRecord<byte[], byte[]> msg; try { msg = consumer.receive(); } catch (WakeupException we) { LOG.trace("Caught WakeupException because consumer is shutdown, ignore and terminate."); // Consumer will be closed return; } catch (Throwable t) { LOG.error("Error processing message, terminating consumer process: ", t); // Consumer will be closed return; } messageCount += 1; try { formatter.writeTo(new ConsumerRecord<>(msg.topic(), msg.partition(), msg.offset(), msg.timestamp(), msg.timestampType(), 0, 0, msg.key(), msg.value(), msg.headers(), Optional.empty()), output); } catch (Throwable t) { if (skipMessageOnError) { LOG.error("Error processing message, skipping this message: ", t); } else { // Consumer will be closed throw t; } } if (checkErr(output)) { // Consumer will be closed return; } } }
@Test public void shouldStopWhenOutputCheckErrorFails() { ConsoleConsumer.ConsumerWrapper consumer = mock(ConsoleConsumer.ConsumerWrapper.class); MessageFormatter formatter = mock(MessageFormatter.class); PrintStream printStream = mock(PrintStream.class); ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("foo", 1, 1, new byte[0], new byte[0]); when(consumer.receive()).thenReturn(record); //Simulate an error on System.out after the first record has been printed when(printStream.checkError()).thenReturn(true); ConsoleConsumer.process(-1, formatter, consumer, printStream, true); verify(formatter).writeTo(any(), eq(printStream)); verify(consumer).receive(); verify(printStream).checkError(); consumer.cleanup(); }
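The inverse of skipMessageOnError is also visible in the focal loop: with skipping disabled, a formatter failure propagates to the caller. A minimal sketch, using maxMessages = -1 so the static messageCount field cannot short-circuit the loop:
@Test
public void shouldRethrowFormatterErrorWhenSkipMessageOnErrorIsDisabled() {
    ConsoleConsumer.ConsumerWrapper consumer = mock(ConsoleConsumer.ConsumerWrapper.class);
    MessageFormatter formatter = mock(MessageFormatter.class);
    PrintStream printStream = mock(PrintStream.class);
    ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("foo", 1, 1, new byte[0], new byte[0]);
    when(consumer.receive()).thenReturn(record);
    doThrow(new RuntimeException("boom")).when(formatter).writeTo(any(), eq(printStream));
    // skipMessageOnError = false, so the formatter's exception is rethrown.
    assertThrows(RuntimeException.class,
        () -> ConsoleConsumer.process(-1, formatter, consumer, printStream, false));
}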
public void run(OutputReceiver<PartitionRecord> receiver) throws InvalidProtocolBufferException { // Erase any existing missing partitions. metadataTableDao.writeDetectNewPartitionMissingPartitions(new HashMap<>()); List<PartitionRecord> partitions = metadataTableDao.readAllStreamPartitions(); for (PartitionRecord partitionRecord : partitions) { if (partitionRecord.getUuid().isEmpty()) { partitionRecord.setUuid(UniqueIdGenerator.getNextId()); } if (endTime != null) { partitionRecord.setEndTime(endTime); } LOG.info("DNP: Outputting existing partition: {}", partitionRecord); metrics.incListPartitionsCount(); receiver.outputWithTimestamp(partitionRecord, Instant.EPOCH); } List<NewPartition> newPartitions = metadataTableDao.readNewPartitionsIncludingDeleted(); for (NewPartition newPartition : newPartitions) { processNewPartitionsAction.processNewPartition(newPartition, receiver); } }
@Test public void testOutputNewPartitions() throws InvalidProtocolBufferException { ByteStringRange partition1 = ByteStringRange.create("A", "B"); ChangeStreamContinuationToken token1 = ChangeStreamContinuationToken.create(partition1, "tokenAB"); Instant watermark1 = Instant.now().minus(Duration.standardSeconds(10)); NewPartition newSplitPartition = new NewPartition(partition1, Collections.singletonList(token1), watermark1); metadataTableDao.writeNewPartition(newSplitPartition); ByteStringRange partition2 = ByteStringRange.create("B", "D"); ByteStringRange partition2parent1 = ByteStringRange.create("C", "D"); ChangeStreamContinuationToken token2 = ChangeStreamContinuationToken.create(partition2parent1, "tokenCD"); Instant watermark2 = Instant.now().plus(Duration.standardSeconds(10)); NewPartition newPartitionMissingParent = new NewPartition(partition2, Collections.singletonList(token2), watermark2); metadataTableDao.writeNewPartition(newPartitionMissingParent); ByteStringRange partition3 = ByteStringRange.create("D", "E"); ChangeStreamContinuationToken token3 = ChangeStreamContinuationToken.create(partition3, "tokenDE"); Instant watermark3 = Instant.now().plus(Duration.standardSeconds(5)); NewPartition deletedNewPartition = new NewPartition(partition3, Collections.singletonList(token3), watermark3); metadataTableDao.writeNewPartition(deletedNewPartition); metadataTableDao.markNewPartitionForDeletion(deletedNewPartition); // There are only 2 NewPartition rows because partition3 is deleted. assertEquals(2, metadataTableDao.readNewPartitions().size()); action.run(receiver); verify(receiver, times(2)) .outputWithTimestamp(partitionRecordArgumentCaptor.capture(), eq(Instant.EPOCH)); List<PartitionRecord> actualPartitions = partitionRecordArgumentCaptor.getAllValues(); PartitionRecord actualPartition1 = actualPartitions.get(0); assertEquals(partition1, actualPartition1.getPartition()); assertEquals(1, actualPartition1.getChangeStreamContinuationTokens().size()); assertEquals(token1, actualPartition1.getChangeStreamContinuationTokens().get(0)); assertNull(actualPartition1.getStartTime()); assertEquals(watermark1, actualPartition1.getParentLowWatermark()); assertEquals(endTime, actualPartition1.getEndTime()); assertEquals( Collections.singletonList(newSplitPartition), actualPartition1.getParentPartitions()); // Uuid is filled. assertFalse(actualPartition1.getUuid().isEmpty()); // The 2nd partition is not partition2 but partition3 because partition2 is missing a parent. // Even though partition3 is marked for deletion, we still process it. PartitionRecord actualPartition2 = actualPartitions.get(1); assertEquals(partition3, actualPartition2.getPartition()); assertEquals(1, actualPartition2.getChangeStreamContinuationTokens().size()); assertEquals(token3, actualPartition2.getChangeStreamContinuationTokens().get(0)); assertNull(actualPartition2.getStartTime()); assertEquals(watermark3, actualPartition2.getParentLowWatermark()); assertEquals(endTime, actualPartition2.getEndTime()); assertEquals( Collections.singletonList(deletedNewPartition), actualPartition2.getParentPartitions()); // There is only 1 NewPartition row now because we processed them except for the NewPartition // with missing parent. assertEquals(1, metadataTableDao.readNewPartitions().size()); }
public static <InputT, OutputT> PTransform<PCollection<InputT>, PCollection<OutputT>> to( Class<OutputT> clazz) { return to(TypeDescriptor.of(clazz)); }
@Test @Category(NeedsRunner.class) public void testFromRowsUnboxingPrimitive() { PCollection<Long> longs = pipeline .apply(Create.of(new POJO1())) .apply(Select.fieldNames("field2")) .apply(Convert.to(TypeDescriptors.longs())); PAssert.that(longs).containsInAnyOrder((Long) EXPECTED_ROW1.getValue("field2")); pipeline.run(); }
private boolean needBrokerDataUpdate() { final long updateMaxIntervalMillis = TimeUnit.MINUTES .toMillis(conf.getLoadBalancerReportUpdateMaxIntervalMinutes()); long timeSinceLastReportWrittenToStore = System.currentTimeMillis() - localData.getLastUpdate(); if (timeSinceLastReportWrittenToStore > updateMaxIntervalMillis) { log.info("Writing local data to metadata store because time since last" + " update exceeded threshold of {} minutes", conf.getLoadBalancerReportUpdateMaxIntervalMinutes()); // Always update after surpassing the maximum interval. return true; } final double maxChange = LocalBrokerData.max( percentChange(lastData.getMsgRateIn() + lastData.getMsgRateOut(), localData.getMsgRateIn() + localData.getMsgRateOut()), percentChange(lastData.getMsgThroughputIn() + lastData.getMsgThroughputOut(), localData.getMsgThroughputIn() + localData.getMsgThroughputOut()), percentChange(lastData.getNumBundles(), localData.getNumBundles()), 100.0 * Math.abs(getMaxResourceUsageWithWeight(lastData, conf) - getMaxResourceUsageWithWeight(localData, conf)) ); if (maxChange > conf.getLoadBalancerReportUpdateThresholdPercentage()) { log.info("Writing local data to metadata store because maximum change {}% exceeded threshold {}%; " + "time since last report written is {} seconds", maxChange, conf.getLoadBalancerReportUpdateThresholdPercentage(), timeSinceLastReportWrittenToStore / 1000.0); return true; } return false; }
@Test public void testNeedBrokerDataUpdate() throws Exception { final LocalBrokerData lastData = new LocalBrokerData(); final LocalBrokerData currentData = new LocalBrokerData(); final ServiceConfiguration conf = pulsar1.getConfiguration(); // Set this manually in case the default changes. conf.setLoadBalancerReportUpdateThresholdPercentage(5); // Easier to test using an uninitialized ModularLoadManagerImpl. final ModularLoadManagerImpl loadManager = new ModularLoadManagerImpl(); setField(loadManager, "lastData", lastData); setField(loadManager, "localData", currentData); setField(loadManager, "conf", conf); Supplier<Boolean> needUpdate = () -> { try { return (Boolean) invokeSimpleMethod(loadManager, "needBrokerDataUpdate"); } catch (Exception e) { throw new RuntimeException(e); } }; lastData.setMsgRateIn(100); currentData.setMsgRateIn(104); // 4% difference: shouldn't trigger an update. assert (!needUpdate.get()); currentData.setMsgRateIn(105.1); // 5% difference: should trigger an update (exactly 5% is flaky due to precision). assert (needUpdate.get()); // Do similar tests for lower values. currentData.setMsgRateIn(94); assert (needUpdate.get()); currentData.setMsgRateIn(95.1); assert (!needUpdate.get()); // 0 to non-zero should always trigger an update. lastData.setMsgRateIn(0); currentData.setMsgRateIn(1e-8); assert (needUpdate.get()); // non-zero to zero should trigger an update as long as the threshold is less than 100. lastData.setMsgRateIn(1e-8); currentData.setMsgRateIn(0); assert (needUpdate.get()); // zero to zero should never trigger an update. currentData.setMsgRateIn(0); lastData.setMsgRateIn(0); assert (!needUpdate.get()); // Minimally test other absolute values to ensure they are included. lastData.setCpu(new ResourceUsage(100, 1000)); currentData.setCpu(new ResourceUsage(106, 1000)); assert (!needUpdate.get()); // Minimally test other absolute values to ensure they are included. lastData.setCpu(new ResourceUsage(100, 1000)); currentData.setCpu(new ResourceUsage(206, 1000)); assert (needUpdate.get()); // set the resource weight of cpu to 0, so that it should not trigger an update conf.setLoadBalancerCPUResourceWeight(0); assert (!needUpdate.get()); lastData.setCpu(new ResourceUsage()); currentData.setCpu(new ResourceUsage()); lastData.setMsgThroughputIn(100); currentData.setMsgThroughputIn(106); assert (needUpdate.get()); currentData.setMsgThroughputIn(100); lastData.setNumBundles(100); currentData.setNumBundles(106); assert (needUpdate.get()); currentData.setNumBundles(100); assert (!needUpdate.get()); }
void logRun(long runIndex, boolean runSucceeded, Duration pollInterval, Instant runStartTime, Instant runEndTime) { if (runSucceeded && exceptionCounter > 0) { --exceptionCounter; } Duration actualRunDuration = Duration.between(runStartTime, runEndTime); if (actualRunDuration.compareTo(pollInterval) < 0) { LOGGER.debug("JobZooKeeper run took {}", actualRunDuration); runTookToLongCounter = 0; } else { LOGGER.debug("JobZooKeeper run took {} (while pollIntervalInSeconds is {})", actualRunDuration, pollInterval); if (runTookToLongCounter < 2) { runTookToLongCounter++; } else { dashboardNotificationManager.notify(new PollIntervalInSecondsTimeBoxIsTooSmallNotification(runIndex, (int) pollInterval.getSeconds(), runStartTime, (int) actualRunDuration.getSeconds())); runTookToLongCounter = 0; } } }
@Test void ifThreeNotConsecutiveRunsTookTooLongNoNotificationIsShown() { statistics.logRun(2, true, pollInterval, now().minusSeconds(45), now().minusSeconds(30)); statistics.logRun(3, true, pollInterval, now().minusSeconds(30), now().minusSeconds(15)); statistics.logRun(4, true, pollInterval, now().minusSeconds(15), now().minusSeconds(11)); verify(dashboardNotificationManager, never()).notify(any(PollIntervalInSecondsTimeBoxIsTooSmallNotification.class)); }
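And the positive case implied by the counter logic: the notification fires on the third consecutive run that reaches the poll interval. This assumes pollInterval in the fixture is at most 16 seconds, so each run below counts as too long.
@Test
void ifThreeConsecutiveRunsTookTooLongANotificationIsShown() {
    statistics.logRun(2, true, pollInterval, now().minusSeconds(48), now().minusSeconds(32));
    statistics.logRun(3, true, pollInterval, now().minusSeconds(32), now().minusSeconds(16));
    // Third slow run in a row: the counter has reached 2, so this one notifies.
    statistics.logRun(4, true, pollInterval, now().minusSeconds(16), now());
    verify(dashboardNotificationManager).notify(any(PollIntervalInSecondsTimeBoxIsTooSmallNotification.class));
}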
public static void retryWithBackoff( final int maxRetries, final int initialWaitMs, final int maxWaitMs, final Runnable runnable, final Class<?>... passThroughExceptions) { retryWithBackoff( maxRetries, initialWaitMs, maxWaitMs, runnable, () -> false, Arrays.stream(passThroughExceptions) .map(c -> (Predicate<Exception>) c::isInstance) .collect(Collectors.toList()) ); }
@Test public void shouldThrowPassThroughExceptions() { doThrow(new IllegalArgumentException("error")).when(runnable).run(); try { RetryUtil.retryWithBackoff(3, 1, 3, runnable, IllegalArgumentException.class); fail("retry should have thrown"); } catch (final IllegalArgumentException e) { /* expected: pass-through exceptions are rethrown without retry */ } verify(runnable, times(1)).run(); }
public static File createTmpFile(String dir, String prefix, String suffix) throws IOException { return Files.createTempFile(Paths.get(dir), prefix, suffix).toFile(); }
@Test void testCreateTmpFileWithPath() throws IOException { File tmpFile = null; try { tmpFile = DiskUtils.createTmpFile(EnvUtil.getNacosTmpDir(), "nacos1", ".ut"); assertEquals(EnvUtil.getNacosTmpDir(), tmpFile.getParent()); assertTrue(tmpFile.getName().startsWith("nacos1")); assertTrue(tmpFile.getName().endsWith(".ut")); } finally { if (tmpFile != null) { tmpFile.deleteOnExit(); } } }
boolean isUpstreamDefinite() { if (upstreamDefinite == null) { upstreamDefinite = isRoot() || prev.isTokenDefinite() && prev.isUpstreamDefinite(); } return upstreamDefinite; }
@Test public void is_upstream_definite_in_simple_case() { assertThat(makePathReturningTail(makePPT("foo")).isUpstreamDefinite()).isTrue(); assertThat(makePathReturningTail(makePPT("foo"), makePPT("bar")).isUpstreamDefinite()).isTrue(); assertThat(makePathReturningTail(makePPT("foo", "foo2"), makePPT("bar")).isUpstreamDefinite()).isFalse(); assertThat(makePathReturningTail(new WildcardPathToken(), makePPT("bar")).isUpstreamDefinite()).isFalse(); assertThat(makePathReturningTail(new ScanPathToken(), makePPT("bar")).isUpstreamDefinite()).isFalse(); }
public <T> void addStoreLevelMutableMetric(final String taskId, final String metricsScope, final String storeName, final String name, final String description, final RecordingLevel recordingLevel, final Gauge<T> valueProvider) { final MetricName metricName = metrics.metricName( name, STATE_STORE_LEVEL_GROUP, description, storeLevelTagMap(taskId, metricsScope, storeName) ); if (metrics.metric(metricName) == null) { metrics.addMetricIfAbsent(metricName, new MetricConfig().recordLevel(recordingLevel), valueProvider); final String key = storeSensorPrefix(Thread.currentThread().getName(), taskId, storeName); storeLevelMetrics.computeIfAbsent(key, ignored -> new LinkedList<>()).push(metricName); } }
@Test public void shouldNotAddStoreLevelMutableMetricIfAlreadyExists() { final Metrics metrics = mock(Metrics.class); final MetricName metricName = new MetricName(METRIC_NAME1, STATE_STORE_LEVEL_GROUP, DESCRIPTION1, STORE_LEVEL_TAG_MAP); when(metrics.metricName(METRIC_NAME1, STATE_STORE_LEVEL_GROUP, DESCRIPTION1, STORE_LEVEL_TAG_MAP)) .thenReturn(metricName); when(metrics.metric(metricName)).thenReturn(mock(KafkaMetric.class)); final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time); streamsMetrics.addStoreLevelMutableMetric( TASK_ID1, SCOPE_NAME, STORE_NAME1, METRIC_NAME1, DESCRIPTION1, INFO_RECORDING_LEVEL, VALUE_PROVIDER ); verify(metrics, never()).addMetricIfAbsent(any(), any(), any()); }
public static Optional<String> getRuleName(final String rulePath) { Pattern pattern = Pattern.compile(getRuleNameNode() + "/(\\w+)" + ACTIVE_VERSION_SUFFIX, Pattern.CASE_INSENSITIVE); Matcher matcher = pattern.matcher(rulePath); return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty(); }
@Test void assertGetRuleNameWhenNotFound() { Optional<String> actual = GlobalNodePath.getRuleName("/invalid/transaction/active_version"); assertFalse(actual.isPresent()); }
public List<Entity> query(String gqlQuery) { try { QueryResults<Entity> queryResults = datastore.run( GqlQuery.newGqlQueryBuilder(ResultType.ENTITY, gqlQuery) .setNamespace(namespace) .build()); List<Entity> entities = new ArrayList<>(); while (queryResults.hasNext()) { Entity entity = queryResults.next(); entities.add(entity); // Mark for deletion if namespace matches the test if (entity.getKey().getNamespace().equals(namespace)) { keys.add(entity.getKey()); } } return entities; } catch (Exception e) { throw new DatastoreResourceManagerException("Error running Datastore query", e); } }
@Test public void testQuery() { // Prepare test data String gqlQuery = "SELECT * FROM test_kind"; // Mock the Datastore run method QueryResults<Entity> mockResult = mock(QueryResults.class); Entity mockEntity = mock(Entity.class); when(datastoreMock.run(any(GqlQuery.class))).thenReturn(mockResult); when(mockResult.hasNext()).thenReturn(true).thenReturn(false); when(mockResult.next()).thenReturn(mockEntity); Key mockKey = mock(Key.class); when(mockEntity.getKey()).thenReturn(mockKey); when(mockKey.getNamespace()).thenReturn("test-namespace"); // Execute the method under test List<Entity> result = resourceManager.query(gqlQuery); resourceManager.cleanupAll(); // Verify the result assertThat(result).isNotEmpty(); assertThat(result.get(0)).isEqualTo(mockEntity); verify(datastoreMock).delete(mockKey); }
public static String formatExpression(final Expression expression) { return formatExpression(expression, FormatOptions.of(s -> false)); }
@Test public void shouldFormatDecimalLiteral() { assertThat(ExpressionFormatter.formatExpression(new DecimalLiteral(new BigDecimal("3.5"))), equalTo("3.5")); }
public static List<List<GtfsStorage.FeedIdWithStopId>> findStronglyConnectedComponentsOfStopGraph(PtGraph ptGraph) { PtGraphAsAdjacencyList ptGraphAsAdjacencyList = new PtGraphAsAdjacencyList(ptGraph); TarjanSCC.ConnectedComponents components = TarjanSCC.findComponents(ptGraphAsAdjacencyList, EdgeFilter.ALL_EDGES, false); List<List<GtfsStorage.FeedIdWithStopId>> stronglyConnectedComponentsOfStopGraph = new ArrayList<>(); for (IntArrayList component : components.getComponents()) { ArrayList<GtfsStorage.FeedIdWithStopId> stopsOfComponent = new ArrayList<>(); for (IntCursor intCursor : component) { stopsOfComponent.addAll(getStopsForNode(ptGraph, intCursor.value)); } if (!stopsOfComponent.isEmpty()) { stronglyConnectedComponentsOfStopGraph.add(stopsOfComponent); } } BitSetIterator iter = components.getSingleNodeComponents().iterator(); for (int i = iter.nextSetBit(); i >= 0; i = iter.nextSetBit()) { List<GtfsStorage.FeedIdWithStopId> stopsForNode = getStopsForNode(ptGraph, i); if (!stopsForNode.isEmpty()) { stronglyConnectedComponentsOfStopGraph.add(stopsForNode); } } return stronglyConnectedComponentsOfStopGraph; }
@Test public void testStronglyConnectedComponentsOfStopGraph() { PtGraph ptGraph = graphHopperGtfs.getPtGraph(); List<List<GtfsStorage.FeedIdWithStopId>> stronglyConnectedComponentsOfStopGraph = Analysis.findStronglyConnectedComponentsOfStopGraph(ptGraph); List<GtfsStorage.FeedIdWithStopId> largestComponent = stronglyConnectedComponentsOfStopGraph.get(0); assertThat(largestComponent) .extracting("stopId") .containsExactlyInAnyOrder("EMSI", "DADAN", "NADAV", "NANAA", "STAGECOACH", "AMV", "FUR_CREEK_RES", "BULLFROG", "BEATTY_AIRPORT", "AIRPORT"); List<List<GtfsStorage.FeedIdWithStopId>> singleElementComponents = stronglyConnectedComponentsOfStopGraph.subList(1, 4); assertThat(singleElementComponents.stream().map(it -> it.get(0))) .extracting("stopId") .containsExactlyInAnyOrder("JUSTICE_COURT", "MUSEUM", "NEXT_TO_MUSEUM"); }
public Map<String, String> parse(String body) { final ImmutableMap.Builder<String, String> newLookupBuilder = ImmutableMap.builder(); final String[] lines = body.split(lineSeparator); for (String line : lines) { if (line.startsWith(this.ignorechar)) { continue; } final String[] values = line.split(this.splitPattern); if (values.length <= Math.max(keyColumn, keyOnly ? 0 : valueColumn)) { continue; } final String key = this.caseInsensitive ? values[keyColumn].toLowerCase(Locale.ENGLISH) : values[keyColumn]; final String value = this.keyOnly ? "" : values[valueColumn].trim(); final String finalKey = Strings.isNullOrEmpty(quoteChar) ? key.trim() : key.trim().replaceAll("^" + quoteChar + "|" + quoteChar + "$", ""); final String finalValue = Strings.isNullOrEmpty(quoteChar) ? value.trim() : value.trim().replaceAll("^" + quoteChar + "|" + quoteChar + "$", ""); newLookupBuilder.put(finalKey, finalValue); } return newLookupBuilder.build(); }
@Test public void parseFileWithSwappedColumns() throws Exception { final String input = "# Sample file for testing\n" + "foo:23\n" + "bar:42\n" + "baz:17"; final DSVParser dsvParser = new DSVParser("#", "\n", ":", "", false, false, 1, Optional.of(0)); final Map<String, String> result = dsvParser.parse(input); assertThat(result) .isNotNull() .isNotEmpty() .hasSize(3) .containsExactly( new AbstractMap.SimpleEntry<>("23", "foo"), new AbstractMap.SimpleEntry<>("42", "bar"), new AbstractMap.SimpleEntry<>("17", "baz") ); }
static public int facilityStringToint(String facilityStr) { if ("KERN".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_KERN; } else if ("USER".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_USER; } else if ("MAIL".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_MAIL; } else if ("DAEMON".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_DAEMON; } else if ("AUTH".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_AUTH; } else if ("SYSLOG".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_SYSLOG; } else if ("LPR".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LPR; } else if ("NEWS".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_NEWS; } else if ("UUCP".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_UUCP; } else if ("CRON".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_CRON; } else if ("AUTHPRIV".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_AUTHPRIV; } else if ("FTP".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_FTP; } else if ("NTP".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_NTP; } else if ("AUDIT".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_AUDIT; } else if ("ALERT".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_ALERT; } else if ("CLOCK".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_CLOCK; } else if ("LOCAL0".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL0; } else if ("LOCAL1".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL1; } else if ("LOCAL2".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL2; } else if ("LOCAL3".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL3; } else if ("LOCAL4".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL4; } else if ("LOCAL5".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL5; } else if ("LOCAL6".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL6; } else if ("LOCAL7".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL7; } else { throw new IllegalArgumentException(facilityStr + " is not a valid syslog facility string"); } }
@Test
public void testFacilityStringToint() {
    assertEquals(SyslogConstants.LOG_KERN, SyslogAppenderBase.facilityStringToint("KERN"));
    assertEquals(SyslogConstants.LOG_USER, SyslogAppenderBase.facilityStringToint("USER"));
    assertEquals(SyslogConstants.LOG_MAIL, SyslogAppenderBase.facilityStringToint("MAIL"));
    assertEquals(SyslogConstants.LOG_DAEMON, SyslogAppenderBase.facilityStringToint("DAEMON"));
    assertEquals(SyslogConstants.LOG_AUTH, SyslogAppenderBase.facilityStringToint("AUTH"));
    assertEquals(SyslogConstants.LOG_SYSLOG, SyslogAppenderBase.facilityStringToint("SYSLOG"));
    assertEquals(SyslogConstants.LOG_LPR, SyslogAppenderBase.facilityStringToint("LPR"));
    assertEquals(SyslogConstants.LOG_NEWS, SyslogAppenderBase.facilityStringToint("NEWS"));
    assertEquals(SyslogConstants.LOG_UUCP, SyslogAppenderBase.facilityStringToint("UUCP"));
    assertEquals(SyslogConstants.LOG_CRON, SyslogAppenderBase.facilityStringToint("CRON"));
    assertEquals(SyslogConstants.LOG_AUTHPRIV, SyslogAppenderBase.facilityStringToint("AUTHPRIV"));
    assertEquals(SyslogConstants.LOG_FTP, SyslogAppenderBase.facilityStringToint("FTP"));
    assertEquals(SyslogConstants.LOG_NTP, SyslogAppenderBase.facilityStringToint("NTP"));
    assertEquals(SyslogConstants.LOG_AUDIT, SyslogAppenderBase.facilityStringToint("AUDIT"));
    assertEquals(SyslogConstants.LOG_ALERT, SyslogAppenderBase.facilityStringToint("ALERT"));
    assertEquals(SyslogConstants.LOG_CLOCK, SyslogAppenderBase.facilityStringToint("CLOCK"));
    assertEquals(SyslogConstants.LOG_LOCAL0, SyslogAppenderBase.facilityStringToint("LOCAL0"));
    assertEquals(SyslogConstants.LOG_LOCAL1, SyslogAppenderBase.facilityStringToint("LOCAL1"));
    assertEquals(SyslogConstants.LOG_LOCAL2, SyslogAppenderBase.facilityStringToint("LOCAL2"));
    assertEquals(SyslogConstants.LOG_LOCAL3, SyslogAppenderBase.facilityStringToint("LOCAL3"));
    assertEquals(SyslogConstants.LOG_LOCAL4, SyslogAppenderBase.facilityStringToint("LOCAL4"));
    assertEquals(SyslogConstants.LOG_LOCAL5, SyslogAppenderBase.facilityStringToint("LOCAL5"));
    assertEquals(SyslogConstants.LOG_LOCAL6, SyslogAppenderBase.facilityStringToint("LOCAL6"));
    assertEquals(SyslogConstants.LOG_LOCAL7, SyslogAppenderBase.facilityStringToint("LOCAL7"));
}
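A quick sketch of the failure path, grounded in the method's final else branch: any unrecognized facility name is rejected.

try {
    SyslogAppenderBase.facilityStringToint("NOT_A_FACILITY");
} catch (IllegalArgumentException expected) {
    // message: "NOT_A_FACILITY is not a valid syslog facility string"
}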
public static boolean parse(final String str, ResTable_config out) {
    // Delegates to the three-argument overload, defaulting the boolean flag to true
    return parse(str, out, true);
}
@Test
public void parse_mcc_upperCase() {
    ResTable_config config = new ResTable_config();
    ConfigDescription.parse("MCC310", config);
    assertThat(config.mcc).isEqualTo(310);
}
public static void hookPendingIntentGetBroadcast(PendingIntent pendingIntent, Context context,
                                                 int requestCode, Intent intent, int flags) {
    // Only the intent and the resulting PendingIntent are needed for tracking; the other
    // parameters mirror PendingIntent.getBroadcast(...) so call sites stay symmetrical
    hookPendingIntent(intent, pendingIntent);
}
@Test
public void hookPendingIntentGetBroadcast() {
    PushAutoTrackHelper.hookPendingIntentGetBroadcast(MockDataTest.mockPendingIntent(),
            mApplication, 100, MockDataTest.mockJPushIntent(), 100);
}
@VisibleForTesting
V1StatefulSet createStatefulSet() {
    final String jobName = createJobName(instanceConfig.getFunctionDetails(), this.jobName);

    final V1StatefulSet statefulSet = new V1StatefulSet();

    // setup stateful set metadata
    final V1ObjectMeta objectMeta = new V1ObjectMeta();
    objectMeta.name(jobName);
    objectMeta.setLabels(getLabels(instanceConfig.getFunctionDetails()));
    // we don't technically need to set this, but it is useful for testing
    objectMeta.setNamespace(jobNamespace);
    statefulSet.metadata(objectMeta);

    // create the stateful set spec
    final V1StatefulSetSpec statefulSetSpec = new V1StatefulSetSpec();
    statefulSetSpec.serviceName(jobName);
    statefulSetSpec.setReplicas(instanceConfig.getFunctionDetails().getParallelism());

    // Parallel pod management tells the StatefulSet controller to launch or terminate
    // all pods in parallel, and not to wait for pods to become Running and Ready or
    // completely terminated prior to launching or terminating another pod.
    statefulSetSpec.setPodManagementPolicy("Parallel");

    // add selector match labels so we know which pods to manage
    final V1LabelSelector selector = new V1LabelSelector();
    selector.matchLabels(getLabels(instanceConfig.getFunctionDetails()));
    statefulSetSpec.selector(selector);

    // create a pod template
    final V1PodTemplateSpec podTemplateSpec = new V1PodTemplateSpec();

    // set up pod metadata
    final V1ObjectMeta templateMetaData = new V1ObjectMeta().labels(getLabels(instanceConfig.getFunctionDetails()));
    templateMetaData.annotations(getPrometheusAnnotations());
    podTemplateSpec.setMetadata(templateMetaData);

    final List<String> command = getExecutorCommand();
    podTemplateSpec.spec(getPodSpec(command,
            instanceConfig.getFunctionDetails().hasResources() ? instanceConfig.getFunctionDetails().getResources() : null));

    statefulSetSpec.setTemplate(podTemplateSpec);
    statefulSet.spec(statefulSetSpec);

    // let the customizer run, but ensure it doesn't change the name so we can find the set again;
    // note that the original statefulSet reference is what gets returned, so a customizer must
    // mutate it in place for its changes to take effect
    final V1StatefulSet overridden = manifestCustomizer
            .map((customizer) -> customizer.customizeStatefulSet(instanceConfig.getFunctionDetails(), statefulSet))
            .orElse(statefulSet);
    overridden.getMetadata().name(jobName);

    return statefulSet;
}
@Test
public void testCustomKubernetesDownloadCommandsWithAuthWithoutAuthSpec() throws Exception {
    InstanceConfig config = createJavaInstanceConfig(FunctionDetails.Runtime.JAVA, false);
    config.setFunctionDetails(createFunctionDetails(FunctionDetails.Runtime.JAVA, false));

    factory = createKubernetesRuntimeFactory(null, 10, 1.0, 1.0, Optional.empty(), null, wconfig -> {
        wconfig.setAuthenticationEnabled(true);
    }, AuthenticationConfig.builder()
            .clientAuthenticationPlugin("com.MyAuth")
            .clientAuthenticationParameters("{\"authParam1\": \"authParamValue1\"}")
            .build());

    KubernetesRuntime container = factory.createContainer(config, userJarFile, userJarFile, null, null, 30L);
    V1StatefulSet spec = container.createStatefulSet();
    String expectedDownloadCommand = "pulsar-admin --admin-url " + pulsarAdminUrl
            + " --auth-plugin com.MyAuth --auth-params {\"authParam1\": \"authParamValue1\"}"
            + " functions download "
            + "--tenant " + TEST_TENANT
            + " --namespace " + TEST_NAMESPACE
            + " --name " + TEST_NAME
            + " --destination-file " + pulsarRootDir + "/" + userJarFile;
    String containerCommand = spec.getSpec().getTemplate().getSpec().getContainers().get(0).getCommand().get(2);
    assertTrue(containerCommand.contains(expectedDownloadCommand), "Found:" + containerCommand);
}
@Nonnull
public InstanceConfig setBackupCount(int newBackupCount) {
    // Validates that the requested count is within the allowed backup range before assigning
    checkBackupCount(newBackupCount, 0);
    this.backupCount = newBackupCount;
    return this;
}
@Test
public void when_TooBigBackupCount_thenThrowsException() {
    // When
    InstanceConfig instanceConfig = new InstanceConfig();

    // Then
    Assert.assertThrows(IllegalArgumentException.class, () -> instanceConfig.setBackupCount(10));
}
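For contrast, a minimal sketch of the accepting path, assuming small values such as 1 fall inside the allowed range:

InstanceConfig config = new InstanceConfig().setBackupCount(1); // fluent: returns this
// config.setBackupCount(10) would throw IllegalArgumentException, as the test above shows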
private void readObject(ObjectInputStream s) throws IOException, ClassNotFoundException {
    s.defaultReadObject();
    // Deserialize the parameters, which are streamed one object per slot rather than as a single array
    Object[] a = new Object[length];
    for (int i = 0; i < length; i++) {
        a[i] = s.readObject();
    }
    this.parameters = a;
}
@Test
public void testReadObject() throws IOException, ClassNotFoundException {
    Object[] parameters = new Object[1];
    parameters[0] = hippo4j;
    Request request = new DefaultRequest(rid, name, parameters);
    byte[] bytes;
    try (
            ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
            ObjectOutputStream outputStream = new ObjectOutputStream(byteArrayOutputStream)) {
        outputStream.writeObject(request);
        outputStream.flush();
        bytes = byteArrayOutputStream.toByteArray();
    }
    Request request1;
    try (
            ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes);
            ObjectInputStream objectInputStream = new ObjectInputStream(byteArrayInputStream)) {
        request1 = (Request) objectInputStream.readObject();
    }
    Assert.assertEquals(request.hashCode(), request1.hashCode());
    Assert.assertEquals(name, request1.getKey());
    Assert.assertEquals(rid, request1.getRID());
    Assert.assertArrayEquals(parameters, request1.getParameters());
    Assert.assertEquals(request1, request);
}
@Override
public void error(String msg) {
    logger.error(msg);
}
@Test
public void testError() {
    Log mockLog = mock(Log.class);
    InternalLogger logger = new CommonsLogger(mockLog, "foo");
    logger.error("a");
    verify(mockLog).error("a");
}
@SafeVarargs
public static <T> Stream<T> of(T... array) {
    Assert.notNull(array, "Array must be not null!");
    return Stream.of(array);
}
@Test
public void streamTestEmptyIterator() {
    assertStreamIsEmpty(StreamUtil.of(Collections.emptyIterator()));
}
public static List<ByteBuffer> cloneByteBufferList(List<ByteBuffer> source) {
    List<ByteBuffer> ret = new ArrayList<>(source.size());
    for (ByteBuffer b : source) {
        ret.add(cloneByteBuffer(b));
    }
    return ret;
}
@Test
public void cloneDirectByteBufferList() {
    final int bufferSize = 10;
    final int listLength = 10;
    ArrayList<ByteBuffer> bufDirectList = new ArrayList<>(listLength);
    for (int k = 0; k < listLength; k++) {
        ByteBuffer bufDirect = ByteBuffer.allocateDirect(bufferSize);
        for (byte i = 0; i < bufferSize; i++) {
            bufDirect.put((byte) (i + k));
        }
        bufDirectList.add(bufDirect);
    }
    List<ByteBuffer> bufListClone = BufferUtils.cloneByteBufferList(bufDirectList);
    assertEquals(listLength, bufListClone.size());
    for (int k = 0; k < listLength; k++) {
        assertEquals(bufDirectList.get(k), bufListClone.get(k));
    }
}
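A small sketch of why cloning matters, assuming cloneByteBuffer performs a deep copy as its name suggests (a heap buffer is used here purely for brevity):

ByteBuffer original = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
List<ByteBuffer> clones = BufferUtils.cloneByteBufferList(Collections.singletonList(original));
original.put(0, (byte) 9);
// If the clone owns its own storage, clones.get(0).get(0) still reads 1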
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
    // Delegates to the overload that accepts a secondary property container, passing none
    return substVars(val, pc1, null);
}
@Test
public void openBraceAsLastCharacter() throws JoranException, ScanException {
    Exception e = assertThrows(IllegalArgumentException.class, () -> {
        OptionHelper.substVars("a{a{", context);
    });
    String expectedMessage = "All tokens consumed but was expecting \"}\"";
    assertEquals(expectedMessage, e.getMessage());
}
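A minimal happy-path sketch, assuming a logback Context (which is a PropertyContainer) with a property set on it; substVars may throw ScanException, so the caller declares it:

Context ctx = new ContextBase();
ctx.putProperty("APP_HOME", "/opt/app");
String resolved = OptionHelper.substVars("${APP_HOME}/logs", ctx); // -> "/opt/app/logs"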
public AstNode rewrite(final AstNode node, final C context) {
    return rewriter.process(node, context);
}
@Test
public void shouldRewriteGroupBy() {
    // Given:
    final Expression exp1 = mock(Expression.class);
    final Expression exp2 = mock(Expression.class);
    final Expression rewrittenExp1 = mock(Expression.class);
    final Expression rewrittenExp2 = mock(Expression.class);
    final GroupBy groupBy = new GroupBy(
        location,
        ImmutableList.of(exp1, exp2)
    );
    when(expressionRewriter.apply(exp1, context)).thenReturn(rewrittenExp1);
    when(expressionRewriter.apply(exp2, context)).thenReturn(rewrittenExp2);

    // When:
    final AstNode rewritten = rewriter.rewrite(groupBy, context);

    // Then:
    assertThat(
        rewritten,
        equalTo(
            new GroupBy(
                location,
                ImmutableList.of(rewrittenExp1, rewrittenExp2)
            )
        )
    );
}
public FileIO getFileIO(StorageType.Type storageType) throws IllegalArgumentException {
    Supplier<? extends RuntimeException> exceptionSupplier =
            () -> new IllegalArgumentException(storageType.getValue() + " is not configured");
    if (HDFS.equals(storageType)) {
        return Optional.ofNullable(hdfsFileIO).orElseThrow(exceptionSupplier);
    } else if (LOCAL.equals(storageType)) {
        return Optional.ofNullable(localFileIO).orElseThrow(exceptionSupplier);
    } else if (S3.equals(storageType)) {
        return Optional.ofNullable(s3FileIO).orElseThrow(exceptionSupplier);
    } else if (ADLS.equals(storageType)) {
        return Optional.ofNullable(adlsFileIO).orElseThrow(exceptionSupplier);
    } else {
        throw new IllegalArgumentException("FileIO not supported for storage type: " + storageType);
    }
}
@Test
public void testGetUndefinedFileIOThrowsException() {
    // hdfs storage is not configured
    Assertions.assertThrows(
        IllegalArgumentException.class, () -> fileIOManager.getFileIO(StorageType.HDFS));
}
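Both failure modes surface as IllegalArgumentException but with different messages; a minimal sketch reusing the fileIOManager fixture from the test above (the exact message prefix depends on StorageType.Type.getValue()):

try {
    fileIOManager.getFileIO(StorageType.HDFS); // supported type, but no FileIO configured
} catch (IllegalArgumentException e) {
    // message ends with " is not configured"; unsupported types instead report
    // "FileIO not supported for storage type: ..."
}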
@GET
@Path("/{connector}/offsets")
@Operation(summary = "Get the current offsets for the specified connector")
public ConnectorOffsets getOffsets(final @PathParam("connector") String connector) throws Throwable {
    FutureCallback<ConnectorOffsets> cb = new FutureCallback<>();
    herder.connectorOffsets(connector, cb);
    return requestHandler.completeRequest(cb);
}
@Test
public void testGetOffsets() throws Throwable {
    final ArgumentCaptor<Callback<ConnectorOffsets>> cb = ArgumentCaptor.forClass(Callback.class);
    ConnectorOffsets offsets = new ConnectorOffsets(Arrays.asList(
            new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue"),
                    Collections.singletonMap("offsetKey", "offsetValue")),
            new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue2"),
                    Collections.singletonMap("offsetKey", "offsetValue"))
    ));
    expectAndCallbackResult(cb, offsets).when(herder).connectorOffsets(eq(CONNECTOR_NAME), cb.capture());

    assertEquals(offsets, connectorsResource.getOffsets(CONNECTOR_NAME));
}
public static ExternalServiceCredentialsGenerator.Builder builder(final SecretBytes key) {
    return builder(key.value());
}
@Test
void testInvalidConstructor() {
    assertThrows(RuntimeException.class, () -> ExternalServiceCredentialsGenerator
        .builder(new byte[32])
        .withUsernameTimestampTruncatorAndPrefix(null, PREFIX)
        .build());

    assertThrows(RuntimeException.class, () -> ExternalServiceCredentialsGenerator
        .builder(new byte[32])
        .withUsernameTimestampTruncatorAndPrefix(timestamp -> timestamp.truncatedTo(ChronoUnit.DAYS), null)
        .build());
}
@Override
public V remove(Object o) {
    if (o instanceof String) {
        // Keys are stored lower-cased, so normalize before removing from both backing maps
        lowerKeyToOriginMap.remove(((String) o).toLowerCase());
        return targetMap.remove(((String) o).toLowerCase());
    }
    return null;
}
@Test
void testRemove() {
    Map<String, Object> map = new LowerCaseLinkHashMap<>(lowerCaseLinkHashMap);
    Object value = map.remove("key");
    Assertions.assertEquals("Value", value);
    Assertions.assertFalse(map.containsKey("key"));
    Assertions.assertTrue(map.containsKey("key2"));
}
public void logOnCatchupPosition(
        final int memberId,
        final long leadershipTermId,
        final long logPosition,
        final int followerMemberId,
        final String catchupEndpoint) {
    final int length = catchupPositionLength(catchupEndpoint);
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    // Claim space in the ring buffer; a non-positive index means the claim failed and the event is dropped
    final int index = ringBuffer.tryClaim(CATCHUP_POSITION.toEventCodeId(), encodedLength);

    if (index > 0) {
        try {
            encodeOnCatchupPosition(
                    (UnsafeBuffer) ringBuffer.buffer(),
                    index,
                    captureLength,
                    length,
                    memberId,
                    leadershipTermId,
                    logPosition,
                    followerMemberId,
                    catchupEndpoint);
        } finally {
            ringBuffer.commit(index);
        }
    }
}
@Test
void logCatchupPosition() {
    final int offset = ALIGNMENT * 4;
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);
    final long leadershipTermId = 1233L;
    final long logPosition = 100L;
    final int followerMemberId = 18;
    final int memberId = -901;
    final String catchupEndpoint = "aeron:udp?endpoint=localhost:9090";

    logger.logOnCatchupPosition(memberId, leadershipTermId, logPosition, followerMemberId, catchupEndpoint);

    final int length = 2 * SIZE_OF_LONG + 2 * SIZE_OF_INT + SIZE_OF_INT + catchupEndpoint.length();
    verifyLogHeader(logBuffer, offset, CATCHUP_POSITION.toEventCodeId(), length, length);
    int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
    assertEquals(leadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(logPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(followerMemberId, logBuffer.getInt(index, LITTLE_ENDIAN));
    index += SIZE_OF_INT;
    assertEquals(memberId, logBuffer.getInt(index, LITTLE_ENDIAN));
    index += SIZE_OF_INT;
    final int catchupEndpointLength = logBuffer.getInt(index, LITTLE_ENDIAN);
    index += SIZE_OF_INT;
    assertEquals(catchupEndpoint, logBuffer.getStringWithoutLengthAscii(index, catchupEndpointLength));

    final StringBuilder sb = new StringBuilder();
    ClusterEventDissector.dissectCatchupPosition(CATCHUP_POSITION, logBuffer, encodedMsgOffset(offset), sb);

    final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: CATCHUP_POSITION \\[61/61]: " +
            "memberId=-901 leadershipTermId=1233 logPosition=100 followerMemberId=18 " +
            "catchupEndpoint=aeron:udp\\?endpoint=localhost:9090";
    assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
}
public boolean isExist(final String key) {
    try {
        return null != client.checkExists().forPath(key);
    } catch (Exception e) {
        // Any ZooKeeper failure is logged and reported as "not present" rather than propagated
        LOGGER.error("check if key exist error", e);
        return false;
    }
}
@Test
void isExist() throws Exception {
    assertFalse(() -> client.isExist("/test"));
    ExistsBuilderImpl existsBuilder = mock(ExistsBuilderImpl.class);
    when(curatorFramework.checkExists()).thenReturn(existsBuilder);
    when(existsBuilder.forPath(anyString())).thenReturn(new Stat());
    boolean exist = client.isExist("/test");
    assertTrue(exist);
}
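A minimal sketch of the swallow-and-log behavior, reusing the mocked curatorFramework fixture from the test above:

when(curatorFramework.checkExists()).thenThrow(new RuntimeException("connection lost"));
// The exception is caught inside isExist, logged, and surfaced as a plain false
assertFalse(client.isExist("/test"));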
@Override
public String toString() {
    return getClass().getSimpleName() + "[lastConnectUrl=" + Parameters.getLastConnectUrl()
            + ", lastConnectInfo=" + Parameters.getLastConnectInfo() + ']';
}
@Test
public void testToString() {
    final String string = driver.toString();
    assertNotNull("toString not null", string);
    assertFalse("toString not empty", string.isEmpty());
}
void validateFileLength(final Location location) throws IOException {
    final long recordEnd = location.getOffset() + location.getSize();

    // Check if the end of the record will go past the file length
    if (recordEnd > dataFile.length) {
        /*
         * AMQ-9254: if the read request is outside the expected dataFile length,
         * perform the expensive OS file length lookup operation to allow the read
         * operation if it will succeed
         */
        final long osFileLength = dataFile.getFile().length();
        if (recordEnd > osFileLength) {
            throw new IOException("Invalid location size: " + location + ", size: " + location.getSize());
        } else {
            LOG.warn("DataFile:{} actual length:{} larger than expected:{} for readRecord location:{} size:{}",
                    dataFile.file.getName(), osFileLength, dataFile.length, location, location.getSize());
        }
    }
}
@Test
public void testValidateUsingRealFileLength() throws IOException {
    // Create a file of size 1024
    final DataFile dataFile = newTestDataFile(1024);
    final DataFileAccessor accessor = new DataFileAccessor(mock(Journal.class), dataFile);

    // Set a bad length value on the dataFile so that the initial check fails
    // because the location is greater than dataFile.length.
    // We should read the real file size (1024), which is greater than the
    // location size + offset, so this should work.
    dataFile.setLength(512);
    final Location location = new Location(0, 100);
    location.setSize(512);

    accessor.validateFileLength(location);
}
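For contrast, a sketch of the rejection path under the same setup as the test above: a record that extends past both the expected and the real on-disk length must be refused.

final Location tooBig = new Location(0, 100);
tooBig.setSize(2048); // 100 + 2048 extends well past the 1024-byte file
// accessor.validateFileLength(tooBig) throws IOException("Invalid location size: ...")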
public DrlxParseResult drlxParse(Class<?> patternType, String bindingId, String expression) {
    return drlxParse(patternType, bindingId, expression, false);
}
@Test
public void testNullSafeExpressionsWithContains() {
    SingleDrlxParseSuccess result = (SingleDrlxParseSuccess) parser.drlxParse(Person.class, "$p",
            "address!.city contains (\"Mi\")");

    List<Expression> nullSafeExpressions = result.getNullSafeExpressions();
    assertThat(nullSafeExpressions).hasSize(1);
    assertThat(nullSafeExpressions.get(0).toString()).isEqualTo("_this.getAddress() != null");

    // null check is done after the first constraint
    assertThat(result.getExpr().toString())
            .isEqualTo("D.eval(org.drools.model.operators.ContainsOperator.INSTANCE, _this.getAddress().getCity(), \"Mi\")");
}
public static ConfigInfos generateResult(String connType, Map<String, ConfigKey> configKeys,
                                         List<ConfigValue> configValues, List<String> groups) {
    int errorCount = 0;
    List<ConfigInfo> configInfoList = new LinkedList<>();

    Map<String, ConfigValue> configValueMap = new HashMap<>();
    for (ConfigValue configValue : configValues) {
        String configName = configValue.name();
        configValueMap.put(configName, configValue);
        // Values without a corresponding config key are reported with a null key info
        if (!configKeys.containsKey(configName)) {
            configInfoList.add(new ConfigInfo(null, convertConfigValue(configValue, null)));
            errorCount += configValue.errorMessages().size();
        }
    }

    for (Map.Entry<String, ConfigKey> entry : configKeys.entrySet()) {
        String configName = entry.getKey();
        ConfigKeyInfo configKeyInfo = convertConfigKey(entry.getValue());
        Type type = entry.getValue().type;
        ConfigValueInfo configValueInfo = null;
        if (configValueMap.containsKey(configName)) {
            ConfigValue configValue = configValueMap.get(configName);
            configValueInfo = convertConfigValue(configValue, type);
            errorCount += configValue.errorMessages().size();
        }
        configInfoList.add(new ConfigInfo(configKeyInfo, configValueInfo));
    }
    return new ConfigInfos(connType, errorCount, groups, configInfoList);
}
@Test
public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithSomeErrors() {
    String name = "com.acme.connector.MyConnector";
    Map<String, ConfigDef.ConfigKey> keys = new HashMap<>();
    addConfigKey(keys, "config.a1", null);
    addConfigKey(keys, "config.b1", "group B");
    addConfigKey(keys, "config.b2", "group B");
    addConfigKey(keys, "config.c1", "group C");

    List<String> groups = Arrays.asList("groupB", "group C");
    List<ConfigValue> values = new ArrayList<>();
    addValue(values, "config.a1", "value.a1");
    addValue(values, "config.b1", "value.b1");
    addValue(values, "config.b2", "value.b2");
    addValue(values, "config.c1", "value.c1", "error c1");

    ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups);
    assertEquals(name, infos.name());
    assertEquals(groups, infos.groups());
    assertEquals(values.size(), infos.values().size());
    assertEquals(1, infos.errorCount());
    assertInfoKey(infos, "config.a1", null);
    assertInfoKey(infos, "config.b1", "group B");
    assertInfoKey(infos, "config.b2", "group B");
    assertInfoKey(infos, "config.c1", "group C");
    assertInfoValue(infos, "config.a1", "value.a1");
    assertInfoValue(infos, "config.b1", "value.b1");
    assertInfoValue(infos, "config.b2", "value.b2");
    assertInfoValue(infos, "config.c1", "value.c1", "error c1");
}
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source,
        Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables,
        Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles,
        FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    FEEL_1_1Parser parser = new FEEL_1_1Parser(tokens);

    ParserHelper parserHelper = new ParserHelper(eventsManager);
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler(new FEELErrorHandler());
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener(new FEELParserErrorListener(eventsManager));

    // pre-loads the parser with symbols
    defineVariables(inputVariableTypes, inputVariables, parser);
    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
@Test
void contextPathExpression3() {
    String inputExpression = "{ first name : \"bob\" }.first name";
    BaseNode pathBase = parse(inputExpression);

    assertThat(pathBase).isInstanceOf(PathExpressionNode.class);
    assertThat(pathBase.getText()).isEqualTo(inputExpression);
    assertThat(pathBase.getResultType()).isEqualTo(BuiltInType.STRING);

    PathExpressionNode pathExpr = (PathExpressionNode) pathBase;
    assertThat(pathExpr.getExpression()).isInstanceOf(ContextNode.class);
    assertThat(pathExpr.getExpression().getText()).isEqualTo("{ first name : \"bob\" }");
    assertThat(pathExpr.getName()).isInstanceOf(NameRefNode.class);
    assertThat(pathExpr.getName().getText()).isEqualTo("first name");
}
@Override
public Set<String> getFileUuids() {
    return ImmutableSet.copyOf(fileUuids);
}
@Test
public void getFileUuids_returns_empty_when_repository_is_empty() {
    assertThat(underTest.getFileUuids()).isEmpty();
}