focal_method (string, length 13–60.9k) | test_case (string, length 25–109k)
@Override public Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor( List<ExecutionAttemptID> executionAttemptIds) { final Map<ExecutionVertexID, ExecutionAttemptID> vertexIdToExecutionId = new HashMap<>(); executionAttemptIds.forEach( executionId -> vertexIdToExecutionId.put(executionId.getExecutionVertexId(), executionId)); checkState( vertexIdToExecutionId.size() == executionAttemptIds.size(), "SlotSharingExecutionSlotAllocator does not support one execution vertex to have multiple concurrent executions"); final List<ExecutionVertexID> vertexIds = executionAttemptIds.stream() .map(ExecutionAttemptID::getExecutionVertexId) .collect(Collectors.toList()); return allocateSlotsForVertices(vertexIds).stream() .collect( Collectors.toMap( vertexAssignment -> vertexIdToExecutionId.get( vertexAssignment.getExecutionVertexId()), vertexAssignment -> new ExecutionSlotAssignment( vertexIdToExecutionId.get( vertexAssignment.getExecutionVertexId()), vertexAssignment.getLogicalSlotFuture()))); }
@Test void testPhysicalSlotReleaseLogicalSlots() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext.newBuilder().addGroup(EV1, EV2).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments = context.allocateSlotsFor(EV1, EV2); List<TestingPayload> payloads = assignments.values().stream() .map( assignment -> { TestingPayload payload = new TestingPayload(); assignment .getLogicalSlotFuture() .thenAccept( logicalSlot -> logicalSlot.tryAssignPayload(payload)); return payload; }) .collect(Collectors.toList()); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getFirstResponseOrFail().get(); assertThat(payloads.stream().allMatch(payload -> payload.getTerminalStateFuture().isDone())) .isFalse(); assertThat(physicalSlot.getPayload()).isNotNull(); physicalSlot.getPayload().release(new Throwable()); assertThat(payloads.stream().allMatch(payload -> payload.getTerminalStateFuture().isDone())) .isTrue(); assertThat(context.getSlotProvider().getCancellations()).containsKey(slotRequestId); context.allocateSlotsFor(EV1, EV2); // there should be one more physical slot allocation, as the first allocation should be // removed after releasing all logical slots assertThat(context.getSlotProvider().getRequests()).hasSize(2); }
@Override public void upgrade() { if (shouldSkip()) { return; } final ImmutableSet<String> eventIndexPrefixes = ImmutableSet.of( elasticsearchConfig.getDefaultEventsIndexPrefix(), elasticsearchConfig.getDefaultSystemEventsIndexPrefix()); elasticsearch.addGl2MessageIdFieldAlias(eventIndexPrefixes); writeMigrationCompleted(eventIndexPrefixes); }
@Test void usesEventIndexPrefixesFromElasticsearchConfig() { mockConfiguredEventPrefixes("events-prefix", "system-events-prefix"); this.sut.upgrade(); verify(elasticsearchAdapter) .addGl2MessageIdFieldAlias(ImmutableSet.of("events-prefix", "system-events-prefix")); }
@Override public PipelineJobProgressUpdatedParameter write(final String ackId, final Collection<Record> records) { if (records.isEmpty()) { return new PipelineJobProgressUpdatedParameter(0); } while (!channel.isWritable() && channel.isActive()) { doAwait(); } if (!channel.isActive()) { return new PipelineJobProgressUpdatedParameter(0); } Collection<DataRecordResult.Record> resultRecords = getResultRecords(records); DataRecordResult dataRecordResult = DataRecordResult.newBuilder().addAllRecord(resultRecords).setAckId(ackId).build(); channel.writeAndFlush(CDCResponseUtils.succeed("", ResponseCase.DATA_RECORD_RESULT, dataRecordResult)); return new PipelineJobProgressUpdatedParameter(resultRecords.size()); }
@Test void assertWrite() throws IOException { Channel mockChannel = mock(Channel.class); when(mockChannel.isWritable()).thenReturn(false, true); when(mockChannel.isActive()).thenReturn(true); ShardingSphereDatabase mockDatabase = mock(ShardingSphereDatabase.class); when(mockDatabase.getName()).thenReturn("test"); try (PipelineCDCSocketSink sink = new PipelineCDCSocketSink(mockChannel, mockDatabase, Collections.singletonList("test.t_order"))) { PipelineJobProgressUpdatedParameter actual = sink.write("ack", Collections.singletonList(new FinishedRecord(new IngestPlaceholderPosition()))); assertThat(actual.getProcessedRecordsCount(), is(0)); actual = sink.write("ack", Collections.singletonList(new DataRecord(PipelineSQLOperationType.DELETE, "t_order", new IngestPlaceholderPosition(), 1))); assertThat(actual.getProcessedRecordsCount(), is(1)); } }
public LocationIndex prepareIndex() { return prepareIndex(EdgeFilter.ALL_EDGES); }
@Test public void testWayGeometry() { Graph g = createTestGraphWithWayGeometry(); LocationIndex index = createIndexNoPrepare(g, 500000).prepareIndex(); assertEquals(3, findClosestEdge(index, 0, 0)); assertEquals(3, findClosestEdge(index, 0, 0.1)); assertEquals(3, findClosestEdge(index, 0.1, 0.1)); assertEquals(1, findClosestNode(index, -0.5, -0.5)); }
static Predicate obtainPredicateFromExpression( final CamelContext camelContext, final String predExpression, final String expressionLanguage) { try { return camelContext.resolveLanguage(expressionLanguage).createPredicate(predExpression); } catch (Exception e) { String message = String.format(ERROR_INVALID_PREDICATE_EXPRESSION, expressionLanguage, predExpression); throw new IllegalArgumentException(message, e); } }
@Test void obtainPredicateFromExpressionWithError() { String expression = "not a valid expression"; assertThrows(IllegalArgumentException.class, () -> DynamicRouterControlService.obtainPredicateFromExpression(context, expression, expressionLanguage)); }
public Set<Integer> nodesThatShouldBeDown(ClusterState state) { return calculate(state).nodesThatShouldBeDown(); }
@Test void min_ratio_of_zero_never_takes_down_groups_implicitly() { GroupAvailabilityCalculator calc = calcForHierarchicCluster( DistributionBuilder.withGroups(2).eachWithNodeCount(4), 0.0); assertThat(calc.nodesThatShouldBeDown(clusterState( "distributor:8 storage:8")), equalTo(emptySet())); // 1 down in each group assertThat(calc.nodesThatShouldBeDown(clusterState( "distributor:8 storage:8 .0.s:d .4.s:d")), equalTo(emptySet())); // 2 down in each group assertThat(calc.nodesThatShouldBeDown(clusterState( "distributor:8 storage:8 .0.s:d .1.s:d .4.s:d .5.s:d")), equalTo(emptySet())); // 3 down in each group assertThat(calc.nodesThatShouldBeDown(clusterState( "distributor:8 storage:8 .0.s:d .1.s:d .2.s:d .4.s:d .5.s:d .6.s:d")), equalTo(emptySet())); }
@Override public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo, List<String> partNames, boolean areAllPartsFound) throws MetaException { checkStatisticsList(colStatsWithSourceInfo); ColumnStatisticsObj statsObj = null; String colType; String colName = null; // check if all the ColumnStatisticsObjs contain stats and all the ndv are // bitvectors boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size(); NumDistinctValueEstimator ndvEstimator = null; boolean areAllNDVEstimatorsMergeable = true; for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); if (statsObj == null) { colName = cso.getColName(); colType = cso.getColType(); statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField()); LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats); } DateColumnStatsDataInspector columnStatsData = dateInspectorFromStats(cso); // check if we can merge NDV estimators if (columnStatsData.getNdvEstimator() == null) { areAllNDVEstimatorsMergeable = false; break; } else { NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator(); if (ndvEstimator == null) { ndvEstimator = estimator; } else { if (!ndvEstimator.canMerge(estimator)) { areAllNDVEstimatorsMergeable = false; break; } } } } if (areAllNDVEstimatorsMergeable && ndvEstimator != null) { ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); } LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable); ColumnStatisticsData columnStatisticsData = initColumnStatisticsData(); if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) { DateColumnStatsDataInspector aggregateData = null; long lowerBound = 0; long higherBound = 0; double densityAvgSum = 0.0; DateColumnStatsMerger merger = new DateColumnStatsMerger(); for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); DateColumnStatsDataInspector newData = dateInspectorFromStats(cso); lowerBound = Math.max(lowerBound, newData.getNumDVs()); higherBound += newData.getNumDVs(); if (newData.isSetLowValue() && newData.isSetHighValue()) { densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs(); } if (areAllNDVEstimatorsMergeable && ndvEstimator != null) { ndvEstimator.mergeEstimators(newData.getNdvEstimator()); } if (aggregateData == null) { aggregateData = newData.deepCopy(); } else { aggregateData.setLowValue(merger.mergeLowValue( merger.getLowValue(aggregateData), merger.getLowValue(newData))); aggregateData.setHighValue(merger.mergeHighValue( merger.getHighValue(aggregateData), merger.getHighValue(newData))); aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls())); aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs())); } } if (areAllNDVEstimatorsMergeable && ndvEstimator != null) { // if all the ColumnStatisticsObjs contain bitvectors, we do not need to // use uniform distribution assumption because we can merge bitvectors // to get a good estimation. 
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); } else { long estimation; if (useDensityFunctionForNDVEstimation && aggregateData != null && aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) { // We have estimation, lowerbound and higherbound. We use estimation // if it is between lowerbound and higherbound. double densityAvg = densityAvgSum / partNames.size(); estimation = (long) (diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / densityAvg); if (estimation < lowerBound) { estimation = lowerBound; } else if (estimation > higherBound) { estimation = higherBound; } } else { estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); } aggregateData.setNumDVs(estimation); } columnStatisticsData.setDateStats(aggregateData); } else { // TODO: bail out if missing stats are over a certain threshold // we need extrapolation LOG.debug("start extrapolation for {}", colName); Map<String, Integer> indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } Map<String, Double> adjustedIndexMap = new HashMap<>(); Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higherbound when useDensityFunctionForNDVEstimation is true. double densityAvgSum = 0.0; if (!areAllNDVEstimatorsMergeable) { // if not every partition uses bitvector for ndv, we just fall back to // the traditional extrapolation methods. for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); String partName = csp.getPartName(); DateColumnStatsData newData = cso.getStatsData().getDateStats(); if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) { densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs(); } adjustedIndexMap.put(partName, (double) indexMap.get(partName)); adjustedStatsMap.put(partName, cso.getStatsData()); } } else { // we first merge all the adjacent bitvectors that we could merge and // derive new partition names and index. StringBuilder pseudoPartName = new StringBuilder(); double pseudoIndexSum = 0; int length = 0; int curIndex = -1; DateColumnStatsDataInspector aggregateData = null; for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); String partName = csp.getPartName(); DateColumnStatsDataInspector newData = dateInspectorFromStats(cso); // newData.isSetBitVectors() should be true for sure because we // already checked it before. if (indexMap.get(partName) != curIndex) { // There is bitvector, but it is not adjacent to the previous ones. 
if (length > 0) { // we have to set ndv adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); ColumnStatisticsData csd = new ColumnStatisticsData(); csd.setDateStats(aggregateData); adjustedStatsMap.put(pseudoPartName.toString(), csd); if (useDensityFunctionForNDVEstimation) { densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue())) / aggregateData.getNumDVs(); } // reset everything pseudoPartName = new StringBuilder(); pseudoIndexSum = 0; length = 0; ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); } aggregateData = null; } curIndex = indexMap.get(partName); pseudoPartName.append(partName); pseudoIndexSum += curIndex; length++; curIndex++; if (aggregateData == null) { aggregateData = newData.deepCopy(); } else { aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue())); aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue())); aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); } ndvEstimator.mergeEstimators(newData.getNdvEstimator()); } if (length > 0) { // we have to set ndv adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); ColumnStatisticsData csd = new ColumnStatisticsData(); csd.setDateStats(aggregateData); adjustedStatsMap.put(pseudoPartName.toString(), csd); if (useDensityFunctionForNDVEstimation) { densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue())) / aggregateData.getNumDVs(); } } } extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(), adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); } LOG.debug( "Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}", colName, columnStatisticsData.getDateStats().getNumDVs(), partNames.size(), colStatsWithSourceInfo.size()); KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo); if (mergedKllHistogramEstimator != null) { columnStatisticsData.getDateStats().setHistogram(mergedKllHistogramEstimator.serialize()); } statsObj.setStatsData(columnStatisticsData); return statsObj; }
@Test public void testAggregateMultiStatsWhenUnmergeableBitVectors() throws MetaException { List<String> partitions = Arrays.asList("part1", "part2", "part3"); long[] values1 = { DATE_1.getDaysSinceEpoch(), DATE_2.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch() }; ColumnStatisticsData data1 = new ColStatsBuilder<>(Date.class).numNulls(1).numDVs(3) .low(DATE_1).high(DATE_3).fmSketch(values1).kll(values1).build(); long[] values2 = { DATE_3.getDaysSinceEpoch(), DATE_4.getDaysSinceEpoch(), DATE_5.getDaysSinceEpoch() }; ColumnStatisticsData data2 = new ColStatsBuilder<>(Date.class).numNulls(2).numDVs(3) .low(DATE_3).high(DATE_5).hll(values2).kll(values2).build(); long[] values3 = { DATE_1.getDaysSinceEpoch(), DATE_2.getDaysSinceEpoch(), DATE_6.getDaysSinceEpoch(), DATE_8.getDaysSinceEpoch() }; ColumnStatisticsData data3 = new ColStatsBuilder<>(Date.class).numNulls(3).numDVs(4) .low(DATE_1).high(DATE_8).hll(values3).kll(values3).build(); List<ColStatsObjWithSourceInfo> statsList = Arrays.asList( createStatsWithInfo(data1, TABLE, COL, partitions.get(0)), createStatsWithInfo(data2, TABLE, COL, partitions.get(1)), createStatsWithInfo(data3, TABLE, COL, partitions.get(2))); DateColumnStatsAggregator aggregator = new DateColumnStatsAggregator(); long[] values = Longs.concat(values1, values2, values3); ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true); // the aggregation does not update the bitvector, only numDVs is, it keeps the first bitvector; // numDVs is set to the maximum among all stats when non-mergeable bitvectors are detected ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Date.class).numNulls(6).numDVs(4) .low(DATE_1).high(DATE_8).fmSketch(values1).kll(values).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); aggregator.useDensityFunctionForNDVEstimation = true; computedStatsObj = aggregator.aggregate(statsList, partitions, true); // the use of the density function leads to a different estimation for numNDV expectedStats = new ColStatsBuilder<>(Date.class).numNulls(6).numDVs(6) .low(DATE_1).high(DATE_8).fmSketch(values1).kll(values).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); aggregator.useDensityFunctionForNDVEstimation = false; double[] tunerValues = new double[] { 0, 0.5, 0.75, 1 }; long[] expectedNDVs = new long[] { 4, 7, 8, 10 }; for (int i = 0; i < tunerValues.length; i++) { aggregator.ndvTuner = tunerValues[i]; computedStatsObj = aggregator.aggregate(statsList, partitions, true); expectedStats = new ColStatsBuilder<>(Date.class).numNulls(6).numDVs(expectedNDVs[i]) .low(DATE_1).high(DATE_8).fmSketch(values1).kll(values).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); } }
@Udf(description = "Returns the sine of an INT value") public Double sin( @UdfParameter( value = "value", description = "The value in radians to get the sine of." ) final Integer value ) { return sin(value == null ? null : value.doubleValue()); }
@Test public void shouldHandleNull() { assertThat(udf.sin((Integer) null), is(nullValue())); assertThat(udf.sin((Long) null), is(nullValue())); assertThat(udf.sin((Double) null), is(nullValue())); }
public static Iterable<List<Object>> fixData(List<Column> columns, Iterable<List<Object>> data) { if (data == null) { return null; } requireNonNull(columns, "columns is null"); List<TypeSignature> signatures = columns.stream() .map(column -> parseTypeSignature(column.getType())) .collect(toList()); ImmutableList.Builder<List<Object>> rows = ImmutableList.builder(); for (List<Object> row : data) { checkArgument(row.size() == columns.size(), "row/column size mismatch"); List<Object> newRow = new ArrayList<>(); for (int i = 0; i < row.size(); i++) { newRow.add(fixValue(signatures.get(i), row.get(i))); } rows.add(unmodifiableList(newRow)); // allow nulls in list } return rows.build(); }
@Test public void testFixData() { testFixDataWithTypePrefix(""); }
@Override public int run(String[] args) throws Exception { if (args.length != 2) { return usage(args); } String action = args[0]; String name = args[1]; int result; if (A_LOAD.equals(action)) { result = loadClass(name); } else if (A_CREATE.equals(action)) { //first load to separate load errors from create result = loadClass(name); if (result == SUCCESS) { //class loads, so instantiate it result = createClassInstance(name); } } else if (A_RESOURCE.equals(action)) { result = loadResource(name); } else if (A_PRINTRESOURCE.equals(action)) { result = dumpResource(name); } else { result = usage(args); } return result; }
@Test public void testCreateFailsInConstructor() throws Throwable { run(FindClass.E_CREATE_FAILED, FindClass.A_CREATE, "org.apache.hadoop.util.TestFindClass$FailInConstructor"); }
@Override public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext) { doEvaluateDisruptContext(request, requestContext); return _client.sendRequest(request, requestContext); }
@Test public void testSendRequest7() { when(_builder.build()).thenReturn(_request); when(_controller.getDisruptContext(any(String.class), any(ResourceMethod.class))).thenReturn(_disrupt); _client.sendRequest(_builder, _context, _behavior); verify(_underlying, times(1)).sendRequest(eq(_request), eq(_context), eq(_behavior)); verify(_context, times(1)).putLocalAttr(eq(DISRUPT_CONTEXT_KEY), eq(_disrupt)); verify(_context, times(1)).putLocalAttr(eq(DISRUPT_SOURCE_KEY), any(String.class)); }
@SuppressWarnings("unchecked") @Override public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerRequest request) throws YarnException, IOException { NodeId nodeId = request.getNodeId(); String host = nodeId.getHost(); int cmPort = nodeId.getPort(); int httpPort = request.getHttpPort(); Resource capability = request.getResource(); String nodeManagerVersion = request.getNMVersion(); Resource physicalResource = request.getPhysicalResource(); NodeStatus nodeStatus = request.getNodeStatus(); RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); if (!minimumNodeManagerVersion.equals("NONE")) { if (minimumNodeManagerVersion.equals("EqualToRM")) { minimumNodeManagerVersion = YarnVersionInfo.getVersion(); } if ((nodeManagerVersion == null) || (VersionUtil.compareVersions(nodeManagerVersion,minimumNodeManagerVersion)) < 0) { String message = "Disallowed NodeManager Version " + nodeManagerVersion + ", is less than the minimum version " + minimumNodeManagerVersion + " sending SHUTDOWN signal to " + "NodeManager."; LOG.info(message); response.setDiagnosticsMessage(message); response.setNodeAction(NodeAction.SHUTDOWN); return response; } } if (checkIpHostnameInRegistration) { InetSocketAddress nmAddress = NetUtils.createSocketAddrForHost(host, cmPort); InetAddress inetAddress = Server.getRemoteIp(); if (inetAddress != null && nmAddress.isUnresolved()) { // Reject registration of unresolved nm to prevent resourcemanager // getting stuck at allocations. final String message = "hostname cannot be resolved (ip=" + inetAddress.getHostAddress() + ", hostname=" + host + ")"; LOG.warn("Unresolved nodemanager registration: " + message); response.setDiagnosticsMessage(message); response.setNodeAction(NodeAction.SHUTDOWN); return response; } } // Check if this node is a 'valid' node if (!this.nodesListManager.isValidNode(host) && !isNodeInDecommissioning(nodeId)) { String message = "Disallowed NodeManager from " + host + ", Sending SHUTDOWN signal to the NodeManager."; LOG.info(message); response.setDiagnosticsMessage(message); response.setNodeAction(NodeAction.SHUTDOWN); return response; } // check if node's capacity is load from dynamic-resources.xml String nid = nodeId.toString(); Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid); if (dynamicLoadCapability != null) { LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to" + " settings in dynamic-resources.xml.", nid, capability, dynamicLoadCapability); capability = dynamicLoadCapability; // sync back with new resource. response.setResource(capability); } // Check if this node has minimum allocations if (capability.getMemorySize() < minAllocMb || capability.getVirtualCores() < minAllocVcores) { String message = "NodeManager from " + host + " doesn't satisfy minimum allocations, Sending SHUTDOWN" + " signal to the NodeManager. 
Node capabilities are " + capability + "; minimums are " + minAllocMb + "mb and " + minAllocVcores + " vcores"; LOG.info(message); response.setDiagnosticsMessage(message); response.setNodeAction(NodeAction.SHUTDOWN); return response; } response.setContainerTokenMasterKey(containerTokenSecretManager .getCurrentKey()); response.setNMTokenMasterKey(nmTokenSecretManager .getCurrentKey()); RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, resolve(host), capability, nodeManagerVersion, physicalResource); RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode); if (oldNode == null) { RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId, request.getNMContainerStatuses(), request.getRunningApplications(), nodeStatus); if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug("Found the number of previous cached log aggregation " + "status from nodemanager:" + nodeId + " is :" + request.getLogAggregationReportsForApps().size()); } startEvent.setLogAggregationReportsForApps(request .getLogAggregationReportsForApps()); } this.rmContext.getDispatcher().getEventHandler().handle( startEvent); } else { LOG.info("Reconnect from the node at: " + host); this.nmLivelinessMonitor.unregister(nodeId); if (CollectionUtils.isEmpty(request.getRunningApplications()) && rmNode.getState() != NodeState.DECOMMISSIONING && rmNode.getHttpPort() != oldNode.getHttpPort()) { // Reconnected node differs, so replace old node and start new node switch (rmNode.getState()) { case RUNNING: ClusterMetrics.getMetrics().decrNumActiveNodes(); break; case UNHEALTHY: ClusterMetrics.getMetrics().decrNumUnhealthyNMs(); break; default: LOG.debug("Unexpected Rmnode state"); } this.rmContext.getDispatcher().getEventHandler() .handle(new NodeRemovedSchedulerEvent(rmNode)); this.rmContext.getRMNodes().put(nodeId, rmNode); this.rmContext.getDispatcher().getEventHandler() .handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus)); } else { // Reset heartbeat ID since node just restarted. oldNode.resetLastNodeHeartBeatResponse(); this.rmContext.getDispatcher().getEventHandler() .handle(new RMNodeReconnectEvent(nodeId, rmNode, request.getRunningApplications(), request.getNMContainerStatuses())); } } // On every node manager register we will be clearing NMToken keys if // present for any running application. this.nmTokenSecretManager.removeNodeKey(nodeId); this.nmLivelinessMonitor.register(nodeId); // Handle received container status, this should be processed after new // RMNode inserted if (!rmContext.isWorkPreservingRecoveryEnabled()) { if (!request.getNMContainerStatuses().isEmpty()) { LOG.info("received container statuses on node manager register :" + request.getNMContainerStatuses()); for (NMContainerStatus status : request.getNMContainerStatuses()) { handleNMContainerStatus(status, nodeId); } } } // Update node's labels to RM's NodeLabelManager. 
Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet( request.getNodeLabels()); if (isDistributedNodeLabelsConf && nodeLabels != null) { try { updateNodeLabelsFromNMReport(nodeLabels, nodeId); response.setAreNodeLabelsAcceptedByRM(true); } catch (IOException ex) { // Ensure the exception is captured in the response response.setDiagnosticsMessage(ex.getMessage()); response.setAreNodeLabelsAcceptedByRM(false); } } else if (isDelegatedCentralizedNodeLabelsConf) { this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId); } // Update node's attributes to RM's NodeAttributesManager. if (request.getNodeAttributes() != null) { try { // update node attributes if necessary then update heartbeat response updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes()); response.setAreNodeAttributesAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response String errorMsg = response.getDiagnosticsMessage() == null ? ex.getMessage() : response.getDiagnosticsMessage() + "\n" + ex.getMessage(); response.setDiagnosticsMessage(errorMsg); response.setAreNodeAttributesAcceptedByRM(false); } } StringBuilder message = new StringBuilder(); message.append("NodeManager from node ").append(host).append("(cmPort: ") .append(cmPort).append(" httpPort: "); message.append(httpPort).append(") ") .append("registered with capability: ").append(capability); message.append(", assigned nodeId ").append(nodeId); if (response.getAreNodeLabelsAcceptedByRM()) { message.append(", node labels { ").append( StringUtils.join(",", nodeLabels) + " } "); } if (response.getAreNodeAttributesAcceptedByRM()) { message.append(", node attributes { ") .append(request.getNodeAttributes() + " } "); } LOG.info(message.toString()); response.setNodeAction(NodeAction.NORMAL); response.setRMIdentifier(ResourceManager.getClusterTimeStamp()); response.setRMVersion(YarnVersionInfo.getVersion()); return response; }
@Test public void testNodeRegistrationFailure() throws Exception { writeToHostsFile("host1"); Configuration conf = new Configuration(); conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile .getAbsolutePath()); rm = new MockRM(conf); rm.start(); ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService(); RegisterNodeManagerRequest req = Records.newRecord( RegisterNodeManagerRequest.class); NodeId nodeId = NodeId.newInstance("host2", 1234); req.setNodeId(nodeId); req.setHttpPort(1234); // trying to register an invalid node. RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req); Assert.assertEquals(NodeAction.SHUTDOWN,response.getNodeAction()); Assert .assertEquals( "Disallowed NodeManager from host2, Sending SHUTDOWN signal to the NodeManager.", response.getDiagnosticsMessage()); }
@Override public void close() { if (!ObjectUtils.isEmpty(watchConfigChangeListener)) { configService.removeChangeListener(watchConfigChangeListener); } }
@Test public void testClose() { Config configService = mock(Config.class); ApolloDataService apolloDataService = new ApolloDataService(configService, null, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); apolloDataService.close(); }
@Override public void preflight(final Path directory) throws BackgroundException { final Acl acl = directory.attributes().getAcl(); if(Acl.EMPTY == acl) { // Missing initialization log.warn(String.format("Unknown ACLs on %s", directory)); return; } if(!acl.get(new Acl.CanonicalUser()).contains(CANLISTCHILDREN)) { if(log.isWarnEnabled()) { log.warn(String.format("ACL %s for %s does not include %s", acl, directory, CANLISTCHILDREN)); } throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot download {0}", "Error"), directory.getName())).withFile(directory); } }
@Test public void testListChildrenInbox() throws Exception { final DeepboxIdProvider nodeid = new DeepboxIdProvider(session); final Path folder = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Inbox/", EnumSet.of(Path.Type.directory, Path.Type.volume)); final PathAttributes attributes = new DeepboxAttributesFinderFeature(session, nodeid).find(folder); assertTrue(new BoxRestControllerApi(session.getClient()).getBox(ORG4, ORG4_BOX1).getBoxPolicy().isCanAddQueue()); assertTrue(attributes.getAcl().get(new Acl.CanonicalUser()).contains(CANLISTCHILDREN)); // assert no fail new DeepboxListService(session, nodeid).preflight(folder.withAttributes(attributes)); }
public List<String> tokenize(String text) { List<String> tokens = new ArrayList<>(); Matcher regexMatcher = regexExpression.matcher(text); int lastIndexOfPrevMatch = 0; while (regexMatcher.find(lastIndexOfPrevMatch)) // this is where the magic happens: // the regexp is used to find a matching pattern for substitution { int beginIndexOfNextMatch = regexMatcher.start(); String prevToken = text.substring(lastIndexOfPrevMatch, beginIndexOfNextMatch); if (!prevToken.isEmpty()) { tokens.add(prevToken); } String currentMatch = regexMatcher.group(); tokens.add(currentMatch); lastIndexOfPrevMatch = regexMatcher.end(); if (lastIndexOfPrevMatch < text.length() && text.charAt(lastIndexOfPrevMatch) != '_') { // because it is sometimes positioned after the "_", but it should be positioned // before the "_" --lastIndexOfPrevMatch; } } String tail = text.substring(lastIndexOfPrevMatch); if (!tail.isEmpty()) { tokens.add(tail); } return tokens; }
@Test void testTokenize_happyPath_9() { // given CompoundCharacterTokenizer tokenizer = new CompoundCharacterTokenizer( new HashSet<>(Arrays.asList("_101_102_", "_101_102_"))); String text = "_100_101_102_103_104_"; // when List<String> tokens = tokenizer.tokenize(text); // then assertEquals(Arrays.asList("_100", "_101_102_", "_103_104_"), tokens); }
public static <K> KStreamHolder<K> build( final KStreamHolder<K> stream, final StreamSelectKey<K> selectKey, final RuntimeBuildContext buildContext ) { return build(stream, selectKey, buildContext, PartitionByParamsFactory::build); }
@Test public void shouldReturnRekeyedStream() { // When: final KStreamHolder<GenericKey> result = StreamSelectKeyBuilder .build(stream, selectKey, buildContext, paramBuilder); // Then: assertThat(result.getStream(), is(rekeyedKstream)); }
public abstract VoiceInstructionValue getConfigForDistance( double distance, String turnDescription, String thenVoiceInstruction);
@Test public void germanInitialVICMetricTest() { InitialVoiceInstructionConfig configMetric = new InitialVoiceInstructionConfig(FOR_HIGHER_DISTANCE_PLURAL.metric, trMap, Locale.GERMAN, 4250, 250, DistanceUtils.Unit.METRIC); compareVoiceInstructionValues( 4000, "Dem Straßenverlauf folgen für 4 Kilometer", configMetric.getConfigForDistance(5000, "abbiegen", " dann") ); compareVoiceInstructionValues( 4000, "Dem Straßenverlauf folgen für 4 Kilometer", configMetric.getConfigForDistance(4500, "abbiegen", " dann") ); }
public <T> Mono<CosmosItemResponse<T>> createItem( final T item, final PartitionKey partitionKey, final CosmosItemRequestOptions itemRequestOptions) { CosmosDbUtils.validateIfParameterIsNotEmpty(item, PARAM_ITEM); CosmosDbUtils.validateIfParameterIsNotEmpty(partitionKey, PARAM_PARTITION_KEY); return applyToContainer(container -> container.createItem(item, partitionKey, itemRequestOptions)); }
@Test void testCreateItem() { final CosmosDbContainerOperations operations = new CosmosDbContainerOperations(Mono.just(mock(CosmosAsyncContainer.class))); CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.createItem(null, null, null)); CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.createItem("", null, null)); CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.createItem("tes", null, null)); }
public void declareTypes(final List<KiePMMLDroolsType> types) { logger.trace("declareTypes {} ", types); types.forEach(this::declareType); }
@Test void declareTypes() { List<KiePMMLDroolsType> types = new ArrayList<>(); types.add(KiePMMLDescrTestUtils.getDroolsType()); types.add(KiePMMLDescrTestUtils.getDottedDroolsType()); assertThat(builder.getDescr().getTypeDeclarations()).isEmpty(); KiePMMLDescrTypesFactory.factory(builder).declareTypes(types); assertThat(builder.getDescr().getTypeDeclarations()).hasSize(2); IntStream.range(0, types.size()) .forEach(i -> commonVerifyTypeDeclarationDescr(Objects.requireNonNull(types.get(i)), builder.getDescr().getTypeDeclarations().get(i))); }
@Override public synchronized Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) { selectedBundlesCache.clear(); final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0; final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles(); final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB; final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf); if (avgUsage == 0) { log.warn("average max resource usage is 0"); return selectedBundlesCache; } loadData.getBrokerData().forEach((broker, brokerData) -> { final LocalBrokerData localData = brokerData.getLocalData(); final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0); if (currentUsage < avgUsage + threshold) { if (log.isDebugEnabled()) { log.debug("[{}] broker is not overloaded, ignoring at this point ({})", broker, localData.printResourceUsage()); } return; } double percentOfTrafficToOffload = currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN; double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut(); double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload; if (minimumThroughputToOffload < minThroughputThreshold) { if (log.isDebugEnabled()) { log.debug("[{}] broker is planning to shed throughput {} MByte/s less than " + "minimumThroughputThreshold {} MByte/s, skipping bundle unload ({})", broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB, localData.printResourceUsage()); } return; } log.info( "Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%" + " > {}% + {}% -- Offloading at least {} MByte/s of traffic," + " left throughput {} MByte/s ({})", broker, 100 * currentUsage, 100 * avgUsage, 100 * threshold, minimumThroughputToOffload / MB, (brokerCurrentThroughput - minimumThroughputToOffload) / MB, localData.printResourceUsage()); if (localData.getBundles().size() > 1) { filterAndSelectBundle(loadData, recentlyUnloadedBundles, broker, localData, minimumThroughputToOffload); } else if (localData.getBundles().size() == 1) { log.warn( "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. " + "No Load Shedding will be done on this broker", localData.getBundles().iterator().next(), broker); } else { log.warn("Broker {} is overloaded despite having no bundles", broker); } }); if (selectedBundlesCache.isEmpty() && conf.isLowerBoundarySheddingEnabled()) { tryLowerBoundaryShedding(loadData, conf); } return selectedBundlesCache; }
@Test public void testFilterRecentlyUnloaded() { int numBundles = 10; LoadData loadData = new LoadData(); LocalBrokerData broker1 = new LocalBrokerData(); broker1.setBandwidthIn(new ResourceUsage(999, 1000)); broker1.setBandwidthOut(new ResourceUsage(999, 1000)); LocalBrokerData broker2 = new LocalBrokerData(); String broker2Name = "broker2"; double brokerThroughput = 0; for (int i = 1; i <= numBundles; ++i) { broker1.getBundles().add("bundle-" + i); BundleData bundleData = new BundleData(); TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData(); double throughput = i * 1024 * 1024; timeAverageMessageData.setMsgThroughputIn(throughput); timeAverageMessageData.setMsgThroughputOut(throughput); bundleData.setShortTermData(timeAverageMessageData); loadData.getBundleData().put("bundle-" + i, bundleData); // This bundle should not be selected for `broker1` since it is belong to another broker. String broker2BundleName = broker2Name + "-bundle-" + (numBundles + i); loadData.getBundleData().put(broker2BundleName, bundleData); broker2.getBundles().add(broker2BundleName); brokerThroughput += throughput; } broker1.setMsgThroughputIn(brokerThroughput); broker1.setMsgThroughputOut(brokerThroughput); loadData.getBrokerData().put("broker-1", new BrokerData(broker1)); loadData.getBrokerData().put(broker2Name, new BrokerData(broker2)); loadData.getRecentlyUnloadedBundles().put("bundle-10", 1L); loadData.getRecentlyUnloadedBundles().put("bundle-9", 1L); Multimap<String, String> bundlesToUnload = thresholdShedder.findBundlesForUnloading(loadData, conf); assertFalse(bundlesToUnload.isEmpty()); assertEquals(bundlesToUnload.get("broker-1"), List.of("bundle-8", "bundle-7", "bundle-6", "bundle-5")); }
@Override public BackgroundException map(final GenericException e) { final StringBuilder buffer = new StringBuilder(); this.append(buffer, e.getMessage()); final StatusLine status = e.getHttpStatusLine(); if(null != status) { this.append(buffer, String.format("%d %s", status.getStatusCode(), status.getReasonPhrase())); } switch(e.getHttpStatusCode()) { case HttpStatus.SC_BAD_REQUEST: return new LoginFailureException(buffer.toString(), e); } return new DefaultHttpResponseExceptionMappingService().map(new HttpResponseException(e.getHttpStatusCode(), buffer.toString())); }
@Test public void testLoginFailure() { final GenericException f = new GenericException( "message", new Header[]{}, new BasicStatusLine(new ProtocolVersion("http", 1, 1), 403, "Forbidden")); assertTrue(new SwiftExceptionMappingService().map(f) instanceof AccessDeniedException); assertEquals("Access denied", new SwiftExceptionMappingService().map(f).getMessage()); assertEquals("Message. 403 Forbidden. Please contact your web hosting service provider for assistance.", new SwiftExceptionMappingService().map(f).getDetail()); }
public void removeDockerContainer(String containerId) { try { PrivilegedOperationExecutor privOpExecutor = PrivilegedOperationExecutor.getInstance(super.getConf()); if (DockerCommandExecutor.isRemovable( DockerCommandExecutor.getContainerStatus(containerId, privOpExecutor, nmContext))) { LOG.info("Removing Docker container : {}", containerId); DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId, ResourceHandlerModule.getCgroupsRelativeRoot()); DockerCommandExecutor.executeDockerCommand(dockerRmCommand, containerId, null, privOpExecutor, false, nmContext); } } catch (ContainerExecutionException e) { LOG.warn("Unable to remove docker container: {}", containerId); } }
@Test public void testRemoveDockerContainer() throws Exception { ApplicationId appId = ApplicationId.newInstance(12345, 67890); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 54321); String cid = ContainerId.newContainerId(attemptId, 9876).toString(); LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class); lce.removeDockerContainer(cid); verify(lce, times(1)).removeDockerContainer(cid); }
public Response post(URL url, Request request) throws IOException { return call(HttpMethods.POST, url, request); }
@Test public void testPost() throws IOException { verifyCall(HttpMethods.POST, FailoverHttpClient::post); }
public static double conversion(String expression) { return (new Calculator()).calculate(expression); }
@Test public void conversationTest2(){ final double conversion = Calculator.conversion("77 * 12"); assertEquals(924.0, conversion, 0); }
public static ValueLabel formatPacketRate(long packets) { return new ValueLabel(packets, PACKETS_UNIT).perSec(); }
@Test public void formatPacketRateSmall() { vl = TopoUtils.formatPacketRate(37); assertEquals(AM_WL, "37 pps", vl.toString()); }
@Override public boolean enableSendingOldValues(final boolean forceMaterialization) { if (queryableName != null) { sendOldValues = true; return true; } if (parent.enableSendingOldValues(forceMaterialization)) { sendOldValues = true; } return sendOldValues; }
@Test public void shouldEnableSendingOldValuesOnParentIfMapValuesNotMaterialized() { final StreamsBuilder builder = new StreamsBuilder(); final String topic1 = "topic1"; final KTableImpl<String, String, String> table1 = (KTableImpl<String, String, String>) builder.table(topic1, consumed); final KTableImpl<String, String, Integer> table2 = (KTableImpl<String, String, Integer>) table1.mapValues(s -> Integer.valueOf(s)); table2.enableSendingOldValues(true); assertThat(table1.sendingOldValueEnabled(), is(true)); assertThat(table2.sendingOldValueEnabled(), is(true)); testSendingOldValues(builder, topic1, table2); }
@VisibleForTesting void checkRemoteFoldernameField( String remoteFoldernameFieldName, SFTPPutData data ) throws KettleStepException { // Remote folder fieldname remoteFoldernameFieldName = environmentSubstitute( remoteFoldernameFieldName ); if ( Utils.isEmpty( remoteFoldernameFieldName ) ) { // remote folder field is missing throw new KettleStepException( BaseMessages.getString( PKG, "SFTPPut.Error.RemoteFolderNameFieldMissing" ) ); } data.indexOfRemoteDirectory = getInputRowMeta().indexOfValue( remoteFoldernameFieldName ); if ( data.indexOfRemoteDirectory == -1 ) { // remote foldername field is missing throw new KettleStepException( BaseMessages.getString( PKG, "SFTPPut.Error.CanNotFindField", remoteFoldernameFieldName ) ); } }
@Test public void checkRemoteFoldernameField_NameIsSet_Found() throws Exception { RowMeta rowMeta = rowOfStringsMeta( "some field", "remoteFoldernameFieldName" ); step.setInputRowMeta( rowMeta ); SFTPPutData data = new SFTPPutData(); step.checkRemoteFoldernameField( "remoteFoldernameFieldName", data ); assertEquals( 1, data.indexOfRemoteDirectory ); }
public List<CloudtrailSNSNotification> parse(Message message) { LOG.debug("Parsing message."); try { LOG.debug("Reading message body {}.", message.getBody()); final SQSMessage envelope = objectMapper.readValue(message.getBody(), SQSMessage.class); if (envelope.message == null) { LOG.warn("Message is empty. Processing of message has been aborted. Verify that the SQS subscription in AWS is NOT set to send raw data."); return Collections.emptyList(); } LOG.debug("Reading message envelope {}.", envelope.message); if (envelope.message.contains(CLOUD_TRAIL_VALIDATION_MESSAGE)) { return Collections.emptyList(); } final CloudtrailWriteNotification notification = objectMapper.readValue(envelope.message, CloudtrailWriteNotification.class); final List<String> s3ObjectKeys = notification.s3ObjectKey; if (s3ObjectKeys == null) { LOG.debug("No S3 object keys parsed."); return Collections.emptyList(); } LOG.debug("Processing [{}] S3 keys.", s3ObjectKeys.size()); final List<CloudtrailSNSNotification> notifications = new ArrayList<>(s3ObjectKeys.size()); for (String s3ObjectKey : s3ObjectKeys) { notifications.add(new CloudtrailSNSNotification(message.getReceiptHandle(), notification.s3Bucket, s3ObjectKey)); } LOG.debug("Returning [{}] notifications.", notifications.size()); return notifications; } catch (IOException e) { LOG.error("Parsing exception.", e); /* Don't throw an exception that would halt processing for one parsing failure. * Sometimes occasional non-JSON test messages will come through. If this happens, * just log the error and keep processing. * * Returning an empty list here is OK and should be caught by the caller. */ return new ArrayList<>(); } }
@Test public void testParseWithTwoS3Objects() throws Exception { final Message doubleMessage = new Message() .withBody("{\n" + " \"Type\" : \"Notification\",\n" + " \"MessageId\" : \"11a04c4a-094e-5395-b297-00eaefda2893\",\n" + " \"TopicArn\" : \"arn:aws:sns:eu-west-1:459220251735:cloudtrail-write\",\n" + " \"Message\" : \"{\\\"s3Bucket\\\":\\\"cloudtrailbucket\\\",\\\"s3ObjectKey\\\":[\\\"example/AWSLogs/459220251735/CloudTrail/eu-west-1/2014/09/27/459220251735_CloudTrail_eu-west-1_20140927T1620Z_Nk2SdmlEzA0gDpPr.json.gz\\\", \\\"example/AWSLogs/459220251735/CloudTrail/eu-west-1/2014/09/27/459220251999_CloudTrail2_eu-west-1_20140927T1620Z_Nk2SdmlEzA0gDpPr.json.gz\\\"]}\",\n" + " \"Timestamp\" : \"2014-09-27T16:22:44.011Z\",\n" + " \"SignatureVersion\" : \"1\",\n" + " \"Signature\" : \"q9xmJZ8nJR5iaAYMLN3M8v9HyLbUqbLjGGFlmmvIK9UDQiQO0wmvlYeo5/lQqvANW/v+NVXZxxOoWx06p6Rv5BwXIa2ASVh7RlXc2y+U2pQgLaQlJ671cA33iBi/iH1al/7lTLrlIkUb9m2gAdEyulbhZfBfAQOm7GN1PHR/nW+CtT61g4KvMSonNzj23jglLTb0r6pxxQ5EmXz6Jo5DOsbXvuFt0BSyVP/8xRXT1ap0S7BuUOstz8+FMqdUyOQSR9RA9r61yUsJ4nnq0KfK5/1gjTTDPmE4OkGvk6AuV9YTME7FWTY/wU4LPg5/+g/rUo2UDGrxnGoJ3OUW5yrtyQ==\",\n" + " \"SigningCertURL\" : \"https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-d6d679a1d18e95c2f9ffcf11f4f9e198.pem\",\n" + " \"UnsubscribeURL\" : \"https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:eu-west-1:459220251735:cloudtrail-write:9a3a4e76-4173-4c8c-b488-0126315ba643\"\n" + "}"); CloudtrailSNSNotificationParser parser = new CloudtrailSNSNotificationParser(objectMapper); List<CloudtrailSNSNotification> notifications = parser.parse(doubleMessage); assertEquals(2, notifications.size()); CloudtrailSNSNotification notification1 = notifications.get(0); CloudtrailSNSNotification notification2 = notifications.get(1); assertEquals(notification1.getS3Bucket(), "cloudtrailbucket"); assertEquals(notification1.getS3ObjectKey(), "example/AWSLogs/459220251735/CloudTrail/eu-west-1/2014/09/27/459220251735_CloudTrail_eu-west-1_20140927T1620Z_Nk2SdmlEzA0gDpPr.json.gz"); assertEquals(notification2.getS3Bucket(), "cloudtrailbucket"); assertEquals(notification2.getS3ObjectKey(), "example/AWSLogs/459220251735/CloudTrail/eu-west-1/2014/09/27/459220251999_CloudTrail2_eu-west-1_20140927T1620Z_Nk2SdmlEzA0gDpPr.json.gz"); }
public static <InputT> PTransform<PCollection<InputT>, PCollection<Row>> toRows() { return to(Row.class); }
@Test @Category(NeedsRunner.class) public void testToRows() { PCollection<Row> rows = pipeline.apply(Create.of(new POJO1())).apply(Convert.toRows()); PAssert.that(rows).containsInAnyOrder(EXPECTED_ROW1); pipeline.run(); }
@Override public void triggerProbe(DeviceId deviceId) { LOG.debug("Triggering probe on device {}", deviceId); final Dpid dpid = dpid(deviceId.uri()); OpenFlowSwitch sw = controller.getSwitch(dpid); if (sw == null || !sw.isConnected()) { LOG.error("Failed to probe device {} on sw={}", deviceId, sw); providerService.deviceDisconnected(deviceId); return; } else { LOG.trace("Confirmed device {} connection", deviceId); } // Prompt an update of port information. We can use any XID for this. OFFactory fact = sw.factory(); switch (fact.getVersion()) { case OF_10: sw.sendMsg(fact.buildFeaturesRequest().setXid(0).build()); break; case OF_13: case OF_14: case OF_15: sw.sendMsg(fact.buildPortDescStatsRequest().setXid(0).build()); break; default: LOG.warn("Unhandled protocol version"); } }
@Test public void triggerProbe() { int cur = SW1.sent.size(); provider.triggerProbe(DID1); assertEquals("OF message not sent", cur + 1, SW1.sent.size()); }
int sampleRunnable(Runnable runnable) { if (runnable instanceof LocalEventDispatcher eventDispatcher) { return sampleLocalDispatcherEvent(eventDispatcher); } occurrenceMap.add(runnable.getClass().getName(), 1); return 1; }
@Test public void testSampleRunnable() { Address caller = new Address(); Data data = mock(Data.class); EntryEventData mapEventAdded = new EntryEventData("source", "mapName", caller, data, data, data, ADDED.getType()); EntryEventData mapEventUpdated = new EntryEventData("source", "mapName", caller, data, data, data, UPDATED.getType()); EntryEventData mapEventRemoved = new EntryEventData("source", "mapName", caller, data, data, data, REMOVED.getType()); assertSampleRunnable("IMap 'mapName' ADDED", mapEventAdded, MapService.SERVICE_NAME); assertSampleRunnable("IMap 'mapName' UPDATED", mapEventUpdated, MapService.SERVICE_NAME); assertSampleRunnable("IMap 'mapName' REMOVED", mapEventRemoved, MapService.SERVICE_NAME); CacheEventData cacheEventCreated = new CacheEventDataImpl("cacheName", CacheEventType.CREATED, data, data, data, true); CacheEventData cacheEventUpdated = new CacheEventDataImpl("cacheName", CacheEventType.UPDATED, data, data, data, true); CacheEventData cacheEventRemoved = new CacheEventDataImpl("cacheName", CacheEventType.REMOVED, data, data, data, true); CacheEventSet CacheEventSetCreated = new CacheEventSet(CacheEventType.CREATED, singleton(cacheEventCreated), 1); CacheEventSet CacheEventSetUpdated = new CacheEventSet(CacheEventType.UPDATED, singleton(cacheEventUpdated), 1); CacheEventSet cacheEventSetRemoved = new CacheEventSet(CacheEventType.REMOVED, singleton(cacheEventRemoved), 1); assertSampleRunnable("ICache 'cacheName' CREATED", CacheEventSetCreated, CacheService.SERVICE_NAME); assertSampleRunnable("ICache 'cacheName' UPDATED", CacheEventSetUpdated, CacheService.SERVICE_NAME); assertSampleRunnable("ICache 'cacheName' REMOVED", cacheEventSetRemoved, CacheService.SERVICE_NAME); List<CacheEventData> cacheEventData = asList(cacheEventCreated, cacheEventUpdated, cacheEventRemoved); Set<CacheEventData> cacheEvents = new HashSet<>(cacheEventData); CacheEventSet cacheEventSetAll = new CacheEventSet(CacheEventType.EXPIRED, cacheEvents, 1); assertCacheEventSet(cacheEventSetAll, "ICache 'cacheName' CREATED", "ICache 'cacheName' UPDATED", "ICache 'cacheName' REMOVED"); QueueEvent queueEventAdded = new QueueEvent("queueName", data, ItemEventType.ADDED, caller); QueueEvent queueEventRemoved = new QueueEvent("queueName", data, ItemEventType.REMOVED, caller); assertSampleRunnable("IQueue 'queueName' ADDED", queueEventAdded, QueueService.SERVICE_NAME); assertSampleRunnable("IQueue 'queueName' REMOVED", queueEventRemoved, QueueService.SERVICE_NAME); CollectionEvent setEventAdded = new CollectionEvent("setName", data, ItemEventType.ADDED, caller); CollectionEvent setEventRemoved = new CollectionEvent("setName", data, ItemEventType.REMOVED, caller); assertSampleRunnable("ISet 'setName' ADDED", setEventAdded, SetService.SERVICE_NAME); assertSampleRunnable("ISet 'setName' REMOVED", setEventRemoved, SetService.SERVICE_NAME); CollectionEvent listEventAdded = new CollectionEvent("listName", data, ItemEventType.ADDED, caller); CollectionEvent listEventRemoved = new CollectionEvent("listName", data, ItemEventType.REMOVED, caller); assertSampleRunnable("IList 'listName' ADDED", listEventAdded, ListService.SERVICE_NAME); assertSampleRunnable("IList 'listName' REMOVED", listEventRemoved, ListService.SERVICE_NAME); assertSampleRunnable("Object", new Object(), LongRegisterService.SERVICE_NAME); assertSampleRunnable(new TestEvent(), TestEvent.class.getName()); }
public int filterEntriesForConsumer(List<? extends Entry> entries, EntryBatchSizes batchSizes, SendMessageInfo sendMessageInfo, EntryBatchIndexesAcks indexesAcks, ManagedCursor cursor, boolean isReplayRead, Consumer consumer) { return filterEntriesForConsumer(null, 0, entries, batchSizes, sendMessageInfo, indexesAcks, cursor, isReplayRead, consumer); }
@Test public void testFilterEntriesForConsumerOfEntryFilter() throws Exception { Topic mockTopic = mock(Topic.class); when(this.subscriptionMock.getTopic()).thenReturn(mockTopic); final EntryFilterProvider entryFilterProvider = mock(EntryFilterProvider.class); final ServiceConfiguration serviceConfiguration = mock(ServiceConfiguration.class); when(serviceConfiguration.isAllowOverrideEntryFilters()).thenReturn(true); final PulsarService pulsar = mock(PulsarService.class); when(pulsar.getConfiguration()).thenReturn(serviceConfiguration); BrokerService mockBrokerService = mock(BrokerService.class); when(mockBrokerService.pulsar()).thenReturn(pulsar); when(mockBrokerService.getEntryFilterProvider()).thenReturn(entryFilterProvider); when(mockTopic.getBrokerService()).thenReturn(mockBrokerService); EntryFilter mockFilter = mock(EntryFilter.class); when(mockFilter.filterEntry(any(Entry.class), any(FilterContext.class))).thenReturn( EntryFilter.FilterResult.REJECT); when(mockTopic.getEntryFilters()).thenReturn(List.of(mockFilter)); DispatchRateLimiter subscriptionDispatchRateLimiter = mock(DispatchRateLimiter.class); this.helper = new AbstractBaseDispatcherTestHelper(this.subscriptionMock, this.svcConfig, subscriptionDispatchRateLimiter); List<Entry> entries = new ArrayList<>(); Entry e = EntryImpl.create(1, 2, createMessage("message1", 1)); long expectedBytePermits = e.getLength(); entries.add(e); SendMessageInfo sendMessageInfo = SendMessageInfo.getThreadLocal(); EntryBatchSizes batchSizes = EntryBatchSizes.get(entries.size()); ManagedCursor cursor = mock(ManagedCursor.class); int size = this.helper.filterEntriesForConsumer(entries, batchSizes, sendMessageInfo, null, cursor, false, null); assertEquals(size, 0); verify(subscriptionDispatchRateLimiter).consumeDispatchQuota(1, expectedBytePermits); }
public CombinedServiceDiscovery(List<ServiceDiscovery> delegates) { this.delegates = Collections.unmodifiableList(delegates); }
@Test public void testCombinedServiceDiscovery() { StaticServiceDiscovery discovery1 = new StaticServiceDiscovery(); discovery1.addServer(new DefaultServiceDefinition("discovery1", "localhost", 1111)); discovery1.addServer(new DefaultServiceDefinition("discovery1", "localhost", 1112)); StaticServiceDiscovery discovery2 = new StaticServiceDiscovery(); discovery2.addServer(new DefaultServiceDefinition("discovery1", "localhost", 1113)); discovery2.addServer(new DefaultServiceDefinition("discovery2", "localhost", 1114)); CombinedServiceDiscovery discovery = CombinedServiceDiscovery.wrap(discovery1, discovery2); assertEquals(3, discovery.getServices("discovery1").size()); assertEquals(1, discovery.getServices("discovery2").size()); }
public static PostgreSQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) { Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find PostgreSQL type '%s' in column type when process binary protocol value", binaryColumnType); return BINARY_PROTOCOL_VALUES.get(binaryColumnType); }
@Test void assertGetInt8BinaryProtocolValue() { PostgreSQLBinaryProtocolValue binaryProtocolValue = PostgreSQLBinaryProtocolValueFactory.getBinaryProtocolValue(PostgreSQLColumnType.INT8); assertThat(binaryProtocolValue, instanceOf(PostgreSQLInt8BinaryProtocolValue.class)); }
public static void main(String[] args) throws Exception { Arguments arguments = new Arguments(); CommandLine commander = new CommandLine(arguments); try { commander.parseArgs(args); if (arguments.help) { commander.usage(commander.getOut()); return; } if (arguments.generateDocs && arguments.configFile != null) { CmdGenerateDocs cmd = new CmdGenerateDocs("pulsar"); cmd.addCommand("websocket", commander); cmd.run(null); return; } } catch (Exception e) { commander.getErr().println(e); return; } checkArgument(args.length == 1, "Need to specify a configuration file"); try { // load config file and start proxy service String configFile = args[0]; WebSocketProxyConfiguration config = loadConfig(configFile); ProxyServer proxyServer = new ProxyServer(config); WebSocketService service = new WebSocketService(config); start(proxyServer, service); } catch (Exception e) { log.error("Failed to start WebSocket service", e); ShutdownUtil.triggerImmediateForcefulShutdown(); } }
@Test public void testMainGenerateDocs() throws Exception { PrintStream oldStream = System.out; try { ByteArrayOutputStream baoStream = new ByteArrayOutputStream(); System.setOut(new PrintStream(baoStream)); Class argumentsClass = Class.forName("org.apache.pulsar.websocket.service.WebSocketServiceStarter$Arguments"); WebSocketServiceStarter.main(new String[]{"-g"}); String message = baoStream.toString(); Field[] fields = argumentsClass.getDeclaredFields(); for (Field field : fields) { boolean fieldHasAnno = field.isAnnotationPresent(Option.class); if (fieldHasAnno) { Option fieldAnno = field.getAnnotation(Option.class); String[] names = fieldAnno.names(); if (names.length == 0) { continue; } String nameStr = Arrays.asList(names).toString(); nameStr = nameStr.substring(1, nameStr.length() - 1); assertTrue(message.indexOf(nameStr) > 0); } } } finally { System.setOut(oldStream); } }
public Duration computeReadTimeout(HttpRequestMessage request, int attemptNum) { IClientConfig clientConfig = getRequestClientConfig(request); Long originTimeout = getOriginReadTimeout(); Long requestTimeout = getRequestReadTimeout(clientConfig); long computedTimeout; if (originTimeout == null && requestTimeout == null) { computedTimeout = MAX_OUTBOUND_READ_TIMEOUT_MS.get(); } else if (originTimeout == null || requestTimeout == null) { computedTimeout = originTimeout == null ? requestTimeout : originTimeout; } else { // return the stricter (i.e. lower) of the two timeouts computedTimeout = Math.min(originTimeout, requestTimeout); } // enforce max timeout upperbound return Duration.ofMillis(Math.min(computedTimeout, MAX_OUTBOUND_READ_TIMEOUT_MS.get())); }
@Test void computeReadTimeout_requestOnly() { requestConfig.set(CommonClientConfigKey.ReadTimeout, 1000); Duration timeout = originTimeoutManager.computeReadTimeout(request, 1); assertEquals(1000, timeout.toMillis()); }
public static SchemaBuilder builder(final Schema schema) { requireDecimal(schema); return builder(precision(schema), scale(schema)); }
@Test public void shouldFailIfBuilderWithScaleGTPrecision() { // When: final Exception e = assertThrows( SchemaException.class, () -> builder(1, 2) ); // Then: assertThat(e.getMessage(), containsString("DECIMAL precision must be >= scale")); }
@Override public int run(String[] argv) { // initialize FsShell init(); Tracer tracer = new Tracer.Builder("FsShell"). conf(TraceUtils.wrapHadoopConf(SHELL_HTRACE_PREFIX, getConf())). build(); int exitCode = -1; if (argv.length < 1) { printUsage(System.err); } else { String cmd = argv[0]; Command instance = null; try { instance = commandFactory.getInstance(cmd); if (instance == null) { throw new UnknownCommandException(); } TraceScope scope = tracer.newScope(instance.getCommandName()); if (scope.getSpan() != null) { String args = StringUtils.join(" ", argv); if (args.length() > 2048) { args = args.substring(0, 2048); } scope.getSpan().addKVAnnotation("args", args); } try { exitCode = instance.run(Arrays.copyOfRange(argv, 1, argv.length)); } finally { scope.close(); } } catch (IllegalArgumentException e) { if (e.getMessage() == null) { displayError(cmd, "Null exception message"); e.printStackTrace(System.err); } else { displayError(cmd, e.getLocalizedMessage()); } printUsage(System.err); if (instance != null) { printInstanceUsage(System.err, instance); } } catch (Exception e) { // instance.run catches IOE, so something is REALLY wrong if here LOG.debug("Error", e); displayError(cmd, "Fatal internal error"); e.printStackTrace(System.err); } } tracer.close(); return exitCode; }
@Test public void testExceptionNullMessage() throws Exception { final String cmdName = "-cmdExNullMsg"; final Command cmd = Mockito.mock(Command.class); Mockito.when(cmd.run(Mockito.any())).thenThrow( new IllegalArgumentException()); Mockito.when(cmd.getUsage()).thenReturn(cmdName); final CommandFactory cmdFactory = Mockito.mock(CommandFactory.class); final String[] names = {cmdName}; Mockito.when(cmdFactory.getNames()).thenReturn(names); Mockito.when(cmdFactory.getInstance(cmdName)).thenReturn(cmd); FsShell shell = new FsShell(new Configuration()); shell.commandFactory = cmdFactory; try (GenericTestUtils.SystemErrCapturer capture = new GenericTestUtils.SystemErrCapturer()) { ToolRunner.run(shell, new String[]{cmdName}); Assertions.assertThat(capture.getOutput()) .contains(cmdName + ": Null exception message"); } }
@Override public void close() throws IOException { in.close(); }
@Test public void testClose() throws Exception { try (InputStream sample = new ByteArrayInputStream(sample1.getBytes()); JsonArrayFixingInputStream instance = new JsonArrayFixingInputStream(sample)) { int i = instance.read(); } }
@Override public void info(String msg) { logger.info(msg); }
@Test public void testInfo() { Logger mockLogger = mock(Logger.class); when(mockLogger.getName()).thenReturn("foo"); InternalLogger logger = new Slf4JLogger(mockLogger); logger.info("a"); verify(mockLogger).getName(); verify(mockLogger).info("a"); }
public void setThreadAffinity(ThreadAffinity threadAffinity) { this.threadAffinity = threadAffinity; }
@Test public void test_setThreadAffinity_nullAffinityIsAllowed() { ReactorBuilder builder = newBuilder(); builder.setThreadAffinity(null); assertNull(builder.threadAffinity); }
public Canvas canvas() { Canvas canvas = new Canvas(getLowerBound(), getUpperBound()); canvas.add(this); if (name != null) { canvas.setTitle(name); } return canvas; }
@Test public void testHistogram3D() throws Exception { System.out.println("Histogram 3D"); double[] mu = {0.0, 0.0}; double[][] v = { {1.0, 0.6}, {0.6, 2.0} }; var gauss = new MultivariateGaussianDistribution(mu, Matrix.of(v)); var data = Stream.generate(gauss::rand).limit(10000).toArray(double[][]::new); Histogram3D.of(data, 50, false).canvas().window(); }
@POST @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public Response createPort(InputStream input) { log.trace(String.format(MESSAGE, "CREATE")); URI location; try { ObjectNode jsonTree = readTreeFromStream(mapper(), input); final K8sPort port = codec(K8sPort.class).decode(jsonTree, this); adminService.createPort(port); location = new URI(port.portId()); } catch (IOException | URISyntaxException e) { throw new IllegalArgumentException(e); } return Response.created(location).build(); }
@Test public void testCreatePortWithCreateOperation() { mockAdminService.createPort(anyObject()); replay(mockAdminService); final WebTarget wt = target(); InputStream jsonStream = K8sPortWebResourceTest.class .getResourceAsStream("k8s-port.json"); Response response = wt.path(PATH).request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.json(jsonStream)); final int status = response.getStatus(); assertThat(status, is(201)); verify(mockAdminService); }
@CheckForNull public static String includeRenamedMetrics(@Nullable String metric) { if (REMOVED_METRIC.equals(metric)) { return DEPRECATED_METRIC_REPLACEMENT; } else { return metric; } }
@Test void includeRenamedMetrics_whenAcceptedIssuesPassed_shouldReturnAccepted() { String upToDateMetric = RemovedMetricConverter.includeRenamedMetrics("accepted_issues"); assertThat(upToDateMetric).isEqualTo("accepted_issues"); }
public void removeChannelFromBuffer( String id ) { buffer.values().stream().filter( line -> id.equals( getLogChId( line ) ) ).forEach( line -> buffer.remove( line.getNr() ) ); tailMap.remove( id ); /* for ( BufferLine line : buffer.values() ) { if ( id.equals( getLogChId( line ) ) ) { buffer.remove( line.getNr() ); } }*/ }
@Test public void testRemoveChannelFromBuffer() { String logChannelId = "1"; String otherLogChannelId = "2"; LoggingBuffer loggingBuffer = new LoggingBuffer( 20 ); for ( int i = 0; i < 10; i++ ) { KettleLoggingEvent event = new KettleLoggingEvent(); event.setMessage( new LogMessage( "testWithLogChannelId", logChannelId, LogLevel.BASIC ) ); event.setTimeStamp( i ); loggingBuffer.addLogggingEvent( event ); } for ( int i = 10; i < 17; i++ ) { KettleLoggingEvent event = new KettleLoggingEvent(); event.setMessage( new LogMessage( "testWithNoLogChannelId", LogLevel.BASIC ) ); event.setTimeStamp( i ); loggingBuffer.addLogggingEvent( event ); } for ( int i = 17; i < 20; i++ ) { KettleLoggingEvent event = new KettleLoggingEvent(); event.setMessage( new LogMessage( "testWithOtherLogChannelId", otherLogChannelId, LogLevel.BASIC ) ); event.setTimeStamp( i ); loggingBuffer.addLogggingEvent( event ); } loggingBuffer.removeChannelFromBuffer( logChannelId ); Assert.assertEquals( 10, loggingBuffer.size() ); }
@Override public PollResult poll(long currentTimeMs) { return pollInternal( prepareFetchRequests(), this::handleFetchSuccess, this::handleFetchFailure ); }
@Test public void testFetchPositionAfterException() { // verify the advancement in the next fetch offset equals to the number of fetched records when // some fetched partitions cause Exception. This ensures that consumer won't lose record upon exception buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); assignFromUser(mkSet(tp0, tp1)); subscriptions.seek(tp0, 1); subscriptions.seek(tp1, 1); assertEquals(1, sendFetches()); Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>(); partitions.put(tidp1, new FetchResponseData.PartitionData() .setPartitionIndex(tp1.partition()) .setHighWatermark(100) .setRecords(records)); partitions.put(tidp0, new FetchResponseData.PartitionData() .setPartitionIndex(tp0.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); networkClientDelegate.poll(time.timer(0)); List<ConsumerRecord<byte[], byte[]>> allFetchedRecords = new ArrayList<>(); fetchRecordsInto(allFetchedRecords); assertEquals(1, subscriptions.position(tp0).offset); assertEquals(4, subscriptions.position(tp1).offset); assertEquals(3, allFetchedRecords.size()); OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () -> fetchRecordsInto(allFetchedRecords)); assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet()); assertEquals(1L, e.offsetOutOfRangePartitions().get(tp0).longValue()); assertEquals(1, subscriptions.position(tp0).offset); assertEquals(4, subscriptions.position(tp1).offset); assertEquals(3, allFetchedRecords.size()); }
public <T> T submitRequest(String pluginId, String requestName, PluginInteractionCallback<T> pluginInteractionCallback) { if (!pluginManager.isPluginOfType(extensionName, pluginId)) { throw new RecordNotFoundException(format("Did not find '%s' plugin with id '%s'. Looks like plugin is missing", extensionName, pluginId)); } try { String resolvedExtensionVersion = pluginManager.resolveExtensionVersion(pluginId, extensionName, goSupportedVersions); DefaultGoPluginApiRequest apiRequest = new DefaultGoPluginApiRequest(extensionName, resolvedExtensionVersion, requestName); apiRequest.setRequestBody(pluginInteractionCallback.requestBody(resolvedExtensionVersion)); apiRequest.setRequestParams(pluginInteractionCallback.requestParams(resolvedExtensionVersion)); apiRequest.setRequestHeaders(pluginInteractionCallback.requestHeaders(resolvedExtensionVersion)); GoPluginApiResponse response = pluginManager.submitTo(pluginId, extensionName, apiRequest); if (response == null) { throw new RuntimeException("The plugin sent a null response"); } if (DefaultGoApiResponse.SUCCESS_RESPONSE_CODE == response.responseCode()) { return pluginInteractionCallback.onSuccess(response.responseBody(), response.responseHeaders(), resolvedExtensionVersion); } pluginInteractionCallback.onFailure(response.responseCode(), response.responseBody(), resolvedExtensionVersion); throw new RuntimeException(format("The plugin sent a response that could not be understood by Go. Plugin returned with code '%s' and the following response: '%s'", response.responseCode(), response.responseBody())); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new RuntimeException(format("Interaction with plugin with id '%s' implementing '%s' extension failed while requesting for '%s'. Reason: [%s]", pluginId, extensionName, requestName, e.getMessage()), e); } }
@Test void shouldErrorOutOnValidationFailure() { when(response.responseCode()).thenReturn(DefaultGoApiResponse.VALIDATION_ERROR); when(pluginManager.submitTo(eq(pluginId), eq(extensionName), any(GoPluginApiRequest.class))).thenReturn(response); assertThatThrownBy(() -> helper.submitRequest(pluginId, requestName, new DefaultPluginInteractionCallback<>() { @Override public Object onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) { isSuccessInvoked[0] = true; return null; } })).isInstanceOf(RuntimeException.class); }
public DataConnectionConfig load(DataConnectionConfig dataConnectionConfig) { // Make a copy to preserve the original configuration DataConnectionConfig loadedConfig = new DataConnectionConfig(dataConnectionConfig); // Try XML file first if (loadConfig(loadedConfig, HazelcastDataConnection.CLIENT_XML_PATH, HazelcastDataConnection.CLIENT_XML)) { return loadedConfig; } // Try YML file loadConfig(loadedConfig, HazelcastDataConnection.CLIENT_YML_PATH, HazelcastDataConnection.CLIENT_YML); return loadedConfig; }
@Test public void testLoadYml() { DataConnectionConfig dataConnectionConfig = new DataConnectionConfig(); Path path = Paths.get("src", "test", "resources", "hazelcast-client-test-external.yaml"); dataConnectionConfig.setProperty(HazelcastDataConnection.CLIENT_YML_PATH, path.toString()); HazelcastDataConnectionConfigLoader loader = new HazelcastDataConnectionConfigLoader(); DataConnectionConfig loadedConfig = loader.load(dataConnectionConfig); assertNotNull(loadedConfig.getProperty(HazelcastDataConnection.CLIENT_YML)); assertNull(loadedConfig.getProperty(HazelcastDataConnection.CLIENT_XML)); }
public void setThreadpool(final String threadpool) { this.threadpool = threadpool; }
@Test public void testEqualsAndHashCode() { GrpcRegisterConfig config1 = new GrpcRegisterConfig(); GrpcRegisterConfig config2 = new GrpcRegisterConfig(); config1.setThreadpool("threadPool"); config2.setThreadpool("threadPool"); assertThat(ImmutableSet.of(config1, config2), hasSize(1)); }
public static void hideExpandedContent() { hideExpandedContent( spoonInstance().getActiveTransGraph() ); }
@Test public void testHideExpandedContentManager() throws Exception { TransGraph transGraph = mock( TransGraph.class ); Browser browser = mock( Browser.class ); SashForm sashForm = mock( SashForm.class ); Composite parent = setupExpandedContentMocks( transGraph, browser, sashForm ); ExpandedContentManager.hideExpandedContent( transGraph ); verify( browser ).moveBelow( null ); verify( parent ).layout( true, true ); verify( parent ).redraw(); verify( sashForm ).setWeights( new int[] { 3, 2, 1 } ); }
public static NotPlaceholderExpr get() { return instance; }
@Test public void testGet() { NotPlaceholderExpr instance = NotPlaceholderExpr.get(); // Check that repeated calls return the same shared instance assertEquals(instance, NotPlaceholderExpr.get()); }
Record convert(Object data) { return convert(data, null); }
@Test public void testMapValueInMapConvert() { Table table = mock(Table.class); when(table.schema()).thenReturn(STRUCT_IN_MAP_SCHEMA); RecordConverter converter = new RecordConverter(table, config); Map<String, Object> data = createNestedMapData(); Record record = converter.convert(ImmutableMap.of("stma", ImmutableMap.of("key1", data, "key2", data))); Map<?, ?> fieldVal = (Map<?, ?>) record.getField("stma"); Record mapVal = (Record) fieldVal.get("key1"); assertNestedRecordValues(mapVal); }
@Override public void addChildren(Deque<Expression> expressions) { addChildren(expressions, 2); }
@Test public void testFinish() throws IOException { And and = new And(); Expression first = mock(Expression.class); Expression second = mock(Expression.class); Deque<Expression> children = new LinkedList<Expression>(); children.add(second); children.add(first); and.addChildren(children); and.finish(); verify(first).finish(); verify(second).finish(); verifyNoMoreInteractions(first); verifyNoMoreInteractions(second); }
@Bean public SyncDataService nacosSyncDataService(final ObjectProvider<ConfigService> configService, final ObjectProvider<PluginDataSubscriber> pluginSubscriber, final ObjectProvider<List<MetaDataSubscriber>> metaSubscribers, final ObjectProvider<List<AuthDataSubscriber>> authSubscribers, final ObjectProvider<List<ProxySelectorDataSubscriber>> proxySelectorSubscribers, final ObjectProvider<List<DiscoveryUpstreamDataSubscriber>> discoveryUpstreamDataSubscribers) { LOGGER.info("you use nacos sync shenyu data......."); return new NacosSyncDataService(configService.getIfAvailable(), pluginSubscriber.getIfAvailable(), metaSubscribers.getIfAvailable(Collections::emptyList), authSubscribers.getIfAvailable(Collections::emptyList), proxySelectorSubscribers.getIfAvailable(), discoveryUpstreamDataSubscribers.getIfAvailable()); }
@Test public void nacosSyncDataServiceTest() { assertNotNull(syncDataService); }
@Override public List<String> getLineHashesMatchingDBVersion(Component component) { return cache.computeIfAbsent(component, this::createLineHashesMatchingDBVersion); }
@Test public void should_create_hash_without_significant_code_if_db_has_no_significant_code() { when(dbLineHashVersion.hasLineHashesWithoutSignificantCode(file)).thenReturn(true); List<String> lineHashes = underTest.getLineHashesMatchingDBVersion(file); assertLineHashes(lineHashes, "line1", "line2", "line3"); verify(dbLineHashVersion).hasLineHashesWithoutSignificantCode(file); verifyNoMoreInteractions(dbLineHashVersion); verifyNoInteractions(significantCodeRepository); }
public FileObject convertToFileObject( VariableSpace variables ) throws KettleFileException { return KettleVFS.getFileObject( path, variables ); }
@Test public void convertToFileObject() throws Exception { Element element = new Element( NAME, TYPE, adjustSlashes( PATH ), LOCAL_PROVIDER ); FileObject fileObject = element.convertToFileObject( space ); assertEquals( NAME, fileObject.getName().getBaseName() ); assertEquals( adjustSlashes( PATH ), fileObject.getPath().toString() ); }
boolean isContainerizable() { String moduleSpecification = getProperty(PropertyNames.CONTAINERIZE); if (project == null || Strings.isNullOrEmpty(moduleSpecification)) { return true; } // modules can be specified in one of three ways: // 1) a `groupId:artifactId` // 2) an `:artifactId` // 3) relative path within the repository if (moduleSpecification.equals(project.getGroupId() + ":" + project.getArtifactId()) || moduleSpecification.equals(":" + project.getArtifactId())) { return true; } // Relative paths never have a colon on *nix nor Windows. This moduleSpecification could be an // :artifactId or groupId:artifactId for a different artifact. if (moduleSpecification.contains(":")) { return false; } try { Path projectBase = project.getBasedir().toPath(); return projectBase.endsWith(moduleSpecification); } catch (InvalidPathException ex) { // ignore since moduleSpecification may not actually be a path return false; } }
@Test public void testIsContainerizable_artifactId() { project.setGroupId("group"); project.setArtifactId("artifact"); Properties projectProperties = project.getProperties(); projectProperties.setProperty("jib.containerize", ":artifact"); assertThat(testPluginConfiguration.isContainerizable()).isTrue(); projectProperties.setProperty("jib.containerize", ":artifact2"); assertThat(testPluginConfiguration.isContainerizable()).isFalse(); }
@Override public void error(final ErrorMessage msg) { inner.error(() -> throwIfNotRightSchema(msg.get(config))); }
@Test public void shouldLogError() { // When: processingLogger.error(errorMsg); // Then: final SchemaAndValue msg = verifyErrorMessage(); assertThat(msg, is(msg)); }
CacheConfig<K, V> asCacheConfig() { return this.copy(new CacheConfig<>(), false); }
@Test public void serializationSucceeds_cacheWriterFactory() { CacheConfig<String, Person> cacheConfig = newDefaultCacheConfig("test"); cacheConfig.setCacheWriterFactory(new PersonCacheWriterFactory()); PreJoinCacheConfig preJoinCacheConfig = new PreJoinCacheConfig(cacheConfig); Data data = serializationService.toData(preJoinCacheConfig); PreJoinCacheConfig deserialized = serializationService.toObject(data); assertEquals(preJoinCacheConfig, deserialized); assertEquals(cacheConfig, deserialized.asCacheConfig()); assertNull(deserialized.getCacheLoaderFactory()); assertTrue("Invalid Factory Class", deserialized.getCacheWriterFactory() instanceof PersonCacheWriterFactory); }
public Optional<Distance> horizontalDistAtVerticalClosureTime(Instant time) { Optional<Duration> timeUntilClosure = timeUntilVerticalClosure(time); //not closing in the vertical direction if (!timeUntilClosure.isPresent()) { return Optional.empty(); } Speed closureRate = horizontalClosureRateAt(time); Distance startingSeparation = horizontalSeparationAt(time); Distance distanceClosed = closureRate.times(timeUntilClosure.get()); return Optional.of(startingSeparation.minus(distanceClosed).abs()); }
@Test public void testHorizontalDistAtVerticalClosureTime() { //decreasing vertical separation... Distance[] verticalDistances = new Distance[]{ Distance.ofFeet(200), Distance.ofFeet(100), Distance.ofFeet(50) }; //increasing horizontal separation... Distance[] horizontalDistances = new Distance[]{ Distance.ofNauticalMiles(2), Distance.ofNauticalMiles(3), Distance.ofNauticalMiles(5) }; SeparationTimeSeries instance = new SeparationTimeSeries( times(), verticalDistances, horizontalDistances ); assertEquals( Distance.ofNauticalMiles(2 + 2), //2 NM + 2 NM in new "closure" instance.horizontalDistAtVerticalClosureTime(EPOCH.plusSeconds(0)).get() ); assertEquals( Distance.ofNauticalMiles(2.2 + 1.8), //2.2 NM + 1.8 NM in new "closure" instance.horizontalDistAtVerticalClosureTime(EPOCH.plusSeconds(1)).get() ); assertEquals( Distance.ofNauticalMiles(3 + 4), //3 NM ft + 10 seconds @ 2 NM every 5 sec instance.horizontalDistAtVerticalClosureTime(EPOCH.plusSeconds(5)).get() ); assertEquals( Distance.ofNauticalMiles(3.8 + 3.2), //3.8 NM + 8 seconds @ 2 NM every 5 sec instance.horizontalDistAtVerticalClosureTime(EPOCH.plusSeconds(7)).get() ); assertEquals( Distance.ofNauticalMiles(5 + 2), //5 NM + 5 seconds @ 2 NM every 5 sec instance.horizontalDistAtVerticalClosureTime(EPOCH.plusSeconds(10)).get() ); }
protected Object createSchemaDefaultValue(Type type, Field field, Schema fieldSchema) { Object defaultValue; if (defaultGenerated) { defaultValue = getOrCreateDefaultValue(type, field); if (defaultValue != null) { return deepCopy(fieldSchema, defaultValue); } // if we can't get the default value, try to use previous code below } AvroDefault defaultAnnotation = field.getAnnotation(AvroDefault.class); defaultValue = (defaultAnnotation == null) ? null : Schema.parseJsonToObject(defaultAnnotation.value()); if (defaultValue == null && fieldSchema.isNullable()) { defaultValue = JsonProperties.NULL_VALUE; } return defaultValue; }
@Test void createSchemaDefaultValue() { Meta meta = new Meta(); validateSchema(meta); meta.f4 = 0x1987; validateSchema(meta); }
@Override public AsyncEntry asyncEntry(String name, EntryType type, int count, Object... args) throws BlockException { StringResourceWrapper resource = new StringResourceWrapper(name, type); return asyncEntryInternal(resource, count, args); }
@Test public void testAsyncEntryNormalPass() { String resourceName = "testAsyncEntryNormalPass"; ResourceWrapper resourceWrapper = new StringResourceWrapper(resourceName, EntryType.IN); AsyncEntry entry = null; // Prepare a slot that "should pass". ShouldPassSlot slot = addShouldPassSlotFor(resourceWrapper); assertFalse(slot.entered || slot.exited); ContextUtil.enter("abc"); Entry previousEntry = ContextUtil.getContext().getCurEntry(); try { entry = ctSph.asyncEntry(resourceName, EntryType.IN, 1); assertTrue(slot.entered); assertFalse(slot.exited); Context asyncContext = entry.getAsyncContext(); assertNotNull(asyncContext); assertSame(entry, asyncContext.getCurEntry()); assertNotSame("The async entry should not be added to current context", entry, ContextUtil.getContext().getCurEntry()); assertSame(previousEntry, ContextUtil.getContext().getCurEntry()); } catch (BlockException ex) { fail("Unexpected blocked: " + ex.getClass().getCanonicalName()); } finally { if (entry != null) { Context asyncContext = entry.getAsyncContext(); entry.exit(); assertTrue(slot.exited); assertNull(entry.getAsyncContext()); assertSame(previousEntry, asyncContext.getCurEntry()); } ContextUtil.exit(); } }
@SuppressWarnings({"checkstyle:ParameterNumber"}) public static Pod createStatefulPod( Reconciliation reconciliation, String name, String namespace, Labels labels, String strimziPodSetName, String serviceAccountName, PodTemplate template, Map<String, String> defaultPodLabels, Map<String, String> podAnnotations, String headlessServiceName, Affinity affinity, List<Container> initContainers, List<Container> containers, List<Volume> volumes, List<LocalObjectReference> defaultImagePullSecrets, PodSecurityContext podSecurityContext ) { Pod pod = new PodBuilder() .withNewMetadata() .withName(name) .withLabels(labels.withStrimziPodName(name).withStatefulSetPod(name).withStrimziPodSetController(strimziPodSetName).withAdditionalLabels(Util.mergeLabelsOrAnnotations(defaultPodLabels, TemplateUtils.labels(template))).toMap()) .withNamespace(namespace) .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, TemplateUtils.annotations(template))) .endMetadata() .withNewSpec() .withRestartPolicy("Always") .withHostname(name) .withSubdomain(headlessServiceName) .withServiceAccountName(serviceAccountName) .withEnableServiceLinks(template != null ? template.getEnableServiceLinks() : null) .withAffinity(affinity) .withInitContainers(initContainers) .withContainers(containers) .withVolumes(volumes) .withTolerations(template != null && template.getTolerations() != null ? template.getTolerations() : null) .withTerminationGracePeriodSeconds(template != null ? (long) template.getTerminationGracePeriodSeconds() : 30L) .withImagePullSecrets(imagePullSecrets(template, defaultImagePullSecrets)) .withSecurityContext(podSecurityContext) .withPriorityClassName(template != null ? template.getPriorityClassName() : null) .withSchedulerName(template != null && template.getSchedulerName() != null ? template.getSchedulerName() : "default-scheduler") .withHostAliases(template != null ? template.getHostAliases() : null) .withTopologySpreadConstraints(template != null ? template.getTopologySpreadConstraints() : null) .endSpec() .build(); // Set the pod revision annotation pod.getMetadata().getAnnotations().put(PodRevision.STRIMZI_REVISION_ANNOTATION, PodRevision.getRevision(reconciliation, pod)); return pod; }
@Test public void testCreateStatefulPodWithEmptyTemplate() { Pod pod = WorkloadUtils.createStatefulPod( Reconciliation.DUMMY_RECONCILIATION, NAME + "-0", // => Pod name NAMESPACE, LABELS, NAME, // => Workload name NAME + "-sa", // => Service Account name new PodTemplate(), Map.of("default-label", "default-value"), Map.of("extra", "annotations"), HEADLESS_SERVICE_NAME, DEFAULT_AFFINITY, List.of(new ContainerBuilder().withName("init-container").build()), List.of(new ContainerBuilder().withName("container").build()), VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false), List.of(new LocalObjectReference("some-pull-secret")), DEFAULT_POD_SECURITY_CONTEXT ); assertThat(pod.getMetadata().getName(), is(NAME + "-0")); assertThat(pod.getMetadata().getNamespace(), is(NAMESPACE)); assertThat(pod.getMetadata().getLabels(), is(LABELS .withStrimziPodSetController(NAME) .withStrimziPodName(NAME + "-0") .withAdditionalLabels(Map.of("statefulset.kubernetes.io/pod-name", "my-workload-0", "default-label", "default-value")) .toMap())); assertThat(pod.getMetadata().getAnnotations(), is(Map.of("extra", "annotations", PodRevision.STRIMZI_REVISION_ANNOTATION, "da09ff49"))); assertThat(pod.getSpec().getRestartPolicy(), is("Always")); assertThat(pod.getSpec().getHostname(), is(NAME + "-0")); assertThat(pod.getSpec().getServiceAccountName(), is(NAME + "-sa")); assertThat(pod.getSpec().getEnableServiceLinks(), is(nullValue())); assertThat(pod.getSpec().getAffinity(), is(DEFAULT_AFFINITY)); assertThat(pod.getSpec().getInitContainers().size(), is(1)); assertThat(pod.getSpec().getInitContainers().get(0).getName(), is("init-container")); assertThat(pod.getSpec().getContainers().size(), is(1)); assertThat(pod.getSpec().getContainers().get(0).getName(), is("container")); assertThat(pod.getSpec().getVolumes(), is(VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false))); assertThat(pod.getSpec().getTolerations(), is(nullValue())); assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L)); assertThat(pod.getSpec().getImagePullSecrets(), is(List.of(new LocalObjectReference("some-pull-secret")))); assertThat(pod.getSpec().getSecurityContext(), is(DEFAULT_POD_SECURITY_CONTEXT)); assertThat(pod.getSpec().getPriorityClassName(), is(nullValue())); assertThat(pod.getSpec().getSchedulerName(), is("default-scheduler")); assertThat(pod.getSpec().getHostAliases(), is(nullValue())); assertThat(pod.getSpec().getTopologySpreadConstraints(), is(nullValue())); }
@UdafFactory(description = "Compute average of column with type Long.", aggregateSchema = "STRUCT<SUM bigint, COUNT bigint>") public static TableUdaf<Long, Struct, Double> averageLong() { return getAverageImplementation( 0L, STRUCT_LONG, (sum, newValue) -> sum.getInt64(SUM) + newValue, (sum, count) -> sum.getInt64(SUM) / count, (sum1, sum2) -> sum1.getInt64(SUM) + sum2.getInt64(SUM), (sum, valueToUndo) -> sum.getInt64(SUM) - valueToUndo); }
@Test public void undoShouldHandleNull() { final TableUdaf<Long, Struct, Double> udaf = AverageUdaf.averageLong(); Struct agg = udaf.initialize(); final Long[] values = new Long[] {1L, 1L, 1L, 1L, null}; for (final Long thisValue : values) { agg = udaf.aggregate(thisValue, agg); } agg = udaf.undo(null, agg); assertThat(4L, equalTo(agg.getInt64(COUNT))); assertThat(4L, equalTo(agg.getInt64(SUM))); }
@Override public boolean isDetected() { String ci = system.envVariable("CI"); String revision = system.envVariable("BITBUCKET_COMMIT"); return "true".equals(ci) && isNotEmpty(revision); }
@Test public void isDetected() { setEnvVariable("CI", "true"); setEnvVariable("BITBUCKET_COMMIT", "bdf12fe"); assertThat(underTest.isDetected()).isTrue(); setEnvVariable("CI", "true"); setEnvVariable("BITBUCKET_COMMIT", null); assertThat(underTest.isDetected()).isFalse(); }
public void validate(AlmSettingDto almSettingDto) { String gitlabUrl = almSettingDto.getUrl(); String accessToken = almSettingDto.getDecryptedPersonalAccessToken(encryption); validate(ValidationMode.COMPLETE, gitlabUrl, accessToken); }
@Test public void validate_success() { String token = "personal-access-token"; AlmSettingDto almSettingDto = new AlmSettingDto() .setUrl(GITLAB_API_URL) .setPersonalAccessToken("personal-access-token"); when(encryption.isEncrypted(token)).thenReturn(false); underTest.validate(almSettingDto); verify(gitlabHttpClient).checkUrl(almSettingDto.getUrl()); verify(gitlabHttpClient).checkToken(almSettingDto.getUrl(), almSettingDto.getDecryptedPersonalAccessToken(encryption)); verify(gitlabHttpClient).checkReadPermission(almSettingDto.getUrl(), almSettingDto.getDecryptedPersonalAccessToken(encryption)); verify(gitlabHttpClient).checkWritePermission(almSettingDto.getUrl(), almSettingDto.getDecryptedPersonalAccessToken(encryption)); }
public synchronized Command heartbeat(final long workerId, final Map<String, Long> capacityBytesOnTiers, final Map<String, Long> usedBytesOnTiers, final List<Long> removedBlocks, final Map<BlockStoreLocation, List<Long>> addedBlocks, final Map<String, List<String>> lostStorage, final List<Metric> metrics) throws IOException { final BlockHeartbeatPOptions options = BlockHeartbeatPOptions.newBuilder() .addAllMetrics(metrics).putAllCapacityBytesOnTiers(capacityBytesOnTiers).build(); final List<LocationBlockIdListEntry> entryList = convertBlockListMapToProto(addedBlocks); final Map<String, StorageList> lostStorageMap = lostStorage.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> StorageList.newBuilder().addAllStorage(e.getValue()).build())); final BlockHeartbeatPRequest request = BlockHeartbeatPRequest.newBuilder().setWorkerId(workerId) .putAllUsedBytesOnTiers(usedBytesOnTiers).addAllRemovedBlockIds(removedBlocks) .addAllAddedBlocks(entryList).setOptions(options) .putAllLostStorage(lostStorageMap).build(); return retryRPC(() -> mClient.withDeadlineAfter(mContext.getClusterConf() .getMs(PropertyKey.WORKER_MASTER_PERIODICAL_RPC_TIMEOUT), TimeUnit.MILLISECONDS) .blockHeartbeat(request).getCommand(), LOG, "Heartbeat", "workerId=%d", workerId); }
@Test public void heartBeat() throws Exception { final long workerId = 1L; final Map<String, Long> capacityBytesOnTiers = ImmutableMap.of("MEM", 1024 * 1024L); final Map<String, Long> usedBytesOnTiers = ImmutableMap.of("MEM", 1024L); final List<Long> removedBlocks = ImmutableList.of(); final Map<BlockStoreLocation, List<Long>> addedBlocks = ImmutableMap.of( new BlockStoreLocation("MEM", 0, "MEM"), ImmutableList.of(11L, 12L, 13L) ); final Map<String, List<String>> lostStorage = ImmutableMap.of( "MEM", ImmutableList.of("/tmp/lost") ); final List<Metric> metrics = ImmutableList.of(); createMockService( new BlockMasterWorkerServiceGrpc.BlockMasterWorkerServiceImplBase() { @Override public void blockHeartbeat(BlockHeartbeatPRequest request, StreamObserver<BlockHeartbeatPResponse> responseObserver) { // verify request data assertEquals(workerId, request.getWorkerId()); assertEquals(usedBytesOnTiers, request.getUsedBytesOnTiersMap()); assertEquals(removedBlocks, request.getRemovedBlockIdsList()); // verify added blocks for (LocationBlockIdListEntry entry: request.getAddedBlocksList()) { BlockStoreLocationProto locationProto = entry.getKey(); BlockStoreLocation location = new BlockStoreLocation( locationProto.getTierAlias(), 0, locationProto.getMediumType() ); List<Long> blocks = addedBlocks.get(location); assertTrue( blocks != null && blocks.containsAll(entry.getValue().getBlockIdList()) ); } // verify lost storage assertEquals(lostStorage.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> StorageList.newBuilder().addAllStorage(e.getValue()).build())), request.getLostStorageMap()); responseObserver.onNext( BlockHeartbeatPResponse.newBuilder().setCommand(Command.newBuilder() .setCommandType( CommandType.Nothing)).build() ); responseObserver.onCompleted(); } }); BlockMasterClient client = new BlockMasterClient( MasterClientContext.newBuilder(ClientContext.create(mConf)).build() ); assertEquals(CommandType.Nothing, client.heartbeat( workerId, capacityBytesOnTiers, usedBytesOnTiers, removedBlocks, addedBlocks, lostStorage, metrics).getCommandType()); }
@Override public boolean validate(Path path, ResourceContext context) { // explicitly call a method not depending on LinkResourceService return validate(path); }
@Test public void testNotSatisfyWaypoint() { sut = new WaypointConstraint(DID4); assertThat(sut.validate(path, resourceContext), is(false)); }
@Override public final void aroundWriteTo(WriterInterceptorContext context) throws IOException { final String contentEncoding = (String) context.getHeaders().getFirst(HttpHeaders.CONTENT_ENCODING); if ((contentEncoding != null) && (contentEncoding.equals("gzip") || contentEncoding.equals("x-gzip"))) { context.setOutputStream(new GZIPOutputStream(context.getOutputStream())); } context.proceed(); }
@Test void aroundWriteToSpecX_GZip() throws IOException, WebApplicationException { MultivaluedMap<String, Object> headers = new MultivaluedHashMap<>(); headers.add(HttpHeaders.CONTENT_ENCODING, "x-gzip"); WriterInterceptorContextMock context = new WriterInterceptorContextMock(headers); new ConfiguredGZipEncoder(true).aroundWriteTo(context); assertThat(context.getOutputStream()).isInstanceOf(GZIPOutputStream.class); assertThat(context.isProceedCalled()).isTrue(); }
public final synchronized List<E> getAllAddOns() { Logger.d(mTag, "getAllAddOns has %d add on for %s", mAddOns.size(), getClass().getName()); if (mAddOns.size() == 0) { loadAddOns(); } Logger.d( mTag, "getAllAddOns will return %d add on for %s", mAddOns.size(), getClass().getName()); return unmodifiableList(mAddOns); }
@Test public void testGetAllAddOns() throws Exception { TestableAddOnsFactory factory = new TestableAddOnsFactory(true); List<TestAddOn> list = factory.getAllAddOns(); Assert.assertTrue(list.size() > 0); HashSet<String> seenIds = new HashSet<>(); for (AddOn addOn : list) { Assert.assertNotNull(addOn); Assert.assertFalse(seenIds.contains(addOn.getId())); seenIds.add(addOn.getId()); } }
@Nullable @Override public Message decode(@Nonnull final RawMessage rawMessage) { final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress()); final String json = gelfMessage.getJSON(decompressSizeLimit, charset); final JsonNode node; try { node = objectMapper.readTree(json); if (node == null) { throw new IOException("null result"); } } catch (final Exception e) { log.error("Could not parse JSON, first 400 characters: " + StringUtils.abbreviate(json, 403), e); throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e); } try { validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress()); } catch (IllegalArgumentException e) { log.trace("Invalid GELF message <{}>", node); throw e; } // Timestamp. final double messageTimestamp = timestampValue(node); final DateTime timestamp; if (messageTimestamp <= 0) { timestamp = rawMessage.getTimestamp(); } else { // we treat this as a unix timestamp timestamp = Tools.dateTimeFromDouble(messageTimestamp); } final Message message = messageFactory.createMessage( stringValue(node, "short_message"), stringValue(node, "host"), timestamp ); message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message")); final String file = stringValue(node, "file"); if (file != null && !file.isEmpty()) { message.addField("file", file); } final long line = longValue(node, "line"); if (line > -1) { message.addField("line", line); } // Level is set by server if not specified by client. final int level = intValue(node, "level"); if (level > -1) { message.addField("level", level); } // Facility is set by server if not specified by client. final String facility = stringValue(node, "facility"); if (facility != null && !facility.isEmpty()) { message.addField("facility", facility); } // Add additional data if there is some. final Iterator<Map.Entry<String, JsonNode>> fields = node.fields(); while (fields.hasNext()) { final Map.Entry<String, JsonNode> entry = fields.next(); String key = entry.getKey(); // Do not index useless GELF "version" field. if ("version".equals(key)) { continue; } // Don't include GELF syntax underscore in message field key. if (key.startsWith("_") && key.length() > 1) { key = key.substring(1); } // We already set short_message and host as message and source. Do not add as fields again. if ("short_message".equals(key) || "host".equals(key)) { continue; } // Skip standard or already set fields. if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) { continue; } // Convert JSON containers to Strings, and pick a suitable number representation. final JsonNode value = entry.getValue(); final Object fieldValue; if (value.isContainerNode()) { fieldValue = value.toString(); } else if (value.isFloatingPointNumber()) { fieldValue = value.asDouble(); } else if (value.isIntegralNumber()) { fieldValue = value.asLong(); } else if (value.isNull()) { log.debug("Field [{}] is NULL. Skipping.", key); continue; } else if (value.isTextual()) { fieldValue = value.asText(); } else { log.debug("Field [{}] has unknown value type. Skipping.", key); continue; } message.addField(key, fieldValue); } return message; }
@Test public void decodeFailsWithWrongTypeForShortMessage() throws Exception { final String json = "{" + "\"version\": \"1.1\"," + "\"host\": \"example.org\"," + "\"short_message\": 42" + "}"; final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8)); assertThatIllegalArgumentException().isThrownBy(() -> codec.decode(rawMessage)) .withNoCause() .withMessageMatching("GELF message <[0-9a-f-]+> has invalid \"short_message\": 42"); }
@Override public SubClusterId getHomeSubcluster( ApplicationSubmissionContext appSubmissionContext, List<SubClusterId> blackListSubClusters) throws YarnException { // null checks and default-queue behavior validate(appSubmissionContext); List<ResourceRequest> rrList = appSubmissionContext.getAMContainerResourceRequests(); // Fast path for FailForward to WeightedRandomRouterPolicy if (rrList == null || rrList.isEmpty() || (rrList.size() == 1 && ResourceRequest.isAnyLocation(rrList.get(0).getResourceName()))) { return super.getHomeSubcluster(appSubmissionContext, blackListSubClusters); } if (rrList.size() != 3) { throw new FederationPolicyException( "Invalid number of resource requests: " + rrList.size()); } Map<SubClusterId, SubClusterInfo> activeSubClusters = getActiveSubclusters(); Set<SubClusterId> validSubClusters = activeSubClusters.keySet(); FederationPolicyUtils.validateSubClusterAvailability(activeSubClusters.keySet(), blackListSubClusters); if (blackListSubClusters != null) { // Remove from the active SubClusters from StateStore the blacklisted ones validSubClusters.removeAll(blackListSubClusters); } try { // With three requests, this has been processed by the // ResourceRequestInterceptorREST, and should have // node, rack, and any SubClusterId targetId = null; ResourceRequest nodeRequest = null; ResourceRequest rackRequest = null; ResourceRequest anyRequest = null; for (ResourceRequest rr : rrList) { // Handle "node" requests try { targetId = resolver.getSubClusterForNode(rr.getResourceName()); nodeRequest = rr; } catch (YarnException e) { LOG.error("Cannot resolve node : {}.", e.getMessage()); } // Handle "rack" requests try { resolver.getSubClustersForRack(rr.getResourceName()); rackRequest = rr; } catch (YarnException e) { LOG.error("Cannot resolve rack : {}.", e.getMessage()); } // Handle "ANY" requests if (ResourceRequest.isAnyLocation(rr.getResourceName())) { anyRequest = rr; continue; } } if (nodeRequest == null) { throw new YarnException("Missing node request."); } if (rackRequest == null) { throw new YarnException("Missing rack request."); } if (anyRequest == null) { throw new YarnException("Missing any request."); } LOG.info("Node request: {} , Rack request: {} , Any request: {}.", nodeRequest.getResourceName(), rackRequest.getResourceName(), anyRequest.getResourceName()); // Handle "node" requests if (validSubClusters.contains(targetId) && enabledSCs .contains(targetId)) { LOG.info("Node {} is in SubCluster: {}.", nodeRequest.getResourceName(), targetId); return targetId; } else { throw new YarnException("The node " + nodeRequest.getResourceName() + " is in a blacklist SubCluster or not active. "); } } catch (YarnException e) { LOG.error("Validating resource requests failed, " + "Falling back to WeightedRandomRouterPolicy placement : {}.", e.getMessage()); // FailForward to WeightedRandomRouterPolicy // Overwrite request to use a default ANY ResourceRequest amReq = Records.newRecord(ResourceRequest.class); amReq.setPriority(appSubmissionContext.getPriority()); amReq.setResourceName(ResourceRequest.ANY); amReq.setCapability(appSubmissionContext.getResource()); amReq.setNumContainers(1); amReq.setRelaxLocality(true); amReq.setNodeLabelExpression(appSubmissionContext.getNodeLabelExpression()); amReq.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED)); appSubmissionContext.setAMContainerResourceRequests(Collections.singletonList(amReq)); return super.getHomeSubcluster(appSubmissionContext, blackListSubClusters); } }
@Test public void testNodeNotExists() throws YarnException { List<ResourceRequest> requests = new ArrayList<ResourceRequest>(); boolean relaxLocality = true; requests.add(ResourceRequest .newInstance(Priority.UNDEFINED, "node5", Resource.newInstance(10, 1), 1, relaxLocality)); requests.add(ResourceRequest .newInstance(Priority.UNDEFINED, "rack1", Resource.newInstance(10, 1), 1)); requests.add(ResourceRequest .newInstance(Priority.UNDEFINED, ResourceRequest.ANY, Resource.newInstance(10, 1), 1)); ApplicationSubmissionContext asc = ApplicationSubmissionContext .newInstance(null, null, null, null, null, false, false, 0, Resources.none(), null, false, null, null); asc.setAMContainerResourceRequests(requests); try { ((FederationRouterPolicy) getPolicy()).getHomeSubcluster(asc, null); } catch (FederationPolicyException e) { Assert.fail(); } }
@Override public List<SnowflakeIdentifier> listDatabases() { List<SnowflakeIdentifier> databases; try { databases = connectionPool.run( conn -> queryHarness.query( conn, "SHOW DATABASES IN ACCOUNT", DATABASE_RESULT_SET_HANDLER)); } catch (SQLException e) { throw snowflakeExceptionToIcebergException( SnowflakeIdentifier.ofRoot(), e, "Failed to list databases"); } catch (InterruptedException e) { throw new UncheckedInterruptedException(e, "Interrupted while listing databases"); } databases.forEach( db -> Preconditions.checkState( db.type() == SnowflakeIdentifier.Type.DATABASE, "Expected DATABASE, got identifier '%s'", db)); return databases; }
@SuppressWarnings("unchecked") @Test public void testListDatabasesInAccount() throws SQLException { when(mockResultSet.next()).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false); when(mockResultSet.getString("name")).thenReturn("DB_1").thenReturn("DB_2").thenReturn("DB_3"); List<SnowflakeIdentifier> actualList = snowflakeClient.listDatabases(); verify(mockQueryHarness) .query( eq(mockConnection), eq("SHOW DATABASES IN ACCOUNT"), any(JdbcSnowflakeClient.ResultSetParser.class)); assertThat(actualList) .containsExactly( SnowflakeIdentifier.ofDatabase("DB_1"), SnowflakeIdentifier.ofDatabase("DB_2"), SnowflakeIdentifier.ofDatabase("DB_3")); }
@Override public Method getMethod() { return Method.POST; }
@Test public void post_is_post() { PostRequest request = new PostRequest("api/issues/search"); assertThat(request.getMethod()).isEqualTo(WsRequest.Method.POST); }
@Nonnull public static Map<Integer, Accumulator> getAccumulators(QueryCacheContext context, String mapName, String cacheId) { PartitionAccumulatorRegistry partitionAccumulatorRegistry = getAccumulatorRegistryOrNull(context, mapName, cacheId); if (partitionAccumulatorRegistry == null) { return Collections.emptyMap(); } return partitionAccumulatorRegistry.getAll(); }
@Test public void getAccumulators_whenNoAccumulatorsRegistered_thenReturnEmptyMap() { Map<Integer, Accumulator> accumulators = getAccumulators(context, "myMap", "myCache"); assertNotNull(accumulators); assertEquals(0, accumulators.size()); }
public <T> boolean parse(Handler<T> handler, T target, CharSequence input) { if (input == null) throw new NullPointerException("input == null"); return parse(handler, target, input, 0, input.length()); }
@Test void toleratesButIgnores_emptyMembers() { for (String w : Arrays.asList(" ", "\t")) { entrySplitter.parse(parseIntoMap, map, ","); entrySplitter.parse(parseIntoMap, map, w + ","); entrySplitter.parse(parseIntoMap, map, "," + w); entrySplitter.parse(parseIntoMap, map, ",,"); entrySplitter.parse(parseIntoMap, map, "," + w + ","); entrySplitter.parse(parseIntoMap, map, w + "," + w + "," + w); } assertThat(map).isEmpty(); }
public void delete(DeletionTask deletionTask) { if (debugDelay != -1) { LOG.debug("Scheduling DeletionTask (delay {}) : {}", debugDelay, deletionTask); recordDeletionTaskInStateStore(deletionTask); sched.schedule(deletionTask, debugDelay, TimeUnit.SECONDS); } }
@Test public void testAbsDelete() throws Exception { Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); System.out.println("SEED: " + seed); List<Path> dirs = buildDirs(r, base, 20); createDirs(new Path("."), dirs); FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor(); Configuration conf = new Configuration(); exec.setConf(conf); DeletionService del = new DeletionService(exec); del.init(conf); del.start(); try { for (Path p : dirs) { FileDeletionTask deletionTask = new FileDeletionTask(del, (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, null); del.delete(deletionTask); } int msecToWait = 20 * 1000; for (Path p : dirs) { while (msecToWait > 0 && lfs.util().exists(p)) { Thread.sleep(100); msecToWait -= 100; } assertFalse(lfs.util().exists(p)); } } finally { del.stop(); } }
@Override public ImportResult importItem( UUID jobId, IdempotentImportExecutor idempotentImportExecutor, TokensAndUrlAuthData authData, MusicContainerResource data) throws Exception { if (data == null) { // Nothing to do return ImportResult.OK; } // Update playlists for (MusicPlaylist playlist : data.getPlaylists()) { idempotentImportExecutor.executeAndSwallowIOExceptions( playlist.getId(), playlist.getTitle(), () -> importSinglePlaylist(jobId, authData, playlist)); } // Create playlistItems importPlaylistItems(data.getPlaylistItems(), idempotentImportExecutor, jobId, authData); // TODO: create tracks // TODO: create releases return ImportResult.OK; }
@Test public void importPlaylist() throws Exception { // Set up MusicPlaylist playlist = new MusicPlaylist("p1_id", "p1_title", null, null, null); ImmutableList<MusicPlaylist> playlists = ImmutableList.of(playlist); MusicContainerResource data = new MusicContainerResource(playlists, null, null, null); GooglePlaylist responsePlaylist = new GooglePlaylist(); responsePlaylist.setTitle("p1_title"); when(googleMusicHttpApi.importPlaylist(any(GooglePlaylist.class), any(String.class))) .thenReturn(responsePlaylist); // Run test ImportResult importResult = googleMusicImporter.importItem(uuid, executor, null, data); // Check results ArgumentCaptor<GooglePlaylist> playlistArgumentCaptor = ArgumentCaptor.forClass(GooglePlaylist.class); ArgumentCaptor<String> playlistIdArgumentCaptor = ArgumentCaptor.forClass(String.class); verify(googleMusicHttpApi) .importPlaylist(playlistArgumentCaptor.capture(), playlistIdArgumentCaptor.capture()); assertEquals("p1_title", playlistArgumentCaptor.getValue().getTitle()); assertEquals("p1_id", playlistIdArgumentCaptor.getValue()); assertTrue(executor.isKeyCached("p1_id")); assertEquals(importResult, ImportResult.OK); }
@Override public List<ImportValidationFeedback> verifyRule( Object subject ) { List<ImportValidationFeedback> feedback = new ArrayList<>(); if ( !isEnabled() || !( subject instanceof JobMeta ) ) { return feedback; } JobMeta jobMeta = (JobMeta) subject; String description = jobMeta.getDescription(); if ( null != description && minLength <= description.length() ) { feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.APPROVAL, "A description is present" ) ); } else { feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.ERROR, "A description is not present or too short" ) ); } return feedback; }
@Test public void testVerifyRule_NotJobMetaParameter_EnabledRule() { JobHasDescriptionImportRule importRule = getImportRule( 10, true ); List<ImportValidationFeedback> feedbackList = importRule.verifyRule( "" ); assertNotNull( feedbackList ); assertTrue( feedbackList.isEmpty() ); }
public CompletableFuture<List<BatchAckResult>> batchAckMessage( ProxyContext ctx, List<ReceiptHandleMessage> handleMessageList, String consumerGroup, String topic, long timeoutMillis ) { CompletableFuture<List<BatchAckResult>> future = new CompletableFuture<>(); try { List<BatchAckResult> batchAckResultList = new ArrayList<>(handleMessageList.size()); Map<String, List<ReceiptHandleMessage>> brokerHandleListMap = new HashMap<>(); for (ReceiptHandleMessage handleMessage : handleMessageList) { if (handleMessage.getReceiptHandle().isExpired()) { batchAckResultList.add(new BatchAckResult(handleMessage, EXPIRED_HANDLE_PROXY_EXCEPTION)); continue; } List<ReceiptHandleMessage> brokerHandleList = brokerHandleListMap.computeIfAbsent(handleMessage.getReceiptHandle().getBrokerName(), key -> new ArrayList<>()); brokerHandleList.add(handleMessage); } if (brokerHandleListMap.isEmpty()) { return FutureUtils.addExecutor(CompletableFuture.completedFuture(batchAckResultList), this.executor); } Set<Map.Entry<String, List<ReceiptHandleMessage>>> brokerHandleListMapEntrySet = brokerHandleListMap.entrySet(); CompletableFuture<List<BatchAckResult>>[] futures = new CompletableFuture[brokerHandleListMapEntrySet.size()]; int futureIndex = 0; for (Map.Entry<String, List<ReceiptHandleMessage>> entry : brokerHandleListMapEntrySet) { futures[futureIndex++] = processBrokerHandle(ctx, consumerGroup, topic, entry.getValue(), timeoutMillis); } CompletableFuture.allOf(futures).whenComplete((val, throwable) -> { if (throwable != null) { future.completeExceptionally(throwable); } for (CompletableFuture<List<BatchAckResult>> resultFuture : futures) { batchAckResultList.addAll(resultFuture.join()); } future.complete(batchAckResultList); }); } catch (Throwable t) { future.completeExceptionally(t); } return FutureUtils.addExecutor(future, this.executor); }
@Test public void testBatchAckMessage() throws Throwable { String brokerName1 = "brokerName1"; String brokerName2 = "brokerName2"; String errThrowBrokerName = "errThrowBrokerName"; MessageExt expireMessage = createMessageExt(TOPIC, "", 0, 3000, System.currentTimeMillis() - 10000, 0, 0, 0, 0, brokerName1); ReceiptHandle expireHandle = create(expireMessage); List<ReceiptHandleMessage> receiptHandleMessageList = new ArrayList<>(); receiptHandleMessageList.add(new ReceiptHandleMessage(expireHandle, expireMessage.getMsgId())); List<String> broker1Msg = new ArrayList<>(); List<String> broker2Msg = new ArrayList<>(); long now = System.currentTimeMillis(); int msgNum = 3; for (int i = 0; i < msgNum; i++) { MessageExt brokerMessage = createMessageExt(TOPIC, "", 0, 3000, now, 0, 0, 0, i + 1, brokerName1); ReceiptHandle brokerHandle = create(brokerMessage); receiptHandleMessageList.add(new ReceiptHandleMessage(brokerHandle, brokerMessage.getMsgId())); broker1Msg.add(brokerMessage.getMsgId()); } for (int i = 0; i < msgNum; i++) { MessageExt brokerMessage = createMessageExt(TOPIC, "", 0, 3000, now, 0, 0, 0, i + 1, brokerName2); ReceiptHandle brokerHandle = create(brokerMessage); receiptHandleMessageList.add(new ReceiptHandleMessage(brokerHandle, brokerMessage.getMsgId())); broker2Msg.add(brokerMessage.getMsgId()); } // for this message, will throw exception in batchAckMessage MessageExt errThrowMessage = createMessageExt(TOPIC, "", 0, 3000, now, 0, 0, 0, 0, errThrowBrokerName); ReceiptHandle errThrowHandle = create(errThrowMessage); receiptHandleMessageList.add(new ReceiptHandleMessage(errThrowHandle, errThrowMessage.getMsgId())); Collections.shuffle(receiptHandleMessageList); doAnswer((Answer<CompletableFuture<AckResult>>) invocation -> { List<ReceiptHandleMessage> handleMessageList = invocation.getArgument(1, List.class); AckResult ackResult = new AckResult(); String brokerName = handleMessageList.get(0).getReceiptHandle().getBrokerName(); if (brokerName.equals(brokerName1)) { ackResult.setStatus(AckStatus.OK); } else if (brokerName.equals(brokerName2)) { ackResult.setStatus(AckStatus.NO_EXIST); } else { return FutureUtils.completeExceptionally(new RuntimeException()); } return CompletableFuture.completedFuture(ackResult); }).when(this.messageService).batchAckMessage(any(), anyList(), anyString(), anyString(), anyLong()); List<BatchAckResult> batchAckResultList = this.consumerProcessor.batchAckMessage(createContext(), receiptHandleMessageList, CONSUMER_GROUP, TOPIC, 3000).get(); assertEquals(receiptHandleMessageList.size(), batchAckResultList.size()); // check ackResult for each msg Map<String, BatchAckResult> msgBatchAckResult = new HashMap<>(); for (BatchAckResult batchAckResult : batchAckResultList) { msgBatchAckResult.put(batchAckResult.getReceiptHandleMessage().getMessageId(), batchAckResult); } for (String msgId : broker1Msg) { assertEquals(AckStatus.OK, msgBatchAckResult.get(msgId).getAckResult().getStatus()); assertNull(msgBatchAckResult.get(msgId).getProxyException()); } for (String msgId : broker2Msg) { assertEquals(AckStatus.NO_EXIST, msgBatchAckResult.get(msgId).getAckResult().getStatus()); assertNull(msgBatchAckResult.get(msgId).getProxyException()); } assertNotNull(msgBatchAckResult.get(expireMessage.getMsgId()).getProxyException()); assertEquals(ProxyExceptionCode.INVALID_RECEIPT_HANDLE, msgBatchAckResult.get(expireMessage.getMsgId()).getProxyException().getCode()); assertNull(msgBatchAckResult.get(expireMessage.getMsgId()).getAckResult()); assertNotNull(msgBatchAckResult.get(errThrowMessage.getMsgId()).getProxyException()); assertEquals(ProxyExceptionCode.INTERNAL_SERVER_ERROR, msgBatchAckResult.get(errThrowMessage.getMsgId()).getProxyException().getCode()); assertNull(msgBatchAckResult.get(errThrowMessage.getMsgId()).getAckResult()); }
@Override
public DirectPipelineResult run(Pipeline pipeline) {
    try {
        options =
            MAPPER
                .readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class)
                .as(DirectOptions.class);
    } catch (IOException e) {
        throw new IllegalArgumentException(
            "PipelineOptions specified failed to serialize to JSON.", e);
    }

    performRewrites(pipeline);
    MetricsEnvironment.setMetricsSupported(true);
    try {
        DirectGraphVisitor graphVisitor = new DirectGraphVisitor();
        pipeline.traverseTopologically(graphVisitor);

        @SuppressWarnings("rawtypes")
        KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create();
        pipeline.traverseTopologically(keyedPValueVisitor);

        DisplayDataValidator.validatePipeline(pipeline);
        DisplayDataValidator.validateOptions(options);

        ExecutorService metricsPool =
            Executors.newCachedThreadPool(
                new ThreadFactoryBuilder()
                    .setThreadFactory(MoreExecutors.platformThreadFactory())
                    .setDaemon(false) // otherwise you say you want to leak, please don't!
                    .setNameFormat("direct-metrics-counter-committer")
                    .build());
        DirectGraph graph = graphVisitor.getGraph();
        EvaluationContext context =
            EvaluationContext.create(
                clockSupplier.get(),
                Enforcement.bundleFactoryFor(enabledEnforcements, graph),
                graph,
                keyedPValueVisitor.getKeyedPValues(),
                metricsPool);

        TransformEvaluatorRegistry registry =
            TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options);
        PipelineExecutor executor =
            ExecutorServiceParallelExecutor.create(
                options.getTargetParallelism(),
                registry,
                Enforcement.defaultModelEnforcements(enabledEnforcements),
                context,
                metricsPool);
        executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options));

        DirectPipelineResult result = new DirectPipelineResult(executor, context);
        if (options.isBlockOnRun()) {
            try {
                result.waitUntilFinish();
            } catch (UserCodeException userException) {
                throw new PipelineExecutionException(userException.getCause());
            } catch (Throwable t) {
                if (t instanceof RuntimeException) {
                    throw (RuntimeException) t;
                }
                throw new RuntimeException(t);
            }
        }
        return result;
    } finally {
        MetricsEnvironment.setMetricsSupported(false);
    }
}
@Test
public void testMutatingInputCoderDoFnError() throws Exception {
    Pipeline pipeline = getPipeline();

    pipeline
        .apply(Create.of(new byte[] {0x1, 0x2, 0x3}, new byte[] {0x4, 0x5, 0x6}))
        .apply(
            ParDo.of(
                new DoFn<byte[], Integer>() {
                  @ProcessElement
                  public void processElement(ProcessContext c) {
                    byte[] inputArray = c.element();
                    inputArray[0] = 0xa;
                    c.output(13);
                  }
                }));

    thrown.expect(IllegalMutationException.class);
    thrown.expectMessage("Input");
    thrown.expectMessage("must not be mutated");
    pipeline.run();
}
@Override
public MavenArtifact searchSha1(String sha1) throws IOException {
    if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
        throw new IllegalArgumentException("Invalid SHA1 format");
    }
    final List<MavenArtifact> collectedMatchingArtifacts = new ArrayList<>(1);
    String continuationToken = retrievePageAndAddMatchingArtifact(collectedMatchingArtifacts, sha1, null);
    while (continuationToken != null && collectedMatchingArtifacts.isEmpty()) {
        continuationToken = retrievePageAndAddMatchingArtifact(collectedMatchingArtifacts, sha1, continuationToken);
    }
    if (collectedMatchingArtifacts.isEmpty()) {
        throw new FileNotFoundException("Artifact not found in Nexus");
    } else {
        return collectedMatchingArtifacts.get(0);
    }
}
@Test(expected = FileNotFoundException.class)
@Ignore
public void testMissingSha1() throws Exception {
    searcher.searchSha1("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
}
public void processVerstrekkingAanAfnemer(VerstrekkingAanAfnemer verstrekkingAanAfnemer) {
    if (logger.isDebugEnabled())
        logger.debug("Processing verstrekkingAanAfnemer: {}", marshallElement(verstrekkingAanAfnemer));

    Afnemersbericht afnemersbericht = afnemersberichtRepository.findByOnzeReferentie(verstrekkingAanAfnemer.getReferentieId());

    if (mismatch(verstrekkingAanAfnemer, afnemersbericht)) {
        digidXClient.remoteLogBericht(Log.NO_RELATION_TO_SENT_MESSAGE, verstrekkingAanAfnemer, afnemersbericht);
        return;
    }

    switch (verstrekkingAanAfnemer.getGebeurtenissoort().getNaam()) {
        case "Null" -> {
            logger.info("Start processing Null message");
            dglResponseService.processNullMessage(verstrekkingAanAfnemer.getGebeurtenisinhoud().getNull(), afnemersbericht);
            digidXClient.remoteLogWithoutRelatingToAccount(Log.MESSAGE_PROCESSED, "Null");
        }
        case "Ag01" -> {
            logger.info("Start processing Ag01 message");
            dglResponseService.processAg01(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAg01(), afnemersbericht);
            digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
        }
        case "Ag31" -> {
            logger.info("Start processing Ag31 message");
            dglResponseService.processAg31(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAg31(), afnemersbericht);
            digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
        }
        case "Af01" -> {
            logger.info("Start processing Af01 message");
            dglResponseService.processAf01(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAf01(), afnemersbericht);
            digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
        }
        case "Af11" -> {
            logger.info("Start processing Af11 message");
            dglResponseService.processAf11(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAf11(), afnemersbericht);
            digidXClient.remoteLogWithoutRelatingToAccount(Log.MESSAGE_PROCESSED, "Af11");
        }
        case "Gv01" -> {
            logger.info("Start processing Gv01 message");
            Gv01 gv01 = verstrekkingAanAfnemer.getGebeurtenisinhoud().getGv01();
            dglResponseService.processGv01(gv01);
            String bsn = CategorieUtil.findBsnOudeWaarde(gv01.getCategorie());
            if (bsn == null) {
                bsn = CategorieUtil.findBsn(gv01.getCategorie());
            }
            digidXClient.remoteLogSpontaneVerstrekking(Log.MESSAGE_PROCESSED, "Gv01", gv01.getANummer(), bsn);
        }
        case "Ng01" -> {
            logger.info("Start processing Ng01 message");
            Ng01 ng01 = verstrekkingAanAfnemer.getGebeurtenisinhoud().getNg01();
            dglResponseService.processNg01(ng01);
            digidXClient.remoteLogSpontaneVerstrekking(Log.MESSAGE_PROCESSED, "Ng01", CategorieUtil.findANummer(ng01.getCategorie()), "");
        }
        case "Wa11" -> {
            logger.info("Start processing Wa11 message");
            dglResponseService.processWa11(verstrekkingAanAfnemer.getGebeurtenisinhoud().getWa11());
        }
    }
}
@Test
public void testProcessAg31() {
    String testBsn = "SSSSSSSSS";
    Ag31 testAg31 = TestDglMessagesUtil.createTestAg31(testBsn, "O", "SSSSSSSS");
    VerstrekkingInhoudType inhoudType = new VerstrekkingInhoudType();
    inhoudType.setAg31(testAg31);
    GeversioneerdType type = new GeversioneerdType();
    type.setNaam("Ag31");

    when(verstrekkingAanAfnemer.getReferentieId()).thenReturn("referentieId");
    when(afnemersberichtRepository.findByOnzeReferentie("referentieId")).thenReturn(afnemersbericht);
    when(verstrekkingAanAfnemer.getGebeurtenissoort()).thenReturn(type);
    when(verstrekkingAanAfnemer.getGebeurtenisinhoud()).thenReturn(inhoudType);

    classUnderTest.processVerstrekkingAanAfnemer(verstrekkingAanAfnemer);

    verify(dglResponseService, times(1)).processAg31(testAg31, afnemersbericht);
}
@Override
@SuppressWarnings({ "rawtypes" })
synchronized public Value put(Transaction tx, Key key, Value value) throws IOException {
    Value oldValue = null;

    if (lastGetNodeCache != null && tx.equals(lastCacheTxSrc.get())) {
        if (lastGetEntryCache.getKey().equals(key)) {
            oldValue = lastGetEntryCache.setValue(value);
            lastGetEntryCache.setValue(value);
            lastGetNodeCache.storeUpdate(tx);
            flushCache();
            return oldValue;
        }

        // This searches from the last location of a call to get for the element to replace
        // all the way to the end of the ListIndex.
        Iterator<Map.Entry<Key, Value>> iterator = lastGetNodeCache.iterator(tx);
        while (iterator.hasNext()) {
            Map.Entry<Key, Value> entry = iterator.next();
            if (entry.getKey().equals(key)) {
                oldValue = entry.setValue(value);
                ((ListIterator) iterator).getCurrent().storeUpdate(tx);
                flushCache();
                return oldValue;
            }
        }
    } else {
        flushCache();
    }

    // Not found because the cache wasn't set or its not at the end of the list so we
    // start from the beginning and go to the cached location or the end, then we do
    // an add if its not found.
    Iterator<Map.Entry<Key, Value>> iterator = iterator(tx);
    while (iterator.hasNext() && ((ListIterator) iterator).getCurrent() != lastGetNodeCache) {
        Map.Entry<Key, Value> entry = iterator.next();
        if (entry.getKey().equals(key)) {
            oldValue = entry.setValue(value);
            ((ListIterator) iterator).getCurrent().storeUpdate(tx);
            flushCache();
            return oldValue;
        }
    }

    // Not found so add it last.
    flushCache();
    return add(tx, key, value);
}
@Test(timeout=60000)
public void testPut() throws Exception {
    createPageFileAndIndex(100);
    ListIndex<String, Long> listIndex = ((ListIndex<String, Long>) this.index);
    this.index.load(tx);
    tx.commit();

    int count = 30;
    tx = pf.tx();
    doInsert(count);
    tx.commit();
    assertEquals("correct size", count, listIndex.size());

    tx = pf.tx();
    Long value = listIndex.get(tx, key(10));
    assertNotNull(value);
    listIndex.put(tx, key(10), 1024L);
    tx.commit();

    tx = pf.tx();
    value = listIndex.get(tx, key(10));
    assertEquals(1024L, value.longValue());
    assertTrue(listIndex.size() == 30);
    tx.commit();

    tx = pf.tx();
    value = listIndex.put(tx, key(31), 2048L);
    assertNull(value);
    assertTrue(listIndex.size() == 31);
    tx.commit();
}
public static StructType groupingKeyType(Schema schema, Collection<PartitionSpec> specs) {
    return buildPartitionProjectionType("grouping key", specs, commonActiveFieldIds(schema, specs));
}
@Test
public void testGroupingKeyTypeWithAddingBackSamePartitionFieldInV2Table() {
    TestTables.TestTable table =
        TestTables.create(tableDir, "test", SCHEMA, BY_CATEGORY_DATA_SPEC, V2_FORMAT_VERSION);
    table.updateSpec().removeField("data").commit();
    table.updateSpec().addField("data").commit();

    StructType expectedType =
        StructType.of(NestedField.optional(1000, "category", Types.StringType.get()));
    StructType actualType = Partitioning.groupingKeyType(table.schema(), table.specs().values());
    assertThat(actualType).isEqualTo(expectedType);
}
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
@Test
public void testOrderedListInterleavedLocalAddClearReadRange() {
    Future<Map<Range<Instant>, RangeSet<Long>>> orderedListFuture = Futures.immediateFuture(null);
    Future<Map<Range<Instant>, RangeSet<Instant>>> deletionsFuture = Futures.immediateFuture(null);
    when(mockReader.valueFuture(
            systemKey(NAMESPACE, "orderedList" + IdTracker.IDS_AVAILABLE_STR),
            STATE_FAMILY,
            IdTracker.IDS_AVAILABLE_CODER))
        .thenReturn(orderedListFuture);
    when(mockReader.valueFuture(
            systemKey(NAMESPACE, "orderedList" + IdTracker.DELETIONS_STR),
            STATE_FAMILY,
            IdTracker.SUBRANGE_DELETIONS_CODER))
        .thenReturn(deletionsFuture);

    SettableFuture<Iterable<TimestampedValue<String>>> fromStorage = SettableFuture.create();
    Range<Long> readSubrange = Range.closedOpen(1 * 1000000L, 8 * 1000000L);
    when(mockReader.orderedListFuture(
            readSubrange, key(NAMESPACE, "orderedList"), STATE_FAMILY, StringUtf8Coder.of()))
        .thenReturn(fromStorage);

    StateTag<OrderedListState<String>> addr =
        StateTags.orderedList("orderedList", StringUtf8Coder.of());
    OrderedListState<String> orderedListState = underTest.state(NAMESPACE, addr);

    orderedListState.add(TimestampedValue.of("1", Instant.ofEpochSecond(1)));
    orderedListState.add(TimestampedValue.of("2", Instant.ofEpochSecond(2)));
    orderedListState.add(TimestampedValue.of("3", Instant.ofEpochSecond(3)));
    orderedListState.add(TimestampedValue.of("4", Instant.ofEpochSecond(4)));
    orderedListState.clearRange(Instant.ofEpochSecond(1), Instant.ofEpochSecond(4));
    orderedListState.add(TimestampedValue.of("5", Instant.ofEpochSecond(5)));
    orderedListState.add(TimestampedValue.of("6", Instant.ofEpochSecond(6)));
    orderedListState.add(TimestampedValue.of("3_again", Instant.ofEpochSecond(3)));
    orderedListState.add(TimestampedValue.of("7", Instant.ofEpochSecond(7)));
    orderedListState.add(TimestampedValue.of("8", Instant.ofEpochSecond(8)));

    fromStorage.set(ImmutableList.<TimestampedValue<String>>of());

    TimestampedValue[] expected =
        Iterables.toArray(
            ImmutableList.of(
                TimestampedValue.of("3_again", Instant.ofEpochSecond(3)),
                TimestampedValue.of("4", Instant.ofEpochSecond(4)),
                TimestampedValue.of("5", Instant.ofEpochSecond(5)),
                TimestampedValue.of("6", Instant.ofEpochSecond(6)),
                TimestampedValue.of("7", Instant.ofEpochSecond(7))),
            TimestampedValue.class);

    TimestampedValue[] read =
        Iterables.toArray(
            orderedListState.readRange(Instant.ofEpochSecond(1), Instant.ofEpochSecond(8)),
            TimestampedValue.class);
    assertArrayEquals(expected, read);
}
public void delete( String name ) {
    initialize();
    delete( metaStoreSupplier.get(), name, true );
}
@Test
public void testDelete() {
    addOne();
    connectionManager.delete( CONNECTION_NAME );
    Assert.assertNull( connectionManager.getConnectionDetails( TestConnectionWithBucketsProvider.SCHEME, CONNECTION_NAME ) );
}
public int poll(final long now) {
    int expiredTimers = 0;
    final Timer[] timers = this.timers;
    final TimerHandler timerHandler = this.timerHandler;

    while (size > 0 && expiredTimers < POLL_LIMIT) {
        final Timer timer = timers[0];
        if (timer.deadline > now) {
            break;
        }

        if (!timerHandler.onTimerEvent(timer.correlationId)) {
            break;
        }

        expiredTimers++;
        final int lastIndex = --size;
        final Timer lastTimer = timers[lastIndex];
        timers[lastIndex] = null;

        if (0 != lastIndex) {
            shiftDown(timers, lastIndex, 0, lastTimer);
        }

        timerByCorrelationId.remove(timer.correlationId);
        addToFreeList(timer);
    }

    return expiredTimers;
}
@Test
void pollIsANoOpWhenNoTimersWhereScheduled() {
    final TimerHandler timerHandler = mock(TimerHandler.class);
    final PriorityHeapTimerService timerService = new PriorityHeapTimerService(timerHandler);

    assertEquals(0, timerService.poll(Long.MIN_VALUE));

    verifyNoInteractions(timerHandler);
}
public CruiseConfig deserializeConfig(String content) throws Exception {
    String md5 = md5Hex(content);
    Element element = parseInputStream(new ByteArrayInputStream(content.getBytes()));
    LOGGER.debug("[Config Save] Updating config cache with new XML");

    CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
    setMd5(configForEdit, md5);
    configForEdit.setOrigins(new FileConfigOrigin());
    return configForEdit;
}
@Test
void shouldThrowExceptionWhenCommandIsEmpty() {
    String jobWithCommand = """
            <job name="functional">
              <tasks>
                <exec command="" arguments="" />
              </tasks>
            </job>
            """;
    String configWithInvalidCommand = withCommand(jobWithCommand);

    assertThatThrownBy(() -> xmlLoader.deserializeConfig(configWithInvalidCommand))
        .as("Should not allow empty command")
        .hasMessageContaining("Command is invalid. \"\" should conform to the pattern - \\S(.*\\S)?");
}
public static InMemoryJobService create(
    GrpcFnServer<ArtifactStagingService> stagingService,
    Function<String, String> stagingServiceTokenProvider,
    ThrowingConsumer<Exception, String> cleanupJobFn,
    JobInvoker invoker) {
    return new InMemoryJobService(
        stagingService,
        stagingServiceTokenProvider,
        cleanupJobFn,
        invoker,
        DEFAULT_MAX_INVOCATION_HISTORY);
}
@Test
public void testInvocationCleanup() {
    final int maxInvocationHistory = 3;
    service =
        InMemoryJobService.create(
            stagingServer, session -> "token", null, invoker, maxInvocationHistory);

    assertThat(getNumberOfInvocations(), is(0));

    Job job1 = runJob();
    assertThat(getNumberOfInvocations(), is(1));
    Job job2 = runJob();
    assertThat(getNumberOfInvocations(), is(2));
    Job job3 = runJob();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory));

    // All running invocations must be available and never be discarded
    // even if they exceed the max history size
    Job job4 = runJob();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory + 1));

    // We need to have more than maxInvocationHistory completed jobs for the cleanup to trigger
    job1.finish();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory + 1));
    job2.finish();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory + 1));
    job3.finish();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory + 1));

    // The fourth finished job exceeds maxInvocationHistory and triggers the cleanup
    job4.finish();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory));

    // Run a new job after the cleanup
    Job job5 = runJob();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory + 1));
    job5.finish();
    assertThat(getNumberOfInvocations(), is(maxInvocationHistory));
}
@Override
public TagPosition deleteTag(int bucketIndex, int tag) {
    for (int slotIndex = 0; slotIndex < mTagsPerBucket; slotIndex++) {
        if (readTag(bucketIndex, slotIndex) == tag) {
            writeTag(bucketIndex, slotIndex, 0);
            return new TagPosition(bucketIndex, slotIndex, CuckooStatus.OK);
        }
    }
    return new TagPosition(-1, -1, CuckooStatus.FAILURE_KEY_NOT_FOUND);
}
@Test
public void deleteTagTest() {
    CuckooTable cuckooTable = createCuckooTable();
    Random random = new Random();
    for (int i = 0; i < NUM_BUCKETS; i++) {
        for (int j = 0; j < TAGS_PER_BUCKET; j++) {
            int tag = random.nextInt(0xff);
            cuckooTable.writeTag(i, j, tag);
            assertEquals(CuckooStatus.OK, cuckooTable.deleteTag(i, tag).getStatus());
        }
    }
}
@Override
@Nonnull
public ProgressState call() {
    if (receptionDone) {
        return collector.offerBroadcast(DONE_ITEM);
    }
    if (connectionChanged) {
        throw new RestartableException("The member was reconnected: " + sourceAddressString);
    }
    tracker.reset();
    tracker.notDone();
    tryFillInbox();
    long ackItemLocal = 0;
    for (ObjWithPtionIdAndSize o; (o = inbox.peek()) != null; ) {
        final Object item = o.getItem();
        if (item == DONE_ITEM) {
            receptionDone = true;
            inbox.remove();
            assert inbox.peek() == null : "Found something in the queue beyond the DONE_ITEM: " + inbox.remove();
            break;
        }
        ProgressState outcome = item instanceof BroadcastItem broadcastItem
                ? collector.offerBroadcast(broadcastItem)
                : collector.offer(item, o.getPartitionId());
        if (!outcome.isDone()) {
            tracker.madeProgress(outcome.isMadeProgress());
            break;
        }
        tracker.madeProgress();
        inbox.remove();
        ackItemLocal += o.estimatedMemoryFootprint;
    }
    ackItem(ackItemLocal);
    numWaitingInInbox = inbox.size();
    return tracker.toProgressState();
}
@Test
public void when_receiveTwoObjects_then_emitThem() throws IOException {
    pushObjects(1, 2);
    t.call();
    assertEquals(asList(1, 2), collector.getBuffer());
}
static PrimitiveIterator.OfInt rangeTranslate(int from, int to, IntUnaryOperator translator) {
    return new IndexIterator(from, to + 1, i -> true, translator);
}
@Test
public void testRangeTranslate() {
    assertEquals(IndexIterator.rangeTranslate(11, 18, i -> i - 10), 1, 2, 3, 4, 5, 6, 7, 8);
}
public static Subject.Factory<Re2jStringSubject, String> re2jString() {
    return Re2jStringSubject.FACTORY;
}
@Test
public void doesNotMatch_string_succeeds() {
    assertAbout(re2jString()).that("world").doesNotMatch(PATTERN_STR);
}
@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
    return offsetsForTimes(timestampsToSearch, Duration.ofMillis(defaultApiTimeoutMs));
}
@Test
public void testOffsetsForTimesTimeoutException() {
    consumer = newConsumer();
    long timeout = 100;
    doThrow(new TimeoutException("Event did not complete in time and was expired by the reaper"))
        .when(applicationEventHandler).addAndGet(any());

    Throwable t = assertThrows(
        TimeoutException.class,
        () -> consumer.offsetsForTimes(mockTimestampToSearch(), Duration.ofMillis(timeout)));
    assertEquals("Failed to get offsets by times in " + timeout + "ms", t.getMessage());
}
@ApiOperation(value = "Send test email (sendTestMail)",
        notes = "Attempts to send test email to the System Administrator User using Mail Settings provided as a parameter. " +
                "You may change the 'To' email in the user profile of the System Administrator. " + SYSTEM_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('SYS_ADMIN')")
@RequestMapping(value = "/settings/testMail", method = RequestMethod.POST)
public void sendTestMail(
        @Parameter(description = "A JSON value representing the Mail Settings.")
        @RequestBody AdminSettings adminSettings) throws ThingsboardException {
    accessControlService.checkPermission(getCurrentUser(), Resource.ADMIN_SETTINGS, Operation.READ);
    adminSettings = checkNotNull(adminSettings);
    if (adminSettings.getKey().equals("mail")) {
        if (adminSettings.getJsonValue().has("enableOauth2") && adminSettings.getJsonValue().get("enableOauth2").asBoolean()) {
            AdminSettings mailSettings = checkNotNull(adminSettingsService.findAdminSettingsByKey(TenantId.SYS_TENANT_ID, "mail"));
            JsonNode refreshToken = mailSettings.getJsonValue().get("refreshToken");
            if (refreshToken == null) {
                throw new ThingsboardException("Refresh token was not generated. Please, generate refresh token.", ThingsboardErrorCode.GENERAL);
            }
            ObjectNode settings = (ObjectNode) adminSettings.getJsonValue();
            settings.put("refreshToken", refreshToken.asText());
        } else {
            if (!adminSettings.getJsonValue().has("password")) {
                AdminSettings mailSettings = checkNotNull(adminSettingsService.findAdminSettingsByKey(TenantId.SYS_TENANT_ID, "mail"));
                ((ObjectNode) adminSettings.getJsonValue()).put("password", mailSettings.getJsonValue().get("password").asText());
            }
        }
        String email = getCurrentUser().getEmail();
        mailService.sendTestMail(adminSettings.getJsonValue(), email);
    }
}
@Test
public void testSendTestMail() throws Exception {
    Mockito.doNothing().when(mailService).sendTestMail(any(), anyString());
    loginSysAdmin();
    AdminSettings adminSettings = doGet("/api/admin/settings/mail", AdminSettings.class);
    doPost("/api/admin/settings/testMail", adminSettings)
        .andExpect(status().isOk());
    Mockito.verify(mailService).sendTestMail(Mockito.any(), Mockito.anyString());
}
@Override
public int hashCode() {
    return Objects.hash(
        threadName,
        threadState,
        activeTasks,
        standbyTasks,
        mainConsumerClientId,
        restoreConsumerClientId,
        producerClientIds,
        adminClientId);
}
@Test
public void shouldNotBeEqualIfDifferInThreadName() {
    final ThreadMetadata differThreadName = new ThreadMetadataImpl(
        "different",
        THREAD_STATE,
        MAIN_CONSUMER_CLIENT_ID,
        RESTORE_CONSUMER_CLIENT_ID,
        PRODUCER_CLIENT_IDS,
        ADMIN_CLIENT_ID,
        ACTIVE_TASKS,
        STANDBY_TASKS
    );
    assertThat(threadMetadata, not(equalTo(differThreadName)));
    assertThat(threadMetadata.hashCode(), not(equalTo(differThreadName.hashCode())));
}
@Override
public boolean mayHaveMergesPending(String bucketSpace, int contentNodeIndex) {
    if (!stats.hasUpdatesFromAllDistributors()) {
        return true;
    }
    ContentNodeStats nodeStats = stats.getStats().getNodeStats(contentNodeIndex);
    if (nodeStats != null) {
        ContentNodeStats.BucketSpaceStats bucketSpaceStats = nodeStats.getBucketSpace(bucketSpace);
        return (bucketSpaceStats != null &&
                bucketSpaceStats.mayHaveBucketsPending(minMergeCompletionRatio));
    }
    return true;
}
@Test
void min_merge_completion_ratio_is_used_when_calculating_may_have_merges_pending() {
    // Completion ratio is (5-3)/5 = 0.4
    assertTrue(Fixture.fromBucketsPending(3, 0.6).mayHaveMergesPending("default", 1));
    // Completion ratio is (5-2)/5 = 0.6
    assertFalse(Fixture.fromBucketsPending(2, 0.6).mayHaveMergesPending("default", 1));
}
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
    checkArgument(
        OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
    return new AutoValue_UBinary(binaryOp, lhs, rhs);
}
@Test
public void bitwiseXor() {
    assertUnifiesAndInlines(
        "4 ^ 17", UBinary.create(Kind.XOR, ULiteral.intLit(4), ULiteral.intLit(17)));
}
public static <K, V> AsMap<K, V> asMap() {
    return new AsMap<>(false);
}
@Test
@Category(ValidatesRunner.class)
public void testEmptyMapSideInput() throws Exception {
    final PCollectionView<Map<String, Integer>> view =
        pipeline
            .apply(
                "CreateEmptyView", Create.empty(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of())))
            .apply(View.asMap());

    PCollection<Integer> results =
        pipeline
            .apply("Create1", Create.of(1))
            .apply(
                "OutputSideInputs",
                ParDo.of(
                        new DoFn<Integer, Integer>() {
                          @ProcessElement
                          public void processElement(ProcessContext c) {
                            assertTrue(c.sideInput(view).isEmpty());
                            assertTrue(c.sideInput(view).entrySet().isEmpty());
                            assertFalse(c.sideInput(view).entrySet().iterator().hasNext());
                            c.output(c.element());
                          }
                        })
                    .withSideInputs(view));

    // Pass at least one value through to guarantee that DoFn executes.
    PAssert.that(results).containsInAnyOrder(1);

    pipeline.run();
}