focal_method (string, length 13 to 60.9k)
test_case (string, length 25 to 109k)
@Override public void close() throws IOException { State prevState = state; if (state == State.CLOSING) return; state = State.CLOSING; sslEngine.closeOutbound(); try { if (prevState != State.NOT_INITIALIZED && isConnected()) { if (!flush(netWriteBuffer)) { throw new IOException("Remaining data in the network buffer, can't send SSL close message."); } //prep the buffer for the close message netWriteBuffer.clear(); //perform the close, since we called sslEngine.closeOutbound SSLEngineResult wrapResult = sslEngine.wrap(ByteUtils.EMPTY_BUF, netWriteBuffer); //we should be in a close state if (wrapResult.getStatus() != SSLEngineResult.Status.CLOSED) { throw new IOException("Unexpected status returned by SSLEngine.wrap, expected CLOSED, received " + wrapResult.getStatus() + ". Will not send close message to peer."); } netWriteBuffer.flip(); flush(netWriteBuffer); } } catch (IOException ie) { log.debug("Failed to send SSL Close message", ie); } finally { try { sslEngine.closeInbound(); } catch (SSLException e) { // This log is for debugging purposes as an exception might occur frequently // at this point due to peers not following the TLS specs and failing to send a close_notify alert. // Even if they do, currently, we do not read data from the socket after invoking close(). log.debug("SSLEngine.closeInBound() raised an exception.", e); } socketChannel.socket().close(); socketChannel.close(); netReadBuffer = null; netWriteBuffer = null; appReadBuffer = null; if (fileChannelBuffer != null) { ByteBufferUnmapper.unmap("fileChannelBuffer", fileChannelBuffer); fileChannelBuffer = null; } } }
@Test public void testSSLEngineCloseInboundInvokedOnClose() throws IOException { // Given SSLEngine sslEngine = mock(SSLEngine.class); Socket socket = mock(Socket.class); SocketChannel socketChannel = mock(SocketChannel.class); SelectionKey selectionKey = mock(SelectionKey.class); when(socketChannel.socket()).thenReturn(socket); when(selectionKey.channel()).thenReturn(socketChannel); doThrow(new SSLException("Mock exception")).when(sslEngine).closeInbound(); SslTransportLayer sslTransportLayer = new SslTransportLayer( "test-channel", selectionKey, sslEngine, mock(ChannelMetadataRegistry.class) ); // When sslTransportLayer.close(); // Then verify(sslEngine, times(1)).closeOutbound(); verify(sslEngine, times(1)).closeInbound(); verifyNoMoreInteractions(sslEngine); }
@Override public void acknowledge(OutputBufferId outputBufferId, long sequenceId) { requireNonNull(outputBufferId, "bufferId is null"); partitions.get(outputBufferId.getId()).acknowledgePages(sequenceId); }
@Test public void testAcknowledge() { int partitionId = 0; PartitionedOutputBuffer buffer = createPartitionedBuffer( createInitialEmptyOutputBuffers(PARTITIONED) .withBuffer(FIRST, partitionId) .withNoMoreBufferIds(), sizeOfPages(20)); // add three items to the buffer for (int i = 0; i < 3; i++) { addPage(buffer, createPage(i), partitionId); } assertQueueState(buffer, FIRST, 3, 0); // get the three elements from the first buffer assertBufferResultEquals(TYPES, getBufferResult(buffer, FIRST, 0, sizeOfPages(10), NO_WAIT), bufferResult(0, createPage(0), createPage(1), createPage(2))); // acknowledge pages 0 and 1 acknowledgeBufferResult(buffer, FIRST, 2); // only page 2 is not removed assertQueueState(buffer, FIRST, 1, 2); // acknowledge page 2 acknowledgeBufferResult(buffer, FIRST, 3); // nothing left assertQueueState(buffer, FIRST, 0, 3); // acknowledge more pages will fail try { acknowledgeBufferResult(buffer, FIRST, 4); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "Invalid sequence id"); } // fill the buffer for (int i = 3; i < 6; i++) { addPage(buffer, createPage(i), partitionId); } assertQueueState(buffer, FIRST, 3, 3); // getting new pages will again acknowledge the previously acknowledged pages but this is ok buffer.get(FIRST, 3, sizeOfPages(1)).cancel(true); assertQueueState(buffer, FIRST, 3, 3); }
@Override // Camel calls this method if the endpoint isSynchronous(), as the // KafkaEndpoint creates a SynchronousDelegateProducer for it public void process(Exchange exchange) throws Exception { // is the message body a list or something that contains multiple values Message message = exchange.getIn(); if (transactionId != null) { startKafkaTransaction(exchange); } if (endpoint.getConfiguration().isUseIterator() && isIterable(message.getBody())) { processIterableSync(exchange, message); } else { processSingleMessageSync(exchange, message); } }
@Test public void processSendsMessageWithTopicHeaderAndEndPoint() throws Exception { endpoint.getConfiguration().setTopic("sometopic"); Mockito.when(exchange.getIn()).thenReturn(in); Mockito.when(exchange.getMessage()).thenReturn(in); in.setHeader(KafkaConstants.PARTITION_KEY, 4); in.setHeader(KafkaConstants.TOPIC, "anotherTopic"); in.setHeader(KafkaConstants.KEY, "someKey"); producer.process(exchange); // the header is preserved assertNotNull(in.getHeader(KafkaConstants.TOPIC)); verifySendMessage(4, "sometopic", "someKey"); assertRecordMetadataExists(); }
public ConfigOperateResult insertOrUpdate(String srcIp, String srcUser, ConfigInfo configInfo, Map<String, Object> configAdvanceInfo) { try { ConfigInfoStateWrapper configInfoState = findConfigInfoState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant()); if (configInfoState == null) { return addConfigInfo(srcIp, srcUser, configInfo, configAdvanceInfo); } else { return updateConfigInfo(configInfo, srcIp, srcUser, configAdvanceInfo); } } catch (Exception exception) { LogUtil.FATAL_LOG.error("[db-error] try to update or add config failed, {}", exception.getMessage(), exception); throw exception; } }
@Test void testInsertOrUpdateOfInsertConfigSuccess() { String dataId = "dataId"; String group = "group"; String tenant = "tenant"; String appName = "appNameNew"; String content = "content132456"; Map<String, Object> configAdvanceInfo = new HashMap<>(); configAdvanceInfo.put("config_tags", "tag1,tag2"); ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content); long insertConfigIndoId = 12345678765L; GeneratedKeyHolder generatedKeyHolder = TestCaseUtils.createGeneratedKeyHolder(insertConfigIndoId); externalStorageUtilsMockedStatic.when(ExternalStorageUtils::createKeyHolder).thenReturn(generatedKeyHolder); //mock get config state Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(null, new ConfigInfoStateWrapper()); //mock insert config info Mockito.when(jdbcTemplate.update(any(PreparedStatementCreator.class), eq(generatedKeyHolder))).thenReturn(1); Mockito.when(jdbcTemplate.update(eq(externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag1"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant))).thenReturn(1); Mockito.when(jdbcTemplate.update(eq(externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag2"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant))).thenReturn(1); String srcIp = "srcIp"; String srcUser = "srcUser"; //mock insert config info Mockito.doNothing().when(historyConfigInfoPersistService) .insertConfigHistoryAtomic(eq(0), eq(configInfo), eq(srcIp), eq(srcUser), any(Timestamp.class), eq("I")); externalConfigInfoPersistService.insertOrUpdate(srcIp, srcUser, configInfo, configAdvanceInfo); //expect insert config info Mockito.verify(jdbcTemplate, times(1)).update(any(PreparedStatementCreator.class), eq(generatedKeyHolder)); //expect insert config tags Mockito.verify(jdbcTemplate, times(1)).update(eq( externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag1"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant)); Mockito.verify(jdbcTemplate, times(1)).update(eq( externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag2"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant)); //expect insert history info Mockito.verify(historyConfigInfoPersistService, times(1)) .insertConfigHistoryAtomic(eq(0L), eq(configInfo), eq(srcIp), eq(srcUser), any(Timestamp.class), eq("I")); }
@Override public ArrayList<Character> read(String charactersString) throws Exception { return null; }
@Test public void testRead() throws Exception { Controlador controlador = new Controlador(); try { ArrayList<Character> result = controlador.read("12345"); // Verify that the result is not null // Rest of the verification code } catch (Exception e) { fail("Unexpected exception: " + e.getMessage()); } }
@Override public <T> ListState<T> getListState(ListStateDescriptor<T> stateProperties) { KeyedStateStore keyedStateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties); stateProperties.initializeSerializerUnlessSet(this::createSerializer); return keyedStateStore.getListState(stateProperties); }
@Test void testV2ListStateInstantiation() throws Exception { final ExecutionConfig config = new ExecutionConfig(); SerializerConfig serializerConfig = config.getSerializerConfig(); serializerConfig.registerKryoType(Path.class); final AtomicReference<Object> descriptorCapture = new AtomicReference<>(); StreamingRuntimeContext context = createRuntimeContext(descriptorCapture, config); org.apache.flink.runtime.state.v2.ListStateDescriptor<TaskInfo> descr = new org.apache.flink.runtime.state.v2.ListStateDescriptor<>( "name", TypeInformation.of(TaskInfo.class), serializerConfig); context.getListState(descr); org.apache.flink.runtime.state.v2.ListStateDescriptor<?> descrIntercepted = (org.apache.flink.runtime.state.v2.ListStateDescriptor<?>) descriptorCapture.get(); TypeSerializer<?> serializer = descrIntercepted.getSerializer(); // check that the Path class is really registered, i.e., the execution config was applied assertThat(serializer).isInstanceOf(KryoSerializer.class); assertThat(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId()) .isPositive(); }
public static <K, V> Read<K, V> read() { return new AutoValue_KafkaIO_Read.Builder<K, V>() .setTopics(new ArrayList<>()) .setTopicPartitions(new ArrayList<>()) .setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN) .setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES) .setMaxNumRecords(Long.MAX_VALUE) .setCommitOffsetsInFinalizeEnabled(false) .setDynamicRead(false) .setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime()) .setConsumerPollingTimeout(2L) .setRedistributed(false) .setAllowDuplicates(false) .setRedistributeNumKeys(0) .build(); }
@Test public void testResolveDefaultApiTimeout() { final String defaultApiTimeoutConfig = "default.api.timeout.ms"; assertEquals( Duration.millis(20), KafkaUnboundedReader.resolveDefaultApiTimeout( KafkaIO.<Integer, Long>read() .withConsumerConfigUpdates( ImmutableMap.of( defaultApiTimeoutConfig, 20, ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30)))); assertEquals( Duration.millis(2 * 30), KafkaUnboundedReader.resolveDefaultApiTimeout( KafkaIO.<Integer, Long>read() .withConsumerConfigUpdates( ImmutableMap.of(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30)))); assertEquals( Duration.millis(60 * 1000), KafkaUnboundedReader.resolveDefaultApiTimeout(KafkaIO.<Integer, Long>read())); }
public static String getElasticJobNamespace() { // ElasticJob will persist job to namespace return getJobsPath(); }
@Test void assertGetElasticJobNamespace() { assertThat(PipelineMetaDataNode.getElasticJobNamespace(), is(jobsPath)); }
@Override protected Object createObject(ValueWrapper<Object> initialInstance, String className, Map<List<String>, Object> params, ClassLoader classLoader) { return fillBean(initialInstance, className, params, classLoader); }
@Test public void createObjectDirectMappingComplexType() { Map<List<String>, Object> params = new HashMap<>(); Person directMappingComplexTypeValue = new Person(); directMappingComplexTypeValue.setFirstName("TestName"); params.put(List.of(), directMappingComplexTypeValue); params.put(List.of("age"), 10); ValueWrapper<Object> initialInstance = runnerHelper.getDirectMapping(params); Object objectRaw = runnerHelper.createObject( initialInstance, Map.class.getCanonicalName(), params, getClass().getClassLoader()); assertThat(objectRaw).isInstanceOf(Person.class); Person object = (Person) objectRaw; assertThat(object.getAge()).isEqualTo(10); assertThat(object.getFirstName()).isEqualTo("TestName"); }
@VisibleForTesting public Optional<ProcessContinuation> run( PartitionMetadata partition, ChildPartitionsRecord record, RestrictionTracker<TimestampRange, Timestamp> tracker, ManualWatermarkEstimator<Instant> watermarkEstimator) { final String token = partition.getPartitionToken(); LOG.debug("[{}] Processing child partition record {}", token, record); final Timestamp startTimestamp = record.getStartTimestamp(); final Instant startInstant = new Instant(startTimestamp.toSqlTimestamp().getTime()); if (!tracker.tryClaim(startTimestamp)) { LOG.debug("[{}] Could not claim queryChangeStream({}), stopping", token, startTimestamp); return Optional.of(ProcessContinuation.stop()); } watermarkEstimator.setWatermark(startInstant); for (ChildPartition childPartition : record.getChildPartitions()) { processChildPartition(partition, record, childPartition); } LOG.debug("[{}] Child partitions action completed successfully", token); return Optional.empty(); }
@Test public void testRestrictionClaimedAnsIsSplitCaseAndChildExists() { final String partitionToken = "partitionToken"; final long heartbeat = 30L; final Timestamp startTimestamp = Timestamp.ofTimeMicroseconds(10L); final Timestamp endTimestamp = Timestamp.ofTimeMicroseconds(20L); final PartitionMetadata partition = mock(PartitionMetadata.class); final ChildPartitionsRecord record = new ChildPartitionsRecord( startTimestamp, "recordSequence", Arrays.asList( new ChildPartition("childPartition1", partitionToken), new ChildPartition("childPartition2", partitionToken)), null); when(partition.getEndTimestamp()).thenReturn(endTimestamp); when(partition.getHeartbeatMillis()).thenReturn(heartbeat); when(partition.getPartitionToken()).thenReturn(partitionToken); when(tracker.tryClaim(startTimestamp)).thenReturn(true); when(transaction.getPartition("childPartition1")).thenReturn(mock(Struct.class)); when(transaction.getPartition("childPartition2")).thenReturn(mock(Struct.class)); final Optional<ProcessContinuation> maybeContinuation = action.run(partition, record, tracker, watermarkEstimator); assertEquals(Optional.empty(), maybeContinuation); verify(watermarkEstimator).setWatermark(new Instant(startTimestamp.toSqlTimestamp().getTime())); }
synchronized void ensureTokenInitialized() throws IOException { // we haven't inited yet, or we used to have a token but it expired if (!hasInitedToken || (action != null && !action.isValid())) { //since we don't already have a token, go get one Token<?> token = fs.getDelegationToken(null); // security might be disabled if (token != null) { fs.setDelegationToken(token); addRenewAction(fs); LOG.debug("Created new DT for {}", token.getService()); } hasInitedToken = true; } }
@Test public void testInitWithUGIToken() throws IOException, URISyntaxException { Configuration conf = new Configuration(); DummyFs fs = spy(new DummyFs()); doReturn(null).when(fs).getDelegationToken(anyString()); Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0], new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234")); fs.ugi.addToken(token); fs.ugi.addToken(new Token<TokenIdentifier>(new byte[0], new byte[0], new Text("Other token"), new Text("127.0.0.1:8021"))); assertEquals("wrong tokens in user", 2, fs.ugi.getTokens().size()); fs.emulateSecurityEnabled = true; fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf); fs.tokenAspect.ensureTokenInitialized(); // Select a token from ugi (not from the remote host), store it but don't // renew it verify(fs).setDelegationToken(token); verify(fs, never()).getDelegationToken(anyString()); assertNull(Whitebox.getInternalState(fs.tokenAspect, "dtRenewer")); assertNull(Whitebox.getInternalState(fs.tokenAspect, "action")); }
Mono<ServerResponse> getComment(ServerRequest request) { String name = request.pathVariable("name"); return Mono.defer(() -> Mono.justOrEmpty(commentPublicQueryService.getByName(name))) .subscribeOn(Schedulers.boundedElastic()) .flatMap(comment -> ServerResponse.ok().bodyValue(comment)); }
@Test void getComment() { when(commentPublicQueryService.getByName(any())) .thenReturn(null); webTestClient.get() .uri("/comments/test-comment") .exchange() .expectStatus() .isOk(); verify(commentPublicQueryService, times(1)).getByName(eq("test-comment")); }
@Override public <T> T persist(T detachedObject) { Map<Object, Object> alreadyPersisted = new HashMap<Object, Object>(); return persist(detachedObject, alreadyPersisted, RCascadeType.PERSIST); }
@Test public void testObjectShouldNotBeAttached() { Assertions.assertThrows(IllegalArgumentException.class, () -> { Customer customer = new Customer("12"); customer = redisson.getLiveObjectService().persist(customer); Order order = new Order(); customer.getOrders().add(order); }); }
@Override public boolean matches(Job localJob, Job storageProviderJob) { return AllowedConcurrentStateChange.super.matches(localJob, storageProviderJob) && localJob.getVersion() == storageProviderJob.getVersion() - 1 && localJob.getLastJobStateOfType(FailedState.class).isPresent(); }
@Test void ifLocalJobHasOtherThanEnqueuedStateItWillNotMatch() { final Job scheduledJob = aScheduledJob().build(); final Job enqueuedJob = aCopyOf(scheduledJob).withEnqueuedState(Instant.now()).build(); boolean matchesAllowedStateChange = allowedStateChange.matches(scheduledJob, enqueuedJob); assertThat(matchesAllowedStateChange).isFalse(); }
@Subscribe public void inputUpdated(InputUpdated inputUpdatedEvent) { final String inputId = inputUpdatedEvent.id(); LOG.debug("Input updated: {}", inputId); final Input input; try { input = inputService.find(inputId); } catch (NotFoundException e) { LOG.warn("Received InputUpdated event but could not find input {}", inputId, e); return; } final boolean startInput; final IOState<MessageInput> inputState = inputRegistry.getInputState(inputId); if (inputState != null) { startInput = inputState.getState() == IOState.Type.RUNNING; inputRegistry.remove(inputState); } else { startInput = false; } if (startInput && (input.isGlobal() || this.nodeId.getNodeId().equals(input.getNodeId()))) { startInput(input); } }
@Test @SuppressWarnings("unchecked") public void inputUpdatedDoesNotStopInputIfItIsNotRunning() throws Exception { final String inputId = "input-id"; final Input input = mock(Input.class); when(inputService.find(inputId)).thenReturn(input); when(inputRegistry.getInputState(inputId)).thenReturn(null); listener.inputUpdated(InputUpdated.create(inputId)); verify(inputRegistry, never()).remove(any(IOState.class)); }
void validateLogLevelConfigs(Collection<AlterableConfig> ops) { ops.forEach(op -> { String loggerName = op.name(); switch (OpType.forId(op.configOperation())) { case SET: validateLoggerNameExists(loggerName); String logLevel = op.value(); if (!LogLevelConfig.VALID_LOG_LEVELS.contains(logLevel)) { throw new InvalidConfigurationException("Cannot set the log level of " + loggerName + " to " + logLevel + " as it is not a supported log level. " + "Valid log levels are " + VALID_LOG_LEVELS_STRING); } break; case DELETE: validateLoggerNameExists(loggerName); if (loggerName.equals(Log4jController.ROOT_LOGGER())) { throw new InvalidRequestException("Removing the log level of the " + Log4jController.ROOT_LOGGER() + " logger is not allowed"); } break; case APPEND: throw new InvalidRequestException(OpType.APPEND + " operation is not allowed for the " + BROKER_LOGGER + " resource"); case SUBTRACT: throw new InvalidRequestException(OpType.SUBTRACT + " operation is not allowed for the " + BROKER_LOGGER + " resource"); default: throw new InvalidRequestException("Unknown operation type " + (int) op.configOperation() + " is not allowed for the " + BROKER_LOGGER + " resource"); } }); }
@Test public void testValidateSetRootLogLevelConfig() { MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig(). setName(Log4jController.ROOT_LOGGER()). setConfigOperation(OpType.SET.id()). setValue("TRACE"))); }
@Override public String getPrefix() { return String.format("%s.%s", DriveProtocol.class.getPackage().getName(), "Drive"); }
@Test public void testPrefix() { assertEquals("ch.cyberduck.core.googledrive.Drive", new DriveProtocol().getPrefix()); }
public static String protectXMLCDATA( String content ) { if ( Utils.isEmpty( content ) ) { return content; } return "<![CDATA[" + content + "]]>"; }
@Test public void testProtectXMLCDATA() { assertEquals( null, Const.protectXMLCDATA( null ) ); assertEquals( "", Const.protectXMLCDATA( "" ) ); assertEquals( "<![CDATA[foo]]>", Const.protectXMLCDATA( "foo" ) ); }
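Side note on the pair above: protectXMLCDATA wraps the content verbatim, so input that already contains the CDATA terminator "]]>" would yield malformed XML. A minimal standalone sketch of that wrapping rule (a simplified re-statement, not the Kettle Const class itself; the empty-check here stands in for Utils.isEmpty):

public class CdataSketch {
    // Simplified re-statement of the wrapping rule shown in the focal method.
    static String protectXMLCDATA(String content) {
        if (content == null || content.isEmpty()) {
            return content;
        }
        return "<![CDATA[" + content + "]]>";
    }

    public static void main(String[] args) {
        System.out.println(protectXMLCDATA("foo"));   // <![CDATA[foo]]>
        // A "]]>" inside the content is not escaped, so the CDATA section
        // terminates earlier than intended:
        System.out.println(protectXMLCDATA("a]]>b")); // <![CDATA[a]]>b]]>
    }
}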
@Override public boolean hasResourcesAvailable(Container container) { return hasResourcesAvailable(container.getResource()); }
@Test public void testHasResourcesAvailable() { AllocationBasedResourceUtilizationTracker tracker = new AllocationBasedResourceUtilizationTracker(mockContainerScheduler); Container testContainer = mock(Container.class); when(testContainer.getResource()).thenReturn(Resource.newInstance(512, 4)); for (int i = 0; i < 2; i++) { Assert.assertTrue(tracker.hasResourcesAvailable(testContainer)); tracker.addContainerResources(testContainer); } Assert.assertFalse(tracker.hasResourcesAvailable(testContainer)); }
public void clearCookies() { parent.headers().remove(HttpHeaders.Names.COOKIE); }
@Test void testClearCookies() { URI uri = URI.create("http://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); httpReq.headers().put(HttpHeaders.Names.COOKIE, "XYZ=value"); DiscFilterRequest request = new DiscFilterRequest(httpReq); request.clearCookies(); assertNull(request.getHeader(HttpHeaders.Names.COOKIE)); }
public static <T> PrefetchableIterable<T> emptyIterable() { return (PrefetchableIterable<T>) EMPTY_ITERABLE; }
@Test public void testEmptyIterable() { verifyIterable(PrefetchableIterables.emptyIterable()); }
public static PredicateTreeAnnotations createPredicateTreeAnnotations(Predicate predicate) { PredicateTreeAnalyzerResult analyzerResult = PredicateTreeAnalyzer.analyzePredicateTree(predicate); // The tree size is used as the interval range. int intervalEnd = analyzerResult.treeSize; AnnotatorContext context = new AnnotatorContext(intervalEnd, analyzerResult.sizeMap); assignIntervalLabels(predicate, Interval.INTERVAL_BEGIN, intervalEnd, false, context); return new PredicateTreeAnnotations( analyzerResult.minFeature, intervalEnd, context.intervals, context.intervalsWithBounds, context.featureConjunctions); }
@Test void show_different_types_of_not_intervals() { { Predicate p = and( or( and( feature("key").inSet("A"), not(feature("key").inSet("B"))), and( not(feature("key").inSet("C")), feature("key").inSet("D"))), feature("foo").inSet("bar")); PredicateTreeAnnotations r = PredicateTreeAnnotator.createPredicateTreeAnnotations(p); assertEquals(3, r.minFeature); assertEquals(7, r.intervalEnd); assertEquals(6, r.intervalMap.size()); assertIntervalContains(r, "foo=bar", 0x00070007); assertIntervalContains(r, "key=A", 0x00010001); assertIntervalContains(r, "key=B", 0x00020002); assertIntervalContains(r, "key=C", 0x00010004); assertIntervalContains(r, "key=D", 0x00060006); assertIntervalContains(r, Feature.Z_STAR_COMPRESSED_ATTRIBUTE_NAME, 0x00020001, 0x00000006, 0x00040000); } { Predicate p = or( not(feature("key").inSet("A")), not(feature("key").inSet("B"))); PredicateTreeAnnotations r = PredicateTreeAnnotator.createPredicateTreeAnnotations(p); assertEquals(1, r.minFeature); assertEquals(4, r.intervalEnd); assertEquals(3, r.intervalMap.size()); assertIntervalContains(r, "key=A", 0x00010003); assertIntervalContains(r, "key=B", 0x00010003); assertIntervalContains(r, Feature.Z_STAR_COMPRESSED_ATTRIBUTE_NAME, 0x00030000, 0x00030000); } { Predicate p = or( and( not(feature("key").inSet("A")), not(feature("key").inSet("B"))), and( not(feature("key").inSet("C")), not(feature("key").inSet("D")))); PredicateTreeAnnotations r = PredicateTreeAnnotator.createPredicateTreeAnnotations(p); assertEquals(1, r.minFeature); assertEquals(8, r.intervalEnd); assertEquals(5, r.intervalMap.size()); assertIntervalContains(r, "key=A", 0x00010001); assertIntervalContains(r, "key=B", 0x00030007); assertIntervalContains(r, "key=C", 0x00010005); assertIntervalContains(r, "key=D", 0x00070007); assertIntervalContains(r, Feature.Z_STAR_COMPRESSED_ATTRIBUTE_NAME, 0x00010000, 0x00070002, 0x00050000, 0x00070006); } }
public static FileSystem write(final FileSystem fs, final Path path, final byte[] bytes) throws IOException { Objects.requireNonNull(path); Objects.requireNonNull(bytes); try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build()) { out.write(bytes); } return fs; }
@Test public void testWriteStringFileContext() throws IOException { URI uri = tmp.toURI(); Configuration conf = new Configuration(); FileContext fc = FileContext.getFileContext(uri, conf); Path testPath = new Path(new Path(uri), "writestring.out"); String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C"; FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8); String read = FileUtils.readFileToString(new File(testPath.toUri()), StandardCharsets.UTF_8); assertEquals(write, read); }
public int getDefaultSelectedSchemaIndex() { List<String> schemaNames; try { schemaNames = schemasProvider.getPartitionSchemasNames( transMeta ); } catch ( KettleException e ) { schemaNames = Collections.emptyList(); } PartitionSchema partitioningSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema(); int defaultSelectedSchemaIndex = 0; if ( partitioningSchema != null && partitioningSchema.getName() != null && !schemaNames.isEmpty() ) { defaultSelectedSchemaIndex = Const.indexOfString( partitioningSchema.getName(), schemaNames ); } return defaultSelectedSchemaIndex != -1 ? defaultSelectedSchemaIndex : 0; }
@Test public void defaultSelectedSchemaIndexWhenSchemaNameIsNotDefined() throws Exception { PartitionSchema schema = new PartitionSchema( ); StepPartitioningMeta meta = mock( StepPartitioningMeta.class ); when( meta.getPartitionSchema() ).thenReturn( schema ); when( stepMeta.getStepPartitioningMeta() ).thenReturn( meta ); List<String> schemas = Arrays.asList( "test" ); when( partitionSchemasProvider.getPartitionSchemasNames( any( TransMeta.class ) ) ).thenReturn( schemas ); assertEquals( 0, settings.getDefaultSelectedSchemaIndex() ); }
@ConstantFunction(name = "time_slice", argTypes = {DATETIME, INT, VARCHAR}, returnType = DATETIME, isMonotonic = true) public static ConstantOperator timeSlice(ConstantOperator datetime, ConstantOperator interval, ConstantOperator unit) throws AnalysisException { return timeSlice(datetime, interval, unit, ConstantOperator.createVarchar("floor")); }
@Test public void timeSlice() throws AnalysisException { class Param { final LocalDateTime dateTime; final int interval; final String unit; final String boundary; LocalDateTime expect; String e; public Param(LocalDateTime dateTime, int interval, String unit, LocalDateTime expect) { this(dateTime, interval, unit, "floor", expect); } private Param(LocalDateTime dateTime, int interval, String unit, String boundary, LocalDateTime expect) { this.dateTime = dateTime; this.interval = interval; this.unit = unit; this.boundary = boundary; this.expect = expect; } private Param(LocalDateTime dateTime, int interval, String unit, String boundary, String e) { this.dateTime = dateTime; this.interval = interval; this.unit = unit; this.boundary = boundary; this.e = e; } } // test case from be TimeFunctionsTest List<Param> cases = Arrays.asList( // second new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "second", LocalDateTime.of(0001, 1, 1, 21, 22, 50)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "second", LocalDateTime.of(0001, 3, 2, 14, 17, 25)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "second", LocalDateTime.of(0001, 5, 6, 11, 54, 20)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "second", LocalDateTime.of(2022, 7, 8, 9, 13, 15)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "second", LocalDateTime.of(2022, 9, 9, 8, 8, 15)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "second", LocalDateTime.of(2022, 11, 3, 23, 41, 35)), // minute new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "minute", LocalDateTime.of(0001, 1, 1, 21, 20, 0)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "minute", LocalDateTime.of(0001, 3, 2, 14, 15, 0)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "minute", LocalDateTime.of(0001, 5, 6, 11, 50, 0)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "minute", LocalDateTime.of(2022, 7, 8, 9, 10, 0)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "minute", LocalDateTime.of(2022, 9, 9, 8, 5, 0)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "minute", LocalDateTime.of(2022, 11, 3, 23, 40, 0)), // hour new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "hour", LocalDateTime.of(0001, 1, 1, 20, 0, 0)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "hour", LocalDateTime.of(0001, 3, 2, 10, 0, 0)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "hour", LocalDateTime.of(0001, 5, 6, 10, 0, 0)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "hour", LocalDateTime.of(2022, 7, 8, 8, 0, 0)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "hour", LocalDateTime.of(2022, 9, 9, 6, 0, 0)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "hour", LocalDateTime.of(2022, 11, 3, 21, 0, 0)), // day new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "day", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "day", LocalDateTime.of(0001, 3, 2, 0, 0, 0)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "day", LocalDateTime.of(0001, 5, 6, 0, 0, 0)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "day", LocalDateTime.of(2022, 7, 5, 0, 0, 0)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "day", LocalDateTime.of(2022, 9, 8, 0, 0, 0)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "day", LocalDateTime.of(2022, 11, 2, 0, 0, 0)), // month new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "month", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 
5, "month", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "month", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "month", LocalDateTime.of(2022, 4, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "month", LocalDateTime.of(2022, 9, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "month", LocalDateTime.of(2022, 9, 1, 0, 0, 0)), // year new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "year", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "year", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "year", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "year", LocalDateTime.of(2021, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "year", LocalDateTime.of(2021, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "year", LocalDateTime.of(2021, 1, 1, 0, 0, 0)), // week new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "week", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "week", LocalDateTime.of(0001, 2, 5, 0, 0, 0)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "week", LocalDateTime.of(0001, 4, 16, 0, 0, 0)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "week", LocalDateTime.of(2022, 6, 20, 0, 0, 0)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "week", LocalDateTime.of(2022, 8, 29, 0, 0, 0)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "week", LocalDateTime.of(2022, 10, 3, 0, 0, 0)), // quarter new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "quarter", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "quarter", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "quarter", LocalDateTime.of(0001, 1, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "quarter", LocalDateTime.of(2022, 4, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "quarter", LocalDateTime.of(2022, 4, 1, 0, 0, 0)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "quarter", LocalDateTime.of(2022, 4, 1, 0, 0, 0)), // second ceil new Param(LocalDateTime.of(0001, 1, 1, 21, 22, 51), 5, "second", "ceil", LocalDateTime.of(0001, 1, 1, 21, 22, 55)), new Param(LocalDateTime.of(0001, 3, 2, 14, 17, 28), 5, "second", "ceil", LocalDateTime.of(0001, 3, 2, 14, 17, 30)), new Param(LocalDateTime.of(0001, 5, 6, 11, 54, 23), 5, "second", "ceil", LocalDateTime.of(0001, 5, 6, 11, 54, 25)), new Param(LocalDateTime.of(2022, 7, 8, 9, 13, 19), 5, "second", "ceil", LocalDateTime.of(2022, 7, 8, 9, 13, 20)), new Param(LocalDateTime.of(2022, 9, 9, 8, 8, 16), 5, "second", "ceil", LocalDateTime.of(2022, 9, 9, 8, 8, 20)), new Param(LocalDateTime.of(2022, 11, 3, 23, 41, 37), 5, "second", "ceil", LocalDateTime.of(2022, 11, 3, 23, 41, 40)), new Param(LocalDateTime.of(0000, 01, 01, 00, 00, 00), 5, "hour", "floor", "time used with time_slice can't before 0001-01-01 00:00:00"), new Param(LocalDateTime.of(2023, 12, 31, 03, 12, 00), 2147483647, "minute", "floor", LocalDateTime.of(0001, 01, 01, 00, 00, 00)) ); for (Param testCase : cases) { try { ConstantOperator result = ScalarOperatorFunctions.timeSlice( ConstantOperator.createDatetime(testCase.dateTime), ConstantOperator.createInt(testCase.interval), 
ConstantOperator.createVarchar(testCase.unit), ConstantOperator.createVarchar(testCase.boundary) ); if (testCase.expect != null) { assertEquals(testCase.expect, result.getDatetime()); } else { Assert.fail(); } } catch (AnalysisException e) { assertTrue(e.getMessage().contains(testCase.e)); } } }
public static boolean shouldStartHazelcast(AppSettings appSettings) { return isClusterEnabled(appSettings.getProps()) && toNodeType(appSettings.getProps()).equals(NodeType.APPLICATION); }
@Test @UseDataProvider("validIPv4andIPv6Addresses") public void shouldStartHazelcast_should_return_true_on_AppNode(String host) { assertThat(ClusterSettings.shouldStartHazelcast(newSettingsForAppNode(host))).isTrue(); }
public boolean setLocations(DefaultIssue issue, @Nullable Object locations) { if (!locationsEqualsIgnoreHashes(locations, issue.getLocations())) { issue.setLocations(locations); issue.setChanged(true); issue.setLocationsChanged(true); return true; } return false; }
@Test void change_locations_if_secondary_message_changed() { DbIssues.Locations locations = DbIssues.Locations.newBuilder() .addFlow(DbIssues.Flow.newBuilder() .addLocation(DbIssues.Location.newBuilder().setMsg("msg1")) .build()) .build(); issue.setLocations(locations); DbIssues.Locations.Builder builder = locations.toBuilder(); builder.getFlowBuilder(0).getLocationBuilder(0).setMsg("msg2"); boolean updated = underTest.setLocations(issue, builder.build()); assertThat(updated).isTrue(); }
@Override @Deprecated public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier, final String... stateStoreNames) { process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames); }
@SuppressWarnings("deprecation") @Test public void shouldBindStateWithOldProcessorSupplier() { final Consumed<String, String> consumed = Consumed.with(Serdes.String(), Serdes.String()); final StreamsBuilder builder = new StreamsBuilder(); final String input = "input"; builder.stream(input, consumed) .process(new org.apache.kafka.streams.processor.ProcessorSupplier<String, String>() { @Override public org.apache.kafka.streams.processor.Processor<String, String> get() { return new org.apache.kafka.streams.processor.Processor<String, String>() { private KeyValueStore<String, Integer> sumStore; @Override public void init(final ProcessorContext context) { this.sumStore = context.getStateStore("sum"); } @Override public void process(final String key, final String value) { final Integer counter = sumStore.get(key); if (counter == null) { sumStore.putIfAbsent(key, value.length()); } else { if (value == null) { sumStore.delete(key); } else { sumStore.put(key, counter + value.length()); } } } @Override public void close() { } }; } @SuppressWarnings("unchecked") @Override public Set<StoreBuilder<?>> stores() { final Set<StoreBuilder<?>> stores = new HashSet<>(); stores.add(Stores.keyValueStoreBuilder( Stores.inMemoryKeyValueStore("sum"), Serdes.String(), Serdes.Integer() )); return stores; } }, Named.as("p")); final String topologyDescription = builder.build().describe().toString(); assertThat( topologyDescription, equalTo("Topologies:\n" + " Sub-topology: 0\n" + " Source: KSTREAM-SOURCE-0000000000 (topics: [input])\n" + " --> p\n" + " Processor: p (stores: [sum])\n" + " --> none\n" + " <-- KSTREAM-SOURCE-0000000000\n\n") ); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic<String, String> inputTopic = driver.createInputTopic( input, new StringSerializer(), new StringSerializer() ); inputTopic.pipeInput("A", "0", 5L); inputTopic.pipeInput("B", "00", 100L); inputTopic.pipeInput("C", "000", 0L); inputTopic.pipeInput("D", "0000", 0L); inputTopic.pipeInput("A", "00000", 10L); inputTopic.pipeInput("A", "000000", 8L); final KeyValueStore<String, Integer> sumStore = driver.getKeyValueStore("sum"); assertEquals(12, sumStore.get("A").intValue()); assertEquals(2, sumStore.get("B").intValue()); assertEquals(3, sumStore.get("C").intValue()); assertEquals(4, sumStore.get("D").intValue()); } }
public boolean createTable(CreateTableStmt stmt, List<Column> partitionColumns) throws DdlException { String dbName = stmt.getDbName(); String tableName = stmt.getTableName(); Map<String, String> properties = stmt.getProperties() != null ? stmt.getProperties() : new HashMap<>(); Path tablePath = null; boolean tableLocationExists = false; if (!stmt.isExternal()) { checkLocationProperties(properties); if (!Strings.isNullOrEmpty(properties.get(LOCATION_PROPERTY))) { String tableLocationWithUserAssign = properties.get(LOCATION_PROPERTY); tablePath = new Path(tableLocationWithUserAssign); if (pathExists(tablePath, hadoopConf)) { tableLocationExists = true; if (!isEmpty(tablePath, hadoopConf)) { throw new StarRocksConnectorException("not support creating table under non-empty directory: %s", tableLocationWithUserAssign); } } } else { tablePath = getDefaultLocation(dbName, tableName); } } else { // checkExternalLocationProperties(properties); if (properties.containsKey(EXTERNAL_LOCATION_PROPERTY)) { tablePath = new Path(properties.get(EXTERNAL_LOCATION_PROPERTY)); } else if (properties.containsKey(LOCATION_PROPERTY)) { tablePath = new Path(properties.get(LOCATION_PROPERTY)); } tableLocationExists = true; } HiveStorageFormat.check(properties); List<String> partitionColNames; if (partitionColumns.isEmpty()) { partitionColNames = stmt.getPartitionDesc() != null ? ((ListPartitionDesc) stmt.getPartitionDesc()).getPartitionColNames() : new ArrayList<>(); } else { partitionColNames = partitionColumns.stream().map(Column::getName).collect(Collectors.toList()); } // default is managed table HiveTable.HiveTableType tableType = HiveTable.HiveTableType.MANAGED_TABLE; if (stmt.isExternal()) { tableType = HiveTable.HiveTableType.EXTERNAL_TABLE; } HiveTable.Builder builder = HiveTable.builder() .setId(ConnectorTableId.CONNECTOR_ID_GENERATOR.getNextId().asInt()) .setTableName(tableName) .setCatalogName(catalogName) .setResourceName(toResourceName(catalogName, "hive")) .setHiveDbName(dbName) .setHiveTableName(tableName) .setPartitionColumnNames(partitionColNames) .setDataColumnNames(stmt.getColumns().stream() .map(Column::getName) .collect(Collectors.toList()).subList(0, stmt.getColumns().size() - partitionColNames.size())) .setFullSchema(stmt.getColumns()) .setTableLocation(tablePath == null ? null : tablePath.toString()) .setProperties(stmt.getProperties()) .setStorageFormat(HiveStorageFormat.get(properties.getOrDefault(FILE_FORMAT, "parquet"))) .setCreateTime(System.currentTimeMillis()) .setHiveTableType(tableType); Table table = builder.build(); try { if (!tableLocationExists) { createDirectory(tablePath, hadoopConf); } metastore.createTable(dbName, table); } catch (Exception e) { LOG.error("Failed to create table {}.{}", dbName, tableName); boolean shouldDelete; try { if (tableExists(dbName, tableName)) { LOG.warn("Table {}.{} already exists. But some error occur such as accessing meta service timeout", dbName, table, e); return true; } FileSystem fileSystem = FileSystem.get(URI.create(tablePath.toString()), hadoopConf); shouldDelete = !fileSystem.listLocatedStatus(tablePath).hasNext() && !tableLocationExists; if (shouldDelete) { fileSystem.delete(tablePath); } } catch (Exception e1) { LOG.error("Failed to delete table location {}", tablePath, e); } throw new DdlException(String.format("Failed to create table %s.%s. msg: %s", dbName, tableName, e.getMessage())); } return true; }
@Test public void testCreateTableForExternal() throws DdlException { new MockUp<HiveWriteUtils>() { @Mock public void createDirectory(Path path, Configuration conf) { } }; HiveMetastoreOperations mockedHmsOps = new HiveMetastoreOperations(cachingHiveMetastore, true, new Configuration(), MetastoreType.HMS, "hive_catalog") { @Override public Path getDefaultLocation(String dbName, String tableName) { return new Path("mytable_locatino"); } }; Map<String, String> properties = Maps.newHashMap(); properties.put("external_location", "hdfs://path_to_file/file_name"); CreateTableStmt stmt = new CreateTableStmt( false, true, new TableName("hive_catalog", "hive_db", "hive_table"), Lists.newArrayList( new ColumnDef("c1", TypeDef.create(PrimitiveType.INT)), new ColumnDef("p1", TypeDef.create(PrimitiveType.INT))), "hive", null, new ListPartitionDesc(Lists.newArrayList("p1"), new ArrayList<>()), null, properties, new HashMap<>(), "my table comment"); List<Column> columns = stmt.getColumnDefs().stream().map(def -> def.toColumn(null)).collect(Collectors.toList()); stmt.setColumns(columns); Assert.assertTrue(mockedHmsOps.createTable(stmt)); }
public FEELFnResult<Boolean> invoke(@ParameterName( "range1" ) Range range1, @ParameterName( "range2" ) Range range2) { if ( range1 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range1", "cannot be null")); } if ( range2 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range2", "cannot be null")); } try { boolean result = range1.getLowBoundary() == Range.RangeBoundary.CLOSED && range2.getHighBoundary() == Range.RangeBoundary.CLOSED && range1.getLowEndPoint().compareTo(range2.getHighEndPoint()) == 0; return FEELFnResult.ofResult( result ); } catch( Exception e ) { // points are not comparable return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range1", "cannot be compared to range2")); } }
@Test void invokeParamIsNull() { FunctionTestUtil.assertResultError(metByFunction.invoke(null, new RangeImpl()), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(metByFunction.invoke(new RangeImpl(), null), InvalidParametersEvent.class); }
@Override public boolean test(Pickle pickle) { URI picklePath = pickle.getUri(); if (!lineFilters.containsKey(picklePath)) { return true; } for (Integer line : lineFilters.get(picklePath)) { if (Objects.equals(line, pickle.getLocation().getLine()) || Objects.equals(line, pickle.getScenarioLocation().getLine()) || pickle.getExamplesLocation().map(Location::getLine).map(line::equals).orElse(false) || pickle.getRuleLocation().map(Location::getLine).map(line::equals).orElse(false) || pickle.getFeatureLocation().map(Location::getLine).map(line::equals).orElse(false)) { return true; } } return false; }
@Test void matches_first_example() { LinePredicate predicate = new LinePredicate(singletonMap( featurePath, singletonList(7))); assertTrue(predicate.test(firstPickle)); assertFalse(predicate.test(secondPickle)); assertFalse(predicate.test(thirdPickle)); assertFalse(predicate.test(fourthPickle)); }
@Override public void unregister(@NonNull SchemeWatcher watcher) { Assert.notNull(watcher, "Scheme watcher must not be null"); watchers.remove(watcher); }
@Test void shouldThrowExceptionWhenUnregisterNullWatcher() { assertThrows(IllegalArgumentException.class, () -> watcherManager.unregister(null)); }
@Override public ColumnStatistics buildColumnStatistics() { return new ColumnStatistics(nonNullValueCount, null, rawSize, storageSize); }
@Test public void testNoValues() { CountStatisticsBuilder statisticsBuilder = new CountStatisticsBuilder(); ColumnStatistics columnStatistics = statisticsBuilder.buildColumnStatistics(); assertEquals(columnStatistics.getNumberOfValues(), 0); }
public ParseResult parse(File file) throws IOException, SchemaParseException { return parse(file, null); }
@Test void testMultipleParseErrors() { SchemaParseException parseException = assertThrows(SchemaParseException.class, () -> new SchemaParser().parse(DummySchemaParser.SCHEMA_TEXT_ERROR).mainSchema()); assertTrue(parseException.getMessage().startsWith("Could not parse the schema")); Throwable[] suppressed = parseException.getSuppressed(); assertEquals(2, suppressed.length); assertEquals(DummySchemaParser.ERROR_MESSAGE, suppressed[0].getMessage()); assertEquals(JsonParseException.class, suppressed[1].getCause().getClass()); }
static DwrfStripeCacheMode toStripeCacheMode(DwrfProto.StripeCacheMode mode) { switch (mode) { case INDEX: return DwrfStripeCacheMode.INDEX; case FOOTER: return DwrfStripeCacheMode.FOOTER; case BOTH: return DwrfStripeCacheMode.INDEX_AND_FOOTER; default: return DwrfStripeCacheMode.NONE; } }
@Test public void testToStripeCacheMode() { assertEquals(DwrfMetadataReader.toStripeCacheMode(DwrfProto.StripeCacheMode.INDEX), DwrfStripeCacheMode.INDEX); assertEquals(DwrfMetadataReader.toStripeCacheMode(DwrfProto.StripeCacheMode.FOOTER), DwrfStripeCacheMode.FOOTER); assertEquals(DwrfMetadataReader.toStripeCacheMode(DwrfProto.StripeCacheMode.BOTH), DwrfStripeCacheMode.INDEX_AND_FOOTER); assertEquals(DwrfMetadataReader.toStripeCacheMode(DwrfProto.StripeCacheMode.NA), DwrfStripeCacheMode.NONE); }
@Override public void init(HttpRequest request, HttpResponse response) { String returnTo = request.getParameter(RETURN_TO_PARAMETER); Map<String, String> parameters = new HashMap<>(); Optional<String> sanitizeRedirectUrl = sanitizeRedirectUrl(returnTo); sanitizeRedirectUrl.ifPresent(s -> parameters.put(RETURN_TO_PARAMETER, s)); if (parameters.isEmpty()) { return; } response.addCookie(newCookieBuilder(request) .setName(AUTHENTICATION_COOKIE_NAME) .setValue(toJson(parameters)) .setHttpOnly(true) .setExpiry(FIVE_MINUTES_IN_SECONDS) .build()); }
@Test public void init_does_not_create_cookie_when_no_parameter() { underTest.init(request, response); verify(response, never()).addCookie(any(Cookie.class)); }
public Template getIndexTemplate(IndexSet indexSet) { final IndexSetMappingTemplate indexSetMappingTemplate = getTemplateIndexSetConfig(indexSet, indexSet.getConfig(), profileService); return indexMappingFactory.createIndexMapping(indexSet.getConfig()) .toTemplate(indexSetMappingTemplate); }
@Test void testUsesCustomMappingsAndProfileWhileGettingTemplate() { final TestIndexSet testIndexSet = indexSetConfig("test", "test-template-profiles", "custom", "000000000000000000000013", new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "string"), new CustomFieldMapping("f2", "long") ))); doReturn(Optional.of(new IndexFieldTypeProfile( "000000000000000000000013", "test_profile", "Test profile", new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "ip"), new CustomFieldMapping("f3", "ip") ))) )).when(profileService).get("000000000000000000000013"); IndexMappingTemplate indexMappingTemplateMock = mock(IndexMappingTemplate.class); doReturn(indexMappingTemplateMock).when(indexMappingFactory).createIndexMapping(testIndexSet.getConfig()); underTest.getIndexTemplate(testIndexSet); verify(indexMappingTemplateMock).toTemplate( new IndexSetMappingTemplate("standard", "test_*", new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "string"), //from individual custom mapping new CustomFieldMapping("f2", "long"), //from individual custom mapping new CustomFieldMapping("f3", "ip") //from profile ))) ); }
@Udf public String concat(@UdfParameter( description = "The varchar fields to concatenate") final String... inputs) { if (inputs == null) { return null; } return Arrays.stream(inputs) .filter(Objects::nonNull) .collect(Collectors.joining()); }
@Test public void shouldIgnoreNullInputs() { assertThat(udf.concat(null, "this ", null, "should ", null, "work!", null), is("this should work!")); assertThat(udf.concat(null, ByteBuffer.wrap(new byte[] {1}), null, ByteBuffer.wrap(new byte[] {2}), null), is(ByteBuffer.wrap(new byte[] {1, 2}))); }
@CanIgnoreReturnValue public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) { checkNotNull(expectedMultimap, "expectedMultimap"); checkNotNull(actual); ListMultimap<?, ?> missing = difference(expectedMultimap, actual); ListMultimap<?, ?> extra = difference(actual, expectedMultimap); // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in // the subject but not enough times. Similarly for unexpected extra items. if (!missing.isEmpty()) { if (!extra.isEmpty()) { boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries()); // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still // show the actual and expected multimaps in the standard format). String missingDisplay = addTypeInfo ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries()) : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing)); String extraDisplay = addTypeInfo ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries()) : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra)); failWithActual( fact("missing", missingDisplay), fact("unexpected", extraDisplay), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } else { failWithActual( fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } } else if (!extra.isEmpty()) { failWithActual( fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap); }
@Test public void containsExactlyInOrderFailureValuesOnly() { ImmutableMultimap<Integer, String> actual = ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four"); ImmutableMultimap<Integer, String> expected = ImmutableMultimap.of(3, "six", 3, "two", 3, "one", 4, "five", 4, "four"); assertThat(actual).containsExactlyEntriesIn(expected); expectFailureWhenTestingThat(actual).containsExactlyEntriesIn(expected).inOrder(); assertFailureKeys( "contents match, but order was wrong", "keys with out-of-order values", "---", "expected", "but was"); assertFailureValue("keys with out-of-order values", "[3]"); }
@Override public String arguments() { ArrayList<String> args = new ArrayList<>(); if (buildFile != null) { args.add("-f \"" + FilenameUtils.separatorsToUnix(buildFile) + "\""); } if (target != null) { args.add(target); } return StringUtils.join(args, " "); }
@Test public void shouldReturnEmptyStringForDefault() throws Exception { RakeTask rakeTask = new RakeTask(); assertThat(rakeTask.arguments(), is("")); }
public static boolean isListEqual(List<String> firstList, List<String> secondList) { if (firstList == null && secondList == null) { return true; } if (firstList == null || secondList == null) { return false; } if (firstList == secondList) { return true; } if (firstList.size() != secondList.size()) { return false; } boolean flag1 = firstList.containsAll(secondList); boolean flag2 = secondList.containsAll(firstList); return flag1 && flag2; }
@Test void testIsListEqualForNull() { assertTrue(CollectionUtils.isListEqual(null, null)); assertFalse(CollectionUtils.isListEqual(Collections.emptyList(), null)); assertFalse(CollectionUtils.isListEqual(null, Collections.emptyList())); }
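Side note on the pair above: isListEqual combines a size check with containsAll in both directions, so element order is ignored, and lists with the same distinct elements and the same length but different duplicate counts still compare as equal. A minimal standalone sketch of that rule (a simplified re-statement, not the Nacos CollectionUtils class itself):

import java.util.Arrays;
import java.util.List;

public class IsListEqualSketch {
    // Equal sizes plus containsAll in both directions, as in the focal method.
    static boolean isListEqual(List<String> a, List<String> b) {
        if (a == null && b == null) return true;
        if (a == null || b == null) return false;
        return a.size() == b.size() && a.containsAll(b) && b.containsAll(a);
    }

    public static void main(String[] args) {
        // Order is ignored.
        System.out.println(isListEqual(Arrays.asList("a", "b"), Arrays.asList("b", "a"))); // true
        // Same distinct elements and same length, but different duplicate
        // counts: containsAll still holds both ways, so this is also true.
        System.out.println(isListEqual(Arrays.asList("x", "x", "y"), Arrays.asList("x", "y", "y"))); // true
    }
}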
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); if (data.size() < 3) { onInvalidDataReceived(device, data); return; } final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 0); final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); final int status = data.getIntValue(Data.FORMAT_UINT8, 2); if (responseCode != SC_OP_CODE_RESPONSE_CODE) { onInvalidDataReceived(device, data); return; } if (status != SC_RESPONSE_SUCCESS) { onSCOperationError(device, requestCode, status); return; } if (requestCode == SC_OP_CODE_REQUEST_SUPPORTED_SENSOR_LOCATIONS) { final int size = data.size() - 3; final int[] locations = new int[size]; for (int i = 0; i < size; ++i) { locations[i] = data.getIntValue(Data.FORMAT_UINT8, 3 + i); } onSupportedSensorLocationsReceived(device, locations); } else { onSCOperationCompleted(device, requestCode); } }
@Test public void onSupportedSensorLocationsReceived() { final MutableData data = new MutableData(new byte[] { 0x10, 0x04, 0x01, 1, 2, 3}); response.onDataReceived(null, data); assertTrue(success); assertEquals(0, errorCode); assertEquals(4, requestCode); assertNotNull(locations); assertEquals(3, locations.length); assertEquals(2, locations[1]); }
byte[] removeEscapedEnclosures( byte[] field, int nrEnclosuresFound ) { byte[] result = new byte[field.length - nrEnclosuresFound]; int resultIndex = 0; for ( int i = 0; i < field.length; i++ ) { result[resultIndex++] = field[i]; if ( field[i] == enclosure[0] && i + 1 < field.length && field[i + 1] == enclosure[0] ) { // Skip the escaped enclosure after adding the first one i++; } } return result; }
@Test public void testRemoveEscapedEnclosuresWithOneEscapedInMiddle() { CsvInputData csvInputData = new CsvInputData(); csvInputData.enclosure = "\"".getBytes(); String result = new String( csvInputData.removeEscapedEnclosures( "abcd \"\" defg".getBytes(), 1 ) ); assertEquals( "abcd \" defg", result ); }
public String filterNamespaceName(String namespaceName) { if (namespaceName.toLowerCase().endsWith(".properties")) { int dotIndex = namespaceName.lastIndexOf("."); return namespaceName.substring(0, dotIndex); } return namespaceName; }
@Test public void testFilterNamespaceNameWithRandomCase() throws Exception { String someName = "AbC.ProPErties"; assertEquals("AbC", namespaceUtil.filterNamespaceName(someName)); }
public static boolean isUnclosedQuote(final String line) { // CHECKSTYLE_RULES.ON: CyclomaticComplexity int quoteStart = -1; for (int i = 0; i < line.length(); ++i) { if (quoteStart < 0 && isQuoteChar(line, i)) { quoteStart = i; } else if (quoteStart >= 0 && isTwoQuoteStart(line, i) && !isEscaped(line, i)) { // Together, two quotes are effectively an escaped quote and don't act as a quote character. // Skip the next quote char, since it's coupled with the first. i++; } else if (quoteStart >= 0 && isQuoteChar(line, i) && !isEscaped(line, i)) { quoteStart = -1; } } final int commentInd = line.indexOf(COMMENT); if (commentInd < 0) { return quoteStart >= 0; } else if (quoteStart < 0) { return false; } else { return commentInd > quoteStart; } }
@Test public void shouldNotFindUnclosedQuote_commentAfterQuote() { // Given: final String line = "some line 'quoted text' -- this is a comment"; // Then: assertThat(UnclosedQuoteChecker.isUnclosedQuote(line), is(false)); }
@Override public void beginCommit(Long txId) { if (txId <= lastSeenTxn.txnid) { LOG.info("txID {} is already processed, lastSeenTxn {}. Triggering recovery.", txId, lastSeenTxn); long start = System.currentTimeMillis(); options.recover(lastSeenTxn.dataFilePath, lastSeenTxn.offset); LOG.info("Recovery took {} ms.", System.currentTimeMillis() - start); } updateIndex(txId); }
@Test public void testIndexFileCreation() { HdfsState state = createHdfsState(); state.beginCommit(1L); Collection<File> files = FileUtils.listFiles(new File(TEST_OUT_DIR), null, false); File hdfsIndexFile = Paths.get(TEST_OUT_DIR, INDEX_FILE_PREFIX + TEST_TOPOLOGY_NAME + ".0").toFile(); assertTrue(files.contains(hdfsIndexFile)); }
public ProcessContinuation run( RestrictionTracker<TimestampRange, Timestamp> tracker, OutputReceiver<PartitionMetadata> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator) { final Timestamp readTimestamp = tracker.currentRestriction().getFrom(); // Updates the current watermark as the min of the watermarks from all existing partitions final Timestamp minWatermark = dao.getUnfinishedMinWatermark(); if (minWatermark != null) { return processPartitions(tracker, receiver, watermarkEstimator, minWatermark, readTimestamp); } else { return terminate(tracker); } }
@Test public void testTerminatesWhenAllPartitionsAreFinished() { final Timestamp from = Timestamp.ofTimeMicroseconds(10L); when(restriction.getFrom()).thenReturn(from); when(dao.getUnfinishedMinWatermark()).thenReturn(null); final ProcessContinuation continuation = action.run(tracker, receiver, watermarkEstimator); assertEquals(ProcessContinuation.stop(), continuation); verify(watermarkEstimator, never()).setWatermark(any()); verify(receiver, never()).outputWithTimestamp(any(), any()); }
@Deprecated public RegistryBuilder wait(Integer wait) { this.wait = wait; return getThis(); }
@Test void testWait() { RegistryBuilder builder = new RegistryBuilder(); builder.wait(Integer.valueOf(1000)); Assertions.assertEquals(1000, builder.build().getWait()); }
@Override protected String buildUndoSQL() { return super.buildUndoSQL(); }
@Test public void buildUndoSQL() { String sql = executor.buildUndoSQL().toLowerCase(); Assertions.assertNotNull(sql); Assertions.assertTrue(sql.contains("update")); Assertions.assertTrue(sql.contains("id")); Assertions.assertTrue(sql.contains("age")); }
public static Predicate parse(String expression) { final Stack<Predicate> predicateStack = new Stack<>(); final Stack<Character> operatorStack = new Stack<>(); final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll(""); final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true); boolean isTokenMode = true; while (true) { final Character operator; final String token; if (isTokenMode) { if (tokenizer.hasMoreTokens()) { token = tokenizer.nextToken(); } else { break; } if (OPERATORS.contains(token)) { operator = token.charAt(0); } else { operator = null; } } else { operator = operatorStack.pop(); token = null; } isTokenMode = true; if (operator == null) { try { predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance()); } catch (ClassCastException e) { throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e); } catch (Exception e) { throw new RuntimeException(e); } } else { if (operatorStack.empty() || operator == '(') { operatorStack.push(operator); } else if (operator == ')') { while (operatorStack.peek() != '(') { evaluate(predicateStack, operatorStack); } operatorStack.pop(); } else { if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) { evaluate(predicateStack, operatorStack); isTokenMode = false; } operatorStack.push(operator); } } } while (!operatorStack.empty()) { evaluate(predicateStack, operatorStack); } if (predicateStack.size() > 1) { throw new RuntimeException("Invalid logical expression"); } return predicateStack.pop(); }
@Test public void testAndOr() { final Predicate parsed = PredicateExpressionParser.parse("com.linkedin.data.it.AlwaysTruePredicate & com.linkedin.data.it.AlwaysTruePredicate | com.linkedin.data.it.AlwaysFalsePredicate"); Assert.assertEquals(parsed.getClass(), OrPredicate.class); final List<Predicate> orChildren = ((OrPredicate) parsed).getChildPredicates(); Assert.assertEquals(orChildren.get(0).getClass(), AndPredicate.class); Assert.assertEquals(orChildren.get(1).getClass(), AlwaysFalsePredicate.class); final List<Predicate> andChildren = ((AndPredicate) orChildren.get(0)).getChildPredicates(); Assert.assertEquals(andChildren.get(0).getClass(), AlwaysTruePredicate.class); Assert.assertEquals(andChildren.get(1).getClass(), AlwaysTruePredicate.class); }
public static boolean isEmpty(String value) { return StringUtils.isEmpty(value) || "false".equalsIgnoreCase(value) || "0".equalsIgnoreCase(value) || "null".equalsIgnoreCase(value) || "N/A".equalsIgnoreCase(value); }
@Test void testIsEmpty() throws Exception { assertThat(ConfigUtils.isEmpty(null), is(true)); assertThat(ConfigUtils.isEmpty(""), is(true)); assertThat(ConfigUtils.isEmpty("false"), is(true)); assertThat(ConfigUtils.isEmpty("FALSE"), is(true)); assertThat(ConfigUtils.isEmpty("0"), is(true)); assertThat(ConfigUtils.isEmpty("null"), is(true)); assertThat(ConfigUtils.isEmpty("NULL"), is(true)); assertThat(ConfigUtils.isEmpty("n/a"), is(true)); assertThat(ConfigUtils.isEmpty("N/A"), is(true)); }
public synchronized boolean autoEnterPureIncrementPhaseIfAllowed() { if (!enterPureIncrementPhase && maxSnapshotSplitsHighWatermark.compareTo(startupOffset) == 0) { split.asIncrementalSplit().getCompletedSnapshotSplitInfos().clear(); enterPureIncrementPhase = true; return true; } return false; }
@Test public void testAutoEnterPureIncrementPhaseIfAllowed() { Offset startupOffset = new TestOffset(100); List<CompletedSnapshotSplitInfo> snapshotSplits = Collections.emptyList(); IncrementalSplit split = createIncrementalSplit(startupOffset, snapshotSplits); IncrementalSplitState splitState = new IncrementalSplitState(split); Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); Assertions.assertFalse(splitState.autoEnterPureIncrementPhaseIfAllowed()); startupOffset = new TestOffset(100); snapshotSplits = Stream.of( createCompletedSnapshotSplitInfo( "test1", new TestOffset(100), new TestOffset(100)), createCompletedSnapshotSplitInfo( "test2", new TestOffset(100), new TestOffset(100))) .collect(Collectors.toList()); split = createIncrementalSplit(startupOffset, snapshotSplits); splitState = new IncrementalSplitState(split); Assertions.assertFalse(splitState.isEnterPureIncrementPhase()); Assertions.assertTrue(splitState.autoEnterPureIncrementPhaseIfAllowed()); Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); Assertions.assertFalse(splitState.autoEnterPureIncrementPhaseIfAllowed()); Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); startupOffset = new TestOffset(100); snapshotSplits = Stream.of( createCompletedSnapshotSplitInfo( "test1", new TestOffset(100), new TestOffset(100)), createCompletedSnapshotSplitInfo( "test2", new TestOffset(100), new TestOffset(101))) .collect(Collectors.toList()); split = createIncrementalSplit(startupOffset, snapshotSplits); splitState = new IncrementalSplitState(split); Assertions.assertFalse(splitState.isEnterPureIncrementPhase()); Assertions.assertFalse(splitState.autoEnterPureIncrementPhaseIfAllowed()); }
@VisibleForTesting Collection<NumaNodeResource> getNumaNodesList() { return numaNodesList; }
@Test public void testReadNumaTopologyFromConfigurations() throws Exception { Collection<NumaNodeResource> nodesList = numaResourceAllocator .getNumaNodesList(); Collection<NumaNodeResource> expectedNodesList = getExpectedNumaNodesList(); Assert.assertEquals(expectedNodesList, nodesList); }
@Override public void createPort(Port osPort) { checkNotNull(osPort, ERR_NULL_PORT); checkArgument(!Strings.isNullOrEmpty(osPort.getId()), ERR_NULL_PORT_ID); checkArgument(!Strings.isNullOrEmpty(osPort.getNetworkId()), ERR_NULL_PORT_NET_ID); osNetworkStore.createPort(osPort); log.info(String.format(MSG_PORT, osPort.getId(), MSG_CREATED)); }
@Test(expected = IllegalArgumentException.class) public void createDuplicatePort() { target.createPort(PORT); target.createPort(PORT); }
public static Future<?> runClosureInThread(String groupId, final Closure done, final Status status) { if (done == null) { return null; } return runInThread(groupId, () -> { try { done.run(status); } catch (final Throwable t) { LOG.error("Fail to run done closure", t); } }); }
@Test public void testRunClosure() throws Exception { CountDownLatch latch = new CountDownLatch(1); ThreadPoolsFactory.runClosureInThread(GROUP_ID_001, status -> { assertTrue(status.isOk()); latch.countDown(); }); latch.await(); }
@Nonnull @Override public Result addChunk(ByteBuf buf, @Nullable SocketAddress remoteAddress) { if (!buf.isReadable(2)) { return new Result(null, false); } try { final IpfixParser.MessageDescription messageDescription = shallowParser.shallowParseMessage(buf); final long observationDomainId = messageDescription.getHeader().observationDomainId(); addTemplateKeyInCache(remoteAddress, messageDescription, observationDomainId); // TODO handle options templates // collects all data records that are now ready to be sent final Set<ShallowDataSet> packetsToSendCollection = new HashSet<>(); // the set of template records to include in the newly created message that is our "aggregate result" final Set<Integer> bufferedTemplateIdList = new HashSet<>(); if (!messageDescription.declaredTemplateIds().isEmpty()) { // if we have new templates, look for buffered data records that we have all the templates for now final Set<Integer> knownTemplateIdsList = new HashSet<>(); collectAllTemplateIds(remoteAddress, observationDomainId, knownTemplateIdsList); final Queue<ShallowDataSet> bufferedPackets = packetCache.getIfPresent(TemplateKey.idForExporter(remoteAddress, observationDomainId)); handleBufferedPackets(packetsToSendCollection, bufferedTemplateIdList, knownTemplateIdsList, bufferedPackets); } boolean packetBuffered = false; // the list of template keys to return in the result ( TODO this copies all of the templates all the time :( ) final Set<TemplateKey> templatesList = new HashSet<>(templateCache.asMap().keySet()); bufferedTemplateIdList.addAll(messageDescription.referencedTemplateIds()); LOG.debug("Finding the needed templates for the buffered and current packets"); for (int templateId : bufferedTemplateIdList) { final TemplateKey templateKey = new TemplateKey(remoteAddress, observationDomainId, templateId); final Object template = templateCache.getIfPresent(templateKey); if (template == null) { LOG.debug("Template is null, packet needs to be buffered until templates have been received."); try { final TemplateKey newTemplateKey = TemplateKey.idForExporter(remoteAddress, observationDomainId); final Queue<ShallowDataSet> bufferedPackets = packetCache.get(newTemplateKey, ConcurrentLinkedQueue::new); final byte[] bytes = ByteBufUtil.getBytes(buf); bufferedPackets.addAll(messageDescription.dataSets()); packetBuffered = true; } catch (ExecutionException ignored) { // the loader cannot fail, it only creates a new queue } } else { LOG.debug("Template [{}] has been added to template list.", templateKey); templatesList.add(templateKey); packetsToSendCollection.addAll(messageDescription.dataSets()); } } // if we have buffered this packet, don't try to process it now. we still need all the templates for it if (packetBuffered) { LOG.debug("Packet has been buffered and will not be processed now, returning result."); return new Result(null, true); } // if we didn't buffer anything but also didn't have anything queued that can be processed, don't proceed. if (packetsToSendCollection.isEmpty()) { LOG.debug("Packet has not been buffered and no packet is queued."); return new Result(null, true); } final IpfixJournal.RawIpfix.Builder journalBuilder = IpfixJournal.RawIpfix.newBuilder(); buildJournalObject(packetsToSendCollection, templatesList, journalBuilder); final IpfixJournal.RawIpfix rawIpfix = journalBuilder.build(); return getCompleteResult(rawIpfix); } catch (Exception e) { LOG.error("Unable to aggregate IPFIX message due to the following error ", e); return new Result(null, false); } }
@Test public void dataAndDataTemplate() throws IOException, URISyntaxException { final IpfixAggregator ipfixAggregator = new IpfixAggregator(); final Map<String, Object> configMap = getIxiaConfigmap(); final Configuration configuration = new Configuration(configMap); final IpfixCodec codec = new IpfixCodec(configuration, ipfixAggregator, messageFactory); AtomicInteger messageCount = new AtomicInteger(); try (InputStream stream = Resources.getResource("data-datatemplate.pcap").openStream()) { final Pcap pcap = Pcap.openStream(stream); pcap.loop(packet -> { if (packet.hasProtocol(Protocol.UDP)) { final UDPPacket udp = (UDPPacket) packet.getPacket(Protocol.UDP); final InetSocketAddress source = new InetSocketAddress(udp.getParentPacket().getSourceIP(), udp.getSourcePort()); byte[] payload = new byte[udp.getPayload().getReadableBytes()]; udp.getPayload().getBytes(payload); final ByteBuf buf = Unpooled.wrappedBuffer(payload); final CodecAggregator.Result result = ipfixAggregator.addChunk(buf, source); final ByteBuf ipfixRawBuf = result.getMessage(); if (ipfixRawBuf != null) { byte[] bytes = new byte[ipfixRawBuf.readableBytes()]; ipfixRawBuf.getBytes(0, bytes); final Collection<Message> messages = codec.decodeMessages(new RawMessage(bytes)); if (messages != null) { messageCount.addAndGet(messages.size()); } } } return true; }); } catch (IOException e) { LOG.debug("Cannot process PCAP stream", e); } assertThat(messageCount.get()).isEqualTo(4L); }
public EvaluationResult evaluate(Condition condition, Measure measure) { checkArgument(SUPPORTED_METRIC_TYPE.contains(condition.getMetric().getType()), "Conditions on MetricType %s are not supported", condition.getMetric().getType()); Comparable measureComparable = parseMeasure(measure); if (measureComparable == null) { return new EvaluationResult(Measure.Level.OK, null); } return evaluateCondition(condition, measureComparable) .orElseGet(() -> new EvaluationResult(Measure.Level.OK, measureComparable)); }
@Test public void testGreater() { Metric metric = createMetric(FLOAT); Measure measure = newMeasureBuilder().create(10.2d, 1, null); assertThat(underTest.evaluate(createCondition(metric, GREATER_THAN, "10.1"), measure)).hasLevel(ERROR).hasValue(10.2d); assertThat(underTest.evaluate(createCondition(metric, GREATER_THAN, "10.2"), measure)).hasLevel(OK).hasValue(10.2d); assertThat(underTest.evaluate(createCondition(metric, GREATER_THAN, "10.3"), measure)).hasLevel(OK).hasValue(10.2d); }
ControllerResult<Map<String, ApiError>> updateFeatures( Map<String, Short> updates, Map<String, FeatureUpdate.UpgradeType> upgradeTypes, boolean validateOnly ) { TreeMap<String, ApiError> results = new TreeMap<>(); List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP); for (Entry<String, Short> entry : updates.entrySet()) { results.put(entry.getKey(), updateFeature(entry.getKey(), entry.getValue(), upgradeTypes.getOrDefault(entry.getKey(), FeatureUpdate.UpgradeType.UPGRADE), records)); } if (validateOnly) { return ControllerResult.of(Collections.emptyList(), results); } else { return ControllerResult.atomicOf(records, results); } }
@Test public void testCannotDowngradeToVersionBeforeMinimumSupportedKraftVersion() { FeatureControlManager manager = TEST_MANAGER_BUILDER1.build(); assertEquals(ControllerResult.of(Collections.emptyList(), singletonMap(MetadataVersion.FEATURE_NAME, new ApiError(Errors.INVALID_UPDATE_VERSION, "Invalid update version 3 for feature metadata.version. Local controller 0 only " + "supports versions 4-7"))), manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_2_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), true)); }
@VisibleForTesting public static JobGraph createJobGraph(StreamGraph streamGraph) { return new StreamingJobGraphGenerator( Thread.currentThread().getContextClassLoader(), streamGraph, null, Runnable::run) .createJobGraph(); }
@Test void testResourcesForIteration() throws Exception { ResourceSpec resource1 = ResourceSpec.newBuilder(0.1, 100).build(); ResourceSpec resource2 = ResourceSpec.newBuilder(0.2, 200).build(); ResourceSpec resource3 = ResourceSpec.newBuilder(0.3, 300).build(); ResourceSpec resource4 = ResourceSpec.newBuilder(0.4, 400).build(); ResourceSpec resource5 = ResourceSpec.newBuilder(0.5, 500).build(); Method opMethod = getSetResourcesMethodAndSetAccessible(SingleOutputStreamOperator.class); Method sinkMethod = getSetResourcesMethodAndSetAccessible(DataStreamSink.class); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Integer> source = env.addSource( new ParallelSourceFunction<Integer>() { @Override public void run(SourceContext<Integer> ctx) throws Exception {} @Override public void cancel() {} }) .name("test_source"); opMethod.invoke(source, resource1); IterativeStream<Integer> iteration = source.iterate(3000); opMethod.invoke(iteration, resource2); DataStream<Integer> flatMap = iteration .flatMap( new FlatMapFunction<Integer, Integer>() { @Override public void flatMap(Integer value, Collector<Integer> out) throws Exception { out.collect(value); } }) .name("test_flatMap"); opMethod.invoke(flatMap, resource3); // CHAIN(flatMap -> Filter) DataStream<Integer> increment = flatMap.filter( new FilterFunction<Integer>() { @Override public boolean filter(Integer value) throws Exception { return false; } }) .name("test_filter"); opMethod.invoke(increment, resource4); DataStreamSink<Integer> sink = iteration .closeWith(increment) .addSink( new SinkFunction<Integer>() { @Override public void invoke(Integer value) throws Exception {} }) .disableChaining() .name("test_sink"); sinkMethod.invoke(sink, resource5); JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph()); for (JobVertex jobVertex : jobGraph.getVertices()) { if (jobVertex.getName().contains("test_source")) { assertThat(jobVertex.getMinResources()).isEqualTo(resource1); } else if (jobVertex.getName().contains("Iteration_Source")) { assertThat(jobVertex.getPreferredResources()).isEqualTo(resource2); } else if (jobVertex.getName().contains("test_flatMap")) { assertThat(jobVertex.getMinResources()).isEqualTo(resource3.merge(resource4)); } else if (jobVertex.getName().contains("Iteration_Tail")) { assertThat(jobVertex.getPreferredResources()).isEqualTo(ResourceSpec.DEFAULT); } else if (jobVertex.getName().contains("test_sink")) { assertThat(jobVertex.getMinResources()).isEqualTo(resource5); } } }
public static List<AclEntry> filterAclEntriesByAclSpec( List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException { ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class); EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class); EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class); for (AclEntry existingEntry: existingAcl) { if (aclSpec.containsKey(existingEntry)) { scopeDirty.add(existingEntry.getScope()); if (existingEntry.getType() == MASK) { maskDirty.add(existingEntry.getScope()); } } else { if (existingEntry.getType() == MASK) { providedMask.put(existingEntry.getScope(), existingEntry); } else { aclBuilder.add(existingEntry); } } } copyDefaultsIfNeeded(aclBuilder); calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); return buildAndValidateAcl(aclBuilder); }
@Test public void testFilterAclEntriesByAclSpecAccessMaskPreserved() throws AclException { List<AclEntry> existing = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, USER, "bruce", READ)) .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) .add(aclEntry(ACCESS, GROUP, READ)) .add(aclEntry(ACCESS, MASK, READ)) .add(aclEntry(ACCESS, OTHER, READ)) .add(aclEntry(DEFAULT, USER, ALL)) .add(aclEntry(DEFAULT, USER, "bruce", READ)) .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE)) .add(aclEntry(DEFAULT, GROUP, READ)) .add(aclEntry(DEFAULT, MASK, READ_WRITE)) .add(aclEntry(DEFAULT, OTHER, NONE)) .build(); List<AclEntry> aclSpec = Lists.newArrayList( aclEntry(DEFAULT, USER, "diana")); List<AclEntry> expected = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, USER, "bruce", READ)) .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) .add(aclEntry(ACCESS, GROUP, READ)) .add(aclEntry(ACCESS, MASK, READ)) .add(aclEntry(ACCESS, OTHER, READ)) .add(aclEntry(DEFAULT, USER, ALL)) .add(aclEntry(DEFAULT, USER, "bruce", READ)) .add(aclEntry(DEFAULT, GROUP, READ)) .add(aclEntry(DEFAULT, MASK, READ)) .add(aclEntry(DEFAULT, OTHER, NONE)) .build(); assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); }
public static OffsetBasedPagination forStartRowNumber(int startRowNumber, int pageSize) { checkArgument(startRowNumber >= 1, "startRowNumber must be >= 1"); checkArgument(pageSize >= 1, "page size must be >= 1"); return new OffsetBasedPagination(startRowNumber - 1, pageSize); }
@Test void hashcode_whenSameObjects_shouldBeEquals() { OffsetBasedPagination offsetBasedPagination = OffsetBasedPagination.forStartRowNumber(15, 20); Assertions.assertThat(offsetBasedPagination).hasSameHashCodeAs(offsetBasedPagination); }
@Nonnull public <K, V> KafkaProducer<K, V> getProducer(@Nullable String transactionalId) { if (getConfig().isShared()) { if (transactionalId != null) { throw new IllegalArgumentException("Cannot use transactions with shared " + "KafkaProducer for DataConnection" + getConfig().getName()); } retain(); //noinspection unchecked return (KafkaProducer<K, V>) producerSupplier.get(); } else { if (transactionalId != null) { @SuppressWarnings({"rawtypes", "unchecked"}) Map<String, Object> castProperties = (Map) getConfig().getProperties(); Map<String, Object> copy = new HashMap<>(castProperties); copy.put("transactional.id", transactionalId); return new KafkaProducer<>(copy); } else { return new KafkaProducer<>(getConfig().getProperties()); } } }
@Test public void shared_producer_is_allowed_to_be_created_with_empty_props() { kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport); Producer<Object, Object> kafkaProducer = kafkaDataConnection.getProducer(null, new Properties()); assertThat(kafkaProducer).isNotNull(); kafkaProducer.close(); kafkaDataConnection.release(); }
@Override public JsonArray deepCopy() { if (!elements.isEmpty()) { JsonArray result = new JsonArray(elements.size()); for (JsonElement element : elements) { result.add(element.deepCopy()); } return result; } return new JsonArray(); }
@Test public void testDeepCopy() { JsonArray original = new JsonArray(); JsonArray firstEntry = new JsonArray(); original.add(firstEntry); JsonArray copy = original.deepCopy(); original.add(new JsonPrimitive("y")); assertThat(copy).hasSize(1); firstEntry.add(new JsonPrimitive("z")); assertThat(original.get(0).getAsJsonArray()).hasSize(1); assertThat(copy.get(0).getAsJsonArray()).hasSize(0); }
@Override public StateInstance getStateInstance(String stateInstanceId, String machineInstId) { StateInstance stateInstance = selectOne( stateLogStoreSqls.getGetStateInstanceByIdAndMachineInstanceIdSql(dbType), RESULT_SET_TO_STATE_INSTANCE, machineInstId, stateInstanceId); deserializeParamsAndException(stateInstance); return stateInstance; }
@Test public void testGetStateInstance() { Assertions.assertDoesNotThrow(() -> dbAndReportTcStateLogStore.getStateInstance("test", "test")); }
protected int calcAllArgConstructorParameterUnits(Schema record) { if (record.getType() != Schema.Type.RECORD) throw new RuntimeException("This method must only be called for record schemas."); return record.getFields().size(); }
@Test void calcAllArgConstructorParameterUnitsFailure() { assertThrows(RuntimeException.class, () -> { Schema nonRecordSchema = SchemaBuilder.array().items().booleanType(); new SpecificCompiler().calcAllArgConstructorParameterUnits(nonRecordSchema); }); }
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize")); }
@Test public void testListFileDot() throws Exception { final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path file = new SpectraTouchFeature(session).touch( new Path(container, ".", EnumSet.of(Path.Type.file)), new TransferStatus()); assertNotNull(new SpectraObjectListService(session).list(container, new DisabledListProgressListener()).find(new SimplePathPredicate(file))); new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@Override public Num calculate(BarSeries series, Position position) { return varianceCriterion.calculate(series, position).sqrt(); }
@Test public void calculateStandardDeviationPnL() { MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105); TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series, series.one()), Trade.sellAt(2, series, series.one()), Trade.buyAt(3, series, series.one()), Trade.sellAt(5, series, series.one())); AnalysisCriterion criterion = getCriterion(new ProfitLossCriterion()); assertNumEquals(2.5, criterion.calculate(series, tradingRecord)); }
Mono<Void> dispatchNotification(Reason reason, Subscriber subscriber) { return getNotifiersBySubscriber(subscriber, reason) .flatMap(notifierName -> client.fetch(NotifierDescriptor.class, notifierName)) .flatMap(descriptor -> prepareNotificationElement(subscriber, reason, descriptor)) .flatMap(element -> { var dispatchMono = sendNotification(element); if (subscriber.isAnonymous()) { return dispatchMono; } // create notification for user var innerNotificationMono = createNotification(element); return Mono.when(dispatchMono, innerNotificationMono); }) .then(); }
@Test public void testDispatchNotification() { var spyNotificationCenter = spy(notificationCenter); doReturn(Flux.just("email-notifier")) .when(spyNotificationCenter).getNotifiersBySubscriber(any(), any()); NotifierDescriptor notifierDescriptor = mock(NotifierDescriptor.class); when(client.fetch(eq(NotifierDescriptor.class), eq("email-notifier"))) .thenReturn(Mono.just(notifierDescriptor)); var notificationElement = mock(DefaultNotificationCenter.NotificationElement.class); doReturn(Mono.just(notificationElement)) .when(spyNotificationCenter).prepareNotificationElement(any(), any(), any()); doReturn(Mono.empty()).when(spyNotificationCenter).sendNotification(any()); var reason = new Reason(); reason.setMetadata(new Metadata()); reason.getMetadata().setName("reason-a"); reason.setSpec(new Reason.Spec()); reason.getSpec().setReasonType("new-reply-on-comment"); var subscription = createSubscriptions().get(0); var subscriptionName = subscription.getMetadata().getName(); var subscriber = new Subscriber(UserIdentity.of(subscription.getSpec().getSubscriber().getName()), subscriptionName); spyNotificationCenter.dispatchNotification(reason, subscriber).block(); verify(client).fetch(eq(NotifierDescriptor.class), eq("email-notifier")); verify(spyNotificationCenter).sendNotification(any()); verify(spyNotificationCenter, times(0)).createNotification(any()); }
@DELETE @Timed @ApiOperation(value = "Delete all revisions of a content pack") @ApiResponses(value = { @ApiResponse(code = 400, message = "Missing or invalid content pack"), @ApiResponse(code = 500, message = "Error while saving content pack") }) @AuditEvent(type = AuditEventTypes.CONTENT_PACK_DELETE) @Path("{contentPackId}") @JsonView(ContentPackView.HttpView.class) public void deleteContentPack( @ApiParam(name = "contentPackId", value = "Content Pack ID", required = true) @PathParam("contentPackId") final ModelId contentPackId) { checkPermission(RestPermissions.CONTENT_PACK_DELETE, contentPackId.toString()); if (!contentPackInstallationPersistenceService.findByContentPackId(contentPackId).isEmpty()) { throw new BadRequestException("Content pack " + contentPackId + " with all its revisions can't be deleted: There are still installations of this content pack"); } final int deleted = contentPackPersistenceService.deleteById(contentPackId); LOG.debug("Deleted {} content packs with id {}", deleted, contentPackId); }
@Test public void deleteContentPack() throws Exception { final ModelId id = ModelId.of("1"); when(contentPackPersistenceService.deleteById(id)).thenReturn(1); contentPackResource.deleteContentPack(id); verify(contentPackPersistenceService, times(1)).deleteById(id); when(contentPackPersistenceService.deleteByIdAndRevision(id, 1)).thenReturn(1); contentPackResource.deleteContentPack(id, 1); verify(contentPackPersistenceService, times(1)).deleteByIdAndRevision(id, 1); }
@Override public boolean containsSlots(ResourceID owner) { return slotsPerTaskExecutor.containsKey(owner); }
@Test void testContainsSlots() { final DefaultAllocatedSlotPool slotPool = new DefaultAllocatedSlotPool(); final ResourceID owner = ResourceID.generate(); final AllocatedSlot allocatedSlot = createAllocatedSlot(owner); slotPool.addSlots(Collections.singleton(allocatedSlot), 0); assertThat(slotPool.containsSlots(owner)).isTrue(); assertThat(slotPool.containsSlots(ResourceID.generate())).isFalse(); }
@Override protected boolean isInfinite(Short number) { // Infinity never applies here because only types like Float and Double have Infinity return false; }
@Test void testIsInfinite() { ShortSummaryAggregator ag = new ShortSummaryAggregator(); // always false for Short assertThat(ag.isInfinite((short) -1)).isFalse(); assertThat(ag.isInfinite((short) 0)).isFalse(); assertThat(ag.isInfinite((short) 23)).isFalse(); assertThat(ag.isInfinite(Short.MAX_VALUE)).isFalse(); assertThat(ag.isInfinite(Short.MIN_VALUE)).isFalse(); assertThat(ag.isInfinite(null)).isFalse(); }
@Override protected boolean redirectMatches(String requestedRedirect, String redirectUri) { if (isStrictMatch()) { // we're doing a strict string match for all clients return Strings.nullToEmpty(requestedRedirect).equals(redirectUri); } else { // otherwise do the prefix-match from the library return super.redirectMatches(requestedRedirect, redirectUri); } }
@Test public void testRedirectMatches_default() { // this is not an exact match boolean res1 = resolver.redirectMatches(pathUri, goodUri); assertThat(res1, is(false)); // this is an exact match boolean res2 = resolver.redirectMatches(goodUri, goodUri); assertThat(res2, is(true)); }
@ApiOperation(value = "Save user settings (saveUserSettings)", notes = "Save user settings represented in json format for authorized user. ") @PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN', 'CUSTOMER_USER')") @PostMapping(value = "/user/settings") public JsonNode saveUserSettings(@RequestBody JsonNode settings) throws ThingsboardException { SecurityUser currentUser = getCurrentUser(); UserSettings userSettings = new UserSettings(); userSettings.setType(UserSettingsType.GENERAL); userSettings.setSettings(settings); userSettings.setUserId(currentUser.getId()); return userSettingsService.saveUserSettings(currentUser.getTenantId(), userSettings).getSettings(); }
@Test public void testSaveUserSettings() throws Exception { loginCustomerUser(); JsonNode userSettings = JacksonUtil.toJsonNode("{\"A\":5, \"B\":10, \"E\":18}"); JsonNode savedSettings = doPost("/api/user/settings", userSettings, JsonNode.class); Assert.assertEquals(userSettings, savedSettings); JsonNode retrievedSettings = doGet("/api/user/settings", JsonNode.class); Assert.assertEquals(retrievedSettings, userSettings); }
@Override public CompletableFuture<Acknowledge> requestSlot( final SlotID slotId, final JobID jobId, final AllocationID allocationId, final ResourceProfile resourceProfile, final String targetAddress, final ResourceManagerId resourceManagerId, final Time timeout) { // TODO: Filter invalid requests from the resource manager by using the // instance/registration Id try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) { log.info( "Receive slot request {} for job {} from resource manager with leader id {}.", allocationId, jobId, resourceManagerId); if (!isConnectedToResourceManager(resourceManagerId)) { final String message = String.format( "TaskManager is not connected to the resource manager %s.", resourceManagerId); log.debug(message); return FutureUtils.completedExceptionally(new TaskManagerException(message)); } tryPersistAllocationSnapshot( new SlotAllocationSnapshot( slotId, jobId, targetAddress, allocationId, resourceProfile)); try { final boolean isConnected = allocateSlotForJob( jobId, slotId, allocationId, resourceProfile, targetAddress); if (isConnected) { offerSlotsToJobManager(jobId); } return CompletableFuture.completedFuture(Acknowledge.get()); } catch (SlotAllocationException e) { log.debug("Could not allocate slot for allocation id {}.", allocationId, e); return FutureUtils.completedExceptionally(e); } } }
@Test void testSlotOfferCounterIsSeparatedByJob() throws Exception { final OneShotLatch taskExecutorIsRegistered = new OneShotLatch(); final TestingResourceManagerGateway resourceManagerGateway = createRmWithTmRegisterAndNotifySlotHooks( new InstanceID(), taskExecutorIsRegistered, new CompletableFuture<>()); final CompletableFuture<Collection<SlotOffer>> firstOfferResponseFuture = new CompletableFuture<>(); final CompletableFuture<Collection<SlotOffer>> secondOfferResponseFuture = new CompletableFuture<>(); final Queue<CompletableFuture<Collection<SlotOffer>>> slotOfferResponses = new ArrayDeque<>( Arrays.asList(firstOfferResponseFuture, secondOfferResponseFuture)); final MultiShotLatch offerSlotsLatch = new MultiShotLatch(); final TestingJobMasterGateway jobMasterGateway1 = new TestingJobMasterGatewayBuilder() .setAddress("jm1") .setOfferSlotsFunction( (resourceID, slotOffers) -> { offerSlotsLatch.trigger(); return slotOfferResponses.remove(); }) .build(); final TestingJobMasterGateway jobMasterGateway2 = new TestingJobMasterGatewayBuilder() .setAddress("jm2") .setOfferSlotsFunction( (resourceID, slotOffers) -> { offerSlotsLatch.trigger(); return slotOfferResponses.remove(); }) .build(); rpc.registerGateway(resourceManagerGateway.getAddress(), resourceManagerGateway); rpc.registerGateway(jobMasterGateway1.getAddress(), jobMasterGateway1); rpc.registerGateway(jobMasterGateway2.getAddress(), jobMasterGateway2); final TaskSlotTable<Task> taskSlotTable = TaskSlotUtils.createTaskSlotTable(2, EXECUTOR_EXTENSION.getExecutor()); final TaskManagerServices taskManagerServices = createTaskManagerServicesWithTaskSlotTable(taskSlotTable); final TestingTaskExecutor taskExecutor = createTestingTaskExecutor(taskManagerServices); final ThreadSafeTaskSlotTable<Task> threadSafeTaskSlotTable = new ThreadSafeTaskSlotTable<>( taskSlotTable, taskExecutor.getMainThreadExecutableForTesting()); final SlotOffer slotOffer1 = new SlotOffer(new AllocationID(), 0, ResourceProfile.ANY); final SlotOffer slotOffer2 = new SlotOffer(new AllocationID(), 1, ResourceProfile.ANY); try { taskExecutor.start(); taskExecutor.waitUntilStarted(); final TaskExecutorGateway tmGateway = taskExecutor.getSelfGateway(TaskExecutorGateway.class); // wait until task executor registered at the RM taskExecutorIsRegistered.await(); // notify job leader to start slot offering jobManagerLeaderRetriever.notifyListener( jobMasterGateway1.getAddress(), jobMasterGateway1.getFencingToken().toUUID()); jobManagerLeaderRetriever2.notifyListener( jobMasterGateway2.getAddress(), jobMasterGateway2.getFencingToken().toUUID()); // request the first slot requestSlot( tmGateway, jobId, slotOffer1.getAllocationId(), buildSlotID(slotOffer1.getSlotIndex()), ResourceProfile.UNKNOWN, jobMasterGateway1.getAddress(), resourceManagerGateway.getFencingToken()); // wait until first slot offer has arrived offerSlotsLatch.await(); // request second slot, triggering another offer containing both slots requestSlot( tmGateway, jobId2, slotOffer2.getAllocationId(), buildSlotID(slotOffer2.getSlotIndex()), ResourceProfile.UNKNOWN, jobMasterGateway2.getAddress(), resourceManagerGateway.getFencingToken()); // wait until second slot offer has arrived offerSlotsLatch.await(); firstOfferResponseFuture.complete(Collections.singletonList(slotOffer1)); secondOfferResponseFuture.complete(Collections.singletonList(slotOffer2)); assertThat(threadSafeTaskSlotTable.getActiveTaskSlotAllocationIdsPerJob(jobId)) .contains(slotOffer1.getAllocationId()); assertThat(threadSafeTaskSlotTable.getActiveTaskSlotAllocationIdsPerJob(jobId2)) .contains(slotOffer2.getAllocationId()); } finally { RpcUtils.terminateRpcEndpoint(taskExecutor); } }
@Override public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final S3Protocol.AuthenticationHeaderSignatureVersion signatureVersion = session.getSignatureVersion(); switch(signatureVersion) { case AWS4HMACSHA256: if(!HashAlgorithm.sha256.equals(status.getChecksum().algorithm)) { // Checksum not set in upload filter status.setChecksum(writer.checksum(file, status).compute(local.getInputStream(), status)); } break; } try { return super.upload(file, local, throttle, listener, status, callback); } catch(InteroperabilityException e) { if(!session.getSignatureVersion().equals(signatureVersion)) { // Retry if upload fails with Header "x-amz-content-sha256" set to the hex-encoded SHA256 hash of the // request payload is required for AWS Version 4 request signing return this.upload(file, local, throttle, listener, status, callback); } throw e; } }
@Test(expected = NotfoundException.class) public void testUploadInvalidContainer() throws Exception { final S3SingleUploadService m = new S3SingleUploadService(session, new S3WriteFeature(session, new S3AccessControlListFeature(session))); final Path container = new Path("nosuchcontainer.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString()); LocalTouchFactory.get().touch(local); final TransferStatus status = new TransferStatus(); m.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), status, new DisabledLoginCallback()); }
public static <K, InputT> GroupIntoBatches<K, InputT> ofByteSize(long batchSizeBytes) { return new GroupIntoBatches<K, InputT>(BatchingParams.createDefault()) .withByteSize(batchSizeBytes); }
@Test @Category({ ValidatesRunner.class, NeedsRunner.class, UsesTestStream.class, UsesTimersInParDo.class, UsesStatefulParDo.class, UsesOnWindowExpiration.class }) public void testInGlobalWindowBatchSizeByteSizeFn() { SerializableFunction<String, Long> getElementByteSizeFn = s -> { try { return 2 * StringUtf8Coder.of().getEncodedElementByteSize(s); } catch (Exception e) { throw new RuntimeException(e); } }; // to ensure ordered processing TestStream.Builder<KV<String, String>> streamBuilder = TestStream.create(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())) .advanceWatermarkTo(Instant.EPOCH); long offset = 0L; for (KV<String, String> kv : data) { streamBuilder = streamBuilder.addElements( TimestampedValue.of(kv, Instant.EPOCH.plus(Duration.standardSeconds(offset)))); offset++; } // fire them all at once TestStream<KV<String, String>> stream = streamBuilder.advanceWatermarkToInfinity(); PCollection<KV<String, Iterable<String>>> collection = pipeline .apply("Input data", stream) .apply(GroupIntoBatches.ofByteSize(BATCH_SIZE_BYTES, getElementByteSizeFn)) // set output coder .setCoder(KvCoder.of(StringUtf8Coder.of(), IterableCoder.of(StringUtf8Coder.of()))); PAssert.that("Incorrect batch size in one or more elements", collection) .satisfies( new SerializableFunction<Iterable<KV<String, Iterable<String>>>, Void>() { @Override public Void apply(Iterable<KV<String, Iterable<String>>> input) { assertTrue(checkBatchByteSizes(input, getElementByteSizeFn)); assertEquals("Invalid batch count", 9L, Iterables.size(input)); return null; } }); pipeline.run(); }
public void calculate(IThrowableProxy tp) { while (tp != null) { populateFrames(tp.getStackTraceElementProxyArray()); IThrowableProxy[] suppressed = tp.getSuppressed(); if (suppressed != null) { for (IThrowableProxy current : suppressed) { populateFrames(current.getStackTraceElementProxyArray()); } } tp = tp.getCause(); } }
@Test // Test http://jira.qos.ch/browse/LBCLASSIC-125 public void noClassDefFoundError_LBCLASSIC_125Test() throws MalformedURLException { ClassLoader cl = (URLClassLoader) makeBogusClassLoader(); Thread.currentThread().setContextClassLoader(cl); Throwable t = new Throwable("x"); ThrowableProxy tp = new ThrowableProxy(t); StackTraceElementProxy[] stepArray = tp.getStackTraceElementProxyArray(); StackTraceElement bogusSTE = new StackTraceElement("com.Bogus", "myMethod", "myFile", 12); stepArray[0] = new StackTraceElementProxy(bogusSTE); PackagingDataCalculator pdc = tp.getPackagingDataCalculator(); // NoClassDefFoundError should be caught pdc.calculate(tp); }
@SuppressWarnings("unchecked") static Object extractFromRecordValue(Object recordValue, String fieldName) { List<String> fields = Splitter.on('.').splitToList(fieldName); if (recordValue instanceof Struct) { return valueFromStruct((Struct) recordValue, fields); } else if (recordValue instanceof Map) { return valueFromMap((Map<String, ?>) recordValue, fields); } else { throw new UnsupportedOperationException( "Cannot extract value from type: " + recordValue.getClass().getName()); } }
@Test public void testExtractFromRecordValueMap() { Map<String, Object> val = ImmutableMap.of("key", 123L); Object result = RecordUtils.extractFromRecordValue(val, "key"); assertThat(result).isEqualTo(123L); }
public static <NodeT> Iterable<NodeT> topologicalOrder(Network<NodeT, ?> network) { return computeTopologicalOrder(Graphs.copyOf(network)); }
@Test public void testTopologicalSortWithSuborder() { // This cast is required to narrow the type accepted by the comparator Comparator<String> subOrder = (Comparator<String>) (Comparator) Ordering.arbitrary(); MutableNetwork<String, String> network = createNetwork(); Iterable<String> sortedNodes = Networks.topologicalOrder(network, subOrder); MutableNetwork<String, String> naturalOrderedNetwork = NetworkBuilder.from(network) .nodeOrder(ElementOrder.<String>natural()) .edgeOrder(ElementOrder.<String>natural()) .build(); MutableNetwork<String, String> arbitraryOrderNetwork = NetworkBuilder.from(network) .nodeOrder(ElementOrder.unordered()) .edgeOrder(ElementOrder.unordered()) .build(); MutableNetwork<String, String> reverseNaturalOrderNetwork = NetworkBuilder.from(network) .nodeOrder(ElementOrder.sorted(Ordering.natural().reverse())) .edgeOrder(ElementOrder.sorted(Ordering.natural().reverse())) .build(); for (String node : network.nodes()) { naturalOrderedNetwork.addNode(node); arbitraryOrderNetwork.addNode(node); reverseNaturalOrderNetwork.addNode(node); } for (String edge : network.edges()) { EndpointPair<String> incident = network.incidentNodes(edge); naturalOrderedNetwork.addEdge(incident.source(), incident.target(), edge); arbitraryOrderNetwork.addEdge(incident.source(), incident.target(), edge); reverseNaturalOrderNetwork.addEdge(incident.source(), incident.target(), edge); } Iterable<String> naturalSortedNodes = Networks.topologicalOrder(naturalOrderedNetwork, subOrder); Iterable<String> arbitrarySortedNodes = Networks.topologicalOrder(arbitraryOrderNetwork, subOrder); Iterable<String> reverseNaturalSortedNodes = Networks.topologicalOrder(reverseNaturalOrderNetwork, subOrder); assertThat(sortedNodes, equalTo(naturalSortedNodes)); assertThat(sortedNodes, equalTo(arbitrarySortedNodes)); assertThat(sortedNodes, equalTo(reverseNaturalSortedNodes)); }
public List<String> getAllExtensions() { final List<String> extensions = new LinkedList<>(); extensions.add(defaultExtension); extensions.addAll(Arrays.asList(otherExtensions)); return Collections.unmodifiableList(extensions); }
@Test public void testGetAllExtensions() throws Exception { final ResourceType BPMN2 = ResourceType.BPMN2; final List<String> extensionsBPMN2 = BPMN2.getAllExtensions(); assertThat(extensionsBPMN2.size()).isEqualTo(3); assertThat(extensionsBPMN2.contains("bpmn")).isTrue(); assertThat(extensionsBPMN2.contains("bpmn2")).isTrue(); assertThat(extensionsBPMN2.contains("bpmn-cm")).isTrue(); assertThat(extensionsBPMN2.contains("bpmn2-cm")).isFalse(); final ResourceType DRL = ResourceType.DRL; final List<String> extensionsDRL = DRL.getAllExtensions(); assertThat(extensionsDRL.size()).isEqualTo(1); assertThat(extensionsDRL.contains("drl")).isTrue(); }
protected void build(C instance) { if (!StringUtils.isEmpty(id)) { instance.setId(id); } }
@Test void build() { Builder builder = new Builder(); builder.id("id"); Config config = builder.build(); Config config2 = builder.build(); Assertions.assertEquals("id", config.getId()); Assertions.assertNotSame(config, config2); }
@Override public T getHollowObject(int ordinal) { List<T> refCachedItems = cachedItems; if (refCachedItems == null) { throw new IllegalStateException(String.format("HollowObjectCacheProvider for type %s has been detached or was not initialized", typeReadState == null ? null : typeReadState.getSchema().getName())); } if (refCachedItems.size() <= ordinal) { throw new IllegalStateException(String.format("Ordinal %s is out of bounds for pojo cache array of size %s.", ordinal, refCachedItems.size())); } return refCachedItems.get(ordinal); }
@Test public void preExisting() { TypeA a0 = typeA(0); TypeA a1 = typeA(1); TypeA a2 = typeA(2); prepopulate(a0, a1, a2); assertEquals(a0, subject.get().getHollowObject(a0.ordinal)); assertEquals(a1, subject.get().getHollowObject(a1.ordinal)); assertEquals(a2, subject.get().getHollowObject(a2.ordinal)); }
@Override public Result analysis( final Result result, final StreamAccessLogsMessage.Identifier identifier, final HTTPAccessLogEntry entry, final Role role ) { switch (role) { case PROXY: return analyzeProxy(result, entry); case SIDECAR: if (result.hasResult()) { return result; } return analyzeSideCar(result, entry); } return Result.builder().build(); }
@Test public void testSidecar2SidecarServerMetric() throws IOException { try (InputStreamReader isr = new InputStreamReader(getResourceAsStream("envoy-mesh-server-sidecar.msg"))) { StreamAccessLogsMessage.Builder requestBuilder = StreamAccessLogsMessage.newBuilder(); JsonFormat.parser().merge(isr, requestBuilder); AccessLogAnalyzer.Result result = this.analysis.analysis(AccessLogAnalyzer.Result.builder().build(), requestBuilder.getIdentifier(), requestBuilder.getHttpLogs().getLogEntry(0), Role.SIDECAR); Assertions.assertEquals(1, result.getMetrics().getHttpMetrics().getMetricsCount()); HTTPServiceMeshMetric incoming = result.getMetrics().getHttpMetrics().getMetrics(0); Assertions.assertEquals("productpage", incoming.getSourceServiceName()); Assertions.assertEquals("review", incoming.getDestServiceName()); Assertions.assertEquals(DetectPoint.server, incoming.getDetectPoint()); } }
@Override public double quantile(double p) { if (p < 0.0 || p > 1.0) { throw new IllegalArgumentException("Invalid p: " + p); } if (p == 0.0) { return Math.max(0, m+n-N); } if (p == 1.0) { return Math.min(m,n); } // Starting guess near peak of density. // Expand interval until we bracket. int kl, ku, inc = 1; int k = Math.max(0, Math.min(n, (int) (n * p))); if (p < cdf(k)) { do { k = Math.max(k - inc, 0); inc *= 2; } while (p < cdf(k) && k > 0); kl = k; ku = k + inc / 2; } else { do { k = Math.min(k + inc, n + 1); inc *= 2; } while (p > cdf(k)); ku = k; kl = k - inc / 2; } return quantile(p, kl, ku); }
@Test public void testQuantile() { System.out.println("quantile"); HyperGeometricDistribution instance = new HyperGeometricDistribution(100, 30, 70); instance.rand(); assertEquals(0, instance.quantile(0), 1E-30); assertEquals(14, instance.quantile(0.001), 1E-27); assertEquals(16, instance.quantile(0.01), 1E-25); assertEquals(18, instance.quantile(0.1), 1E-25); assertEquals(19, instance.quantile(0.2), 1E-7); assertEquals(20, instance.quantile(0.3), 1E-7); assertEquals(24, instance.quantile(0.9), 1E-8); assertEquals(26, instance.quantile(0.99), 1E-10); assertEquals(27, instance.quantile(0.999), 1E-12); assertEquals(30, instance.quantile(1), 1E-6); }
@Override public List<Map<String, String>> taskConfigs(int maxTasks) { if (knownConsumerGroups == null) { // If knownConsumerGroup is null, it means the initial loading has not finished. // An exception should be thrown to trigger the retry behavior in the framework. log.debug("Initial consumer loading has not yet completed"); throw new RetriableException("Timeout while loading consumer groups."); } // if the replication is disabled, known consumer group is empty, or checkpoint emission is // disabled by setting 'emit.checkpoints.enabled' to false, the interval of checkpoint emission // will be negative and no 'MirrorCheckpointTask' will be created if (!config.enabled() || knownConsumerGroups.isEmpty() || config.emitCheckpointsInterval().isNegative()) { return Collections.emptyList(); } int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); List<List<String>> groupsPartitioned = ConnectorUtils.groupPartitions(new ArrayList<>(knownConsumerGroups), numTasks); return IntStream.range(0, numTasks) .mapToObj(i -> config.taskConfigForConsumerGroups(groupsPartitioned.get(i), i)) .collect(Collectors.toList()); }
@Test public void testMirrorCheckpointConnectorDisabled() { // disable the checkpoint emission MirrorCheckpointConfig config = new MirrorCheckpointConfig( makeProps("emit.checkpoints.enabled", "false")); Set<String> knownConsumerGroups = new HashSet<>(); knownConsumerGroups.add(CONSUMER_GROUP); // MirrorCheckpointConnector as minimum to run taskConfig() // expect no task will be created List<Map<String, String>> output = new MirrorCheckpointConnector(knownConsumerGroups, config).taskConfigs(1); assertEquals(0, output.size(), "MirrorCheckpointConnector not disabled"); }
public EurekaDataSource(String appId, String instanceId, List<String> serviceUrls, String ruleKey, Converter<String, T> configParser) { this(appId, instanceId, serviceUrls, ruleKey, configParser, DEFAULT_REFRESH_MS, DEFAULT_CONNECT_TIMEOUT_MS, DEFAULT_READ_TIMEOUT_MS); }
@Test public void testEurekaDataSource() throws Exception { String url = "http://localhost:" + port + "/eureka"; EurekaDataSource<List<FlowRule>> eurekaDataSource = new EurekaDataSource(appname, instanceId, Arrays.asList(url) , SENTINEL_KEY, new Converter<String, List<FlowRule>>() { @Override public List<FlowRule> convert(String source) { return JSON.parseObject(source, new TypeReference<List<FlowRule>>() { }); } }); FlowRuleManager.register2Property(eurekaDataSource.getProperty()); await().timeout(15, TimeUnit.SECONDS) .until(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return FlowRuleManager.getRules().size() > 0; } }); Assert.assertTrue(FlowRuleManager.getRules().size() > 0); }
@Override public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc, boolean addFieldName, boolean addCr ) { return fallback.getFieldDefinition( v, tk, pk, useAutoinc, addFieldName, addCr ); }
@Test public void testStringFieldDef() throws Exception { DatabricksDatabaseMeta dbricks = new DatabricksDatabaseMeta(); String fieldDef = dbricks.getFieldDefinition( new ValueMetaString( "name" ), null, null, false, false, false ); assertEquals( "VARCHAR()", fieldDef ); }
@Override public void doRegister(ServiceInstance serviceInstance) { execute(namingService, service -> { Instance instance = toInstance(serviceInstance); service.registerInstance(instance.getServiceName(), group, instance); }); }
@Test void testDoRegister() throws NacosException { DefaultServiceInstance serviceInstance = createServiceInstance(SERVICE_NAME, LOCALHOST, NetUtils.getAvailablePort()); // register nacosServiceDiscovery.doRegister(serviceInstance); ArgumentCaptor<Instance> instanceCaptor = ArgumentCaptor.forClass(Instance.class); verify(namingServiceWrapper, times(1)).registerInstance(any(), eq(group), instanceCaptor.capture()); Instance capture = instanceCaptor.getValue(); assertEquals(SERVICE_NAME, capture.getServiceName()); assertEquals(LOCALHOST, capture.getIp()); assertEquals(serviceInstance.getPort(), capture.getPort()); }
static void filterProperties(Message message, Set<String> namesToClear) { List<Object> retainedProperties = messagePropertiesBuffer(); try { filterProperties(message, namesToClear, retainedProperties); } finally { retainedProperties.clear(); // ensure no object references are held due to any exception } }
@Test void filterProperties_message_passesFatalOnSetException() throws JMSException { Message message = mock(Message.class); when(message.getPropertyNames()).thenReturn( Collections.enumeration(Collections.singletonList("JMS_SQS_DeduplicationId"))); when(message.getObjectProperty("JMS_SQS_DeduplicationId")).thenReturn(""); doThrow(new LinkageError()).when(message).setObjectProperty(anyString(), eq("")); assertThatThrownBy( () -> PropertyFilter.filterProperties(message, Collections.singleton("b3"))).isInstanceOf( LinkageError.class); }
@Override protected Map<String, String> getHealthInformation() { if (!configuration.getBackgroundJobServer().isEnabled()) { healthStatus = HealthStatus.UP; return mapOf("backgroundJobServer", "disabled"); } else { if (backgroundJobServer.isRunning()) { healthStatus = HealthStatus.UP; return mapOf( "backgroundJobServer", "enabled", "backgroundJobServerStatus", "running" ); } else { healthStatus = HealthStatus.DOWN; return mapOf( "backgroundJobServer", "enabled", "backgroundJobServerStatus", "stopped" ); } } }
@Test void givenEnabledBackgroundJobServerAndBackgroundJobServerRunning_ThenHealthIsUp() { when(backgroundJobServerConfiguration.isEnabled()).thenReturn(true); when(backgroundJobServer.isRunning()).thenReturn(true); jobRunrHealthIndicator.getHealthInformation(); assertThat(jobRunrHealthIndicator.getHealthStatus()).isEqualTo(HealthStatus.UP); }
@Override public void triggerJob(Long id) throws SchedulerException { // validate that the job exists JobDO job = validateJobExists(id); // trigger the job in Quartz schedulerManager.triggerJob(job.getId(), job.getHandlerName(), job.getHandlerParam()); }
@Test public void testTriggerJob_success() throws SchedulerException { // mock data JobDO job = randomPojo(JobDO.class); jobMapper.insert(job); // invoke jobService.triggerJob(job.getId()); // verify the invocation verify(schedulerManager).triggerJob(eq(job.getId()), eq(job.getHandlerName()), eq(job.getHandlerParam())); }
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) { checkArgument( OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp); return new AutoValue_UBinary(binaryOp, lhs, rhs); }
@Test public void plus() { assertUnifiesAndInlines( "4 + 17", UBinary.create(Kind.PLUS, ULiteral.intLit(4), ULiteral.intLit(17))); }
@Override public void start() { List<FrameworkFactory> frameworkFactories = IteratorUtils.toList(ServiceLoader.load(FrameworkFactory.class).iterator()); if (frameworkFactories.size() != 1) { throw new RuntimeException("One OSGi framework expected. Got " + frameworkFactories.size() + ": " + frameworkFactories); } try { framework = getFelixFramework(frameworkFactories); framework.start(); registerInternalServices(framework.getBundleContext()); } catch (BundleException e) { throw new RuntimeException("Failed to initialize OSGi framework", e); } }
@Test void shouldRegisterAnInstanceOfEachOfTheRequiredPluginServicesAfterOSGiFrameworkIsInitialized() { spy.start(); verify(bundleContext).registerService(eq(PluginRegistryService.class), any(DefaultPluginRegistryService.class), isNull()); verify(bundleContext).registerService(eq(LoggingService.class), any(DefaultPluginLoggingService.class), isNull()); }
@ShellMethod(key = "compactions show all", value = "Shows all compactions that are in active timeline")
public String compactionsAll(
        @ShellOption(value = {"--includeExtraMetadata"}, help = "Include extra metadata", defaultValue = "false") final boolean includeExtraMetadata,
        @ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit,
        @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField,
        @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending,
        @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly) {
    HoodieTableMetaClient client = checkAndGetMetaClient();
    HoodieActiveTimeline activeTimeline = client.getActiveTimeline();
    return printAllCompactions(activeTimeline,
            compactionPlanReader(this::readCompactionPlanForActiveTimeline, activeTimeline),
            includeExtraMetadata, sortByField, descending, limit, headerOnly);
}
@Test
public void testVerifyTableType() throws IOException {
    // create COW table.
    new TableCommand().createTable(
        tablePath, tableName, HoodieTableType.COPY_ON_WRITE.name(),
        "", TimelineLayoutVersion.VERSION_1, HoodieAvroPayload.class.getName());

    // expect HoodieException for COPY_ON_WRITE table.
    assertThrows(HoodieException.class,
        () -> new CompactionCommand().compactionsAll(false, -1, "", false, false));
}
@Override
public CompletableFuture<ClusterInfo> getBrokerClusterInfo(String address, long timeoutMillis) {
    CompletableFuture<ClusterInfo> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_BROKER_CLUSTER_INFO, null);
    remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
        if (response.getCode() == ResponseCode.SUCCESS) {
            // decode the response body into the cluster topology and complete normally
            ClusterInfo clusterInfo = ClusterInfo.decode(response.getBody(), ClusterInfo.class);
            future.complete(clusterInfo);
        } else {
            // surface broker-side failures as an MQClientException carrying the response code and remark
            log.warn("getBrokerClusterInfo getResponseCommand failed, {} {}", response.getCode(), response.getRemark());
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    });
    return future;
}
@Test
public void assertGetBrokerClusterInfoWithError() {
    setResponseError();
    CompletableFuture<ClusterInfo> actual = mqClientAdminImpl.getBrokerClusterInfo(defaultBrokerAddr, defaultTimeout);

    Throwable thrown = assertThrows(ExecutionException.class, actual::get);
    assertTrue(thrown.getCause() instanceof MQClientException);
    MQClientException mqException = (MQClientException) thrown.getCause();
    assertEquals(ResponseCode.SYSTEM_ERROR, mqException.getResponseCode());
    assertTrue(mqException.getMessage().contains("CODE: 1 DESC: null"));
}
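A hedged sketch of the success path, assuming the test fixture exposes the mocked remotingClient that backs mqClientAdminImpl (as the error-path test above implies); an encoded empty ClusterInfo is enough for ClusterInfo.decode in the focal method to return a non-null value.

@Test
public void assertGetBrokerClusterInfoSuccess() throws Exception {
    RemotingCommand response = RemotingCommand.createResponseCommand(ResponseCode.SUCCESS, null);
    response.setBody(new ClusterInfo().encode());
    when(remotingClient.invoke(anyString(), any(RemotingCommand.class), anyLong()))
        .thenReturn(CompletableFuture.completedFuture(response));

    ClusterInfo actual = mqClientAdminImpl.getBrokerClusterInfo(defaultBrokerAddr, defaultTimeout).get();

    assertNotNull(actual);
}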
public static String substringAfter(String s, String splitter) {
    final int indexOf = s.indexOf(splitter);
    return indexOf >= 0 ? s.substring(indexOf + splitter.length()) : null;
}
@Test
void testSubstringAfterSplitterMultiChar() {
    assertThat(substringAfter("this is a test", " is ")).isEqualTo("a test");
    assertThat(substringAfter("this is a test", " was ")).isNull();
}
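A few additional edge cases, sketched here and derived directly from the implementation above: a match at index 0 returns everything after the splitter, and an empty splitter matches at index 0. The test method name is illustrative.

@Test
void testSubstringAfterEdgeCases() {
    // splitter found at the very beginning: the remainder of the string is returned
    assertThat(substringAfter("this is a test", "this")).isEqualTo(" is a test");
    // an empty splitter matches at index 0, so the whole string comes back
    assertThat(substringAfter("this is a test", "")).isEqualTo("this is a test");
    // splitter equal to the whole input leaves nothing after it
    assertThat(substringAfter("this", "this")).isEmpty();
}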
@Override
public CloseableIterator<ScannerReport.LineCoverage> readComponentCoverage(int fileRef) {
    ensureInitialized();
    return delegate.readComponentCoverage(fileRef);
}
@Test
public void readComponentCoverage_returns_empty_CloseableIterator_when_file_does_not_exist() {
    assertThat(underTest.readComponentCoverage(COMPONENT_REF)).isExhausted();
}
public void run() {
    try {
        if ( job.isStopped() || ( job.getParentJob() != null && job.getParentJob().isStopped() ) ) {
            return;
        }

        // This JobEntryRunner is a replacement for the Job thread.
        // The job thread is never started because we simply want to wait for the result.
        //
        ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobStart.id, getJob() );

        job.fireJobStartListeners(); // Fire the start listeners
        result = job.execute( entryNr + 1, result );
    } catch ( KettleException e ) {
        e.printStackTrace();
        log.logError( "An error occurred executing this job entry : ", e );
        result.setResult( false );
        result.setNrErrors( 1 );
    } finally {
        //[PDI-14981] otherwise will get null pointer exception if 'job finished' listeners will be using it
        job.setResult( result );
        try {
            ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobFinish.id, getJob() );
            job.getJobMeta().disposeEmbeddedMetastoreProvider();
            log.logDebug( BaseMessages.getString( PKG, "Job.Log.DisposeEmbeddedMetastore" ) );
            job.fireJobFinishListeners();
            //catch more general exception to prevent thread hanging
        } catch ( Exception e ) {
            result.setNrErrors( 1 );
            result.setResult( false );
            log.logError( BaseMessages.getString( PKG, "Job.Log.ErrorExecJob", e.getMessage() ), e );
        }
        job.setFinished( true );
    }
    finished = true;
}
@Test
public void testRun() throws Exception {
    // Call all the NO-OP paths
    when( mockJob.isStopped() ).thenReturn( true );
    jobRunner.run();

    when( mockJob.isStopped() ).thenReturn( false );
    when( mockJob.getParentJob() ).thenReturn( null );
    when( mockJob.getJobMeta() ).thenReturn( mockJobMeta );
    jobRunner.run();

    when( parentJob.isStopped() ).thenReturn( true );
    when( mockJob.getParentJob() ).thenReturn( parentJob );
    jobRunner.run();

    when( parentJob.isStopped() ).thenReturn( false );
    when( mockJob.execute( Mockito.anyInt(), Mockito.any( Result.class ) ) ).thenReturn( mockResult );
    jobRunner.run();
}
public static Timestamp toTimestamp(BigDecimal bigDecimal) {
    final BigDecimal nanos = bigDecimal.remainder(BigDecimal.ONE.scaleByPowerOfTen(9));
    final BigDecimal seconds = bigDecimal.subtract(nanos).scaleByPowerOfTen(-9).add(MIN_SECONDS);
    return Timestamp.ofTimeSecondsAndNanos(seconds.longValue(), nanos.intValue());
}
@Test
public void testToTimestampConvertNanosToTimestamp() {
    assertEquals(
        Timestamp.ofTimeSecondsAndNanos(10L, 9),
        TimestampUtils.toTimestamp(new BigDecimal("62135596810000000009")));
}
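The expected value can be reproduced by hand with the same decomposition as the focal method. The standalone sketch below assumes MIN_SECONDS is the epoch second of Timestamp.MIN_VALUE (0001-01-01T00:00:00Z, i.e. -62135596800): the low nine decimal digits carry the nanoseconds (9), and the remaining 62135596810 seconds rebased onto MIN_SECONDS give 10 seconds, matching the test. The class name is illustrative.

import java.math.BigDecimal;

// Standalone sketch of the arithmetic in toTimestamp; MIN_SECONDS is an assumption
// matching Timestamp.MIN_VALUE = 0001-01-01T00:00:00Z (epoch second -62135596800).
public class TimestampDecompositionSketch {
    private static final BigDecimal MIN_SECONDS = BigDecimal.valueOf(-62135596800L);

    public static void main(String[] args) {
        BigDecimal value = new BigDecimal("62135596810000000009");
        // the low nine decimal digits carry the nanoseconds
        BigDecimal nanos = value.remainder(BigDecimal.ONE.scaleByPowerOfTen(9));
        // the rest, shifted down by 10^9 and rebased onto MIN_SECONDS, carries the seconds
        BigDecimal seconds = value.subtract(nanos).scaleByPowerOfTen(-9).add(MIN_SECONDS);
        System.out.println(seconds.longValue() + "s " + nanos.intValue() + "ns"); // prints "10s 9ns"
    }
}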
@Override
public PathAttributes toAttributes(final StorageObject object) {
    final PathAttributes attributes = new PathAttributes();
    attributes.setSize(object.getContentLength());
    final Date lastmodified = object.getLastModifiedDate();
    if(lastmodified != null) {
        attributes.setModificationDate(lastmodified.getTime());
    }
    if(StringUtils.isNotBlank(object.getStorageClass())) {
        attributes.setStorageClass(object.getStorageClass());
    }
    else if(object.containsMetadata("storage-class")) {
        attributes.setStorageClass(object.getMetadata("storage-class").toString());
    }
    if(StringUtils.isNotBlank(object.getETag())) {
        attributes.setETag(StringUtils.remove(object.getETag(), '"'));
    }
    // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
    // using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
    // not the MD5 of the object data.
    attributes.setChecksum(Checksum.parse(object.getETag()));
    if(object instanceof S3Object) {
        if(new HostPreferences(host).getBoolean("s3.listing.versioning.enable")) {
            attributes.setVersionId(((S3Object) object).getVersionId());
        }
    }
    if(object.containsMetadata(METADATA_HEADER_SERVER_SIDE_ENCRYPTION_KMS_KEY_ID)) {
        attributes.setEncryption(new Encryption.Algorithm(object.getServerSideEncryptionAlgorithm(),
                object.getMetadata(METADATA_HEADER_SERVER_SIDE_ENCRYPTION_KMS_KEY_ID).toString()) {
            @Override
            public String getDescription() {
                return String.format("SSE-KMS (%s)", key);
            }
        });
    }
    else {
        if(null != object.getServerSideEncryptionAlgorithm()) {
            // AES256
            attributes.setEncryption(new Encryption.Algorithm(object.getServerSideEncryptionAlgorithm(), null) {
                @Override
                public String getDescription() {
                    return "SSE-S3 (AES-256)";
                }
            });
        }
    }
    final Map<String, String> metadata = metadata(object);
    if(!metadata.isEmpty()) {
        attributes.setMetadata(metadata);
    }
    final Long mtime = S3TimestampFeature.fromHeaders(S3TimestampFeature.METADATA_MODIFICATION_DATE,
            Maps.transformValues(object.getMetadataMap(), Object::toString));
    if(-1L != mtime) {
        attributes.setModificationDate(mtime);
    }
    final Long ctime = S3TimestampFeature.fromHeaders(S3TimestampFeature.METADATA_CREATION_DATE,
            Maps.transformValues(object.getMetadataMap(), Object::toString));
    if(-1L != ctime) {
        attributes.setCreationDate(ctime);
    }
    return attributes;
}
@Test
public void testMtime() {
    final StorageObject object = new StorageObject();
    object.addMetadata("ETag", "a43c1b0aa53a0c908810c06ab1ff3967");
    object.addMetadata("Mtime", "1647683127.160620746");
    assertEquals(1647683127000L, new S3AttributesAdapter(new Host(new S3Protocol())).toAttributes(object).getModificationDate());
}
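A further sketch in the same style, assuming JetS3t's StorageObject#getETag exposes the value stored under the "ETag" metadata key (the test above already stores the ETag that way): surrounding quotes should not survive into the resulting attributes, per the quote-stripping in the focal method.

@Test
public void testETagQuotesStripped() {
    final StorageObject object = new StorageObject();
    object.addMetadata("ETag", "\"a43c1b0aa53a0c908810c06ab1ff3967\"");
    final PathAttributes attributes = new S3AttributesAdapter(new Host(new S3Protocol())).toAttributes(object);
    assertEquals("a43c1b0aa53a0c908810c06ab1ff3967", attributes.getETag());
}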