public static Config getConfig(
        Configuration configuration, @Nullable HostAndPort externalAddress) {
    return getConfig(
            configuration,
            externalAddress,
            null,
            PekkoUtils.getForkJoinExecutorConfig(
                    ActorSystemBootstrapTools.getForkJoinExecutorConfiguration(configuration)));
}
@Test
void getConfigNormalizesHostName() {
    final Configuration configuration = new Configuration();
    final String hostname = "AbC123foOBaR";
    final int port = 1234;

    final Config config = PekkoUtils.getConfig(configuration, new HostAndPort(hostname, port));

    assertThat(config.getString("pekko.remote.classic.netty.tcp.hostname"))
            .isEqualTo(NetUtils.unresolvedHostToNormalizedString(hostname));
}
public static Map<?, ?> convertToMap(Schema schema, Object value) {
    return convertToMapInternal(MAP_SELECTOR_SCHEMA, value);
}
@Test
public void shouldFailToParseStringOfMapWithIntValuesWithBlankEntry() {
    assertThrows(DataException.class,
            () -> Values.convertToMap(Schema.STRING_SCHEMA,
                    " { \"foo\" : 1234567890 ,, \"bar\" : 0, \"baz\" : -987654321 } "));
}
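// Hedged companion sketch (not from the source): the same parser should accept
// the well-formed variant of the string above, which isolates the blank ",,"
// entry as the cause of the DataException. Assumes the standard
// org.apache.kafka.connect.data.Values behavior of parsing map literals.
@Test
public void shouldParseStringOfMapWithIntValuesWithoutBlankEntry() {
    Map<?, ?> result = Values.convertToMap(Schema.STRING_SCHEMA,
            " { \"foo\" : 1234567890, \"bar\" : 0, \"baz\" : -987654321 } ");
    assertEquals(3, result.size());
}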
public FlowWithSource importFlow(String tenantId, String source) {
    return this.importFlow(tenantId, source, false);
}
@Test
void importFlow() {
    String source = """
        id: import
        namespace: some.namespace
        tasks:
          - id: task
            type: io.kestra.plugin.core.log.Log
            message: Hello""";

    Flow importFlow = flowService.importFlow("my-tenant", source);
    assertThat(importFlow.getId(), is("import"));
    assertThat(importFlow.getNamespace(), is("some.namespace"));
    assertThat(importFlow.getRevision(), is(1));
    assertThat(importFlow.getTasks().size(), is(1));
    assertThat(importFlow.getTasks().getFirst().getId(), is("task"));

    Optional<FlowWithSource> fromDb = flowRepository.findByIdWithSource("my-tenant", "some.namespace", "import", Optional.empty());
    assertThat(fromDb.isPresent(), is(true));
    assertThat(fromDb.get().getRevision(), is(1));
    assertThat(fromDb.get().getSource(), is(source));

    source = source.replace("id: task", "id: replaced_task");
    importFlow = flowService.importFlow("my-tenant", source);
    assertThat(importFlow.getRevision(), is(2));
    assertThat(importFlow.getTasks().size(), is(1));
    assertThat(importFlow.getTasks().getFirst().getId(), is("replaced_task"));

    fromDb = flowRepository.findByIdWithSource("my-tenant", "some.namespace", "import", Optional.empty());
    assertThat(fromDb.isPresent(), is(true));
    assertThat(fromDb.get().getRevision(), is(2));
    assertThat(fromDb.get().getSource(), is(source));
}
@Override
public void startElement(String uri, String localName, String qName, Attributes attributes)
        throws SAXException {
    super.startElement(uri, localName, qName, attributes);

    if ("img".equals(localName) && attributes.getValue("alt") != null) {
        String nfo = "[image: " + attributes.getValue("alt") + ']';
        characters(nfo.toCharArray(), 0, nfo.length());
    }
    if ("a".equals(localName) && attributes.getValue("name") != null) {
        String nfo = "[bookmark: " + attributes.getValue("name") + ']';
        characters(nfo.toCharArray(), 0, nfo.length());
    }
}
@Test
public void aTagTest() throws Exception {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    XHTMLContentHandler xhtml = new XHTMLContentHandler(
            new RichTextContentHandler(new OutputStreamWriter(buffer, UTF_8)), new Metadata());
    xhtml.startDocument();
    AttributesImpl attributes = new AttributesImpl();
    attributes.addAttribute("", "", "name", "", "value");
    xhtml.startElement("a", attributes);
    xhtml.endDocument();
    assertEquals("\n\n\n\n[bookmark: value]", buffer.toString(UTF_8.name()));
}
public QueueOperationResponse deleteMessage(final Exchange exchange) {
    ObjectHelper.notNull(exchange, MISSING_EXCHANGE);

    final String messageId = configurationOptionsProxy.getMessageId(exchange);
    final String popReceipt = configurationOptionsProxy.getPopReceipt(exchange);
    final Duration timeout = configurationOptionsProxy.getTimeout(exchange);

    if (ObjectHelper.isEmpty(messageId)) {
        throw new IllegalArgumentException(
                String.format("Message ID must be specified in camel headers '%s' for deleteMessage "
                        + "operation.", QueueConstants.MESSAGE_ID));
    }
    if (ObjectHelper.isEmpty(popReceipt)) {
        throw new IllegalArgumentException(
                String.format("Message Pop Receipt must be specified in camel headers '%s' for deleteMessage "
                        + "operation.", QueueConstants.POP_RECEIPT));
    }

    return buildResponseWithEmptyBody(client.deleteMessage(messageId, popReceipt, timeout));
}
@Test
public void testDeleteMessage() {
    // mocking
    final HttpHeaders httpHeaders = new HttpHeaders().set("x-test-header", "123");
    when(client.deleteMessage(any(), any(), any()))
            .thenReturn(new ResponseBase<>(null, 200, httpHeaders, null, null));

    final QueueOperations operations = new QueueOperations(configuration, client);
    final Exchange exchange = new DefaultExchange(context);

    // test if we have nothing set on exchange
    assertThrows(IllegalArgumentException.class, () -> operations.deleteMessage(exchange));

    exchange.getIn().setHeader(QueueConstants.MESSAGE_ID, "1");
    assertThrows(IllegalArgumentException.class, () -> operations.deleteMessage(exchange));

    exchange.getIn().removeHeader(QueueConstants.MESSAGE_ID);
    exchange.getIn().setHeader(QueueConstants.POP_RECEIPT, "12");
    assertThrows(IllegalArgumentException.class, () -> operations.deleteMessage(exchange));

    exchange.getIn().setHeader(QueueConstants.MESSAGE_ID, "1");
    final QueueOperationResponse response = operations.deleteMessage(exchange);

    assertNotNull(response);
    assertTrue((boolean) response.getBody());
}
@Override
public boolean equals(Object obj) {
    if (obj == null) {
        return false;
    }
    if (!(obj instanceof HttpQueryParams)) {
        return false;
    }
    HttpQueryParams hqp2 = (HttpQueryParams) obj;
    return Iterables.elementsEqual(delegate.entries(), hqp2.delegate.entries());
}
@Test
void testEquals() {
    HttpQueryParams qp1 = new HttpQueryParams();
    qp1.add("k1", "v1");
    qp1.add("k2", "v2");

    HttpQueryParams qp2 = new HttpQueryParams();
    qp2.add("k1", "v1");
    qp2.add("k2", "v2");

    assertEquals(qp1, qp2);
}
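// Hedged companion case: Iterables.elementsEqual in the focal method compares
// entries pairwise in iteration order, so if the backing multimap preserves
// insertion order (as the delegate appears to), the same pairs added in a
// different order should not compare equal.
@Test
void testEqualsIsOrderSensitive() {
    HttpQueryParams qp1 = new HttpQueryParams();
    qp1.add("k1", "v1");
    qp1.add("k2", "v2");

    HttpQueryParams qp2 = new HttpQueryParams();
    qp2.add("k2", "v2");
    qp2.add("k1", "v1");

    assertNotEquals(qp1, qp2);
}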
public static Expression convert(Predicate[] predicates) {
    Expression expression = Expressions.alwaysTrue();
    for (Predicate predicate : predicates) {
        Expression converted = convert(predicate);
        Preconditions.checkArgument(
                converted != null,
                "Cannot convert Spark predicate to Iceberg expression: %s",
                predicate);
        expression = Expressions.and(expression, converted);
    }
    return expression;
}
@Test
public void testEqualToNaN() {
    String col = "col";
    NamedReference namedReference = FieldReference.apply(col);
    LiteralValue value = new LiteralValue(Float.NaN, DataTypes.FloatType);
    org.apache.spark.sql.connector.expressions.Expression[] attrAndValue =
            new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, value};
    org.apache.spark.sql.connector.expressions.Expression[] valueAndAttr =
            new org.apache.spark.sql.connector.expressions.Expression[] {value, namedReference};

    Predicate eqNaN1 = new Predicate("=", attrAndValue);
    Expression expectedEqNaN = Expressions.isNaN(col);
    Expression actualEqNaN1 = SparkV2Filters.convert(eqNaN1);
    assertThat(actualEqNaN1.toString()).isEqualTo(expectedEqNaN.toString());

    Predicate eqNaN2 = new Predicate("=", valueAndAttr);
    Expression actualEqNaN2 = SparkV2Filters.convert(eqNaN2);
    assertThat(actualEqNaN2.toString()).isEqualTo(expectedEqNaN.toString());
}
public void resetPositionsIfNeeded() {
    Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();

    if (offsetResetTimestamps.isEmpty())
        return;

    resetPositionsAsync(offsetResetTimestamps);
}
@Test
public void testUpdateFetchPositionNoOpWithPositionSet() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 5L);

    offsetFetcher.resetPositionsIfNeeded();

    assertFalse(client.hasInFlightRequests());
    assertTrue(subscriptions.isFetchable(tp0));
    assertEquals(5, subscriptions.position(tp0).offset);
}
protected boolean initNextRecordReader() throws IOException {
    if (curReader != null) {
        curReader.close();
        curReader = null;
        if (idx > 0) {
            progress += split.getLength(idx - 1); // done processing so far
        }
    }

    // if all chunks have been processed, nothing more to do.
    if (idx == split.getNumPaths()) {
        return false;
    }

    reporter.progress();

    // get a record reader for the idx-th chunk
    try {
        curReader = rrConstructor.newInstance(new Object[]
                {split, jc, reporter, Integer.valueOf(idx)});

        // setup some helper config variables.
        jc.set(JobContext.MAP_INPUT_FILE, split.getPath(idx).toString());
        jc.setLong(JobContext.MAP_INPUT_START, split.getOffset(idx));
        jc.setLong(JobContext.MAP_INPUT_PATH, split.getLength(idx));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    idx++;
    return true;
}
@SuppressWarnings("unchecked") @Test public void testInitNextRecordReader() throws IOException{ JobConf conf = new JobConf(); Path[] paths = new Path[3]; long[] fileLength = new long[3]; File[] files = new File[3]; LongWritable key = new LongWritable(1); Text value = new Text(); try { for(int i=0;i<3;i++){ fileLength[i] = i; File dir = new File(outDir.toString()); dir.mkdir(); files[i] = new File(dir,"testfile"+i); FileWriter fileWriter = new FileWriter(files[i]); fileWriter.close(); paths[i] = new Path(outDir+"/testfile"+i); } CombineFileSplit combineFileSplit = new CombineFileSplit(conf, paths, fileLength); Reporter reporter = Mockito.mock(Reporter.class); CombineFileRecordReader cfrr = new CombineFileRecordReader(conf, combineFileSplit, reporter, TextRecordReaderWrapper.class); verify(reporter).progress(); Assert.assertFalse(cfrr.next(key,value)); verify(reporter, times(3)).progress(); } finally { FileUtil.fullyDelete(new File(outDir.toString())); } }
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(HANA_BOOLEAN);
            builder.dataType(HANA_BOOLEAN);
            builder.length(2L);
            break;
        case TINYINT:
            builder.columnType(HANA_TINYINT);
            builder.dataType(HANA_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(HANA_SMALLINT);
            builder.dataType(HANA_SMALLINT);
            break;
        case INT:
            builder.columnType(HANA_INTEGER);
            builder.dataType(HANA_INTEGER);
            break;
        case BIGINT:
            builder.columnType(HANA_BIGINT);
            builder.dataType(HANA_BIGINT);
            break;
        case FLOAT:
            builder.columnType(HANA_REAL);
            builder.dataType(HANA_REAL);
            break;
        case DOUBLE:
            builder.columnType(HANA_DOUBLE);
            builder.dataType(HANA_DOUBLE);
            break;
        case DECIMAL:
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", HANA_DECIMAL, precision, scale));
            builder.dataType(HANA_DECIMAL);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            builder.columnType(HANA_BLOB);
            builder.dataType(HANA_BLOB);
            break;
        case STRING:
            if (column.getColumnLength() == null
                    || column.getColumnLength() <= MAX_NVARCHAR_LENGTH) {
                builder.columnType(HANA_NVARCHAR);
                builder.dataType(HANA_NVARCHAR);
                builder.length(
                        column.getColumnLength() == null
                                ? MAX_NVARCHAR_LENGTH
                                : column.getColumnLength());
            } else {
                builder.columnType(HANA_CLOB);
                builder.dataType(HANA_CLOB);
            }
            break;
        case DATE:
            builder.columnType(HANA_DATE);
            builder.dataType(HANA_DATE);
            break;
        case TIME:
            builder.columnType(HANA_TIME);
            builder.dataType(HANA_TIME);
            break;
        case TIMESTAMP:
            if (column.getScale() == null || column.getScale() <= 0) {
                builder.columnType(HANA_SECONDDATE);
                builder.dataType(HANA_SECONDDATE);
            } else {
                int timestampScale = column.getScale();
                if (column.getScale() > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(HANA_TIMESTAMP);
                builder.dataType(HANA_TIMESTAMP);
                builder.scale(timestampScale);
            }
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.SAP_HANA,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    BasicTypeDefine typeDefine = builder.build();
    typeDefine.setColumnType(
            appendColumnSizeIfNeed(
                    typeDefine.getColumnType(), typeDefine.getLength(), typeDefine.getScale()));
    return typeDefine;
}
@Test
public void testReconvertDatetime() {
    Column column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE)
                    .build();

    BasicTypeDefine typeDefine = SapHanaTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(SapHanaTypeConverter.HANA_SECONDDATE, typeDefine.getColumnType());
    Assertions.assertEquals(SapHanaTypeConverter.HANA_SECONDDATE, typeDefine.getDataType());

    column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE)
                    .scale(3)
                    .build();

    typeDefine = SapHanaTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(SapHanaTypeConverter.HANA_TIMESTAMP, typeDefine.getColumnType());
    Assertions.assertEquals(SapHanaTypeConverter.HANA_TIMESTAMP, typeDefine.getDataType());
    Assertions.assertEquals(column.getScale(), typeDefine.getScale());
}
private GenericRow unnestCollection(GenericRow record, String column) {
    Object value = record.getValue(GenericRow.MULTIPLE_RECORDS_KEY);
    if (value == null) {
        List<GenericRow> list = new ArrayList<>();
        unnestCollection(record, column, list);
        record.putValue(GenericRow.MULTIPLE_RECORDS_KEY, list);
    } else {
        Collection<GenericRow> records = (Collection) value;
        List<GenericRow> list = new ArrayList<>();
        for (GenericRow innerRecord : records) {
            unnestCollection(innerRecord, column, list);
        }
        record.putValue(GenericRow.MULTIPLE_RECORDS_KEY, list);
    }
    return record;
}
@Test
public void testUnnestCollection() {
    // unnest root level collection
    // { "array":[ { "a":"v1" }, { "a":"v2" } ] }
    // ->
    // [ { "array.a":"v1" }, { "array.a":"v2" } ]
    ComplexTypeTransformer transformer = new ComplexTypeTransformer(Arrays.asList("array"), ".");
    GenericRow genericRow = new GenericRow();
    Object[] array = new Object[2];
    Map<String, Object> map1 = new HashMap<>();
    map1.put("a", "v1");
    Map<String, Object> map2 = new HashMap<>();
    map2.put("a", "v2");
    array[0] = map1;
    array[1] = map2;
    genericRow.putValue("array", array);
    transformer.transform(genericRow);
    Assert.assertNotNull(genericRow.getValue(GenericRow.MULTIPLE_RECORDS_KEY));
    Collection<GenericRow> collection =
            (Collection<GenericRow>) genericRow.getValue(GenericRow.MULTIPLE_RECORDS_KEY);
    Assert.assertEquals(2, collection.size());
    Iterator<GenericRow> itr = collection.iterator();
    Assert.assertEquals("v1", itr.next().getValue("array.a"));
    Assert.assertEquals("v2", itr.next().getValue("array.a"));

    // unnest sibling collections
    // { "array":[ { "a":"v1" }, { "a":"v2" } ],
    //   "array2":[ { "b":"v3" }, { "b":"v4" } ] }
    // ->
    // [ { "array.a":"v1","array2.b":"v3" },
    //   { "array.a":"v1","array2.b":"v4" },
    //   { "array.a":"v2","array2.b":"v3" },
    //   { "array.a":"v2","array2.b":"v4" } ]
    transformer = new ComplexTypeTransformer(Arrays.asList("array", "array2"), ".");
    genericRow = new GenericRow();
    Object[] array2 = new Object[2];
    Map<String, Object> map3 = new HashMap<>();
    map3.put("b", "v3");
    Map<String, Object> map4 = new HashMap<>();
    map4.put("b", "v4");
    array2[0] = map3;
    array2[1] = map4;
    genericRow.putValue("array", array);
    genericRow.putValue("array2", array2);
    transformer.transform(genericRow);
    Assert.assertNotNull(genericRow.getValue(GenericRow.MULTIPLE_RECORDS_KEY));
    collection = (Collection<GenericRow>) genericRow.getValue(GenericRow.MULTIPLE_RECORDS_KEY);
    Assert.assertEquals(4, collection.size());
    itr = collection.iterator();
    GenericRow next = itr.next();
    Assert.assertEquals("v1", next.getValue("array.a"));
    Assert.assertEquals("v3", next.getValue("array2.b"));
    next = itr.next();
    Assert.assertEquals("v1", next.getValue("array.a"));
    Assert.assertEquals("v4", next.getValue("array2.b"));
    next = itr.next();
    Assert.assertEquals("v2", next.getValue("array.a"));
    Assert.assertEquals("v3", next.getValue("array2.b"));
    next = itr.next();
    Assert.assertEquals("v2", next.getValue("array.a"));
    Assert.assertEquals("v4", next.getValue("array2.b"));

    // unnest nested collection
    // { "array":[ { "a":"v1", "array2":[ { "b":"v3" }, { "b":"v4" } ] },
    //             { "a":"v2", "array2":[] } ] }
    // ->
    // [ { "array.a":"v1","array.array2.b":"v3" },
    //   { "array.a":"v1","array.array2.b":"v4" },
    //   { "array.a":"v2" } ]
    transformer = new ComplexTypeTransformer(Arrays.asList("array", "array.array2"), ".");
    genericRow = new GenericRow();
    genericRow.putValue("array", array);
    map1.put("array2", array2);
    map2.put("array2", new Object[]{});
    transformer.transform(genericRow);
    Assert.assertNotNull(genericRow.getValue(GenericRow.MULTIPLE_RECORDS_KEY));
    collection = (Collection<GenericRow>) genericRow.getValue(GenericRow.MULTIPLE_RECORDS_KEY);
    Assert.assertEquals(3, collection.size());
    itr = collection.iterator();
    next = itr.next();
    Assert.assertEquals("v1", next.getValue("array.a"));
    Assert.assertEquals("v3", next.getValue("array.array2.b"));
    next = itr.next();
    Assert.assertEquals("v1", next.getValue("array.a"));
    Assert.assertEquals("v4", next.getValue("array.array2.b"));
    next = itr.next();
    Assert.assertEquals("v2", next.getValue("array.a"));
}
@Override
public void broadcastOnIssueChange(List<DefaultIssue> issues, Collection<QGChangeEvent> changeEvents,
        boolean fromAlm) {
    if (listeners.isEmpty() || issues.isEmpty() || changeEvents.isEmpty()) {
        return;
    }

    try {
        broadcastChangeEventsToBranches(issues, changeEvents, fromAlm);
    } catch (Error e) {
        LOG.warn(format("Broadcasting to listeners failed for %s events", changeEvents.size()), e);
    }
}
@Test
public void broadcastOnIssueChange_calls_listener_for_each_component_uuid_with_at_least_one_QGChangeEvent() {
    // branch 2 has multiple issues
    BranchDto component2 = newBranchDto(project1Uuid + "2");
    DefaultIssue[] component2Issues = {newDefaultIssue(component2.getUuid()), newDefaultIssue(component2.getUuid())};
    QGChangeEvent component2QGChangeEvent = newQGChangeEvent(component2);
    // branch 3 has multiple QGChangeEvent and only one issue
    BranchDto component3 = newBranchDto(project1Uuid + "3");
    DefaultIssue component3Issue = newDefaultIssue(component3.getUuid());
    QGChangeEvent[] component3QGChangeEvents = {newQGChangeEvent(component3), newQGChangeEvent(component3)};
    // branch 4 has multiple QGChangeEvent and multiple issues
    BranchDto component4 = newBranchDto(project1Uuid + "4");
    DefaultIssue[] component4Issues = {newDefaultIssue(component4.getUuid()), newDefaultIssue(component4.getUuid())};
    QGChangeEvent[] component4QGChangeEvents = {newQGChangeEvent(component4), newQGChangeEvent(component4)};
    // branch 5 has no QGChangeEvent but one issue
    BranchDto component5 = newBranchDto(project1Uuid + "5");
    DefaultIssue component5Issue = newDefaultIssue(component5.getUuid());

    List<DefaultIssue> issues = Stream.of(
            Stream.of(component1Issue),
            Arrays.stream(component2Issues),
            Stream.of(component3Issue),
            Arrays.stream(component4Issues),
            Stream.of(component5Issue))
        .flatMap(s -> s)
        .toList();
    List<DefaultIssue> changedIssues = randomizedList(issues);
    List<QGChangeEvent> qgChangeEvents = Stream.of(
            Stream.of(component1QGChangeEvent),
            Stream.of(component2QGChangeEvent),
            Arrays.stream(component3QGChangeEvents),
            Arrays.stream(component4QGChangeEvents))
        .flatMap(s -> s)
        .toList();

    underTest.broadcastOnIssueChange(changedIssues, randomizedList(qgChangeEvents), false);

    listeners.forEach(listener -> {
        verifyListenerCalled(listener, component1QGChangeEvent, component1Issue);
        verifyListenerCalled(listener, component2QGChangeEvent, component2Issues);
        Arrays.stream(component3QGChangeEvents)
            .forEach(component3QGChangeEvent -> verifyListenerCalled(listener, component3QGChangeEvent, component3Issue));
        Arrays.stream(component4QGChangeEvents)
            .forEach(component4QGChangeEvent -> verifyListenerCalled(listener, component4QGChangeEvent, component4Issues));
    });
    verifyNoMoreInteractions(listener1, listener2, listener3);
}
public Integer remove(final Object key) {
    return valOrNull(remove((int) key));
}
@Test
void removeShouldReturnMissing() {
    assertEquals(MISSING_VALUE, map.remove(1));
}
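// Hedged sketch: the test above binds the int literal to a primitive
// remove(int) overload (hence MISSING_VALUE), while the boxed remove(Object)
// shown as the focal method returns null for an absent key via valOrNull.
// Assumes an Agrona-style Int2IntHashMap exposing both overloads.
@Test
void removeBoxedShouldReturnNull() {
    assertNull(map.remove((Object) 1));
}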
public static byte[] encode(String s) {
    return s == null ? new byte[0] : s.getBytes(RpcConstants.DEFAULT_CHARSET);
}
@Test
public void encode() {
    Assert.assertTrue(StringSerializer.encode("11").length == 2);
    Assert.assertTrue(StringSerializer.encode("").length == 0);
    Assert.assertTrue(StringSerializer.encode(null).length == 0);
}
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());

    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;

    final ValidationResult validationResult = new ValidationResult();
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult =
                adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet())
            );
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }

        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    descriptionsForTopic,
                    topicConfigs,
                    topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
                );
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    configsForTopic,
                    topicConfigs,
                    topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
                );
            }

            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
            );

            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }

        maybeSleep(
            Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
            deadline,
            "validated"
        );
    }

    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
@Test
public void shouldReportMisconfigurationsOfPartitionCount() {
    setupTopicInMockAdminClient(topic1, repartitionTopicConfig());
    setupTopicInMockAdminClient(topic2, repartitionTopicConfig());
    setupTopicInMockAdminClient(topic3, repartitionTopicConfig());
    final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 2);
    final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(topic2, 3);
    final InternalTopicConfig internalTopicConfig3 = setupRepartitionTopicConfig(topic3, 1);

    final ValidationResult validationResult = internalTopicManager.validate(mkMap(
        mkEntry(topic1, internalTopicConfig1),
        mkEntry(topic2, internalTopicConfig2),
        mkEntry(topic3, internalTopicConfig3)
    ));

    final Map<String, List<String>> misconfigurationsForTopics = validationResult.misconfigurationsForTopics();
    assertThat(validationResult.missingTopics(), empty());
    assertThat(misconfigurationsForTopics.size(), is(2));
    assertThat(misconfigurationsForTopics, hasKey(topic1));
    assertThat(misconfigurationsForTopics.get(topic1).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic1).get(0),
        is("Internal topic " + topic1 + " requires 2 partitions, but the existing topic on the broker has 1 partitions.")
    );
    assertThat(misconfigurationsForTopics, hasKey(topic2));
    assertThat(misconfigurationsForTopics.get(topic2).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic2).get(0),
        is("Internal topic " + topic2 + " requires 3 partitions, but the existing topic on the broker has 1 partitions.")
    );
    assertThat(misconfigurationsForTopics, not(hasKey(topic3)));
}
@Override
@Cacheable(value = RedisKeyConstants.ROLE, key = "#id", unless = "#result == null")
public RoleDO getRoleFromCache(Long id) {
    return roleMapper.selectById(id);
}
@Test
public void testGetRoleFromCache() {
    // mock data (cached)
    RoleDO roleDO = randomPojo(RoleDO.class);
    roleMapper.insert(roleDO);
    // prepare parameters
    Long id = roleDO.getId();

    // invoke
    RoleDO dbRoleDO = roleService.getRoleFromCache(id);
    // assert
    assertPojoEquals(roleDO, dbRoleDO);
}
public static <K, V> WriteRecords<K, V> writeRecords() {
    return new AutoValue_KafkaIO_WriteRecords.Builder<K, V>()
        .setProducerConfig(WriteRecords.DEFAULT_PRODUCER_PROPERTIES)
        .setEOS(false)
        .setNumShards(0)
        .setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
        .setBadRecordRouter(BadRecordRouter.THROWING_ROUTER)
        .setBadRecordErrorHandler(new DefaultErrorHandler<>())
        .build();
}
@Test
public void testSinkToMultipleTopics() throws Exception {
    // Set different output topic names
    int numElements = 1000;
    try (MockProducerWrapper producerWrapper = new MockProducerWrapper(new LongSerializer())) {
        ProducerSendCompletionThread completionThread =
            new ProducerSendCompletionThread(producerWrapper.mockProducer).start();

        String defaultTopic = "test";
        p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
            .apply(ParDo.of(new KV2ProducerRecord(defaultTopic, false)))
            .setCoder(ProducerRecordCoder.of(VarIntCoder.of(), VarLongCoder.of()))
            .apply(
                KafkaIO.<Integer, Long>writeRecords()
                    .withBootstrapServers("none")
                    .withKeySerializer(IntegerSerializer.class)
                    .withValueSerializer(LongSerializer.class)
                    .withInputTimestamp()
                    .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));

        p.run();

        completionThread.shutdown();

        // Verify that appropriate messages are written to different Kafka topics
        List<ProducerRecord<Integer, Long>> sent = producerWrapper.mockProducer.history();

        for (int i = 0; i < numElements; i++) {
            ProducerRecord<Integer, Long> record = sent.get(i);
            if (i % 2 == 0) {
                assertEquals("test_2", record.topic());
            } else {
                assertEquals("test_1", record.topic());
            }
            assertEquals(i, record.key().intValue());
            assertEquals(i, record.value().longValue());
            assertEquals(i, record.timestamp().intValue());
            assertEquals(0, record.headers().toArray().length);
        }
    }
}
@Override
public ProcessingResult process(ReplicationTask task) {
    try {
        EurekaHttpResponse<?> httpResponse = task.execute();
        int statusCode = httpResponse.getStatusCode();
        Object entity = httpResponse.getEntity();
        if (logger.isDebugEnabled()) {
            logger.debug("Replication task {} completed with status {}, (includes entity {})",
                    task.getTaskName(), statusCode, entity != null);
        }
        if (isSuccess(statusCode)) {
            task.handleSuccess();
        } else if (statusCode == 503) {
            logger.debug("Server busy (503) reply for task {}", task.getTaskName());
            return ProcessingResult.Congestion;
        } else {
            task.handleFailure(statusCode, entity);
            return ProcessingResult.PermanentError;
        }
    } catch (Throwable e) {
        if (maybeReadTimeOut(e)) {
            logger.error("It seems to be a socket read timeout exception, it will retry later. "
                    + "if it continues to happen and some eureka node occupied all the cpu time, "
                    + "you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e);
            // a read timeout exception is more Congestion than TransientError; return Congestion for a longer delay
            return ProcessingResult.Congestion;
        } else if (isNetworkConnectException(e)) {
            logNetworkErrorSample(task, e);
            return ProcessingResult.TransientError;
        } else {
            logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception",
                    peerId, task.getTaskName(), e);
            return ProcessingResult.PermanentError;
        }
    }
    return ProcessingResult.Success;
}
@Test
public void testBatchableTaskPermanentFailureHandling() throws Exception {
    TestableInstanceReplicationTask task = aReplicationTask().build();
    InstanceInfo instanceInfoFromPeer = InstanceInfoGenerator.takeOne();

    replicationClient.withNetworkStatusCode(200);
    replicationClient.withBatchReply(400);
    replicationClient.withInstanceInfo(instanceInfoFromPeer);
    ProcessingResult status = replicationTaskProcessor.process(Collections.<ReplicationTask>singletonList(task));

    assertThat(status, is(ProcessingResult.Success));
    assertThat(task.getProcessingState(), is(ProcessingState.Failed));
}
Map<Address, int[]> getRemotePartitionAssignment() {
    return remotePartitionAssignment;
}
@Test
public void testRemotePartitionAssignment() {
    assertEquals(2, a.getRemotePartitionAssignment().size());
    // only remote members are included
    assertArrayEquals(new int[]{2, 4}, a.getRemotePartitionAssignment().get(a1));
    assertArrayEquals(new int[]{1, 5}, a.getRemotePartitionAssignment().get(a2));
}
@Override
public boolean addAggrConfigInfo(final String dataId, final String group, String tenant, final String datumId,
        String appName, final String content) {
    String appNameTmp = StringUtils.isBlank(appName) ? StringUtils.EMPTY : appName;
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    String contentTmp = StringUtils.isBlank(content) ? StringUtils.EMPTY : content;
    final Timestamp now = new Timestamp(System.currentTimeMillis());

    ConfigInfoAggrMapper configInfoAggrMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.CONFIG_INFO_AGGR);
    final String select = configInfoAggrMapper.select(Collections.singletonList("content"),
            Arrays.asList("data_id", "group_id", "tenant_id", "datum_id"));
    final String insert = configInfoAggrMapper.insert(
            Arrays.asList("data_id", "group_id", "tenant_id", "datum_id", "app_name", "content", "gmt_modified"));
    final String update = configInfoAggrMapper.update(Arrays.asList("content", "gmt_modified"),
            Arrays.asList("data_id", "group_id", "tenant_id", "datum_id"));

    String dbContent = databaseOperate.queryOne(select, new Object[] {dataId, group, tenantTmp, datumId},
            String.class);

    if (Objects.isNull(dbContent)) {
        final Object[] args = new Object[] {dataId, group, tenantTmp, datumId, appNameTmp, contentTmp, now};
        EmbeddedStorageContextHolder.addSqlContext(insert, args);
    } else if (!dbContent.equals(content)) {
        final Object[] args = new Object[] {contentTmp, now, dataId, group, tenantTmp, datumId};
        EmbeddedStorageContextHolder.addSqlContext(update, args);
    }

    try {
        boolean result = databaseOperate.update(EmbeddedStorageContextHolder.getCurrentSqlContext());
        if (!result) {
            throw new NacosConfigException("[Merge] Configuration release failed");
        }
        return true;
    } finally {
        EmbeddedStorageContextHolder.cleanAllContext();
    }
}
@Test
void testAddAggrConfigInfoOfUpdateNotEqualContent() {
    String dataId = "dataId111";
    String group = "group";
    String tenant = "tenant";
    String datumId = "datumId";
    String appName = "appname1234";
    String content = "content1234";

    // mock query for the existing datum returning a different content
    String existContent = "existContent111";
    Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant, datumId}),
            eq(String.class))).thenReturn(existContent);
    // mock update success, returning true
    Mockito.when(databaseOperate.update(any(List.class))).thenReturn(true);

    // update content
    boolean result = embededConfigInfoAggrPersistService.addAggrConfigInfo(dataId, group, tenant, datumId, appName,
            content);
    assertTrue(result);
}
public static String getMasterForEntry(JournalEntry entry) {
    if (entry.hasAddMountPoint()
            || entry.hasAsyncPersistRequest()
            || entry.hasAddSyncPoint()
            || entry.hasActiveSyncTxId()
            || entry.hasCompleteFile()
            || entry.hasDeleteFile()
            || entry.hasDeleteMountPoint()
            || entry.hasInodeDirectory()
            || entry.hasInodeDirectoryIdGenerator()
            || entry.hasInodeFile()
            || entry.hasInodeLastModificationTime()
            || entry.hasNewBlock()
            || entry.hasPersistDirectory()
            || entry.hasRemoveSyncPoint()
            || entry.hasRename()
            || entry.hasSetAcl()
            || entry.hasSetAttribute()
            || entry.hasUpdateUfsMode()
            || entry.hasUpdateInode()
            || entry.hasUpdateInodeDirectory()
            || entry.hasUpdateInodeFile()
            || entry.hasLoadJob()
            || entry.hasCopyJob()
            || entry.hasMoveJob()) {
        return Constants.FILE_SYSTEM_MASTER_NAME;
    }
    if (entry.hasBlockContainerIdGenerator()
            || entry.hasDeleteBlock()
            || entry.hasBlockInfo()) {
        return Constants.BLOCK_MASTER_NAME;
    }
    if (entry.hasClusterInfo()
            || entry.hasPathProperties()
            || entry.hasRemovePathProperties()) {
        return Constants.META_MASTER_NAME;
    }
    if (entry.hasPolicyDefinition()
            || entry.hasPolicyRemove()) {
        return Constants.POLICY_ENGINE_NAME;
    }
    throw new IllegalStateException("Unrecognized journal entry: " + entry);
}
@Test
public void testEntries() {
    for (JournalEntry entry : ENTRIES) {
        assertNotNull(JournalEntryAssociation.getMasterForEntry(entry));
    }
}
@Override
public boolean removeAll(Collection<?> c) {
    boolean changed = false;
    Iterator<?> iter = c.iterator();
    while (iter.hasNext()) {
        changed |= remove(iter.next());
    }
    return changed;
}
@Test
public void testRemoveAll() {
    LOG.info("Test remove all");
    for (Integer i : list) {
        assertTrue(set.add(i));
    }
    for (int i = 0; i < NUM; i++) {
        assertTrue(set.remove(list.get(i)));
    }

    // the deleted elements should not be there
    for (int i = 0; i < NUM; i++) {
        assertFalse(set.contains(list.get(i)));
    }

    // iterator should not have next
    Iterator<Integer> iter = set.iterator();
    assertFalse(iter.hasNext());

    assertTrue(set.isEmpty());
    LOG.info("Test remove all - DONE");
}
public SchemaMapping fromParquet(MessageType parquetSchema) {
    List<Type> fields = parquetSchema.getFields();
    List<TypeMapping> mappings = fromParquet(fields);
    List<Field> arrowFields = fields(mappings);
    return new SchemaMapping(new Schema(arrowFields), parquetSchema, mappings);
}
@Test
public void testParquetInt64TimestampMillisToArrow() {
    MessageType parquet = Types.buildMessage()
        .addField(Types.optional(INT64)
            .as(LogicalTypeAnnotation.timestampType(true, MILLIS))
            .named("a"))
        .named("root");
    Schema expected = new Schema(asList(
        field("a", new ArrowType.Timestamp(TimeUnit.MILLISECOND, "UTC"))));
    Assert.assertEquals(expected, converter.fromParquet(parquet).getArrowSchema());
}
public static void warn(Logger logger, String msg, Throwable e) {
    if (logger == null) {
        return;
    }
    if (logger.isWarnEnabled()) {
        logger.warn(msg, e);
    }
}
@Test
void testWarn() {
    Logger logger = Mockito.mock(Logger.class);
    when(logger.isWarnEnabled()).thenReturn(true);

    LogHelper.warn(logger, "warn");
    verify(logger).warn("warn");

    Throwable t = new RuntimeException();
    LogHelper.warn(logger, t);
    verify(logger).warn(t);

    LogHelper.warn(logger, "warn", t);
    verify(logger).warn("warn", t);
}
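// Hedged companion case for the isWarnEnabled() guard in the focal method:
// when warn logging is disabled, nothing should be forwarded to the logger.
// Uses only standard Mockito matchers (never, anyString, any).
@Test
void testWarnDisabledLoggerIsNotCalled() {
    Logger logger = Mockito.mock(Logger.class);
    when(logger.isWarnEnabled()).thenReturn(false);
    LogHelper.warn(logger, "warn", new RuntimeException());
    verify(logger, never()).warn(anyString(), any(Throwable.class));
}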
@Description("F cdf given the numerator degrees of freedom (df1), denominator degrees of freedom (df2) parameters, and value") @ScalarFunction @SqlType(StandardTypes.DOUBLE) public static double fCdf( @SqlType(StandardTypes.DOUBLE) double df1, @SqlType(StandardTypes.DOUBLE) double df2, @SqlType(StandardTypes.DOUBLE) double value) { checkCondition(value >= 0, INVALID_FUNCTION_ARGUMENT, "fCdf Function: value must non-negative"); checkCondition(df1 > 0, INVALID_FUNCTION_ARGUMENT, "fCdf Function: numerator df must be greater than 0"); checkCondition(df2 > 0, INVALID_FUNCTION_ARGUMENT, "fCdf Function: denominator df must be greater than 0"); FDistribution distribution = new FDistribution(null, df1, df2, FDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY); return distribution.cumulativeProbability(value); }
@Test
public void testFCdf() {
    assertFunction("round(f_cdf(2.0, 5.0, 0.7988), 4)", DOUBLE, 0.5);
    assertFunction("round(f_cdf(2.0, 5.0, 3.7797), 4)", DOUBLE, 0.9);

    assertInvalidFunction("f_cdf(0, 3, 0.5)", "fCdf Function: numerator df must be greater than 0");
    assertInvalidFunction("f_cdf(3, 0, 0.5)", "fCdf Function: denominator df must be greater than 0");
    assertInvalidFunction("f_cdf(3, 5, -0.1)", "fCdf Function: value must non-negative");
}
@Udf
public String lpad(
        @UdfParameter(description = "String to be padded") final String input,
        @UdfParameter(description = "Target length") final Integer targetLen,
        @UdfParameter(description = "Padding string") final String padding) {
    if (input == null) {
        return null;
    }
    if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
        return null;
    }

    final StringBuilder sb = new StringBuilder(targetLen + padding.length());
    final int padUpTo = Math.max(targetLen - input.length(), 0);
    for (int i = 0; i < padUpTo; i += padding.length()) {
        sb.append(padding);
    }
    sb.setLength(padUpTo);
    sb.append(input);
    sb.setLength(targetLen);
    return sb.toString();
}
@Test
public void shouldPadInputString() {
    final String result = udf.lpad("foo", 7, "Bar");
    assertThat(result, is("BarBfoo"));
}
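// Hedged companion cases read straight off the implementation above: when
// targetLen is shorter than the input, the final sb.setLength(targetLen)
// truncates the input itself rather than padding it.
@Test
public void shouldTruncateWhenTargetLenShorterThanInput() {
    assertThat(udf.lpad("foo", 2, "Bar"), is("fo"));
    assertThat(udf.lpad("foo", 0, "Bar"), is(""));
}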
public void calculate(IThrowableProxy tp) {
    while (tp != null) {
        populateFrames(tp.getStackTraceElementProxyArray());
        IThrowableProxy[] suppressed = tp.getSuppressed();
        if (suppressed != null) {
            for (IThrowableProxy current : suppressed) {
                populateFrames(current.getStackTraceElementProxyArray());
            }
        }
        tp = tp.getCause();
    }
}
@Test
public void smoke() throws Exception {
    Throwable t = new Throwable("x");
    ThrowableProxy tp = new ThrowableProxy(t);
    PackagingDataCalculator pdc = tp.getPackagingDataCalculator();
    pdc.calculate(tp);
    verify(tp);
    tp.fullDump();
}
public void start() {
    configService.addListener(configListener);
    interfaceService.addListener(interfaceListener);
    setUpConnectivity();
}
@Test
public void testNullInterfaces() {
    reset(interfaceService);
    interfaceService.addListener(anyObject(InterfaceListener.class));
    expectLastCall().anyTimes();
    expect(interfaceService.getInterfaces()).andReturn(
            Sets.newHashSet()).anyTimes();
    expect(interfaceService.getInterfacesByPort(s2Eth1))
            .andReturn(Collections.emptySet()).anyTimes();
    expect(interfaceService.getInterfacesByPort(s1Eth1))
            .andReturn(Collections.emptySet()).anyTimes();
    expect(interfaceService.getInterfacesByIp(IpAddress.valueOf("192.168.10.101")))
            .andReturn(Collections.emptySet()).anyTimes();
    expect(interfaceService.getMatchingInterface(IpAddress.valueOf("192.168.10.1")))
            .andReturn(null).anyTimes();
    expect(interfaceService.getInterfacesByIp(IpAddress.valueOf("192.168.20.101")))
            .andReturn(Collections.emptySet()).anyTimes();
    expect(interfaceService.getMatchingInterface(IpAddress.valueOf("192.168.20.1")))
            .andReturn(null).anyTimes();
    expect(interfaceService.getInterfacesByIp(IpAddress.valueOf("192.168.30.101")))
            .andReturn(Collections.emptySet()).anyTimes();
    expect(interfaceService.getMatchingInterface(IpAddress.valueOf("192.168.30.1")))
            .andReturn(null).anyTimes();
    expect(interfaceService.getInterfacesByIp(IpAddress.valueOf("192.168.40.101")))
            .andReturn(Collections.emptySet()).anyTimes();
    expect(interfaceService.getMatchingInterface(IpAddress.valueOf("192.168.40.1")))
            .andReturn(null).anyTimes();
    expect(interfaceService.getInterfacesByIp(IpAddress.valueOf("192.168.50.101")))
            .andReturn(Collections.emptySet()).anyTimes();
    expect(interfaceService.getMatchingInterface(IpAddress.valueOf("192.168.50.1")))
            .andReturn(null).anyTimes();
    replay(interfaceService);

    reset(intentSynchronizer);
    replay(intentSynchronizer);

    peerConnectivityManager.start();

    verify(intentSynchronizer);
}
public static Builder newBuilder() {
    return new Builder();
}
@Test
void localEndpointDefaults() {
    Tracing tracing = Tracing.newBuilder().build();

    assertThat(tracing).extracting("tracer.pendingSpans.defaultSpan.localServiceName")
        .isEqualTo("unknown");
    assertThat(tracing).extracting("tracer.pendingSpans.defaultSpan.localIp")
        .isEqualTo(Platform.get().linkLocalIp());
}
public static String getVersionDesc(int value) {
    int length = Version.values().length;
    if (value >= length) {
        return Version.values()[length - 1].name();
    }
    return Version.values()[value].name();
}
@Test
public void testGetVersionDesc() throws Exception {
    String desc = "V3_0_0_SNAPSHOT";
    assertThat(MQVersion.getVersionDesc(0)).isEqualTo(desc);
}
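// Hedged companion case derived from the clamping branch above; assumes the
// Version enum is the nested MQVersion.Version referenced by getVersionDesc.
@Test
public void testGetVersionDescOutOfRange() {
    int length = MQVersion.Version.values().length;
    assertThat(MQVersion.getVersionDesc(length + 100))
        .isEqualTo(MQVersion.Version.values()[length - 1].name());
}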
@Nullable
String getCollectionName(BsonDocument command, String commandName) {
    if (COMMANDS_WITH_COLLECTION_NAME.contains(commandName)) {
        String collectionName = getNonEmptyBsonString(command.get(commandName));
        if (collectionName != null) {
            return collectionName;
        }
    }
    // Some other commands, like getMore, have a field like {"collection": collectionName}.
    return getNonEmptyBsonString(command.get("collection"));
}
@Test
void getCollectionName_emptyStringCommandArgument() {
    assertThat(listener.getCollectionName(new BsonDocument("find", new BsonString(" ")), "find"))
        .isNull();
}
public static synchronized HealthCheckRegistry setDefault(String name) {
    final HealthCheckRegistry registry = getOrCreate(name);
    return setDefault(name, registry);
}
@Test
public void unableToSetCustomDefaultRegistryTwice() {
    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage("Default health check registry is already set.");

    SharedHealthCheckRegistries.setDefault("default", new HealthCheckRegistry());
    SharedHealthCheckRegistries.setDefault("default", new HealthCheckRegistry());
}
public AuthenticationType getType() {
    if (type != null) {
        // use the user provided type
        return type;
    }

    final boolean hasPassword = ObjectHelper.isNotEmpty(password);
    final boolean hasRefreshToken = ObjectHelper.isNotEmpty(refreshToken);
    final boolean hasKeystore = keystore != null && ObjectHelper.isNotEmpty(keystore.getResource());
    final boolean hasClientCredentials = ObjectHelper.isNotEmpty(clientId) && ObjectHelper.isNotEmpty(clientSecret);

    if (hasPassword && !hasRefreshToken && !hasKeystore) {
        return AuthenticationType.USERNAME_PASSWORD;
    }
    if (!hasPassword && hasRefreshToken && !hasKeystore) {
        return AuthenticationType.REFRESH_TOKEN;
    }
    if (!hasPassword && !hasRefreshToken && hasKeystore) {
        return AuthenticationType.JWT;
    }
    if (!hasPassword && !hasRefreshToken && !hasKeystore && hasClientCredentials) {
        return AuthenticationType.CLIENT_CREDENTIALS;
    }

    if (hasPassword && hasRefreshToken || hasPassword && hasKeystore || hasRefreshToken && hasKeystore) {
        throw new IllegalArgumentException(
                "The provided authentication configuration can be used in multiple ways"
                        + " for instance both with username/password and refresh_token. Either remove some of the configuration"
                        + " options, so that authentication method can be auto-determined or explicitly set the authentication"
                        + " type.");
    }

    throw new IllegalArgumentException(
            "You must specify parameters aligned with one of the supported authentication methods:"
                    + " for username and password authentication: userName, password, clientSecret;"
                    + " for refresh token authentication: refreshToken, clientSecret;"
                    + " for JWT: userName, keystore. And for every one of those loginUrl and clientId must be specified also.");
}
@Test
public void shouldDetermineProperAuthenticationType() {
    assertEquals(AuthenticationType.USERNAME_PASSWORD, usernamePassword.getType());
    assertEquals(AuthenticationType.REFRESH_TOKEN, refreshToken.getType());
    assertEquals(AuthenticationType.JWT, jwt.getType());
}
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
        throws IOException {
    return createFromImage(document, image, 0.75f);
}
@Test
void testCreateFromImageRGB() throws IOException {
    PDDocument document = new PDDocument();
    BufferedImage image = ImageIO.read(JPEGFactoryTest.class.getResourceAsStream("jpeg.jpg"));
    assertEquals(3, image.getColorModel().getNumComponents());
    PDImageXObject ximage = JPEGFactory.createFromImage(document, image);
    validate(ximage, 8, 344, 287, "jpg", PDDeviceRGB.INSTANCE.getName());
    doWritePDF(document, ximage, TESTRESULTSDIR, "jpegrgb.pdf");
}
@NonNull
public static String encode(@NonNull String s) {
    try {
        boolean escaped = false;

        StringBuilder out = new StringBuilder(s.length());

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        OutputStreamWriter w = new OutputStreamWriter(buf, StandardCharsets.UTF_8);

        for (int i = 0; i < s.length(); i++) {
            int c = s.charAt(i);
            if (c < 128 && c != ' ') {
                out.append((char) c);
            } else {
                // 1 char -> UTF8
                w.write(c);
                w.flush();
                for (byte b : buf.toByteArray()) {
                    out.append('%');
                    out.append(toDigit((b >> 4) & 0xF));
                    out.append(toDigit(b & 0xF));
                }
                buf.reset();
                escaped = true;
            }
        }

        return escaped ? out.toString() : s;
    } catch (IOException e) {
        throw new Error(e); // impossible
    }
}
@Test
public void testEncodeSpaces() {
    final String urlWithSpaces = "http://hudson/job/Hudson Job";
    String encoded = Util.encode(urlWithSpaces);
    assertEquals("http://hudson/job/Hudson%20Job", encoded);
}
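// Hedged companion case derived from the loop above: non-ASCII characters are
// written through the UTF-8 encoder and percent-encoded byte by byte ('é' is
// 0xC3 0xA9 in UTF-8). Compared case-insensitively since the hex case produced
// by toDigit is not visible from the snippet above.
@Test
public void testEncodeNonAscii() {
    assertTrue("caf%C3%A9".equalsIgnoreCase(Util.encode("café")));
}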
@Override
public void execute(Exchange exchange) throws SmppException {
    SubmitSm[] submitSms = createSubmitSm(exchange);
    List<String> messageIDs = new ArrayList<>(submitSms.length);

    String messageID = null;
    for (int i = 0; i < submitSms.length; i++) {
        SubmitSm submitSm = submitSms[i];
        messageID = null;

        if (log.isDebugEnabled()) {
            log.debug("Sending short message {} for exchange id '{}'...", i, exchange.getExchangeId());
        }

        try {
            SubmitSmResult result = session.submitShortMessage(
                    submitSm.getServiceType(),
                    TypeOfNumber.valueOf(submitSm.getSourceAddrTon()),
                    NumberingPlanIndicator.valueOf(submitSm.getSourceAddrNpi()),
                    submitSm.getSourceAddr(),
                    TypeOfNumber.valueOf(submitSm.getDestAddrTon()),
                    NumberingPlanIndicator.valueOf(submitSm.getDestAddrNpi()),
                    submitSm.getDestAddress(),
                    new ESMClass(submitSm.getEsmClass()),
                    submitSm.getProtocolId(),
                    submitSm.getPriorityFlag(),
                    submitSm.getScheduleDeliveryTime(),
                    submitSm.getValidityPeriod(),
                    new RegisteredDelivery(submitSm.getRegisteredDelivery()),
                    submitSm.getReplaceIfPresent(),
                    DataCodings.newInstance(submitSm.getDataCoding()),
                    (byte) 0,
                    submitSm.getShortMessage(),
                    submitSm.getOptionalParameters());

            if (result != null) {
                messageID = result.getMessageId();
            }
        } catch (Exception e) {
            throw new SmppException(e);
        }

        if (messageID != null) {
            messageIDs.add(messageID);
        }
    }

    if (log.isDebugEnabled()) {
        log.debug("Sent short message for exchange id '{}' and received message ids '{}'",
                exchange.getExchangeId(), messageIDs);
    }

    Message message = ExchangeHelper.getResultMessage(exchange);
    message.setHeader(SmppConstants.ID, messageIDs);
    message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
}
@Test
public void alphabetUpdatesDataCoding() throws Exception {
    final byte incorrectDataCoding = (byte) 0x00;
    byte[] body = {'A', 'B', 'C'};
    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitSm");
    exchange.getIn().setHeader(SmppConstants.ALPHABET, Alphabet.ALPHA_8_BIT.value());
    exchange.getIn().setBody(body);

    when(session.submitShortMessage(eq("CMT"),
            eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
            eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1717"),
            eq(new ESMClass()), eq((byte) 0), eq((byte) 1),
            (String) isNull(), (String) isNull(),
            eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
            eq(ReplaceIfPresentFlag.DEFAULT.value()),
            argThat(not(DataCodings.newInstance(incorrectDataCoding))),
            eq((byte) 0), eq(body)))
        .thenReturn(new SubmitSmResult(new MessageId("1"), null));

    command.execute(exchange);

    assertEquals(Arrays.asList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
}
public Schema sorted() {
    // Create a new schema and copy over the appropriate Schema object attributes:
    // {fields, uuid, options}
    // Note: encoding positions are not copied over because generally they should align with the
    // ordering of field indices. Otherwise, problems may occur when encoding/decoding Rows of
    // this schema.
    Schema sortedSchema =
        this.fields.stream()
            .sorted(Comparator.comparing(Field::getName))
            .map(
                field -> {
                    FieldType innerType = field.getType();
                    if (innerType.getRowSchema() != null) {
                        Schema innerSortedSchema = innerType.getRowSchema().sorted();
                        innerType = innerType.toBuilder().setRowSchema(innerSortedSchema).build();
                        return field.toBuilder().setType(innerType).build();
                    }
                    return field;
                })
            .collect(Schema.toSchema())
            .withOptions(getOptions());
    sortedSchema.setUUID(getUUID());
    return sortedSchema;
}
@Test
public void testSorted() {
    Options testOptions =
        Options.builder()
            .setOption("test_str_option", FieldType.STRING, "test_str")
            .setOption("test_bool_option", FieldType.BOOLEAN, true)
            .build();
    Schema unorderedSchema =
        Schema.builder()
            .addStringField("d")
            .addInt32Field("c")
            .addStringField("b")
            .addByteField("a")
            .build()
            .withOptions(testOptions);
    Schema unorderedSchemaAfterSorting = unorderedSchema.sorted();
    Schema sortedSchema =
        Schema.builder()
            .addByteField("a")
            .addStringField("b")
            .addInt32Field("c")
            .addStringField("d")
            .build()
            .withOptions(testOptions);

    assertEquals(true, unorderedSchema.equivalent(unorderedSchemaAfterSorting));
    assertEquals(
        true,
        Objects.equals(unorderedSchemaAfterSorting.getFields(), sortedSchema.getFields())
            && Objects.equals(unorderedSchemaAfterSorting.getOptions(), sortedSchema.getOptions())
            && Objects.equals(
                unorderedSchemaAfterSorting.getEncodingPositions(),
                sortedSchema.getEncodingPositions()));
}
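// Hedged sketch of the recursive branch in sorted() above: a nested row field's
// schema should come back with its own fields sorted as well. Method names
// follow the Beam Schema API already used in this test.
@Test
public void testSortedNestedRow() {
    Schema inner = Schema.builder().addStringField("z").addStringField("a").build();
    Schema outer = Schema.builder().addRowField("row", inner).build();

    Schema innerSorted = outer.sorted().getField("row").getType().getRowSchema();

    assertEquals("a", innerSorted.getField(0).getName());
    assertEquals("z", innerSorted.getField(1).getName());
}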
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
    switch (outputFormat) {
        case JSON:
            printAsJson(entityList);
            break;
        case TABULAR:
            final boolean showStatements = entityList.size() > 1;
            for (final KsqlEntity ksqlEntity : entityList) {
                writer().println();
                if (showStatements) {
                    writer().println(ksqlEntity.getStatementText());
                }
                printAsTable(ksqlEntity);
            }
            break;
        default:
            throw new RuntimeException(String.format(
                "Unexpected output format: '%s'", outputFormat.name()
            ));
    }
}
@Test
public void testPrintExecutionPlan() {
    // Given:
    final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of(
        new ExecutionPlan("Test Execution Plan")
    ));

    // When:
    console.printKsqlEntityList(entityList);

    // Then:
    final String output = terminal.getOutputString();
    Approvals.verify(output, approvalOptions);
}
public static Stream<DeterministicKey> generate(DeterministicKey parent, int childNumber) {
    return Stream.generate(new KeySupplier(parent, childNumber));
}
@Test
public void testGenerate() {
    DeterministicKey parent = new DeterministicKey(HDPath.m(), new byte[32], BigInteger.TEN, null);
    assertFalse(parent.isPubKeyOnly());
    assertFalse(parent.isEncrypted());

    List<DeterministicKey> keys0 = HDKeyDerivation.generate(parent, CHILD_NUMBER.num())
            .limit(0)
            .collect(Collectors.toList());
    assertEquals(0, keys0.size());

    List<DeterministicKey> keys1 = HDKeyDerivation.generate(parent, CHILD_NUMBER.num())
            .limit(1)
            .collect(Collectors.toList());
    assertEquals(1, keys1.size());
    assertEquals(HDPath.m(CHILD_NUMBER), keys1.get(0).getPath());

    List<DeterministicKey> keys2 = HDKeyDerivation.generate(parent, CHILD_NUMBER.num())
            .limit(2)
            .collect(Collectors.toList());
    assertEquals(2, keys2.size());
    assertEquals(HDPath.parsePath("m/1"), keys2.get(0).getPath());
    assertEquals(HDPath.parsePath("m/2"), keys2.get(1).getPath());
}
public static JSONObject getJsonObject(JSONObject jsonObject, String key, int fromIndex) {
    int firstOccr = key.indexOf('.', fromIndex);
    if (firstOccr == -1) {
        String token = key.substring(key.lastIndexOf('.') + 1);
        if (jsonObject.has(token)) {
            return (JSONObject) jsonObject.get(token);
        } else {
            return null;
        }
    }
    String fieldName = key.substring(fromIndex, firstOccr);
    if (jsonObject.has(fieldName)) {
        return getJsonObject((JSONObject) jsonObject.get(fieldName), key, firstOccr + 1);
    } else {
        return null;
    }
}
@Test(expected = ClassCastException.class)
public void testGetJsonObjectWithException() {
    JSONObject json = new JSONObject(jsonStr);
    // this api only supports json objects; getting a string value directly will throw an exception
    EsUtil.getJsonObject(json, "settings.index.bpack.partition.upperbound", 0);
}
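// Hedged companion sketch for the success path: walking a dotted path whose
// segments are all JSON objects returns the nested object. Assumes jsonStr
// nests objects down to "partition", as the failing test above implies.
@Test
public void testGetJsonObjectNested() {
    JSONObject json = new JSONObject(jsonStr);
    Assert.assertNotNull(EsUtil.getJsonObject(json, "settings.index", 0));
}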
public AggregateAnalysisResult analyze(
    final ImmutableAnalysis analysis,
    final List<SelectExpression> finalProjection
) {
    if (!analysis.getGroupBy().isPresent()) {
        throw new IllegalArgumentException("Not an aggregate query");
    }

    final AggAnalyzer aggAnalyzer = new AggAnalyzer(analysis, functionRegistry);
    aggAnalyzer.process(finalProjection);
    return aggAnalyzer.result();
}
@Test
public void shouldNotThrowOnAggregateFunctionInHavingThatReferencesColumnNotInGroupBy() {
    // Given:
    givenHavingExpression(AGG_FUNCTION_CALL);

    // When:
    analyzer.analyze(analysis, selects);

    // Then: did not throw.
}
@Override
public Expr clone() {
    return new VirtualSlotRef(this);
}
@Test
public void testClone() {
    Expr v = virtualSlot.clone();
    Assert.assertTrue(v instanceof VirtualSlotRef);
    Assert.assertTrue(((VirtualSlotRef) v).getRealSlots().get(0).equals(virtualSlot.getRealSlots().get(0)));
    Assert.assertFalse(((VirtualSlotRef) v).getRealSlots().get(0) == virtualSlot.getRealSlots().get(0));
}
@Override
public double variance() {
    return 2 * nu;
}
@Test
public void testVariance() {
    System.out.println("variance");
    ChiSquareDistribution instance = new ChiSquareDistribution(20);
    instance.rand();
    assertEquals(40, instance.variance(), 1E-7);
}
@Override @SuppressWarnings({"unchecked"}) public RestLiResponseData<PartialUpdateResponseEnvelope> buildRestLiResponseData(Request request, RoutingResult routingResult, Object result, Map<String, String> headers, List<HttpCookie> cookies) { UpdateResponse updateResponse = (UpdateResponse) result; // Verify that the status in the UpdateResponse is not null. If so, this is a developer error. if (updateResponse.getStatus() == null) { throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR, "Unexpected null encountered. HttpStatus is null inside of an UpdateResponse returned by the resource method: " + routingResult.getResourceMethod()); } final ResourceContext resourceContext = routingResult.getContext(); RecordTemplate entityResponse = null; // Add patched entity to the response if result is an UpdateEntityResponse and the client is asking for the entity if (result instanceof UpdateEntityResponse && resourceContext.isReturnEntityRequested()) { UpdateEntityResponse<?> updateEntityResponse = (UpdateEntityResponse<?>) updateResponse; if (updateEntityResponse.hasEntity()) { DataMap entityData = updateEntityResponse.getEntity().data(); TimingContextUtil.beginTiming(resourceContext.getRawRequestContext(), FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key()); final DataMap data = RestUtils.projectFields(entityData, resourceContext); TimingContextUtil.endTiming(resourceContext.getRawRequestContext(), FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key()); // Returned entity is to be added to the response envelope entityResponse = new EntityResponse<>(data, updateEntityResponse.getEntity().getClass()); } else { // The entity in the UpdateEntityResponse should not be null. This is a developer error. // If trying to return an error response, a RestLiServiceException should be thrown in the resource method. throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR, "Unexpected null encountered. Entity is null inside of an UpdateEntityResponse returned by the resource method: " + routingResult.getResourceMethod()); } } return new RestLiResponseDataImpl<>(new PartialUpdateResponseEnvelope(updateResponse.getStatus(), entityResponse), headers, cookies); }
@Test(dataProvider = "responseExceptionData") public void testBuilderException(UpdateResponse response) { Map<String, String> headers = ResponseBuilderUtil.getHeaders(); PartialUpdateResponseBuilder partialUpdateResponseBuilder = new PartialUpdateResponseBuilder(); RoutingResult routingResult = getMockRoutingResult(true, null); try { partialUpdateResponseBuilder.buildRestLiResponseData(null, routingResult, response, headers, Collections.emptyList()); Assert.fail("buildRestLiResponseData should have failed because of a null HTTP status or a null entity."); } catch (RestLiServiceException e) { Assert.assertTrue(e.getMessage().contains("Unexpected null encountered.")); } }
@Override public CheckpointStateToolset createTaskOwnedCheckpointStateToolset() { if (fileSystem instanceof PathsCopyingFileSystem) { return new FsCheckpointStateToolset( taskOwnedStateDirectory, (PathsCopyingFileSystem) fileSystem); } else { return new NotDuplicatingCheckpointStateToolset(); } }
@Test void testDuplicationCheckpointStateToolset() throws Exception { CheckpointStorageAccess checkpointStorage = new FsCheckpointStorageAccess( new TestDuplicatingFileSystem(), randomTempPath(), null, true, new JobID(), FILE_SIZE_THRESHOLD, WRITE_BUFFER_SIZE); assertThat(checkpointStorage.createTaskOwnedCheckpointStateToolset()) .isInstanceOf(FsCheckpointStateToolset.class); }
@PostConstruct public void init() { if (!environment.getAgentStatusEnabled()) { LOG.info("Agent status HTTP API server has been disabled."); return; } InetSocketAddress address = new InetSocketAddress(environment.getAgentStatusHostname(), environment.getAgentStatusPort()); try { server = HttpServer.create(address, SERVER_SOCKET_BACKLOG); setupRoutes(server); server.start(); LOG.info("Agent status HTTP API server running on http://{}:{}.", server.getAddress().getHostName(), server.getAddress().getPort()); } catch (Exception e) { LOG.warn("Could not start agent status HTTP API server on host {}, port {}.", address.getHostName(), address.getPort(), e); } }
@Test void shouldNotInitializeServerIfSettingIsTurnedOff() { try (MockedStatic<HttpServer> mockedStaticHttpServer = mockStatic(HttpServer.class)) { when(systemEnvironment.getAgentStatusEnabled()).thenReturn(false); agentStatusHttpd.init(); mockedStaticHttpServer.verifyNoInteractions(); } }
public Map<String, String> confirm(RdaConfirmRequest params) { AppSession appSession = appSessionService.getSession(params.getAppSessionId()); AppAuthenticator appAuthenticator = appAuthenticatorService.findByUserAppId(appSession.getUserAppId()); if(!checkSecret(params, appSession) || !checkAccount(params, appSession)){ appSession.setRdaSessionStatus("ABORTED"); appSessionService.save(appSession); return Map.of("arrivalStatus", "NOK"); } if(checkAndProcessError(params, appSession)){ appSessionService.save(appSession); return Map.of("arrivalStatus", "OK"); } if (!switchService.digidAppSwitchEnabled()) { digidClient.remoteLog("853", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); appSession.setRdaSessionStatus("REFUTED"); } else if (!switchService.digidRdaSwitchEnabled()){ digidClient.remoteLog("579", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); appSession.setRdaSessionStatus("REFUTED"); } else if (params.isVerified() && (SCANNING.equals(appSession.getRdaSessionStatus()) || SCANNING_FOREIGN.equals(appSession.getRdaSessionStatus()))) { appSession.setRdaSessionStatus("VERIFIED"); appAuthenticator.setSubstantieelActivatedAt(ZonedDateTime.now()); appAuthenticator.setSubstantieelDocumentType(params.getDocumentType().toLowerCase()); if (appAuthenticator.getWidActivatedAt() == null) { appAuthenticator.setIssuerType("rda"); } storeIdCheckDocument(params.getDocumentNumber(), params.getDocumentType(), appSession.getAccountId(), appAuthenticator.getUserAppId()); if (ID_CHECK_ACTION.equals(appSession.getRdaAction())) { digidClient.remoteLog("1321", Map.of("document_type", params.getDocumentType().toLowerCase(), lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId())); } else { digidClient.remoteLog("848", Map.of("document_type", params.getDocumentType().toLowerCase(), lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(), lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName())); } appAuthenticatorService.save(appAuthenticator); if(appSession.getFlow().equals(UpgradeLoginLevel.NAME)) { digidClient.sendNotificationMessage(appSession.getAccountId(), "ED024", "SMS20"); logger.debug("Sending notify email ED024 / SMS20 for device {}", appAuthenticator.getDeviceName()); } } appSession.setAppAuthenticationLevel(appAuthenticator.getAuthenticationLevel()); appSessionService.save(appSession); return Map.of("arrivalStatus", "OK"); }
@Test void checkDigidAppSwitchError(){ when(appSessionService.getSession(any())).thenReturn(appSession); when(appAuthenticatorService.findByUserAppId(any())).thenReturn(appAuthenticator); when(switchService.digidAppSwitchEnabled()).thenReturn(Boolean.FALSE); Map<String, String> result = rdaService.confirm(rdaConfirmRequest); verify(digidClient, times(1)).remoteLog("853", Map.of(lowerUnderscore(ACCOUNT_ID), T_ACCOUNT_ID, lowerUnderscore(HIDDEN), true)); assertEquals("REFUTED", appSession.getRdaSessionStatus()); assertEquals("OK", result.get("arrivalStatus")); }
public static Throwable getRootCause(Throwable throwable) { if (throwable == null) { return null; } Throwable rootCause = throwable; // this is to avoid infinite loops for recursive cases final Set<Throwable> seenThrowables = new HashSet<>(); seenThrowables.add(rootCause); while ((rootCause.getCause() != null && !seenThrowables.contains(rootCause.getCause()))) { seenThrowables.add(rootCause.getCause()); rootCause = rootCause.getCause(); } return rootCause; }
@Test void rootCauseIsDifferent() { Throwable rootCause = new Exception(); Throwable e = new Exception(rootCause); Throwable actualRootCause = ExceptionUtils.getRootCause(e); assertThat(actualRootCause).isSameAs(rootCause); }
static List<String> parseEtcResolverSearchDomains() throws IOException { return parseEtcResolverSearchDomains(new File(ETC_RESOLV_CONF_FILE)); }
@Test public void searchDomainsWithOnlySearch(@TempDir Path tempDir) throws IOException { File f = buildFile(tempDir, "search linecorp.local\n" + "nameserver 127.0.0.2\n"); List<String> domains = UnixResolverDnsServerAddressStreamProvider.parseEtcResolverSearchDomains(f); assertEquals(Collections.singletonList("linecorp.local"), domains); }
String getAgentStatusReportRequestBody(JobIdentifier identifier, String elasticAgentId) { JsonObject jsonObject = new JsonObject(); if (identifier != null) { jsonObject.add("job_identifier", jobIdentifierJson(identifier)); } jsonObject.addProperty("elastic_agent_id", elasticAgentId); return FORCED_EXPOSE_GSON.toJson(jsonObject); }
@Test public void shouldJSONizeElasticAgentStatusReportRequestBodyWhenElasticAgentIdIsProvided() throws Exception { String elasticAgentId = "my-fancy-elastic-agent-id"; String actual = new ElasticAgentExtensionConverterV4().getAgentStatusReportRequestBody(null, elasticAgentId); String expected = format("{" + " \"elastic_agent_id\": \"%s\"" + "}", elasticAgentId); assertThatJson(expected).isEqualTo(actual); }
@Override void toHtml() throws IOException { writeHtmlHeader(); htmlCoreReport.toHtml(); writeHtmlFooter(); }
@Test public void testJCache() throws IOException { final String cacheName = "test 1"; final javax.cache.CacheManager jcacheManager = Caching.getCachingProvider() .getCacheManager(); final MutableConfiguration<Object, Object> conf = new MutableConfiguration<>(); conf.setManagementEnabled(true); conf.setStatisticsEnabled(true); jcacheManager.createCache(cacheName, conf); // test empty cache name in the cache keys link: jcacheManager.createCache("", conf); final String cacheName2 = "test 2"; try { final javax.cache.Cache<Object, Object> cache = jcacheManager.getCache(cacheName); cache.put(1, Math.random()); cache.get(1); cache.get(0); final MutableConfiguration<Object, Object> conf2 = new MutableConfiguration<>(); conf2.setManagementEnabled(false); conf2.setStatisticsEnabled(false); jcacheManager.createCache(cacheName2, conf2); // JavaInformations must be re-instantiated to pick up the caches final List<JavaInformations> javaInformationsList2 = Collections .singletonList(new JavaInformations(null, true)); final HtmlReport htmlReport = new HtmlReport(collector, null, javaInformationsList2, Period.TOUT, writer); htmlReport.toHtml(null, null); assertNotEmptyAndClear(writer); setProperty(Parameter.SYSTEM_ACTIONS_ENABLED, "false"); htmlReport.toHtml(null, null); assertNotEmptyAndClear(writer); } finally { setProperty(Parameter.SYSTEM_ACTIONS_ENABLED, null); jcacheManager.destroyCache(cacheName); jcacheManager.destroyCache(cacheName2); } }
@Override public Map<String, ConfigChangeItem> doParse(String oldContent, String newContent, String type) { Map<String, Object> oldMap = Collections.emptyMap(); Map<String, Object> newMap = Collections.emptyMap(); try { Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions())); if (StringUtils.isNotBlank(oldContent)) { oldMap = yaml.load(oldContent); oldMap = getFlattenedMap(oldMap); } if (StringUtils.isNotBlank(newContent)) { newMap = yaml.load(newContent); newMap = getFlattenedMap(newMap); } } catch (MarkedYAMLException e) { handleYamlException(e); } return filterChangeData(oldMap, newMap); }
@Test void testModifyKey() throws IOException { Map<String, ConfigChangeItem> map = parser.doParse("app:\n name: rocketMQ", "app:\n name: nacos", type); assertEquals("rocketMQ", map.get("app.name").getOldValue()); assertEquals("nacos", map.get("app.name").getNewValue()); }
String validateElasticProfileRequestBody(Map<String, String> configuration) { JsonObject properties = mapToJsonObject(configuration); return new GsonBuilder().serializeNulls().create().toJson(properties); }
@Test public void shouldConstructValidationRequest() { HashMap<String, String> configuration = new HashMap<>(); configuration.put("key1", "value1"); configuration.put("key2", "value2"); configuration.put("key3", null); String requestBody = new ElasticAgentExtensionConverterV5().validateElasticProfileRequestBody(configuration); assertThatJson(requestBody).isEqualTo("{\"key3\":null,\"key2\":\"value2\",\"key1\":\"value1\"}"); }
public static String hex(char ch) { return Integer.toHexString(ch).toUpperCase(); }
@Test public void testHex() { Assertions.assertEquals("61", Utils.hex('a')); Assertions.assertEquals("24", Utils.hex('$')); }
@Override public void close() { close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); }
@Test void testReaperInvokedInClose() { consumer = newConsumer(); completeUnsubscribeApplicationEventSuccessfully(); consumer.close(); verify(backgroundEventReaper).reap(backgroundEventQueue); }
public void validateUrl(String serverUrl) { HttpUrl url = buildUrl(serverUrl, "/rest/api/1.0/repos"); doGet("", url, body -> buildGson().fromJson(body, RepositoryList.class)); }
@Test public void fail_validate_url_when_on_http_error() { server.enqueue(new MockResponse().setResponseCode(500) .setBody("something unexpected")); String serverUrl = server.url("/").toString(); assertThatThrownBy(() -> underTest.validateUrl(serverUrl)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Unable to contact Bitbucket server"); }
public static KeyStore loadKeyStore(final String name, final char[] password) { InputStream stream = null; try { stream = Config.getInstance().getInputStreamFromFile(name); if (stream == null) { String message = "Unable to load keystore '" + name + "', please provide the keystore matching the configuration in client.yml/server.yml to enable TLS connection."; if (logger.isErrorEnabled()) { logger.error(message); } throw new RuntimeException(message); } // try to load keystore as JKS try { KeyStore loadedKeystore = KeyStore.getInstance("JKS"); loadedKeystore.load(stream, password); return loadedKeystore; } catch (Exception e) { // if JKS fails, attempt to load as PKCS12 try { stream.close(); stream = Config.getInstance().getInputStreamFromFile(name); KeyStore loadedKeystore = KeyStore.getInstance("PKCS12"); loadedKeystore.load(stream, password); return loadedKeystore; } catch (Exception e2) { logger.error("Unable to load keystore " + name, e2); throw new RuntimeException("Unable to load keystore " + name, e2); } } } catch (Exception e) { logger.error("Unable to load stream for keystore " + name, e); throw new RuntimeException("Unable to load stream for keystore " + name, e); } finally { if (stream != null) { try { stream.close(); } catch (IOException e) { logger.error("Unable to close stream for keystore " + name, e); } } } }
@Test public void testLoadInvalidKeyStore() { try { TlsUtil.loadKeyStore(INVALID_KEYSTORE_NAME, PASSWORD); fail(); } catch (Exception e) { Assert.assertEquals("Unable to load stream for keystore " + INVALID_KEYSTORE_NAME, e.getMessage()); } try { TlsUtil.loadKeyStore(OTHER_EXTENTION, PASSWORD); fail(); } catch (Exception e) { Assert.assertEquals("Unable to load stream for keystore " + OTHER_EXTENTION, e.getMessage()); } }
int run() { final Map<String, String> configProps = options.getConfigFile() .map(Ksql::loadProperties) .orElseGet(Collections::emptyMap); final Map<String, String> sessionVariables = options.getVariables(); try (KsqlRestClient restClient = buildClient(configProps)) { try (Cli cli = cliBuilder.build( options.getStreamedQueryRowLimit(), options.getStreamedQueryTimeoutMs(), options.getOutputFormat(), restClient) ) { // Add CLI session variables if defined via command-line parameters cli.addSessionVariables(sessionVariables); if (options.getExecute().isPresent()) { return cli.runCommand(options.getExecute().get()); } else if (options.getScriptFile().isPresent()) { final File scriptFile = new File(options.getScriptFile().get()); if (scriptFile.exists() && scriptFile.isFile()) { return cli.runScript(scriptFile.getPath()); } else { throw new KsqlException("No such script file: " + scriptFile.getPath()); } } else { return cli.runInteractively(); } } } }
@Test public void shouldUseSslConfigInSystemConfigInPreferenceToAnyInConfigFile() throws Exception { // Given: givenConfigFile( "ssl.truststore.location=should not use" + System.lineSeparator() + "ssl.truststore.password=should not use" ); givenSystemProperties( "ssl.truststore.location", "some/path", "ssl.truststore.password", "letmein" ); // When: ksql.run(); // Then: verify(clientBuilder).build(any(), any(), eq(ImmutableMap.of( "ssl.truststore.location", "some/path", "ssl.truststore.password", "letmein" )), any(), any()); }
@VisibleForTesting Database getDatabase( LoggingObjectInterface parentObject, PGBulkLoaderMeta pgBulkLoaderMeta ) { DatabaseMeta dbMeta = pgBulkLoaderMeta.getDatabaseMeta(); // If dbNameOverride is present, clone the origin db meta and override the DB name String dbNameOverride = environmentSubstitute( pgBulkLoaderMeta.getDbNameOverride() ); if ( !Utils.isEmpty( dbNameOverride ) ) { dbMeta = (DatabaseMeta) pgBulkLoaderMeta.getDatabaseMeta().clone(); dbMeta.setDBName( dbNameOverride.trim() ); logDebug( "DB name overridden to the value: " + dbNameOverride ); } return new Database( parentObject, dbMeta ); }
@Test public void testDBNameOverridden_IfDbNameOverrideSetUp() throws Exception { // Db Name Override is set up PGBulkLoaderMeta pgBulkLoaderMock = getPgBulkLoaderMock( DB_NAME_OVVERRIDE ); Database database = pgBulkLoader.getDatabase( pgBulkLoader, pgBulkLoaderMock ); assertNotNull( database ); // Verify the DB name is overridden assertEquals( DB_NAME_OVVERRIDE, database.getDatabaseMeta().getDatabaseName() ); // Additionally, check the other connection information assertEquals( CONNECTION_NAME, database.getDatabaseMeta().getName() ); assertEquals( CONNECTION_DB_HOST, database.getDatabaseMeta().getHostname() ); assertEquals( CONNECTION_DB_PORT, database.getDatabaseMeta().getDatabasePortNumberString() ); assertEquals( CONNECTION_DB_USERNAME, database.getDatabaseMeta().getUsername() ); assertEquals( CONNECTION_DB_PASSWORD, database.getDatabaseMeta().getPassword() ); }
void registerColumnFamily(String columnFamilyName, ColumnFamilyHandle handle) { boolean columnFamilyAsVariable = options.isColumnFamilyAsVariable(); MetricGroup group = columnFamilyAsVariable ? metricGroup.addGroup(COLUMN_FAMILY_KEY, columnFamilyName) : metricGroup.addGroup(columnFamilyName); for (RocksDBProperty property : options.getProperties()) { RocksDBNativePropertyMetricView gauge = new RocksDBNativePropertyMetricView(handle, property); group.gauge(property.getRocksDBProperty(), gauge); } }
@Test void testReturnsUnsigned() throws Throwable { RocksDBExtension localRocksDBExtension = new RocksDBExtension(); localRocksDBExtension.before(); SimpleMetricRegistry registry = new SimpleMetricRegistry(); GenericMetricGroup group = new GenericMetricGroup( registry, UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(), OPERATOR_NAME); RocksDBNativeMetricOptions options = new RocksDBNativeMetricOptions(); options.enableSizeAllMemTables(); RocksDBNativeMetricMonitor monitor = new RocksDBNativeMetricMonitor( options, group, localRocksDBExtension.getRocksDB(), localRocksDBExtension.getDbOptions().statistics()); ColumnFamilyHandle handle = rocksDBExtension.createNewColumnFamily(COLUMN_FAMILY_NAME); monitor.registerColumnFamily(COLUMN_FAMILY_NAME, handle); RocksDBNativeMetricMonitor.RocksDBNativePropertyMetricView view = registry.propertyMetrics.get(0); view.setValue(-1); BigInteger result = view.getValue(); localRocksDBExtension.after(); assertThat(result.signum()) .withFailMessage("Failed to interpret RocksDB result as an unsigned long") .isOne(); }
public int getConnectionRequestTimeout() { return connectionRequestTimeout; }
@Test void testGetConnectionRequestTimeout() { HttpClientConfig config = HttpClientConfig.builder().setConnectionRequestTimeout(5000).build(); assertEquals(5000, config.getConnectionRequestTimeout()); }
@Override public Health check() { Platform.Status platformStatus = platform.status(); if (platformStatus == Platform.Status.UP && VALID_DATABASEMIGRATION_STATUSES.contains(migrationState.getStatus()) && !restartFlagHolder.isRestarting()) { return Health.GREEN; } return Health.builder() .setStatus(Health.Status.RED) .addCause("SonarQube webserver is not up") .build(); }
@Test public void returns_GREEN_without_cause_if_platform_status_is_UP_migration_status_is_valid_and_SQ_is_not_restarting() { when(platform.status()).thenReturn(Platform.Status.UP); when(migrationState.getStatus()).thenReturn(random.nextBoolean() ? DatabaseMigrationState.Status.NONE : DatabaseMigrationState.Status.SUCCEEDED); when(restartFlagHolder.isRestarting()).thenReturn(false); Health health = underTest.check(); assertThat(health).isEqualTo(Health.GREEN); }
@Override public SecurityGroup securityGroup(String sgId) { checkArgument(!Strings.isNullOrEmpty(sgId), ERR_NULL_SG_ID); return osSecurityGroupStore.securityGroup(sgId); }
@Test public void testGetSecurityGroupById() { createBasicSecurityGroups(); assertNotNull("Security group was not found", target.securityGroup(SECURITY_GROUP_ID_1)); assertNotNull("Security group was not found", target.securityGroup(SECURITY_GROUP_ID_2)); assertNull("Unknown security group ID should not be found", target.securityGroup(UNKNOWN_ID)); }
private Preconditions() { }
@Test public void testPreconditions(){ Preconditions.checkArgument(true); try{ Preconditions.checkArgument(false); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertNull(e.getMessage()); } Preconditions.checkArgument(true, "Message %s here", 10); try{ Preconditions.checkArgument(false, "Message %s here", 10); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertEquals("Message 10 here", e.getMessage()); } Preconditions.checkArgument(true, "Message %s here %s there", 10, 20); try{ Preconditions.checkArgument(false, "Message %s here %s there", 10, 20); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertEquals("Message 10 here 20 there", e.getMessage()); } Preconditions.checkArgument(true, "Message %s here %s there %s more", 10, 20, 30); try{ Preconditions.checkArgument(false, "Message %s here %s there %s more", 10, 20, 30); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertEquals("Message 10 here 20 there 30 more", e.getMessage()); } Preconditions.checkArgument(true, "Message %s here", 10L); try{ Preconditions.checkArgument(false, "Message %s here", 10L); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertEquals("Message 10 here", e.getMessage()); } Preconditions.checkArgument(true, "Message %s here %s there", 10L, 20L); try{ Preconditions.checkArgument(false, "Message %s here %s there", 10L, 20L); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertEquals("Message 10 here 20 there", e.getMessage()); } Preconditions.checkArgument(true, "Message %s here %s there %s more", 10L, 20L, 30L); try{ Preconditions.checkArgument(false, "Message %s here %s there %s more", 10L, 20L, 30L); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertEquals("Message 10 here 20 there 30 more", e.getMessage()); } Preconditions.checkArgument(true, "Message %s here %s there %s more", "A", "B", "C"); try{ Preconditions.checkArgument(false, "Message %s here %s there %s more", "A", "B", "C"); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e){ assertEquals("Message A here B there C more", e.getMessage()); } }
@Override public Collection<String> doSharding(final Collection<String> availableTargetNames, final ComplexKeysShardingValue<Comparable<?>> shardingValue) { if (!shardingValue.getColumnNameAndRangeValuesMap().isEmpty()) { ShardingSpherePreconditions.checkState(allowRangeQuery, () -> new UnsupportedSQLOperationException(String.format("Since the property of `%s` is false, inline sharding algorithm can not tackle with range query", ALLOW_RANGE_QUERY_KEY))); return availableTargetNames; } Map<String, Collection<Comparable<?>>> columnNameAndShardingValuesMap = shardingValue.getColumnNameAndShardingValuesMap(); ShardingSpherePreconditions.checkState(shardingColumns.isEmpty() || shardingColumns.size() == columnNameAndShardingValuesMap.size(), () -> new MismatchedComplexInlineShardingAlgorithmColumnAndValueSizeException(shardingColumns.size(), columnNameAndShardingValuesMap.size())); return flatten(columnNameAndShardingValuesMap).stream().map(this::doSharding).collect(Collectors.toList()); }
@Test void assertDoShardingWithMultiValue() { Properties props = PropertiesBuilder.build(new Property("algorithm-expression", "t_order_${type % 2}_${order_id % 2}"), new Property("sharding-columns", "type,order_id")); ComplexInlineShardingAlgorithm algorithm = (ComplexInlineShardingAlgorithm) TypedSPILoader.getService(ShardingAlgorithm.class, "COMPLEX_INLINE", props); List<String> availableTargetNames = Arrays.asList("t_order_0_0", "t_order_0_1", "t_order_1_0", "t_order_1_1"); Collection<String> actual = algorithm.doSharding(availableTargetNames, createComplexKeysShardingValue(Arrays.asList(1, 2))); assertTrue(actual.containsAll(availableTargetNames)); }
public static String toUriAuthority(NetworkEndpoint networkEndpoint) { return toHostAndPort(networkEndpoint).toString(); }
@Test public void toUriAuthority_withHostnameEndpoint_returnsHostname() { NetworkEndpoint hostnameEndpoint = NetworkEndpoint.newBuilder() .setType(NetworkEndpoint.Type.HOSTNAME) .setHostname(Hostname.newBuilder().setName("localhost")) .build(); assertThat(NetworkEndpointUtils.toUriAuthority(hostnameEndpoint)).isEqualTo("localhost"); }
@Override public void start() { if (!PluginConfigManager.getPluginConfig(DiscoveryPluginConfig.class).isEnableRegistry()) { return; } final LbConfig lbConfig = PluginConfigManager.getPluginConfig(LbConfig.class); maxSize = lbConfig.getMaxRetryConfigCache(); defaultRetry = Retry.create(DefaultRetryConfig.create()); initRetryPolicy(lbConfig); }
@Test public void disableAllTimeout() { lbConfig.setEnableSocketConnectTimeoutRetry(false); lbConfig.setEnableTimeoutExRetry(false); lbConfig.setEnableSocketReadTimeoutRetry(false); lbConfig.setSpecificExceptionsForRetry(Collections.singletonList("java.lang.IllegalArgumentException")); final RetryServiceImpl retryService = new RetryServiceImpl(); retryService.start(); final Optional<Object> defaultRetry = ReflectUtils.getFieldValue(retryService, "defaultRetry"); Assert.assertTrue(defaultRetry.isPresent() && defaultRetry.get() instanceof Retry); final Predicate<Throwable> throwablePredicate = ((Retry) defaultRetry.get()).config().getThrowablePredicate(); Assert.assertFalse(throwablePredicate.test(null)); Assert.assertFalse(throwablePredicate.test(new SocketTimeoutException("read timed out"))); Assert.assertFalse(throwablePredicate.test(new SocketTimeoutException("connect timed out"))); final Exception exception = new Exception("error", new SocketTimeoutException("connect timed out")); Assert.assertFalse(throwablePredicate.test(exception)); final Exception exception2 = new Exception("error", new SocketTimeoutException("read timed out")); Assert.assertFalse(throwablePredicate.test(exception2)); final TimeoutException timeoutException = new TimeoutException(); Assert.assertFalse(throwablePredicate.test(timeoutException)); final Exception timeoutException2 = new Exception("error", new TimeoutException("read timed out")); Assert.assertFalse(throwablePredicate.test(timeoutException2)); final IllegalArgumentException illegalArgumentException = new IllegalArgumentException(); Assert.assertTrue(throwablePredicate.test(illegalArgumentException)); }
@Override public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException { final Service service = kubernetesJobManagerParameters .getRestServiceExposedType() .serviceType() .buildUpExternalRestService(kubernetesJobManagerParameters); return Collections.singletonList(service); }
@Test void testSetServiceExposedTypeWithHeadless() throws IOException { this.flinkConfig.set( KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE, KubernetesConfigOptions.ServiceExposedType.Headless_ClusterIP); final List<HasMetadata> servicesWithHeadlessClusterIP = this.externalServiceDecorator.buildAccompanyingKubernetesResources(); assertThat(((Service) servicesWithHeadlessClusterIP.get(0)).getSpec().getType()) .isEqualTo(KubernetesConfigOptions.ServiceExposedType.ClusterIP.name()); assertThat(((Service) servicesWithHeadlessClusterIP.get(0)).getSpec().getClusterIP()) .isEqualTo(HeadlessClusterIPService.HEADLESS_CLUSTER_IP); }
@Override public int hashCode() { return Objects.hash(targetImage, imageDigest, imageId, tags, imagePushed); }
@Test public void testEquality_differentImageDigest() { JibContainer container1 = new JibContainer(targetImage1, digest1, digest2, tags1, true); JibContainer container2 = new JibContainer(targetImage1, digest2, digest2, tags1, true); Assert.assertNotEquals(container1, container2); Assert.assertNotEquals(container1.hashCode(), container2.hashCode()); }
public List<WorkflowInstance> getWorkflowInstancesWithLatestRun( String workflowId, long startInstanceId, long endInstanceId, boolean aggregated) { List<WorkflowInstance> instances = withMetricLogError( () -> withRetryableQuery( LATEST_RUN_WORKFLOW_INSTANCE_IDS_QUERY, stmt -> { int idx = 0; stmt.setString(++idx, workflowId); stmt.setLong(++idx, endInstanceId); stmt.setLong(++idx, startInstanceId); }, result -> { List<WorkflowInstance> rows = new ArrayList<>(); while (result.next()) { rows.add(workflowInstanceFromResult(result)); } return rows; }), "getLatestWorkflowInstanceRuns", "Failed to get workflow instances for workflow id: {} between instance id : {} and {}", workflowId, startInstanceId, endInstanceId); if (aggregated) { instances.forEach( instance -> instance.setAggregatedInfo( AggregatedViewHelper.computeAggregatedView(instance, true))); } return instances; }
@Test public void testGetWorkflowInstanceLatestRuns() throws Exception { initializeForGetWorkflowInstancesLatestRun(); // pagination call to get all the latest-run workflow instance IDs for a particular workflow id, with a null cursor List<WorkflowInstance> workflows = instanceDao.getWorkflowInstancesWithLatestRun(TEST_WORKFLOW_ID, 1, 100, false); workflows.sort(Comparator.comparing(WorkflowInstance::getWorkflowInstanceId).reversed()); assertNotNull(workflows); assertEquals(10, workflows.size()); for (int i = 0; i < 10; i++) { WorkflowInstance instance = workflows.get(i); assertNotNull(instance); assertEquals(TEST_WORKFLOW_ID, instance.getWorkflowId()); assertEquals(10 - i, instance.getWorkflowInstanceId()); assertEquals(109 - i, instance.getWorkflowRunId()); } // pagination NEXT call to get a subset of pages, which fills the entire page request workflows = instanceDao.getWorkflowInstancesWithLatestRun(TEST_WORKFLOW_ID, 2, 6, false); workflows.sort(Comparator.comparing(WorkflowInstance::getWorkflowInstanceId).reversed()); assertNotNull(workflows); assertEquals(5, workflows.size()); for (int i = 0; i < workflows.size(); i++) { WorkflowInstance instance = workflows.get(i); assertNotNull(instance); assertEquals(TEST_WORKFLOW_ID, instance.getWorkflowId()); assertEquals(6 - i, instance.getWorkflowInstanceId()); assertEquals(105 - i, instance.getWorkflowRunId()); } // pagination NEXT call to get a subset of pages, which fills only a portion of the requested page workflows = instanceDao.getWorkflowInstancesWithLatestRun(TEST_WORKFLOW_ID, 1, 3, false); workflows.sort(Comparator.comparing(WorkflowInstance::getWorkflowInstanceId).reversed()); assertNotNull(workflows); assertEquals(3, workflows.size()); for (int i = 0; i < workflows.size(); i++) { WorkflowInstance instance = workflows.get(i); assertNotNull(instance); assertEquals(TEST_WORKFLOW_ID, instance.getWorkflowId()); assertEquals(3 - i, instance.getWorkflowInstanceId()); assertEquals(102 - i, instance.getWorkflowRunId()); } // batch call that returns nothing workflows = instanceDao.getWorkflowInstancesWithLatestRun(TEST_WORKFLOW_ID, 0, 0, false); assertNotNull(workflows); assertTrue(workflows.isEmpty()); // non-existing workflow id workflows = instanceDao.getWorkflowInstancesWithLatestRun("sample-dag-test-random", 1, 1, false); assertNotNull(workflows); assertTrue(workflows.isEmpty()); cleanupForGetWorkflowInstancesLatestRun(); }
@Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook); defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis())); try { String key = commandLine.getOptionValue('k').trim(); String value = commandLine.getOptionValue('v').trim(); Properties properties = new Properties(); properties.put(key, value); if (commandLine.hasOption('b')) { String brokerAddr = commandLine.getOptionValue('b').trim(); defaultMQAdminExt.start(); defaultMQAdminExt.updateBrokerConfig(brokerAddr, properties); System.out.printf("update broker config success, %s\n", brokerAddr); return; } else if (commandLine.hasOption('c')) { String clusterName = commandLine.getOptionValue('c').trim(); defaultMQAdminExt.start(); Set<String> brokerAddrSet; if (commandLine.hasOption('a')) { brokerAddrSet = CommandUtil.fetchMasterAndSlaveAddrByClusterName(defaultMQAdminExt, clusterName); } else { brokerAddrSet = CommandUtil.fetchMasterAddrByClusterName(defaultMQAdminExt, clusterName); } for (String brokerAddr : brokerAddrSet) { try { defaultMQAdminExt.updateBrokerConfig(brokerAddr, properties); System.out.printf("update broker config success, %s\n", brokerAddr); } catch (Exception e) { e.printStackTrace(); } } return; } ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options); } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { defaultMQAdminExt.shutdown(); } }
@Test public void testExecute() throws SubCommandException { UpdateBrokerConfigSubCommand cmd = new UpdateBrokerConfigSubCommand(); Options options = ServerUtil.buildCommandlineOptions(new Options()); String[] subargs = new String[] {"-b 127.0.0.1:" + listenPort(), "-c default-cluster", "-k topicname", "-v unit_test"}; final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new DefaultParser()); cmd.execute(commandLine, options, null); }
public static AppsInfo mergeAppsInfo(ArrayList<AppInfo> appsInfo, boolean returnPartialResult) { AppsInfo allApps = new AppsInfo(); Map<String, AppInfo> federationAM = new HashMap<>(); Map<String, AppInfo> federationUAMSum = new HashMap<>(); for (AppInfo a : appsInfo) { // Check if this AppInfo is an AM if (a.getAMHostHttpAddress() != null) { // Insert into the list of AMs federationAM.put(a.getAppId(), a); // Check if any UAMs were found before if (federationUAMSum.containsKey(a.getAppId())) { // Merge the current AM with the found UAM mergeAMWithUAM(a, federationUAMSum.get(a.getAppId())); // Remove the sum of the UAMs federationUAMSum.remove(a.getAppId()); } // This AppInfo is a UAM } else { if (federationAM.containsKey(a.getAppId())) { // Merge the current UAM with its own AM mergeAMWithUAM(federationAM.get(a.getAppId()), a); } else if (federationUAMSum.containsKey(a.getAppId())) { // Merge the current UAM with the accumulated UAM and update the list of UAMs federationUAMSum.put(a.getAppId(), mergeUAMWithUAM(federationUAMSum.get(a.getAppId()), a)); } else { // Insert into the list of UAMs federationUAMSum.put(a.getAppId(), a); } } } // Keep the remaining UAMs only if partial results are allowed or they do not originate from federation for (AppInfo a : federationUAMSum.values()) { if (returnPartialResult || (a.getName() != null && !(a.getName().startsWith(UnmanagedApplicationManager.APP_NAME) || a.getName().startsWith(PARTIAL_REPORT)))) { federationAM.put(a.getAppId(), a); } } allApps.addAll(new ArrayList<>(federationAM.values())); return allApps; }
@Test public void testMergeUAM() { AppsInfo apps = new AppsInfo(); AppInfo app1 = new AppInfo(); app1.setAppId(APPID1.toString()); app1.setName("Test"); apps.add(app1); // in this case the result does not change if we enable partial result AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false); Assert.assertNotNull(result); Assert.assertEquals(1, result.getApps().size()); }
@Override public void onMatch(RelOptRuleCall call) { final Sort sort = call.rel(0); final SortExchange exchange = call.rel(1); final RelMetadataQuery metadataQuery = call.getMetadataQuery(); if (RelMdUtil.checkInputForCollationAndLimit( metadataQuery, exchange.getInput(), sort.getCollation(), sort.offset, sort.fetch)) { // Don't rewrite anything if the input is already sorted AND the // input node would already return fewer than sort.offset + sort.fetch // rows (e.g. there is already an inner limit applied) return; } RelCollation collation = sort.getCollation(); Preconditions.checkArgument( collation.equals(exchange.getCollation()), "Expected collation on exchange and sort to be the same" ); final RexNode fetch; if (sort.fetch == null) { fetch = null; } else if (sort.offset == null) { fetch = sort.fetch; } else { int total = RexExpressionUtils.getValueAsInt(sort.fetch) + RexExpressionUtils.getValueAsInt(sort.offset); fetch = REX_BUILDER.makeLiteral(total, TYPE_FACTORY.createSqlType(SqlTypeName.INTEGER)); } // do not transform sort-exchange copy when there's no fetch limit, or fetch amount is larger than threshold if (!collation.getFieldCollations().isEmpty() && (fetch == null || RexExpressionUtils.getValueAsInt(fetch) > DEFAULT_SORT_EXCHANGE_COPY_THRESHOLD)) { return; } final RelNode newExchangeInput = sort.copy(sort.getTraitSet(), exchange.getInput(), collation, null, fetch); final RelNode exchangeCopy = exchange.copy(exchange.getTraitSet(), newExchangeInput, exchange.getDistribution()); final RelNode sortCopy = sort.copy(sort.getTraitSet(), exchangeCopy, collation, sort.offset == null ? REX_ZERO : sort.offset, sort.fetch); call.transformTo(sortCopy); }
@Test public void shouldMatchLimitNoOffsetYesSortOnSender() { // Given: RelCollation collation = RelCollations.of(1); SortExchange exchange = PinotLogicalSortExchange.create(_input, RelDistributions.SINGLETON, collation, true, false); Sort sort = LogicalSort.create(exchange, collation, null, literal(1)); Mockito.when(_call.rel(0)).thenReturn(sort); Mockito.when(_call.rel(1)).thenReturn(exchange); // When: PinotSortExchangeCopyRule.SORT_EXCHANGE_COPY.onMatch(_call); // Then: ArgumentCaptor<RelNode> sortCopyCapture = ArgumentCaptor.forClass(LogicalSort.class); Mockito.verify(_call, Mockito.times(1)).transformTo(sortCopyCapture.capture()); RelNode sortCopy = sortCopyCapture.getValue(); Assert.assertTrue(sortCopy instanceof LogicalSort); Assert.assertTrue(((LogicalSort) sortCopy).getInput() instanceof PinotLogicalSortExchange); Assert.assertTrue(((LogicalSort) sortCopy).getInput().getInput(0) instanceof LogicalSort); LogicalSort innerSort = (LogicalSort) ((LogicalSort) sortCopy).getInput().getInput(0); Assert.assertEquals(innerSort.getCollation().getKeys().size(), 1); Assert.assertNull((innerSort).offset); Assert.assertEquals((innerSort).fetch, literal(1)); }
@Override public void execute(final List<String> args, final PrintWriter terminal) { CliCmdUtil.ensureArgCountBounds(args, 0, 1, HELP); if (args.isEmpty()) { final String setting = requestPipeliningSupplier.get() ? "ON" : "OFF"; terminal.printf("Current %s configuration: %s%n", NAME, setting); } else { final String newSetting = args.get(0); switch (newSetting.toUpperCase()) { case "ON": requestPipeliningConsumer.accept(true); break; case "OFF": requestPipeliningConsumer.accept(false); break; default: terminal.printf("Invalid %s setting: %s. ", NAME, newSetting); terminal.println("Valid options are 'ON' and 'OFF'."); return; } terminal.println(NAME + " configuration is now " + newSetting.toUpperCase()); } }
@Test public void shouldPrintCurrentSettingOfOff() { // Given: when(settingSupplier.get()).thenReturn(false); // When: requestPipeliningCommand.execute(Collections.emptyList(), terminal); // Then: assertThat(out.toString(), containsString(String.format("Current %s configuration: OFF", RequestPipeliningCommand.NAME))); }
public static boolean isInstanceOf(Class<?> clazz, Object object) { return clazz.isInstance(object); }
@Test public void testIsInstance() { Object object = new ZTest(); assertTrue(TypeUtil.isInstanceOf(I0Test.class, object)); assertTrue(TypeUtil.isInstanceOf("py4j.reflection.I0Test", object)); object = new ATest(); assertFalse(TypeUtil.isInstanceOf(I0Test.class, object)); assertFalse(TypeUtil.isInstanceOf("py4j.reflection.I0Test", object)); }
public static List<ReservationAllocationState> convertAllocationsToReservationInfo(Set<ReservationAllocation> res, boolean includeResourceAllocations) { List<ReservationAllocationState> reservationInfo = new ArrayList<>(); Map<ReservationInterval, Resource> requests; for (ReservationAllocation allocation : res) { List<ResourceAllocationRequest> allocations = new ArrayList<>(); if (includeResourceAllocations) { requests = allocation.getAllocationRequests(); for (Map.Entry<ReservationInterval, Resource> request : requests.entrySet()) { ReservationInterval interval = request.getKey(); allocations.add(ResourceAllocationRequest.newInstance( interval.getStartTime(), interval.getEndTime(), request.getValue())); } } reservationInfo.add(ReservationAllocationState.newInstance( allocation.getAcceptanceTime(), allocation.getUser(), allocations, allocation.getReservationId(), allocation.getReservationDefinition())); } return reservationInfo; }
@Test public void testConvertAllocationsToReservationInfo() { long startTime = new Date().getTime(); long step = 10000; int[] alloc = {10, 10, 10}; ReservationId id = ReservationSystemTestUtil.getNewReservationId(); ReservationAllocation allocation = createReservationAllocation( startTime, startTime + 10 * step, step, alloc, id, createResource(4000, 2)); List<ReservationAllocationState> infoList = ReservationSystemUtil .convertAllocationsToReservationInfo( Collections.singleton(allocation), true); assertThat(infoList).hasSize(1); assertThat(infoList.get(0).getReservationId().toString()).isEqualTo( id.toString()); Assert.assertFalse(infoList.get(0).getResourceAllocationRequests() .isEmpty()); }
@CanIgnoreReturnValue public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) { List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs); return containsExactlyElementsIn( expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable); }
@Test public void iterableContainsExactlyInOrderWithFailure() { expectFailureWhenTestingThat(asList(1, null, 3)).containsExactly(null, 1, 3).inOrder(); assertFailureKeys("contents match, but order was wrong", "expected", "but was"); assertFailureValue("expected", "[null, 1, 3]"); }
@Override public boolean removeServiceSubscriber(Service service) { if (null != subscribers.remove(service)) { MetricsMonitor.decrementSubscribeCount(); } return true; }
@Test void removeServiceSubscriber() { boolean result = abstractClient.removeServiceSubscriber(service); assertTrue(result); }
public static ObjectEncoder createEncoder(Type type, ObjectInspector inspector) { String base = type.getTypeSignature().getBase(); switch (base) { case BIGINT: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> ((Long) o)); case INTEGER: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> ((Integer) o).longValue()); case SMALLINT: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> ((Short) o).longValue()); case TINYINT: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> ((Byte) o).longValue()); case BOOLEAN: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> ((Boolean) o)); case DATE: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> ((Date) o).getTime()); case DECIMAL: if (Decimals.isShortDecimal(type)) { DecimalType decimalType = (DecimalType) type; return compose(decimal(inspector), o -> DecimalUtils.encodeToLong((BigDecimal) o, decimalType)); } else if (Decimals.isLongDecimal(type)) { DecimalType decimalType = (DecimalType) type; return compose(decimal(inspector), o -> DecimalUtils.encodeToSlice((BigDecimal) o, decimalType)); } break; case REAL: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> floatToRawIntBits(((Number) o).floatValue())); case DOUBLE: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> (Double) o); case TIMESTAMP: checkArgument(inspector instanceof PrimitiveObjectInspector); return compose(primitive(inspector), o -> ((Timestamp) o).getTime()); case VARBINARY: if (inspector instanceof BinaryObjectInspector) { return compose(primitive(inspector), o -> Slices.wrappedBuffer(((byte[]) o))); } break; case VARCHAR: if (inspector instanceof StringObjectInspector) { return compose(primitive(inspector), o -> Slices.utf8Slice(o.toString())); } else if (inspector instanceof HiveVarcharObjectInspector) { return compose(o -> ((HiveVarcharObjectInspector) inspector).getPrimitiveJavaObject(o).getValue(), o -> Slices.utf8Slice(((String) o))); } break; case CHAR: if (inspector instanceof StringObjectInspector) { return compose(primitive(inspector), o -> Slices.utf8Slice(o.toString())); } else if (inspector instanceof HiveCharObjectInspector) { return compose(o -> ((HiveCharObjectInspector) inspector).getPrimitiveJavaObject(o).getValue(), o -> Slices.utf8Slice(((String) o))); } break; case ROW: return StructObjectEncoder.create(type, inspector); case ARRAY: return ListObjectEncoder.create(type, inspector); case MAP: return MapObjectEncoder.create(type, inspector); } throw unsupportedType(type); }
@Test public void testComplexObjectEncoders() { ObjectInspector inspector; ObjectEncoder encoder; inspector = ObjectInspectors.create(new ArrayType(BIGINT), typeManager); encoder = createEncoder(new ArrayType(BIGINT), inspector); assertTrue(encoder instanceof ObjectEncoders.ListObjectEncoder); Object arrayObject = encoder.encode(new Long[]{1L, 2L, 3L}); assertTrue(arrayObject instanceof LongArrayBlock); assertEquals(((LongArrayBlock) arrayObject).getLong(0), 1L); assertEquals(((LongArrayBlock) arrayObject).getLong(1), 2L); assertEquals(((LongArrayBlock) arrayObject).getLong(2), 3L); inspector = ObjectInspectors.create(new MapType( VARCHAR, BIGINT, methodHandle(TestRowType.class, "throwUnsupportedOperation"), methodHandle(TestRowType.class, "throwUnsupportedOperation")), typeManager); encoder = createEncoder(new MapType( VARCHAR, BIGINT, methodHandle(TestRowType.class, "throwUnsupportedOperation"), methodHandle(TestRowType.class, "throwUnsupportedOperation")), inspector); assertTrue(encoder instanceof ObjectEncoders.MapObjectEncoder); assertTrue(encoder.encode(new HashMap<String, Long>(){}) instanceof SingleMapBlock); }
@Override public void open() throws Exception { super.open(); windowSerializer = windowAssigner.getWindowSerializer(new ExecutionConfig()); internalTimerService = getInternalTimerService("window-timers", windowSerializer, this); triggerContext = new TriggerContext(); triggerContext.open(); StateDescriptor<ListState<RowData>, List<RowData>> windowStateDescriptor = new ListStateDescriptor<>("window-input", new RowDataSerializer(inputType)); StateDescriptor<ListState<RowData>, List<RowData>> dataRetractStateDescriptor = new ListStateDescriptor<>("data-retract", new RowDataSerializer(inputType)); this.windowAccumulateData = (InternalListState<K, W, RowData>) getOrCreateKeyedState(windowSerializer, windowStateDescriptor); this.windowRetractData = (InternalListState<K, W, RowData>) getOrCreateKeyedState(windowSerializer, dataRetractStateDescriptor); inputKeyAndWindow = new LinkedList<>(); windowProperty = new GenericRowData(namedProperties.length); windowAggResult = new JoinedRowData(); WindowContext windowContext = new WindowContext(); windowAssigner.open(windowContext); }
@Test void testFinishBundleTriggeredByCount() throws Exception { Configuration conf = new Configuration(); conf.set(PythonOptions.MAX_BUNDLE_SIZE, 4); OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = getTestHarness(conf); long initialTime = 0L; ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>(); testHarness.open(); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c2", 0L, 0L), initialTime + 1)); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c4", 1L, 6000L), initialTime + 2)); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c6", 2L, 10000L), initialTime + 3)); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c2", "c8", 3L, 0L), initialTime + 4)); testHarness.processWatermark(new Watermark(10000L)); expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 0L, TimestampData.fromEpochMillis(-5000L), TimestampData.fromEpochMillis(5000L)))); expectedOutput.add( new StreamRecord<>( newRow( true, "c2", 3L, TimestampData.fromEpochMillis(-5000L), TimestampData.fromEpochMillis(5000L)))); expectedOutput.add( new StreamRecord<>( newRow( true, "c2", 3L, TimestampData.fromEpochMillis(0L), TimestampData.fromEpochMillis(10000L)))); expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 0L, TimestampData.fromEpochMillis(0L), TimestampData.fromEpochMillis(10000L)))); expectedOutput.add(new Watermark(10000L)); assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput()); testHarness.processWatermark(20000L); testHarness.close(); expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 1L, TimestampData.fromEpochMillis(5000L), TimestampData.fromEpochMillis(15000L)))); expectedOutput.add( new StreamRecord<>( newRow( true, "c1", 2L, TimestampData.fromEpochMillis(10000L), TimestampData.fromEpochMillis(20000L)))); expectedOutput.add(new Watermark(20000L)); assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput()); }
@VisibleForTesting String upload(Configuration config, String artifactUriStr) throws IOException, URISyntaxException { final URI artifactUri = PackagedProgramUtils.resolveURI(artifactUriStr); if (!"local".equals(artifactUri.getScheme())) { return artifactUriStr; } final String targetDir = config.get(KubernetesConfigOptions.LOCAL_UPLOAD_TARGET); checkArgument( !StringUtils.isNullOrWhitespaceOnly(targetDir), String.format( "Setting '%s' to a valid remote path is required.", KubernetesConfigOptions.LOCAL_UPLOAD_TARGET.key())); final FileSystem.WriteMode writeMode = config.get(KubernetesConfigOptions.LOCAL_UPLOAD_OVERWRITE) ? FileSystem.WriteMode.OVERWRITE : FileSystem.WriteMode.NO_OVERWRITE; final File src = new File(artifactUri.getPath()); final Path target = new Path(targetDir, src.getName()); if (target.getFileSystem().exists(target) && writeMode == FileSystem.WriteMode.NO_OVERWRITE) { LOG.info( "Skip uploading artifact '{}', as it already exists." + " To overwrite existing artifacts, please set the '{}' config option.", target, KubernetesConfigOptions.LOCAL_UPLOAD_OVERWRITE.key()); } else { final long start = System.currentTimeMillis(); final FileSystem fs = target.getFileSystem(); try (FSDataOutputStream os = fs.create(target, writeMode)) { FileUtils.copyFile(src, os); } LOG.debug( "Copied file from {} to {}, cost {} ms", src, target, System.currentTimeMillis() - start); } return target.toString(); }
@Test void testUploadOverwrite() throws Exception { File jar = getFlinkKubernetesJar(); String localUri = "local://" + jar.getAbsolutePath(); Files.createFile(tmpDir.resolve(jar.getName())); config.set(KubernetesConfigOptions.LOCAL_UPLOAD_OVERWRITE, true); artifactUploader.upload(config, localUri); assertThat(dummyFs.getExistsCallCounter()).isEqualTo(2); assertThat(dummyFs.getCreateCallCounter()).isOne(); }
public String encode(long... numbers) { if (numbers.length == 0) { return ""; } for (final long number : numbers) { if (number < 0) { return ""; } if (number > MAX_NUMBER) { throw new IllegalArgumentException("number can not be greater than " + MAX_NUMBER + "L"); } } return this._encode(numbers); }
@Test public void test_issue32() throws Exception { final long num_to_hash = -1; final Hashids a = new Hashids("this is my salt"); Assert.assertEquals("", a.encode(num_to_hash)); }
@Override protected int command() { if (!validateConfigFilePresent()) { return 1; } final MigrationConfig config; try { config = MigrationConfig.load(getConfigFile()); } catch (KsqlException | MigrationException e) { LOGGER.error(e.getMessage()); return 1; } return command( config, MigrationsUtil::getKsqlClient, getMigrationsDir(getConfigFile(), config), Clock.systemDefaultZone() ); }
@Test public void shouldApplyAssertSchemaCommands() throws Exception { command = PARSER.parse("-v", "3"); createMigrationFile(1, NAME, migrationsDir, COMMAND); createMigrationFile(3, NAME, migrationsDir, ASSERT_SCHEMA_COMMANDS); givenCurrentMigrationVersion("1"); givenAppliedMigration(1, NAME, MigrationState.MIGRATED); // When: final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir, Clock.fixed( Instant.ofEpochMilli(1000), ZoneId.systemDefault())); // Then: assertThat(result, is(0)); final InOrder inOrder = inOrder(ksqlClient); verifyMigratedVersion(inOrder, 3, "1", MigrationState.MIGRATED, () -> { inOrder.verify(ksqlClient).assertSchema("abc", true); inOrder.verify(ksqlClient).assertSchema(6, false); inOrder.verify(ksqlClient).assertSchema("abc", 6, true); inOrder.verify(ksqlClient).assertSchema("abc", true, Duration.ofSeconds(10)); inOrder.verify(ksqlClient).assertSchema(6, true, Duration.ofSeconds(10)); inOrder.verify(ksqlClient).assertSchema("abc", 6, false, Duration.ofSeconds(10)); }); inOrder.verify(ksqlClient).close(); inOrder.verifyNoMoreInteractions(); }
public static <T> Partition<T> of( int numPartitions, PartitionWithSideInputsFn<? super T> partitionFn, Requirements requirements) { Contextful ctfFn = Contextful.fn( (T element, Contextful.Fn.Context c) -> partitionFn.partitionFor(element, numPartitions, c), requirements); return new Partition<>(new PartitionDoFn<T>(numPartitions, ctfFn, partitionFn)); }
@Test @Category(NeedsRunner.class) public void testOutOfBoundsPartitions() { pipeline.apply(Create.of(-1)).apply(Partition.of(5, new IdentityFn())); thrown.expect(RuntimeException.class); thrown.expectMessage("Partition function returned out of bounds index: -1 not in [0..5)"); pipeline.run(); }
public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); // We need consistent ordering Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); }
@Test void testThatLinguisticsIsExcludedForClusterControllerCluster() { MockRoot root = createRoot(false); ClusterControllerContainerCluster cluster = createClusterControllerCluster(root); addClusterController(cluster, "host-c1", root.getDeployState()); assertFalse(contains("com.yahoo.language.provider.DefaultLinguisticsProvider", cluster.getAllComponents())); }
public static Properties getProperties(File file) throws AnalysisException { try (BufferedReader utf8Reader = Files.newBufferedReader(file.toPath(), StandardCharsets.UTF_8)) { return getProperties(utf8Reader); } catch (IOException | IllegalArgumentException e) { throw new AnalysisException("Error parsing PyPA core-metadata file", e); } }
@Test public void getProperties_should_throw_exception_for_too_large_major() throws IOException { try { PyPACoreMetadataParser.getProperties(new BufferedReader(new StringReader("Metadata-Version: 3.0"))); Assert.fail("Expected IllegalArgumentException for too large major in Metadata-Version"); } catch (IllegalArgumentException e) { Assert.assertTrue(e.getMessage().contains("Unsupported PyPA Wheel metadata")); } }
synchronized void add(int splitCount) { int pos = count % history.length; history[pos] = splitCount; count += 1; }
@Test public void testThreeMoreThanFullHistory() { EnumerationHistory history = new EnumerationHistory(3); history.add(1); history.add(2); history.add(3); history.add(4); history.add(5); history.add(6); int[] expectedHistorySnapshot = {4, 5, 6}; testHistory(history, expectedHistorySnapshot); }
public abstract boolean exists();
@Test public void testApplicationFileExists() throws Exception { assertTrue(getApplicationFile(Path.fromString("vespa-services.xml")).exists()); assertTrue(getApplicationFile(Path.fromString("searchdefinitions")).exists()); assertTrue(getApplicationFile(Path.fromString("searchdefinitions/sock.sd")).exists()); assertFalse(getApplicationFile(Path.fromString("doesnotexist")).exists()); }
@Override public boolean nullsAreSortedAtEnd() { return false; }
@Test void assertNullsAreSortedAtEnd() { assertFalse(metaData.nullsAreSortedAtEnd()); }
@SuppressWarnings("unchecked") public static String jobName(RunContext runContext) { Map<String, String> flow = (Map<String, String>) runContext.getVariables().get("flow"); Map<String, String> task = (Map<String, String>) runContext.getVariables().get("task"); String name = Slugify.of(String.join( "-", flow.get("namespace"), flow.get("id"), task.get("id") )); String normalized = normalizeValue(name, true, true); if (normalized.length() > 58) { normalized = normalized.substring(0, 57); } // we add a suffix of 5 chars, this should be enough as it's the standard k8s way String suffix = RandomStringUtils.randomAlphanumeric(5).toLowerCase(); return normalized + "-" + suffix; }
@Test void jobName() { var runContext = runContext(runContextFactory, "namespace"); String jobName = ScriptService.jobName(runContext); assertThat(jobName, startsWith("namespace-flowid-task-")); assertThat(jobName.length(), is(27)); runContext = runContext(runContextFactory, "very.very.very.very.very.very.very.very.very.very.very.very.long.namespace"); jobName = ScriptService.jobName(runContext); assertThat(jobName, startsWith("veryveryveryveryveryveryveryveryveryveryveryverylongnames-")); assertThat(jobName.length(), is(63)); }
@VisibleForTesting static Optional<String> performUpdateCheck( Path configDir, String currentVersion, String versionUrl, String toolName, Consumer<LogEvent> log) { Path lastUpdateCheck = configDir.resolve(LAST_UPDATE_CHECK_FILENAME); try { // Check time of last update check if (Files.exists(lastUpdateCheck)) { try { String fileContents = new String(Files.readAllBytes(lastUpdateCheck), StandardCharsets.UTF_8); Instant modifiedTime = Instant.parse(fileContents); if (modifiedTime.plus(Duration.ofDays(1)).isAfter(Instant.now())) { return Optional.empty(); } } catch (DateTimeParseException | IOException ex) { // If reading update time failed, file might be corrupt, so delete it log.accept(LogEvent.debug("Failed to read lastUpdateCheck; " + ex.getMessage())); Files.delete(lastUpdateCheck); } } // Check for update FailoverHttpClient httpClient = new FailoverHttpClient(true, false, ignored -> {}); try { Response response = httpClient.get( new URL(versionUrl), Request.builder() .setHttpTimeout(3000) .setUserAgent("jib " + currentVersion + " " + toolName) .build()); VersionJsonTemplate version = JsonTemplateMapper.readJson(response.getBody(), VersionJsonTemplate.class); Path lastUpdateCheckTemp = Files.createTempFile(configDir, LAST_UPDATE_CHECK_FILENAME, null); lastUpdateCheckTemp.toFile().deleteOnExit(); Files.write(lastUpdateCheckTemp, Instant.now().toString().getBytes(StandardCharsets.UTF_8)); Files.move(lastUpdateCheckTemp, lastUpdateCheck, StandardCopyOption.REPLACE_EXISTING); if (currentVersion.equals(version.latest)) { return Optional.empty(); } return Optional.of(version.latest); } finally { httpClient.shutDown(); } } catch (IOException ex) { log.accept(LogEvent.debug("Update check failed; " + ex.getMessage())); } return Optional.empty(); }
@Test
public void testPerformUpdateCheck_badLastUpdateTime() throws IOException, InterruptedException {
  Instant before = Instant.now();
  Thread.sleep(100);
  Files.write(
      configDir.resolve("lastUpdateCheck"), "bad timestamp".getBytes(StandardCharsets.UTF_8));
  Optional<String> message =
      UpdateChecker.performUpdateCheck(
          configDir, "1.0.2", testWebServer.getEndpoint(), "tool-name", ignored -> {});
  String modifiedTime =
      new String(
          Files.readAllBytes(configDir.resolve("lastUpdateCheck")), StandardCharsets.UTF_8);
  assertThat(Instant.parse(modifiedTime)).isGreaterThan(before);
  assertThat(message).hasValue("2.0.0");
}
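A hedged sketch of the 24-hour throttle at the top of performUpdateCheck, extracted as a standalone helper; the single-Instant file format matches what the focal method reads and writes, while the class and method names are inventions for illustration.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;

public class UpdateThrottleSketch {
    // True if the timestamp file records a check within the last 24 hours.
    static boolean checkedRecently(Path lastUpdateCheck) throws IOException {
        if (!Files.exists(lastUpdateCheck)) {
            return false;
        }
        String contents = new String(Files.readAllBytes(lastUpdateCheck), StandardCharsets.UTF_8);
        // throws DateTimeParseException on a corrupt file, the case the test above exercises
        Instant lastCheck = Instant.parse(contents);
        return lastCheck.plus(Duration.ofDays(1)).isAfter(Instant.now());
    }
}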
@Override
protected void log(String configKey, String format, Object... args) {
    // Not using SLF4J's support for parameterized messages (even though it would be
    // more efficient) because it would require the incoming message formats to be
    // SLF4J-specific.
    if (logger.isDebugEnabled()) {
        logger.debug(String.format(methodTag(configKey) + format, args));
    }
}
@Test
void useSpecifiedLoggerIfRequested() throws Exception {
    slf4j.logLevel("debug");
    slf4j.expectMessages(
        "DEBUG specified.logger - [someMethod] This is my message" + System.lineSeparator());

    logger = new Slf4jLogger(LoggerFactory.getLogger("specified.logger"));

    logger.log(CONFIG_KEY, "This is my message");
}
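One consequence of routing every message through String.format, shown as a usage note rather than asserted library behavior: a literal percent sign in a message must be escaped, or format() will throw at log time.

// Assuming the same Slf4jLogger as above: '%' must be doubled because the
// message string becomes the String.format format string.
logger.log(CONFIG_KEY, "upload 50%% complete"); // logs "... upload 50% complete"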
@POST
@Path("{networkId}/devices")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response createVirtualDevice(@PathParam("networkId") long networkId,
                                    InputStream stream) {
    try {
        ObjectNode jsonTree = readTreeFromStream(mapper(), stream);
        final VirtualDevice vdevReq = codec(VirtualDevice.class).decode(jsonTree, this);
        JsonNode specifiedNetworkId = jsonTree.get("networkId");
        if (specifiedNetworkId == null || specifiedNetworkId.asLong() != networkId) {
            throw new IllegalArgumentException(INVALID_FIELD + "networkId");
        }
        final VirtualDevice vdevRes = vnetAdminService.createVirtualDevice(vdevReq.networkId(),
                                                                           vdevReq.id());
        UriBuilder locationBuilder = uriInfo.getBaseUriBuilder()
                .path("vnets").path(specifiedNetworkId.asText())
                .path("devices").path(vdevRes.id().toString());
        return Response
                .created(locationBuilder.build())
                .build();
    } catch (IOException e) {
        throw new IllegalArgumentException(e);
    }
}
@Test
public void testPostVirtualDevice() {
    NetworkId networkId = networkId3;
    DeviceId deviceId = devId2;
    expect(mockVnetAdminService.createVirtualDevice(networkId, deviceId)).andReturn(vdev2);
    expectLastCall();
    replay(mockVnetAdminService);

    WebTarget wt = target();
    InputStream jsonStream = VirtualNetworkWebResourceTest.class
            .getResourceAsStream("post-virtual-device.json");
    String reqLocation = "vnets/" + networkId.toString() + "/devices";
    Response response = wt.path(reqLocation).request(MediaType.APPLICATION_JSON_TYPE)
            .post(Entity.json(jsonStream));
    assertThat(response.getStatus(), is(HttpURLConnection.HTTP_CREATED));

    String location = response.getLocation().getPath();
    assertThat(location, Matchers.startsWith("/" + reqLocation + "/" + vdev2.id().toString()));

    verify(mockVnetAdminService);
}
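A sketch of a request body this handler would accept, inferred only from the fields it reads; the contents of post-virtual-device.json are not shown in the test, so the deviceId field name is an assumption about the VirtualDevice codec and may differ in the real resource file.

// Hypothetical inline payload; "networkId" must match the path parameter or the
// handler throws IllegalArgumentException before touching the admin service.
String body = "{ \"networkId\": 3, \"deviceId\": \"of:0000000000000002\" }";
Response response = target().path("vnets/3/devices")
        .request(MediaType.APPLICATION_JSON_TYPE)
        .post(Entity.json(body));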
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
        FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
        throws IOException {
    FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
    List<HasMetadata> accompanyingResources = new ArrayList<>();

    final List<KubernetesStepDecorator> stepDecorators =
            new ArrayList<>(
                    Arrays.asList(
                            new InitJobManagerDecorator(kubernetesJobManagerParameters),
                            new EnvSecretsDecorator(kubernetesJobManagerParameters),
                            new MountSecretsDecorator(kubernetesJobManagerParameters),
                            new CmdJobManagerDecorator(kubernetesJobManagerParameters),
                            new InternalServiceDecorator(kubernetesJobManagerParameters),
                            new ExternalServiceDecorator(kubernetesJobManagerParameters)));

    Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
    if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
    }
    if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
    }
    stepDecorators.addAll(
            Arrays.asList(
                    new FlinkConfMountDecorator(kubernetesJobManagerParameters),
                    new PodTemplateMountDecorator(kubernetesJobManagerParameters)));

    for (KubernetesStepDecorator stepDecorator : stepDecorators) {
        flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
        accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
    }

    final Deployment deployment =
            createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);

    return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
}
@Test
void testEmptyHadoopConfDirectory() throws IOException {
    setHadoopConfDirEnv();
    kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    flinkPod, kubernetesJobManagerParameters);

    assertThat(kubernetesJobManagerSpecification.getAccompanyingResources())
            .noneMatch(
                    resource ->
                            resource.getMetadata()
                                    .getName()
                                    .equals(
                                            HadoopConfMountDecorator.getHadoopConfConfigMapName(
                                                    CLUSTER_ID)));
}
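A small configuration sketch showing how the two decorator toggles read by the focal method would be flipped off before building the spec; it reuses the same option constants the method imports and is not a complete deployment setup.

// Both decorators are gated on these options; with them disabled, neither the
// Hadoop conf ConfigMap nor the Kerberos mounts appear in accompanyingResources.
Configuration configuration = new Configuration();
configuration.set(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED, false);
configuration.set(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED, false);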
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "lookupConstraints is ImmutableList")
public List<LookupConstraint> getLookupConstraints() {
    return lookupConstraints;
}
@Test
public void shouldReturnKeyConstraintInt() {
    // Given:
    when(plannerOptions.getTableScansEnabled()).thenReturn(true);
    final Expression keyExp1 = new ComparisonExpression(
        Type.GREATER_THAN,
        new UnqualifiedColumnReferenceExp(ColumnName.of("K")),
        new IntegerLiteral(1)
    );
    final Expression keyExp2 = new ComparisonExpression(
        Type.EQUAL,
        new UnqualifiedColumnReferenceExp(ColumnName.of("K")),
        new IntegerLiteral(3)
    );
    final Expression expression = new LogicalBinaryExpression(
        LogicalBinaryExpression.Type.AND,
        keyExp1,
        keyExp2
    );
    QueryFilterNode filterNode = new QueryFilterNode(
        NODE_ID,
        source,
        expression,
        metaStore,
        ksqlConfig,
        false,
        plannerOptions
    );

    // When:
    final List<LookupConstraint> keys = filterNode.getLookupConstraints();

    // Then:
    assertThat(keys.size(), is(1));
    assertThat(keys.get(0), instanceOf(KeyConstraint.class));
    final KeyConstraint keyConstraint = (KeyConstraint) keys.get(0);
    assertThat(keyConstraint.getKey(), is(GenericKey.genericKey(3)));
    assertThat(keyConstraint.getOperator(), is(KeyConstraint.ConstraintOperator.EQUAL));
}
public List<String> toPrefix(String in) {
    List<String> tokens = buildTokens(alignINClause(in));
    List<String> output = new ArrayList<>();
    List<String> stack = new ArrayList<>();

    for (String token : tokens) {
        if (isOperand(token)) {
            if (token.equals(")")) {
                while (openParanthesesFound(stack)) {
                    output.add(stack.remove(stack.size() - 1));
                }
                if (!stack.isEmpty()) {
                    // temporary fix for issue #189
                    stack.remove(stack.size() - 1);
                }
            } else {
                while (openParanthesesFound(stack)
                        && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) {
                    output.add(stack.remove(stack.size() - 1));
                }
                stack.add(token);
            }
        } else {
            output.add(token);
        }
    }

    while (!stack.isEmpty()) {
        output.add(stack.remove(stack.size() - 1));
    }

    return output;
}
@Test
public void parseAeqBandOpenBsmlCorDgtEclose() {
    String query = "A = B AND ( B < C OR D > E )";
    List<String> list = parser.toPrefix(query);
    assertEquals(Arrays.asList("A", "B", "=", "B", "C", "<", "D", "E", ">", "OR", "AND"), list);
}
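Despite the method's name, the expected list above is postfix (operands first, operators last), produced by a shunting-yard style loop. A second example, traced by hand under the one precedence assumption the test implies, namely that comparison operators bind tighter than the boolean connectives:

// parser is the same instance used in the test above.
List<String> out = parser.toPrefix("A > 1 OR ( B = 2 AND C = 3 )");
// By the same shunting-yard rules, out should be:
// [A, 1, >, B, 2, =, C, 3, =, AND, OR]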
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                     ListOffsetsOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
        ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
    Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue())));
    ListOffsetsHandler handler =
        new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new ListOffsetsResult(future.all());
}
@Test
public void testListOffsetsMetadataRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0}));
    pInfos.add(new PartitionInfo("foo", 1, node1, new Node[]{node1}, new Node[]{node1}));
    final Cluster cluster = new Cluster(
        "mockClusterId",
        nodes,
        pInfos,
        Collections.emptySet(),
        Collections.emptySet(),
        node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.LEADER_NOT_AVAILABLE));
        // We retry when a partition of a topic (but not the topic itself) is unknown
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));

        // listoffsets response from broker 0
        ListOffsetsTopicResponse t0 =
            ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(singletonList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // listoffsets response from broker 1
        ListOffsetsTopicResponse t1 =
            ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 789L, 987);
        responseData = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(singletonList(t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);

        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);

        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(789L, offsets.get(tp1).offset());
        assertEquals(987, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
    }
}
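For contrast with the mocked environment above, a minimal sketch of the same public API against a live broker; the bootstrap address and topic name are placeholders, and the class wrapper exists only to make the snippet runnable.

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("foo", 0); // placeholder topic
            ListOffsetsResult result = admin.listOffsets(Map.of(tp, OffsetSpec.latest()));
            ListOffsetsResult.ListOffsetsResultInfo info = result.partitionResult(tp).get();
            System.out.printf("offset=%d epoch=%s%n", info.offset(), info.leaderEpoch());
        }
    }
}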
public static int parseBytesToInt(List<Byte> data) {
    return parseBytesToInt(data, 0);
}
@Test
public void parseBytesToInt_checkLists() {
    int expected = 257;
    List<Byte> data = toList(ByteBuffer.allocate(4).putInt(expected).array());
    Assertions.assertEquals(expected, TbUtils.parseBytesToInt(data, 0, 4));
    Assertions.assertEquals(expected, TbUtils.parseBytesToInt(data, 2, 2, true));
    Assertions.assertEquals(1, TbUtils.parseBytesToInt(data, 3, 1, true));

    expected = Integer.MAX_VALUE;
    data = toList(ByteBuffer.allocate(4).putInt(expected).array());
    Assertions.assertEquals(expected, TbUtils.parseBytesToInt(data, 0, 4, true));

    expected = 0xAABBCCDD;
    data = toList(new byte[]{(byte) 0xAA, (byte) 0xBB, (byte) 0xCC, (byte) 0xDD});
    Assertions.assertEquals(expected, TbUtils.parseBytesToInt(data, 0, 4, true));
    data = toList(new byte[]{(byte) 0xDD, (byte) 0xCC, (byte) 0xBB, (byte) 0xAA});
    Assertions.assertEquals(expected, TbUtils.parseBytesToInt(data, 0, 4, false));

    expected = 0xAABBCC;
    data = toList(new byte[]{(byte) 0xAA, (byte) 0xBB, (byte) 0xCC});
    Assertions.assertEquals(expected, TbUtils.parseBytesToInt(data, 0, 3, true));
    data = toList(new byte[]{(byte) 0xCC, (byte) 0xBB, (byte) 0xAA});
    Assertions.assertEquals(expected, TbUtils.parseBytesToInt(data, 0, 3, false));
}
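A short endianness illustration consistent with the assertions above; it assumes only the TbUtils overloads the test already calls (offset, length, bigEndian flag).

// The same two bytes read in both byte orders.
List<Byte> data = Arrays.asList((byte) 0x01, (byte) 0x02);
int big = TbUtils.parseBytesToInt(data, 0, 2, true);     // big-endian: 0x0102 = 258
int little = TbUtils.parseBytesToInt(data, 0, 2, false); // little-endian: 0x0201 = 513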
public static CatalogTable of(TableIdentifier tableId, CatalogTable catalogTable) {
    CatalogTable newTable = catalogTable.copy();
    return new CatalogTable(
            tableId,
            newTable.getTableSchema(),
            newTable.getOptions(),
            newTable.getPartitionKeys(),
            newTable.getComment(),
            newTable.getCatalogName());
}
@Test
public void testCatalogTableWithIllegalFieldNames() {
    CatalogTable catalogTable =
            CatalogTable.of(
                    TableIdentifier.of("catalog", "database", "table"),
                    TableSchema.builder()
                            .column(
                                    PhysicalColumn.of(
                                            " ", BasicType.STRING_TYPE, 1L, true, null, ""))
                            .build(),
                    Collections.emptyMap(),
                    Collections.emptyList(),
                    "comment");
    SeaTunnelException exception =
            Assertions.assertThrows(
                    SeaTunnelException.class,
                    () ->
                            new TableTransformFactoryContext(
                                    Collections.singletonList(catalogTable), null, null));
    SeaTunnelException exception2 =
            Assertions.assertThrows(
                    SeaTunnelException.class,
                    () -> new TableSinkFactoryContext(catalogTable, null, null));
    Assertions.assertEquals(
            "Table database.table field name cannot be empty", exception.getMessage());
    Assertions.assertEquals(
            "Table database.table field name cannot be empty", exception2.getMessage());

    CatalogTable catalogTable2 =
            CatalogTable.of(
                    TableIdentifier.of("catalog", "database", "table"),
                    TableSchema.builder()
                            .column(
                                    PhysicalColumn.of(
                                            "name1", BasicType.STRING_TYPE, 1L, true, null, ""))
                            .column(
                                    PhysicalColumn.of(
                                            "name1", BasicType.STRING_TYPE, 1L, true, null, ""))
                            .build(),
                    Collections.emptyMap(),
                    Collections.emptyList(),
                    "comment");
    SeaTunnelException exception3 =
            Assertions.assertThrows(
                    SeaTunnelException.class,
                    () ->
                            new TableTransformFactoryContext(
                                    Collections.singletonList(catalogTable2), null, null));
    SeaTunnelException exception4 =
            Assertions.assertThrows(
                    SeaTunnelException.class,
                    () -> new TableSinkFactoryContext(catalogTable2, null, null));
    Assertions.assertEquals(
            "Table database.table field name1 duplicate", exception3.getMessage());
    Assertions.assertEquals(
            "Table database.table field name1 duplicate", exception4.getMessage());
}
public static SchemaAndValue parseString(String value) {
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    if (value.isEmpty()) {
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    ValueParser parser = new ValueParser(new Parser(value));
    return parser.parse(false);
}
@Test
public void shouldParseStringsBeginningWithFalseAsStrings() {
    SchemaAndValue schemaAndValue = Values.parseString("false]");
    assertEquals(Type.STRING, schemaAndValue.schema().type());
    assertEquals("false]", schemaAndValue.value());
}
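A few more representative inputs, with expected types drawn from the Connect Values semantics the surrounding tests exercise rather than asserted from documentation:

Values.parseString("true").schema().type();      // BOOLEAN
Values.parseString("42").schema().type();        // INT8, the narrowest integer type that fits
Values.parseString("[1, 2, 3]").schema().type(); // ARRAY
Values.parseString("false]").schema().type();    // STRING, the trailing ']' breaks the boolean literal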