Columns: focal_method (string, lengths 13–60.9k), test_case (string, lengths 25–109k)
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds) {
    List<Integer> intermediateGlyphsFromGsub = originalGlyphIds;
    for (String feature : FEATURES_IN_ORDER) {
        if (!gsubData.isFeatureSupported(feature)) {
            LOG.debug("the feature {} was not found", feature);
            continue;
        }
        LOG.debug("applying the feature {}", feature);
        ScriptFeature scriptFeature = gsubData.getFeature(feature);
        intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature, intermediateGlyphsFromGsub);
    }
    return Collections.unmodifiableList(repositionGlyphs(intermediateGlyphsFromGsub));
}

@Test
void testApplyTransforms_kha_e_murddhana_swa_e_khiwa() {
    // given
    List<Integer> glyphsAfterGsub = Arrays.asList(167, 103, 438, 93, 93);
    // when
    List<Integer> result = gsubWorkerForBengali.applyTransforms(getGlyphIds("ক্ষীরের"));
    // then
    assertEquals(glyphsAfterGsub, result);
}
static Optional<RawMetric> parse(String s) {
    Matcher matcher = PATTERN.matcher(s);
    if (matcher.matches()) {
        String value = matcher.group("value");
        String metricName = matcher.group("metricName");
        if (metricName == null || !NumberUtils.isCreatable(value)) {
            return Optional.empty();
        }
        var labels = Arrays.stream(matcher.group("properties").split(","))
                .filter(str -> !"".equals(str))
                .map(str -> str.split("="))
                .filter(split -> split.length == 2)
                .collect(Collectors.toUnmodifiableMap(
                        str -> str[0].trim(),
                        str -> str[1].trim().replace("\"", "")));
        return Optional.of(RawMetric.create(metricName, labels, new BigDecimal(value)));
    }
    return Optional.empty();
}

@Test
void test() {
    String metricsString = "kafka_server_BrokerTopicMetrics_FifteenMinuteRate"
            + "{name=\"BytesOutPerSec\",topic=\"__confluent.support.metrics\",} 123.1234";

    Optional<RawMetric> parsedOpt = PrometheusEndpointMetricsParser.parse(metricsString);

    Assertions.assertThat(parsedOpt).hasValueSatisfying(metric -> {
        assertThat(metric.name()).isEqualTo("kafka_server_BrokerTopicMetrics_FifteenMinuteRate");
        assertThat(metric.value()).isEqualTo("123.1234");
        assertThat(metric.labels()).containsExactlyEntriesOf(
                Map.of(
                        "name", "BytesOutPerSec",
                        "topic", "__confluent.support.metrics"
                ));
    });
}
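The PATTERN constant that drives parse is not shown in this excerpt. For illustration only, here is a hypothetical regex with the three named groups the method reads (metricName, properties, value); the real kafka-ui pattern may differ, but this one accepts the metric line from the test above:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class PrometheusLineSketch {
    // Hypothetical stand-in for the unshown PATTERN constant.
    private static final Pattern PATTERN = Pattern.compile(
            "^(?<metricName>[a-zA-Z_:][a-zA-Z0-9_:]*)\\{(?<properties>[^}]*)}\\s+(?<value>\\S+)$");

    public static void main(String[] args) {
        Matcher m = PATTERN.matcher(
                "kafka_server_BrokerTopicMetrics_FifteenMinuteRate"
                        + "{name=\"BytesOutPerSec\",topic=\"__confluent.support.metrics\",} 123.1234");
        if (m.matches()) {
            System.out.println(m.group("metricName")); // kafka_server_BrokerTopicMetrics_FifteenMinuteRate
            System.out.println(m.group("properties")); // name="BytesOutPerSec",topic="__confluent.support.metrics",
            System.out.println(m.group("value"));      // 123.1234
        }
    }
}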
@Override public Object apply(Object input) { return PropertyOrFieldSupport.EXTRACTION.getValueOf(propertyOrFieldName, input); }
@Test
void should_extract_field_values_even_if_property_does_not_exist() {
    // GIVEN
    ByNameSingleExtractor underTest = new ByNameSingleExtractor("id");
    // WHEN
    Object result = underTest.apply(YODA);
    // THEN
    then(result).isEqualTo(1L);
}
@SuppressWarnings("unchecked") public static <W extends BoundedWindow> StateContext<W> nullContext() { return (StateContext<W>) NULL_CONTEXT; }
@Test public void nullContextThrowsOnWindow() { StateContext<BoundedWindow> context = StateContexts.nullContext(); thrown.expect(IllegalArgumentException.class); context.window(); }
IdBatchAndWaitTime newIdBaseLocal(int batchSize) { return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize); }
@Test
public void when_twoIdsAtTheSameMoment_then_higherSeq() {
    long id1 = gen.newIdBaseLocal(1516028439000L, 1234, 1).idBatch.base();
    long id2 = gen.newIdBaseLocal(1516028439000L, 1234, 1).idBatch.base();
    assertEquals(5300086112257234L, id1);
    assertEquals(id1 + (1 << DEFAULT_BITS_NODE_ID), id2);
}
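The second assertion pins down the bit layout: two IDs minted in the same millisecond on the same member differ by exactly 1 << DEFAULT_BITS_NODE_ID, i.e. the sequence field sits directly above the node-ID bits. A minimal sketch of that packing, with illustrative bit widths (the actual Hazelcast constants may differ):

class FlakeIdLayoutSketch {
    // Illustrative widths only; chosen so the sequence field sits
    // immediately above the node-ID field, as the test asserts.
    static final int BITS_NODE_ID = 16;
    static final int BITS_SEQUENCE = 6;

    static long pack(long timestamp, long sequence, long nodeId) {
        return (timestamp << (BITS_SEQUENCE + BITS_NODE_ID))
                | (sequence << BITS_NODE_ID)
                | nodeId;
    }

    public static void main(String[] args) {
        long id1 = pack(12345L, 0, 1234);
        long id2 = pack(12345L, 1, 1234); // same millisecond -> higher sequence
        // Incrementing the sequence adds exactly 1 << BITS_NODE_ID.
        System.out.println(id2 - id1 == (1L << BITS_NODE_ID)); // true
    }
}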
@InvokeOnHeader(Web3jConstants.ETH_SEND_RAW_TRANSACTION)
void ethSendRawTransaction(Message message) throws IOException {
    String signedTransactionData = message.getHeader(
            Web3jConstants.SIGNED_TRANSACTION_DATA, configuration::getSignedTransactionData, String.class);
    Request<?, EthSendTransaction> request = web3j.ethSendRawTransaction(signedTransactionData);
    setRequestId(message, request);
    EthSendTransaction response = request.send();
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.getTransactionHash());
    }
}

@Test
public void ethSendRawTransactionTest() throws Exception {
    EthSendTransaction response = Mockito.mock(EthSendTransaction.class);
    Mockito.when(mockWeb3j.ethSendRawTransaction(any())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getTransactionHash()).thenReturn("test");

    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_SEND_RAW_TRANSACTION);
    template.send(exchange);
    String body = exchange.getIn().getBody(String.class);
    assertEquals("test", body);
}
@Udf
public String rpad(
        @UdfParameter(description = "String to be padded") final String input,
        @UdfParameter(description = "Target length") final Integer targetLen,
        @UdfParameter(description = "Padding string") final String padding) {
    if (input == null) {
        return null;
    }
    if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
        return null;
    }

    final StringBuilder sb = new StringBuilder(targetLen + padding.length());
    sb.append(input);
    final int padChars = Math.max(targetLen - input.length(), 0);
    for (int i = 0; i < padChars; i += padding.length()) {
        sb.append(padding);
    }
    sb.setLength(targetLen);
    return sb.toString();
}
@Test public void shouldReturnNullForNullLengthBytes() { final ByteBuffer result = udf.rpad(BYTES_123, null, BYTES_45); assertThat(result, is(nullValue())); }
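A standalone rendering of the padding logic above (not the ksqlDB UDF itself) to make the two edge behaviors concrete: padding stops once targetLen is reached mid-pattern, and setLength truncates inputs that are already longer than targetLen:

class RpadSketch {
    // Same algorithm as the UDF body above, without the ksqlDB plumbing.
    static String rpad(String input, Integer targetLen, String padding) {
        if (input == null || padding == null || padding.isEmpty()
                || targetLen == null || targetLen < 0) {
            return null;
        }
        StringBuilder sb = new StringBuilder(targetLen + padding.length());
        sb.append(input);
        int padChars = Math.max(targetLen - input.length(), 0);
        for (int i = 0; i < padChars; i += padding.length()) {
            sb.append(padding);
        }
        sb.setLength(targetLen); // truncates when input.length() > targetLen
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(rpad("ab", 5, "xy"));    // abxyx  (pattern cut mid-way)
        System.out.println(rpad("abcdef", 3, "x")); // abc    (input truncated)
    }
}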
public List<Stream> match(Message message) {
    final Set<Stream> result = Sets.newHashSet();
    final Set<String> blackList = Sets.newHashSet();

    for (final Rule rule : rulesList) {
        if (blackList.contains(rule.getStreamId())) {
            continue;
        }
        final StreamRule streamRule = rule.getStreamRule();
        final StreamRuleType streamRuleType = streamRule.getType();
        final Stream.MatchingType matchingType = rule.getMatchingType();
        if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType)
                && !message.hasField(streamRule.getField())) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
            continue;
        }

        final Stream stream;
        if (streamRuleType != StreamRuleType.REGEX) {
            stream = rule.match(message);
        } else {
            stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
        }

        if (stream == null) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
        } else {
            result.add(stream);
            if (matchingType == Stream.MatchingType.OR) {
                // blacklist stream because it is already matched
                blackList.add(rule.getStreamId());
            }
        }
    }

    final Stream defaultStream = defaultStreamProvider.get();
    boolean alreadyRemovedDefaultStream = false;
    for (Stream stream : result) {
        if (stream.getRemoveMatchesFromDefaultStream()) {
            if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
                alreadyRemovedDefaultStream = true;
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Successfully removed default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            } else {
                // A previously executed message processor (or Illuminate) has likely already removed the
                // default stream from the message. Now, the message has matched a stream in the Graylog
                // MessageFilterChain, and the matching stream is also set to remove the default stream.
                // This is usually from user-defined stream rules, and is generally not a problem.
                cannotRemoveDefaultMeter.inc();
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Couldn't remove default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            }
        }
    }

    return ImmutableList.copyOf(result);
}

@Test
public void testInvertedRulesMatch() throws Exception {
    final StreamMock stream = getStreamMock("test");
    final StreamRuleMock rule1 = new StreamRuleMock(ImmutableMap.of(
            "_id", new ObjectId(),
            "field", "testfield1",
            "value", "1",
            "type", StreamRuleType.PRESENCE.toInteger(),
            "stream_id", stream.getId()
    ));
    final StreamRuleMock rule2 = new StreamRuleMock(ImmutableMap.of(
            "_id", new ObjectId(),
            "field", "testfield2",
            "inverted", true,
            "type", StreamRuleType.PRESENCE.toInteger(),
            "stream_id", stream.getId()
    ));

    stream.setStreamRules(Lists.newArrayList(rule1, rule2));

    final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream));

    // Without testfield1 and testfield2 in the message.
    final Message message1 = getMessage();
    assertTrue(engine.match(message1).isEmpty());

    // With testfield1 and testfield2 in the message.
    final Message message2 = getMessage();
    message2.addField("testfield1", "testvalue");
    message2.addField("testfield2", "testvalue");
    assertTrue(engine.match(message2).isEmpty());

    // With testfield1 and not testfield2 in the message.
    final Message message3 = getMessage();
    message3.addField("testfield1", "testvalue");
    assertEquals(Lists.newArrayList(stream), engine.match(message3));

    // With testfield2 in the message.
    final Message message4 = getMessage();
    message4.addField("testfield2", "testvalue");
    assertTrue(engine.match(message4).isEmpty());
}
public static int[] indexsOfStrings( String[] lookup, String[] array ) {
    int[] indexes = new int[lookup.length];
    for ( int i = 0; i < indexes.length; i++ ) {
        indexes[i] = indexOfString( lookup[i], array );
    }
    return indexes;
}

@Test
public void testIndexsOfStrings() {
    Assert.assertArrayEquals(
        new int[] { 2, 1, -1 },
        Const.indexsOfStrings( new String[] { "foo", "bar", "qux" }, new String[] { "baz", "bar", "foo" } ) );
}
public static boolean isInPrivateAddressSpace(String ip) {
    InetAddress inetAddress = InetAddresses.forString(ip);
    if (inetAddress instanceof Inet6Address) {
        // Inet6Address#isSiteLocalAddress is wrong: it only checks for FEC0:: prefixes, which were deprecated
        // by RFC 3879. Instead we need to check for unique local addresses, which are in FC00::/7 (in practice,
        // assigned addresses are in FD00::/8, but the RFC allows others in the future).
        return UNIQUE_LOCAL_ADDR_MASK.contains(inetAddress);
    }
    return inetAddress.isSiteLocalAddress();
}

@Test
public void testIsInPrivateAddressSpace() throws Exception {
    assertTrue(PrivateNet.isInPrivateAddressSpace("10.0.0.1"));
    assertTrue(PrivateNet.isInPrivateAddressSpace("172.16.20.50"));
    assertTrue(PrivateNet.isInPrivateAddressSpace("192.168.1.1"));
    assertFalse(PrivateNet.isInPrivateAddressSpace("99.42.44.219"));
    assertFalse(PrivateNet.isInPrivateAddressSpace("ff02:0:0:0:0:0:0:fb"));
    assertTrue(PrivateNet.isInPrivateAddressSpace("fd80:0:0:0:0:0:0:fb"));
    assertThrows(IllegalArgumentException.class, () -> PrivateNet.isInPrivateAddressSpace("this is not an IP address"));
}
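UNIQUE_LOCAL_ADDR_MASK is not shown here; per the comment it represents FC00::/7 (RFC 4193). Membership in that block can be checked directly by masking the top seven bits of the first address byte, as in this hedged sketch:

import java.net.InetAddress;

class UniqueLocalSketch {
    // FC00::/7 means the first 7 bits are 1111110, so byte0 & 0xFE == 0xFC.
    static boolean isUniqueLocal(InetAddress address) {
        byte[] bytes = address.getAddress();
        return bytes.length == 16 && (bytes[0] & 0xFE) == 0xFC;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(isUniqueLocal(InetAddress.getByName("fd80::fb"))); // true
        System.out.println(isUniqueLocal(InetAddress.getByName("ff02::fb"))); // false
    }
}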
@Override
public AuthenticationState newAuthState(AuthData authData, SocketAddress remoteAddress, SSLSession sslSession)
        throws AuthenticationException {
    final List<AuthenticationState> states = new ArrayList<>(providers.size());
    AuthenticationException authenticationException = null;
    for (AuthenticationProvider provider : providers) {
        try {
            AuthenticationState state = provider.newAuthState(authData, remoteAddress, sslSession);
            states.add(state);
        } catch (AuthenticationException ae) {
            if (log.isDebugEnabled()) {
                log.debug("Authentication failed for auth provider " + provider.getClass() + ": ", ae);
            }
            // Store the exception so we can throw it later instead of a generic one
            authenticationException = ae;
        }
    }
    if (states.isEmpty()) {
        log.debug("Failed to initialize a new auth state from {}", remoteAddress, authenticationException);
        if (authenticationException != null) {
            throw authenticationException;
        } else {
            throw new AuthenticationException("Failed to initialize a new auth state from " + remoteAddress);
        }
    } else {
        return new AuthenticationListState(states);
    }
}

@Test
public void testNewAuthState() throws Exception {
    AuthenticationState authStateAA = newAuthState(expiringTokenAA, SUBJECT_A);
    AuthenticationState authStateAB = newAuthState(expiringTokenAB, SUBJECT_B);
    AuthenticationState authStateBA = newAuthState(expiringTokenBA, SUBJECT_A);
    AuthenticationState authStateBB = newAuthState(expiringTokenBB, SUBJECT_B);
    Thread.sleep(TimeUnit.SECONDS.toMillis(6));
    verifyAuthStateExpired(authStateAA, SUBJECT_A);
    verifyAuthStateExpired(authStateAB, SUBJECT_B);
    verifyAuthStateExpired(authStateBA, SUBJECT_A);
    verifyAuthStateExpired(authStateBB, SUBJECT_B);
}
public T send() throws IOException { return web3jService.send(this, responseType); }
@Test public void testAdminRemovePeer() throws Exception { web3j.adminRemovePeer("url").send(); verifyResult( "{\"jsonrpc\":\"2.0\",\"method\":\"admin_removePeer\",\"params\":[\"url\"],\"id\":1}"); }
public PaginationContext createPaginationContext(final SelectStatement selectStatement, final ProjectionsContext projectionsContext,
                                                 final List<Object> params, final Collection<WhereSegment> whereSegments) {
    Optional<LimitSegment> limitSegment = selectStatement.getLimit();
    if (limitSegment.isPresent()) {
        return new LimitPaginationContextEngine().createPaginationContext(limitSegment.get(), params);
    }
    Optional<TopProjectionSegment> topProjectionSegment = findTopProjection(selectStatement);
    Collection<ExpressionSegment> expressions = new LinkedList<>();
    for (WhereSegment each : whereSegments) {
        expressions.add(each.getExpr());
    }
    if (topProjectionSegment.isPresent()) {
        return new TopPaginationContextEngine().createPaginationContext(topProjectionSegment.get(), expressions, params);
    }
    if (!expressions.isEmpty() && containsRowNumberPagination(selectStatement)) {
        return new RowNumberPaginationContextEngine(databaseType).createPaginationContext(expressions, projectionsContext, params);
    }
    return new PaginationContext(null, null, params);
}

@Test
void assertCreatePaginationContextWhenLimitSegmentIsPresentForMySQL() {
    MySQLSelectStatement selectStatement = new MySQLSelectStatement();
    selectStatement.setLimit(new LimitSegment(0, 10,
            new NumberLiteralLimitValueSegment(0, 10, 100L), new NumberLiteralLimitValueSegment(0, 10, 100L)));
    PaginationContext paginationContext = new PaginationContextEngine(new MySQLDatabaseType()).createPaginationContext(
            selectStatement, mock(ProjectionsContext.class), Collections.emptyList(), Collections.emptyList());
    assertTrue(paginationContext.getOffsetSegment().isPresent());
    assertTrue(paginationContext.getRowCountSegment().isPresent());
}
@Override public KTable<K, VOut> aggregate(final Initializer<VOut> initializer, final Materialized<K, VOut, KeyValueStore<Bytes, byte[]>> materialized) { return aggregate(initializer, NamedInternal.empty(), materialized); }
@Test
public void shouldNotHaveNullInitializerOnAggregateWithNamed() {
    assertThrows(NullPointerException.class, () -> cogroupedStream.aggregate(null, Named.as("name")));
}
public Analysis analyze(Statement statement) { return analyze(statement, false); }
@Test
public void testOrderByExpressionOnOutputColumn() {
    // TODO: analyze output
    analyze("SELECT a x FROM t1 ORDER BY x + 1");
    analyze("SELECT max(a) FROM (values (1,2), (2,1)) t(a,b) GROUP BY b ORDER BY max(b*1e0)");
    analyze("SELECT CAST(ROW(1) AS ROW(someField BIGINT)) AS a FROM (values (1,2)) t(a,b) GROUP BY b ORDER BY a.someField");
    analyze("SELECT 1 AS x FROM (values (1,2)) t(x, y) GROUP BY y ORDER BY sum(apply(1, x -> x))");
}
public static String serialize(Object obj) throws JsonProcessingException { return MAPPER.writeValueAsString(obj); }
@Test
void serializeMeter() throws JsonProcessingException {
    DSeries series = new DSeries();
    series.add(new DMeter(new TestMeter(0, 1), METRIC, HOST, tags, () -> MOCKED_SYSTEM_MILLIS));
    assertSerialization(
            DatadogHttpClient.serialize(series),
            new MetricAssertion(MetricType.gauge, true, "1.0"));
}
CacheConfig<K, V> asCacheConfig() { return this.copy(new CacheConfig<>(), false); }
@Test
public void serializationSucceeds_whenKeyTypeNotResolvable() {
    PreJoinCacheConfig preJoinCacheConfig = new PreJoinCacheConfig(newDefaultCacheConfig("test"));
    preJoinCacheConfig.setKeyClassName("some.inexistent.Class");
    preJoinCacheConfig.setValueClassName("java.lang.String");
    Data data = serializationService.toData(preJoinCacheConfig);
    PreJoinCacheConfig deserialized = serializationService.toObject(data);
    assertEquals(deserialized, preJoinCacheConfig);
    try {
        Class klass = deserialized.asCacheConfig().getKeyType();
        fail("Getting the key type on deserialized CacheConfig should fail because the key type cannot be resolved");
    } catch (HazelcastException e) {
        if (!(e.getCause() instanceof ClassNotFoundException)) {
            fail("Unexpected exception: " + e.getCause());
        }
    }
}
public static CompositeEvictionChecker newCompositeEvictionChecker(CompositionOperator compositionOperator,
                                                                   EvictionChecker... evictionCheckers) {
    Preconditions.isNotNull(compositionOperator, "composition");
    Preconditions.isNotNull(evictionCheckers, "evictionCheckers");
    if (evictionCheckers.length == 0) {
        throw new IllegalArgumentException("EvictionCheckers cannot be empty!");
    }
    switch (compositionOperator) {
        case AND:
            return new CompositeEvictionCheckerWithAndComposition(evictionCheckers);
        case OR:
            return new CompositeEvictionCheckerWithOrComposition(evictionCheckers);
        default:
            throw new IllegalArgumentException("Invalid composition operator: " + compositionOperator);
    }
}

@Test
public void resultShouldReturnFalse_whenAllIsFalse_withOrCompositionOperator() {
    EvictionChecker evictionChecker1ReturnsFalse = mock(EvictionChecker.class);
    EvictionChecker evictionChecker2ReturnsFalse = mock(EvictionChecker.class);

    when(evictionChecker1ReturnsFalse.isEvictionRequired()).thenReturn(false);
    when(evictionChecker2ReturnsFalse.isEvictionRequired()).thenReturn(false);

    CompositeEvictionChecker compositeEvictionChecker = CompositeEvictionChecker.newCompositeEvictionChecker(
            CompositeEvictionChecker.CompositionOperator.OR,
            evictionChecker1ReturnsFalse, evictionChecker2ReturnsFalse);

    assertFalse(compositeEvictionChecker.isEvictionRequired());
}
void runOnce() {
    if (transactionManager != null) {
        try {
            transactionManager.maybeResolveSequences();
            RuntimeException lastError = transactionManager.lastError();

            // do not continue sending if the transaction manager is in a failed state
            if (transactionManager.hasFatalError()) {
                if (lastError != null)
                    maybeAbortBatches(lastError);
                client.poll(retryBackoffMs, time.milliseconds());
                return;
            }

            if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
                return;
            }

            // Check whether we need a new producerId. If so, we will enqueue an InitProducerId
            // request which will be sent below
            transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();

            if (maybeSendAndPollTransactionalRequest()) {
                return;
            }
        } catch (AuthenticationException e) {
            // This is already logged as error, but propagated here to perform any clean ups.
            log.trace("Authentication exception while processing transactional request", e);
            transactionManager.authenticationFailed(e);
        }
    }

    long currentTimeMs = time.milliseconds();
    long pollTimeout = sendProducerData(currentTimeMs);
    client.poll(pollTimeout, currentTimeMs);
}

@Test
public void testResetOfProducerStateShouldAllowQueuedBatchesToDrain() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = createTransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Short.MAX_VALUE, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    int maxRetries = 10;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL,
            maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions);

    appendToAccumulator(tp0); // failed response
    Future<RecordMetadata> successfulResponse = appendToAccumulator(tp1);
    sender.runOnce(); // connect and send.

    assertEquals(1, client.inFlightRequestCount());

    Map<TopicPartition, OffsetAndError> responses = new LinkedHashMap<>();
    responses.put(tp1, new OffsetAndError(-1, Errors.NOT_LEADER_OR_FOLLOWER));
    responses.put(tp0, new OffsetAndError(-1, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER));
    client.respond(produceResponse(responses));

    sender.runOnce(); // trigger epoch bump
    prepareAndReceiveInitProducerId(producerId + 1, Errors.NONE); // also send request to tp1
    sender.runOnce(); // reset producer ID because epoch is maxed out
    assertEquals(producerId + 1, transactionManager.producerIdAndEpoch().producerId);

    assertFalse(successfulResponse.isDone());
    client.respond(produceResponse(tp1, 10, Errors.NONE, -1));
    sender.runOnce();

    assertTrue(successfulResponse.isDone());
    assertEquals(10, successfulResponse.get().offset());

    // The epoch and the sequence are updated when the next batch is sent.
    assertEquals(1, transactionManager.sequenceNumber(tp1));
}
static void dissectRemoveImageCleanup(
    final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) {
    int absoluteOffset = offset;
    absoluteOffset += dissectLogHeader(CONTEXT, REMOVE_IMAGE_CLEANUP, buffer, absoluteOffset, builder);

    builder.append(": sessionId=").append(buffer.getInt(absoluteOffset, LITTLE_ENDIAN));
    absoluteOffset += SIZE_OF_INT;

    builder.append(" streamId=").append(buffer.getInt(absoluteOffset, LITTLE_ENDIAN));
    absoluteOffset += SIZE_OF_INT;

    builder.append(" id=").append(buffer.getLong(absoluteOffset, LITTLE_ENDIAN));
    absoluteOffset += SIZE_OF_LONG;

    builder.append(" channel=");
    buffer.getStringAscii(absoluteOffset, builder);
}

@Test
void dissectRemoveImageCleanup() {
    final int offset = 32;
    internalEncodeLogHeader(buffer, offset, 66, 99, () -> 12345678900L);
    buffer.putInt(offset + LOG_HEADER_LENGTH, 77, LITTLE_ENDIAN);
    buffer.putInt(offset + LOG_HEADER_LENGTH + SIZE_OF_INT, 55, LITTLE_ENDIAN);
    buffer.putLong(offset + LOG_HEADER_LENGTH + SIZE_OF_INT * 2, 1_000_000L, LITTLE_ENDIAN);
    buffer.putStringAscii(offset + LOG_HEADER_LENGTH + SIZE_OF_INT * 2 + SIZE_OF_LONG, "URI");

    DriverEventDissector.dissectRemoveImageCleanup(buffer, offset, builder);

    assertEquals("[12.345678900] " + CONTEXT + ": " + REMOVE_IMAGE_CLEANUP.name() +
        " [66/99]: sessionId=77 streamId=55 id=1000000 channel=URI", builder.toString());
}
public JobMetaDataParameterObject processJobMultipart(JobMultiPartParameterObject parameterObject)
        throws IOException, NoSuchAlgorithmException {
    // Change the timestamp in the beginning to avoid expiration
    changeLastUpdatedTime();

    validateReceivedParameters(parameterObject);

    validateReceivedPartNumbersAreExpected(parameterObject);

    validatePartChecksum(parameterObject);

    // Part numbers are good; save them
    currentPart = parameterObject.getCurrentPartNumber();
    totalPart = parameterObject.getTotalPartNumber();

    Path jarPath = jobMetaDataParameterObject.getJarPath();

    // Append data to file
    try (OutputStream outputStream = Files.newOutputStream(jarPath, StandardOpenOption.CREATE, StandardOpenOption.APPEND)) {
        outputStream.write(parameterObject.getPartData(), 0, parameterObject.getPartSize());
    }

    if (LOGGER.isInfoEnabled()) {
        String message = String.format("Session : %s jarPath: %s PartNumber: %d/%d Total file size : %d bytes",
                parameterObject.getSessionId(), jarPath, currentPart, totalPart, Files.size(jarPath));
        LOGGER.info(message);
    }

    JobMetaDataParameterObject result = null;
    // If parts are complete
    if (currentPart == totalPart) {
        validateJarChecksum();
        result = jobMetaDataParameterObject;
    }
    return result;
}

@Test
public void testEmptyPartData() {
    byte[] partData = new byte[]{};
    JobMultiPartParameterObject jobMultiPartParameterObject = new JobMultiPartParameterObject();
    jobMultiPartParameterObject.setSessionId(null);
    jobMultiPartParameterObject.setCurrentPartNumber(1);
    jobMultiPartParameterObject.setTotalPartNumber(1);
    jobMultiPartParameterObject.setPartData(partData);
    jobMultiPartParameterObject.setPartSize(0);

    Assert.assertThrows(JetException.class, () -> jobUploadStatus.processJobMultipart(jobMultiPartParameterObject));
}
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}

@Test
void testForwardedWildCard() {
    String[] forwardedFields = {"*"};
    SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, threeIntTupleType, threeIntTupleType);

    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 3)).isEmpty();

    forwardedFields[0] = "*";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, deepNestedTupleType, deepNestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
    assertThat(sp.getForwardingTargetFields(0, 4)).contains(4);
}
@Override
public void execute(final ConnectionSession connectionSession) {
    String databaseName = sqlStatement.getFromDatabase()
            .map(schema -> schema.getDatabase().getIdentifier().getValue())
            .orElseGet(connectionSession::getUsedDatabaseName);
    queryResultMetaData = createQueryResultMetaData(databaseName);
    mergedResult = new TransparentMergedResult(getQueryResult(databaseName));
}

@Test
void assertShowTableFromUncompletedDatabase() throws SQLException {
    MySQLShowTablesStatement showTablesStatement = new MySQLShowTablesStatement();
    showTablesStatement.setFromDatabase(new FromDatabaseSegment(0, 0, new DatabaseSegment(0, 0, new IdentifierValue("uncompleted"))));
    ShowTablesExecutor executor = new ShowTablesExecutor(showTablesStatement, TypedSPILoader.getService(DatabaseType.class, "MySQL"));
    ContextManager contextManager = mockContextManager(getDatabases());
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    executor.execute(mockConnectionSession());
    QueryResultMetaData actualMetaData = executor.getQueryResultMetaData();
    assertThat(actualMetaData.getColumnCount(), is(1));
    assertThat(actualMetaData.getColumnName(1), is("Tables_in_uncompleted"));
    MergedResult actualResult = executor.getMergedResult();
    assertFalse(actualResult.next());
}
public Fury getFury() {
    try {
        lock.lock();
        Fury fury = idleCacheQueue.poll();
        while (fury == null) {
            if (activeCacheNumber.get() < maxPoolSize) {
                addFury();
            } else {
                furyCondition.await();
            }
            fury = idleCacheQueue.poll();
        }
        activeCacheNumber.incrementAndGet();
        return fury;
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    } finally {
        lock.unlock();
    }
}
@Test public void testGetFuryNormal() { ClassLoaderFuryPooled pooled = getPooled(3, 5); Fury fury = pooled.getFury(); Assert.assertNotNull(fury); }
public List<Connection> getConnectionByIp(String clientIp) {
    Set<Map.Entry<String, Connection>> entries = connections.entrySet();
    List<Connection> connections = new ArrayList<>();
    for (Map.Entry<String, Connection> entry : entries) {
        Connection value = entry.getValue();
        if (clientIp.equals(value.getMetaInfo().clientIp)) {
            connections.add(value);
        }
    }
    return connections;
}
@Test void testGetConnectionsByClientIp() { assertEquals(1, connectionManager.getConnectionByIp(clientIp).size()); }
@Override
public void preflight(final Path source, final Path target) throws BackgroundException {
    if (!CteraTouchFeature.validate(target.getName())) {
        throw new InvalidFilenameException(MessageFormat.format(
                LocaleFactory.localizedString("Cannot rename {0}", "Error"), source.getName())).withFile(source);
    }
    assumeRole(source, DELETEPERMISSION);
    // defaults to Acl.EMPTY (disabling role checking) if target does not exist
    assumeRole(target, WRITEPERMISSION);
    // no createfilespermission required for now
    if (source.isDirectory()) {
        assumeRole(target.getParent(), target.getName(), CREATEDIRECTORIESPERMISSION);
    }
}

@Test
public void testPreflightDirectoryAccessDeniedTargetExistsNotWritablePermissionCustomProps() throws Exception {
    final Path source = new Path(new DefaultHomeFinderService(session).find(),
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    source.setAttributes(source.attributes().withAcl(new Acl(new Acl.CanonicalUser(), CteraAttributesFinderFeature.DELETEPERMISSION)));
    final Path target = new Path(new DefaultHomeFinderService(session).find(),
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    target.setAttributes(target.attributes().withAcl(new Acl(new Acl.CanonicalUser())));
    target.getParent().setAttributes(target.getParent().attributes().withAcl(
            new Acl(new Acl.CanonicalUser(), CteraAttributesFinderFeature.CREATEDIRECTORIESPERMISSION)));
    final CteraAttributesFinderFeature mock = mock(CteraAttributesFinderFeature.class);
    // target exists and not writable
    when(mock.find(eq(target))).thenReturn(new PathAttributes().withAcl(new Acl(new Acl.CanonicalUser())));
    final AccessDeniedException accessDeniedException = assertThrows(AccessDeniedException.class,
            () -> new CteraMoveFeature(session, mock).preflight(source, target));
    assertTrue(accessDeniedException.getDetail().contains(
            MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), target.getName())));
}
public boolean sendMessageBack(final MessageExt msg) {
    try {
        // When the max reconsume times is exceeded, the message is routed to the dead letter queue.
        Message newMsg = new Message(MixAll.getRetryTopic(this.defaultMQPushConsumer.getConsumerGroup()), msg.getBody());
        MessageAccessor.setProperties(newMsg, msg.getProperties());
        String originMsgId = MessageAccessor.getOriginMessageId(msg);
        MessageAccessor.setOriginMessageId(newMsg, UtilAll.isBlank(originMsgId) ? msg.getMsgId() : originMsgId);
        newMsg.setFlag(msg.getFlag());
        MessageAccessor.putProperty(newMsg, MessageConst.PROPERTY_RETRY_TOPIC, msg.getTopic());
        MessageAccessor.setReconsumeTime(newMsg, String.valueOf(msg.getReconsumeTimes()));
        MessageAccessor.setMaxReconsumeTimes(newMsg, String.valueOf(getMaxReconsumeTimes()));
        newMsg.setDelayTimeLevel(3 + msg.getReconsumeTimes());
        this.defaultMQPushConsumerImpl.getmQClientFactory().getDefaultMQProducer().send(newMsg);
        return true;
    } catch (Exception e) {
        log.error("sendMessageBack exception, group: " + this.consumerGroup + " msg: " + msg.toString(), e);
    }
    return false;
}
@Test public void testSendMessageBack() { assertTrue(popService.sendMessageBack(createMessageExt())); }
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
    RedisClient entry = getEntry(node);
    RFuture<List<String>> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    List<String> list = syncFuture(f);
    return CONVERTER.convert(list.toArray(new String[list.size()]));
}

@Test
public void testGetClientList() {
    testInCluster(connection -> {
        RedisClusterNode master = getFirstMaster(connection);
        List<RedisClientInfo> list = connection.getClientList(master);
        assertThat(list.size()).isGreaterThan(10);
    });
}
@Override
public BackupRequestsStrategyStats getDiffStats() {
    BackupRequestsStrategyStats stats = doGetDiffStats();
    while (stats == null) {
        stats = doGetDiffStats();
    }
    return stats;
}

@Test
public void testNoActivityDiffStats() {
    TrackingBackupRequestsStrategy trackingStrategy =
            new TrackingBackupRequestsStrategy(new MockBackupRequestsStrategy(() -> Optional.of(10000000L), () -> true));
    BackupRequestsStrategyStats stats = trackingStrategy.getDiffStats();
    assertNotNull(stats);
    assertEquals(stats.getAllowed(), 0);
    assertEquals(stats.getSuccessful(), 0);
    assertEquals(stats.getMinDelayNano(), 0);
    assertEquals(stats.getMaxDelayNano(), 0);
    assertEquals(stats.getAvgDelayNano(), 0);
    stats = trackingStrategy.getDiffStats();
    assertNotNull(stats);
    assertEquals(stats.getAllowed(), 0);
    assertEquals(stats.getSuccessful(), 0);
    assertEquals(stats.getMinDelayNano(), 0);
    assertEquals(stats.getMaxDelayNano(), 0);
    assertEquals(stats.getAvgDelayNano(), 0);
}
@Override
public ConfigOperateResult insertOrUpdate(String srcIp, String srcUser, ConfigInfo configInfo,
        Map<String, Object> configAdvanceInfo) {
    if (Objects.isNull(findConfigInfoState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant()))) {
        return addConfigInfo(srcIp, srcUser, configInfo, configAdvanceInfo);
    } else {
        return updateConfigInfo(configInfo, srcIp, srcUser, configAdvanceInfo);
    }
}

@Test
void testInsertOrUpdateOfUpdateConfigSuccess() {
    Map<String, Object> configAdvanceInfo = new HashMap<>();
    String desc = "testdesc";
    String use = "testuse";
    String effect = "testeffect";
    String type = "testtype";
    String schema = "testschema";
    configAdvanceInfo.put("config_tags", "tag1,tag2");
    configAdvanceInfo.put("desc", desc);
    configAdvanceInfo.put("use", use);
    configAdvanceInfo.put("effect", effect);
    configAdvanceInfo.put("type", type);
    configAdvanceInfo.put("schema", schema);

    String dataId = "dataId";
    String group = "group";
    String tenant = "tenant";
    String content = "content132456";
    String appName = "app1233";
    ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
    String encryptedDataKey = "key34567";
    configInfo.setEncryptedDataKey(encryptedDataKey);

    // mock get config state, first and second is not null
    Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}),
                    eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER)))
            .thenReturn(new ConfigInfoStateWrapper(), new ConfigInfoStateWrapper());

    // mock select config info before update
    ConfigInfoWrapper configInfoWrapperOld = new ConfigInfoWrapper();
    configInfoWrapperOld.setDataId(dataId);
    configInfoWrapperOld.setGroup(group);
    configInfoWrapperOld.setTenant(tenant);
    configInfoWrapperOld.setAppName("old_app");
    configInfoWrapperOld.setMd5("old_md5");
    configInfoWrapperOld.setId(12345678765L);
    Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}),
                    eq(CONFIG_INFO_WRAPPER_ROW_MAPPER)))
            .thenReturn(configInfoWrapperOld);

    String srcIp = "srcIp";
    String srcUser = "srcUser";
    embeddedConfigInfoPersistService.insertOrUpdate(srcIp, srcUser, configInfo, configAdvanceInfo);

    // expect update config info invoked.
    embeddedStorageContextHolderMockedStatic.verify(
            () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(content),
                    eq(MD5Utils.md5Hex(content, Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser), eq(appName),
                    eq(desc), eq(use), eq(effect), eq(type), eq(schema), eq(encryptedDataKey), eq(dataId), eq(group),
                    eq(tenant)), times(1));

    // expect insert config tags
    embeddedStorageContextHolderMockedStatic.verify(
            () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), anyLong(), eq("tag1"),
                    eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant)), times(1));
    embeddedStorageContextHolderMockedStatic.verify(
            () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), anyLong(), eq("tag2"),
                    eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant)), times(1));

    // expect insert history info of U
    Mockito.verify(historyConfigInfoPersistService, times(1))
            .insertConfigHistoryAtomic(eq(configInfoWrapperOld.getId()), any(ConfigInfo.class), eq(srcIp),
                    eq(srcUser), any(Timestamp.class), eq("U"));
}
@Override public PipelineDef parse(Path pipelineDefPath, Configuration globalPipelineConfig) throws Exception { return parse(mapper.readTree(pipelineDefPath.toFile()), globalPipelineConfig); }
@Test
void testValidTimeZone() throws Exception {
    URL resource = Resources.getResource("definitions/pipeline-definition-minimized.yaml");
    YamlPipelineDefinitionParser parser = new YamlPipelineDefinitionParser();
    PipelineDef pipelineDef = parser.parse(
            Paths.get(resource.toURI()),
            Configuration.fromMap(
                    ImmutableMap.<String, String>builder()
                            .put(PIPELINE_LOCAL_TIME_ZONE.key(), "Asia/Shanghai")
                            .build()));
    assertThat(pipelineDef.getConfig().get(PIPELINE_LOCAL_TIME_ZONE))
            .isEqualTo("Asia/Shanghai");

    pipelineDef = parser.parse(
            Paths.get(resource.toURI()),
            Configuration.fromMap(
                    ImmutableMap.<String, String>builder()
                            .put(PIPELINE_LOCAL_TIME_ZONE.key(), "GMT+08:00")
                            .build()));
    assertThat(pipelineDef.getConfig().get(PIPELINE_LOCAL_TIME_ZONE)).isEqualTo("GMT+08:00");

    pipelineDef = parser.parse(
            Paths.get(resource.toURI()),
            Configuration.fromMap(
                    ImmutableMap.<String, String>builder()
                            .put(PIPELINE_LOCAL_TIME_ZONE.key(), "UTC")
                            .build()));
    assertThat(pipelineDef.getConfig().get(PIPELINE_LOCAL_TIME_ZONE)).isEqualTo("UTC");
}
@Bean public ShenyuPlugin oAuth2Plugin(final ObjectProvider<ReactiveOAuth2AuthorizedClientService> authorizedClientServiceProvider) { return new OAuth2Plugin(authorizedClientServiceProvider); }
@Test
public void testOAuth2Plugin() {
    applicationContextRunner.run(context -> {
        ShenyuPlugin plugin = context.getBean("oAuth2Plugin", ShenyuPlugin.class);
        assertNotNull(plugin);
        assertThat(plugin.named()).isEqualTo(PluginEnum.OAUTH2.getName());
    });
}
final void saveDuplications(final DefaultInputComponent component, List<CloneGroup> duplications) {
    if (duplications.size() > MAX_CLONE_GROUP_PER_FILE) {
        LOG.warn("Too many duplication groups on file {}. Keep only the first {} groups.", component, MAX_CLONE_GROUP_PER_FILE);
    }
    Iterable<ScannerReport.Duplication> reportDuplications = duplications.stream()
        .limit(MAX_CLONE_GROUP_PER_FILE)
        .map(
            new Function<CloneGroup, Duplication>() {
                private final ScannerReport.Duplication.Builder dupBuilder = ScannerReport.Duplication.newBuilder();
                private final ScannerReport.Duplicate.Builder blockBuilder = ScannerReport.Duplicate.newBuilder();

                @Override
                public ScannerReport.Duplication apply(CloneGroup input) {
                    return toReportDuplication(component, dupBuilder, blockBuilder, input);
                }
            })::iterator;

    publisher.getWriter().writeComponentDuplications(component.scannerId(), reportDuplications);
}

@Test
public void should_limit_number_of_clones() {
    // 1 origin part + 101 duplicates = 102
    List<CloneGroup> dups = new ArrayList<>(CpdExecutor.MAX_CLONE_GROUP_PER_FILE + 1);
    for (int i = 0; i < CpdExecutor.MAX_CLONE_GROUP_PER_FILE + 1; i++) {
        ClonePart clonePart = new ClonePart(batchComponent1.key(), i, i, i + 1);
        ClonePart dupPart = new ClonePart(batchComponent1.key(), i + 1, i + 1, i + 2);
        dups.add(newCloneGroup(clonePart, dupPart));
    }
    executor.saveDuplications(batchComponent1, dups);

    assertThat(reader.readComponentDuplications(batchComponent1.scannerId())).toIterable().hasSize(CpdExecutor.MAX_CLONE_GROUP_PER_FILE);
    assertThat(logTester.logs(Level.WARN))
        .contains("Too many duplication groups on file " + batchComponent1 + ". Keep only the first " + CpdExecutor.MAX_CLONE_GROUP_PER_FILE + " groups.");
}
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }

    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        LOGGER.error(e.getMessage());
        return 1;
    }

    return command(
        config,
        MigrationsUtil::getKsqlClient,
        getMigrationsDir(getConfigFile(), config),
        Clock.systemDefaultZone()
    );
}

@Test
public void shouldApplyInsertStatement() throws Exception {
    // Given:
    command = PARSER.parse("-v", "3");
    createMigrationFile(1, NAME, migrationsDir, COMMAND);
    createMigrationFile(3, NAME, migrationsDir, INSERTS);
    givenCurrentMigrationVersion("1");
    givenAppliedMigration(1, NAME, MigrationState.MIGRATED);

    // When:
    final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir,
        Clock.fixed(Instant.ofEpochMilli(1000), ZoneId.systemDefault()));

    // Then:
    assertThat(result, is(0));
    final InOrder inOrder = inOrder(ksqlClient);
    verifyMigratedVersion(inOrder, 3, "1", MigrationState.MIGRATED, () -> {
        inOrder.verify(ksqlClient).insertInto("`FOO`", new KsqlObject(ImmutableMap.of("`A`", "abcd")));
        inOrder.verify(ksqlClient).insertInto("`FOO`", new KsqlObject(ImmutableMap.of("`A`", "efgh")));
        inOrder.verify(ksqlClient).insertInto("`FOO`", new KsqlObject(ImmutableMap.of("`A`", "ijkl")));
    });
    inOrder.verify(ksqlClient).close();
    inOrder.verifyNoMoreInteractions();
}
private void releaseSlot(LogicalSlot slot, Throwable cause) { requestedPhysicalSlots.removeKeyB(slot.getSlotRequestId()); slotProvider.cancelSlotRequest(slot.getSlotRequestId(), cause); }
@Test void testLogicalSlotReleasingCancelsPhysicalSlotRequest() throws Exception { testLogicalSlotRequestCancellationOrRelease( true, true, (context, slotFuture) -> slotFuture.get().releaseSlot(null)); }
public Service createGenericResourceService(String name, String version, String resource, String referencePayload)
        throws EntityAlreadyExistsException {
    log.info("Creating a new Service '{}-{}' for generic resource {}", name, version, resource);

    // Check if corresponding Service already exists.
    Service existingService = serviceRepository.findByNameAndVersion(name, version);
    if (existingService != null) {
        log.warn("A Service '{}-{}' is already existing. Throwing an Exception", name, version);
        throw new EntityAlreadyExistsException(
                String.format("Service '%s-%s' is already present in store", name, version));
    }

    // Create new service with GENERIC_REST type.
    Service service = new Service();
    service.setName(name);
    service.setVersion(version);
    service.setType(ServiceType.GENERIC_REST);
    service.setMetadata(new Metadata());

    // Now create basic crud operations for the resource.
    Operation createOp = new Operation();
    createOp.setName("POST /" + resource);
    createOp.setMethod("POST");
    service.addOperation(createOp);

    Operation getOp = new Operation();
    getOp.setName("GET /" + resource + "/:id");
    getOp.setMethod("GET");
    getOp.setDispatcher(DispatchStyles.URI_PARTS);
    getOp.setDispatcherRules("id");
    service.addOperation(getOp);

    Operation updateOp = new Operation();
    updateOp.setName("PUT /" + resource + "/:id");
    updateOp.setMethod("PUT");
    updateOp.setDispatcher(DispatchStyles.URI_PARTS);
    updateOp.setDispatcherRules("id");
    service.addOperation(updateOp);

    Operation listOp = new Operation();
    listOp.setName("GET /" + resource);
    listOp.setMethod("GET");
    service.addOperation(listOp);

    Operation delOp = new Operation();
    delOp.setName("DELETE /" + resource + "/:id");
    delOp.setMethod("DELETE");
    delOp.setDispatcher(DispatchStyles.URI_PARTS);
    delOp.setDispatcherRules("id");
    service.addOperation(delOp);

    serviceRepository.save(service);
    log.info("Having created Service '{}' for generic resource {}", service.getId(), resource);

    // If reference payload is provided, record a first resource.
    if (referencePayload != null) {
        GenericResource genericResource = new GenericResource();
        genericResource.setServiceId(service.getId());
        genericResource.setReference(true);
        try {
            Document document = Document.parse(referencePayload);
            genericResource.setPayload(document);
            genericResourceRepository.save(genericResource);
        } catch (JsonParseException jpe) {
            log.error("Cannot parse the provided reference payload as JSON: {}", referencePayload);
            log.error("Reference is ignored, please provide JSON the next time");
        }
    }

    // Publish a Service create event before returning.
    publishServiceChangeEvent(service, ChangeType.CREATED);
    return service;
}

@Test
void testCreateGenericResourceServiceWithReference() {
    Service created = null;
    try {
        created = service.createGenericResourceService("Order Service", "1.0", "order",
                "{\"customerId\": \"123456789\", \"amount\": 12.5}");
    } catch (Exception e) {
        fail("No exception should be thrown");
    }

    // Check created object.
    assertNotNull(created.getId());

    // Check that service has created a reference generic resource.
    List<GenericResource> resources = genericResourceRepository.findReferencesByServiceId(created.getId());
    assertNotNull(resources);
    assertEquals(1, resources.size());

    GenericResource resource = resources.get(0);
    assertTrue(resource.isReference());
    assertEquals("123456789", resource.getPayload().get("customerId"));
    assertEquals(12.5, resource.getPayload().get("amount"));
}
public void clear() { bitSets.clear(); entries.clear(); }
@Test
public void testClear() {
    // insert
    for (long i = 0; i < COUNT; ++i) {
        insert(i, i);
        verify();
    }
    clear();
    verify();

    // reinsert
    for (long i = 0; i < COUNT; ++i) {
        insert(i, i);
        verify();
    }
    clear();
}
@Override
public void removePod(String uid) {
    checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_POD_UID);

    synchronized (this) {
        if (isPodInUse(uid)) {
            final String error = String.format(MSG_POD, uid, ERR_IN_USE);
            throw new IllegalStateException(error);
        }

        Pod pod = kubevirtPodStore.removePod(uid);
        if (pod != null) {
            log.debug(String.format(MSG_POD, pod.getMetadata().getName(), MSG_REMOVED));
        }
    }
}
@Test(expected = IllegalArgumentException.class) public void testRemovePodWithNull() { target.removePod(null); }
public int capacity() { return capacity; }
@Test
void shouldThrowExceptionForCapacityThatIsNotPowerOfTwo() {
    final int capacity = 777;
    final int totalBufferLength = capacity + BroadcastBufferDescriptor.TRAILER_LENGTH;
    when(buffer.capacity()).thenReturn(totalBufferLength);

    assertThrows(IllegalStateException.class, () -> new BroadcastReceiver(buffer));
}
public boolean isLldp() { return LLDP.contains(this); }
@Test
public void testIsLldp() throws Exception {
    assertFalse(MAC_NORMAL.isLldp());
    assertFalse(MAC_BCAST.isLldp());
    assertFalse(MAC_MCAST.isLldp());
    assertFalse(MAC_MCAST_2.isLldp());
    assertTrue(MAC_LLDP.isLldp());
    assertTrue(MAC_LLDP_2.isLldp());
    assertTrue(MAC_LLDP_3.isLldp());
    assertFalse(MAC_ONOS.isLldp());
}
@Override public int getOrder() { return PluginEnum.GENERAL_CONTEXT.getCode(); }
@Test public void testGetOrder() { assertEquals(this.generalContextPlugin.getOrder(), PluginEnum.GENERAL_CONTEXT.getCode()); }
public static ConnectionGroup getOrCreateGroup(String namespace) {
    AssertUtil.assertNotBlank(namespace, "namespace should not be empty");
    ConnectionGroup group = CONN_MAP.get(namespace);
    if (group == null) {
        synchronized (CREATE_LOCK) {
            if ((group = CONN_MAP.get(namespace)) == null) {
                group = new ConnectionGroup(namespace);
                CONN_MAP.put(namespace, group);
            }
        }
    }
    return group;
}
@Test(expected = IllegalArgumentException.class) public void testGetOrCreateGroupBadNamespace() { ConnectionManager.getOrCreateGroup(""); }
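getOrCreateGroup hand-rolls double-checked locking. If CONN_MAP is a ConcurrentMap (its declaration is not shown, so this is an assumption), the same create-at-most-once semantics are available from computeIfAbsent. A self-contained sketch of that alternative:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class GroupRegistrySketch {
    static final class Group {
        final String namespace;
        Group(String namespace) { this.namespace = namespace; }
    }

    // Assumes a ConcurrentMap; the excerpt does not show CONN_MAP's actual type.
    private static final ConcurrentMap<String, Group> GROUPS = new ConcurrentHashMap<>();

    static Group getOrCreateGroup(String namespace) {
        if (namespace == null || namespace.isBlank()) {
            throw new IllegalArgumentException("namespace should not be empty");
        }
        // computeIfAbsent atomically creates the group once per namespace.
        return GROUPS.computeIfAbsent(namespace, Group::new);
    }
}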
public int tryClaim(final int msgTypeId, final int length) {
    checkTypeId(msgTypeId);
    checkMsgLength(length);

    final AtomicBuffer buffer = this.buffer;
    final int recordLength = length + HEADER_LENGTH;
    final int recordIndex = claimCapacity(buffer, recordLength);

    if (INSUFFICIENT_CAPACITY == recordIndex) {
        return recordIndex;
    }

    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
    MemoryAccess.releaseFence();
    buffer.putInt(typeOffset(recordIndex), msgTypeId);

    return encodedMsgOffset(recordIndex);
}

@Test
void tryClaimReturnsOffsetAtWhichMessageBodyCanBeWritten() {
    final int msgTypeId = MSG_TYPE_ID;
    final int length = 333;
    final int recordLength = HEADER_LENGTH + length;
    final int alignedRecordLength = align(recordLength, ALIGNMENT);

    final int index = ringBuffer.tryClaim(msgTypeId, length);

    assertEquals(HEADER_LENGTH, index);
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).putLongOrdered(TAIL_COUNTER_INDEX, alignedRecordLength);
    inOrder.verify(buffer).putLong(alignedRecordLength, 0L);
    inOrder.verify(buffer).putIntOrdered(lengthOffset(0), -recordLength);
    inOrder.verify(buffer).putInt(typeOffset(0), msgTypeId);
}
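The expected values in the test fall straight out of the record-layout arithmetic: a record is the message body plus a fixed header, rounded up to the buffer alignment, and the returned claim offset skips the header. A worked sketch with the header and alignment sizes implied by the test (8-byte header of two ints, 8-byte alignment; both are assumptions here, check RecordDescriptor for the real constants):

class RingBufferLayoutSketch {
    static final int HEADER_LENGTH = 8; // assumed: length int + type int
    static final int ALIGNMENT = 8;     // assumed record alignment

    static int align(int value, int alignment) {
        return (value + alignment - 1) & -alignment;
    }

    public static void main(String[] args) {
        int length = 333;                                          // message body from the test
        int recordLength = HEADER_LENGTH + length;                 // 341
        int alignedRecordLength = align(recordLength, ALIGNMENT);  // 344
        int claimIndex = 0 + HEADER_LENGTH;                        // body starts after the header
        System.out.println(recordLength + " " + alignedRecordLength + " " + claimIndex);
        // The negative length written first (putIntOrdered(..., -recordLength))
        // marks the record as in-progress until commit flips it positive.
    }
}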
public Response get(URL url, Request request) throws IOException { return call(HttpMethods.GET, url, request); }
@Test
public void testGet_insecureClientOnHttpServerAndNoPortSpecified() throws IOException {
    FailoverHttpClient insecureHttpClient = newHttpClient(true, false);
    Mockito.when(mockHttpRequest.execute())
        .thenThrow(new ConnectException()) // server is not listening on 443
        .thenReturn(mockHttpResponse); // respond when connected through 80

    try (Response response = insecureHttpClient.get(new URL("https://insecure"), fakeRequest(null))) {
        byte[] bytes = new byte[4];
        Assert.assertEquals(4, response.getBody().read(bytes));
        Assert.assertEquals("body", new String(bytes, StandardCharsets.UTF_8));
    }

    Mockito.verify(mockHttpRequest, Mockito.times(2)).execute();
    Mockito.verifyNoInteractions(mockInsecureHttpRequest);
    verifyCapturedUrls("https://insecure", "http://insecure");
    verifyWarnings("Failed to connect to https://insecure over HTTPS. Attempting again with HTTP.");
}
public void finishTransaction(long dbId, long transactionId, Set<Long> errorReplicaIds) throws UserException { DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(dbId); dbTransactionMgr.finishTransaction(transactionId, errorReplicaIds); }
@Test
public void testFinishTransaction() throws UserException {
    long transactionId = masterTransMgr.beginTransaction(GlobalStateMgrTestUtil.testDbId1,
            Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1),
            GlobalStateMgrTestUtil.testTxnLable1,
            transactionSource,
            LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second);

    // commit a transaction
    TabletCommitInfo tabletCommitInfo1 =
            new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId1);
    TabletCommitInfo tabletCommitInfo2 =
            new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId2);
    TabletCommitInfo tabletCommitInfo3 =
            new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId3);
    List<TabletCommitInfo> transTablets = Lists.newArrayList();
    transTablets.add(tabletCommitInfo1);
    transTablets.add(tabletCommitInfo2);
    transTablets.add(tabletCommitInfo3);
    masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId, transTablets,
            Lists.newArrayList(), null);

    TransactionState transactionState = fakeEditLog.getTransaction(transactionId);
    slaveTransMgr.replayUpsertTransactionState(transactionState);
    assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus());

    Set<Long> errorReplicaIds = Sets.newHashSet();
    errorReplicaIds.add(GlobalStateMgrTestUtil.testReplicaId1);
    masterTransMgr.finishTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId, errorReplicaIds);
    transactionState = fakeEditLog.getTransaction(transactionId);
    assertEquals(TransactionStatus.VISIBLE, transactionState.getTransactionStatus());

    // check replica version
    Partition testPartition = masterGlobalStateMgr.getDb(GlobalStateMgrTestUtil.testDbId1)
            .getTable(GlobalStateMgrTestUtil.testTableId1)
            .getPartition(GlobalStateMgrTestUtil.testPartition1);
    // check partition version
    assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, testPartition.getVisibleVersion());
    assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, testPartition.getNextVersion());
    // check partition next version
    LocalTablet tablet = (LocalTablet) testPartition.getIndex(GlobalStateMgrTestUtil.testIndexId1)
            .getTablet(GlobalStateMgrTestUtil.testTabletId1);
    for (Replica replica : tablet.getImmutableReplicas()) {
        if (replica.getId() == GlobalStateMgrTestUtil.testReplicaId1) {
            assertEquals(GlobalStateMgrTestUtil.testStartVersion, replica.getVersion());
        } else {
            assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, replica.getVersion());
        }
    }

    // slave replay new state and compare globalStateMgr
    slaveTransMgr.replayUpsertTransactionState(transactionState);
    assertTrue(GlobalStateMgrTestUtil.compareState(masterGlobalStateMgr, slaveGlobalStateMgr));
}
public static String checkValidName(String name) {
    checkArgument(!isNullOrEmpty(name), "name is null or empty");
    checkArgument('a' <= name.charAt(0) && name.charAt(0) <= 'z',
            "name must start with a lowercase latin letter: '%s'", name);
    for (int i = 1; i < name.length(); i++) {
        char ch = name.charAt(i);
        checkArgument('a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' || ch == '_',
                "name must contain only lowercase latin letters, digits or underscores: '%s'", name);
    }
    return name;
}

@Test
public void testCheckValidColumnName() {
    checkValidName("abc01_def2");
    assertThrows(() -> checkValidName(null));
    assertThrows(() -> checkValidName(""));
    assertThrows(() -> checkValidName("Abc"));
    assertThrows(() -> checkValidName("0abc"));
    assertThrows(() -> checkValidName("_abc"));
    assertThrows(() -> checkValidName("aBc"));
    assertThrows(() -> checkValidName("ab-c"));
}
public CompactionTask.TaskResult getResult() {
    int allSuccess = 0;
    int partialSuccess = 0;
    int noneSuccess = 0;
    for (CompactionTask task : tasks) {
        CompactionTask.TaskResult subTaskResult = task.getResult();
        switch (subTaskResult) {
            case NOT_FINISHED:
                return subTaskResult; // early return
            case PARTIAL_SUCCESS:
                partialSuccess++;
                break;
            case NONE_SUCCESS:
                noneSuccess++;
                break;
            case ALL_SUCCESS:
                allSuccess++;
                break;
            default:
                Preconditions.checkArgument(false, "unhandled compaction task result: %s", subTaskResult.name());
                break;
        }
    }
    if (allSuccess == tasks.size()) {
        return CompactionTask.TaskResult.ALL_SUCCESS;
    } else if (noneSuccess == tasks.size()) {
        return CompactionTask.TaskResult.NONE_SUCCESS;
    } else {
        return CompactionTask.TaskResult.PARTIAL_SUCCESS;
    }
}

@Test
public void testGetResult() {
    Database db = new Database();
    Table table = new Table(Table.TableType.CLOUD_NATIVE);
    PhysicalPartition partition = new PhysicalPartitionImpl(0, "", 1, 2, null);
    CompactionJob job = new CompactionJob(db, table, partition, 10010, true);
    Assert.assertTrue(job.getAllowPartialSuccess());

    List<CompactionTask> list = new ArrayList<>();
    list.add(new CompactionTask(100));
    list.add(new CompactionTask(101));
    job.setTasks(list);

    new MockUp<CompactionTask>() {
        @Mock
        public CompactionTask.TaskResult getResult() {
            return CompactionTask.TaskResult.NOT_FINISHED;
        }
    };
    Assert.assertEquals(CompactionTask.TaskResult.NOT_FINISHED, job.getResult());

    new MockUp<CompactionTask>() {
        @Mock
        public CompactionTask.TaskResult getResult() {
            return CompactionTask.TaskResult.NONE_SUCCESS;
        }
    };
    Assert.assertEquals(CompactionTask.TaskResult.NONE_SUCCESS, job.getResult());

    new MockUp<CompactionTask>() {
        @Mock
        public CompactionTask.TaskResult getResult() {
            return CompactionTask.TaskResult.ALL_SUCCESS;
        }
    };
    Assert.assertEquals(CompactionTask.TaskResult.ALL_SUCCESS, job.getResult());

    new MockUp<CompactionTask>() {
        @Mock
        public CompactionTask.TaskResult getResult() {
            return CompactionTask.TaskResult.PARTIAL_SUCCESS;
        }
    };
    Assert.assertEquals(CompactionTask.TaskResult.PARTIAL_SUCCESS, job.getResult());
}
@Override
public void run() {
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}

@Test
void testContinueOnStepFailure6() {
    fail = true;
    run(
        "def var = 'foo'",
        "configure continueOnStepFailure = { enabled: true, continueAfter: true, keywords: ['match', 'eval', 'if'] }",
        "match var == 'bar'",
        "if(true == true) { syntax error within JS line }",
        "match var == 'crawl'",
        "match var == 'foo'",
        "configure continueOnStepFailure = false",
        "match var == 'foo'",
        "match var == 'foo'"
    );
    // the last failed step will be shown as the resulting failed step
    assertEquals("match var == 'crawl'", sr.result.getFailedStep().getStep().getText());
}
public static Optional<IndexSetValidator.Violation> validate(ElasticsearchConfiguration elasticsearchConfiguration,
                                                             IndexLifetimeConfig retentionConfig) {
    Period indexLifetimeMin = retentionConfig.indexLifetimeMin();
    Period indexLifetimeMax = retentionConfig.indexLifetimeMax();
    final Period leeway = indexLifetimeMax.minus(indexLifetimeMin);
    if (leeway.toStandardSeconds().getSeconds() < 0) {
        return Optional.of(IndexSetValidator.Violation.create(f("%s <%s> is shorter than %s <%s>",
                FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax,
                FIELD_INDEX_LIFETIME_MIN, indexLifetimeMin)));
    }

    if (leeway.toStandardSeconds().isLessThan(elasticsearchConfiguration.getTimeSizeOptimizingRotationPeriod().toStandardSeconds())) {
        return Optional.of(IndexSetValidator.Violation.create(f("The duration between %s and %s <%s> cannot be shorter than %s <%s>",
                FIELD_INDEX_LIFETIME_MAX, FIELD_INDEX_LIFETIME_MIN, leeway,
                TIME_SIZE_OPTIMIZING_ROTATION_PERIOD, elasticsearchConfiguration.getTimeSizeOptimizingRotationPeriod())));
    }

    Period fixedLeeway = elasticsearchConfiguration.getTimeSizeOptimizingRetentionFixedLeeway();
    if (Objects.nonNull(fixedLeeway) && leeway.toStandardSeconds().isLessThan(fixedLeeway.toStandardSeconds())) {
        return Optional.of(IndexSetValidator.Violation.create(f("The duration between %s and %s <%s> cannot be shorter than %s <%s>",
                FIELD_INDEX_LIFETIME_MAX, FIELD_INDEX_LIFETIME_MIN, leeway,
                TIME_SIZE_OPTIMIZING_RETENTION_FIXED_LEEWAY, fixedLeeway)));
    }

    final Period maxRetentionPeriod = elasticsearchConfiguration.getMaxIndexRetentionPeriod();
    if (maxRetentionPeriod != null
            && indexLifetimeMax.toStandardSeconds().isGreaterThan(maxRetentionPeriod.toStandardSeconds())) {
        return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> exceeds the configured maximum of %s=%s.",
                FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax,
                ElasticsearchConfiguration.MAX_INDEX_RETENTION_PERIOD, maxRetentionPeriod)));
    }

    if (periodOtherThanDays(indexLifetimeMax) && !elasticsearchConfiguration.allowFlexibleRetentionPeriod()) {
        return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> can only be a multiple of days",
                FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax)));
    }
    if (periodOtherThanDays(indexLifetimeMin) && !elasticsearchConfiguration.allowFlexibleRetentionPeriod()) {
        return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> can only be a multiple of days",
                FIELD_INDEX_LIFETIME_MIN, indexLifetimeMin)));
    }
    return Optional.empty();
}

@Test
public void timeBasedSizeOptimizingHonorsFixedLeeWay() {
    when(elasticConfig.getTimeSizeOptimizingRotationPeriod()).thenReturn(Period.days(1));
    when(elasticConfig.getTimeSizeOptimizingRetentionFixedLeeway()).thenReturn(Period.days(10));

    IndexLifetimeConfig config = IndexLifetimeConfig.builder()
            .indexLifetimeMin(Period.days(10))
            .indexLifetimeMax(Period.days(19))
            .build();

    assertThat(validate(elasticConfig, config)).hasValueSatisfying(v -> assertThat(v.message())
            .contains("The duration between index_lifetime_max and index_lifetime_min <P9D> "
                    + "cannot be shorter than time_size_optimizing_retention_fixed_leeway <P10D>"));

    assertThat(validate(elasticConfig, config.toBuilder().indexLifetimeMax(Period.days(20)).build())).isEmpty();
}
@Override public byte[] serialize(final String topic, final List<?> data) { if (data == null) { return null; } try { final StringWriter stringWriter = new StringWriter(); final CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat); csvPrinter.printRecord(() -> new FieldIterator(data, schema)); final String result = stringWriter.toString(); return result.substring(0, result.length() - 2).getBytes(StandardCharsets.UTF_8); } catch (final Exception e) { throw new SerializationException("Error serializing CSV message", e); } }
@Test public void shouldThrowOnInvalidDate() { // Given: givenSingleColumnSerializer(SqlTypes.DATE); final List<?> values = Collections.singletonList(new Date(1234)); // Then: final Exception e = assertThrows( SerializationException.class, () -> serializer.serialize("", values) ); assertThat(e.getCause().getMessage(), is("Date type should not have any time fields set to non-zero values.")); }
public Predicate convert(List<ScalarOperator> operators, DeltaLakeContext context) { DeltaLakeExprVisitor visitor = new DeltaLakeExprVisitor(); List<Predicate> predicates = Lists.newArrayList(); for (ScalarOperator operator : operators) { Predicate predicate = operator.accept(visitor, context); if (predicate != null) { predicates.add(predicate); } } Optional<Predicate> result = predicates.stream().reduce(And::new); return result.orElse(ALWAYS_TRUE); }
@Test
public void testConvertCastLiteralValue() {
    ScalarOperationToDeltaLakeExpr converter = new ScalarOperationToDeltaLakeExpr();
    ScalarOperationToDeltaLakeExpr.DeltaLakeContext context =
            new ScalarOperationToDeltaLakeExpr.DeltaLakeContext(schema, new HashSet<>());
    List<ScalarOperator> operators;
    ConstantOperator value;
    ScalarOperator operator;

    // int -> boolean
    value = ConstantOperator.createInt(0);
    operator = new BinaryPredicateOperator(BinaryType.EQ, cBoolCol, value);
    operators = new ArrayList<>(List.of(operator));
    Predicate convertExpr = converter.convert(operators, context);
    Predicate expectedExpr = new Predicate("=", cDeltaBoolCol, Literal.ofBoolean(false));
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // int -> smallint
    value = ConstantOperator.createInt(5);
    operator = new BinaryPredicateOperator(BinaryType.EQ, cShortCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    expectedExpr = new Predicate("=", cDeltaShortCol, Literal.ofShort((short) 5));
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // bigint -> int
    value = ConstantOperator.createBigint(5);
    operator = new BinaryPredicateOperator(BinaryType.EQ, cIntCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    expectedExpr = new Predicate("=", cDeltaIntCol, Literal.ofLong(5));
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // int -> bigint
    value = ConstantOperator.createInt(5);
    operator = new BinaryPredicateOperator(BinaryType.EQ, cLongCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    expectedExpr = new Predicate("=", cDeltaLongCol, Literal.ofInt(5));
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // string -> date
    value = ConstantOperator.createVarchar("2023-01-05");
    operator = new BinaryPredicateOperator(BinaryType.EQ, cDateCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    expectedExpr = new Predicate("=", cDeltaDateCol, Literal.ofDate(19362));
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // string -> datetime
    value = ConstantOperator.createVarchar("2023-01-05 01:01:01");
    operator = new BinaryPredicateOperator(BinaryType.EQ, cTimestampCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    expectedExpr = new Predicate("=", cDeltaTimestampCol, Literal.ofTimestamp(1672851661000000L));
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // date -> string
    value = ConstantOperator.createDate(LocalDateTime.of(2023, 1, 5, 0, 0));
    operator = new BinaryPredicateOperator(BinaryType.EQ, cVarcharCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    expectedExpr = new Predicate("=", cDeltaVarcharCol, Literal.ofString("2023-01-05"));
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // int -> string (not supported)
    value = ConstantOperator.createInt(12345);
    operator = new BinaryPredicateOperator(BinaryType.EQ, cVarcharCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    expectedExpr = AlwaysTrue.ALWAYS_TRUE;
    Assert.assertEquals(expectedExpr.toString(), convertExpr.toString());

    // int -> double (not supported)
    value = ConstantOperator.createInt(12345);
    operator = new BinaryPredicateOperator(BinaryType.EQ, cDoubleCol, value);
    operators = new ArrayList<>(List.of(operator));
    convertExpr = converter.convert(operators, context);
    Assert.assertEquals(AlwaysTrue.ALWAYS_TRUE.toString(), convertExpr.toString());
}
public void parseStepParameter( Map<String, Map<String, Object>> allStepOutputData, Map<String, Parameter> workflowParams, Map<String, Parameter> stepParams, Parameter param, String stepId) { parseStepParameter( allStepOutputData, workflowParams, stepParams, param, stepId, new HashSet<>()); }
@Test public void testParseInjectedStepIdStepParameter() { StringParameter bar = StringParameter.builder().name("id").value("test ${step_id}").build(); // create all the mock instances. InstanceWrapper mockInstanceWrapper = mock(InstanceWrapper.class); StepInstanceAttributes mockStepAttributes = mock(StepInstanceAttributes.class); when(mockStepAttributes.getStepId()).thenReturn("step1"); when(mockInstanceWrapper.isWorkflowParam()).thenReturn(false); when(mockInstanceWrapper.getStepInstanceAttributes()).thenReturn(mockStepAttributes); paramExtensionRepo.reset(Collections.emptyMap(), Collections.emptyMap(), mockInstanceWrapper); paramEvaluator.parseStepParameter( Collections.emptyMap(), Collections.emptyMap(), Collections.singletonMap( "step_id", StringParameter.builder() .mode(ParamMode.CONSTANT) .name("step_id") .expression("return params.getFromStep(\"step_id\");") .build()), bar, "step1"); assertEquals("test step1", bar.getEvaluatedResult()); }
private void sendResponse(Response response) {
    try {
        ((GrpcConnection) this.currentConnection).sendResponse(response);
    } catch (Exception e) {
        LOGGER.error("[{}] Failed to send ack response, requestId -> {}",
                this.currentConnection.getConnectionId(), response.getRequestId());
    }
}
@Test void testBindRequestStreamOnNextOtherRequest() throws NoSuchFieldException, IllegalAccessException, NoSuchMethodException, InvocationTargetException { BiRequestStreamGrpc.BiRequestStreamStub stub = mock(BiRequestStreamGrpc.BiRequestStreamStub.class); GrpcConnection grpcConnection = mock(GrpcConnection.class); when(stub.requestBiStream(any())).thenAnswer((Answer<StreamObserver<Payload>>) invocationOnMock -> { ((StreamObserver<Payload>) invocationOnMock.getArgument(0)).onNext(GrpcUtils.convert(new ConnectResetRequest())); return null; }); grpcClient.registerServerRequestHandler((request, connection) -> { if (request instanceof ConnectResetRequest) { return new ConnectResetResponse(); } return null; }); setCurrentConnection(grpcConnection, grpcClient); invokeBindRequestStream(grpcClient, stub, grpcConnection); verify(grpcConnection).sendResponse(any(ConnectResetResponse.class)); }
@Override public Long sendSingleNotifyToAdmin(Long userId, String templateCode, Map<String, Object> templateParams) { return sendSingleNotify(userId, UserTypeEnum.ADMIN.getValue(), templateCode, templateParams); }
@Test
public void testSendSingleNotifyToAdmin() {
    // Prepare parameters
    Long userId = randomLongId();
    String templateCode = randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
            .put("op", "login").build();
    // Mock the NotifyTemplateService methods
    NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(notifyTemplateService.getNotifyTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String content = randomString();
    when(notifyTemplateService.formatNotifyTemplateContent(eq(template.getContent()), eq(templateParams)))
            .thenReturn(content);
    // Mock the NotifyMessageService methods
    Long messageId = randomLongId();
    when(notifyMessageService.createNotifyMessage(eq(userId), eq(UserTypeEnum.ADMIN.getValue()),
            eq(template), eq(content), eq(templateParams))).thenReturn(messageId);
    // Invoke
    Long resultMessageId = notifySendService.sendSingleNotifyToAdmin(userId, templateCode, templateParams);
    // Assert
    assertEquals(messageId, resultMessageId);
}
@VisibleForTesting static Object convertAvroField(Object avroValue, Schema schema) { if (avroValue == null) { return null; } switch (schema.getType()) { case NULL: case INT: case LONG: case DOUBLE: case FLOAT: case BOOLEAN: return avroValue; case ENUM: case STRING: return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8 case UNION: for (Schema s : schema.getTypes()) { if (s.getType() == Schema.Type.NULL) { continue; } return convertAvroField(avroValue, s); } throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type"); case ARRAY: case BYTES: case FIXED: case RECORD: case MAP: default: throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType() + " for value field schema " + schema.getName()); } }
@Test public void testConvertAvroUnion() { Object converted = BaseJdbcAutoSchemaSink.convertAvroField(Integer.MAX_VALUE, createFieldAndGetSchema((builder) -> builder.name("field").type().unionOf().intType().endUnion().noDefault())); Assert.assertEquals(converted, Integer.MAX_VALUE); }
public static String formatSql(final AstNode root) { final StringBuilder builder = new StringBuilder(); new Formatter(builder).process(root, 0); return StringUtils.stripEnd(builder.toString(), "\n"); }
@Test public void shouldFormatSelectCorrectlyWithDuplicateFields() { final String statementString = "CREATE STREAM S AS SELECT address AS one, address AS two FROM address;"; final Statement statement = parseSingle(statementString); assertThat(SqlFormatter.formatSql(statement), equalTo("CREATE STREAM S AS SELECT\n" + " ADDRESS ONE,\n" + " ADDRESS TWO\n" + "FROM ADDRESS ADDRESS\n" + "EMIT CHANGES")); }
public static String fix(final String raw) { if ( raw == null || "".equals( raw.trim() )) { return raw; } MacroProcessor macroProcessor = new MacroProcessor(); macroProcessor.setMacros( macros ); return macroProcessor.parse( raw ); }
@Test
public void testAdd__Handle__rComplex() {
    String result = KnowledgeHelperFixerTest.fixer.fix( "something update( myObject); other" );
    assertEqualsIgnoreWhitespace( "something drools.update( myObject); other", result );

    result = KnowledgeHelperFixerTest.fixer.fix( "something update ( myObject );" );
    assertEqualsIgnoreWhitespace( "something drools.update( myObject );", result );

    result = KnowledgeHelperFixerTest.fixer.fix( " update( myObject ); x" );
    assertEqualsIgnoreWhitespace( " drools.update( myObject ); x", result );

    // should not be touched, as it is not a standalone word
    result = KnowledgeHelperFixerTest.fixer.fix( "xxupdate(myObject ) x" );
    assertEqualsIgnoreWhitespace( "xxupdate(myObject ) x", result );
}
@Override public Long del(byte[]... keys) { if (isQueueing() || isPipelined()) { for (byte[] key: keys) { write(key, LongCodec.INSTANCE, RedisCommands.DEL, key); } return null; } CommandBatchService es = new CommandBatchService(executorService); for (byte[] key: keys) { es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key); } BatchResult<Long> b = (BatchResult<Long>) es.execute(); return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum(); }
@Test public void testDel() { List<byte[]> keys = new ArrayList<>(); for (int i = 0; i < 10; i++) { byte[] key = ("test" + i).getBytes(); keys.add(key); connection.set(key, ("test" + i).getBytes()); } assertThat(connection.del(keys.toArray(new byte[0][]))).isEqualTo(10); }
public JmxCollector register() { return register(PrometheusRegistry.defaultRegistry); }
@Test public void testBooleanValues() throws Exception { JmxCollector jc = new JmxCollector("---").register(prometheusRegistry); assertEquals( 1.0, getSampleValue("boolean_Test_True", new String[] {}, new String[] {}), .001); assertEquals( 0.0, getSampleValue("boolean_Test_False", new String[] {}, new String[] {}), .001); }
public static SerdeFeatures of(final SerdeFeature... features) { return new SerdeFeatures(ImmutableSet.copyOf(features)); }
@Test(expected = IllegalArgumentException.class) public void shouldThrowOnIncompatibleFeatures() { // When: SerdeFeatures.of(WRAP_SINGLES, UNWRAP_SINGLES); }
@GetInitialRestriction
public OffsetRange initialRestriction(@Element KafkaSourceDescriptor kafkaSourceDescriptor) {
    Map<String, Object> updatedConsumerConfig =
        overrideBootstrapServersConfig(consumerConfig, kafkaSourceDescriptor);
    TopicPartition partition = kafkaSourceDescriptor.getTopicPartition();
    LOG.info("Creating Kafka consumer for initial restriction for {}", partition);
    try (Consumer<byte[], byte[]> offsetConsumer = consumerFactoryFn.apply(updatedConsumerConfig)) {
        ConsumerSpEL.evaluateAssign(offsetConsumer, ImmutableList.of(partition));
        long startOffset;
        @Nullable Instant startReadTime = kafkaSourceDescriptor.getStartReadTime();
        if (kafkaSourceDescriptor.getStartReadOffset() != null) {
            startOffset = kafkaSourceDescriptor.getStartReadOffset();
        } else if (startReadTime != null) {
            startOffset = ConsumerSpEL.offsetForTime(offsetConsumer, partition, startReadTime);
        } else {
            startOffset = offsetConsumer.position(partition);
        }
        long endOffset = Long.MAX_VALUE;
        @Nullable Instant stopReadTime = kafkaSourceDescriptor.getStopReadTime();
        if (kafkaSourceDescriptor.getStopReadOffset() != null) {
            endOffset = kafkaSourceDescriptor.getStopReadOffset();
        } else if (stopReadTime != null) {
            endOffset = ConsumerSpEL.offsetForTime(offsetConsumer, partition, stopReadTime);
        }
        // The discarded constructor call below appears to act as eager validation:
        // OffsetRange's constructor checks start <= end, so a malformed range fails
        // here, before the lineage source is recorded.
        new OffsetRange(startOffset, endOffset);
        Lineage.getSources()
            .add(
                "kafka",
                ImmutableList.of(
                    (String) updatedConsumerConfig.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG),
                    MoreObjects.firstNonNull(kafkaSourceDescriptor.getTopic(), partition.topic())));
        return new OffsetRange(startOffset, endOffset);
    }
}
@Test public void testInitialRestrictionWhenHasStopOffset() throws Exception { long expectedStartOffset = 10L; long expectedStopOffset = 20L; consumer.setStartOffsetForTime(15L, Instant.now()); consumer.setStopOffsetForTime(18L, Instant.now()); consumer.setCurrentPos(5L); OffsetRange result = dofnInstance.initialRestriction( KafkaSourceDescriptor.of( topicPartition, expectedStartOffset, null, expectedStopOffset, null, ImmutableList.of())); assertEquals(new OffsetRange(expectedStartOffset, expectedStopOffset), result); }
@Override public ExecuteContext before(ExecuteContext context) { String name = context.getMethod().getName(); if (context.getArguments() == null || context.getArguments().length == 0) { return context; } Object argument = context.getArguments()[0]; if ("setName".equals(name)) { if (argument == null || argument instanceof String) { setAppNameAndPutParameters(context.getObject(), (String) argument); } } else { if (argument == null || argument instanceof Map<?, ?>) { context.getArguments()[0] = ParametersUtils.putParameters((Map<String, String>) argument, routerConfig); } } return context; }
@Test public void testPutParametersWithNotEmpty() throws NoSuchMethodException { // map is not empty Map<String, String> map = new HashMap<>(); map.put("bar", "bar1"); Object[] args = new Object[1]; args[0] = map; ExecuteContext context = ExecuteContext.forMemberMethod(new Object(), ApplicationConfig.class.getMethod("setParameters", Map.class), args, null, null); interceptor.before(context); Map<String, String> parameters = (Map<String, String>) context.getArguments()[0]; Assert.assertEquals(config.getParameters().size() + 3, parameters.size()); Assert.assertEquals(config.getRouterVersion(), parameters.get(RouterConstant.META_VERSION_KEY)); Assert.assertEquals(config.getZone(), parameters.get(RouterConstant.META_ZONE_KEY)); Map<String, String> configParameters = config.getParameters(); configParameters.forEach( (key, value) -> Assert.assertEquals(value, parameters.get(RouterConstant.PARAMETERS_KEY_PREFIX + key))); map.forEach( (key, value) -> Assert.assertEquals(value, parameters.get(key))); }
@Override public int hashCode() { if (value == null) { return 31; } // Using recommended hashing algorithm from Effective Java for longs and doubles if (isIntegral(this)) { long value = getAsNumber().longValue(); return (int) (value ^ (value >>> 32)); } if (value instanceof Number) { long value = Double.doubleToLongBits(getAsNumber().doubleValue()); return (int) (value ^ (value >>> 32)); } return value.hashCode(); }
@Test public void testByteEqualsShort() { JsonPrimitive p1 = new JsonPrimitive((byte) 10); JsonPrimitive p2 = new JsonPrimitive((short) 10); assertThat(p1).isEqualTo(p2); assertThat(p1.hashCode()).isEqualTo(p2.hashCode()); }
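A quick arithmetic check of the integral branch in hashCode() above, restating only what the shown code does: (byte) 10 and (short) 10 both satisfy isIntegral, so the hash is computed from the long value 10, and the test's equal-hash assertion follows directly. A minimal editorial sketch, not part of the source:

public class JsonPrimitiveHashSketch {
    public static void main(String[] args) {
        // Mirrors the integral branch: hash = (int) (value ^ (value >>> 32)).
        long value = 10L; // getAsNumber().longValue() for (byte) 10 and (short) 10 alike
        int hash = (int) (value ^ (value >>> 32));
        System.out.println(hash); // prints 10 for both, so equal values imply equal hash codes
    }
}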
static SerializationConverter createNullableExternalConverter( DataType type, ZoneId pipelineZoneId) { return wrapIntoNullableExternalConverter(createExternalConverter(type, pipelineZoneId)); }
@Test public void testExternalConvert() { List<Column> columns = Arrays.asList( Column.physicalColumn("f2", DataTypes.BOOLEAN()), Column.physicalColumn("f3", DataTypes.FLOAT()), Column.physicalColumn("f4", DataTypes.DOUBLE()), Column.physicalColumn("f7", DataTypes.TINYINT()), Column.physicalColumn("f8", DataTypes.SMALLINT()), Column.physicalColumn("f9", DataTypes.INT()), Column.physicalColumn("f10", DataTypes.BIGINT()), Column.physicalColumn("f12", DataTypes.TIMESTAMP()), Column.physicalColumn("f14", DataTypes.DATE()), Column.physicalColumn("f15", DataTypes.CHAR(1)), Column.physicalColumn("f16", DataTypes.VARCHAR(256)), Column.physicalColumn("f17", DataTypes.TIMESTAMP()), Column.physicalColumn("f18", DataTypes.TIMESTAMP()), Column.physicalColumn("f19", DataTypes.TIMESTAMP()), Column.physicalColumn("f20", DataTypes.TIMESTAMP_LTZ()), Column.physicalColumn("f21", DataTypes.TIMESTAMP_LTZ()), Column.physicalColumn("f22", DataTypes.TIMESTAMP_LTZ())); List<DataType> dataTypes = columns.stream().map(v -> v.getType()).collect(Collectors.toList()); LocalDateTime time1 = LocalDateTime.ofInstant(Instant.parse("2021-01-01T08:00:00Z"), ZoneId.of("Z")); LocalDate date1 = LocalDate.of(2021, 1, 1); LocalDateTime f17 = LocalDateTime.ofInstant(Instant.parse("2021-01-01T08:01:11Z"), ZoneId.of("Z")); LocalDateTime f18 = LocalDateTime.ofInstant(Instant.parse("2021-01-01T08:01:11.123Z"), ZoneId.of("Z")); LocalDateTime f19 = LocalDateTime.ofInstant( Instant.parse("2021-01-01T08:01:11.123456Z"), ZoneId.of("Z")); Instant f20 = Instant.parse("2021-01-01T08:01:11Z"); Instant f21 = Instant.parse("2021-01-01T08:01:11.123Z"); Instant f22 = Instant.parse("2021-01-01T08:01:11.123456Z"); BinaryRecordDataGenerator generator = new BinaryRecordDataGenerator(RowType.of(dataTypes.toArray(new DataType[] {}))); BinaryRecordData recordData = generator.generate( new Object[] { true, 1.2F, 1.2345D, (byte) 1, (short) 32, 64, 128L, TimestampData.fromLocalDateTime(time1), (int) date1.toEpochDay(), BinaryStringData.fromString("a"), BinaryStringData.fromString("doris"), TimestampData.fromLocalDateTime(f17), TimestampData.fromLocalDateTime(f18), TimestampData.fromLocalDateTime(f19), LocalZonedTimestampData.fromInstant(f20), LocalZonedTimestampData.fromInstant(f21), LocalZonedTimestampData.fromInstant(f22), }); List row = new ArrayList(); for (int i = 0; i < recordData.getArity(); i++) { DorisRowConverter.SerializationConverter converter = DorisRowConverter.createNullableExternalConverter( columns.get(i).getType(), ZoneId.of("GMT+08:00")); row.add(converter.serialize(i, recordData)); } Assert.assertEquals( "[true, 1.2, 1.2345, 1, 32, 64, 128, 2021-01-01 08:00:00.000000, 2021-01-01, a, doris, 2021-01-01 " + "08:01:11.000000, 2021-01-01 08:01:11.123000, 2021-01-01 08:01:11.123456, 2021-01-01 " + "16:01:11.000000, 2021-01-01 16:01:11.123000, 2021-01-01 16:01:11.123456]", row.toString()); }
public void publishArtifacts(List<ArtifactPlan> artifactPlans, EnvironmentVariableContext environmentVariableContext) { final File pluggableArtifactFolder = publishPluggableArtifacts(artifactPlans, environmentVariableContext); try { final List<ArtifactPlan> mergedPlans = artifactPlanFilter.getBuiltInMergedArtifactPlans(artifactPlans); if (isMetadataFolderEmpty(pluggableArtifactFolder)) { LOGGER.info("Pluggable metadata folder is empty."); } else if (pluggableArtifactFolder != null) { mergedPlans.add(0, new ArtifactPlan(ArtifactPlanType.file, format("%s%s*", pluggableArtifactFolder.getName(), File.separator), PLUGGABLE_ARTIFACT_METADATA_FOLDER)); } for (ArtifactPlan artifactPlan : mergedPlans) { try { artifactPlan.publishBuiltInArtifacts(goPublisher, workingDirectory); } catch (Exception e) { failedArtifact.add(artifactPlan); } } if (!failedArtifact.isEmpty()) { StringBuilder builder = new StringBuilder(); for (ArtifactPlan artifactPlan : failedArtifact) { artifactPlan.printArtifactInfo(builder); } throw new RuntimeException(format("[%s] Uploading finished. Failed to upload %s.", PRODUCT_NAME, builder)); } } finally { FileUtils.deleteQuietly(pluggableArtifactFolder); } }
@Test public void shouldMergeTestReportFilesAndUploadResult() throws Exception { List<ArtifactPlan> artifactPlans = new ArrayList<>(); new DefaultJobPlan(new Resources(), artifactPlans, -1, null, null, new EnvironmentVariables(), new EnvironmentVariables(), null, null); artifactPlans.add(new ArtifactPlan(ArtifactPlanType.unit, "test1", "test")); artifactPlans.add(new ArtifactPlan(ArtifactPlanType.unit, "test2", "test")); final File firstTestFolder = prepareTestFolder(workingFolder, "test1"); final File secondTestFolder = prepareTestFolder(workingFolder, "test2"); artifactsPublisher.publishArtifacts(artifactPlans, env); publisher.assertPublished(firstTestFolder.getAbsolutePath(), "test"); publisher.assertPublished(secondTestFolder.getAbsolutePath(), "test"); publisher.assertPublished("result", "testoutput"); publisher.assertPublished("result" + File.separator + "index.html", "testoutput"); }
@Override public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return PathAttributes.EMPTY; } if(containerService.isContainer(file)) { final PathAttributes attributes = new PathAttributes(); if(log.isDebugEnabled()) { log.debug(String.format("Read location for bucket %s", file)); } attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier()); return attributes; } if(file.getType().contains(Path.Type.upload)) { final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus()); if(append.append) { return new PathAttributes().withSize(append.offset); } throw new NotfoundException(file.getAbsolute()); } try { PathAttributes attr; final Path bucket = containerService.getContainer(file); try { attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails( file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))); } catch(ServiceException e) { switch(e.getResponseCode()) { case 405: if(log.isDebugEnabled()) { log.debug(String.format("Mark file %s as delete marker", file)); } // Only DELETE method is allowed for delete markers attr = new PathAttributes(); attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString())); attr.setDuplicate(true); return attr; } throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file); } if(StringUtils.isNotBlank(attr.getVersionId())) { if(log.isDebugEnabled()) { log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file)); } // Determine if latest version try { final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails( bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId(); if(null != latest) { if(log.isDebugEnabled()) { log.debug(String.format("Found later version %s for %s", latest, file)); } // Duplicate if not latest version attr.setDuplicate(!latest.equals(attr.getVersionId())); } } catch(ServiceException e) { final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file); if(failure instanceof NotfoundException) { attr.setDuplicate(true); } else { throw failure; } } } return attr; } catch(NotfoundException e) { if(file.isDirectory()) { if(log.isDebugEnabled()) { log.debug(String.format("Search for common prefix %s", file)); } // File may be marked as placeholder but no placeholder file exists. Check for common prefix returned. try { new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1); } catch(ListCanceledException l) { // Found common prefix return PathAttributes.EMPTY; } catch(NotfoundException n) { throw e; } // Found common prefix return PathAttributes.EMPTY; } throw e; } }
@Test public void testVirtualHostStyle() throws Exception { final S3AttributesFinderFeature f = new S3AttributesFinderFeature(virtualhost, new S3AccessControlListFeature(virtualhost)); assertEquals(PathAttributes.EMPTY, f.find(new Path("/", EnumSet.of(Path.Type.directory)))); final String name = new AlphanumericRandomStringService().random(); final TransferStatus status = new TransferStatus(); final Path file = new S3TouchFeature(virtualhost, new S3AccessControlListFeature(session)).touch( new Path(name, EnumSet.of(Path.Type.file)), status); final PathAttributes attributes = f.find(new Path(file.getName(), EnumSet.of(Path.Type.file))); assertEquals(0L, attributes.getSize()); assertEquals("d41d8cd98f00b204e9800998ecf8427e", attributes.getChecksum().hash); assertNotEquals(-1L, attributes.getModificationDate()); new S3DefaultDeleteFeature(virtualhost).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@GetMapping("") @RequiresPermissions("system:pluginHandler:list") public ShenyuAdminResult queryPluginHandles(final String pluginId, final String field, @RequestParam @NotNull final Integer currentPage, @RequestParam @NotNull final Integer pageSize) { CommonPager<PluginHandleVO> commonPager = pluginHandleService.listByPage(new PluginHandleQuery(pluginId, field, null, new PageParameter(currentPage, pageSize))); return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, commonPager); }
@Test public void testQueryPluginHandles() throws Exception { given(this.pluginHandleService.listByPage(new PluginHandleQuery("2", null, null, new PageParameter(1, 1)))) .willReturn(new CommonPager<>()); this.mockMvc.perform(MockMvcRequestBuilders.get("/plugin-handle") .param("currentPage", "1") .param("pageSize", "1")) .andExpect(status().isOk()) .andReturn(); }
@Override public String convertDestination(ProtocolConverter converter, Destination d) { if (d == null) { return null; } ActiveMQDestination activeMQDestination = (ActiveMQDestination)d; String physicalName = activeMQDestination.getPhysicalName(); String rc = converter.getCreatedTempDestinationName(activeMQDestination); if( rc!=null ) { return rc; } StringBuilder buffer = new StringBuilder(); if (activeMQDestination.isQueue()) { if (activeMQDestination.isTemporary()) { buffer.append("/remote-temp-queue/"); } else { buffer.append("/queue/"); } } else { if (activeMQDestination.isTemporary()) { buffer.append("/remote-temp-topic/"); } else { buffer.append("/topic/"); } } buffer.append(physicalName); return buffer.toString(); }
@Test(timeout = 10000) public void testConvertCompositeTopics() throws Exception { String destinationA = "destinationA"; String destinationB = "destinationB"; String composite = "/topic/" + destinationA + ",/topic/" + destinationB; ActiveMQDestination destination = translator.convertDestination(converter, composite, false); assertEquals(ActiveMQDestination.TOPIC_TYPE, destination.getDestinationType()); assertTrue(destination.isComposite()); ActiveMQDestination[] composites = destination.getCompositeDestinations(); assertEquals(2, composites.length); Arrays.sort(composites); assertEquals(ActiveMQDestination.TOPIC_TYPE, composites[0].getDestinationType()); assertEquals(ActiveMQDestination.TOPIC_TYPE, composites[1].getDestinationType()); assertEquals(destinationA, composites[0].getPhysicalName()); assertEquals(destinationB, composites[1].getPhysicalName()); }
public boolean removeIf(Predicate<? super Map.Entry<HeaderName, String>> filter) {
    Objects.requireNonNull(filter, "filter");
    boolean removed = false;
    // Two-pointer in-place compaction: r scans every entry, w is the next slot to keep.
    int w = 0;
    for (int r = 0; r < size(); r++) {
        if (filter.test(new SimpleImmutableEntry<>(new HeaderName(originalName(r), name(r)), value(r)))) {
            // Matching entries are dropped simply by not copying them forward.
            removed = true;
        } else {
            // Keep this entry by shifting it down to slot w.
            originalName(w, originalName(r));
            name(w, name(r));
            value(w, value(r));
            w++;
        }
    }
    // Discard the tail beyond the last kept entry.
    truncate(w);
    return removed;
}
@Test void removeIf() { Headers headers = new Headers(); headers.add("Via", "duct"); headers.add("Cookie", "this=that"); headers.add("COOkie", "frizzle=frazzle"); headers.add("Soup", "salad"); boolean removed = headers.removeIf(entry -> entry.getKey().getName().equals("Cookie")); assertTrue(removed); Truth.assertThat(headers.getAll("cOoKie")).containsExactly("frizzle=frazzle"); Truth.assertThat(headers.size()).isEqualTo(3); }
public final void containsKey(@Nullable Object key) { check("keySet()").that(checkNotNull(actual).keySet()).contains(key); }
@Test public void containsKeyFailure() { ImmutableMultimap<String, String> multimap = ImmutableMultimap.of("kurt", "kluever"); expectFailureWhenTestingThat(multimap).containsKey("daniel"); assertFailureKeys("value of", "expected to contain", "but was", "multimap was"); assertFailureValue("value of", "multimap.keySet()"); assertFailureValue("expected to contain", "daniel"); assertFailureValue("but was", "[kurt]"); }
@Override
public <T extends Response> T send(Request request, Class<T> responseType) throws IOException {
    try {
        return sendAsync(request, responseType).get();
    } catch (InterruptedException e) {
        // Restore the interrupt flag rather than clearing it with Thread.interrupted().
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted WebSocket request", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) e.getCause();
        }
        throw new RuntimeException("Unexpected exception", e.getCause());
    }
}
@Test public void testSyncRequest() throws Exception { CountDownLatch requestSent = new CountDownLatch(1); // Wait for a request to be sent doAnswer( invocation -> { requestSent.countDown(); return null; }) .when(webSocketClient) .send(anyString()); // Send reply asynchronously runAsync( () -> { try { requestSent.await(2, TimeUnit.SECONDS); sendGethVersionReply(); } catch (Exception e) { throw new RuntimeException(e); } }); Web3ClientVersion reply = service.send(request, Web3ClientVersion.class); assertEquals(reply.getWeb3ClientVersion(), "geth-version"); }
static DescriptorDigest generateSelector(ImmutableList<FileEntry> layerEntries) throws IOException { return Digests.computeJsonDigest(toSortedJsonTemplates(layerEntries)); }
@Test public void testGenerateSelector_targetModificationTimeChanged() throws IOException { Path layerFile = temporaryFolder.newFile().toPath(); AbsoluteUnixPath pathInContainer = AbsoluteUnixPath.get("/bar"); FilePermissions permissions = FilePermissions.fromOctalString("111"); FileEntry layerEntry1 = new FileEntry(layerFile, pathInContainer, permissions, Instant.now()); FileEntry layerEntry2 = new FileEntry(layerFile, pathInContainer, permissions, Instant.EPOCH); // Verify that different target modification times generate different selectors Assert.assertNotEquals( LayerEntriesSelector.generateSelector(ImmutableList.of(layerEntry1)), LayerEntriesSelector.generateSelector(ImmutableList.of(layerEntry2))); }
public int controlledPoll(final ControlledFragmentHandler handler, final int fragmentLimit) { if (isClosed) { return 0; } int fragmentsRead = 0; long initialPosition = subscriberPosition.get(); int initialOffset = (int)initialPosition & termLengthMask; int offset = initialOffset; final UnsafeBuffer termBuffer = activeTermBuffer(initialPosition); final int capacity = termBuffer.capacity(); final Header header = this.header; header.buffer(termBuffer); try { while (fragmentsRead < fragmentLimit && offset < capacity) { final int length = frameLengthVolatile(termBuffer, offset); if (length <= 0) { break; } final int frameOffset = offset; final int alignedLength = BitUtil.align(length, FRAME_ALIGNMENT); offset += alignedLength; if (isPaddingFrame(termBuffer, frameOffset)) { continue; } ++fragmentsRead; header.offset(frameOffset); final Action action = handler.onFragment( termBuffer, frameOffset + HEADER_LENGTH, length - HEADER_LENGTH, header); if (ABORT == action) { --fragmentsRead; offset -= alignedLength; break; } if (BREAK == action) { break; } if (COMMIT == action) { initialPosition += (offset - initialOffset); initialOffset = offset; subscriberPosition.setOrdered(initialPosition); } } } catch (final Exception ex) { errorHandler.onError(ex); } finally { final long resultingPosition = initialPosition + (offset - initialOffset); if (resultingPosition > initialPosition) { subscriberPosition.setOrdered(resultingPosition); } } return fragmentsRead; }
@Test void shouldPollFragmentsToControlledFragmentHandlerOnCommit() { final long initialPosition = computePosition(INITIAL_TERM_ID, 0, POSITION_BITS_TO_SHIFT, INITIAL_TERM_ID); position.setOrdered(initialPosition); final Image image = createImage(); insertDataFrame(INITIAL_TERM_ID, offsetForFrame(0)); insertDataFrame(INITIAL_TERM_ID, offsetForFrame(1)); when(mockControlledFragmentHandler.onFragment(any(DirectBuffer.class), anyInt(), anyInt(), any(Header.class))) .thenReturn(Action.COMMIT); final int fragmentsRead = image.controlledPoll(mockControlledFragmentHandler, Integer.MAX_VALUE); assertThat(fragmentsRead, is(2)); final InOrder inOrder = Mockito.inOrder(position, mockControlledFragmentHandler); inOrder.verify(mockControlledFragmentHandler).onFragment( any(UnsafeBuffer.class), eq(HEADER_LENGTH), eq(DATA.length), any(Header.class)); inOrder.verify(position).setOrdered(initialPosition + ALIGNED_FRAME_LENGTH); inOrder.verify(mockControlledFragmentHandler).onFragment( any(UnsafeBuffer.class), eq(ALIGNED_FRAME_LENGTH + HEADER_LENGTH), eq(DATA.length), any(Header.class)); inOrder.verify(position).setOrdered(initialPosition + (ALIGNED_FRAME_LENGTH * 2L)); }
public static Path compose(final Path root, final String path) { if(StringUtils.startsWith(path, String.valueOf(Path.DELIMITER))) { // Mount absolute path final String normalized = normalize(StringUtils.replace(path, "\\", String.valueOf(Path.DELIMITER)), true); if(StringUtils.equals(normalized, String.valueOf(Path.DELIMITER))) { return root; } return new Path(normalized, normalized.equals(String.valueOf(Path.DELIMITER)) ? EnumSet.of(Path.Type.volume, Path.Type.directory) : EnumSet.of(Path.Type.directory)); } else { final String normalized; if(StringUtils.startsWith(path, String.format("%s%s", Path.HOME, Path.DELIMITER))) { // Relative path to the home directory normalized = normalize(StringUtils.removeStart(StringUtils.removeStart( StringUtils.replace(path, "\\", String.valueOf(Path.DELIMITER)), Path.HOME), String.valueOf(Path.DELIMITER)), false); } else { // Relative path normalized = normalize(StringUtils.replace(path, "\\", String.valueOf(Path.DELIMITER)), false); } if(StringUtils.equals(normalized, String.valueOf(Path.DELIMITER))) { return root; } return new Path(String.format("%s%s%s", root.getAbsolute(), root.isRoot() ? StringUtils.EMPTY : Path.DELIMITER, normalized), EnumSet.of(Path.Type.directory)); } }
@Test public void testHomeParent() { final Path home = PathNormalizer.compose(new Path("/", EnumSet.of(Path.Type.directory)), String.format("%s/sandbox/sub", Path.HOME)); assertEquals(new Path("/sandbox/sub", EnumSet.of(Path.Type.directory)), home); assertEquals(new Path("/sandbox", EnumSet.of(Path.Type.directory)), home.getParent()); }
@Override public Stream<MappingField> resolveAndValidateFields( boolean isKey, List<MappingField> userFields, Map<String, String> options, InternalSerializationService serializationService ) { Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey); Class<?> typeClass = getMetadata(fieldsByPath) .<Class<?>>map(KvMetadataJavaResolver::loadClass) .orElseGet(() -> loadClass(options, isKey)); QueryDataType type = QueryDataTypeUtils.resolveTypeForClass(typeClass); if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) { return userFields.isEmpty() ? resolvePrimitiveField(isKey, type) : resolveAndValidatePrimitiveField(isKey, fieldsByPath, type); } else { return userFields.isEmpty() ? resolveObjectFields(isKey, typeClass) : resolveAndValidateObjectFields(isKey, fieldsByPath, typeClass); } }
@Test @Parameters({ "true, __key", "false, this" }) public void when_typeMismatchBetweenObjectDeclaredAndSchemaField_then_throws(boolean key, String prefix) { Map<String, String> options = Map.of( (key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT, (key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), Type.class.getName() ); assertThatThrownBy(() -> INSTANCE.resolveAndValidateFields( key, singletonList(field("field", QueryDataType.VARCHAR, prefix + ".field")), options, null )).isInstanceOf(QueryException.class) .hasMessageContaining("Mismatch between declared and resolved type for field 'field'"); }
public static void mergeOutputDataParams( Map<String, Parameter> allParams, Map<String, Parameter> params) { params.forEach( (name, param) -> { if (!allParams.containsKey(name)) { throw new MaestroValidationException( "Invalid output parameter [%s], not defined in params", name); } MergeContext context = MergeContext.stepCreate(ParamSource.OUTPUT_PARAMETER); if (param.getType() == ParamType.MAP && param.isLiteral()) { ParamDefinition baseDef = allParams.get(name).toDefinition(); Map<String, ParamDefinition> baseMap = baseDef.asMapParamDef().getValue(); ParamDefinition toMergeDef = param.toDefinition(); Map<String, ParamDefinition> toMergeMap = toMergeDef.asMapParamDef().getValue(); mergeParams(baseMap, toMergeMap, context); Parameter mergedParam = buildMergedParamDefinition(name, toMergeDef, baseDef, context, baseMap) .toParameter(); populateEvaluatedResultAndTime(mergedParam, param.getEvaluatedTime()); allParams.put(name, mergedParam); } else if (param.getType() == ParamType.STRING_MAP && param.isLiteral()) { ParamDefinition baseDef = allParams.get(name).toDefinition(); Map<String, String> baseMap = baseDef.asStringMapParamDef().getValue(); ParamDefinition toMergeDef = param.toDefinition(); Map<String, String> toMergeMap = toMergeDef.asStringMapParamDef().getValue(); baseMap.putAll(toMergeMap); Parameter mergedParam = buildMergedParamDefinition(name, toMergeDef, baseDef, context, baseMap) .toParameter(); populateEvaluatedResultAndTime(mergedParam, param.getEvaluatedTime()); allParams.put(name, mergedParam); } else { ParamDefinition paramDefinition = ParamsMergeHelper.buildMergedParamDefinition( name, param.toDefinition(), allParams.get(name).toDefinition(), MergeContext.stepCreate(ParamSource.OUTPUT_PARAMETER), param.getValue()); Parameter parameter = paramDefinition.toParameter(); parameter.setEvaluatedResult(param.getEvaluatedResult()); parameter.setEvaluatedTime(param.getEvaluatedTime()); allParams.put(name, parameter); } }); }
@Test public void testMergeOutputDataParamsInvalidException() { Map<String, Parameter> allParams = new LinkedHashMap<>(); Map<String, Parameter> paramsToMerge = new LinkedHashMap<>(); paramsToMerge.put("key", StringParameter.builder().value("test").build()); AssertHelper.assertThrows( "throws exception when output params to merge are not present in all params", MaestroValidationException.class, "Invalid output parameter [key], not defined in params", () -> ParamsMergeHelper.mergeOutputDataParams(allParams, paramsToMerge)); }
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) { return validate(klass, options, false); }
@Test public void testWhenRequiredOptionIsSet() { Required required = PipelineOptionsFactory.as(Required.class); required.setRunner(CrashingRunner.class); required.setObject("blah"); PipelineOptionsValidator.validate(Required.class, required); }
@Override public DeterministicKeyChain toDecrypted(CharSequence password) { Objects.requireNonNull(password); checkArgument(password.length() > 0); KeyCrypter crypter = getKeyCrypter(); checkState(crypter != null, () -> "chain not encrypted"); AesKey derivedKey = crypter.deriveKey(password); return toDecrypted(derivedKey); }
@Test(expected = IllegalStateException.class) public void notEncrypted() { chain.toDecrypted("fail"); }
@Override public <T> void register(Class<T> remoteInterface, T object) { register(remoteInterface, object, 1); }
@Test public void testInvocationWithSerializationCodec() { RedissonClient server = Redisson.create(createConfig().setCodec(new SerializationCodec())); RedissonClient client = Redisson.create(createConfig().setCodec(new SerializationCodec())); try { server.getRemoteService().register(RemoteInterface.class, new RemoteImpl()); RemoteInterface service = client.getRemoteService().get(RemoteInterface.class); try { assertThat(service.resultMethod(21L)).isEqualTo(42L); } catch (Exception e) { Assertions.fail("Should be compatible with SerializationCodec"); } try { assertThat(service.doSomethingWithSerializablePojo(new SerializablePojo("test")).getStringField()).isEqualTo("test"); } catch (Exception e) { e.printStackTrace(); Assertions.fail("Should be compatible with SerializationCodec"); } try { assertThat(service.doSomethingWithPojo(new Pojo("test")).getStringField()).isEqualTo("test"); Assertions.fail("SerializationCodec should not be able to serialize a not serializable class"); } catch (Exception e) { e.printStackTrace(); assertThat(e.getCause()).isInstanceOf(NotSerializableException.class); assertThat(e.getCause().getMessage()).contains("Pojo"); } } finally { client.shutdown(); server.shutdown(); } }
@Override public Network network() { return network; }
@Test public void getAltNetworkUsingNetworks() { // An alternative network NetworkParameters altNetParams = new MockAltNetworkParams(); // Add new network params, this MODIFIES GLOBAL STATE in `Networks` Networks.register(altNetParams); try { // Check if can parse address Address altAddress = AddressParser.getLegacy().parseAddress("LLxSnHLN2CYyzB5eWTR9K9rS9uWtbTQFb6"); assertEquals(altNetParams.getId(), altAddress.network().id()); // Check if main network works as before Address mainAddress = AddressParser.getLegacy(MAINNET).parseAddress("17kzeh4N8g49GFvdDzSf8PjaPfyoD1MndL"); assertEquals(MAINNET.id(), mainAddress.network().id()); } finally { // Unregister network. Do this in a finally block so other tests don't fail if the try block fails to complete Networks.unregister(altNetParams); } try { AddressParser.getLegacy().parseAddress("LLxSnHLN2CYyzB5eWTR9K9rS9uWtbTQFb6"); fail(); } catch (AddressFormatException e) { } }
@Override public @Nullable String getFilename() { if (!isDirectory()) { return key.substring(key.lastIndexOf('/') + 1); } if ("/".equals(key)) { return null; } String keyWithoutTrailingSlash = key.substring(0, key.length() - 1); return keyWithoutTrailingSlash.substring(keyWithoutTrailingSlash.lastIndexOf('/') + 1); }
@Test public void testGetFilename() { assertNull(S3ResourceId.fromUri("s3://my_bucket/").getFilename()); assertEquals("abc", S3ResourceId.fromUri("s3://my_bucket/abc").getFilename()); assertEquals("abc", S3ResourceId.fromUri("s3://my_bucket/abc/").getFilename()); assertEquals("def", S3ResourceId.fromUri("s3://my_bucket/abc/def").getFilename()); assertEquals("def", S3ResourceId.fromUri("s3://my_bucket/abc/def/").getFilename()); assertEquals("xyz.txt", S3ResourceId.fromUri("s3://my_bucket/abc/xyz.txt").getFilename()); }
@Override public FileEntity upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency); try { // Full size of file final long size = status.getLength() + status.getOffset(); final List<Future<TransferStatus>> parts = new ArrayList<>(); long offset = 0; long remaining = status.getLength(); String ref = null; for(int partNumber = 1; remaining > 0; partNumber++) { final FileUploadPartEntity uploadPartEntity = this.continueUpload(file, ref, partNumber); final long length; if(uploadPartEntity.isParallelParts()) { length = Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining); } else { length = remaining; } parts.add(this.submit(pool, file, local, throttle, listener, status, uploadPartEntity.getUploadUri(), partNumber, offset, length, callback)); remaining -= length; offset += length; ref = uploadPartEntity.getRef(); } final List<TransferStatus> checksums = Interruptibles.awaitAll(parts); final FileEntity entity = this.completeUpload(file, ref, status, checksums); // Mark parent status as complete status.withResponse(new BrickAttributesFinderFeature(session).toAttributes(entity)).setComplete(); return entity; } finally { // Cancel future tasks pool.shutdown(false); } }
@Test public void testUploadSinglePart() throws Exception { final BrickUploadFeature feature = new BrickUploadFeature(session, new BrickWriteFeature(session), 5 * 1024L * 1024L, 2); final Path root = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)); final String name = new AlphanumericRandomStringService().random(); final Path test = new Path(root, name, EnumSet.of(Path.Type.file)); final Local local = new Local(System.getProperty("java.io.tmpdir"), name); final int length = 2 * 1024 * 1024; final byte[] content = RandomUtils.nextBytes(length); IOUtils.write(content, local.getOutputStream(false)); final TransferStatus status = new TransferStatus(); status.setLength(content.length); status.setMime("text/plain"); final BytecountStreamListener count = new BytecountStreamListener(); feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, new DisabledLoginCallback()); assertEquals(content.length, count.getSent()); assertTrue(status.isComplete()); assertNotSame(PathAttributes.EMPTY, status.getResponse()); assertTrue(new BrickFindFeature(session).find(test)); final PathAttributes attributes = new BrickAttributesFinderFeature(session).find(test); assertEquals(content.length, attributes.getSize()); final byte[] compare = new byte[length]; IOUtils.readFully(new BrickReadFeature(session).read(test, new TransferStatus().withLength(length), new DisabledConnectionCallback()), compare); assertArrayEquals(content, compare); new BrickDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); local.delete(); }
@CheckForNull public static Number tryParseNumber(@CheckForNull String numberStr, @CheckForNull Number defaultNumber) { if (numberStr == null || numberStr.isEmpty()) { return defaultNumber; } try { return NumberFormat.getNumberInstance().parse(numberStr); } catch (ParseException e) { return defaultNumber; } }
@Test public void testTryParseNumber() { assertEquals("Successful parse did not return the parsed value", 20, Util.tryParseNumber("20", 10).intValue()); assertEquals("Failed parse did not return the default value", 10, Util.tryParseNumber("ss", 10).intValue()); assertEquals("Parsing empty string did not return the default value", 10, Util.tryParseNumber("", 10).intValue()); assertEquals("Parsing null string did not return the default value", 10, Util.tryParseNumber(null, 10).intValue()); }
public List<String> getAll() { return new ArrayList<>(options); }
@Test public void constructor_without_arguments_creates_empty_JvmOptions() { JvmOptions<JvmOptions> testJvmOptions = new JvmOptions<>(); assertThat(testJvmOptions.getAll()).isEmpty(); }
@Override public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { return new DriverPropertyInfo[0]; }
@Test public void testGetPropertyInfo() throws SQLException { assertNotNull("getPropertyInfo", driver.getPropertyInfo(null, null)); }
public static void initSSL(Properties consumerProps) { // Check if one-way SSL is enabled. In this scenario, the client validates the server certificate. String trustStoreLocation = consumerProps.getProperty(SSL_TRUSTSTORE_LOCATION); String trustStorePassword = consumerProps.getProperty(SSL_TRUSTSTORE_PASSWORD); String serverCertificate = consumerProps.getProperty(STREAM_KAFKA_SSL_SERVER_CERTIFICATE); if (StringUtils.isAnyEmpty(trustStoreLocation, trustStorePassword, serverCertificate)) { LOGGER.info("Skipping auto SSL server validation since it's not configured."); return; } if (shouldRenewTrustStore(consumerProps)) { initTrustStore(consumerProps); } // Set the security protocol String securityProtocol = consumerProps.getProperty(SECURITY_PROTOCOL, DEFAULT_SECURITY_PROTOCOL); consumerProps.setProperty(SECURITY_PROTOCOL, securityProtocol); // Check if two-way SSL is enabled. In this scenario, the client validates the server's certificate and the server // validates the client's certificate. String keyStoreLocation = consumerProps.getProperty(SSL_KEYSTORE_LOCATION); String keyStorePassword = consumerProps.getProperty(SSL_KEYSTORE_PASSWORD); String keyPassword = consumerProps.getProperty(SSL_KEY_PASSWORD); String clientCertificate = consumerProps.getProperty(STREAM_KAFKA_SSL_CLIENT_CERTIFICATE); if (StringUtils.isAnyEmpty(keyStoreLocation, keyStorePassword, keyPassword, clientCertificate)) { LOGGER.info("Skipping auto SSL client validation since it's not configured."); return; } if (shouldRenewKeyStore(consumerProps)) { initKeyStore(consumerProps); } }
@Test public void testInitSSLBackwardsCompatibilityCheck() throws CertificateException, NoSuchAlgorithmException, OperatorCreationException, NoSuchProviderException, IOException, KeyStoreException { Properties consumerProps = new Properties(); setTrustStoreProps(consumerProps); setKeyStoreProps(consumerProps); KafkaSSLUtils.initSSL(consumerProps); // validate validateTrustStoreCertificateCount(1); validateKeyStoreCertificateCount(1); setTrustStoreProps(consumerProps); // new server certificate is generated consumerProps.remove("stream.kafka.ssl.server.certificate"); setKeyStoreProps(consumerProps); // new client certificate is generated consumerProps.remove("stream.kafka.ssl.client.certificate"); // Attempt to initialize the trust store and key store again without passing the required certificates KafkaSSLUtils.initSSL(consumerProps); // validate again that the existing certificates are untouched. validateTrustStoreCertificateCount(1); validateKeyStoreCertificateCount(1); }
public void setProperty(String name, String value) { if (value == null) { return; } Method setter = aggregationAssessor.findSetterMethod(name); if (setter == null) { addWarn("No setter for property [" + name + "] in " + objClass.getName() + "."); } else { try { setProperty(setter, value); } catch (PropertySetterException ex) { addWarn("Failed to set property [" + name + "] to value \"" + value + "\". ", ex); } } }
@Test public void testSetProperty() { { House house = new House(); PropertySetter setter = new PropertySetter(new BeanDescriptionCache(context), house); setter.setProperty("count", "10"); setter.setProperty("temperature", "33.1"); setter.setProperty("name", "jack"); setter.setProperty("open", "true"); assertEquals(10, house.getCount()); assertEquals(33.1d, (double) house.getTemperature(), 0.01); assertEquals("jack", house.getName()); assertTrue(house.isOpen()); } { House house = new House(); PropertySetter setter = new PropertySetter(new BeanDescriptionCache(context), house); setter.setProperty("Count", "10"); setter.setProperty("Name", "jack"); setter.setProperty("Open", "true"); assertEquals(10, house.getCount()); assertEquals("jack", house.getName()); assertTrue(house.isOpen()); } }
public void onOK() { DatabaseMeta database = new DatabaseMeta(); this.getInfo( database ); boolean passed = checkPoolingParameters(); if ( !passed ) { return; } String[] remarks = database.checkParameters(); String message = ""; if ( remarks.length != 0 ) { for ( int i = 0; i < remarks.length; i++ ) { message = message.concat( "* " ).concat( remarks[ i ] ).concat( System.getProperty( "line.separator" ) ); } showMessage( message, false ); } else { if ( databaseMeta == null ) { databaseMeta = new DatabaseMeta(); } this.getInfo( databaseMeta ); databaseMeta.setChanged(); close(); } }
@Test public void testOnOK() throws Exception { }
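Editorial note: the test body above is empty in the source. Purely as a hypothetical sketch of what such a test could verify — the dialog type name `DatabaseDialog`, the no-arg constructor, the spy setup, and the visibility of `getInfo`/`checkPoolingParameters` are all assumptions, not the project's actual test:

@Test
public void testOnOKStopsWhenPoolingParametersAreInvalid() throws Exception {
    // Hypothetical sketch only; assumes the enclosing dialog type is spy-able
    // and that getInfo/checkPoolingParameters are visible and non-final.
    DatabaseDialog dialog = spy(new DatabaseDialog());
    doNothing().when(dialog).getInfo(any(DatabaseMeta.class));
    doReturn(false).when(dialog).checkPoolingParameters(); // force the early return in onOK()
    dialog.onOK();
    verify(dialog, never()).close(); // the dialog must stay open when pooling params fail
}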
@Nonnull @Override public CpcSketch getResult() { return unionAll(); }
@Test public void testThresholdBehavior() { CpcSketch sketch1 = new CpcSketch(_lgNominalEntries); IntStream.range(0, 1000).forEach(sketch1::update); CpcSketch sketch2 = new CpcSketch(_lgNominalEntries); IntStream.range(1000, 2000).forEach(sketch2::update); CpcSketchAccumulator accumulator = new CpcSketchAccumulator(_lgNominalEntries, 3); accumulator.apply(sketch1); accumulator.apply(sketch2); Assert.assertEquals(accumulator.getResult().getEstimate(), sketch1.getEstimate() + sketch2.getEstimate(), _epsilon); }
public static void notBlack(final String str, final String message) { isTrue(StringUtils.isNoneBlank(str), message); }
@Test public void notBlack() { Assertions.assertDoesNotThrow(() -> Assert.notBlack("notBlack", "error message")); Assertions.assertThrows(ValidFailException.class, () -> Assert.notBlack("", "error message")); }
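One detail worth noting in notBlack above, assuming the StringUtils in scope is Commons Lang's: StringUtils.isNoneBlank(CharSequence...) is a varargs check, and with a single argument it behaves exactly like isNotBlank, which is why the empty string trips the assertion in the test. A small illustration (editorial, assumes commons-lang3 on the classpath):

import org.apache.commons.lang3.StringUtils;

public class NotBlankSketch {
    public static void main(String[] args) {
        // With exactly one argument, isNoneBlank and isNotBlank agree.
        System.out.println(StringUtils.isNoneBlank("notBlack")); // true  -> assertion passes
        System.out.println(StringUtils.isNoneBlank(""));         // false -> ValidFailException is thrown
        System.out.println(StringUtils.isNotBlank(""));          // false, same result
    }
}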
@Override public NonTokenizer clone() { try { NonTokenizer copy = (NonTokenizer) super.clone(); copy.done = false; copy.cs = null; return copy; } catch (CloneNotSupportedException e) { throw new Error("Assertion error, NonTokenizer is Cloneable."); } }
@Test public void testClone() { Tokenizer tokenizer = new NonTokenizer(); testClones(tokenizer, "1.0n", "1.0n"); testClones(tokenizer, "Hello there!", "Hello there!"); }
Set<String> findConsumerGroups() throws InterruptedException, ExecutionException { List<String> filteredGroups = listConsumerGroups().stream() .map(ConsumerGroupListing::groupId) .filter(this::shouldReplicateByGroupFilter) .collect(Collectors.toList()); Set<String> checkpointGroups = new HashSet<>(); Set<String> irrelevantGroups = new HashSet<>(); for (String group : filteredGroups) { Set<String> consumedTopics = listConsumerGroupOffsets(group).keySet().stream() .map(TopicPartition::topic) .filter(this::shouldReplicateByTopicFilter) .collect(Collectors.toSet()); // Only perform checkpoints for groups that have offsets for at least one topic that's accepted // by the topic filter. if (consumedTopics.isEmpty()) { irrelevantGroups.add(group); } else { checkpointGroups.add(group); } } log.debug("Ignoring the following groups which do not have any offsets for topics that are accepted by " + "the topic filter: {}", irrelevantGroups); return checkpointGroups; }
@Test public void testFindConsumerGroups() throws Exception { MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Collections.emptySet(), config); connector = spy(connector); Collection<ConsumerGroupListing> groups = Arrays.asList( new ConsumerGroupListing("g1", true), new ConsumerGroupListing("g2", false)); Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(); offsets.put(new TopicPartition("t1", 0), new OffsetAndMetadata(0)); doReturn(groups).when(connector).listConsumerGroups(); doReturn(true).when(connector).shouldReplicateByTopicFilter(anyString()); doReturn(true).when(connector).shouldReplicateByGroupFilter(anyString()); doReturn(offsets).when(connector).listConsumerGroupOffsets(anyString()); Set<String> groupFound = connector.findConsumerGroups(); Set<String> expectedGroups = groups.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toSet()); assertEquals(expectedGroups, groupFound, "Expected groups are not the same as findConsumerGroups"); doReturn(false).when(connector).shouldReplicateByTopicFilter(anyString()); Set<String> topicFilterGroupFound = connector.findConsumerGroups(); assertEquals(Collections.emptySet(), topicFilterGroupFound); }
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    Map<String, Object> attachments = invocation.getObjectAttachments();
    if (attachments != null) {
        Map<String, Object> newAttach = new HashMap<>(attachments.size());
        for (Map.Entry<String, Object> entry : attachments.entrySet()) {
            String key = entry.getKey();
            if (!UNLOADING_KEYS.contains(key)) {
                newAttach.put(key, entry.getValue());
            }
        }
        attachments = newAttach;
    }

    RpcContext.getServiceContext().setInvoker(invoker).setInvocation(invocation);

    RpcContext context = RpcContext.getServerAttachment();
    // .setAttachments(attachments) // merged from dubbox
    if (context.getLocalAddress() == null) {
        context.setLocalAddress(invoker.getUrl().getHost(), invoker.getUrl().getPort());
    }

    String remoteApplication = invocation.getAttachment(REMOTE_APPLICATION_KEY);
    if (StringUtils.isNotEmpty(remoteApplication)) {
        RpcContext.getServiceContext().setRemoteApplicationName(remoteApplication);
    } else {
        RpcContext.getServiceContext().setRemoteApplicationName(context.getAttachment(REMOTE_APPLICATION_KEY));
    }

    long timeout = RpcUtils.getTimeout(invocation, -1);
    if (timeout != -1) {
        // pass to next hop
        RpcContext.getServerAttachment()
                .setObjectAttachment(TIME_COUNTDOWN_KEY, TimeoutCountDown.newCountDown(timeout, TimeUnit.MILLISECONDS));
    }

    // merged from dubbox
    // we may already add some attachments into RpcContext before this filter (e.g. in rest protocol)
    if (CollectionUtils.isNotEmptyMap(attachments)) {
        if (context.getObjectAttachments().size() > 0) {
            context.getObjectAttachments().putAll(attachments);
        } else {
            context.setObjectAttachments(attachments);
        }
    }

    if (invocation instanceof RpcInvocation) {
        RpcInvocation rpcInvocation = (RpcInvocation) invocation;
        rpcInvocation.setInvoker(invoker);
    }

    try {
        context.clearAfterEachInvoke(false);
        return invoker.invoke(invocation);
    } finally {
        context.clearAfterEachInvoke(true);
        if (context.isAsyncStarted()) {
            removeContext();
        }
    }
}
@SuppressWarnings("unchecked")
@Test
void testSetContext() {
    invocation = mock(Invocation.class);
    given(invocation.getMethodName()).willReturn("$enumlength");
    given(invocation.getParameterTypes()).willReturn(new Class<?>[] {Enum.class});
    given(invocation.getArguments()).willReturn(new Object[] {"hello"});
    given(invocation.getObjectAttachments()).willReturn(null);

    invoker = mock(Invoker.class);
    given(invoker.isAvailable()).willReturn(true);
    given(invoker.getInterface()).willReturn(DemoService.class);
    AppResponse result = new AppResponse();
    result.setValue("High");
    given(invoker.invoke(invocation)).willReturn(result);

    URL url = URL.valueOf("test://test:11/test?group=dubbo&version=1.1");
    given(invoker.getUrl()).willReturn(url);

    contextFilter.invoke(invoker, invocation);
    assertNotNull(RpcContext.getServiceContext().getInvoker());
}
public static void validate(WindowConfig windowConfig) {
    if (windowConfig.getWindowLengthDurationMs() == null && windowConfig.getWindowLengthCount() == null) {
        throw new IllegalArgumentException("Window length is not specified");
    }

    if (windowConfig.getWindowLengthDurationMs() != null && windowConfig.getWindowLengthCount() != null) {
        throw new IllegalArgumentException(
                "Window length for time and count are set! Please set one or the other.");
    }

    if (windowConfig.getWindowLengthCount() != null) {
        if (windowConfig.getWindowLengthCount() <= 0) {
            throw new IllegalArgumentException(
                    "Window length must be positive [" + windowConfig.getWindowLengthCount() + "]");
        }
    }

    if (windowConfig.getWindowLengthDurationMs() != null) {
        if (windowConfig.getWindowLengthDurationMs() <= 0) {
            throw new IllegalArgumentException(
                    "Window length must be positive [" + windowConfig.getWindowLengthDurationMs() + "]");
        }
    }

    if (windowConfig.getSlidingIntervalCount() != null) {
        if (windowConfig.getSlidingIntervalCount() <= 0) {
            throw new IllegalArgumentException(
                    "Sliding interval must be positive [" + windowConfig.getSlidingIntervalCount() + "]");
        }
    }

    if (windowConfig.getSlidingIntervalDurationMs() != null) {
        if (windowConfig.getSlidingIntervalDurationMs() <= 0) {
            throw new IllegalArgumentException(
                    "Sliding interval must be positive [" + windowConfig.getSlidingIntervalDurationMs() + "]");
        }
    }

    if (windowConfig.getTimestampExtractorClassName() != null) {
        if (windowConfig.getMaxLagMs() != null) {
            if (windowConfig.getMaxLagMs() < 0) {
                throw new IllegalArgumentException(
                        "Lag duration must be positive [" + windowConfig.getMaxLagMs() + "]");
            }
        }
        if (windowConfig.getWatermarkEmitIntervalMs() != null) {
            if (windowConfig.getWatermarkEmitIntervalMs() <= 0) {
                throw new IllegalArgumentException(
                        "Watermark interval must be positive [" + windowConfig.getWatermarkEmitIntervalMs() + "]");
            }
        }
    }
}
@Test
public void testSettingLagTime() throws Exception {
    final Object[] args = new Object[]{-1L, 0L, 1L, 2L, 5L, 10L, null};
    for (Object arg : args) {
        Object arg0 = arg;
        try {
            Long maxLagMs = null;
            if (arg0 != null) {
                maxLagMs = (Long) arg0;
            }
            WindowConfig windowConfig = new WindowConfig();
            windowConfig.setWindowLengthCount(1);
            windowConfig.setSlidingIntervalCount(1);
            windowConfig.setMaxLagMs(maxLagMs);
            windowConfig.setTimestampExtractorClassName("SomeClass");
            WindowConfigUtils.validate(windowConfig);
            if (arg0 != null && (Long) arg0 < 0) {
                fail(String.format("Window lag cannot be less than zero -- lagTime: %s", arg0));
            }
        } catch (IllegalArgumentException e) {
            if (arg0 != null && (Long) arg0 > 0) {
                fail(String.format("Exception: %s thrown on valid input -- lagTime: %s", e.getMessage(), arg0));
            }
        }
    }
}
@VisibleForTesting
void validateParentMenu(Long parentId, Long childId) {
    if (parentId == null || ID_ROOT.equals(parentId)) {
        return;
    }
    // a menu cannot be its own parent
    if (parentId.equals(childId)) {
        throw exception(MENU_PARENT_ERROR);
    }
    MenuDO menu = menuMapper.selectById(parentId);
    // the parent menu does not exist
    if (menu == null) {
        throw exception(MENU_PARENT_NOT_EXISTS);
    }
    // the parent menu must be of directory or menu type
    if (!MenuTypeEnum.DIR.getType().equals(menu.getType())
            && !MenuTypeEnum.MENU.getType().equals(menu.getType())) {
        throw exception(MENU_PARENT_NOT_DIR_OR_MENU);
    }
}
@Test
public void testValidateParentMenu_success() {
    // mock data
    MenuDO menuDO = buildMenuDO(MenuTypeEnum.MENU, "parent", 0L);
    menuMapper.insert(menuDO);
    // prepare parameters
    Long parentId = menuDO.getId();
    // invoke; no assertion needed
    menuService.validateParentMenu(parentId, null);
}
@Override
public Mono<GetDevicesResponse> getDevices(final GetDevicesRequest request) {
  final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();

  return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier()))
      .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
      .flatMapMany(account -> Flux.fromIterable(account.getDevices()))
      .reduce(GetDevicesResponse.newBuilder(), (builder, device) -> {
        final GetDevicesResponse.LinkedDevice.Builder linkedDeviceBuilder =
            GetDevicesResponse.LinkedDevice.newBuilder();

        if (device.getName() != null) {
          linkedDeviceBuilder.setName(ByteString.copyFrom(device.getName()));
        }

        return builder.addDevices(linkedDeviceBuilder
            .setId(device.getId())
            .setCreated(device.getCreated())
            .setLastSeen(device.getLastSeen())
            .build());
      })
      .map(GetDevicesResponse.Builder::build);
}
@Test
void getDevices() {
  final Instant primaryDeviceCreated = Instant.now().minus(Duration.ofDays(7)).truncatedTo(ChronoUnit.MILLIS);
  final Instant primaryDeviceLastSeen = primaryDeviceCreated.plus(Duration.ofHours(6));

  final Instant linkedDeviceCreated = Instant.now().minus(Duration.ofDays(1)).truncatedTo(ChronoUnit.MILLIS);
  final Instant linkedDeviceLastSeen = linkedDeviceCreated.plus(Duration.ofHours(7));

  final Device primaryDevice = mock(Device.class);
  when(primaryDevice.getId()).thenReturn(Device.PRIMARY_ID);
  when(primaryDevice.getCreated()).thenReturn(primaryDeviceCreated.toEpochMilli());
  when(primaryDevice.getLastSeen()).thenReturn(primaryDeviceLastSeen.toEpochMilli());

  final String linkedDeviceName = "A linked device";

  final Device linkedDevice = mock(Device.class);
  when(linkedDevice.getId()).thenReturn((byte) (Device.PRIMARY_ID + 1));
  when(linkedDevice.getCreated()).thenReturn(linkedDeviceCreated.toEpochMilli());
  when(linkedDevice.getLastSeen()).thenReturn(linkedDeviceLastSeen.toEpochMilli());
  when(linkedDevice.getName()).thenReturn(linkedDeviceName.getBytes(StandardCharsets.UTF_8));

  when(authenticatedAccount.getDevices()).thenReturn(List.of(primaryDevice, linkedDevice));

  final GetDevicesResponse expectedResponse = GetDevicesResponse.newBuilder()
      .addDevices(GetDevicesResponse.LinkedDevice.newBuilder()
          .setId(Device.PRIMARY_ID)
          .setCreated(primaryDeviceCreated.toEpochMilli())
          .setLastSeen(primaryDeviceLastSeen.toEpochMilli())
          .build())
      .addDevices(GetDevicesResponse.LinkedDevice.newBuilder()
          .setId(Device.PRIMARY_ID + 1)
          .setCreated(linkedDeviceCreated.toEpochMilli())
          .setLastSeen(linkedDeviceLastSeen.toEpochMilli())
          .setName(ByteString.copyFrom(linkedDeviceName.getBytes(StandardCharsets.UTF_8)))
          .build())
      .build();

  assertEquals(expectedResponse, authenticatedServiceStub().getDevices(GetDevicesRequest.newBuilder().build()));
}
@VisibleForTesting
static <K, V> Cache<K, V> forMaximumBytes(long maximumBytes) {
  // We specifically use Guava cache since it allows for recursive computeIfAbsent calls
  // preventing deadlock from occurring when a loading function mutates the underlying cache
  LongAdder weightInBytes = new LongAdder();
  return new SubCache<>(
      new ShrinkOnEviction(
          CacheBuilder.newBuilder()
              .maximumWeight(maximumBytes >> WEIGHT_RATIO)
              .weigher(
                  new Weigher<CompositeKey, WeightedValue<Object>>() {
                    @Override
                    public int weigh(CompositeKey key, WeightedValue<Object> value) {
                      // Round up to the next closest multiple of WEIGHT_RATIO
                      long size = ((key.getWeight() + value.getWeight() - 1) >> WEIGHT_RATIO) + 1;
                      if (size > Integer.MAX_VALUE) {
                        LOG.warn(
                            "Entry with size {} MiBs inserted into the cache. This is larger than the maximum individual entry size of {} MiBs. The cache will under report its memory usage by the difference. This may lead to OutOfMemoryErrors.",
                            ((size - 1) >> 20) + 1,
                            2 << (WEIGHT_RATIO + 10));
                        return Integer.MAX_VALUE;
                      }
                      return (int) size;
                    }
                  })
              // The maximum size of an entry in the cache is maxWeight / concurrencyLevel
              // which is why we set the concurrency level to 1. See
              // https://github.com/google/guava/issues/3462 for further details.
              //
              // The PrecombineGroupingTable showed contention here since it was working in
              // a tight loop. We were able to resolve the contention by reducing the
              // frequency of updates. Reconsider this value if we could solve the maximum
              // entry size issue. Note that using Runtime.getRuntime().availableProcessors()
              // is subject to docker CPU shares issues
              // (https://bugs.openjdk.org/browse/JDK-8281181).
              //
              // We could revisit the caffeine cache library based upon reinvestigating
              // recursive computeIfAbsent calls since it doesn't have this limit.
              .concurrencyLevel(1)
              .recordStats(),
          weightInBytes)
          .getCache(),
      CompositeKeyPrefix.ROOT,
      maximumBytes,
      weightInBytes);
}
@Test
public void testShrinkableIsShrunk() throws Exception {
  WeightedValue<String> shrinkableKey = WeightedValue.of("shrinkable", MB);
  Shrinkable<Object> shrinkable =
      new Shrinkable<Object>() {
        @Override
        public Object shrink() {
          return WeightedValue.of("wasShrunk", 1);
        }
      };

  Cache<Object, Object> cache = Caches.forMaximumBytes(2 * MB);
  cache.put(shrinkableKey, WeightedValue.of(shrinkable, MB));
  // Check that we didn't evict it yet
  assertSame(shrinkable, cache.peek(shrinkableKey));

  // The next insertion should cause the value to be "shrunk"
  cache.put(WeightedValue.of("other", 1), WeightedValue.of("value", 1));
  assertEquals("wasShrunk", cache.peek(shrinkableKey));
}
public static PaginationInformation.Builder forPageIndex(int pageIndex) {
    return new PaginationInformation.Builder(pageIndex);
}
@Test
public void paginationInformation_whenPageIndexIsZero_shouldThrow() {
    assertThatIllegalArgumentException()
        .isThrownBy(() -> PaginationInformation.forPageIndex(0).withPageSize(1).andTotal(1))
        .withMessage("Page index must be strictly positive. Got 0");
}
public static SerializableFunction<Row, GenericRecord> getRowToGenericRecordFunction(
    org.apache.avro.@Nullable Schema avroSchema) {
  return new RowToGenericRecordFn(avroSchema);
}
@Test
public void testRowToGenericRecordFunction() {
  SerializableUtils.ensureSerializable(AvroUtils.getRowToGenericRecordFunction(NULL_SCHEMA));
  SerializableUtils.ensureSerializable(AvroUtils.getRowToGenericRecordFunction(null));
}
public String getHelp() {
    return help;
}
@Test
public void testDefault() {
    assertEquals("HTTP URL", new DescriptiveUrl(URI.create("http://me")).getHelp());
}