focal_method: string (length 13 – 60.9k)
test_case: string (length 25 – 109k)
public Bson createDbQuery(final List<String> filters, final String query) { try { final var searchQuery = searchQueryParser.parse(query); final var filterExpressionFilters = dbFilterParser.parse(filters, attributes); return buildDbQuery(searchQuery, filterExpressionFilters); } catch (IllegalArgumentException e) { throw new BadRequestException("Invalid argument in search query: " + e.getMessage()); } }
@Test void throwsBadRequestExceptionIfSearchQueryParserThrowsIllegalArgumentException() { doThrow(IllegalArgumentException.class).when(searchQueryParser).parse(eq("wrong #$%#$%$ query")); assertThrows(BadRequestException.class, () -> toTest.createDbQuery(List.of(), "wrong #$%#$%$ query")); }
@Operation(summary = "get", description = "Get a service") @GetMapping("/{id}") public ResponseEntity<ServiceVO> get(@PathVariable Long id) { return ResponseEntity.success(serviceService.get(id)); }
@Test void getReturnsSuccessWithNullDataForInvalidId() { Long id = 999L; when(serviceService.get(id)).thenReturn(null); ResponseEntity<ServiceVO> response = serviceController.get(id); assertTrue(response.isSuccess()); assertNull(response.getData()); }
@Description("current time with time zone") @ScalarFunction @SqlType(StandardTypes.TIME_WITH_TIME_ZONE) public static long currentTime(SqlFunctionProperties properties) { // We do all calculation in UTC, as session.getStartTime() is in UTC // and we need to have UTC millis for packDateTimeWithZone long millis = UTC_CHRONOLOGY.millisOfDay().get(properties.getSessionStartTime()); if (!properties.isLegacyTimestamp()) { // However, those UTC millis are pointing to the correct UTC timestamp // Our TIME WITH TIME ZONE representation does use UTC 1970-01-01 representation // So we have to hack here in order to get valid representation // of TIME WITH TIME ZONE millis -= valueToSessionTimeZoneOffsetDiff(properties.getSessionStartTime(), getDateTimeZone(properties.getTimeZoneKey())); } try { return packDateTimeWithZone(millis, properties.getTimeZoneKey()); } catch (NotSupportedException | TimeZoneNotSupportedException e) { throw new PrestoException(NOT_SUPPORTED, e.getMessage(), e); } catch (IllegalArgumentException e) { throw new PrestoException(INVALID_FUNCTION_ARGUMENT, e.getMessage(), e); } }
@Test public void testCurrentTime() { Session localSession = Session.builder(session) // we use Asia/Kathmandu here to test the difference in semantic change of current_time // between legacy and non-legacy timestamp .setTimeZoneKey(KATHMANDU_ZONE_KEY) .setStartTime(new DateTime(2017, 3, 1, 15, 45, 0, 0, KATHMANDU_ZONE).getMillis()) .build(); try (FunctionAssertions localAssertion = new FunctionAssertions(localSession)) { localAssertion.assertFunctionString("CURRENT_TIME", TIME_WITH_TIME_ZONE, "15:45:00.000 Asia/Kathmandu"); } }
public ConvertedTime getConvertedTime(long duration) { Set<Seconds> keys = RULES.keySet(); for (Seconds seconds : keys) { if (duration <= seconds.getSeconds()) { return RULES.get(seconds).getConvertedTime(duration); } } return new TimeConverter.OverTwoYears().getConvertedTime(duration); }
@Test public void testShouldReportOneMinuteFor89Seconds() { assertEquals(TimeConverter.ABOUT_1_MINUTE_AGO, timeConverter.getConvertedTime(89)); }
public static String stripTrailingSlash(String path) { Preconditions.checkArgument(!Strings.isNullOrEmpty(path), "path must not be null or empty"); String result = path; while (!result.endsWith("://") && result.endsWith("/")) { result = result.substring(0, result.length() - 1); } return result; }
@Test public void testStripTrailingSlash() { String pathWithoutTrailingSlash = "s3://bucket/db/tbl"; assertThat(LocationUtil.stripTrailingSlash(pathWithoutTrailingSlash)) .as("Should have no trailing slashes") .isEqualTo(pathWithoutTrailingSlash); String pathWithSingleTrailingSlash = pathWithoutTrailingSlash + "/"; assertThat(LocationUtil.stripTrailingSlash(pathWithSingleTrailingSlash)) .as("Should have no trailing slashes") .isEqualTo(pathWithoutTrailingSlash); String pathWithMultipleTrailingSlash = pathWithoutTrailingSlash + "////"; assertThat(LocationUtil.stripTrailingSlash(pathWithMultipleTrailingSlash)) .as("Should have no trailing slashes") .isEqualTo(pathWithoutTrailingSlash); String pathWithOnlySlash = "////"; assertThat(LocationUtil.stripTrailingSlash(pathWithOnlySlash)) .as("Should have no trailing slashes") .isEmpty(); }
@Override public BundleContext bundleContext() { if (restrictedBundleContext == null) { throw newException(); } return restrictedBundleContext; }
@Test void require_that_bundleContext_throws_exception() throws BundleException { assertThrows(RuntimeException.class, () -> { new DisableOsgiFramework().bundleContext(); }); }
@Override public boolean isDone() { if (delegate == null) { return isDone; } return delegate.isDone(); }
@Test public void isDone() { final Future<HttpResponse> delegate = Mockito.mock(Future.class); FutureDecorator decorator = new FutureDecorator(null); ReflectUtils.setFieldValue(decorator, "delegate", delegate); decorator.isDone(); Mockito.verify(delegate, Mockito.times(1)).isDone(); }
public static InsertRetryPolicy neverRetry() { return new InsertRetryPolicy() { @Override public boolean shouldRetry(Context context) { return false; } }; }
@Test public void testNeverRetry() { assertFalse( InsertRetryPolicy.neverRetry() .shouldRetry(new Context(new TableDataInsertAllResponse.InsertErrors()))); }
long snapshotsRetrieved() { return snapshotsRetrieved.get(); }
@Test public void metrics_are_refreshed_on_every_update() { assertEquals(0, nodeMetricsClient.snapshotsRetrieved()); updateSnapshot(defaultMetricsConsumerId, TTL); assertEquals(1, nodeMetricsClient.snapshotsRetrieved()); updateSnapshot(defaultMetricsConsumerId, Duration.ZERO); assertEquals(2, nodeMetricsClient.snapshotsRetrieved()); }
public static FieldScope allowingFieldDescriptors( FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) { return FieldScopeImpl.createAllowingFieldDescriptors(asList(firstFieldDescriptor, rest)); }
@Test public void testIgnoringTopLevelField_fieldScopes_allowingFieldDescriptors() { expectThat(ignoringFieldDiffMessage) .withPartialScope(FieldScopes.allowingFieldDescriptors(goodFieldDescriptor)) .isEqualTo(ignoringFieldMessage); expectThat(ignoringFieldDiffMessage) .ignoringFieldScope(FieldScopes.allowingFieldDescriptors(goodFieldDescriptor)) .isNotEqualTo(ignoringFieldMessage); expectThat(ignoringFieldDiffMessage) .withPartialScope(FieldScopes.allowingFieldDescriptors(badFieldDescriptor)) .isNotEqualTo(ignoringFieldMessage); expectThat(ignoringFieldDiffMessage) .ignoringFieldScope(FieldScopes.allowingFieldDescriptors(badFieldDescriptor)) .isEqualTo(ignoringFieldMessage); }
public static byte[] compress(String urlString) throws MalformedURLException { byte[] compressedBytes = null; if (urlString != null) { // Figure the compressed bytes can't be longer than the original string. byte[] byteBuffer = new byte[urlString.length()]; int byteBufferIndex = 0; Arrays.fill(byteBuffer, (byte) 0x00); Pattern urlPattern = Pattern.compile(EDDYSTONE_URL_REGEX); Matcher urlMatcher = urlPattern.matcher(urlString); if (urlMatcher.matches()) { // www. String wwwdot = urlMatcher.group(EDDYSTONE_URL_WWW_GROUP); boolean haswww = (wwwdot != null); // Protocol. String rawProtocol = urlMatcher.group(EDDYSTONE_URL_PROTOCOL_GROUP); String protocol = rawProtocol.toLowerCase(); if (protocol.equalsIgnoreCase(URL_PROTOCOL_HTTP)) { byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTP_WWW : EDDYSTONE_URL_PROTOCOL_HTTP); } else { byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTPS_WWW : EDDYSTONE_URL_PROTOCOL_HTTPS); } byteBufferIndex++; // Fully-qualified domain name (FQDN). This includes the hostname and any other components after the dots // but BEFORE the first single slash in the URL. byte[] hostnameBytes = urlMatcher.group(EDDYSTONE_URL_FQDN_GROUP).getBytes(); String rawHostname = new String(hostnameBytes); String hostname = rawHostname.toLowerCase(); String[] domains = hostname.split(Pattern.quote(".")); boolean consumedSlash = false; if (domains != null) { // Write the hostname/subdomains prior to the last one. If there's only one (e. g. http://localhost) // then that's the only thing to write out. byte[] periodBytes = {'.'}; int writableDomainsCount = (domains.length == 1 ? 1 : domains.length - 1); for (int domainIndex = 0; domainIndex < writableDomainsCount; domainIndex++) { // Write out leading period, if necessary. if (domainIndex > 0) { System.arraycopy(periodBytes, 0, byteBuffer, byteBufferIndex, periodBytes.length); byteBufferIndex += periodBytes.length; } byte[] domainBytes = domains[domainIndex].getBytes(); int domainLength = domainBytes.length; System.arraycopy(domainBytes, 0, byteBuffer, byteBufferIndex, domainLength); byteBufferIndex += domainLength; } // Is the TLD one that we can encode? if (domains.length > 1) { String tld = "." + domains[domains.length - 1]; String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP); String encodableTLDCandidate = (slash == null ? tld : tld + slash); byte encodedTLDByte = encodedByteForTopLevelDomain(encodableTLDCandidate); if (encodedTLDByte != TLD_NOT_ENCODABLE) { byteBuffer[byteBufferIndex++] = encodedTLDByte; consumedSlash = (slash != null); } else { byte[] tldBytes = tld.getBytes(); int tldLength = tldBytes.length; System.arraycopy(tldBytes, 0, byteBuffer, byteBufferIndex, tldLength); byteBufferIndex += tldLength; } } } // Optional slash. if (! consumedSlash) { String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP); if (slash != null) { int slashLength = slash.length(); System.arraycopy(slash.getBytes(), 0, byteBuffer, byteBufferIndex, slashLength); byteBufferIndex += slashLength; } } // Path. String path = urlMatcher.group(EDDYSTONE_URL_PATH_GROUP); if (path != null) { int pathLength = path.length(); System.arraycopy(path.getBytes(), 0, byteBuffer, byteBufferIndex, pathLength); byteBufferIndex += pathLength; } // Copy the result. compressedBytes = new byte[byteBufferIndex]; System.arraycopy(byteBuffer, 0, compressedBytes, 0, compressedBytes.length); } else { throw new MalformedURLException(); } } else { throw new MalformedURLException(); } return compressedBytes; }
@Test public void testCompressWithSubdomainsWithTrailingSlash() throws MalformedURLException { String testURL = "http://www.forums.google.com/"; byte[] expectedBytes = {0x00, 'f', 'o', 'r', 'u', 'm', 's', '.', 'g', 'o', 'o', 'g', 'l', 'e', 0x00}; assertTrue(Arrays.equals(expectedBytes, UrlBeaconUrlCompressor.compress(testURL))); }
@CanIgnoreReturnValue public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) { List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs); return containsExactlyElementsIn( expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable); }
@Test public void iterableContainsExactlyInOrderWithOneShotIterableWrongOrder() { Iterator<Object> iterator = asList((Object) 1, null, 3).iterator(); Iterable<Object> iterable = new Iterable<Object>() { @Override public Iterator<Object> iterator() { return iterator; } @Override public String toString() { return "BadIterable"; } }; expectFailureWhenTestingThat(iterable).containsExactly(1, 3, null).inOrder(); assertFailureKeys("contents match, but order was wrong", "expected", "but was"); assertFailureValue("expected", "[1, 3, null]"); }
public Item and(Item item) { Item result = and(getRoot(), item); setRoot(result); return result; }
@Test void addNotToNot() { NotItem not1 = new NotItem(); not1.addPositiveItem(new WordItem("p1")); not1.addNegativeItem(new WordItem("n1.1")); not1.addNegativeItem(new WordItem("n1.2")); NotItem not2 = new NotItem(); not2.addPositiveItem(new WordItem("p2")); not2.addNegativeItem(new WordItem("n2.1")); not2.addNegativeItem(new WordItem("n2.2")); QueryTree tree = new QueryTree(not1); tree.and(not2); assertEquals("+(AND p1 p2) -n1.1 -n1.2 -n2.1 -n2.2", tree.toString()); }
public static ColumnSegment bind(final ColumnSegment segment, final SegmentType parentSegmentType, final SQLStatementBinderContext binderContext, final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) { if (EXCLUDE_BIND_COLUMNS.contains(segment.getIdentifier().getValue().toUpperCase())) { return segment; } ColumnSegment result = copy(segment); Collection<TableSegmentBinderContext> tableSegmentBinderContexts = getTableSegmentBinderContexts(segment, parentSegmentType, binderContext, tableBinderContexts, outerTableBinderContexts); Optional<ColumnSegment> inputColumnSegment = findInputColumnSegment(segment, parentSegmentType, tableSegmentBinderContexts, outerTableBinderContexts, binderContext); inputColumnSegment.ifPresent(optional -> result.setVariable(optional.isVariable())); result.setColumnBoundInfo(createColumnSegmentBoundInfo(segment, inputColumnSegment.orElse(null))); return result; }
@Test void assertBindFromOuterTable() { Map<String, TableSegmentBinderContext> outerTableBinderContexts = new LinkedHashMap<>(2, 1F); ColumnSegment boundOrderStatusColumn = new ColumnSegment(0, 0, new IdentifierValue("status")); boundOrderStatusColumn.setColumnBoundInfo(new ColumnSegmentBoundInfo(new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue("t_order"), new IdentifierValue("status"))); outerTableBinderContexts.put("t_order", new SimpleTableSegmentBinderContext(Collections.singleton(new ColumnProjectionSegment(boundOrderStatusColumn)))); ColumnSegment boundOrderItemStatusColumn = new ColumnSegment(0, 0, new IdentifierValue("status")); boundOrderItemStatusColumn.setColumnBoundInfo(new ColumnSegmentBoundInfo(new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue("t_order_item"), new IdentifierValue("status"))); outerTableBinderContexts.put("t_order_item", new SimpleTableSegmentBinderContext(Collections.singleton(new ColumnProjectionSegment(boundOrderItemStatusColumn)))); SQLStatementBinderContext binderContext = new SQLStatementBinderContext(mock(ShardingSphereMetaData.class), DefaultDatabase.LOGIC_NAME, TypedSPILoader.getService(DatabaseType.class, "FIXTURE"), Collections.emptySet()); ColumnSegment columnSegment = new ColumnSegment(0, 0, new IdentifierValue("status")); ColumnSegment actual = ColumnSegmentBinder.bind(columnSegment, SegmentType.PROJECTION, binderContext, Collections.emptyMap(), outerTableBinderContexts); assertNotNull(actual.getColumnBoundInfo()); assertNull(actual.getOtherUsingColumnBoundInfo()); assertThat(actual.getColumnBoundInfo().getOriginalDatabase().getValue(), is(DefaultDatabase.LOGIC_NAME)); assertThat(actual.getColumnBoundInfo().getOriginalSchema().getValue(), is(DefaultDatabase.LOGIC_NAME)); assertThat(actual.getColumnBoundInfo().getOriginalTable().getValue(), is("t_order_item")); assertThat(actual.getColumnBoundInfo().getOriginalColumn().getValue(), is("status")); }
@Override public void execute(final ChannelHandlerContext context, final Object message, final DatabaseProtocolFrontendEngine databaseProtocolFrontendEngine, final ConnectionSession connectionSession) { context.writeAndFlush(databaseProtocolFrontendEngine.getCommandExecuteEngine().getErrorPacket(new CircuitBreakException())); databaseProtocolFrontendEngine.getCommandExecuteEngine().getOtherPacket(connectionSession).ifPresent(context::writeAndFlush); }
@Test void assertExecute() { ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); DatabaseProtocolFrontendEngine engine = mock(DatabaseProtocolFrontendEngine.class, RETURNS_DEEP_STUBS); ConnectionSession connectionSession = mock(ConnectionSession.class); DatabasePacket errorPacket = mock(DatabasePacket.class); when(engine.getCommandExecuteEngine().getErrorPacket(any(CircuitBreakException.class))).thenReturn(errorPacket); DatabasePacket otherPacket = mock(DatabasePacket.class); when(engine.getCommandExecuteEngine().getOtherPacket(connectionSession)).thenReturn(Optional.of(otherPacket)); new CircuitBreakProxyState().execute(channelHandlerContext, null, engine, connectionSession); verify(channelHandlerContext).writeAndFlush(errorPacket); verify(channelHandlerContext).writeAndFlush(otherPacket); }
CompressionServletResponseWrapper(HttpServletResponse response, int compressionThreshold) { super(response); assert compressionThreshold >= 0; this.compressionThreshold = compressionThreshold; }
@Test public void testCompressionServletResponseWrapper() throws IOException { final CompressionServletResponseWrapper wrapper = new CompressionServletResponseWrapper( new HttpResponse(), 1024); wrapper.setStatus(HttpServletResponse.SC_NOT_FOUND); assertEquals("status", HttpServletResponse.SC_NOT_FOUND, wrapper.getCurrentStatus()); wrapper.sendError(HttpServletResponse.SC_BAD_GATEWAY); assertEquals("status", HttpServletResponse.SC_BAD_GATEWAY, wrapper.getCurrentStatus()); wrapper.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "message"); assertEquals("status", HttpServletResponse.SC_SERVICE_UNAVAILABLE, wrapper.getCurrentStatus()); assertNotNull("outputStream", wrapper.createOutputStream()); assertNotNull("writer", wrapper.getWriter()); wrapper.flushStream(); wrapper.flushBuffer(); wrapper.close(); wrapper.setContentLength(0); wrapper.finishResponse(); boolean ok = false; try { wrapper.getOutputStream(); } catch (final Exception e) { ok = true; } assertTrue("exception", ok); final CompressionServletResponseWrapper wrapper2 = new CompressionServletResponseWrapper( new HttpResponse(), 1024); assertNotNull("outputStream", wrapper2.getOutputStream()); wrapper2.flushBuffer(); wrapper2.close(); boolean ok2 = false; try { wrapper2.getWriter(); } catch (final Exception e) { ok2 = true; } assertTrue("exception", ok2); }
public RouteResult<T> route(HttpMethod method, String path) { return route(method, path, Collections.emptyMap()); }
@Test void testNone() { RouteResult<String> routed = router.route(GET, "/noexist"); assertThat(routed.target()).isEqualTo("404"); }
@Override public void start() { client = new NacosClient(); }
@Test public void start() { nacosRegister.start(); }
@Override public boolean equals(final Object o) { if(this == o) { return true; } if(!(o instanceof Application)) { return false; } final Application that = (Application) o; return Objects.equals(identifier, that.identifier); }
@Test public void testEquals() { assertEquals(new Application("com.apple.textedit"), new Application("com.apple.textedit")); assertEquals(new Application("com.apple.textedit"), new Application("com.apple.textedit", "TextEdit")); assertEquals(new Application("com.apple.textedit"), new Application("com.apple.TextEdit")); assertEquals(new Application("com.apple.textedit"), new Application("com.apple.TextEdit", "TextEdit")); }
public List<Document> export(final String collectionName, final List<String> exportedFieldNames, final int limit, final Bson dbFilter, final List<Sort> sorts, final Subject subject) { final MongoCollection<Document> collection = mongoConnection.getMongoDatabase().getCollection(collectionName); final FindIterable<Document> resultsWithoutLimit = collection.find(Objects.requireNonNullElse(dbFilter, Filters.empty())) .projection(Projections.fields(Projections.include(exportedFieldNames))) .sort(toMongoDbSort(sorts)); final var userCanReadAllEntities = permissionsUtils.hasAllPermission(subject) || permissionsUtils.hasReadPermissionForWholeCollection(subject, collectionName); final var checkPermission = permissionsUtils.createPermissionCheck(subject, collectionName); final var documents = userCanReadAllEntities ? getFromMongo(resultsWithoutLimit, limit) : getWithInMemoryPermissionCheck(resultsWithoutLimit, limit, checkPermission); return documents.collect(Collectors.toList()); }
@Test void testExportUsesSortAndLimitCorrectly() { insertTestData(); simulateAdminUser(); final List<Document> exportedDocuments = toTest.export(TEST_COLLECTION_NAME, List.of("name"), 2, Filters.empty(), List.of(Sort.create("age", Sort.Order.ASC)), subject); assertThat(exportedDocuments) .isNotNull() .hasSize(2) .containsExactly( new Document(Map.of("_id", "0000000000000000000000c7", "name", "Judith")), new Document(Map.of("_id", "0000000000000000000000b6", "name", "Jerry")) ); }
@Override public boolean matches(Issue issue) { return !condition.matches(issue); }
@Test public void should_match_opposite() { NotCondition condition = new NotCondition(target); when(target.matches(any(Issue.class))).thenReturn(true); assertThat(condition.matches(issue)).isFalse(); when(target.matches(any(Issue.class))).thenReturn(false); assertThat(condition.matches(issue)).isTrue(); }
public static String substVars(String val, PropertyContainer pc1) { return substVars(val, pc1, null); }
@Test(timeout = 1000) public void detectCircularReferences1() { context.putProperty("A", "${A}a"); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Circular variable reference detected while parsing input [${A} --> ${A}]"); OptionHelper.substVars("${A}", context); }
@Override public Page<ConfigInfoBetaWrapper> findAllConfigInfoBetaForDumpAll(final int pageNo, final int pageSize) { final int startRow = (pageNo - 1) * pageSize; ConfigInfoBetaMapper configInfoBetaMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_BETA); String sqlCountRows = configInfoBetaMapper.count(null); MapperContext context = new MapperContext(); context.setStartRow(startRow); context.setPageSize(pageSize); MapperResult mapperResult = configInfoBetaMapper.findAllConfigInfoBetaForDumpAllFetchRows(context); String sqlFetchRows = mapperResult.getSql(); PaginationHelper<ConfigInfoBetaWrapper> helper = createPaginationHelper(); try { return helper.fetchPageLimit(sqlCountRows, sqlFetchRows, new Object[] {}, pageNo, pageSize, CONFIG_INFO_BETA_WRAPPER_ROW_MAPPER); } catch (CannotGetJdbcConnectionException e) { LogUtil.FATAL_LOG.error("[db-error] " + e, e); throw e; } }
@Test void testFindAllConfigInfoBetaForDumpAll() { //mock count when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class))).thenReturn(12345); //mock page list List<ConfigInfoBetaWrapper> mockList = new ArrayList<>(); mockList.add(new ConfigInfoBetaWrapper()); mockList.add(new ConfigInfoBetaWrapper()); mockList.add(new ConfigInfoBetaWrapper()); mockList.get(0).setLastModified(System.currentTimeMillis()); mockList.get(1).setLastModified(System.currentTimeMillis()); mockList.get(2).setLastModified(System.currentTimeMillis()); when(jdbcTemplate.query(anyString(), eq(new Object[] {}), eq(CONFIG_INFO_BETA_WRAPPER_ROW_MAPPER))).thenReturn(mockList); int pageNo = 1; int pageSize = 101; when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class))).thenReturn(101); //execute & expect Page<ConfigInfoBetaWrapper> pageReturn = externalConfigInfoBetaPersistService.findAllConfigInfoBetaForDumpAll(pageNo, pageSize); assertEquals(mockList, pageReturn.getPageItems()); assertEquals(101, pageReturn.getTotalCount()); //mock count throw CannotGetJdbcConnectionException when(jdbcTemplate.queryForObject(anyString(), eq(Integer.class))).thenThrow(new CannotGetJdbcConnectionException("345678909fail")); //execute &expect try { externalConfigInfoBetaPersistService.findAllConfigInfoBetaForDumpAll(pageNo, pageSize); assertTrue(false); } catch (Exception exception) { assertEquals("345678909fail", exception.getMessage()); } }
public static String from(Path path) { return from(path.toString()); }
@Test void testTextContentType() { assertThat(ContentType.from(Path.of("index.txt"))).isEqualTo(TEXT_PLAIN); }
@Override public void updateFileConfig(FileConfigSaveReqVO updateReqVO) { // Validate that the config exists FileConfigDO config = validateFileConfigExists(updateReqVO.getId()); // Update FileConfigDO updateObj = FileConfigConvert.INSTANCE.convert(updateReqVO) .setConfig(parseClientConfig(config.getStorage(), updateReqVO.getConfig())); fileConfigMapper.updateById(updateObj); // Clear the cache clearCache(config.getId(), null); }
@Test public void testUpdateFileConfig_notExists() { // Prepare parameters FileConfigSaveReqVO reqVO = randomPojo(FileConfigSaveReqVO.class); // Invoke and assert the expected exception assertServiceException(() -> fileConfigService.updateFileConfig(reqVO), FILE_CONFIG_NOT_EXISTS); }
@GET @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) @Override public ClusterInfo get() { return getClusterInfo(); }
@Test public void testClusterMetricsDefault() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("cluster") .path("metrics").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); JSONObject json = response.getEntity(JSONObject.class); verifyClusterMetricsJSON(json); }
@Override public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final ShardingRule rule, final Collection<DataNode> includedDataNodes) { Collection<String> tableNames = rule.getShardingLogicTableNames(selectStatementContext.getTablesContext().getTableNames()); if (tableNames.isEmpty()) { return false; } includedDataNodes.addAll(getTableDataNodes(rule, tableNames, database)); if (selectStatementContext.isContainsSubquery() || selectStatementContext.isContainsHaving() || selectStatementContext.isContainsCombine() || selectStatementContext.isContainsPartialDistinctAggregation()) { return true; } if (!selectStatementContext.isContainsJoinQuery() || rule.isAllTablesInSameDataSource(tableNames)) { return false; } if (1 == tableNames.size() && selectStatementContext.isContainsJoinQuery() && !rule.isAllBindingTables(database, selectStatementContext, tableNames)) { return true; } return tableNames.size() > 1 && !rule.isAllBindingTables(database, selectStatementContext, tableNames); }
@Test void assertDecideWhenContainsOnlyOneTable() { SelectStatementContext select = createStatementContext(); when(select.getTablesContext().getTableNames()).thenReturn(Collections.singletonList("t_order")); when(select.isContainsJoinQuery()).thenReturn(true); ShardingRule shardingRule = createShardingRule(); when(shardingRule.getShardingLogicTableNames(Collections.singletonList("t_order"))).thenReturn(Collections.singletonList("t_order")); ShardingSphereDatabase database = createDatabase(shardingRule); when(shardingRule.isAllBindingTables(database, select, Collections.singletonList("t_order"))).thenReturn(false); Collection<DataNode> includedDataNodes = new HashSet<>(); assertTrue(new ShardingSQLFederationDecider().decide(select, Collections.emptyList(), mock(RuleMetaData.class), database, shardingRule, includedDataNodes)); assertThat(includedDataNodes.size(), is(2)); }
@Override public void close() throws Exception { super.close(); if (checkpointLock != null) { synchronized (checkpointLock) { issuedInstant = null; isRunning = false; } } if (LOG.isDebugEnabled()) { LOG.debug("Closed File Monitoring Source for path: " + path + "."); } }
@Test public void testConsumeFromLastCommit() throws Exception { TestData.writeData(TestData.DATA_SET_INSERT, conf); StreamReadMonitoringFunction function = TestUtils.getMonitorFunc(conf); try (AbstractStreamOperatorTestHarness<MergeOnReadInputSplit> harness = createHarness(function)) { harness.setup(); harness.open(); CountDownLatch latch = new CountDownLatch(4); CollectingSourceContext sourceContext = new CollectingSourceContext(latch); runAsync(sourceContext, function); assertTrue(latch.await(WAIT_TIME_MILLIS, TimeUnit.MILLISECONDS), "Should finish splits generation"); assertThat("Should produce the expected splits", sourceContext.getPartitionPaths(), is("par1,par2,par3,par4")); assertTrue(sourceContext.splits.stream().allMatch(split -> split.getInstantRange().isPresent()), "All instants should have range limit"); Thread.sleep(1000L); // reset the source context latch = new CountDownLatch(4); sourceContext.reset(latch); // write another instant and validate TestData.writeData(TestData.DATA_SET_UPDATE_INSERT, conf); assertTrue(latch.await(WAIT_TIME_MILLIS, TimeUnit.MILLISECONDS), "Should finish splits generation"); assertThat("Should produce the expected splits", sourceContext.getPartitionPaths(), is("par1,par2,par3,par4")); assertTrue(sourceContext.splits.stream().allMatch(split -> split.getInstantRange().isPresent()), "All the instants should have range limit"); // Stop the stream task. function.close(); } }
public static Map<String, Object> coerceTypes( final Map<String, Object> streamsProperties, final boolean ignoreUnresolved ) { if (streamsProperties == null) { return Collections.emptyMap(); } final Map<String, Object> validated = new HashMap<>(streamsProperties.size()); for (final Map.Entry<String, Object> e : streamsProperties.entrySet()) { try { validated.put(e.getKey(), coerceType(e.getKey(), e.getValue())); } catch (final PropertyNotFoundException p) { if (ignoreUnresolved) { validated.put(e.getKey(), e.getValue()); } else { throw p; } } } return validated; }
@Test public void shouldCoerceTypes() { // given/when: final Map<String, Object> coerced = PropertiesUtil.coerceTypes(ImmutableMap.of( "ksql.internal.topic.replicas", 3L, "cache.max.bytes.buffering", "0" ), false); // then: assertThat(coerced.get("ksql.internal.topic.replicas"), instanceOf(Short.class)); assertThat(coerced.get("ksql.internal.topic.replicas"), equalTo((short) 3)); assertThat(coerced.get("cache.max.bytes.buffering"), instanceOf(Long.class)); assertThat(coerced.get("cache.max.bytes.buffering"), equalTo(0L)); }
@Override public String toString() { return "ResourceConfig{" + "url=" + url + ", id='" + id + '\'' + ", resourceType=" + resourceType + '}'; }
@Test public void when_addNonexistentResourceWithFile_then_throwsException() { // Given String path = Paths.get("/i/do/not/exist").toString(); File file = new File(path); // Then expectedException.expect(JetException.class); expectedException.expectMessage("Not an existing, readable file: " + path); // When config.addClasspathResource(file); }
public static Object invokeMethod(Method method, Object target, Object... args) { try { return method.invoke(target, args); } catch (Exception ex) { handleReflectionException(ex); } throw new IllegalStateException("Should never get here"); }
@Test void testInvokeMethod() throws Exception { Method method = listStr.getClass().getDeclaredMethod("grow", int.class); method.setAccessible(true); ReflectUtils.invokeMethod(method, listStr, 4); Object elementData = ReflectUtils.getFieldValue(listStr, "elementData"); assertEquals(4, ((Object[]) elementData).length); }
@VisibleForTesting Optional<Set<String>> getSchedulerResourceTypeNamesUnsafe(final Object response) { if (getSchedulerResourceTypesMethod.isPresent() && response != null) { try { @SuppressWarnings("unchecked") final Set<? extends Enum> schedulerResourceTypes = (Set<? extends Enum>) getSchedulerResourceTypesMethod.get().invoke(response); return Optional.of( Preconditions.checkNotNull(schedulerResourceTypes).stream() .map(Enum::name) .collect(Collectors.toSet())); } catch (Exception e) { logger.error("Error invoking 'getSchedulerResourceTypes()'", e); } } return Optional.empty(); }
@Test void testDoesntCallGetSchedulerResourceTypesMethodIfAbsent() { final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector = new RegisterApplicationMasterResponseReflector(LOG, HasMethod.class); final Optional<Set<String>> schedulerResourceTypeNames = registerApplicationMasterResponseReflector.getSchedulerResourceTypeNamesUnsafe( new Object()); assertThat(schedulerResourceTypeNames).isNotPresent(); }
@Override public Integer doCall() throws Exception { // Operator id must be set if (ObjectHelper.isEmpty(operatorId)) { printer().println("Operator id must be set"); return -1; } delegate.setFile(name); delegate.setSource(source); delegate.setSink(sink); delegate.setSteps(steps); delegate.setErrorHandler(errorHandler); delegate.setProperties(properties); String pipe = delegate.constructPipe(); if (pipe.isEmpty()) { // Error in delegate exit now printer().println("Failed to construct Pipe resource"); return -1; } // --operator-id={id} is a syntax sugar for '--annotation camel.apache.org/operator.id={id}' if (annotations == null) { annotations = new String[] { "%s=%s".formatted(CamelKCommand.OPERATOR_ID_LABEL, operatorId) }; } else { annotations = Arrays.copyOf(annotations, annotations.length + 1); annotations[annotations.length - 1] = "%s=%s".formatted(CamelKCommand.OPERATOR_ID_LABEL, operatorId); } String annotationsContext = ""; if (annotations != null) { StringBuilder sb = new StringBuilder(" annotations:\n"); for (String annotation : annotations) { String[] keyValue = annotation.split("=", 2); if (keyValue.length != 2) { printer().printf( "annotation '%s' does not follow format <key>=<value>%n", annotation); continue; } sb.append(" ").append(keyValue[0]).append(": ").append(keyValue[1]).append("\n"); } annotationsContext = sb.toString(); } pipe = pipe.replaceFirst("\\{\\{ \\.Annotations }}\n", annotationsContext); String integrationSpec = ""; Traits traitsSpec = null; if (traits != null && traits.length > 0) { traitsSpec = TraitHelper.parseTraits(traits); } if (connects != null) { if (traitsSpec == null) { traitsSpec = new Traits(); } TraitHelper.configureConnects(traitsSpec, connects); } if (traitsSpec != null) { String traitYaml = KubernetesHelper.dumpYaml(traitsSpec); traitYaml = traitYaml.replaceAll("\n", "\n "); integrationSpec = " integration:\n spec:\n traits:\n %s\n".formatted(traitYaml.trim()); } pipe = pipe.replaceFirst("\\{\\{ \\.IntegrationSpec }}\n", integrationSpec); if (output != null) { delegate.setOutput(output); return delegate.dumpPipe(pipe); } Pipe pipeResource = KubernetesHelper.yaml(this.getClass().getClassLoader()).loadAs(pipe, Pipe.class); final AtomicBoolean updated = new AtomicBoolean(false); client(Pipe.class).resource(pipeResource).createOr(it -> { updated.set(true); return it.update(); }); if (updated.get()) { printer().printf("Pipe %s updated%n", pipeResource.getMetadata().getName()); } else { printer().printf("Pipe %s created%n", pipeResource.getMetadata().getName()); } if (wait || logs) { client(Pipe.class).withName(pipeResource.getMetadata().getName()) .waitUntilCondition(it -> "Running".equals(it.getStatus().getPhase()), 10, TimeUnit.MINUTES); } if (logs) { IntegrationLogs logsCommand = new IntegrationLogs(getMain()); logsCommand.withClient(client()); logsCommand.withName(pipeResource.getMetadata().getName()); logsCommand.doCall(); } return 0; }
@Test public void shouldBindWithDefaultOperatorId() throws Exception { Bind command = createCommand("timer", "log"); command.doCall(); String output = printer.getOutput(); Assertions.assertEquals(""" apiVersion: camel.apache.org/v1 kind: Pipe metadata: name: timer-to-log annotations: camel.apache.org/operator.id: camel-k spec: source: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: timer-source properties: message: "hello world" sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: log-sink #properties: #key: "value" """.trim(), output); } @Test public void shouldBindWithAnnotations() throws Exception { Bind command = createCommand("timer", "log"); command.annotations = new String[] { "app=camel-k" }; command.doCall(); String output = printer.getOutput(); Assertions.assertEquals(""" apiVersion: camel.apache.org/v1 kind: Pipe metadata: name: timer-to-log annotations: app: camel-k camel.apache.org/operator.id: camel-k spec: source: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: timer-source properties: message: "hello world" sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: log-sink #properties: #key: "value" """.trim(), output); } @Test public void shouldBindWithTraits() throws Exception { Bind command = createCommand("timer", "log"); command.traits = new String[] { "mount.configs=configmap:my-cm", "logging.color=true", "logging.level=DEBUG" }; command.doCall(); String output = printer.getOutput(); Assertions.assertEquals(""" apiVersion: camel.apache.org/v1 kind: Pipe metadata: name: timer-to-log annotations: camel.apache.org/operator.id: camel-k spec: integration: spec: traits: logging: color: true level: DEBUG mount: configs: - configmap:my-cm source: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: timer-source properties: message: "hello world" sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: log-sink #properties: #key: "value" """.trim(), output); } @Test public void shouldBindWithServiceBindings() throws Exception { Bind command = createCommand("timer", "http"); command.connects = new String[] { "serving.knative.dev/v1:Service:my-service" }; command.doCall(); String output = printer.getOutput(); Assertions.assertEquals(""" apiVersion: camel.apache.org/v1 kind: Pipe metadata: name: timer-to-http annotations: camel.apache.org/operator.id: camel-k spec: integration: spec: traits: service-binding: services: - serving.knative.dev/v1:Service:my-service source: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: timer-source properties: message: "hello world" sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1 name: http-sink properties: url: "https://my-service/path" """.trim(), output); } @Test public void shouldFailWithMissingOperatorId() throws Exception { Bind command = createCommand("timer:tick", "log"); command.operatorId = ""; Assertions.assertEquals(-1, command.doCall()); Assertions.assertEquals("Operator id must be set", printer.getOutput()); }
public CharSequence format(Monetary monetary) { // determine maximum number of decimals that can be visible in the formatted string // (if all decimal groups were to be used) int max = minDecimals; if (decimalGroups != null) for (int group : decimalGroups) max += group; final int maxVisibleDecimals = max; int smallestUnitExponent = monetary.smallestUnitExponent(); checkState(maxVisibleDecimals <= smallestUnitExponent, () -> "maxVisibleDecimals cannot exceed " + smallestUnitExponent + ": " + maxVisibleDecimals); // convert to decimal long satoshis = Math.abs(monetary.getValue()); int decimalShift = smallestUnitExponent - shift; DecimalNumber decimal = satoshisToDecimal(satoshis, roundingMode, decimalShift, maxVisibleDecimals); long numbers = decimal.numbers; long decimals = decimal.decimals; // formatting String decimalsStr = decimalShift > 0 ? String.format(Locale.US, "%0" + Integer.toString(decimalShift) + "d", decimals) : ""; StringBuilder str = new StringBuilder(decimalsStr); while (str.length() > minDecimals && str.charAt(str.length() - 1) == '0') str.setLength(str.length() - 1); // trim trailing zero int i = minDecimals; if (decimalGroups != null) { for (int group : decimalGroups) { if (str.length() > i && str.length() < i + group) { while (str.length() < i + group) str.append('0'); break; } i += group; } } if (str.length() > 0) str.insert(0, decimalMark); str.insert(0, numbers); if (monetary.getValue() < 0) str.insert(0, negativeSign); else if (positiveSign != 0) str.insert(0, positiveSign); if (codes != null) { if (codePrefixed) { str.insert(0, codeSeparator); str.insert(0, code()); } else { str.append(codeSeparator); str.append(code()); } } // Convert to non-arabic digits. if (zeroDigit != '0') { int offset = zeroDigit - '0'; for (int d = 0; d < str.length(); d++) { char c = str.charAt(d); if (Character.isDigit(c)) str.setCharAt(d, (char) (c + offset)); } } return str; }
@Test public void sat() { assertEquals("0", format(ZERO, 8, 0)); assertEquals("100000000", format(COIN, 8, 0)); assertEquals("2100000000000000", format(BitcoinNetwork.MAX_MONEY, 8, 0)); }
public int readInt4() { return byteBuf.readIntLE(); }
@Test void assertReadInt4() { when(byteBuf.readIntLE()).thenReturn(1); assertThat(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).readInt4(), is(1)); }
@Override public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) { if (param.getDeliveryType() == null) { return; } if (DeliveryTypeEnum.PICK_UP.getType().equals(param.getDeliveryType())) { calculateByPickUp(param); } else if (DeliveryTypeEnum.EXPRESS.getType().equals(param.getDeliveryType())) { calculateExpress(param, result); } }
@Test @DisplayName("按件计算运费包邮的情况") public void testCalculate_expressTemplateFree() { // SKU 1 : 100 * 2 = 200 // SKU 2 :200 * 10 = 2000 // 运费 0 // mock 方法 // 准备运费模板包邮配置数据 包邮 订单总件数 > 包邮件数时 12 > 10 templateRespBO.setFree(randomPojo(DeliveryExpressTemplateRespBO.Free.class, item -> item.setFreeCount(10).setFreePrice(1000))); when(deliveryExpressTemplateService.getExpressTemplateMapByIdsAndArea(eq(asSet(1L)), eq(10))) .thenReturn(MapUtil.of(1L, templateRespBO)); // 调用 calculator.calculate(reqBO, resultBO); // 断言 TradePriceCalculateRespBO.Price price = resultBO.getPrice(); assertThat(price) .extracting("totalPrice","discountPrice","couponPrice","pointPrice","deliveryPrice","payPrice") .containsExactly(2200, 0, 0, 0, 0, 2200); assertThat(resultBO.getItems()).hasSize(3); // 断言:SKU1 assertThat(resultBO.getItems().get(0)) .extracting("price", "count","discountPrice" ,"couponPrice", "pointPrice","deliveryPrice","payPrice") .containsExactly(100, 2, 0, 0, 0, 0, 200); // 断言:SKU2 assertThat(resultBO.getItems().get(1)) .extracting("price", "count","discountPrice" ,"couponPrice", "pointPrice","deliveryPrice","payPrice") .containsExactly(200, 10, 0, 0, 0, 0, 2000); // 断言:SKU3 未选中 assertThat(resultBO.getItems().get(2)) .extracting("price", "count","discountPrice" ,"couponPrice", "pointPrice","deliveryPrice","payPrice") .containsExactly(300, 1, 0, 0, 0, 0, 300); }
@Nullable protected TagsExtractor getQuickTextTagsSearcher() { return mTagsExtractor; }
@Test public void testOnSharedPreferenceChangedCauseLoading() throws Exception { SharedPrefsHelper.setPrefsValue(R.string.settings_key_search_quick_text_tags, false); Assert.assertSame( TagsExtractorImpl.NO_OP, mAnySoftKeyboardUnderTest.getQuickTextTagsSearcher()); SharedPrefsHelper.setPrefsValue(R.string.settings_key_search_quick_text_tags, true); Object searcher = mAnySoftKeyboardUnderTest.getQuickTextTagsSearcher(); Assert.assertNotSame(TagsExtractorImpl.NO_OP, searcher); SharedPrefsHelper.setPrefsValue(R.string.settings_key_search_quick_text_tags, true); Assert.assertSame(searcher, mAnySoftKeyboardUnderTest.getQuickTextTagsSearcher()); }
public static SchemaAndValue parseString(String value) { if (value == null) { return NULL_SCHEMA_AND_VALUE; } if (value.isEmpty()) { return new SchemaAndValue(Schema.STRING_SCHEMA, value); } ValueParser parser = new ValueParser(new Parser(value)); return parser.parse(false); }
@Test public void shouldParseBooleanLiteralsEmbeddedInArray() { SchemaAndValue schemaAndValue = Values.parseString("[true, false]"); assertEquals(Type.ARRAY, schemaAndValue.schema().type()); assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type()); assertEquals(Arrays.asList(true, false), schemaAndValue.value()); }
public static String clean(String charsetName) { try { return forName(charsetName).name(); } catch (IllegalArgumentException e) { return null; } }
@Test public void testCleaningCharsetName() { assertEquals("UTF-8", CharsetUtils.clean("utf-8")); assertEquals(null, CharsetUtils.clean("")); assertEquals(null, CharsetUtils.clean(null)); assertEquals("US-ASCII", CharsetUtils.clean(" us-ascii ")); assertEquals("UTF-8", CharsetUtils.clean("\"utf-8\"")); assertEquals("ISO-8859-1", CharsetUtils.clean("ISO-8859-1, latin1")); }
public ServerHealthState trump(ServerHealthState otherServerHealthState) { int result = healthStateLevel.compareTo(otherServerHealthState.healthStateLevel); return result > 0 ? this : otherServerHealthState; }
@Test public void shouldTrumpSuccessIfCurrentIsSuccess() { assertThat(SUCCESS_SERVER_HEALTH_STATE.trump(ANOTHER_SUCCESS_SERVER_HEALTH_STATE), is( ANOTHER_SUCCESS_SERVER_HEALTH_STATE)); }
public static String toFileURI(File file) { URI uri = file.toURI(); String uriString = uri.toASCIIString(); return uriString.replaceAll("^file:/", "file:///"); }
@Test @DisabledOnOs(OS.WINDOWS) void shouldCreateFileURIForFile() { assertThat(FileUtil.toFileURI(new File("/var/lib/foo/"))).isEqualTo("file:///var/lib/foo"); assertThat(FileUtil.toFileURI(new File("/var/a dir with spaces/foo"))).isEqualTo("file:///var/a%20dir%20with%20spaces/foo"); assertThat(FileUtil.toFileURI(new File("/var/司徒空在此/foo"))).isEqualTo("file:///var/%E5%8F%B8%E5%BE%92%E7%A9%BA%E5%9C%A8%E6%AD%A4/foo"); }
private Gamma() {}
@Test public void testExamples() { assertEquals(Double.NaN, gamma(0.0)); assertEquals(1.77245385, gamma(0.5), 1e-8); assertEquals(1.0, gamma(1.0)); assertEquals(24.0, gamma(5.0), 1e-8); //some random examples betwixt -100 and 100 assertEquals(8.06474995572965e+79, gamma(59.86728989339031), 1e+67); assertEquals(0.0005019871198070064, gamma(-7.260823951121694), 1e-18); assertEquals(1.5401131084717308e-110, gamma(-75.48705446197417), 1e-124); assertEquals(95932082427.69138, gamma(15.035762406520718), 1e-3); assertEquals(4.2868413548339677e+154, gamma(99.32984689647557), 1e+140); assertEquals(-4.971777508910858e-48, gamma(-40.14784332381653), 1e-60); assertEquals(5.3603547985340755e-96, gamma(-67.85881128534656), 1e-108); assertEquals(-1.887428186224555e-151, gamma(-96.63801919072759), 1e-163); assertEquals(6.0472720813564265e+125, gamma(84.61636884564746), 1e+113); assertEquals(-7.495823228458869e-128, gamma(-84.57833815656579), 1e-140); assertEquals(-2.834337137147687e-14, gamma(-16.831988025996992), 1e-26); assertEquals(8.990293245462624e+78, gamma(59.32945503543496), 1e+66); assertEquals(3.604695169965482e-83, gamma(-61.045472852581774), 1e-95); assertEquals(0.00020572694516842935, gamma(-7.545439745563854), 1e-16); assertEquals(-7.906506608405116e-105, gamma(-72.4403778408159), 1e-117); assertEquals(780133888.913568, gamma(13.192513244283958), 1e-4); assertEquals(-3.0601588660760365e-130, gamma(-86.09108451479372), 1e-142); assertEquals(2.310606358803366e+90, gamma(65.69557419730668), 1e+78); assertEquals(4.574728496203664e+16, gamma(19.669827320262186), 1e+4); assertEquals(1.5276823676246256e+74, gamma(56.618507066510915), 1e+62); assertEquals(0.0, gamma(-199.55885272585897), 1e-8); assertEquals(Double.POSITIVE_INFINITY, gamma(404.5418705074535)); assertEquals(Double.NaN, gamma(-2)); }
public static String printLogical(List<PlanFragment> fragments, FunctionAndTypeManager functionAndTypeManager, Session session) { Map<PlanFragmentId, PlanFragment> fragmentsById = Maps.uniqueIndex(fragments, PlanFragment::getId); PlanNodeIdGenerator idGenerator = new PlanNodeIdGenerator(); StringBuilder output = new StringBuilder(); output.append("digraph logical_plan {\n"); for (PlanFragment fragment : fragments) { printFragmentNodes(output, fragment, idGenerator, functionAndTypeManager, session); } for (PlanFragment fragment : fragments) { fragment.getRoot().accept(new EdgePrinter(output, fragmentsById, idGenerator), null); } output.append("}\n"); return output.toString(); }
@Test public void testPrintLogical() { String actual = printLogical( ImmutableList.of(createTestPlanFragment(0, TEST_TABLE_SCAN_NODE)), FUNCTION_AND_TYPE_MANAGER, testSessionBuilder().build()); String expected = join( System.lineSeparator(), "digraph logical_plan {", "subgraph cluster_0 {", "label = \"SOURCE\"", format("plannode_1[%s];", TEST_TABLE_SCAN_NODE_INNER_OUTPUT), "}", "}", ""); assertEquals(actual, expected); }
@Override public RedisScript<List<Long>> getScript() { return script; }
@Test public void getScriptTest() { DefaultRedisScript<List> redisScript = new DefaultRedisScript<>(); String scriptPath = "/META-INF/scripts/" + abstractRateLimiterAlgorithm.getScriptName(); redisScript.setScriptSource(new ResourceScriptSource(new ClassPathResource(scriptPath))); redisScript.setResultType(List.class); assertThat(redisScript.getScriptAsString(), is(abstractRateLimiterAlgorithm.getScript().getScriptAsString())); assertThat(redisScript.getResultType(), is(abstractRateLimiterAlgorithm.getScript().getResultType())); }
synchronized boolean tryToMoveTo(State to) { boolean res = false; State currentState = state; if (TRANSITIONS.get(currentState).contains(to)) { this.state = to; res = true; } LOG.debug("{} tryToMoveTo from {} to {} => {}", Thread.currentThread().getName(), currentState, to, res); return res; }
@Test @UseDataProvider("allStates") public void RESTARTING_is_only_allowed_from_STARTING_and_OPERATIONAL(NodeLifecycle.State state) { if (state == STARTING || state == OPERATIONAL) { verifyMoveTo(newNodeLifecycle(state), RESTARTING); } else { assertThat(newNodeLifecycle(state).tryToMoveTo(RESTARTING)).isFalse(); } }
public static <T> T newInstanceWithArgs(Class<T> clazz, Class<?>[] argTypes, Object[] args) throws SofaRpcRuntimeException { if (CommonUtils.isEmpty(argTypes)) { return newInstance(clazz); } try { if (!(clazz.isMemberClass() && !Modifier.isStatic(clazz.getModifiers()))) { Constructor<T> constructor = clazz.getDeclaredConstructor(argTypes); constructor.setAccessible(true); return constructor.newInstance(args); } else { Constructor<T>[] constructors = (Constructor<T>[]) clazz.getDeclaredConstructors(); if (constructors == null || constructors.length == 0) { throw new SofaRpcRuntimeException("The " + clazz.getCanonicalName() + " has no constructor with argTypes :" + Arrays.toString(argTypes)); } Constructor<T> constructor = null; for (Constructor<T> c : constructors) { Class[] ps = c.getParameterTypes(); if (ps.length == argTypes.length + 1) { // 长度多一 boolean allMath = true; for (int i = 1; i < ps.length; i++) { // 而且第二个开始的参数类型匹配 if (ps[i] != argTypes[i - 1]) { allMath = false; break; } } if (allMath) { constructor = c; break; } } } if (constructor == null) { throw new SofaRpcRuntimeException("The " + clazz.getCanonicalName() + " has no constructor with argTypes :" + Arrays.toString(argTypes)); } else { constructor.setAccessible(true); Object[] newArgs = new Object[args.length + 1]; System.arraycopy(args, 0, newArgs, 1, args.length); return constructor.newInstance(newArgs); } } } catch (SofaRpcRuntimeException e) { throw e; } catch (Throwable e) { throw new SofaRpcRuntimeException(e.getMessage(), e); } }
@Test public void testNewInstanceWithArgs() throws Exception { Assert.assertNotNull(ClassUtils.newInstanceWithArgs(TestMemberClass3.class, null, null)); Assert.assertNotNull(ClassUtils.newInstanceWithArgs(TestMemberClass3.class, new Class[] { String.class }, new Object[] { "2222" })); Assert.assertNotNull(ClassUtils.newInstanceWithArgs(TestMemberClass6.class, null, null)); Assert.assertNotNull(ClassUtils.newInstanceWithArgs(TestMemberClass6.class, new Class[] { int.class }, new Object[] { 222 })); Assert.assertNotNull(ClassUtils.newInstanceWithArgs(TestClass3.class, null, null)); Assert.assertNotNull(ClassUtils.newInstanceWithArgs(TestClass3.class, new Class[] { String.class, int.class }, new Object[] { "xxx", 222 })); }
public boolean verifySignature(String jwksUri, SignedJWT signedJwt) throws JOSEException, InvalidSignatureException, IOException, ParseException { var publicKeys = getPublicKeys(jwksUri); var kid = signedJwt.getHeader().getKeyID(); if (kid != null) { var key = ((RSAKey) publicKeys.getKeyByKeyId(kid)); if (key != null) { RSASSAVerifier rsaSSAVerifier = new RSASSAVerifier(key.toRSAPublicKey()); if (signedJwt.verify(rsaSSAVerifier)) return true; } } for (JWK jwk : publicKeys.getKeys()) { if (signedJwt.verify(new RSASSAVerifier(((RSAKey) jwk).toRSAPublicKey()))) return true; } throw new InvalidSignatureException("Could not validate signature of JWT token"); }
@Test void verifyInvalidSignatureTest() { assertThrows( InvalidSignatureException.class, () -> provider.verifySignature("jwskUri", SignedJWT.parse(VALID_REQUEST)) ); }
public static <T> T[] checkNonEmpty(T[] array, String name) { //No String concatenation for check if (checkNotNull(array, name).length == 0) { throw new IllegalArgumentException("Param '" + name + "' must not be empty"); } return array; }
@Test public void testCheckNonEmptyCharSequenceString() { Exception actualEx = null; try { ObjectUtil.checkNonEmpty((CharSequence) NULL_CHARSEQUENCE, NULL_NAME); } catch (Exception e) { actualEx = e; } assertNotNull(actualEx, TEST_RESULT_NULLEX_OK); assertTrue(actualEx instanceof NullPointerException, TEST_RESULT_EXTYPE_NOK); actualEx = null; try { ObjectUtil.checkNonEmpty((CharSequence) NON_NULL_CHARSEQUENCE, NON_NULL_NAME); } catch (Exception e) { actualEx = e; } assertNull(actualEx, TEST_RESULT_NULLEX_NOK); actualEx = null; try { ObjectUtil.checkNonEmpty((CharSequence) NON_NULL_EMPTY_CHARSEQUENCE, NON_NULL_EMPTY_NAME); } catch (Exception e) { actualEx = e; } assertNotNull(actualEx, TEST_RESULT_NULLEX_OK); assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK); actualEx = null; try { ObjectUtil.checkNonEmpty((CharSequence) NON_NULL_WHITESPACE_STRING, NON_NULL_EMPTY_NAME); } catch (Exception e) { actualEx = e; } assertNull(actualEx, TEST_RESULT_NULLEX_NOK); }
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR if (splittee == null || splitChar == null) { return new String[0]; } final String EMPTY_ELEMENT = ""; int spot; final int splitLength = splitChar.length(); final String adjacentSplit = splitChar + splitChar; final int adjacentSplitLength = adjacentSplit.length(); if (truncate) { while ((spot = splittee.indexOf(adjacentSplit)) != -1) { splittee = splittee.substring(0, spot + splitLength) + splittee.substring(spot + adjacentSplitLength, splittee.length()); } if (splittee.startsWith(splitChar)) { splittee = splittee.substring(splitLength); } if (splittee.endsWith(splitChar)) { // Remove trailing splitter splittee = splittee.substring(0, splittee.length() - splitLength); } } List<String> returns = new ArrayList<>(); final int length = splittee.length(); // This is the new length int start = 0; spot = 0; while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) { if (spot > 0) { returns.add(splittee.substring(start, spot)); } else { returns.add(EMPTY_ELEMENT); } start = spot + splitLength; } if (start < length) { returns.add(splittee.substring(start)); } else if (spot == length - splitLength) {// Found splitChar at end of line returns.add(EMPTY_ELEMENT); } return returns.toArray(new String[returns.size()]); }
@Test public void testSplitStringStringTrueWithTrailingSplitChars() { // Test ignore trailing split characters // Ignore adjacent delimiters assertThat("Ignore trailing split chars", JOrphanUtils.split("a,bc,,", ",", true), CoreMatchers.equalTo(new String[]{"a", "bc"})); }
public void add(T element) { Preconditions.checkNotNull(element); if (elements.add(element) && elements.size() > maxSize) { elements.poll(); } }
@Test void testQueueWithMaxSize2() { final BoundedFIFOQueue<Integer> testInstance = new BoundedFIFOQueue<>(2); assertThat(testInstance).isEmpty(); testInstance.add(1); assertThat(testInstance).contains(1); testInstance.add(2); assertThat(testInstance).contains(1, 2); testInstance.add(3); assertThat(testInstance).contains(2, 3); }
public int getColorForPoint(double x, double y, int px, int py, int plane, double brightness, ChunkMapper chunkMapper) { x /= 8.d; y /= 8.d; int centerChunkData = chunkData(px / 8, py / 8, plane, chunkMapper); if (centerChunkData == -1) { // No data in the center chunk? return 0; } double t = 0; double ty = 0; double tco = 0; double tcg = 0; int xmin = (int) (x - BLEND_RADIUS); int xmax = (int) Math.ceil(x + BLEND_RADIUS); int ymin = (int) (y - BLEND_RADIUS); int ymax = (int) Math.ceil(y + BLEND_RADIUS); for (int ucx = xmin; ucx < xmax; ucx++) { for (int ucy = ymin; ucy <= ymax; ucy++) { int val = chunkData(ucx, ucy, plane, chunkMapper); if (val == -1) { continue; } // Get the blend value, add 1/8 tile to make sure we don't div/0, convert to chunks double sigma = ((val >>> 24) + .125) / 8.d; // Calculate how far we have to be away before we can discard this value without // becoming visibly discontinuous double minDist = 1 + (sigma * BLEND_DISTRIBUTION); // Try to fast-fail double dxl = ucx - x; double dxh = dxl + 1.d; if (dxl < -minDist || dxl > minDist) { continue; } double dyl = ucy - y; double dyh = dyl + 1.d; if (dyl < -minDist || dyh > minDist) { continue; } // Calculate integrate a gaussian distribution in each dimension for // this chunk relative to the requested point double erfdivc = sigma * SQRT2; double m = (erf(dxl / erfdivc) - erf(dxh / erfdivc)) * (erf(dyl / erfdivc) - erf(dyh / erfdivc)); // Load our YCoCg24 values into floats double vy = (val >>> 16 & 0xFF) / 255.d; double vco = (byte) (val >>> 8) / 128.d; double vcg = (byte) val / 128.d; // And multiply by the weight ty += vy * m; tco += vco * m; tcg += vcg * m; t += m; } } // Convert back to int range values, and bounds check while we are at it byte ay = (byte) Math.min(Math.max(Math.round(ty / t * 255.d), 0), 255); byte aco = (byte) Math.min(Math.max(Math.round(tco * 128.d / t), -128), 127); byte acg = (byte) Math.min(Math.max(Math.round(tcg * 128.d / t), -128), 127); // convert back to rgb from YCoCg24 int g = (ay - (acg >> 1)) & 0xFF; int tmp = (g + acg) & 0xFF; int r = (tmp - (aco >> 1)) & 0xFF; int b = (r + aco) & 0xFF; // increase brightness with HSB float[] hsb = Color.RGBtoHSB(r, g, b, null); hsb[2] = (float) Math.pow(hsb[2], brightness); return 0xFFFFFF & Color.HSBtoRGB(hsb[0], hsb[1], hsb[2]); }
@Test public void testLoadSimple() throws IOException { Skybox skybox = new Skybox(CharSource.wrap("bounds 0 0 100 100 #00F // R 0 0 100 100\r\nr 99 99").openStream(), "simple"); Assert.assertEquals(0, skybox.getColorForPoint(0, 0, 0, 0, 0, 1, null)); int x = (99 * 64) + 32; int y = (99 * 64) + 32; Assert.assertEquals(0x0000FF, skybox.getColorForPoint(x, y, x, y, 0, 1, null)); }
@Override public void updateMember(ConsumerGroupMember newMember) { if (newMember == null) { throw new IllegalArgumentException("newMember cannot be null."); } ConsumerGroupMember oldMember = members.put(newMember.memberId(), newMember); maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, newMember); maybeUpdateServerAssignors(oldMember, newMember); maybeUpdatePartitionEpoch(oldMember, newMember); updateStaticMember(newMember); maybeUpdateGroupState(); maybeUpdateNumClassicProtocolMembers(oldMember, newMember); maybeUpdateClassicProtocolMembersSupportedProtocols(oldMember, newMember); }
@Test public void testUpdateMember() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); ConsumerGroupMember member; member = consumerGroup.getOrMaybeCreateMember("member", true); member = new ConsumerGroupMember.Builder(member) .setSubscribedTopicNames(Arrays.asList("foo", "bar")) .build(); consumerGroup.updateMember(member); assertEquals(member, consumerGroup.getOrMaybeCreateMember("member", false)); }
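The explicit null guard in updateMember() is not exercised above; a minimal sketch of that branch, reusing the createConsumerGroup helper from the test (not from the original suite):

@Test
public void testUpdateMemberRejectsNull() {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");
    // updateMember explicitly rejects null members
    assertThrows(IllegalArgumentException.class, () -> consumerGroup.updateMember(null));
}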
@Override public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table) { JdbcTableHandle handle = (JdbcTableHandle) table; ImmutableList.Builder<ColumnMetadata> columnMetadata = ImmutableList.builder(); for (JdbcColumnHandle column : jdbcMetadataCache.getColumns(session, handle)) { columnMetadata.add(column.getColumnMetadata()); } return new ConnectorTableMetadata(handle.getSchemaTableName(), columnMetadata.build()); }
@Test
public void getTableMetadata() {
    // known table
    ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(SESSION, tableHandle);
    assertEquals(tableMetadata.getTable(), new SchemaTableName("example", "numbers"));
    assertEquals(tableMetadata.getColumns(), ImmutableList.of(
            new ColumnMetadata("text", VARCHAR, false, null, null, false, emptyMap()), // primary key is not null in H2
            new ColumnMetadata("text_short", createVarcharType(32)),
            new ColumnMetadata("value", BIGINT)));

    // escaping name patterns
    JdbcTableHandle specialTableHandle = metadata.getTableHandle(SESSION, new SchemaTableName("exa_ple", "num_ers"));
    ConnectorTableMetadata specialTableMetadata = metadata.getTableMetadata(SESSION, specialTableHandle);
    assertEquals(specialTableMetadata.getTable(), new SchemaTableName("exa_ple", "num_ers"));
    assertEquals(specialTableMetadata.getColumns(), ImmutableList.of(
            new ColumnMetadata("te_t", VARCHAR, false, null, null, false, emptyMap()), // primary key is not null in H2
            new ColumnMetadata("va%ue", BIGINT)));

    // unknown tables should produce null
    unknownTableMetadata(new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName("u", "numbers"), null, "unknown", "unknown"));
    unknownTableMetadata(new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName("example", "numbers"), null, "example", "unknown"));
    unknownTableMetadata(new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName("example", "numbers"), null, "unknown", "numbers"));
}
@Override public String getRomOAID() { String oaid = null; try { Intent intent = new Intent(); intent.setComponent(new ComponentName("com.coolpad.deviceidsupport", "com.coolpad.deviceidsupport.DeviceIdService")); if (context.bindService(intent, service, Context.BIND_AUTO_CREATE)) { CoolpadInterface anInterface = new CoolpadInterface(OAIDService.BINDER_QUEUE.take()); oaid = anInterface.getOAID(context.getPackageName()); context.unbindService(service); } } catch (Throwable throwable) { SALog.i(TAG, throwable); } return oaid; }
@Test
public void getRomOAID() {
    CoolpadImpl coolpad = new CoolpadImpl(mApplication);
    // Assertions left disabled in the source; a guarded variant is sketched below.
    // if (coolpad.isSupported()) {
    //     Assert.assertNull(coolpad.getRomOAID());
    // }
}
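The assertions above are commented out in the source, presumably because the Coolpad OAID service is device-dependent; a purely illustrative sketch of a guarded variant, assuming the isSupported() check used in the commented-out code:

@Test
public void getRomOAIDGuarded() {
    CoolpadImpl coolpad = new CoolpadImpl(mApplication);
    // Only assert on devices where the Coolpad OAID service is available
    if (coolpad.isSupported()) {
        // In a unit-test environment without the service binding, no OAID is expected
        Assert.assertNull(coolpad.getRomOAID());
    }
}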
@Override public DataTableType dataTableType() { return dataTableType; }
@Test void target_type_must_class_type() throws NoSuchMethodException { Method method = JavaDataTableTypeDefinitionTest.class.getMethod("converts_datatable_to_optional_string", DataTable.class); JavaDataTableTypeDefinition definition = new JavaDataTableTypeDefinition(method, lookup, new String[0]); assertThat(definition.dataTableType().transform(dataTable.cells()), is(Optional.of("converts_datatable_to_optional_string"))); }
public synchronized ListenableFuture<?> waitForMinimumWorkers() {
    if (currentWorkerCount >= workerMinCount) {
        return immediateFuture(null);
    }

    SettableFuture<?> future = SettableFuture.create();
    workerSizeFutures.add(future);

    // if future does not finish in wait period, complete with an exception
    ScheduledFuture<?> timeoutTask = executor.schedule(
            () -> {
                synchronized (this) {
                    future.setException(new PrestoException(
                            GENERIC_INSUFFICIENT_RESOURCES,
                            format("Insufficient active worker nodes. Waited %s for at least %s workers, but only %s workers are active", executionMaxWait, workerMinCount, currentWorkerCount)));
                }
            },
            executionMaxWait.toMillis(),
            MILLISECONDS);

    // remove future if finished (e.g., canceled, timed out)
    future.addListener(() -> {
        timeoutTask.cancel(true);
        removeWorkerFuture(future);
    }, executor);

    return future;
}
@Test(timeOut = 60_000) public void testWaitForMinimumWorkers() throws InterruptedException { ListenableFuture<?> workersFuture = waitForMinimumWorkers(); for (int i = numWorkers.get() + 1; i < DESIRED_WORKER_COUNT - 1; i++) { assertFalse(workersTimeout.get()); addWorker(nodeManager); } assertFalse(monitor.hasRequiredWorkers()); assertFalse(workersTimeout.get()); assertEquals(minWorkersLatch.getCount(), 1); addWorker(nodeManager); minWorkersLatch.await(1, SECONDS); assertTrue(workersFuture.isDone()); assertFalse(workersTimeout.get()); assertTrue(monitor.hasRequiredWorkers()); }
@Override public synchronized void close() { super.close(); notifyAll(); }
@Test
public void testMetadataEquivalentResponsesBackoff() throws InterruptedException {
    long time = 0;
    metadata.updateWithCurrentRequestVersion(responseWithCurrentTopics(), false, time);
    assertTrue(metadata.timeToNextUpdate(time) > 0, "No update needed");
    metadata.requestUpdate(false);
    assertTrue(metadata.timeToNextUpdate(time) > 0, "Still no update needed due to backoff");
    time += (long) (refreshBackoffMs * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER));
    metadata.updateWithCurrentRequestVersion(responseWithCurrentTopics(), false, time);
    assertTrue(metadata.timeToNextUpdate(time) > 0, "No update needed after equivalent metadata response");
    metadata.requestUpdate(false);
    assertTrue(metadata.timeToNextUpdate(time) > 0, "Still no update needed due to backoff");
    assertTrue(metadata.timeToNextUpdate(time + refreshBackoffMs) > 0, "Still no update needed due to exponential backoff");
    time += (long) (refreshBackoffMs * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER));
    assertEquals(0, metadata.timeToNextUpdate(time), "Update needed now that backoff time expired");

    String topic = "my-topic";
    metadata.close();
    Thread t1 = asyncFetch(topic, 500);
    t1.join();
    assertEquals(KafkaException.class, backgroundError.get().getClass());
    assertTrue(backgroundError.get().toString().contains("Requested metadata update after close"));
    clearBackgroundError();
}
public void reloadAllNotes(AuthenticationInfo subject) throws IOException { this.noteManager.reloadNotes(); if (notebookRepo instanceof NotebookRepoSync) { NotebookRepoSync mainRepo = (NotebookRepoSync) notebookRepo; if (mainRepo.getRepoCount() > 1) { mainRepo.sync(subject); } } }
@Test
void testReloadAllNotes() throws IOException {
    String note1Id = notebook.createNote("note1", AuthenticationInfo.ANONYMOUS);
    notebook.processNote(note1Id, note1 -> {
        Paragraph p1 = note1.insertNewParagraph(0, AuthenticationInfo.ANONYMOUS);
        p1.setText("%md hello world");
        return null;
    });
    String note2Id = notebook.cloneNote(note1Id, "copied note", AuthenticationInfo.ANONYMOUS);

    // load copied notebook on memory when reloadAllNotes() is called
    Note copiedNote = notebookRepo.get(note2Id, "/copied note", anonymous);
    notebook.reloadAllNotes(anonymous);
    List<NoteInfo> notesInfo = notebook.getNotesInfo();
    assertEquals(2, notesInfo.size());
    NoteInfo found = null;
    for (NoteInfo noteInfo : notesInfo) {
        if (noteInfo.getId().equals(copiedNote.getId())) {
            found = noteInfo;
            break;
        }
    }
    assertNotNull(found);
    assertEquals(found.getId(), copiedNote.getId());
    assertEquals(notebook.processNote(found.getId(), Note::getName), copiedNote.getName());
    // the format has changed slightly due to Notebook.convertFromSingleResultToMultipleResultsFormat
    notebook.processNote(found.getId(), note -> {
        assertEquals(note.getParagraphs().size(), copiedNote.getParagraphs().size());
        assertEquals(note.getParagraphs().get(0).getText(), copiedNote.getParagraphs().get(0).getText());
        assertEquals(note.getParagraphs().get(0).settings, copiedNote.getParagraphs().get(0).settings);
        assertEquals(note.getParagraphs().get(0).getTitle(), copiedNote.getParagraphs().get(0).getTitle());
        return null;
    });

    // delete notebook from notebook list when reloadAllNotes() is called
    notebook.reloadAllNotes(anonymous);
    notesInfo = notebook.getNotesInfo();
    assertEquals(2, notesInfo.size());
}
@VisibleForTesting public Map<String, HashSet<String>> runTest(Set<String> inputList, Map<String, Long> sizes) { try { conf = msConf; testDatasizes = sizes; coverageList.clear(); removeNestedStructure(inputList); createOutputList(inputList, "test", "test"); } catch (Exception e) { LOG.error("MetaToolTask failed on ListExtTblLocs test: ", e); } return coverageList; }
@Test
public void testGroupLocations() {
    Set<String> inputLocations = new TreeSet<>();
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
    MetaToolTaskListExtTblLocs.msConf = conf;
    MetaToolTaskListExtTblLocs task = new MetaToolTaskListExtTblLocs();

    // Case 1: Multiple unpartitioned external tables, expected o/p: 1 location
    inputLocations.add("/warehouse/customLocation/t1");
    inputLocations.add("/warehouse/customLocation/t2");
    inputLocations.add("/warehouse/customLocation/t3");
    Map<String, HashSet<String>> output = task.runTest(inputLocations, null);
    Assert.assertEquals(1, output.size());
    String expectedOutput = "/warehouse/customLocation";
    Assert.assertTrue(output.containsKey(expectedOutput));
    HashSet<String> coveredLocs = output.get(expectedOutput);
    Assert.assertEquals(3, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/t1"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/t2"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/t3"));

    // Case 2: inputs at multiple depths
    //   inputs ../ext/b0       - contains 1 location
    //          ../ext/p=0      - contains 1 location
    //          ../ext/b1/b2/b3 - contains 3 locations (p1, p2, p3)
    //   expected output: [../ext/b1/b2/b3 containing 3 elements, t1, p0]
    inputLocations.clear();
    inputLocations.add("/warehouse/customLocation/ext/b0");
    inputLocations.add("/warehouse/customLocation/ext/p=0");
    inputLocations.add("/warehouse/customLocation/ext/b1/b2/b3/p=1");
    inputLocations.add("/warehouse/customLocation/ext/b1/b2/b3/p=2");
    inputLocations.add("/warehouse/customLocation/ext/b1/b2/b3/p=3");
    output = task.runTest(inputLocations, null);
    Assert.assertEquals(3, output.size());
    String expectedOutput1 = "/warehouse/customLocation/ext/b0";
    Assert.assertTrue(output.containsKey(expectedOutput1));
    coveredLocs = output.get(expectedOutput1);
    Assert.assertEquals(1, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0"));
    String expectedOutput2 = "/warehouse/customLocation/ext/p=0";
    Assert.assertTrue(output.containsKey(expectedOutput2));
    coveredLocs = output.get(expectedOutput2);
    Assert.assertEquals(1, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/p=0"));
    String expectedOutput3 = "/warehouse/customLocation/ext/b1/b2/b3";
    Assert.assertTrue(output.containsKey(expectedOutput3));
    coveredLocs = output.get(expectedOutput3);
    Assert.assertEquals(3, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/b3/p=1"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/b3/p=2"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/b3/p=3"));

    // Case 3: root with a lot of leaves
    //   inputs ../ext/   - contains 4 locations
    //          ../ext/b1 - contains 3 locations
    //   expected output: [../ext covering all locations] since root (ext) has more than half of locations
    inputLocations.clear();
    inputLocations.add("/warehouse/customLocation/ext/p=0");
    inputLocations.add("/warehouse/customLocation/ext/p=1");
    inputLocations.add("/warehouse/customLocation/ext/p=2");
    inputLocations.add("/warehouse/customLocation/ext/p=3");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=4");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=5");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=6");
    output = task.runTest(inputLocations, null);
    Assert.assertEquals(1, output.size());
    expectedOutput = "/warehouse/customLocation/ext";
    Assert.assertTrue(output.containsKey(expectedOutput));
    coveredLocs = output.get(expectedOutput);
    Assert.assertEquals(7, coveredLocs.size());
    Assert.assertTrue(coveredLocs.containsAll(inputLocations));

    // Case 4: root with a lot of trivial locations (non leaf)
    //   inputs ../ext/   - contains 4 trivial locations
    //          ../ext/b1 - contains 3 locations
    //   expected output: [../ext covering all locations] since non trivial (grouped) locations under ext is less than half
    inputLocations.clear();
    inputLocations.add("/warehouse/customLocation/ext/dir01/dir02/p=0");
    inputLocations.add("/warehouse/customLocation/ext/dir11/dir12/p=1");
    inputLocations.add("/warehouse/customLocation/ext/dir21/dir22/p=2");
    inputLocations.add("/warehouse/customLocation/ext/dir31/dir32/p=3");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=4");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=5");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=6");
    output = task.runTest(inputLocations, null);
    Assert.assertEquals(1, output.size());
    expectedOutput = "/warehouse/customLocation/ext";
    Assert.assertTrue(output.containsKey(expectedOutput));
    coveredLocs = output.get(expectedOutput);
    Assert.assertEquals(7, coveredLocs.size());
    Assert.assertTrue(coveredLocs.containsAll(inputLocations));

    // Case 5: several grouped locations and 1 outlier at root
    //   inputs ../ext/b0 - contains 4 locations
    //          ../ext/b1 - contains 3 locations
    //   expected output: [../ext/b0, ../ext/b1, p=7]
    inputLocations.clear();
    inputLocations.add("/warehouse/customLocation/ext/b0/p=0");
    inputLocations.add("/warehouse/customLocation/ext/b0/p=1");
    inputLocations.add("/warehouse/customLocation/ext/b0/p=2");
    inputLocations.add("/warehouse/customLocation/ext/b0/p=3");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=4");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=5");
    inputLocations.add("/warehouse/customLocation/ext/b1/p=6");
    inputLocations.add("/warehouse/customLocation/ext/p=7");
    output = task.runTest(inputLocations, null);
    Assert.assertEquals(3, output.size());
    expectedOutput1 = "/warehouse/customLocation/ext/b0";
    Assert.assertTrue(output.containsKey(expectedOutput1));
    coveredLocs = output.get(expectedOutput1);
    Assert.assertEquals(4, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0/p=0"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0/p=1"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0/p=2"));
    expectedOutput2 = "/warehouse/customLocation/ext/b1";
    Assert.assertTrue(output.containsKey(expectedOutput2));
    coveredLocs = output.get(expectedOutput2);
    Assert.assertEquals(3, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/p=4"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/p=5"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/p=6"));
    expectedOutput3 = "/warehouse/customLocation/ext/p=7";
    Assert.assertTrue(output.containsKey(expectedOutput3));
    coveredLocs = output.get(expectedOutput3);
    Assert.assertEquals(1, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/p=7"));

    // Case 6: inputs with nested structure
    //   inputs ../ext/b0    - contains 4 locations
    //          ../ext/b1
    //          ../ext/b1/b2 - contains 4 locations
    //   expected output: [../ext/b0, ../ext/b1] (no extra location for b2 since covered by b1 itself)
    inputLocations.clear();
    inputLocations.add("/warehouse/customLocation/ext/b0/p=0");
    inputLocations.add("/warehouse/customLocation/ext/b0/p=1");
    inputLocations.add("/warehouse/customLocation/ext/b0/p=2");
    inputLocations.add("/warehouse/customLocation/ext/b0/p=3");
    inputLocations.add("/warehouse/customLocation/ext/b1");
    inputLocations.add("/warehouse/customLocation/ext/b1/b2/p=7");
    inputLocations.add("/warehouse/customLocation/ext/b1/b2/p=8");
    inputLocations.add("/warehouse/customLocation/ext/b1/b2/p=9");
    output = task.runTest(inputLocations, null);
    Assert.assertEquals(2, output.size());
    expectedOutput1 = "/warehouse/customLocation/ext/b0";
    Assert.assertTrue(output.containsKey(expectedOutput1));
    coveredLocs = output.get(expectedOutput1);
    Assert.assertEquals(4, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0/p=0"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0/p=1"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b0/p=2"));
    expectedOutput2 = "/warehouse/customLocation/ext/b1";
    Assert.assertTrue(output.containsKey(expectedOutput2));
    coveredLocs = output.get(expectedOutput2);
    Assert.assertEquals(4, coveredLocs.size());
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/p=7"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/p=8"));
    Assert.assertTrue(coveredLocs.contains("/warehouse/customLocation/ext/b1/b2/p=9"));
}
@Override public MapperResult findChangeConfigFetchRows(MapperContext context) { final String tenant = (String) context.getWhereParameter(FieldConstant.TENANT); final String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID); final String group = (String) context.getWhereParameter(FieldConstant.GROUP_ID); final String appName = (String) context.getWhereParameter(FieldConstant.APP_NAME); final Timestamp startTime = (Timestamp) context.getWhereParameter(FieldConstant.START_TIME); final Timestamp endTime = (Timestamp) context.getWhereParameter(FieldConstant.END_TIME); List<Object> paramList = new ArrayList<>(); final String sqlFetchRows = "SELECT id,data_id,group_id,tenant_id,app_name,content,type,md5,gmt_modified FROM" + " config_info WHERE "; String where = " 1=1 "; if (!StringUtils.isBlank(dataId)) { where += " AND data_id LIKE ? "; paramList.add(dataId); } if (!StringUtils.isBlank(group)) { where += " AND group_id LIKE ? "; paramList.add(group); } if (!StringUtils.isBlank(tenant)) { where += " AND tenant_id = ? "; paramList.add(tenant); } if (!StringUtils.isBlank(appName)) { where += " AND app_name = ? "; paramList.add(appName); } if (startTime != null) { where += " AND gmt_modified >=? "; paramList.add(startTime); } if (endTime != null) { where += " AND gmt_modified <=? "; paramList.add(endTime); } return new MapperResult( sqlFetchRows + where + " OFFSET " + context.getStartRow() + " ROWS FETCH NEXT " + context.getPageSize() + " ROWS ONLY", paramList); }
@Test void testFindChangeConfigFetchRows() { MapperResult mapperResult = configInfoMapperByDerby.findChangeConfigFetchRows(context); assertEquals(mapperResult.getSql(), "SELECT id,data_id,group_id,tenant_id,app_name,content,type,md5,gmt_modified FROM config_info " + "WHERE 1=1 AND app_name = ? AND gmt_modified >=? AND gmt_modified <=? OFFSET " + startRow + " ROWS FETCH NEXT " + pageSize + " ROWS ONLY"); assertArrayEquals(new Object[] {appName, startTime, endTime}, mapperResult.getParamList().toArray()); }
@Override
public void deleteArticleCategory(Long id) {
    // validate that the category exists
    validateArticleCategoryExists(id);
    // validate that no articles are still associated with it
    Long count = articleService.getArticleCountByCategoryId(id);
    if (count > 0) {
        throw exception(ARTICLE_CATEGORY_DELETE_FAIL_HAVE_ARTICLES);
    }
    // delete
    articleCategoryMapper.deleteById(id);
}
@Test
public void testDeleteArticleCategory_success() {
    // mock data
    ArticleCategoryDO dbArticleCategory = randomPojo(ArticleCategoryDO.class);
    articleCategoryMapper.insert(dbArticleCategory); // @Sql: insert an existing record first
    // prepare parameters
    Long id = dbArticleCategory.getId();
    // invoke
    articleCategoryService.deleteArticleCategory(id);
    // verify the data no longer exists
    assertNull(articleCategoryMapper.selectById(id));
}
synchronized void updateServiceStatuses(List<VespaService> services) { try { setStatus(services); } catch (Exception e) { log.log(Level.SEVERE, "Unable to update service pids from sentinel", e); } }
@Test public void testElastic() { String response = "container state=RUNNING mode=AUTO pid=14338 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"get/container.0\"\n" + "container-clustercontroller state=RUNNING mode=AUTO pid=25020 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"admin/cluster-controllers/0\"\n" + "distributor state=RUNNING mode=AUTO pid=25024 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"search/distributor/0\"\n" + "docprocservice state=RUNNING mode=AUTO pid=11973 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"docproc/cluster.search.indexing/0\"\n" + "logd state=RUNNING mode=AUTO pid=25016 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"hosts/vespa19.dev.gq1.yahoo.com/logd\"\n" + "logserver state=RUNNING mode=AUTO pid=25018 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"admin/logserver\"\n" + "metricsproxy state=RUNNING mode=AUTO pid=13107 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"hosts/vespa19.dev.gq1.yahoo.com/metricsproxy\"\n" + "searchnode state=RUNNING mode=AUTO pid=25023 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"search/search/cluster.search/0\"\n" + "slobrok state=RUNNING mode=AUTO pid=25019 exitstatus=0 autostart=TRUE autorestart=TRUE id=\"client\"\n" + "\n"; ConfigSentinelDummy configsentinel = new ConfigSentinelDummy(response); List<VespaService> services = new ArrayList<>(); VespaService container = VespaService.create("container", "get/container.0", -1); VespaService containerClusterController = VespaService.create("container-clustercontroller", "get/container.0", -1); VespaService notPresent = VespaService.create("dummy","fake", -1); services.add(container); services.add(containerClusterController); services.add(notPresent); try (MockConfigSentinelClient client = new MockConfigSentinelClient(configsentinel)) { client.updateServiceStatuses(services); assertTrue(container.isAlive()); assertEquals(14338, container.getPid()); assertEquals("RUNNING", container.getState()); assertTrue(containerClusterController.isAlive()); assertEquals(25020, containerClusterController.getPid()); assertEquals("RUNNING", containerClusterController.getState()); } }
public static String identifyDriver(String nameContainsProductInfo) { return DialectFactory.identifyDriver(nameContainsProductInfo); }
@Test
public void identifyDriverTest() {
    String url = "jdbc:h2:file:./db/test;AUTO_SERVER=TRUE;DB_CLOSE_ON_EXIT=FALSE;MODE=MYSQL";
    String driver = DriverUtil.identifyDriver(url);
    // an H2 URL in MySQL compatibility mode still resolves to the H2 driver
    assertEquals("org.h2.Driver", driver);
}
public ProcessWrapper create(@Nullable Path baseDir, Consumer<String> stdOutLineConsumer, String... command) { return new ProcessWrapper(baseDir, stdOutLineConsumer, Map.of(), command); }
@Test public void should_log_error_output_in_debug_mode() throws IOException { logTester.setLevel(Level.DEBUG); var root = temp.newFolder().toPath(); var processWrapper = underTest.create(root, v -> {}, Map.of("LANG", "en_US"), "git", "blame"); assertThatThrownBy(processWrapper::execute) .isInstanceOf(IllegalStateException.class); assertThat(logTester.logs(Level.DEBUG).get(0)).startsWith("fatal:"); }
public void onLoaded() {
    groups.forEach((groupId, group) -> {
        switch (group.type()) {
            case CONSUMER:
                ConsumerGroup consumerGroup = (ConsumerGroup) group;
                log.info("Loaded consumer group {} with {} members.", groupId, consumerGroup.members().size());
                consumerGroup.members().forEach((memberId, member) -> {
                    log.debug("Loaded member {} in consumer group {}.", memberId, groupId);
                    scheduleConsumerGroupSessionTimeout(groupId, memberId);
                    if (member.state() == MemberState.UNREVOKED_PARTITIONS) {
                        scheduleConsumerGroupRebalanceTimeout(
                            groupId,
                            member.memberId(),
                            member.memberEpoch(),
                            member.rebalanceTimeoutMs()
                        );
                    }
                });
                break;

            case CLASSIC:
                ClassicGroup classicGroup = (ClassicGroup) group;
                log.info("Loaded classic group {} with {} members.", groupId, classicGroup.allMembers().size());
                classicGroup.allMembers().forEach(member -> {
                    log.debug("Loaded member {} in classic group {}.", member.memberId(), groupId);
                    rescheduleClassicGroupMemberHeartbeat(classicGroup, member);
                });

                if (classicGroup.numMembers() > classicGroupMaxSize) {
                    // In case the max size config has changed.
                    prepareRebalance(classicGroup, "Freshly-loaded group " + groupId +
                        " (size " + classicGroup.numMembers() + ") is over capacity " + classicGroupMaxSize +
                        ". Rebalancing in order to give a chance for consumers to commit offsets");
                }
                break;

            case SHARE:
                // Nothing for now for the ShareGroup, as no members are persisted.
                break;

            default:
                log.warn("Loaded group {} with an unknown group type {}.", groupId, group.type());
                break;
        }
    });
}
@Test
public void testOnLoaded() {
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";

    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range")))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .addTopic(barTopicId, barTopicName, 3)
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder("foo", 10)
            .withMember(new ConsumerGroupMember.Builder("foo-1")
                .setState(MemberState.UNREVOKED_PARTITIONS)
                .setMemberEpoch(9)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setSubscribedTopicNames(Collections.singletonList("foo"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1, 2)))
                .setPartitionsPendingRevocation(mkAssignment(
                    mkTopicAssignment(fooTopicId, 3, 4, 5)))
                .build())
            .withMember(new ConsumerGroupMember.Builder("foo-2")
                .setState(MemberState.STABLE)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(10)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setSubscribedTopicNames(Collections.singletonList("foo"))
                .setServerAssignorName("range")
                .build())
            .withAssignment("foo-1", mkAssignment(
                mkTopicAssignment(fooTopicId, 3, 4, 5)))
            .withAssignmentEpoch(10))
        .build();

    // Let's assume that all the records have been replayed and now
    // onLoaded is called to signal it.
    context.groupMetadataManager.onLoaded();

    // All members should have a session timeout in place.
    assertNotNull(context.timer.timeout(groupSessionTimeoutKey("foo", "foo-1")));
    assertNotNull(context.timer.timeout(groupSessionTimeoutKey("foo", "foo-2")));

    // foo-1 should also have a revocation timeout in place.
    assertNotNull(context.timer.timeout(consumerGroupRebalanceTimeoutKey("foo", "foo-1")));
}
@Override public DataSink createDataSink(Context context) { FactoryHelper.createFactoryHelper(this, context) .validateExcept(PREFIX_TABLE_PROPERTIES, PREFIX_CATALOG_PROPERTIES); Map<String, String> allOptions = context.getFactoryConfiguration().toMap(); Map<String, String> catalogOptions = new HashMap<>(); Map<String, String> tableOptions = new HashMap<>(); allOptions.forEach( (key, value) -> { if (key.startsWith(PREFIX_TABLE_PROPERTIES)) { tableOptions.put(key.substring(PREFIX_TABLE_PROPERTIES.length()), value); } else if (key.startsWith(PaimonDataSinkOptions.PREFIX_CATALOG_PROPERTIES)) { catalogOptions.put( key.substring( PaimonDataSinkOptions.PREFIX_CATALOG_PROPERTIES.length()), value); } }); Options options = Options.fromMap(catalogOptions); try (Catalog catalog = FlinkCatalogFactory.createPaimonCatalog(options)) { Preconditions.checkNotNull( catalog.listDatabases(), "catalog option of Paimon is invalid."); } catch (Exception e) { throw new RuntimeException("failed to create or use paimon catalog", e); } ZoneId zoneId = ZoneId.systemDefault(); if (!Objects.equals( context.getPipelineConfiguration().get(PipelineOptions.PIPELINE_LOCAL_TIME_ZONE), PipelineOptions.PIPELINE_LOCAL_TIME_ZONE.defaultValue())) { zoneId = ZoneId.of( context.getPipelineConfiguration() .get(PipelineOptions.PIPELINE_LOCAL_TIME_ZONE)); } String commitUser = context.getFactoryConfiguration().get(PaimonDataSinkOptions.COMMIT_USER); String partitionKey = context.getFactoryConfiguration().get(PaimonDataSinkOptions.PARTITION_KEY); Map<TableId, List<String>> partitionMaps = new HashMap<>(); if (!partitionKey.isEmpty()) { for (String tables : partitionKey.split(";")) { String[] splits = tables.split(":"); if (splits.length == 2) { TableId tableId = TableId.parse(splits[0]); List<String> partitions = Arrays.asList(splits[1].split(",")); partitionMaps.put(tableId, partitions); } else { throw new IllegalArgumentException( PaimonDataSinkOptions.PARTITION_KEY.key() + " is malformed, please refer to the documents"); } } } PaimonRecordSerializer<Event> serializer = new PaimonRecordEventSerializer(zoneId); String schemaOperatorUid = context.getPipelineConfiguration() .get(PipelineOptions.PIPELINE_SCHEMA_OPERATOR_UID); return new PaimonDataSink( options, tableOptions, commitUser, partitionMaps, serializer, zoneId, schemaOperatorUid); }
@Test void testPrefixRequireOption() { DataSinkFactory sinkFactory = FactoryDiscoveryUtils.getFactoryByIdentifier("paimon", DataSinkFactory.class); Assertions.assertThat(sinkFactory).isInstanceOf(PaimonDataSinkFactory.class); Configuration conf = Configuration.fromMap( ImmutableMap.<String, String>builder() .put(PaimonDataSinkOptions.METASTORE.key(), "filesystem") .put( PaimonDataSinkOptions.WAREHOUSE.key(), new File( temporaryFolder.toFile(), UUID.randomUUID().toString()) .toString()) .put("catalog.properties.uri", "") .put("table.properties.bucket", "2") .build()); DataSink dataSink = sinkFactory.createDataSink( new FactoryHelper.DefaultContext( conf, conf, Thread.currentThread().getContextClassLoader())); Assertions.assertThat(dataSink).isInstanceOf(PaimonDataSink.class); }
public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } }
@Test public void invokeCompleteAfterAlreadyComplete() { RequestFuture<Void> future = new RequestFuture<>(); future.complete(null); assertThrows(IllegalStateException.class, () -> future.complete(null)); }
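The guard in complete() that rejects RuntimeException values is not exercised above; a minimal sketch of that branch (not from the original suite):

@Test
public void invokeCompleteWithRuntimeExceptionValue() {
    RequestFuture<Object> future = new RequestFuture<>();
    // complete() rejects RuntimeException instances by design
    assertThrows(IllegalArgumentException.class, () -> future.complete(new RuntimeException("boom")));
}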
@Override public List<RedisClientInfo> getClientList(RedisClusterNode node) { RedisClient entry = getEntry(node); RFuture<List<String>> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST); List<String> list = syncFuture(f); return CONVERTER.convert(list.toArray(new String[list.size()])); }
@Test public void testGetClientList() { RedisClusterNode master = getFirstMaster(); List<RedisClientInfo> list = connection.getClientList(master); assertThat(list.size()).isGreaterThan(10); }
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }

        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
            ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
            : MAPPER.readTree(bytes);

        final Object coerced = enforceFieldType(
            "$",
            new JsonValueContext(value, schema)
        );

        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }

        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }

        throw new SerializationException(
            "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
@Test
public void shouldTreatNullAsNull() {
    // Given:
    final HashMap<String, Object> mapValue = new HashMap<>();
    mapValue.put("a", 1.0);
    mapValue.put("b", null);

    final Map<String, Object> row = new HashMap<>();
    row.put("ordertime", null);
    row.put("@orderid", null);
    row.put("itemid", null);
    row.put("orderunits", null);
    row.put("arrayCol", new Double[]{0.0, null});
    row.put("mapCol", mapValue);
    row.put("timefield", null);
    row.put("datefield", null);
    row.put("timestampfield", null);
    row.put("bytesfield", null);

    final byte[] bytes = serializeJson(row);

    // When:
    final Struct result = deserializer.deserialize(SOME_TOPIC, bytes);

    // Then:
    assertThat(result, is(expectedOrder
        .put(ORDERTIME, null)
        .put(ORDERID, null)
        .put(ITEMID, null)
        .put(ORDERUNITS, null)
        .put(ARRAYCOL, Arrays.asList(0.0, null))
        .put(MAPCOL, mapValue)
        .put(CASE_SENSITIVE_FIELD, null)
        .put(TIMEFIELD, null)
        .put(DATEFIELD, null)
        .put(TIMESTAMPFIELD, null)
        .put(BYTESFIELD, null)
    ));
}
public static MapBuilder<Schema> map() { return builder().map(); }
@Test void map() { Schema intSchema = Schema.create(Schema.Type.INT); Schema expected = Schema.createMap(intSchema); Schema schema1 = SchemaBuilder.map().values().intType(); assertEquals(expected, schema1); Schema schema2 = SchemaBuilder.map().values(intSchema); assertEquals(expected, schema2); Schema schema3 = SchemaBuilder.map().prop("p", "v").values().type("int"); expected.addProp("p", "v"); assertEquals(expected, schema3); }
public boolean poll(Timer timer, boolean waitForJoinGroup) {
    maybeUpdateSubscriptionMetadata();

    invokeCompletedOffsetCommitCallbacks();

    if (subscriptions.hasAutoAssignedPartitions()) {
        if (protocol == null) {
            throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG +
                " to empty while trying to subscribe for group protocol to auto assign partitions");
        }
        // Always update the heartbeat last poll time so that the heartbeat thread does not leave the
        // group proactively due to application inactivity even if (say) the coordinator cannot be found.
        pollHeartbeat(timer.currentTimeMs());
        if (coordinatorUnknownAndUnreadySync(timer)) {
            return false;
        }

        if (rejoinNeededOrPending()) {
            // due to a race condition between the initial metadata fetch and the initial rebalance,
            // we need to ensure that the metadata is fresh before joining initially. This ensures
            // that we have matched the pattern against the cluster's topics at least once before joining.
            if (subscriptions.hasPatternSubscription()) {
                // For consumer group that uses pattern-based subscription, after a topic is created,
                // any consumer that discovers the topic after metadata refresh can trigger rebalance
                // across the entire consumer group. Multiple rebalances can be triggered after one topic
                // creation if consumers refresh metadata at vastly different times. We can significantly
                // reduce the number of rebalances caused by single topic creation by asking consumer to
                // refresh metadata before re-joining the group as long as the refresh backoff time has
                // passed.
                if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
                    this.metadata.requestUpdate(true);
                }

                if (!client.ensureFreshMetadata(timer)) {
                    return false;
                }

                maybeUpdateSubscriptionMetadata();
            }

            // if not wait for join group, we would just use a timer of 0
            if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
                // since we may use a different timer in the callee, we'd still need
                // to update the original timer's current time after the call
                timer.update(time.milliseconds());

                return false;
            }
        }
    } else {
        // For manually assigned partitions, we do not try to pro-actively lookup coordinator;
        // instead we only try to refresh metadata when necessary.
        // If connections to all nodes fail, wakeups triggered while attempting to send fetch
        // requests result in polls returning immediately, causing a tight loop of polls. Without
        // the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
        // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
        if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
            client.awaitMetadataUpdate(timer);
        }

        // if there are pending coordinator requests, ensure they have a chance to be transmitted.
        client.pollNoWakeup();
    }

    maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
    return true;
}
@Test
public void testNormalHeartbeat() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // normal heartbeat
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());

    client.prepareResponse(heartbeatResponse(Errors.NONE));
    consumerClient.poll(time.timer(0));

    assertTrue(future.isDone());
    assertTrue(future.succeeded());
}
Optional<DescriptorDigest> select(DescriptorDigest selector) throws CacheCorruptedException, IOException { Path selectorFile = cacheStorageFiles.getSelectorFile(selector); if (!Files.exists(selectorFile)) { return Optional.empty(); } String selectorFileContents = new String(Files.readAllBytes(selectorFile), StandardCharsets.UTF_8); try { return Optional.of(DescriptorDigest.fromHash(selectorFileContents)); } catch (DigestException ex) { throw new CacheCorruptedException( cacheStorageFiles.getCacheDirectory(), "Expected valid layer digest as contents of selector file `" + selectorFile + "` for selector `" + selector.getHash() + "`, but got: " + selectorFileContents); } }
@Test public void testSelect() throws IOException, CacheCorruptedException { DescriptorDigest selector = layerDigest1; Path selectorFile = cacheStorageFiles.getSelectorFile(selector); Files.createDirectories(selectorFile.getParent()); Files.write(selectorFile, layerDigest2.getHash().getBytes(StandardCharsets.UTF_8)); Optional<DescriptorDigest> selectedLayerDigest = cacheStorageReader.select(selector); Assert.assertTrue(selectedLayerDigest.isPresent()); Assert.assertEquals(layerDigest2, selectedLayerDigest.get()); }
public Column getColumn(String value) { Matcher m = PATTERN.matcher(value); if (!m.matches()) { throw new IllegalArgumentException("value " + value + " is not a valid column definition"); } String name = m.group(1); String type = m.group(6); type = type == null ? "String" : type; boolean array = (m.group(4) != null) || (m.group(7) != null); if (array) { return new ArrayColumn(name, createColumn(name, type)); } return createColumn(name, type); }
@Test public void testGetColumn() { ColumnFactory f = new ColumnFactory(); Column column = f.getColumn("column"); assertThat(column instanceof StringColumn).isTrue(); assertThat(column.getName()).isEqualTo("column"); }
@Override public T computeIfAbsent(String key, Function<String, T> supplier) { return cache.compute( key, (String k, T current) -> { if (isValidLongEnough(current)) { return current; } return supplier.apply(key); }); }
@Test
void fetchesRightOne() {
    var ttl = Duration.ofSeconds(10);
    var sut = new InMemoryCacheImpl<CacheEntry>(Clock.fixed(NOW, ZoneId.of("UTC")), ttl);
    var source = IntStream.range(0, 10)
            .mapToObj(i -> CacheEntry.of(Integer.toString(i), NOW.plusSeconds(60)))
            .collect(Collectors.toMap(CacheEntry::value, Function.identity()));
    var rand = new Random(42L);
    for (int i = 0; i < 30; i++) {
        var key = Integer.toString(rand.nextInt(source.size()));

        // when
        var r = sut.computeIfAbsent(key, source::get);

        // then
        assertEquals(key, r.value());
    }
}
public StreamDestinationFilterRuleDTO createForStream(String streamId, StreamDestinationFilterRuleDTO dto) {
    if (!isBlank(dto.id())) {
        throw new IllegalArgumentException("id must be blank");
    }

    // We don't want to allow the creation of a filter rule for a different stream, so we enforce the stream ID.
    final var dtoId = insertedId(collection.insertOne(dto.withStream(streamId)));

    clusterEventBus.post(StreamDestinationFilterUpdatedEvent.of(dtoId.toHexString()));

    return utils.getById(dtoId)
            .orElseThrow(() -> new IllegalArgumentException(f("Couldn't insert document: %s", dto)));
}
@Test void createForStreamWithExistingID() { final StreamDestinationFilterRuleDTO dto = StreamDestinationFilterRuleDTO.builder() .id("54e3deadbeefdeadbeef0000") .title("Test") .description("A Test") .streamId("stream-1") .destinationType("indexer") .status(StreamDestinationFilterRuleDTO.Status.DISABLED) .rule(RuleBuilder.builder() .operator(RuleBuilderStep.Operator.AND) .conditions(List.of( RuleBuilderStep.builder() .function("has_field") .parameters(Map.of("field", "is_debug")) .build() )) .build()) .build(); assertThatThrownBy(() -> service.createForStream("stream-1", dto)).isInstanceOf(IllegalArgumentException.class); }
@ExecuteOn(TaskExecutors.IO) @Delete(uri = "{key}") @Operation(tags = {"KV"}, summary = "Delete a key-value pair") public boolean delete( @Parameter(description = "The namespace id") @PathVariable String namespace, @Parameter(description = "The key") @PathVariable String key ) throws IOException, URISyntaxException, ResourceExpiredException { return kvStore(namespace).delete(key); }
@Test void delete() throws IOException { storageInterface.put( null, toKVUri(NAMESPACE, "my-key"), new StorageObject( Map.of("expirationDate", Instant.now().plus(Duration.ofMinutes(5)).toString()), new ByteArrayInputStream("\"content\"".getBytes()) ) ); assertThat(storageInterface.exists(null, toKVUri(NAMESPACE, "my-key")), is(true)); client.toBlocking().exchange(HttpRequest.DELETE("/api/v1/namespaces/" + NAMESPACE + "/kv/my-key")); assertThat(storageInterface.exists(null, toKVUri(NAMESPACE, "my-key")), is(false)); }
public Serde<GenericKey> buildKeySerde( final FormatInfo format, final PhysicalSchema schema, final QueryContext queryContext ) { final String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext); schemas.trackKeySerdeCreation( loggerNamePrefix, schema.logicalSchema(), KeyFormat.nonWindowed(format, schema.keySchema().features()) ); return keySerdeFactory.create( format, schema.keySchema(), ksqlConfig, serviceContext.getSchemaRegistryClientFactory(), loggerNamePrefix, processingLogContext, getSerdeTracker(loggerNamePrefix) ); }
@Test
public void shouldBuildNonWindowedKeySerde() {
    // When:
    runtimeBuildContext.buildKeySerde(
        FORMAT_INFO,
        PHYSICAL_SCHEMA,
        queryContext
    );

    // Then:
    verify(keySerdeFactory).create(
        FORMAT_INFO,
        PHYSICAL_SCHEMA.keySchema(),
        ksqlConfig,
        srClientFactory,
        QueryLoggerUtil.queryLoggerName(QUERY_ID, queryContext),
        processingLogContext,
        Optional.empty()
    );
}
public static InetSocketAddress parseAddress(String address, int defaultPort) { return parseAddress(address, defaultPort, false); }
@Test void shouldParseAddressForIPv6WithoutBrackets() { InetSocketAddress socketAddress = AddressUtils.parseAddress("1abc:2abc:3abc:0:0:0:5abc:6abc", 80); assertThat(socketAddress.isUnresolved()).isFalse(); assertThat(socketAddress.getAddress().getHostAddress()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc"); assertThat(socketAddress.getPort()).isEqualTo(80); assertThat(socketAddress.getHostString()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc"); }
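A sketch of the default-port behavior implied by the two-argument overload above, assuming the usual host[:port] parsing (the host name here is illustrative):

@Test
void shouldUseDefaultPortWhenNoneGiven() {
    // With no explicit port in the address, the supplied default should be used
    InetSocketAddress socketAddress = AddressUtils.parseAddress("example.com", 8080);
    assertThat(socketAddress.getPort()).isEqualTo(8080);
    assertThat(socketAddress.getHostString()).isEqualTo("example.com");
}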
public int runInteractively() {
    displayWelcomeMessage();
    RemoteServerSpecificCommand.validateClient(terminal.writer(), restClient);
    boolean eof = false;
    while (!eof) {
        try {
            handleLine(nextNonCliCommand());
        } catch (final EndOfFileException exception) {
            // EOF is fine, just terminate the REPL
            terminal.writer().println("Exiting ksqlDB.");
            eof = true;
        } catch (final Exception exception) {
            LOGGER.error("An error occurred while running a command. Error = "
                + exception.getMessage(), exception);
            terminal.printError(ErrorMessageUtil.buildErrorMessage(exception), exception.toString());
        }
        terminal.flush();
    }
    return NO_ERROR;
}
@Test public void shouldPrintErrorOnUnsupportedAPI() throws Exception { givenRunInteractivelyWillExit(); final KsqlRestClient mockRestClient = givenMockRestClient(); when(mockRestClient.getServerInfo()).thenReturn( RestResponse.erroneous( NOT_ACCEPTABLE.code(), new KsqlErrorMessage( Errors.toErrorCode(NOT_ACCEPTABLE.code()), "Minimum supported client version: 1.0") )); new Cli(1L, 1L, mockRestClient, console) .runInteractively(); assertThat( terminal.getOutputString(), containsString("This CLI version no longer supported")); assertThat( terminal.getOutputString(), containsString("Minimum supported client version: 1.0")); }
public OffsetPosition entry(int n) { return maybeLock(lock, () -> { if (n >= entries()) throw new IllegalArgumentException("Attempt to fetch the " + n + "th entry from index " + file().getAbsolutePath() + ", which has size " + entries()); return parseEntry(mmap(), n); }); }
@Test public void testEntryOverflow() { assertThrows(IllegalArgumentException.class, () -> index.entry(0)); }
@Override
public KsMaterializedQueryResult<Row> get(
    final GenericKey key,
    final int partition,
    final Optional<Position> position
) {
    try {
        final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);

        StateQueryRequest<ValueAndTimestamp<GenericRow>> request = inStore(stateStore.getStateStoreName())
            .withQuery(query)
            .withPartitions(ImmutableSet.of(partition));

        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }

        final StateQueryResult<ValueAndTimestamp<GenericRow>> result =
            stateStore.getKafkaStreams().query(request);

        final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
            result.getPartitionResults().get(partition);

        // Some of these failures are retriable, and in the future, we may want to retry
        // locally before throwing.
        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        } else if (queryResult.getResult() == null) {
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        } else {
            final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
                    .iterator(),
                queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
@Test
public void shouldThrowIfQueryFails() {
    // Given:
    when(stateStore.getKafkaStreams().query(any())).thenThrow(new MaterializationTimeOutException("Boom"));

    // When:
    final Exception e = assertThrows(
        MaterializationException.class,
        () -> table.get(A_KEY, PARTITION)
    );

    // Then:
    assertThat(e.getMessage(), containsString("Boom"));
    assertThat(e, (instanceOf(MaterializationTimeOutException.class)));
}
public boolean authenticate(LDAPConnection connection, String bindDn, EncryptedValue password) throws LDAPException { checkArgument(!isNullOrEmpty(bindDn), "Binding with empty principal is forbidden."); checkArgument(password != null, "Binding with null credentials is forbidden."); checkArgument(password.isSet(), "Binding with empty credentials is forbidden."); final SimpleBindRequest bindRequest = new SimpleBindRequest(bindDn, encryptedValueService.decrypt(password)); LOG.trace("Re-binding with DN <{}> using password", bindDn); try { final BindResult bind = connection.bind(bindRequest); if (!bind.getResultCode().equals(ResultCode.SUCCESS)) { LOG.trace("Re-binding DN <{}> failed", bindDn); throw new RuntimeException(bind.toString()); } final boolean authenticated = connection.getLastBindRequest().equals(bindRequest); LOG.trace("Binding DN <{}> did not throw, connection authenticated: {}", bindDn, authenticated); return authenticated; } catch (LDAPBindException e) { LOG.trace("Re-binding DN <{}> failed", bindDn); return false; } }
@Test public void authenticateThrowsIllegalArgumentExceptionIfPrincipalIsEmpty() throws LDAPException { assertThatThrownBy(() -> connector.authenticate(connection, "", encryptedValueService.encrypt("secret"))) .hasMessageContaining("Binding with empty principal is forbidden.") .isInstanceOf(IllegalArgumentException.class); }
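The method above also guards against null and unset credentials; a minimal sketch of the null-credentials branch (the DN is a hypothetical placeholder, and this is not from the original suite):

@Test
public void authenticateThrowsIllegalArgumentExceptionIfPasswordIsNull() throws LDAPException {
    // A hypothetical DN; only the null password matters for this guard
    assertThatThrownBy(() -> connector.authenticate(connection, "cn=user,dc=example,dc=com", null))
            .hasMessageContaining("Binding with null credentials is forbidden.")
            .isInstanceOf(IllegalArgumentException.class);
}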
@Override @SuppressWarnings("UseOfSystemOutOrSystemErr") public void run(Namespace namespace, Liquibase liquibase) throws Exception { final Set<Class<? extends DatabaseObject>> compareTypes = new HashSet<>(); if (isTrue(namespace.getBoolean("columns"))) { compareTypes.add(Column.class); } if (isTrue(namespace.getBoolean("data"))) { compareTypes.add(Data.class); } if (isTrue(namespace.getBoolean("foreign-keys"))) { compareTypes.add(ForeignKey.class); } if (isTrue(namespace.getBoolean("indexes"))) { compareTypes.add(Index.class); } if (isTrue(namespace.getBoolean("primary-keys"))) { compareTypes.add(PrimaryKey.class); } if (isTrue(namespace.getBoolean("sequences"))) { compareTypes.add(Sequence.class); } if (isTrue(namespace.getBoolean("tables"))) { compareTypes.add(Table.class); } if (isTrue(namespace.getBoolean("unique-constraints"))) { compareTypes.add(UniqueConstraint.class); } if (isTrue(namespace.getBoolean("views"))) { compareTypes.add(View.class); } final DiffToChangeLog diffToChangeLog = new DiffToChangeLog(new DiffOutputControl()); final Database database = liquibase.getDatabase(); final String filename = namespace.getString("output"); if (filename != null) { try (PrintStream file = new PrintStream(filename, StandardCharsets.UTF_8.name())) { generateChangeLog(database, database.getDefaultSchema(), diffToChangeLog, file, compareTypes); } } else { generateChangeLog(database, database.getDefaultSchema(), diffToChangeLog, outputStream, compareTypes); } }
@Test
void testWriteToFile() throws Exception {
    final File file = File.createTempFile("migration", ".xml");
    dumpCommand.run(null, new Namespace(Collections.singletonMap("output", file.getAbsolutePath())), existedDbConf);
    // Check that the file exists and has some XML content (no reason to make a full-blown XML assertion)
    assertThat(file).content(UTF_8)
        .startsWith("<?xml version=\"1.1\" encoding=\"UTF-8\" standalone=\"no\"?>");
}
static CatalogLoader createCatalogLoader(
        String name, Map<String, String> properties, Configuration hadoopConf) {
    String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL);
    if (catalogImpl != null) {
        String catalogType = properties.get(ICEBERG_CATALOG_TYPE);
        Preconditions.checkArgument(
            catalogType == null,
            "Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s",
            name, catalogType, catalogImpl);
        return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl);
    }

    String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE);
    switch (catalogType.toLowerCase(Locale.ENGLISH)) {
        case ICEBERG_CATALOG_TYPE_HIVE:
            // The values of properties 'uri', 'warehouse', 'hive-conf-dir' are allowed to be null;
            // in that case it will fall back to parsing those values from the hadoop configuration
            // which is loaded from the classpath.
            String hiveConfDir = properties.get(HIVE_CONF_DIR);
            String hadoopConfDir = properties.get(HADOOP_CONF_DIR);
            Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir);
            return CatalogLoader.hive(name, newHadoopConf, properties);

        case ICEBERG_CATALOG_TYPE_HADOOP:
            return CatalogLoader.hadoop(name, hadoopConf, properties);

        case ICEBERG_CATALOG_TYPE_REST:
            return CatalogLoader.rest(name, hadoopConf, properties);

        default:
            throw new UnsupportedOperationException(
                "Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')");
    }
}
@Test public void testLoadCatalogUnknown() { String catalogName = "unknownCatalog"; props.put(FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, "fooType"); assertThatThrownBy( () -> FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration())) .isInstanceOf(UnsupportedOperationException.class) .hasMessageStartingWith("Unknown catalog-type: fooType"); }
@Override public CiConfiguration loadConfiguration() { String revision = system2.envVariable("BITRISE_GIT_COMMIT"); return new CiConfigurationImpl(revision, getName()); }
@Test public void loadConfiguration() { setEnvVariable("CI", "true"); setEnvVariable("BITRISE_IO", "true"); setEnvVariable("BITRISE_GIT_COMMIT", "abd12fc"); assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("abd12fc"); }
@ScalarFunction
@LiteralParameters("x")
@SqlType(ColorType.NAME)
public static long color(@SqlType("varchar(x)") Slice color) {
    int rgb = parseRgb(color);
    if (rgb != -1) {
        return rgb;
    }
    // encode system colors (0-15) as negative values, offset by one
    try {
        SystemColor systemColor = SystemColor.valueOf(upper(color).toStringUtf8());
        int index = systemColor.getIndex();
        return -(index + 1);
    } catch (IllegalArgumentException e) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("Invalid color: '%s'", color.toStringUtf8()), e);
    }
}
@Test public void testColor() { assertEquals(color(toSlice("black")), -1); assertEquals(color(toSlice("red")), -2); assertEquals(color(toSlice("green")), -3); assertEquals(color(toSlice("yellow")), -4); assertEquals(color(toSlice("blue")), -5); assertEquals(color(toSlice("magenta")), -6); assertEquals(color(toSlice("cyan")), -7); assertEquals(color(toSlice("white")), -8); assertEquals(color(toSlice("#f00")), 0xFF_00_00); assertEquals(color(toSlice("#0f0")), 0x00_FF_00); assertEquals(color(toSlice("#00f")), 0x00_00_FF); }
public static boolean isContentType(String contentType, Message message) { if (contentType == null) { return message.getContentType() == null; } else { return contentType.equals(message.getContentType()); } }
@Test public void testIsContentTypeWithNonNullStringValueAndNullMessageContentType() { Message message = Proton.message(); assertFalse(AmqpMessageSupport.isContentType("test", message)); }
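A sketch of the symmetric null case handled by the method above (not from the original suite): a freshly created message carries no content type, so a null expectation matches.

@Test
public void testIsContentTypeWithNullStringValueAndNullMessageContentType() {
    Message message = Proton.message();
    // Both sides null: the method treats this as a match
    assertTrue(AmqpMessageSupport.isContentType(null, message));
}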
@Override public NativeEntity<GrokPattern> createNativeEntity(Entity entity, Map<String, ValueReference> parameters, Map<EntityDescriptor, Object> nativeEntities, String username) { if (entity instanceof EntityV1) { return decode((EntityV1) entity); } else { throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass()); } }
@Test public void createNativeEntity() throws NotFoundException { final Entity grokPatternEntity = EntityV1.builder() .id(ModelId.of("1")) .type(ModelTypes.GROK_PATTERN_V1) .data(objectMapper.convertValue(GrokPatternEntity.create("Test","[a-z]+"), JsonNode.class)) .build(); final NativeEntity<GrokPattern> nativeEntity = facade.createNativeEntity(grokPatternEntity, Collections.emptyMap(), Collections.emptyMap(), "admin"); final GrokPattern expectedGrokPattern = GrokPattern.create("1", "Test", "[a-z]+", null); final NativeEntityDescriptor expectedDescriptor = NativeEntityDescriptor.create("1", "1", ModelTypes.GROK_PATTERN_V1, "Test"); assertThat(nativeEntity.descriptor().title()).isEqualTo(expectedDescriptor.title()); assertThat(nativeEntity.descriptor().type()).isEqualTo(expectedDescriptor.type()); assertThat(nativeEntity.entity()).isEqualTo(expectedGrokPattern); assertThat(grokPatternService.load("1")).isEqualTo(expectedGrokPattern); }
private void jobAlreadyDone(UUID leaderSessionId) { LOG.info( "{} for job {} was granted leadership with leader id {}, but job was already done.", getClass().getSimpleName(), getJobID(), leaderSessionId); resultFuture.complete( JobManagerRunnerResult.forSuccess( new ExecutionGraphInfo( jobMasterServiceProcessFactory.createArchivedExecutionGraph( JobStatus.FAILED, new JobAlreadyDoneException(getJobID()))))); }
@Test void testJobAlreadyDone() throws Exception { final JobID jobId = new JobID(); final JobResult jobResult = TestingJobResultStore.createJobResult(jobId, ApplicationStatus.UNKNOWN); jobResultStore.createDirtyResultAsync(new JobResultEntry(jobResult)).get(); try (JobManagerRunner jobManagerRunner = newJobMasterServiceLeadershipRunnerBuilder() .setJobMasterServiceProcessFactory( TestingJobMasterServiceProcessFactory.newBuilder() .setJobId(jobId) .build()) .build()) { jobManagerRunner.start(); leaderElection.isLeader(UUID.randomUUID()); final CompletableFuture<JobManagerRunnerResult> resultFuture = jobManagerRunner.getResultFuture(); JobManagerRunnerResult result = resultFuture.get(); assertThat(result.getExecutionGraphInfo().getArchivedExecutionGraph().getState()) .isEqualTo(JobStatus.FAILED); } }
@Override
public List<Node> sniff(List<Node> nodes) {
    if (attribute == null || value == null) {
        return nodes;
    }
    return nodes.stream()
            .filter(node -> nodeMatchesFilter(node, attribute, value))
            .collect(Collectors.toList());
}
@Test void returnsAllNodesIfFilterMatchesAll() throws Exception { final List<Node> nodes = mockNodes(); final NodesSniffer nodesSniffer = new FilteredElasticsearchNodesSniffer("always", "true"); assertThat(nodesSniffer.sniff(nodes)).isEqualTo(nodes); }
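nodeMatchesFilter is not shown in this pair; a plausible sketch, assuming the node's attributes are exposed as a Map<String, List<String>> (as in the Elasticsearch low-level REST client's Node#getAttributes):

import java.util.Collections;
import java.util.List;
import java.util.Map;

// Plausible shape of the predicate used by sniff(): a node passes when the
// named attribute carries the expected value. Purely illustrative.
private static boolean nodeMatchesFilter(Node node, String attribute, String value) {
    final Map<String, List<String>> attributes = node.getAttributes();
    return attributes != null
            && attributes.getOrDefault(attribute, Collections.emptyList()).contains(value);
}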
public static DoFnInstanceManager cloningPool(DoFnInfo<?, ?> info, PipelineOptions options) {
    return new ConcurrentQueueInstanceManager(info, options);
}
@Test public void testCloningPoolTearsDownAfterAbort() throws Exception { DoFnInfo<?, ?> info = DoFnInfo.forFn( initialFn, WindowingStrategy.globalDefault(), null /* side input views */, null /* input coder */, new TupleTag<>(PropertyNames.OUTPUT) /* main output id */, DoFnSchemaInformation.create(), Collections.emptyMap()); DoFnInstanceManager mgr = DoFnInstanceManagers.cloningPool(info, options); DoFnInfo<?, ?> retrievedInfo = mgr.get(); mgr.abort(retrievedInfo); TestFn fn = (TestFn) retrievedInfo.getDoFn(); assertThat(fn.tornDown, is(true)); DoFnInfo<?, ?> afterAbortInfo = mgr.get(); assertThat(afterAbortInfo, not(Matchers.<DoFnInfo<?, ?>>theInstance(retrievedInfo))); assertThat(afterAbortInfo.getDoFn(), not(theInstance(retrievedInfo.getDoFn()))); assertThat(((TestFn) afterAbortInfo.getDoFn()).tornDown, is(false)); }
@Override
public ClassLoaderLease registerClassLoaderLease(JobID jobId) {
    synchronized (lockObject) {
        return cacheEntries
                .computeIfAbsent(jobId, jobID -> new LibraryCacheEntry(jobId))
                .obtainLease();
    }
}
@Test public void differentLeasesForSameJob_returnSameClassLoader() throws IOException { final BlobLibraryCacheManager libraryCacheManager = createSimpleBlobLibraryCacheManager(); final JobID jobId = new JobID(); final LibraryCacheManager.ClassLoaderLease classLoaderLease1 = libraryCacheManager.registerClassLoaderLease(jobId); final LibraryCacheManager.ClassLoaderLease classLoaderLease2 = libraryCacheManager.registerClassLoaderLease(jobId); final UserCodeClassLoader classLoader1 = classLoaderLease1.getOrResolveClassLoader( Collections.emptyList(), Collections.emptyList()); final UserCodeClassLoader classLoader2 = classLoaderLease2.getOrResolveClassLoader( Collections.emptyList(), Collections.emptyList()); assertThat(classLoader1, sameInstance(classLoader2)); }
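A generic, self-contained sketch (names are illustrative, not Flink's) of the pattern above: computeIfAbsent under a shared lock funnels every lease for the same key through one entry, which is why both leases in the test resolve the same class loader.

import java.util.HashMap;
import java.util.Map;

// Illustrative one-entry-per-key lease registry: concurrent callers that
// register the same key share a single entry, so the resource they later
// resolve is the same instance.
final class LeaseRegistry<K, V> {
    private final Object lock = new Object();
    private final Map<K, Entry<V>> entries = new HashMap<>();

    static final class Entry<V> {
        int leaseCount;
        V resource; // lazily resolved, shared by all leases on this key
    }

    Entry<V> obtainLease(K key) {
        synchronized (lock) {
            Entry<V> entry = entries.computeIfAbsent(key, k -> new Entry<>());
            entry.leaseCount++;
            return entry;
        }
    }
}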
public static void dumpSystemInfo() {
    dumpSystemInfo(new PrintWriter(System.out));
}
@Test
@Disabled
public void dumpTest() {
    SystemUtil.dumpSystemInfo();
}
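A hedged stand-in for what such a dump could look like; the property loop is plain JDK, and the exact content and format of SystemUtil.dumpSystemInfo may differ.

import java.io.PrintWriter;

// Minimal illustrative dump: write every JVM system property through the
// supplied writer, flushing so output is not lost when wrapping System.out.
static void dumpSystemProperties(PrintWriter writer) {
    System.getProperties().forEach((key, value) -> writer.println(key + " = " + value));
    writer.flush();
}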
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final FileEntity entity = new FilesApi(new BrickApiClient(session)) .download(StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)), null, null, null, null); final HttpUriRequest request = new HttpGet(entity.getDownloadUri()); if(status.isAppend()) { final HttpRange range = HttpRange.withStatus(status); final String header; if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) { header = String.format("bytes=%d-", range.getStart()); } else { header = String.format("bytes=%d-%d", range.getStart(), range.getEnd()); } if(log.isDebugEnabled()) { log.debug(String.format("Add range header %s for file %s", header, file)); } request.addHeader(new BasicHeader(HttpHeaders.RANGE, header)); // Disable compression request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity")); } final HttpResponse response = session.getClient().execute(request); switch(response.getStatusLine().getStatusCode()) { case HttpStatus.SC_OK: case HttpStatus.SC_PARTIAL_CONTENT: return new HttpMethodReleaseInputStream(response, status); default: throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException( response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file); } } catch(ApiException e) { throw new BrickExceptionMappingService().map("Download {0} failed", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file); } }
@Test(expected = NotfoundException.class) public void testReadNotFound() throws Exception { final TransferStatus status = new TransferStatus(); final Path room = new BrickDirectoryFeature(session).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); try { new BrickReadFeature(session).read(new Path(room, "nosuchname", EnumSet.of(Path.Type.file)), status, new DisabledConnectionCallback()); } finally { new BrickDeleteFeature(session).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); } }
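The two header shapes assembled in the read method follow RFC 7233 byte ranges; a tiny sketch, independent of the Brick client:

// Open-ended range when the total length is unknown ("bytes=1024-"),
// closed range otherwise ("bytes=1024-2047"), matching the two formats
// built for the Range header above.
static String rangeHeader(long start, long end, boolean lengthKnown) {
    return lengthKnown
            ? String.format("bytes=%d-%d", start, end)
            : String.format("bytes=%d-", start);
}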
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Map<?, ?> expectedMap) {
    if (expectedMap.isEmpty()) {
        if (checkNotNull(actual).isEmpty()) {
            return IN_ORDER;
        } else {
            isEmpty(); // fails
            return ALREADY_FAILED;
        }
    }
    boolean containsAnyOrder = containsEntriesInAnyOrder(expectedMap, /* allowUnexpected= */ false);
    if (containsAnyOrder) {
        return new MapInOrder(expectedMap, /* allowUnexpected= */ false, /* correspondence= */ null);
    } else {
        return ALREADY_FAILED;
    }
}
@Test
public void containsExactlyEntriesInEmpty_fails() {
    ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1);
    expectFailureWhenTestingThat(actual).containsExactlyEntriesIn(ImmutableMap.of());
    assertFailureKeys("expected to be empty", "but was");
}
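A passing-case sketch for the assertion above; containsExactlyEntriesIn returns an Ordered (visible in the method's signature), so inOrder() can be chained when iteration order also matters.

@Test
public void containsExactlyEntriesIn_passingSketch() {
    ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "feb", 2);
    // Any entry order is accepted by default...
    assertThat(actual).containsExactlyEntriesIn(ImmutableMap.of("feb", 2, "jan", 1));
    // ...and inOrder() additionally verifies iteration order.
    assertThat(actual).containsExactlyEntriesIn(ImmutableMap.of("jan", 1, "feb", 2)).inOrder();
}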
@Override
public OAuth2ClientDO getOAuth2Client(Long id) {
    return oauth2ClientMapper.selectById(id);
}
@Test
public void testGetOAuth2Client() {
    // mock data
    OAuth2ClientDO clientDO = randomPojo(OAuth2ClientDO.class);
    oauth2ClientMapper.insert(clientDO);
    // prepare parameters
    Long id = clientDO.getId();
    // call and assert
    OAuth2ClientDO dbClientDO = oauth2ClientService.getOAuth2Client(id);
    assertPojoEquals(clientDO, dbClientDO);
}
@Override public void handle(TaskEvent event) { if (LOG.isDebugEnabled()) { LOG.debug("Processing " + event.getTaskID() + " of type " + event.getType()); } try { writeLock.lock(); TaskStateInternal oldState = getInternalState(); try { stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitionException e) { LOG.error("Can't handle this event at current state for " + this.taskId, e); internalError(event.getType()); } if (oldState != getInternalState()) { LOG.info(taskId + " Task Transitioned from " + oldState + " to " + getInternalState()); } } finally { writeLock.unlock(); } }
/**
 * Kill map attempt for succeeded map task
 * {@link TaskState#SUCCEEDED}->{@link TaskState#SCHEDULED}
 */
@Test
public void testKillAttemptForSuccessfulTask() {
    LOG.info("--- START: testKillAttemptForSuccessfulTask ---");
    mockTask = createMockTask(TaskType.MAP);
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
    commitTaskAttempt(getLastAttempt().getAttemptId());
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
        TaskEventType.T_ATTEMPT_SUCCEEDED));
    assertTaskSucceededState();
    mockTask.handle(
        new TaskTAttemptKilledEvent(getLastAttempt().getAttemptId(), true));
    assertEquals(TaskAttemptEventType.TA_RESCHEDULE,
        taskAttemptEventHandler.lastTaskAttemptEvent.getType());
    assertTaskScheduledState();
}
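A generic, self-contained sketch of the guarded-transition shape used by handle(): take the write lock, snapshot the old state, map a failed transition to an error state, and report only genuine state changes. Names and types are illustrative, not Hadoop's.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.UnaryOperator;

// Illustrative guarded state holder: all transitions run under the write
// lock, and failures fall back to a designated error state.
final class GuardedState<S> {
    private final Lock writeLock = new ReentrantReadWriteLock().writeLock();
    private volatile S state;

    GuardedState(S initial) {
        this.state = initial;
    }

    void transition(UnaryOperator<S> transition, S errorState) {
        writeLock.lock();
        try {
            S oldState = state;
            try {
                state = transition.apply(oldState);
            } catch (RuntimeException e) {
                state = errorState; // analogous to internalError(event.getType())
            }
            if (oldState != state) {
                System.out.println("transitioned from " + oldState + " to " + state);
            }
        } finally {
            writeLock.unlock();
        }
    }
}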
@Override public Mono<DeleteAccountResponse> deleteAccount(final DeleteAccountRequest request) { final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedPrimaryDevice(); return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier())) .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException)) .flatMap(account -> Mono.fromFuture(() -> accountsManager.delete(account, AccountsManager.DeletionReason.USER_REQUEST))) .thenReturn(DeleteAccountResponse.newBuilder().build()); }
@Test void deleteAccount() { final Account account = mock(Account.class); when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI)) .thenReturn(CompletableFuture.completedFuture(Optional.of(account))); when(accountsManager.delete(any(), any())) .thenReturn(CompletableFuture.completedFuture(null)); final DeleteAccountResponse ignored = authenticatedServiceStub().deleteAccount(DeleteAccountRequest.newBuilder().build()); verify(accountsManager).delete(account, AccountsManager.DeletionReason.USER_REQUEST); }
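A minimal Reactor sketch (illustrative names, not Signal's API) of the lookup-then-act chain in deleteAccount: resolve an async Optional, fail the stream when it is empty, run a follow-up async action, then map to a fixed response.

import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import reactor.core.publisher.Mono;

// Illustrative chain mirroring the shape above; the action stands in for
// accountsManager.delete and the String reply for DeleteAccountResponse.
static Mono<String> lookupThenAct(CompletableFuture<Optional<String>> lookup,
                                  Function<String, CompletableFuture<Void>> action) {
    return Mono.fromFuture(() -> lookup)
            .map(maybe -> maybe.orElseThrow(IllegalStateException::new))
            .flatMap(value -> Mono.fromFuture(() -> action.apply(value)))
            .thenReturn("done");
}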
public static int availableProcessors() {
    return org.infinispan.commons.jdkspecific.ProcessorInfo.availableProcessors();
}
@Test
@Category(Java11.class)
public void testCPUCount() {
    assertTrue(ProcessorInfo.availableProcessors() <= Runtime.getRuntime().availableProcessors());
}
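A small usage sketch; the test's <= bound allows the delegate to report fewer processors than the raw JVM count (for example under container CPU quotas, an assumption about the motivation rather than something stated here).

// Size a worker pool from the possibly-capped processor count, never
// exceeding what the JVM itself reports.
static int defaultPoolSize() {
    int usable = ProcessorInfo.availableProcessors();
    int jvmReported = Runtime.getRuntime().availableProcessors();
    return Math.min(usable, jvmReported);
}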
@Override
public RuleNodePath getRuleNodePath() {
    return INSTANCE;
}
@Test void assertNew() { RuleNodePathProvider ruleNodePathProvider = new MaskRuleNodePathProvider(); RuleNodePath actualRuleNodePath = ruleNodePathProvider.getRuleNodePath(); assertThat(actualRuleNodePath.getNamedItems().size(), is(2)); assertTrue(actualRuleNodePath.getNamedItems().containsKey(MaskRuleNodePathProvider.MASK_ALGORITHMS)); assertTrue(actualRuleNodePath.getNamedItems().containsKey(MaskRuleNodePathProvider.TABLES)); assertTrue(actualRuleNodePath.getUniqueItems().isEmpty()); assertThat(actualRuleNodePath.getRoot().getRuleType(), is(MaskRuleNodePathProvider.RULE_TYPE)); }