Columns: focal_method (string, lengths 13 to 60.9k) · test_case (string, lengths 25 to 109k)
@Override public void syncHoodieTable() { switch (bqSyncClient.getTableType()) { case COPY_ON_WRITE: case MERGE_ON_READ: syncTable(bqSyncClient); break; default: throw new UnsupportedOperationException(bqSyncClient.getTableType() + " table type is not supported yet."); } }
@Test void useBQManifestFile_newTableNonPartitioned() { properties.setProperty(BigQuerySyncConfig.BIGQUERY_SYNC_USE_BQ_MANIFEST_FILE.key(), "true"); when(mockBqSyncClient.getTableType()).thenReturn(HoodieTableType.COPY_ON_WRITE); when(mockBqSyncClient.getBasePath()).thenReturn(TEST_TABLE_BASE_PATH); when(mockBqSyncClient.datasetExists()).thenReturn(true); when(mockBqSyncClient.tableNotExistsOrDoesNotMatchSpecification(TEST_TABLE)).thenReturn(true); Path manifestPath = new Path("file:///local/path"); when(mockManifestFileWriter.getManifestSourceUri(true)).thenReturn(manifestPath.toUri().getPath()); when(mockBqSchemaResolver.getTableSchema(any(), eq(Collections.emptyList()))).thenReturn(schema); BigQuerySyncTool tool = new BigQuerySyncTool(properties, mockManifestFileWriter, mockBqSyncClient, mockMetaClient, mockBqSchemaResolver); tool.syncHoodieTable(); verify(mockBqSyncClient).createOrUpdateTableUsingBqManifestFile(TEST_TABLE, manifestPath.toUri().getPath(), null, schema); verify(mockManifestFileWriter).writeManifestFile(true); }
public static ThreadFactory namedThreads(String pattern) { return new ThreadFactoryBuilder() .setNameFormat(pattern) .setUncaughtExceptionHandler((t, e) -> log.error("Uncaught exception on " + t.getName(), e)) .build(); }
@Test public void namedThreads() { ThreadFactory f = Tools.namedThreads("foo-%d"); Thread t = f.newThread(() -> TestTools.print("yo")); assertTrue("wrong pattern", t.getName().startsWith("foo-")); }
private Set<ConfigKey<?>> listConfigs(Application application, ConfigKey<?> keyToMatch, boolean recursive) { Set<ConfigKey<?>> ret = new LinkedHashSet<>(); for (ConfigKey<?> key : application.allConfigsProduced()) { String configId = key.getConfigId(); if (recursive) { key = new ConfigKey<>(key.getName(), configId, key.getNamespace()); } else { // Include first part of id as id key = new ConfigKey<>(key.getName(), configId.split("/")[0], key.getNamespace()); } if (keyToMatch != null) { String n = key.getName(); // Never null String ns = key.getNamespace(); // Never null if (n.equals(keyToMatch.getName()) && ns.equals(keyToMatch.getNamespace()) && configId.startsWith(keyToMatch.getConfigId()) && !(configId.equals(keyToMatch.getConfigId()))) { if (!recursive) { // For non-recursive, include the id segment we were searching for, and first part of the rest key = new ConfigKey<>(key.getName(), appendOneLevelOfId(keyToMatch.getConfigId(), configId), key.getNamespace()); } ret.add(key); } } else { ret.add(key); } } return ret; }
@Test public void testListConfigs() throws IOException, SAXException { TenantApplications applications = createTenantApplications(TenantName.defaultName(), curator, configserverConfig, new MockConfigActivationListener(), new InMemoryFlagSource()); assertFalse(applications.hasApplication(ApplicationId.defaultId(), Optional.of(vespaVersion))); VespaModel model = new VespaModel(FilesApplicationPackage.fromFile(new File("src/test/apps/app"))); ApplicationId applicationId = ApplicationId.defaultId(); applications.createApplication(applicationId); writeActiveTransaction(applications, applicationId, 1); applications.activateApplication(ApplicationVersions.from(new Application(model, new ServerCache(), 1, vespaVersion, MetricUpdater.createTestUpdater(), applicationId)), 1); Set<ConfigKey<?>> configNames = applications.listConfigs(applicationId, Optional.of(vespaVersion), false); assertTrue(configNames.contains(new ConfigKey<>("sentinel", "hosts", "cloud.config"))); configNames = applications.listConfigs(ApplicationId.defaultId(), Optional.of(vespaVersion), true); assertTrue(configNames.contains(new ConfigKey<>("documentmanager", "container", "document.config"))); assertTrue(configNames.contains(new ConfigKey<>("documentmanager", "", "document.config"))); assertTrue(configNames.contains(new ConfigKey<>("documenttypes", "", "document.config"))); assertTrue(configNames.contains(new ConfigKey<>("documentmanager", "container", "document.config"))); assertTrue(configNames.contains(new ConfigKey<>("health-monitor", "container", "container.jdisc.config"))); assertTrue(configNames.contains(new ConfigKey<>("specific", "container", "project"))); }
public FEELFnResult<List> invoke(@ParameterName("list") List list, @ParameterName("position") BigDecimal position, @ParameterName("newItem") Object newItem) { if (list == null) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", CANNOT_BE_NULL)); } if (position == null) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", CANNOT_BE_NULL)); } int intPosition = position.intValue(); if (intPosition == 0 || Math.abs(intPosition) > list.size()) { String paramProblem = String.format("%s outside valid boundaries (1-%s)", intPosition, list.size()); return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", paramProblem)); } Object e = NumberEvalHelper.coerceNumber(newItem); List toReturn = new ArrayList(list); int replacementPosition = intPosition > 0 ? intPosition -1 : list.size() - Math.abs(intPosition); toReturn.set(replacementPosition, e); return FEELFnResult.ofResult(toReturn); }
@Test void invokePositionInvalid() { FunctionTestUtil.assertResultError(listReplaceFunction.invoke(Collections.emptyList(), BigDecimal.ONE, ""), InvalidParametersEvent.class); List list = getList(); FunctionTestUtil.assertResultError(listReplaceFunction.invoke(list, BigDecimal.ZERO, ""), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(listReplaceFunction.invoke(list, BigDecimal.valueOf(4), ""), InvalidParametersEvent.class); }
public Map<String, String> findAliasesInSQL(String[] sqlArguments) { Map<String, String> res = new HashMap<>(); for (int i = 0; i < sqlArguments.length - 1; i++) { if (columnsCompleters.keySet().contains(sqlArguments[i]) && sqlArguments[i + 1].matches("[a-zA-Z]+")) { res.put(sqlArguments[i + 1], sqlArguments[i]); } } return res; }
@Test void testFindAliasesInSQL_Simple() { String sql = "select * from prod_emart.financial_account a"; Map<String, String> res = sqlCompleter.findAliasesInSQL( delimiter.delimit(sql, 0).getArguments()); assertEquals(1, res.size()); assertEquals("prod_emart.financial_account", res.get("a")); }
public CredentialRetriever googleApplicationDefaultCredentials() { return () -> { try { if (imageReference.getRegistry().endsWith("gcr.io") || imageReference.getRegistry().endsWith("docker.pkg.dev")) { GoogleCredentials googleCredentials = googleCredentialsProvider.get(); logger.accept(LogEvent.info("Google ADC found")); if (googleCredentials.createScopedRequired()) { // not scoped if service account // The short-lived OAuth2 access token to be generated from the service account with // refreshIfExpired() below will have one-hour expiry (as of Aug 2019). Instead of using // an access token, it is technically possible to use the service account private key to // auth with GCR, but it is not worth writing complex code to achieve that. logger.accept(LogEvent.info("ADC is a service account. Setting GCS read-write scope")); List<String> scope = Collections.singletonList(OAUTH_SCOPE_STORAGE_READ_WRITE); googleCredentials = googleCredentials.createScoped(scope); } googleCredentials.refreshIfExpired(); logGotCredentialsFrom("Google Application Default Credentials"); AccessToken accessToken = googleCredentials.getAccessToken(); // https://cloud.google.com/container-registry/docs/advanced-authentication#access_token return Optional.of(Credential.from("oauth2accesstoken", accessToken.getTokenValue())); } } catch (IOException ex) { // Includes the case where ADC is simply not available. logger.accept( LogEvent.info("ADC not present or error fetching access token: " + ex.getMessage())); } return Optional.empty(); }; }
@Test public void testGoogleApplicationDefaultCredentials_refreshFailure() throws CredentialRetrievalException, IOException { Mockito.doThrow(new IOException("refresh failed")) .when(mockGoogleCredentials) .refreshIfExpired(); CredentialRetrieverFactory credentialRetrieverFactory = createCredentialRetrieverFactory("awesome.gcr.io", "repository"); Assert.assertFalse( credentialRetrieverFactory.googleApplicationDefaultCredentials().retrieve().isPresent()); Mockito.verify(mockLogger).accept(LogEvent.info("Google ADC found")); Mockito.verify(mockLogger) .accept(LogEvent.info("ADC not present or error fetching access token: refresh failed")); Mockito.verifyNoMoreInteractions(mockLogger); }
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) { ProjectMeasuresQuery query = new ProjectMeasuresQuery(); Optional.ofNullable(projectUuids).ifPresent(query::setProjectUuids); criteria.forEach(criterion -> processCriterion(criterion, query)); return query; }
@Test public void fail_to_create_query_on_tag_using_in_operator_and_value() { assertThatThrownBy(() -> { newProjectMeasuresQuery(singletonList(Criterion.builder().setKey("tags").setOperator(IN).setValue("java").build()), emptySet()); }) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Tags should be set either by using 'tags = java' or 'tags IN (finance, platform)'"); }
@Override public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) { Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:" + " URI path should not be null"); if (checkOSSCredentials(conf)) { try { return OSSUnderFileSystem.createInstance(new AlluxioURI(path), conf); } catch (Exception e) { throw Throwables.propagate(e); } } String err = "OSS Credentials not available, cannot create OSS Under File System."; throw Throwables.propagate(new IOException(err)); }
@Test public void createInstanceWithoutCredentials() { Configuration.unset(PropertyKey.OSS_ACCESS_KEY); Configuration.unset(PropertyKey.OSS_SECRET_KEY); Configuration.unset(PropertyKey.OSS_ENDPOINT_KEY); mAlluxioConf = Configuration.global(); mConf = UnderFileSystemConfiguration.defaults(mAlluxioConf); Exception e = Assert.assertThrows(RuntimeException.class, () -> mFactory.create( mOssPath, mConf)); Assert.assertTrue(e.getMessage().contains("OSS Credentials not available, " + "cannot create OSS Under File System.")); }
public void onRequest(FilterRequestContext requestContext, RestLiFilterResponseContextFactory filterResponseContextFactory) { // Initiate the filter chain iterator. The RestLiCallback will be passed to the method invoker at the end of the // filter chain. _filterChainIterator.onRequest(requestContext, filterResponseContextFactory, new RestLiCallback(requestContext, filterResponseContextFactory, this)); }
@SuppressWarnings("unchecked") @Test public void testFilterInvocationRequestErrorOnError() throws Exception { _restLiFilterChain = new RestLiFilterChain(Arrays.asList(_filters), _mockFilterChainDispatcher, _mockFilterChainCallback); _filters[1] = new CountFilterRequestErrorOnError(); when(_responseHandler.buildExceptionResponseData(eq(_method), any(RestLiServiceException.class), anyMap(), anyList())) .thenReturn(_mockRestLiResponseData); when(_mockRestLiResponseData.getResponseEnvelope()).thenReturn(mock(RestLiResponseEnvelope.class)); _restLiFilterChain.onRequest(_mockFilterRequestContext, new RestLiFilterResponseContextFactory(_request, _method, _responseHandler)); verifySecondFilterRequestException(); }
public static ExecutableStage forGrpcPortRead( QueryablePipeline pipeline, PipelineNode.PCollectionNode inputPCollection, Set<PipelineNode.PTransformNode> initialNodes) { checkArgument( !initialNodes.isEmpty(), "%s must contain at least one %s.", GreedyStageFuser.class.getSimpleName(), PipelineNode.PTransformNode.class.getSimpleName()); // Choose the environment from an arbitrary node. The initial nodes may not be empty for this // subgraph to make any sense, there has to be at least one processor node // (otherwise the stage is gRPC Read -> gRPC Write, which doesn't do anything). Environment environment = getStageEnvironment(pipeline, initialNodes); ImmutableSet.Builder<PipelineNode.PTransformNode> fusedTransforms = ImmutableSet.builder(); fusedTransforms.addAll(initialNodes); Set<SideInputReference> sideInputs = new LinkedHashSet<>(); Set<UserStateReference> userStates = new LinkedHashSet<>(); Set<TimerReference> timers = new LinkedHashSet<>(); Set<PipelineNode.PCollectionNode> fusedCollections = new LinkedHashSet<>(); Set<PipelineNode.PCollectionNode> materializedPCollections = new LinkedHashSet<>(); Queue<PipelineNode.PCollectionNode> fusionCandidates = new ArrayDeque<>(); for (PipelineNode.PTransformNode initialConsumer : initialNodes) { fusionCandidates.addAll(pipeline.getOutputPCollections(initialConsumer)); sideInputs.addAll(pipeline.getSideInputs(initialConsumer)); userStates.addAll(pipeline.getUserStates(initialConsumer)); timers.addAll(pipeline.getTimers(initialConsumer)); } while (!fusionCandidates.isEmpty()) { PipelineNode.PCollectionNode candidate = fusionCandidates.poll(); if (fusedCollections.contains(candidate) || materializedPCollections.contains(candidate)) { // This should generally mean we get to a Flatten via multiple paths through the graph and // we've already determined what to do with the output. LOG.debug( "Skipping fusion candidate {} because it is {} in this {}", candidate, fusedCollections.contains(candidate) ? "fused" : "materialized", ExecutableStage.class.getSimpleName()); continue; } PCollectionFusibility fusibility = canFuse(pipeline, candidate, environment, fusedCollections); switch (fusibility) { case MATERIALIZE: materializedPCollections.add(candidate); break; case FUSE: // All of the consumers of the candidate PCollection can be fused into this stage. Do so. fusedCollections.add(candidate); fusedTransforms.addAll(pipeline.getPerElementConsumers(candidate)); for (PipelineNode.PTransformNode consumer : pipeline.getPerElementConsumers(candidate)) { // The outputs of every transform fused into this stage must be either materialized or // themselves fused away, so add them to the set of candidates. fusionCandidates.addAll(pipeline.getOutputPCollections(consumer)); sideInputs.addAll(pipeline.getSideInputs(consumer)); } break; default: throw new IllegalStateException( String.format( "Unknown type of %s %s", PCollectionFusibility.class.getSimpleName(), fusibility)); } } return ImmutableExecutableStage.ofFullComponents( pipeline.getComponents(), environment, inputPCollection, sideInputs, userStates, timers, fusedTransforms.build(), materializedPCollections, ExecutableStage.DEFAULT_WIRE_CODER_SETTINGS); }
@Test public void materializesWithStatefulConsumer() { // (impulse.out) -> parDo -> (parDo.out) // (parDo.out) -> stateful -> stateful.out // stateful has a state spec which prevents it from fusing with an upstream ParDo PTransform parDoTransform = PTransform.newBuilder() .putInputs("input", "impulse.out") .putOutputs("output", "parDo.out") .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN) .setPayload( ParDoPayload.newBuilder() .setDoFn(FunctionSpec.newBuilder()) .build() .toByteString())) .setEnvironmentId("common") .build(); PTransform statefulTransform = PTransform.newBuilder() .putInputs("input", "parDo.out") .putOutputs("output", "stateful.out") .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN) .setPayload( ParDoPayload.newBuilder() .setDoFn(FunctionSpec.newBuilder()) .putStateSpecs("state", StateSpec.getDefaultInstance()) .build() .toByteString())) .setEnvironmentId("common") .build(); QueryablePipeline p = QueryablePipeline.forPrimitivesIn( partialComponents .toBuilder() .putTransforms("parDo", parDoTransform) .putPcollections( "parDo.out", PCollection.newBuilder().setUniqueName("parDo.out").build()) .putTransforms("stateful", statefulTransform) .putPcollections( "stateful.out", PCollection.newBuilder().setUniqueName("stateful.out").build()) .putEnvironments("common", Environments.createDockerEnvironment("common")) .build()); ExecutableStage subgraph = GreedyStageFuser.forGrpcPortRead( p, impulseOutputNode, ImmutableSet.of(PipelineNode.pTransform("parDo", parDoTransform))); assertThat( subgraph.getOutputPCollections(), contains( PipelineNode.pCollection( "parDo.out", PCollection.newBuilder().setUniqueName("parDo.out").build()))); assertThat(subgraph, hasSubtransforms("parDo")); }
public T send() throws IOException { return web3jService.send(this, responseType); }
@Test public void testEthHashrate() throws Exception { web3j.ethHashrate().send(); verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"eth_hashrate\",\"params\":[],\"id\":1}"); }
public static void getSemanticPropsSingleFromString( SingleInputSemanticProperties result, String[] forwarded, String[] nonForwarded, String[] readSet, TypeInformation<?> inType, TypeInformation<?> outType) { getSemanticPropsSingleFromString( result, forwarded, nonForwarded, readSet, inType, outType, false); }
@Test void testReadFieldsNestedTuples() { String[] readFields = {"f0.f1; f0.f2; f2"}; SingleInputSemanticProperties sp = new SingleInputSemanticProperties(); SemanticPropUtil.getSemanticPropsSingleFromString( sp, null, null, readFields, nestedTupleType, intType); FieldSet fs = sp.getReadFields(0); assertThat(fs).containsExactly(1, 2, 4); readFields[0] = "f0;f1"; sp = new SingleInputSemanticProperties(); SemanticPropUtil.getSemanticPropsSingleFromString( sp, null, null, readFields, nestedTupleType, intType); fs = sp.getReadFields(0); assertThat(fs).containsExactly(0, 1, 2, 3); }
public static ByteString dataMapToByteString(Map<String, String> headers, DataMap dataMap) throws MimeTypeParseException, IOException { return ByteString.unsafeWrap(getContentType(headers).getCodec().mapToBytes(dataMap)); }
@Test public void testDataMapToJSONByteString() throws MimeTypeParseException, IOException { DataMap testDataMap = createTestDataMap(); byte[] expectedBytes = JACKSON_DATA_CODEC.mapToBytes(testDataMap); Map<String, String> headers = Collections.singletonMap(RestConstants.HEADER_CONTENT_TYPE, "application/json"); ByteString byteString = DataMapConverter.dataMapToByteString(headers, testDataMap); Assert.assertEquals(byteString.copyBytes(), expectedBytes); }
public static Type convertType(TypeInfo typeInfo) { switch (typeInfo.getOdpsType()) { case BIGINT: return Type.BIGINT; case INT: return Type.INT; case SMALLINT: return Type.SMALLINT; case TINYINT: return Type.TINYINT; case FLOAT: return Type.FLOAT; case DECIMAL: DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo; return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale()); case DOUBLE: return Type.DOUBLE; case CHAR: CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo; return ScalarType.createCharType(charTypeInfo.getLength()); case VARCHAR: VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo; return ScalarType.createVarcharType(varcharTypeInfo.getLength()); case STRING: case JSON: return ScalarType.createDefaultCatalogString(); case BINARY: return Type.VARBINARY; case BOOLEAN: return Type.BOOLEAN; case DATE: return Type.DATE; case TIMESTAMP: case DATETIME: return Type.DATETIME; case MAP: MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo; return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()), convertType(mapTypeInfo.getValueTypeInfo())); case ARRAY: ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo; return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo())); case STRUCT: StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo; List<Type> fieldTypeList = structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType) .collect(Collectors.toList()); return new StructType(fieldTypeList); default: return Type.VARCHAR; } }
@Test public void testConvertTypeCaseStruct() { TypeInfo fieldTypeInfo1 = TypeInfoFactory.STRING; TypeInfo fieldTypeInfo2 = TypeInfoFactory.INT; StructTypeInfo structTypeInfo = TypeInfoFactory.getStructTypeInfo(ImmutableList.of("fieldTypeInfo1", "fieldTypeInfo2"), ImmutableList.of(fieldTypeInfo1, fieldTypeInfo2)); Type result = EntityConvertUtils.convertType(structTypeInfo); Type expectedType1 = ScalarType.createDefaultCatalogString(); Type expectedType2 = Type.INT; Type expectedType = new StructType(ImmutableList.of(expectedType1, expectedType2)); assertEquals(expectedType, result); }
public static <T> T getFirst(Iterable<T> iterable) { return IterUtil.getFirst(iterable); }
@Test public void getFirstTest() { final List<?> nullList = null; final Object first = CollUtil.getFirst(nullList); assertNull(first); }
public static ConnectorOffsets consumerGroupOffsetsToConnectorOffsets(Map<TopicPartition, OffsetAndMetadata> consumerGroupOffsets) { List<ConnectorOffset> connectorOffsets = new ArrayList<>(); for (Map.Entry<TopicPartition, OffsetAndMetadata> topicPartitionOffset : consumerGroupOffsets.entrySet()) { Map<String, Object> partition = new HashMap<>(); partition.put(KAFKA_TOPIC_KEY, topicPartitionOffset.getKey().topic()); partition.put(KAFKA_PARTITION_KEY, topicPartitionOffset.getKey().partition()); connectorOffsets.add(new ConnectorOffset(partition, Collections.singletonMap(KAFKA_OFFSET_KEY, topicPartitionOffset.getValue().offset()))); } return new ConnectorOffsets(connectorOffsets); }
@Test public void testConsumerGroupOffsetsToConnectorOffsets() { Map<TopicPartition, OffsetAndMetadata> consumerGroupOffsets = new HashMap<>(); ConnectorOffsets connectorOffsets = SinkUtils.consumerGroupOffsetsToConnectorOffsets(consumerGroupOffsets); assertEquals(0, connectorOffsets.offsets().size()); consumerGroupOffsets.put(new TopicPartition("test-topic", 0), new OffsetAndMetadata(100)); connectorOffsets = SinkUtils.consumerGroupOffsetsToConnectorOffsets(consumerGroupOffsets); assertEquals(1, connectorOffsets.offsets().size()); assertEquals(Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 100L), connectorOffsets.offsets().get(0).offset()); Map<String, Object> expectedPartition = new HashMap<>(); expectedPartition.put(SinkUtils.KAFKA_TOPIC_KEY, "test-topic"); expectedPartition.put(SinkUtils.KAFKA_PARTITION_KEY, 0); assertEquals(expectedPartition, connectorOffsets.offsets().get(0).partition()); }
@VisibleForTesting static Set<AbsoluteUnixPath> getVolumesSet(RawConfiguration rawConfiguration) throws InvalidContainerVolumeException { Set<AbsoluteUnixPath> volumes = new HashSet<>(); for (String path : rawConfiguration.getVolumes()) { try { AbsoluteUnixPath absoluteUnixPath = AbsoluteUnixPath.get(path); volumes.add(absoluteUnixPath); } catch (IllegalArgumentException exception) { throw new InvalidContainerVolumeException(path, path, exception); } } return volumes; }
@Test public void testGetValidVolumesList() throws InvalidContainerVolumeException { when(rawConfiguration.getVolumes()).thenReturn(Collections.singletonList("/some/root")); assertThat(PluginConfigurationProcessor.getVolumesSet(rawConfiguration)) .containsExactly(AbsoluteUnixPath.get("/some/root")); }
@Override public String select(List<String> columns, List<String> where) { StringBuilder sql = new StringBuilder(); String method = "SELECT "; sql.append(method); for (int i = 0; i < columns.size(); i++) { sql.append(columns.get(i)); if (i == columns.size() - 1) { sql.append(" "); } else { sql.append(","); } } sql.append("FROM "); sql.append(getTableName()); sql.append(" "); if (CollectionUtils.isEmpty(where)) { return sql.toString(); } appendWhereClause(where, sql); return sql.toString(); }
@Test void testSelectAll() { String sql = abstractMapper.select(Arrays.asList("id", "name"), null); assertEquals("SELECT id,name FROM tenant_info ", sql); }
@SneakyThrows(PSQLException.class) @Override public void write(final PostgreSQLPacketPayload payload, final Object value) { byte[] binaryDate = new byte[4]; new TimestampUtils(false, null).toBinDate(null, binaryDate, (Date) value); payload.writeBytes(binaryDate); }
@Test void assertWrite() throws PSQLException { byte[] actual = new byte[4]; PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(Unpooled.wrappedBuffer(actual).writerIndex(0), StandardCharsets.UTF_8); Date input = Date.valueOf("2023-01-30"); new PostgreSQLDateBinaryProtocolValue().write(payload, input); byte[] expected = new byte[4]; new TimestampUtils(false, null).toBinDate(null, expected, input); assertThat(actual, is(expected)); }
public int[] findMatchingLines(List<String> left, List<String> right) { int[] index = new int[right.size()]; int dbLine = left.size(); int reportLine = right.size(); try { PathNode node = new MyersDiff<String>().buildPath(left, right); while (node.prev != null) { PathNode prevNode = node.prev; if (!node.isSnake()) { // additions reportLine -= (node.j - prevNode.j); // removals dbLine -= (node.i - prevNode.i); } else { // matches for (int i = node.i; i > prevNode.i; i--) { index[reportLine - 1] = dbLine; reportLine--; dbLine--; } } node = prevNode; } } catch (DifferentiationFailedException e) { LOG.error("Error finding matching lines", e); return index; } return index; }
@Test public void shouldFindNothingWhenContentAreIdentical() { List<String> database = new ArrayList<>(); database.add("line - 0"); database.add("line - 1"); database.add("line - 2"); database.add("line - 3"); database.add("line - 4"); List<String> report = new ArrayList<>(); report.add("line - 0"); report.add("line - 1"); report.add("line - 2"); report.add("line - 3"); report.add("line - 4"); int[] diff = new SourceLinesDiffFinder().findMatchingLines(database, report); assertThat(diff).containsExactly(1, 2, 3, 4, 5); }
public static BadRequestException clusterNotExists(String clusterName) { return new BadRequestException("cluster not exists for clusterName:%s", clusterName); }
@Test public void testClusterNotExists(){ BadRequestException clusterNotExists = BadRequestException.clusterNotExists(clusterName); assertEquals("cluster not exists for clusterName:test", clusterNotExists.getMessage()); }
@Transactional public void update(MemberDto memberDto, Long templateId, UpdateTemplateRequest updateTemplateRequest) { Member member = memberRepository.fetchById(memberDto.id()); Category category = categoryRepository.fetchById(updateTemplateRequest.categoryId()); validateCategoryAuthorizeMember(category, member); Template template = templateRepository.fetchById(templateId); validateTemplateAuthorizeMember(template, member); template.updateTemplate(updateTemplateRequest.title(), updateTemplateRequest.description(), category); updateSourceCodes(updateTemplateRequest, template); updateTags(updateTemplateRequest, template); validateSourceCodesCount(updateTemplateRequest, template); }
@Test @DisplayName("Template update failure: no permission") void updateTemplateFailWithUnauthorized() { // given MemberDto memberDto = MemberDtoFixture.getFirstMemberDto(); Member member = memberRepository.fetchById(memberDto.id()); CreateTemplateRequest createdTemplate = makeTemplateRequest("title"); Template template = saveTemplate(createdTemplate, new Category("category1", member), member); categoryRepository.save(new Category("category2", member)); // when MemberDto otherMemberDto = MemberDtoFixture.getSecondMemberDto(); UpdateTemplateRequest updateTemplateRequest = makeUpdateTemplateRequest(2L); // then Long templateId = template.getId(); assertThatThrownBy(() -> templateService.update(otherMemberDto, templateId, updateTemplateRequest)) .isInstanceOf(CodeZapException.class) .hasMessage("해당 템플릿에 대한 권한이 없습니다."); /* "You do not have permission for this template." */ }
public Response request(Request request) throws NacosException { return request(request, rpcClientConfig.timeOutMills()); }
@Test void testRequestWithoutAnyTry() throws NacosException { assertThrows(NacosException.class, () -> { when(rpcClientConfig.retryTimes()).thenReturn(-1); rpcClient.request(null); }); }
@VisibleForTesting Object evaluate(final GenericRow row) { return term.getValue(new TermEvaluationContext(row)); }
@Test public void shouldEvaluateArithmetic() { // Given: final Expression expression1 = new ArithmeticBinaryExpression( Operator.ADD, new IntegerLiteral(1), new IntegerLiteral(2) ); final Expression expression2 = new ArithmeticBinaryExpression( Operator.ADD, new IntegerLiteral(1), new LongLiteral(4) ); final Expression expression3 = new ArithmeticBinaryExpression( Operator.ADD, new DoubleLiteral(5.5), new LongLiteral(4) ); final Expression expression4 = new ArithmeticBinaryExpression( Operator.MULTIPLY, new IntegerLiteral(5), new LongLiteral(4) ); final Expression expression5 = new ArithmeticBinaryExpression( Operator.DIVIDE, new LongLiteral(18), new LongLiteral(3) ); final Expression expression6 = new ArithmeticBinaryExpression( Operator.MODULUS, new LongLiteral(20), new LongLiteral(3) ); final Expression expression7 = new ArithmeticBinaryExpression( Operator.ADD, new DecimalLiteral(new BigDecimal("12.5").setScale(2)), new DecimalLiteral(new BigDecimal("1.25").setScale(2)) ); final Expression expression8 = new ArithmeticBinaryExpression( Operator.ADD, new DecimalLiteral(new BigDecimal("12.5").setScale(2)), new DoubleLiteral(2.0d) ); // When: InterpretedExpression interpreter1 = interpreter(expression1); InterpretedExpression interpreter2 = interpreter(expression2); InterpretedExpression interpreter3 = interpreter(expression3); InterpretedExpression interpreter4 = interpreter(expression4); InterpretedExpression interpreter5 = interpreter(expression5); InterpretedExpression interpreter6 = interpreter(expression6); InterpretedExpression interpreter7 = interpreter(expression7); InterpretedExpression interpreter8 = interpreter(expression8); // Then: assertThat(interpreter1.evaluate(ROW), is(3)); assertThat(interpreter2.evaluate(ROW), is(5L)); assertThat(interpreter3.evaluate(ROW), is(9.5d)); assertThat(interpreter4.evaluate(ROW), is(20L)); assertThat(interpreter5.evaluate(ROW), is(6L)); assertThat(interpreter6.evaluate(ROW), is(2L)); assertThat(interpreter7.evaluate(ROW), is(BigDecimal.valueOf(13.75).setScale(2))); assertThat(interpreter8.evaluate(ROW), is(14.5d)); }
public static <T> Patch<T> diff(List<T> original, List<T> revised, DiffAlgorithmListener progress) { return DiffUtils.diff(original, revised, DEFAULT_DIFF.create(), progress); }
@Test public void testDiff_EmptyListWithNonEmpty() { final Patch<String> patch = DiffUtils.diff(new ArrayList<>(), Arrays.asList("aaa")); assertNotNull(patch); assertEquals(1, patch.getDeltas().size()); final AbstractDelta<String> delta = patch.getDeltas().get(0); assertTrue(delta instanceof InsertDelta); }
@Override public void run() { if (processor != null) { processor.execute(); } else { if (!beforeHook()) { logger.info("before-feature hook returned [false], aborting: {}", this); } else { scenarios.forEachRemaining(this::processScenario); } afterFeature(); } }
@Test void testCallFeature() { run("call-feature.feature"); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { if (schema != null && schema.type() != Type.BOOLEAN) throw new DataException("Invalid schema type for BooleanConverter: " + schema.type().toString()); try { return serializer.serialize(topic, (Boolean) value); } catch (ClassCastException e) { throw new DataException("BooleanConverter is not compatible with objects of type " + value.getClass()); } }
@Test public void testFromConnectNullSchema() { assertArrayEquals( TRUE, converter.fromConnectData(TOPIC, null, Boolean.TRUE) ); assertArrayEquals( FALSE, converter.fromConnectData(TOPIC, null, Boolean.FALSE) ); }
@Override public boolean isSatisfied(int index, TradingRecord tradingRecord) { if (tradingRecord != null && !tradingRecord.isClosed()) { Num entryPrice = tradingRecord.getCurrentPosition().getEntry().getNetPrice(); Num currentPrice = this.referencePrice.getValue(index); Num threshold = this.stopLossThreshold.getValue(index); if (tradingRecord.getCurrentPosition().getEntry().isBuy()) { return currentPrice.isLessThan(entryPrice.minus(threshold)); } else { return currentPrice.isGreaterThan(entryPrice.plus(threshold)); } } return false; }
@Test public void testShortPositionStopLoss() { ZonedDateTime initialEndDateTime = ZonedDateTime.now(); for (int i = 0; i < 10; i++) { series.addBar(initialEndDateTime.plusDays(i), 100, 105, 95, 100); } AverageTrueRangeStopLossRule rule = new AverageTrueRangeStopLossRule(series, 5, 2); // Enter short position TradingRecord tradingRecord = new BaseTradingRecord(Trade.TradeType.SELL, new FixedTransactionCostModel(1), new ZeroCostModel()); tradingRecord.enter(0, series.numOf(100), series.numOf(1)); // Price below stop loss series.addBar(series.getLastBar().getEndTime().plusDays(1), 110, 123, 113, 123); assertFalse(rule.isSatisfied(series.getEndIndex(), tradingRecord)); // Price rises above stop loss series.addBar(series.getLastBar().getEndTime().plusDays(1), 110, 127, 117, 127); assertTrue(rule.isSatisfied(series.getEndIndex(), tradingRecord)); }
@Override public Map<String, Object> assembleFrom(OAuth2AccessTokenEntity accessToken, UserInfo userInfo, Set<String> authScopes) { Map<String, Object> result = newLinkedHashMap(); OAuth2Authentication authentication = accessToken.getAuthenticationHolder().getAuthentication(); result.put(ACTIVE, true); if (accessToken.getPermissions() != null && !accessToken.getPermissions().isEmpty()) { Set<Object> permissions = Sets.newHashSet(); for (Permission perm : accessToken.getPermissions()) { Map<String, Object> o = newLinkedHashMap(); o.put("resource_set_id", perm.getResourceSet().getId().toString()); Set<String> scopes = Sets.newHashSet(perm.getScopes()); o.put("scopes", scopes); permissions.add(o); } result.put("permissions", permissions); } else { Set<String> scopes = Sets.intersection(authScopes, accessToken.getScope()); result.put(SCOPE, Joiner.on(SCOPE_SEPARATOR).join(scopes)); } if (accessToken.getExpiration() != null) { try { result.put(EXPIRES_AT, dateFormat.valueToString(accessToken.getExpiration())); result.put(EXP, accessToken.getExpiration().getTime() / 1000L); } catch (ParseException e) { logger.error("Parse exception in token introspection", e); } } if (userInfo != null) { // if we have a UserInfo, use that for the subject result.put(SUB, userInfo.getSub()); } else { // otherwise, use the authentication's username result.put(SUB, authentication.getName()); } if(authentication.getUserAuthentication() != null) { result.put(USER_ID, authentication.getUserAuthentication().getName()); } result.put(CLIENT_ID, authentication.getOAuth2Request().getClientId()); result.put(TOKEN_TYPE, accessToken.getTokenType()); return result; }
@Test public void shouldAssembleExpectedResultForAccessToken_withPermissions() throws ParseException { // given OAuth2AccessTokenEntity accessToken = accessToken(new Date(123 * 1000L), scopes("foo", "bar"), permissions(permission(1L, "foo", "bar")), "Bearer", oauth2AuthenticationWithUser(oauth2Request("clientId"), "name")); UserInfo userInfo = userInfo("sub"); Set<String> authScopes = scopes("foo", "bar", "baz"); // when Map<String, Object> result = assembler.assembleFrom(accessToken, userInfo, authScopes); // then Map<String, Object> expected = new ImmutableMap.Builder<String, Object>() .put("sub", "sub") .put("exp", 123L) .put("expires_at", dateFormat.valueToString(new Date(123 * 1000L))) .put("permissions", new ImmutableSet.Builder<>() .add(new ImmutableMap.Builder<String, Object>() .put("resource_set_id", "1") // note that the resource ID comes out as a string .put("scopes", new ImmutableSet.Builder<>() .add("bar") .add("foo") .build()) .build()) .build()) // note that scopes are not included if permissions are included .put("active", Boolean.TRUE) .put("user_id", "name") .put("client_id", "clientId") .put("token_type", "Bearer") .build(); assertThat(result, is(equalTo(expected))); }
public AnalysisResult analysis(AnalysisResult result) { // 1. Set sub package name by source.metrics Class<? extends Metrics> metricsClass = MetricsHolder.find(result.getAggregationFuncStmt().getAggregationFunctionName()); String metricsClassSimpleName = metricsClass.getSimpleName(); result.setMetricsClassName(metricsClassSimpleName); // 2. Optional: build filter expressions List<ConditionExpression> expressions = result.getFilters().getFilterExpressionsParserResult(); if (expressions != null && expressions.size() > 0) { for (ConditionExpression expression : expressions) { final FilterMatchers.MatcherInfo matcherInfo = FilterMatchers.INSTANCE.find( expression.getExpressionType()); final String getter = matcherInfo.isBooleanType() ? ClassMethodUtil.toIsMethod(expression.getAttributes()) : ClassMethodUtil.toGetMethod(expression.getAttributes()); final Expression filterExpression = new Expression(); filterExpression.setExpressionObject(matcherInfo.getMatcher().getName()); filterExpression.setLeft(TypeCastUtil.withCast(expression.getCastType(), "source." + getter)); filterExpression.setRight(expression.getValue()); result.getFilters().addFilterExpressions(filterExpression); } } // 3. Find Entrance method of this metrics Class<?> c = metricsClass; Method entranceMethod = null; SearchEntrance: while (!c.equals(Object.class)) { for (Method method : c.getMethods()) { Entrance annotation = method.getAnnotation(Entrance.class); if (annotation != null) { entranceMethod = method; break SearchEntrance; } } c = c.getSuperclass(); } if (entranceMethod == null) { throw new IllegalArgumentException("Can't find Entrance method in class: " + metricsClass.getName()); } EntryMethod entryMethod = new EntryMethod(); result.setEntryMethod(entryMethod); entryMethod.setMethodName(entranceMethod.getName()); // 4. Use parameter's annotation of entrance method to generate aggregation entrance. for (Parameter parameter : entranceMethod.getParameters()) { Class<?> parameterType = parameter.getType(); Annotation[] parameterAnnotations = parameter.getAnnotations(); if (parameterAnnotations == null || parameterAnnotations.length == 0) { throw new IllegalArgumentException( "Entrance method:" + entranceMethod + " doesn't include the annotation."); } Annotation annotation = parameterAnnotations[0]; if (annotation instanceof SourceFrom) { entryMethod.addArg( parameterType, TypeCastUtil.withCast( result.getFrom().getSourceCastType(), "source." + ClassMethodUtil.toGetMethod(result.getFrom().getSourceAttribute()) ) ); } else if (annotation instanceof ConstOne) { entryMethod.addArg(parameterType, "1"); } else if (annotation instanceof org.apache.skywalking.oap.server.core.analysis.metrics.annotation.Expression) { if (isNull(result.getAggregationFuncStmt().getFuncConditionExpressions()) || result.getAggregationFuncStmt().getFuncConditionExpressions().isEmpty()) { throw new IllegalArgumentException( "Entrance method:" + entranceMethod + " argument can't find funcParamExpression."); } else { ConditionExpression expression = result.getAggregationFuncStmt().getNextFuncConditionExpression(); final FilterMatchers.MatcherInfo matcherInfo = FilterMatchers.INSTANCE.find( expression.getExpressionType()); final String getter = matcherInfo.isBooleanType() ? 
ClassMethodUtil.toIsMethod(expression.getAttributes()) : ClassMethodUtil.toGetMethod(expression.getAttributes()); final Expression argExpression = new Expression(); argExpression.setRight(expression.getValue()); argExpression.setExpressionObject(matcherInfo.getMatcher().getName()); argExpression.setLeft(TypeCastUtil.withCast(expression.getCastType(), "source." + getter)); entryMethod.addArg(argExpression); } } else if (annotation instanceof Arg) { entryMethod.addArg(parameterType, result.getAggregationFuncStmt().getNextFuncArg()); } else { throw new IllegalArgumentException( "Entrance method:" + entranceMethod + " doesn't have the expected annotation."); } } // 5. Get all columns declared in MetricsHolder class. c = metricsClass; while (!c.equals(Object.class)) { for (Field field : c.getDeclaredFields()) { Column column = field.getAnnotation(Column.class); if (column != null) { result.addPersistentField( field.getName(), column.name(), field.getType()); } } c = c.getSuperclass(); } // 6. Based on Source, generate default columns List<SourceColumn> columns = SourceColumnsFactory.getColumns(result.getFrom().getSourceName()); result.setFieldsFromSource(columns); result.generateSerializeFields(); return result; }
@Test public void testEndpointAnalysis() { AnalysisResult result = new AnalysisResult(); result.getFrom().setSourceName("Endpoint"); result.getFrom().getSourceAttribute().add("latency"); result.setMetricsName("EndpointAvg"); result.getAggregationFuncStmt().setAggregationFunctionName("longAvg"); DeepAnalysis analysis = new DeepAnalysis(); result = analysis.analysis(result); EntryMethod method = result.getEntryMethod(); Assertions.assertEquals("combine", method.getMethodName()); Assertions.assertEquals("(long)(source.getLatency())", method.getArgsExpressions().get(0)); Assertions.assertEquals("(long)(1)", method.getArgsExpressions().get(1)); List<SourceColumn> source = result.getFieldsFromSource(); Assertions.assertEquals(2, source.size()); List<DataColumn> persistentFields = result.getPersistentFields(); Assertions.assertEquals(4, persistentFields.size()); }
static String getObjectKey(Extension extension) { return PrimaryKeySpecUtils.getObjectPrimaryKey(extension); }
@Test void getObjectKey() { var fake = createFakeExtension(); assertThat(DefaultIndexer.getObjectKey(fake)).isEqualTo("fake-extension"); }
static CastExpr getNumericPredictorExpression(final NumericPredictor numericPredictor) { boolean withExponent = !Objects.equals(1, numericPredictor.getExponent()); final String lambdaExpressionMethodName = withExponent ? "evaluateNumericWithExponent" : "evaluateNumericWithoutExponent"; final String parameterName = "input"; final MethodCallExpr lambdaMethodCallExpr = new MethodCallExpr(); lambdaMethodCallExpr.setName(lambdaExpressionMethodName); lambdaMethodCallExpr.setScope(new NameExpr(KiePMMLRegressionTable.class.getSimpleName())); final NodeList<Expression> arguments = new NodeList<>(); arguments.add(0, new NameExpr(parameterName)); arguments.add(1, getExpressionForObject(numericPredictor.getCoefficient().doubleValue())); if (withExponent) { arguments.add(2, getExpressionForObject(numericPredictor.getExponent().doubleValue())); } lambdaMethodCallExpr.setArguments(arguments); final ExpressionStmt lambdaExpressionStmt = new ExpressionStmt(lambdaMethodCallExpr); final LambdaExpr lambdaExpr = new LambdaExpr(); final Parameter lambdaParameter = new Parameter(new UnknownType(), parameterName); lambdaExpr.setParameters(NodeList.nodeList(lambdaParameter)); lambdaExpr.setBody(lambdaExpressionStmt); final String doubleClassName = Double.class.getSimpleName(); final ClassOrInterfaceType serializableFunctionType = getTypedClassOrInterfaceTypeByTypeNames(SerializableFunction.class.getCanonicalName(), Arrays.asList(doubleClassName, doubleClassName)); final CastExpr toReturn = new CastExpr(); toReturn.setType(serializableFunctionType); toReturn.setExpression(lambdaExpr); return toReturn; }
@Test void getNumericPredictorExpressionWithoutExponent() throws IOException { String predictorName = "predictorName"; int exponent = 1; double coefficient = 1.23; NumericPredictor numericPredictor = PMMLModelTestUtils.getNumericPredictor(predictorName, exponent, coefficient); CastExpr retrieved = KiePMMLRegressionTableFactory.getNumericPredictorExpression(numericPredictor); String text = getFileContent(TEST_02_SOURCE); Expression expected = JavaParserUtils.parseExpression(String.format(text, coefficient)); assertThat(retrieved).isEqualTo(expected); }
@Override public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream, final ValueJoiner<? super V, ? super VO, ? extends VR> joiner, final JoinWindows windows) { return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows); }
@Test public void shouldNotAllowNullTableOnTableLeftJoin() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.leftJoin(null, MockValueJoiner.TOSTRING_JOINER)); assertThat(exception.getMessage(), equalTo("table can't be null")); }
public void begin() { databaseConnectionManager.getConnectionPostProcessors().add(target -> target.setAutoCommit(false)); }
@Test void assertBegin() { localTransactionManager.begin(); verify(databaseConnectionManager).getConnectionPostProcessors(); }
public boolean sendEvents( final Message inMessage, final Consumer<StitchResponse> resultCallback, final AsyncCallback callback) { sendAsyncEvents(inMessage) .subscribe(resultCallback, error -> { // error but we continue if (LOG.isDebugEnabled()) { LOG.debug("Error processing async exchange with error: {}", error.getMessage()); } inMessage.getExchange().setException(error); callback.done(false); }, () -> { // we are done from everything, so mark it as sync done LOG.trace("All events with exchange have been sent successfully."); callback.done(false); }); return false; }
@Test void testNormalSend() { final StitchConfiguration configuration = new StitchConfiguration(); configuration.setTableName("table_1"); configuration.setStitchSchema(StitchSchema.builder().addKeyword("field_1", "string").build()); configuration.setKeyNames("field_1"); final StitchMessage message = StitchMessage.builder() .withData("field_1", "data") .withSequence(0) .build(); final Exchange exchange = new DefaultExchange(context); exchange.getMessage().setBody(message); final StitchProducerOperations operations = new StitchProducerOperations(new TestClient(), configuration); final AtomicBoolean done = new AtomicBoolean(false); operations.sendEvents(exchange.getMessage(), response -> { assertEquals(200, response.getHttpStatusCode()); assertEquals("OK", response.getStatus()); assertEquals("All good!", response.getMessage()); assertEquals(Collections.singletonMap("header-1", "test"), response.getHeaders()); done.set(true); }, doneSync -> { }); Awaitility .await() .atMost(1, TimeUnit.SECONDS) .pollInterval(10, TimeUnit.MILLISECONDS) .untilTrue(done); }
@Override public AttributedList<Path> search(final Path workdir, final Filter<Path> regex, final ListProgressListener listener) throws BackgroundException { if(workdir.isRoot()) { if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) { final AttributedList<Path> result = new AttributedList<>(); final AttributedList<Path> buckets = new S3BucketListService(session).list(workdir, listener); for(Path bucket : buckets) { result.addAll(filter(regex, new S3ObjectListService(session, acl).list(bucket, listener, null))); } result.addAll(filter(regex, buckets)); return result; } } try { return filter(regex, new S3ObjectListService(session, acl).list(workdir, listener, null)); } catch(NotfoundException e) { return AttributedList.emptyList(); } }
@Test public void testSearchInDirectory() throws Exception { final Path bucket = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); final String name = new AlphanumericRandomStringService().random(); final Path file = new Path(bucket, name, EnumSet.of(Path.Type.file)); session.getFeature(Touch.class).touch(file, new TransferStatus()); final S3SearchFeature feature = new S3SearchFeature(session, acl); assertTrue(feature.search(bucket, new SearchFilter(name), new DisabledListProgressListener()).contains(file)); assertTrue(feature.search(bucket, new SearchFilter(StringUtils.upperCase(name)), new DisabledListProgressListener()).contains(file)); assertTrue(feature.search(bucket, new SearchFilter(StringUtils.substring(name, 2)), new DisabledListProgressListener()).contains(file)); // Glob pattern assertTrue(feature.search(bucket, new SearchFilter(String.format("*%s", StringUtils.substring(name, 2))), new DisabledListProgressListener()).contains(file)); { final AttributedList<Path> result = feature.search(bucket, new SearchFilter(StringUtils.substring(name, 0, name.length() - 2)), new DisabledListProgressListener()); assertTrue(result.contains(file)); } assertFalse(feature.search(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new SearchFilter(name), new DisabledListProgressListener()).contains(file)); final Path subdir = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertTrue(feature.search(bucket, new SearchFilter(new AlphanumericRandomStringService().random()), new DisabledListProgressListener()).isEmpty()); assertFalse(feature.search(subdir, new SearchFilter(name), new DisabledListProgressListener()).contains(file)); final Path filesubdir = new S3TouchFeature(session, acl).touch(new Path(subdir, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); { final AttributedList<Path> result = feature.search(bucket, new SearchFilter(filesubdir.getName()), new DisabledListProgressListener()); assertNotNull(result.find(new SimplePathPredicate(filesubdir))); assertEquals(subdir, result.find(new SimplePathPredicate(filesubdir)).getParent()); } { final AttributedList<Path> result = feature.search(subdir, new SearchFilter(filesubdir.getName()), new DisabledListProgressListener()); assertNotNull(result.find(new SimplePathPredicate(filesubdir))); assertEquals(subdir, result.find(new SimplePathPredicate(filesubdir)).getParent()); } new S3DefaultDeleteFeature(session).delete(Arrays.asList(file, filesubdir, subdir), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public List<MappingField> resolveAndValidateFields( List<MappingField> userFields, Map<String, String> options, NodeEngine nodeEngine ) { final InternalSerializationService serializationService = (InternalSerializationService) nodeEngine .getSerializationService(); final AbstractRelationsStorage relationsStorage = ((CalciteSqlOptimizer) nodeEngine.getSqlService().getOptimizer()) .relationsStorage(); // normalize and validate the names and external names for (MappingField field : userFields) { String name = field.name(); String externalName = field.externalName(); if (externalName == null) { if (name.equals(KEY) || name.equals(VALUE)) { externalName = name; } else { externalName = VALUE_PREFIX + name; } field.setExternalName(externalName); } if ((name.equals(KEY) && !externalName.equals(KEY)) || (name.equals(VALUE) && !externalName.equals(VALUE))) { throw QueryException.error("Cannot rename field: '" + name + '\''); } if (!EXT_NAME_PATTERN.matcher(externalName).matches()) { throw QueryException.error("Invalid external name: " + externalName); } } Stream<MappingField> keyFields = resolveAndValidateFields(true, userFields, options, serializationService, relationsStorage); Stream<MappingField> valueFields = resolveAndValidateFields(false, userFields, options, serializationService, relationsStorage); Map<String, MappingField> fields = Stream.concat(keyFields, valueFields) .collect(LinkedHashMap::new, (map, field) -> map.putIfAbsent(field.name(), field), Map::putAll); if (fields.isEmpty()) { throw QueryException.error("The resolved field list is empty"); } return new ArrayList<>(fields.values()); }
@Test public void when_keyClashesWithValue_then_keyIsChosen() { Map<String, String> options = ImmutableMap.of( OPTION_KEY_FORMAT, JAVA_FORMAT, OPTION_VALUE_FORMAT, JAVA_FORMAT ); given(resolver.resolveAndValidateFields(eq(true), eq(emptyList()), eq(options), eq(ss))) .willReturn(Stream.of(field("field", QueryDataType.INT, "__key.field"))); given(resolver.resolveAndValidateFields(eq(false), eq(emptyList()), eq(options), eq(ss))) .willReturn(Stream.of(field("field", QueryDataType.VARCHAR, "this.field"))); List<MappingField> fields = resolvers.resolveAndValidateFields(emptyList(), options, nodeEngine); assertThat(fields).containsExactly(field("field", QueryDataType.INT, "__key.field")); }
@Override @CacheEvict(cacheNames = RedisKeyConstants.OAUTH_CLIENT, allEntries = true) // allEntries: evict the whole cache, because the clientId field may change and targeted eviction is impractical public void updateOAuth2Client(OAuth2ClientSaveReqVO updateReqVO) { // validate that the client exists validateOAuth2ClientExists(updateReqVO.getId()); // validate that the clientId is not already taken validateClientIdExists(updateReqVO.getId(), updateReqVO.getClientId()); // update OAuth2ClientDO updateObj = BeanUtils.toBean(updateReqVO, OAuth2ClientDO.class); oauth2ClientMapper.updateById(updateObj); }
@Test public void testUpdateOAuth2Client_success() { // mock data OAuth2ClientDO dbOAuth2Client = randomPojo(OAuth2ClientDO.class); oauth2ClientMapper.insert(dbOAuth2Client);// @Sql: insert an existing record first // prepare parameters OAuth2ClientSaveReqVO reqVO = randomPojo(OAuth2ClientSaveReqVO.class, o -> { o.setId(dbOAuth2Client.getId()); // set the ID to update o.setLogo(randomString()); }); // invoke oauth2ClientService.updateOAuth2Client(reqVO); // verify the update is correct OAuth2ClientDO oAuth2Client = oauth2ClientMapper.selectById(reqVO.getId()); // fetch the latest record assertPojoEquals(reqVO, oAuth2Client); }
@Override @Description("The status of the Kafka and ZooKeeper clusters, and Topic Operator.") public KafkaStatus getStatus() { return super.getStatus(); }
@Test public void testListenersTypeAndName() { Kafka model = TestUtils.fromYaml("Kafka-listener-name-and-status" + ".yaml", Kafka.class); assertThat(model.getStatus().getListeners(), is(notNullValue())); assertThat(model.getStatus().getListeners().size(), is(2)); List<ListenerStatus> listeners = model.getStatus().getListeners(); assertThat(listeners.get(0).getName(), is("plain")); assertThat(listeners.get(1).getName(), is("external")); }
@Override public void writeToParcel(Parcel out, int flags) { super.writeToParcel(out, flags); }
@Test public void testCanSerializeParcelable() { org.robolectric.shadows.ShadowLog.stream = System.err; BeaconManager.setDebug(true); final Beacon original = new AltBeacon.Builder().setMfgReserved(2) .setBluetoothAddress("aa:bb:cc:dd:ee:ff") .setBluetoothName("Any Bluetooth") .setBeaconTypeCode(1) .setExtraDataFields(Arrays.asList(4L, 5L)) .setId1("6") .setId2("7") .setId3("8") .setManufacturer(10) .setMultiFrameBeacon(true) .setParserIdentifier("Any Parser ID") .setRssi(-11) .setRunningAverageRssi(-12.3) .setServiceUuid(13) .setTxPower(14) .build(); original.setPacketCount(15); original.setRssiMeasurementCount(16); aParcel = Parcel.obtain(); original.writeToParcel(aParcel, 0); aParcel.setDataPosition(0); final AltBeacon parceled = AltBeacon.CREATOR.createFromParcel(aParcel); assertThat( parceled, allOf( hasProperty("bluetoothAddress", equalTo("aa:bb:cc:dd:ee:ff")), hasProperty("bluetoothName", equalTo("Any Bluetooth")), hasProperty("beaconTypeCode", equalTo(1)), hasProperty("dataFields", equalTo(Arrays.asList(2L))), hasProperty("extraDataFields", equalTo(Arrays.asList(4L, 5L))), hasProperty("id1", equalTo(Identifier.fromInt(6))), hasProperty("id2", equalTo(Identifier.fromInt(7))), hasProperty("id3", equalTo(Identifier.fromInt(8))), hasProperty("manufacturer", equalTo(10)), hasProperty("multiFrameBeacon", equalTo(true)), hasProperty("parserIdentifier", equalTo("Any Parser ID")), hasProperty("rssi", equalTo(-11)), hasProperty("runningAverageRssi", equalTo(-12.3)), hasProperty("serviceUuid", equalTo(13)), hasProperty("txPower", equalTo(14)), hasProperty("mfgReserved", equalTo(2)), hasProperty("packetCount", equalTo(15)), hasProperty("measurementCount", equalTo(16)) ) ); }
int getMinLonForTile(double lon) { return (int) (Math.floor((180 + lon) / lonDegree) * lonDegree) - 180; }
@Test public void testMinLon() { assertEquals(-60, instance.getMinLonForTile(-59.9)); assertEquals(0, instance.getMinLonForTile(0.9)); }
@SuppressWarnings("unchecked") public static void validateResponse(HttpURLConnection conn, int expectedStatus) throws IOException { if (conn.getResponseCode() != expectedStatus) { Exception toThrow; InputStream es = null; try { es = conn.getErrorStream(); Map json = JsonSerialization.mapReader().readValue(es); json = (Map) json.get(ERROR_JSON); String exClass = (String) json.get(ERROR_CLASSNAME_JSON); String exMsg = (String) json.get(ERROR_MESSAGE_JSON); if (exClass != null) { try { ClassLoader cl = HttpExceptionUtils.class.getClassLoader(); Class klass = cl.loadClass(exClass); Preconditions.checkState(Exception.class.isAssignableFrom(klass), "Class [%s] is not a subclass of Exception", klass); MethodHandle methodHandle = PUBLIC_LOOKUP.findConstructor( klass, EXCEPTION_CONSTRUCTOR_TYPE); toThrow = (Exception) methodHandle.invoke(exMsg); } catch (Throwable t) { toThrow = new IOException(String.format( "HTTP status [%d], exception [%s], message [%s], URL [%s]", conn.getResponseCode(), exClass, exMsg, conn.getURL())); } } else { String msg = (exMsg != null) ? exMsg : conn.getResponseMessage(); toThrow = new IOException(String.format( "HTTP status [%d], message [%s], URL [%s]", conn.getResponseCode(), msg, conn.getURL())); } } catch (Exception ex) { toThrow = new IOException(String.format( "HTTP status [%d], message [%s], URL [%s], exception [%s]", conn.getResponseCode(), conn.getResponseMessage(), conn.getURL(), ex.toString()), ex); } finally { if (es != null) { try { es.close(); } catch (IOException ex) { //ignore } } } throwEx(toThrow); } }
@Test public void testValidateResponseNonJsonErrorMessage() throws Exception { String msg = "stream"; InputStream is = new ByteArrayInputStream(msg.getBytes(StandardCharsets.UTF_8)); HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); Mockito.when(conn.getErrorStream()).thenReturn(is); Mockito.when(conn.getResponseMessage()).thenReturn("msg"); Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST); LambdaTestUtils.interceptAndValidateMessageContains(IOException.class, Arrays.asList(Integer.toString(HttpURLConnection.HTTP_BAD_REQUEST), "msg", "com.fasterxml.jackson.core.JsonParseException"), () -> HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED)); }
@Override protected Optional<String> get(String key) { // search for the first value available in // 1. system properties // 2. core property from environment variable // 3. thread local cache (if enabled) // 4. db String value = systemProps.getProperty(key); if (value != null) { return Optional.of(value); } Optional<String> envVal = getDefinitions().getValueFromEnv(key); if (envVal.isPresent()) { return envVal; } Map<String, String> dbProps = CACHE.get(); // caching is disabled if (dbProps == null) { return Optional.ofNullable(load(key)); } String loadedValue; if (dbProps.containsKey(key)) { // property may not exist in db. In this case key is present // in cache but value is null loadedValue = dbProps.get(key); } else { // cache the effective value (null if the property // is not persisted) loadedValue = load(key); dbProps.put(key, loadedValue); } return Optional.ofNullable(loadedValue); }
@Test public void system_settings_have_precedence_over_database() { insertPropertyIntoDb("foo", "from db"); underTest = create(system, ImmutableMap.of("foo", "from system")); assertThat(underTest.get("foo")).hasValue("from system"); }
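The lookup order documented in the comments above boils down to a chain of sources where the first hit wins; the standalone sketch below illustrates that layering pattern generically (an illustration only, not the project's actual class):

import java.util.*;
import java.util.function.Function;

// Layered-lookup sketch: sources are consulted in order, first non-empty wins.
final class LayeredSettingsSketch {
    private final List<Function<String, Optional<String>>> layers;

    @SafeVarargs
    LayeredSettingsSketch(Function<String, Optional<String>>... layers) {
        this.layers = List.of(layers);
    }

    Optional<String> get(String key) {
        for (Function<String, Optional<String>> layer : layers) {
            Optional<String> value = layer.apply(key);
            if (value.isPresent()) return value;
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Map<String, String> system = Map.of("foo", "from system");
        Map<String, String> db = Map.of("foo", "from db", "bar", "from db");
        LayeredSettingsSketch settings = new LayeredSettingsSketch(
                k -> Optional.ofNullable(system.get(k)), // 1. system properties
                k -> Optional.ofNullable(db.get(k)));    // 2. database
        System.out.println(settings.get("foo").get()); // from system
        System.out.println(settings.get("bar").get()); // from db
    }
}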
public void generate() throws IOException { Path currentWorkingDir = Paths.get("").toAbsolutePath(); final InputStream rawDoc = Files.newInputStream(currentWorkingDir.resolve(clientParameters.inputFile)); BufferedReader reader = new BufferedReader(new InputStreamReader(rawDoc)); long i = 1; while (reader.ready()) { String line = reader.readLine(); JsonReader jsonReader = new JsonReader(types, new ByteArrayInputStream(Utf8.toBytes(line)), parserFactory); String wikimediaId = "id:wikimedia:" + languageTag.languageCode() + "::" + i; ParsedDocumentOperation operation = jsonReader.readSingleDocumentStreaming(DocumentOperationType.PUT, wikimediaId); DocumentPut put = (DocumentPut) operation.operation(); Document document = put.getDocument(); FieldValue fieldValue = document.getFieldValue(clientParameters.field); this.handleTokenization(fieldValue.toString()); if (i % 50000 == 0) { System.out.println("Documents processed: " + i + ", unique words: " + documentFrequency.size()); } i++; } long pageCount = i - 1; System.out.println("Total documents processed: " + pageCount + ", unique words: " + documentFrequency.size()); SignificanceModelFile modelFile; File outputFile = Paths.get(clientParameters.outputFile).toFile(); String languagesKey = String.join(",", this.languages.stream().map(Language::languageCode).toList()); if (outputFile.exists()) { InputStream in = outputFile.toString().endsWith(".zst") ? new ZstdInputStream(new FileInputStream(outputFile)) : new FileInputStream(outputFile); modelFile = objectMapper.readValue(in, SignificanceModelFile.class); modelFile.addLanguage(languagesKey, new DocumentFrequencyFile(DOC_FREQ_DESCRIPTION, pageCount, getFinalDocumentFrequency())); } else { HashMap<String, DocumentFrequencyFile> languages = new HashMap<>() {{ put(languagesKey, new DocumentFrequencyFile(DOC_FREQ_DESCRIPTION, pageCount, getFinalDocumentFrequency())); }}; modelFile = new SignificanceModelFile(VERSION, ID, SIGNIFICANCE_DESCRIPTION + clientParameters.inputFile, languages); } try { ObjectWriter writer = objectMapper.writerWithDefaultPrettyPrinter(); OutputStream out = useZstCompression ? new ZstdOutputStream(new FileOutputStream(clientParameters.outputFile)) : new FileOutputStream(clientParameters.outputFile); writer.writeValue(out, modelFile); } catch (IOException e) { throw new IllegalStateException("Failed to write model to output file", e); } }
@Test void testOverwriteExistingDocumentFrequencyLanguage() throws IOException { String inputPath = "no.jsonl"; String outputPath = "output.json"; ClientParameters params1 = createParameters(inputPath, outputPath, "text", "nb", "nb", "false").build(); SignificanceModelGenerator generator = createSignificanceModelGenerator(params1); generator.generate(); File outputFile = new File(tempDir.resolve(outputPath).toString()); assertTrue(outputFile.exists()); SignificanceModelFile preUpdatedFile = objectMapper.readValue(outputFile, SignificanceModelFile.class); HashMap<String, DocumentFrequencyFile> oldLanguages = preUpdatedFile.languages(); assertEquals(1, oldLanguages.size()); assertTrue(oldLanguages.containsKey("nb")); DocumentFrequencyFile oldDf = oldLanguages.get("nb"); assertEquals(3, oldDf.frequencies().get("fra")); assertEquals(3, oldDf.frequencies().get("skriveform")); assertFalse(oldDf.frequencies().containsKey("nytt")); String inputPath2 = "no_2.jsonl"; ClientParameters params2 = createParameters(inputPath2, outputPath, "text", "nb", "nb", "false").build(); SignificanceModelGenerator generator2 = createSignificanceModelGenerator(params2); generator2.generate(); outputFile = new File(tempDir.resolve(outputPath).toString()); assertTrue(outputFile.exists()); SignificanceModelFile modelFile = objectMapper.readValue(outputFile, SignificanceModelFile.class); HashMap<String, DocumentFrequencyFile> languages = modelFile.languages(); assertEquals(1, languages.size()); assertTrue(languages.containsKey("nb")); DocumentFrequencyFile df = languages.get("nb"); assertEquals(2, df.frequencies().get("fra")); assertEquals(3, df.frequencies().get("skriveform")); assertTrue(df.frequencies().containsKey("nytt")); assertEquals(2, df.frequencies().get("nytt")); }
@Override public boolean isEnabled(CeWorker ceWorker) { return ceWorker.getOrdinal() < ceConfiguration.getWorkerCount(); }
@Test public void isEnabled_returns_false_if_worker_ordinal_is_equal_to_CeConfiguration_workerCount() { when(ceWorker.getOrdinal()).thenReturn(randomWorkerCount); assertThat(underTest.isEnabled(ceWorker)).isFalse(); }
public void generateAcknowledgementPayload( MllpSocketBuffer mllpSocketBuffer, byte[] hl7MessageBytes, String acknowledgementCode) throws MllpAcknowledgementGenerationException { generateAcknowledgementPayload(mllpSocketBuffer, hl7MessageBytes, acknowledgementCode, null); }
@Test public void testGenerateAcknowledgementPayloadWithoutEnoughFields() { final byte[] testMessage = TEST_MESSAGE.replace("||ORM^O01|00001|D|2.3|||||||", "").getBytes(); MllpSocketBuffer mllpSocketBuffer = new MllpSocketBuffer(new MllpEndpointStub()); assertThrows(MllpAcknowledgementGenerationException.class, () -> hl7util.generateAcknowledgementPayload(mllpSocketBuffer, testMessage, "AA")); }
public static void deleteDirectory(File directory) throws IOException { requireNonNull(directory, DIRECTORY_CAN_NOT_BE_NULL); if (!directory.exists()) { return; } Path path = directory.toPath(); if (Files.isSymbolicLink(path)) { throw new IOException(format("Directory '%s' is a symbolic link", directory)); } if (directory.isFile()) { throw new IOException(format("Directory '%s' is a file", directory)); } deleteDirectoryImpl(path); }
@Test public void deleteDirectory_throws_IOE_if_file_is_symbolicLink() throws IOException { assumeTrue(SystemUtils.IS_OS_UNIX); Path folder = temporaryFolder.newFolder().toPath(); Path file1 = Files.createFile(folder.resolve("file1.txt")); Path symLink = Files.createSymbolicLink(folder.resolve("link1"), file1); assertThat(file1).isRegularFile(); assertThat(symLink).isSymbolicLink(); assertThatThrownBy(() -> FileUtils2.deleteDirectory(symLink.toFile())) .isInstanceOf(IOException.class) .hasMessage("Directory '" + symLink.toFile().getAbsolutePath() + "' is a symbolic link"); }
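The record above references deleteDirectoryImpl without showing it; a common way to implement such a recursive delete is a walkFileTree visitor, sketched below as an assumption rather than the library's actual code:

import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;

// Sketch of what deleteDirectoryImpl could look like (assumption: the real
// implementation is not shown in this record).
final class DeleteSketch {
    static void deleteDirectoryImpl(Path path) throws IOException {
        Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Files.delete(file); // delete files first
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                if (exc != null) throw exc;
                Files.delete(dir); // then the now-empty directories, bottom-up
                return FileVisitResult.CONTINUE;
            }
        });
    }
}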
@Override public void initialize(URI uri, Configuration conf) throws IOException { requireNonNull(uri, "uri is null"); requireNonNull(conf, "conf is null"); super.initialize(uri, conf); setConf(conf); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR)); HiveS3Config defaults = new HiveS3Config(); this.stagingDirectory = new File(conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString())); this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1; this.maxBackoffTime = Duration.valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString())); this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString())); int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries()); boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled()); Duration connectTimeout = Duration.valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString())); Duration socketTimeout = Duration.valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString())); int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections()); this.multiPartUploadMinFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes()); this.multiPartUploadMinPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes()); this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess()); this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS, defaults.isS3UseInstanceCredentials()); this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, defaults.isPinS3ClientToCurrentRegion()); this.s3IamRole = conf.get(S3_IAM_ROLE, defaults.getS3IamRole()); this.s3IamRoleSessionName = conf.get(S3_IAM_ROLE_SESSION_NAME, defaults.getS3IamRoleSessionName()); verify(!(useInstanceCredentials && conf.get(S3_IAM_ROLE) != null), "Invalid configuration: either use instance credentials or specify an iam role"); verify((pinS3ClientToCurrentRegion && conf.get(S3_ENDPOINT) == null) || !pinS3ClientToCurrentRegion, "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region"); this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled()); this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name())); this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId()); this.s3AclType = PrestoS3AclType.valueOf(conf.get(S3_ACL_TYPE, defaults.getS3AclType().name())); String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix()); this.skipGlacierObjects = conf.getBoolean(S3_SKIP_GLACIER_OBJECTS, defaults.isSkipGlacierObjects()); this.s3StorageClass = conf.getEnum(S3_STORAGE_CLASS, defaults.getS3StorageClass()); ClientConfiguration configuration = new ClientConfiguration() .withMaxErrorRetry(maxErrorRetries) .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP) .withConnectionTimeout(toIntExact(connectTimeout.toMillis())) .withSocketTimeout(toIntExact(socketTimeout.toMillis())) .withMaxConnections(maxConnections) .withUserAgentPrefix(userAgentPrefix) .withUserAgentSuffix(S3_USER_AGENT_SUFFIX); this.credentialsProvider = createAwsCredentialsProvider(uri, conf); this.s3 = createAmazonS3Client(conf, configuration); }
@Test public void testAssumeRoleCredentials() throws Exception { Configuration config = new Configuration(); config.set(S3_IAM_ROLE, "role"); config.setBoolean(S3_USE_INSTANCE_CREDENTIALS, false); try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) { fs.initialize(new URI("s3n://test-bucket/"), config); assertInstanceOf(getAwsCredentialsProvider(fs), STSAssumeRoleSessionCredentialsProvider.class); } }
@Override @CacheEvict(cacheNames = RedisKeyConstants.DEPT_CHILDREN_ID_LIST, allEntries = true) // allEntries clears the whole cache, because operating on one department can touch multiple cache entries public void deleteDept(Long id) { // validate that the department exists validateDeptExists(id); // validate that it has no child departments if (deptMapper.selectCountByParentId(id) > 0) { throw exception(DEPT_EXITS_CHILDREN); } // delete the department deptMapper.deleteById(id); }
@Test public void testDeleteDept_success() { // mock data DeptDO dbDeptDO = randomPojo(DeptDO.class); deptMapper.insert(dbDeptDO);// @Sql: insert an existing record first // prepare parameters Long id = dbDeptDO.getId(); // invoke deptService.deleteDept(id); // verify the record no longer exists assertNull(deptMapper.selectById(id)); }
public void clearAll() { synchronized (registeredHandlers) { registeredHandlers.clear(); } }
@Test void clearAll() throws Exception { ResultPartitionID partitionId = new ResultPartitionID(); TaskEventDispatcher ted = new TaskEventDispatcher(); ted.registerPartition(partitionId); //noinspection unchecked ZeroShotEventListener eventListener1 = new ZeroShotEventListener(); ted.subscribeToEvent(partitionId, eventListener1, AllWorkersDoneEvent.class); ted.clearAll(); assertThat(ted.publish(partitionId, new AllWorkersDoneEvent())).isFalse(); }
public Future<KafkaVersionChange> reconcile() { return getVersionFromController() .compose(i -> getPods()) .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
@Test public void testNoopWithAllVersionsFromSts(VertxTestContext context) { String kafkaVersion = VERSIONS.defaultVersion().version(); String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), mockNewCluster( mockSts(kafkaVersion), null, mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) ) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.defaultVersion())); assertThat(c.to(), is(VERSIONS.defaultVersion())); assertThat(c.interBrokerProtocolVersion(), nullValue()); assertThat(c.logMessageFormatVersion(), nullValue()); assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); async.flag(); }))); }
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
@Test public void testZukNewPb() { ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your TzKal-Zuk kill count is: <col=ff0000>2</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Duration: <col=ff0000>104:31</col> (new personal best)", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "tzkal-zuk", 104 * 60 + 31.0); verify(configManager).setRSProfileConfiguration("killcount", "tzkal-zuk", 2); // Precise times chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Duration: <col=ff0000>104:31.20</col> (new personal best)", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "tzkal-zuk", 104 * 60 + 31.2); }
@Override public void uploadPart(RefCountedFSOutputStream file) throws IOException { // this is to guarantee that nobody is // writing to the file we are uploading. checkState(file.isClosed()); final CompletableFuture<PartETag> future = new CompletableFuture<>(); uploadsInProgress.add(future); final long partLength = file.getPos(); currentUploadInfo.registerNewPart(partLength); file.retain(); // keep the file while the async upload still runs uploadThreadPool.execute(new UploadTask(s3AccessHelper, currentUploadInfo, file, future)); }
@Test public void multiplePartAndObjectUploadsShouldBeReflectedInRecoverable() throws IOException { final byte[] firstCompletePart = bytesOf("hello world"); final byte[] secondCompletePart = bytesOf("hello again"); final byte[] thirdIncompletePart = bytesOf("!!!"); uploadPart(firstCompletePart); uploadPart(secondCompletePart); final S3Recoverable recoverable = uploadObject(thirdIncompletePart); assertThat( recoverable, isEqualTo(thirdIncompletePart, firstCompletePart, secondCompletePart)); }
public static Expression generateFilterExpression(SearchArgument sarg) { return translate(sarg.getExpression(), sarg.getLeaves()); }
@Test public void testLessThanOperand() { SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); SearchArgument arg = builder.startAnd().lessThan("salary", PredicateLeaf.Type.LONG, 3000L).end().build(); UnboundPredicate expected = Expressions.lessThan("salary", 3000L); UnboundPredicate actual = (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg); assertEquals(actual.op(), expected.op()); assertEquals(actual.literal(), expected.literal()); assertEquals(actual.ref().name(), expected.ref().name()); }
@Override public void addServiceInstancesChangedListener(ServiceInstancesChangedListener listener) throws NullPointerException, IllegalArgumentException { // check if listener has already been added through another interface/service if (!instanceListeners.add(listener)) { return; } for (String serviceName : listener.getServiceNames()) { NacosEventListener nacosEventListener = eventListeners.get(serviceName); if (nacosEventListener != null) { nacosEventListener.addListener(listener); } else { try { nacosEventListener = new NacosEventListener(); nacosEventListener.addListener(listener); namingService.subscribe(serviceName, group, nacosEventListener); eventListeners.put(serviceName, nacosEventListener); } catch (NacosException e) { logger.error( REGISTRY_NACOS_EXCEPTION, "", "", "add nacos service instances changed listener fail ", e); } } } }
@Test void testAddServiceInstancesChangedListener() { List<ServiceInstance> serviceInstances = new LinkedList<>(); // Add Listener nacosServiceDiscovery.addServiceInstancesChangedListener( new ServiceInstancesChangedListener(Sets.newSet(SERVICE_NAME), nacosServiceDiscovery) { @Override public void onEvent(ServiceInstancesChangedEvent event) { serviceInstances.addAll(event.getServiceInstances()); } }); nacosServiceDiscovery.register(); nacosServiceDiscovery.update(); nacosServiceDiscovery.unregister(); assertTrue(serviceInstances.isEmpty()); }
@Override public T setLong(K name, long value) { throw new UnsupportedOperationException("read only"); }
@Test public void testSetLong() { assertThrows(UnsupportedOperationException.class, new Executable() { @Override public void execute() { HEADERS.setLong("name", 0); } }); }
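A minimal sketch of the read-only-view pattern behind the setter above (hypothetical class and names, purely to illustrate why every mutator throws while reads pass through):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Hypothetical read-only headers view: reads delegate, writes always throw.
final class ReadOnlyHeadersSketch {
    private final Map<String, String> delegate;

    ReadOnlyHeadersSketch(Map<String, String> headers) {
        this.delegate = Collections.unmodifiableMap(new HashMap<>(headers));
    }

    String get(String name) {
        return delegate.get(name); // reads are allowed
    }

    ReadOnlyHeadersSketch setLong(String name, long value) {
        throw new UnsupportedOperationException("read only"); // mutation is not
    }
}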
public static void applyLocaleToContext(@NonNull Context context, @Nullable String localeString) { final Locale forceLocale = LocaleTools.getLocaleForLocaleString(localeString); final Configuration configuration = context.getResources().getConfiguration(); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) { configuration.setLocale(forceLocale); } else { //noinspection deprecation configuration.locale = forceLocale; } context.getResources().updateConfiguration(configuration, null); }
@Test @Config(sdk = Build.VERSION_CODES.JELLY_BEAN_MR1) @Ignore("Robolectric does not support this API") public void testSetAndResetValueAPI17WithUnknownLocale() { Assert.assertEquals( "English (United States)", mContext.getResources().getConfiguration().locale.getDisplayName()); LocaleTools.applyLocaleToContext(mContext, "eu"); Assert.assertEquals("eu", mContext.getResources().getConfiguration().locale.getLanguage()); Assert.assertTrue( mContext.getResources().getConfiguration().locale.getDisplayName().contains("Basque")); LocaleTools.applyLocaleToContext(mContext, ""); Assert.assertSame(Locale.getDefault(), mContext.getResources().getConfiguration().locale); LocaleTools.applyLocaleToContext(mContext, "NONE_EXISTING"); Assert.assertEquals( "none_existing", mContext.getResources().getConfiguration().locale.getLanguage()); }
public Result getNewName( File destDir, String newPath ) { try { FileProvider<File> fileProvider = providerService.get( destDir.getProvider() ); return Result.success( "", fileProvider.getNewName( destDir, newPath, space ) ); } catch ( InvalidFileProviderException | FileException e ) { return null; } }
@Test public void testGetNewName() { TestDirectory testDirectory = new TestDirectory(); testDirectory.setPath( "/directory1" ); String newName = (String) fileController.getNewName( testDirectory, "/directory1/file1" ).getData(); Assert.assertEquals( "/directory1/file1 1", newName ); }
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset( RequestContext context, OffsetCommitRequestData request ) throws ApiException { Group group = validateOffsetCommit(context, request); // In the old consumer group protocol, the offset commits maintain the session if // the group is in Stable or PreparingRebalance state. if (group.type() == Group.GroupType.CLASSIC) { ClassicGroup classicGroup = (ClassicGroup) group; if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) { groupMetadataManager.rescheduleClassicGroupMemberHeartbeat( classicGroup, classicGroup.member(request.memberId()) ); } } final OffsetCommitResponseData response = new OffsetCommitResponseData(); final List<CoordinatorRecord> records = new ArrayList<>(); final long currentTimeMs = time.milliseconds(); final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs); request.topics().forEach(topic -> { final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name()); response.topics().add(topicResponse); topic.partitions().forEach(partition -> { if (isMetadataInvalid(partition.committedMetadata())) { topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code())); } else { log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.", request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(), request.memberId(), partition.committedLeaderEpoch()); topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.NONE.code())); final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest( partition, currentTimeMs, expireTimestampMs ); records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord( request.groupId(), topic.name(), partition.partitionIndex(), offsetAndMetadata, metadataImage.features().metadataVersion() )); } }); }); if (!records.isEmpty()) { metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size()); } return new CoordinatorResult<>(records, response); }
@Test public void testConsumerGroupOffsetCommitWithStaleMemberEpoch() { OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build(); // Create an empty group. ConsumerGroup group = context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup( "foo", true ); // Add member. group.updateMember(new ConsumerGroupMember.Builder("member") .setMemberEpoch(10) .setPreviousMemberEpoch(10) .build() ); OffsetCommitRequestData request = new OffsetCommitRequestData() .setGroupId("foo") .setMemberId("member") .setGenerationIdOrMemberEpoch(9) .setTopics(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("bar") .setPartitions(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(100L) )) )); // Verify that a smaller epoch is rejected. assertThrows(StaleMemberEpochException.class, () -> context.commitOffset(request)); // Verify that a larger epoch is rejected. request.setGenerationIdOrMemberEpoch(11); assertThrows(StaleMemberEpochException.class, () -> context.commitOffset(request)); }
@GetMapping("") @RequiresPermissions("system:role:list") public ShenyuAdminResult queryRole(final String roleName, @RequestParam @NotNull final Integer currentPage, @RequestParam @NotNull final Integer pageSize) { CommonPager<RoleVO> commonPager = roleService.listByPage(new RoleQuery(roleName, new PageParameter(currentPage, pageSize))); return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, commonPager); }
@Test public void testQueryRole() throws Exception { RoleVO roleVO = buildRoleVO(); PageParameter pageParameter = new PageParameter(); RoleQuery query = new RoleQuery(roleVO.getRoleName(), pageParameter); given(roleService.listByPage(query)).willReturn(new CommonPager<>(pageParameter, Collections.singletonList(roleVO))); String urlTemplate = "/role?roleName={roleName}&currentPage={currentPage}&pageSize={pageSize}"; this.mockMvc.perform(MockMvcRequestBuilders.get(urlTemplate, roleVO.getRoleName(), pageParameter.getCurrentPage(), pageParameter.getPageSize())) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS))) .andExpect(jsonPath("$.data.dataList[0].id", is(roleVO.getId()))) .andReturn(); }
@Override public void init(ServletConfig config) throws ServletException { super.init(config); final ServletContext context = config.getServletContext(); if (null == registry) { final Object registryAttr = context.getAttribute(HEALTH_CHECK_REGISTRY); if (registryAttr instanceof HealthCheckRegistry) { this.registry = (HealthCheckRegistry) registryAttr; } else { throw new ServletException("Couldn't find a HealthCheckRegistry instance."); } } final Object executorAttr = context.getAttribute(HEALTH_CHECK_EXECUTOR); if (executorAttr instanceof ExecutorService) { this.executorService = (ExecutorService) executorAttr; } final Object filterAttr = context.getAttribute(HEALTH_CHECK_FILTER); if (filterAttr instanceof HealthCheckFilter) { filter = (HealthCheckFilter) filterAttr; } if (filter == null) { filter = HealthCheckFilter.ALL; } final Object mapperAttr = context.getAttribute(HEALTH_CHECK_MAPPER); if (mapperAttr instanceof ObjectMapper) { this.mapper = (ObjectMapper) mapperAttr; } else { this.mapper = new ObjectMapper(); } this.mapper.registerModule(new HealthCheckModule()); final Object httpStatusIndicatorAttr = context.getAttribute(HEALTH_CHECK_HTTP_STATUS_INDICATOR); if (httpStatusIndicatorAttr instanceof Boolean) { this.httpStatusIndicator = (Boolean) httpStatusIndicatorAttr; } else { this.httpStatusIndicator = true; } }
@Test public void constructorWithRegistryAsArgumentUsesServletConfigWhenNull() throws Exception { final HealthCheckRegistry healthCheckRegistry = mock(HealthCheckRegistry.class); final ServletContext servletContext = mock(ServletContext.class); final ServletConfig servletConfig = mock(ServletConfig.class); when(servletConfig.getServletContext()).thenReturn(servletContext); when(servletContext.getAttribute(HealthCheckServlet.HEALTH_CHECK_REGISTRY)) .thenReturn(healthCheckRegistry); final HealthCheckServlet healthCheckServlet = new HealthCheckServlet(null); healthCheckServlet.init(servletConfig); verify(servletConfig, times(1)).getServletContext(); verify(servletContext, times(1)).getAttribute(HealthCheckServlet.HEALTH_CHECK_REGISTRY); }
@Override public HashSlotCursor16byteKey cursor() { return new CursorLongKey2(); }
@Test(expected = AssertionError.class) @RequireAssertEnabled public void testCursor_key1_withoutAdvance() { HashSlotCursor16byteKey cursor = hsa.cursor(); cursor.key1(); }
public Set<RemotingChannel> removeChannel(Channel channel) { Set<RemotingChannel> removedChannelSet = new HashSet<>(); Set<String> groupKeySet = groupChannelMap.keySet(); for (String group : groupKeySet) { RemotingChannel remotingChannel = removeChannel(group, channel); if (remotingChannel != null) { removedChannelSet.add(remotingChannel); } } return removedChannelSet; }
@Test public void testRemoveChannel() { String consumerGroup = "consumerGroup"; String producerGroup = "producerGroup"; String clientId = RandomStringUtils.randomAlphabetic(10); Channel consumerChannel = createMockChannel(); RemotingChannel consumerRemotingChannel = this.remotingChannelManager.createConsumerChannel(ctx, consumerChannel, consumerGroup, clientId, new HashSet<>()); Channel producerChannel = createMockChannel(); RemotingChannel producerRemotingChannel = this.remotingChannelManager.createProducerChannel(ctx, producerChannel, producerGroup, clientId); assertSame(consumerRemotingChannel, this.remotingChannelManager.removeChannel(consumerChannel).stream().findFirst().get()); assertSame(producerRemotingChannel, this.remotingChannelManager.removeChannel(producerChannel).stream().findFirst().get()); assertTrue(this.remotingChannelManager.groupChannelMap.isEmpty()); }
protected Date getTimeBefore(final Date targetDate) { final Calendar cl = Calendar.getInstance(getTimeZone()); // CronTrigger does not deal with milliseconds, so truncate target cl.setTime(targetDate); cl.set(Calendar.MILLISECOND, 0); final Date targetDateNoMs = cl.getTime(); // to match this Date start = targetDateNoMs; final long minIncrement = findMinIncrement(); Date prevFireTime; do { final Date prevCheckDate = new Date(start.getTime() - minIncrement); prevFireTime = getTimeAfter(prevCheckDate); if (prevFireTime == null || prevFireTime.before(MIN_DATE)) { return null; } start = prevCheckDate; } while (prevFireTime.compareTo(targetDateNoMs) >= 0); return prevFireTime; }
@Test void getTimeBefore() throws ParseException { CronExpression cronExpression = new CronExpression("0 0 8 1 JAN ?"); Date beforeDate = cronExpression.getTimeBefore(new Date()); System.out.println(beforeDate); assertThat(beforeDate).isNotNull(); cronExpression = new CronExpression("0 */5 * * * ?"); beforeDate = cronExpression.getTimeBefore(new Date()); System.out.println(beforeDate); assertThat(beforeDate).isNotNull(); }
static String effectiveServices(File servicesFile, ZoneId zone, CloudName cloud, InstanceName instance, Tags tags) throws Exception { Document processedServicesXml = new XmlPreProcessor(servicesFile.getParentFile(), servicesFile, instance, zone.environment(), zone.region(), cloud, tags) .run(); Transformer transformer = TransformerFactory.newInstance().newTransformer(); transformer.setOutputProperty(OutputKeys.INDENT, "yes"); Writer writer = new StringWriter(); transformer.transform(new DOMSource(processedServicesXml), new StreamResult(writer)); return writer.toString().replaceAll("\n(\\s*\n)+","\n"); }
@Test @DisplayName("when zone matches region-and-environment directive") void prodUsEast3() throws Exception { assertEquals(Files.readString(Paths.get("src/test/resources/effective-services/prod_us-east-3.xml")), effectiveServices(servicesFile, ZoneId.from("prod", "us-east-3"), CloudName.DEFAULT, InstanceName.defaultName(), Tags.empty())); }
@Override public Object decode(Response response, Type type) throws IOException, DecodeException { if (response.status() == 404 || response.status() == 204) if (JSONObject.class.isAssignableFrom((Class<?>) type)) return new JSONObject(); else if (JSONArray.class.isAssignableFrom((Class<?>) type)) return new JSONArray(); else if (String.class.equals(type)) return null; else throw new DecodeException(response.status(), format("%s is not a type supported by this decoder.", type), response.request()); if (response.body() == null) return null; try (Reader reader = response.body().asReader(response.charset())) { Reader bodyReader = (reader.markSupported()) ? reader : new BufferedReader(reader); bodyReader.mark(1); if (bodyReader.read() == -1) { return null; // Empty body } bodyReader.reset(); return decodeBody(response, type, bodyReader); } catch (JSONException jsonException) { if (jsonException.getCause() != null && jsonException.getCause() instanceof IOException) { throw (IOException) jsonException.getCause(); } throw new DecodeException(response.status(), jsonException.getMessage(), response.request(), jsonException); } }
@Test void decodeExtendedObject() throws IOException { String json = "{\"a\":\"b\",\"c\":1}"; Response response = Response.builder() .status(200) .reason("OK") .headers(Collections.emptyMap()) .body(json, UTF_8) .request(request) .build(); assertThat(jsonObject.similar(new JsonDecoder().decode(response, ExtendedJSONObject.class))) .isTrue(); }
public void refresh(List<Pair<T>> itemsWithWeight) { Ref<T> newRef = new Ref<>(itemsWithWeight); newRef.refresh(); newRef.poller = this.ref.poller.refresh(newRef.items); this.ref = newRef; }
@Test void testRefresh() { Chooser<String, String> chooser = new Chooser<>("test"); assertEquals("test", chooser.getUniqueKey()); assertNull(chooser.random()); List<Pair<String>> list = new LinkedList<>(); list.add(new Pair<>("test", 1)); chooser.refresh(list); String actual = chooser.random(); assertNotNull(actual); assertEquals("test", actual); }
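The Ref and poller internals of the Chooser are not shown in this record; the sketch below illustrates the weighted-random idea such a class typically implements (an assumption about intent, not Nacos' actual code):

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

// Weighted-random selection sketch: items are picked in proportion to their weights.
final class WeightedChooserSketch<T> {
    private final List<T> items;
    private final double[] cumulative; // prefix sums of the weights

    WeightedChooserSketch(List<T> items, double[] weights) {
        this.items = items;
        this.cumulative = new double[weights.length];
        double sum = 0;
        for (int i = 0; i < weights.length; i++) {
            sum += weights[i];
            cumulative[i] = sum;
        }
    }

    T random() {
        if (items.isEmpty()) {
            return null; // mirrors chooser.random() returning null before refresh
        }
        double r = ThreadLocalRandom.current().nextDouble(cumulative[cumulative.length - 1]);
        for (int i = 0; i < cumulative.length; i++) {
            if (r < cumulative[i]) {
                return items.get(i);
            }
        }
        return items.get(items.size() - 1); // guard against floating-point edge cases
    }
}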
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { if(directory.isRoot()) { final AttributedList<Path> list = new AttributedList<>(); list.add(MYFILES_NAME); list.add(SHARED_NAME); listener.chunk(directory, list); return list; } else if(new SimplePathPredicate(SHARED_NAME).test(directory)) { return new SharedWithMeListService(session, fileid).list(directory, listener); } else { return new GraphItemListService(session, fileid).list(directory, listener); } }
@Test public void testListDrives() throws Exception { // "Drives" are rather placeholders for "My Files" and "Shared". final AttributedList<Path> list = new OneDriveListService(session, fileid).list(new Path("/", EnumSet.of(Path.Type.directory)), new DisabledListProgressListener()); assertFalse(list.isEmpty()); for(Path f : list) { assertEquals(new Path("/", EnumSet.of(Path.Type.directory)), f.getParent()); } assertTrue(list.contains(new OneDriveHomeFinderService().find())); }
public void alterTableProperties(Database db, OlapTable table, Map<String, String> properties) throws DdlException { Map<String, String> propertiesToPersist = new HashMap<>(properties); Map<String, Object> results = validateToBeModifiedProps(properties, table); TableProperty tableProperty = table.getTableProperty(); for (String key : results.keySet()) { if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER)) { int partitionLiveNumber = (int) results.get(key); tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER, String.valueOf(partitionLiveNumber)); if (partitionLiveNumber == TableProperty.INVALID) { GlobalStateMgr.getCurrentState().getDynamicPartitionScheduler().removeTtlPartitionTable(db.getId(), table.getId()); } else { GlobalStateMgr.getCurrentState().getDynamicPartitionScheduler().registerTtlPartitionTable(db.getId(), table.getId()); } tableProperty.setPartitionTTLNumber(partitionLiveNumber); ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), ImmutableMap.of(key, propertiesToPersist.get(key))); GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); } if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM)) { DataProperty dataProperty = (DataProperty) results.get(key); TStorageMedium storageMedium = dataProperty.getStorageMedium(); table.setStorageMedium(storageMedium); tableProperty.getProperties() .put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TIME, String.valueOf(dataProperty.getCooldownTimeMs())); ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), ImmutableMap.of(key, propertiesToPersist.get(key))); GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); } if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL)) { String storageCoolDownTTL = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL); tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL, storageCoolDownTTL); tableProperty.buildStorageCoolDownTTL(); ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), ImmutableMap.of(key, propertiesToPersist.get(key))); GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); } if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION)) { String partitionDuration = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION); tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION, partitionDuration); tableProperty.buildDataCachePartitionDuration(); ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), ImmutableMap.of(key, propertiesToPersist.get(key))); GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); } if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION)) { String location = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION); table.setLocation(location); ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), ImmutableMap.of(key, propertiesToPersist.get(key))); GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); } } }
@Test public void testAlterTableProperties() throws Exception { Database db = connectContext.getGlobalStateMgr().getDb("test"); OlapTable table = (OlapTable) db.getTable("t1"); Map<String, String> properties = Maps.newHashMap(); properties.put(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION, "abcd"); LocalMetastore localMetastore = connectContext.getGlobalStateMgr().getLocalMetastore(); try { localMetastore.alterTableProperties(db, table, properties); } catch (RuntimeException e) { Assert.assertEquals("Cannot parse text to Duration", e.getMessage()); } }
@Override @CacheEvict(value = RedisKeyConstants.PERMISSION_MENU_ID_LIST, allEntries = true) // allEntries clears the whole cache: when a permission changes, both the old and the new permission are affected, so wiping everything is simple and effective public void updateMenu(MenuSaveVO updateReqVO) { // validate that the menu being updated exists if (menuMapper.selectById(updateReqVO.getId()) == null) { throw exception(MENU_NOT_EXISTS); } // validate that the parent menu exists validateParentMenu(updateReqVO.getParentId(), updateReqVO.getId()); // validate the menu itself validateMenu(updateReqVO.getParentId(), updateReqVO.getName(), updateReqVO.getId()); // persist to the database MenuDO updateObj = BeanUtils.toBean(updateReqVO, MenuDO.class); initMenuProperty(updateObj); menuMapper.updateById(updateObj); }
@Test public void testUpdateMenu_sonIdNotExist() { // prepare parameters MenuSaveVO reqVO = randomPojo(MenuSaveVO.class); // invoke and assert the exception assertServiceException(() -> menuService.updateMenu(reqVO), MENU_NOT_EXISTS); }
@Override public FSDataOutputStream create(Path path, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { String confUmask = mAlluxioConf.getString(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK); Mode mode = ModeUtils.applyFileUMask(Mode.defaults(), confUmask); return this.create(path, new FsPermission(mode.toShort()), overwrite, bufferSize, replication, blockSize, progress); }
@Test public void resetContextUsingZookeeperUris() throws Exception { // Change to single zookeeper uri URI uri = URI.create(Constants.HEADER + "zk@zkHost:2181/"); FileSystem fs = getHadoopFilesystem(org.apache.hadoop.fs.FileSystem.get(uri, getConf())); assertTrue(fs.mFileSystem.getConf().getBoolean(PropertyKey.ZOOKEEPER_ENABLED)); assertEquals("zkHost:2181", fs.mFileSystem.getConf().get(PropertyKey.ZOOKEEPER_ADDRESS)); uri = URI.create(Constants.HEADER + "zk@host1:2181,host2:2181,host3:2181/tmp/path.txt"); fs = getHadoopFilesystem(org.apache.hadoop.fs.FileSystem.get(uri, getConf())); assertTrue(fs.mFileSystem.getConf().getBoolean(PropertyKey.ZOOKEEPER_ENABLED)); assertEquals("host1:2181,host2:2181,host3:2181", fs.mFileSystem.getConf().get(PropertyKey.ZOOKEEPER_ADDRESS)); uri = URI.create(Constants.HEADER + "zk@host1:2181;host2:2181;host3:2181/tmp/path.txt"); fs = getHadoopFilesystem(org.apache.hadoop.fs.FileSystem.get(uri, getConf())); assertTrue(fs.mFileSystem.getConf().getBoolean(PropertyKey.ZOOKEEPER_ENABLED)); assertEquals("host1:2181,host2:2181,host3:2181", fs.mFileSystem.getConf().get(PropertyKey.ZOOKEEPER_ADDRESS)); }
@Override protected void activate() { move(0, 0, 20); playSound("SKYLAUNCH_SOUND", 1); spawnParticles("SKYLAUNCH_PARTICLE", 100); }
@Test void testActivate() throws Exception { var skyLaunch = new SkyLaunch(); var logs = tapSystemOutNormalized(skyLaunch::activate) .split("\n"); final var expectedSize = 3; final var log1 = getLogContent(logs[0]); final var expectedLog1 = "Move to ( 0.0, 0.0, 20.0 )"; final var log2 = getLogContent(logs[1]); final var expectedLog2 = "Play SKYLAUNCH_SOUND with volume 1"; final var log3 = getLogContent(logs[2]); final var expectedLog3 = "Spawn 100 particle with type SKYLAUNCH_PARTICLE"; assertEquals(logs.length, expectedSize); assertEquals(log1, expectedLog1); assertEquals(log2, expectedLog2); assertEquals(log3, expectedLog3); }
public static <T, E extends Throwable> CompletableFuture<T> handleException( CompletableFuture<? extends T> completableFuture, Class<E> exceptionClass, Function<? super E, ? extends T> exceptionHandler) { final CompletableFuture<T> handledFuture = new CompletableFuture<>(); checkNotNull(completableFuture) .whenComplete( (result, throwable) -> { if (throwable == null) { handledFuture.complete(result); } else if (exceptionClass.isAssignableFrom(throwable.getClass())) { final E exception = exceptionClass.cast(throwable); try { handledFuture.complete(exceptionHandler.apply(exception)); } catch (Throwable t) { handledFuture.completeExceptionally(t); } } else { handledFuture.completeExceptionally(throwable); } }); return handledFuture; }
@Test void testHandleExceptionWithCompletedFuture() { final CompletableFuture<String> future = CompletableFuture.completedFuture("foobar"); final CompletableFuture<String> handled = FutureUtils.handleException(future, Exception.class, exception -> "handled"); assertThatFuture(handled).eventuallySucceeds().isEqualTo("foobar"); }
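A small usage sketch of the helper above, showing that only the named exception type is mapped to a fallback while other failures still propagate (FutureUtils refers to the class shown above; the rest is illustrative):

import java.util.concurrent.CompletableFuture;

// Usage sketch for handleException.
public class HandleExceptionDemo {
    public static void main(String[] args) {
        CompletableFuture<String> failing = new CompletableFuture<>();
        failing.completeExceptionally(new IllegalStateException("boom"));

        // IllegalStateException is handled and replaced by a fallback value.
        CompletableFuture<String> recovered =
                FutureUtils.handleException(failing, IllegalStateException.class, e -> "fallback");
        System.out.println(recovered.join()); // prints "fallback"

        // Any other exception type would pass through untouched and
        // complete the returned future exceptionally.
    }
}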
public static String toHumanReadable(long size) { if (size < 0) return String.valueOf(size); if (size >= EB) return formatSize(size, EB, "EB"); if (size >= PB) return formatSize(size, PB, "PB"); if (size >= TB) return formatSize(size, TB, "TB"); if (size >= GB) return formatSize(size, GB, "GB"); if (size >= MB) return formatSize(size, MB, "MB"); if (size >= KB) return formatSize(size, KB, "KB"); return formatSize(size, BYTE, "B"); }
@Test public void toHumanReadableTest() { Map<Long, String> capacityTable = new HashMap<Long, String>() { { put(-1L, "-1"); put(0L, "0B"); put(1023L, "1023B"); put(1024L, "1KB"); put(12_345L, "12.06KB"); put(10_123_456L, "9.65MB"); put(10_123_456_798L, "9.43GB"); put(123 * 1024L * 1024L * 1024L * 1024L, "123TB"); put(123 * 1024L * 1024L * 1024L * 1024L * 1024L, "123PB"); put(1_777_777_777_777_777_777L, "1.54EB"); } }; capacityTable.forEach((in, expected) -> Assert.assertEquals(expected, MessageStoreUtil.toHumanReadable(in))); }
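formatSize and the unit constants are not part of this record; the sketch below is one plausible implementation whose behavior matches the test's expectations (powers of 1024, at most two decimals, trailing zeros trimmed) — an assumption, not necessarily the project's actual helper:

import java.math.BigDecimal;
import java.math.RoundingMode;

// Sketch of the helper and constants implied above.
final class SizeFormatSketch {
    static final long BYTE = 1L, KB = BYTE << 10, MB = KB << 10, GB = MB << 10,
            TB = GB << 10, PB = TB << 10, EB = PB << 10;

    static String formatSize(long size, long unit, String suffix) {
        return BigDecimal.valueOf((double) size / unit)
                .setScale(2, RoundingMode.HALF_UP) // at most two decimals
                .stripTrailingZeros()              // "1.00" -> "1", "123.00" -> "123"
                .toPlainString() + suffix;
    }

    public static void main(String[] args) {
        System.out.println(formatSize(12_345L, KB, "KB")); // 12.06KB
        System.out.println(formatSize(1024L, KB, "KB"));   // 1KB
    }
}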
public synchronized long nextGtid() { long timestamp = timeGen(); if (timestamp < lastTimestamp) { timestamp = lastTimestamp; } if (lastTimestamp == timestamp) { sequence = (sequence + 1) & MAX_SEQUENCE; if (sequence == 0) { timestamp += 1; } } else { sequence = 0L; } if (timestamp - EPOCH >= (1L << 42)) { throw new IllegalStateException("Timestamp overflow"); } lastTimestamp = timestamp; return ((timestamp - EPOCH) << TIMESTAMP_SHIFT) | (CLUSTER_ID << CLUSTER_ID_SHIFT) | sequence; }
@Test public void testGtidTimestampAdvancement() { long firstGtid = gtidGenerator.nextGtid(); long secondGtid = gtidGenerator.nextGtid(); long firstTimestamp = firstGtid >> GtidGenerator.TIMESTAMP_SHIFT; long secondTimestamp = secondGtid >> GtidGenerator.TIMESTAMP_SHIFT; Assertions.assertTrue(secondTimestamp >= firstTimestamp, "Timestamp should advance or stay the same on subsequent GTIDs"); }
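A small decoding sketch for the GTID layout above; the concrete shift widths here are assumptions for illustration (the generator only shows that a 42-bit timestamp occupies the high bits, with CLUSTER_ID and a sequence below it — the real constants live in GtidGenerator):

// Decode sketch for the GTID bit layout above.
final class GtidDecodeSketch {
    static final int TIMESTAMP_SHIFT = 22;  // assumed: 42-bit timestamp in the high bits
    static final int CLUSTER_ID_SHIFT = 12; // assumed: 10-bit cluster id next
    static final long MAX_SEQUENCE = (1L << CLUSTER_ID_SHIFT) - 1; // low 12 bits

    static long timestampPart(long gtid) { return gtid >>> TIMESTAMP_SHIFT; }
    static long clusterId(long gtid) {
        return (gtid >>> CLUSTER_ID_SHIFT) & ((1L << (TIMESTAMP_SHIFT - CLUSTER_ID_SHIFT)) - 1);
    }
    static long sequence(long gtid) { return gtid & MAX_SEQUENCE; }

    public static void main(String[] args) {
        long gtid = (123L << TIMESTAMP_SHIFT) | (5L << CLUSTER_ID_SHIFT) | 7L;
        System.out.println(timestampPart(gtid)); // 123
        System.out.println(clusterId(gtid));     // 5
        System.out.println(sequence(gtid));      // 7
    }
}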
public void checkPAT(String serverUrl, String token) { String url = String.format("%s/_apis/projects?%s", getTrimmedUrl(serverUrl), API_VERSION_3); doGet(token, url); }
@Test public void check_pat() throws InterruptedException { enqueueResponse(200, " { \"count\": 1,\n" + " \"value\": [\n" + " {\n" + " \"id\": \"3311cd05-3f00-4a5e-b47f-df94a9982b6e\",\n" + " \"name\": \"Project\",\n" + " \"description\": \"Project Description\",\n" + " \"url\": \"https://ado.sonarqube.com/DefaultCollection/_apis/projects/3311cd05-3f00-4a5e-b47f-df94a9982b6e\",\n" + " \"state\": \"wellFormed\",\n" + " \"revision\": 63,\n" + " \"visibility\": \"private\"\n" + " }]}"); underTest.checkPAT(server.url("").toString(), "token"); RecordedRequest request = server.takeRequest(10, TimeUnit.SECONDS); String azureDevOpsUrlCall = request.getRequestUrl().toString(); assertThat(azureDevOpsUrlCall).isEqualTo(server.url("") + "_apis/projects?api-version=3.0"); assertThat(request.getMethod()).isEqualTo("GET"); assertThat(logTester.logs(Level.DEBUG)) .contains("--> GET " + server.url("").toString() + "_apis/projects?api-version=3.0"); }
@Override public BarSeries aggregate(BarSeries series, String aggregatedSeriesName) { final List<Bar> aggregatedBars = barAggregator.aggregate(series.getBarData()); return new BaseBarSeries(aggregatedSeriesName, aggregatedBars); }
@Test public void testAggregateWithTheSameName() { final List<Bar> bars = new LinkedList<>(); final ZonedDateTime time = ZonedDateTime.of(2019, 6, 12, 4, 1, 0, 0, ZoneId.systemDefault()); final Bar bar0 = new MockBar(time, 1d, 2d, 3d, 4d, 5d, 6d, 7, numFunction); final Bar bar1 = new MockBar(time.plusDays(1), 2d, 3d, 3d, 4d, 5d, 6d, 7, numFunction); final Bar bar2 = new MockBar(time.plusDays(2), 3d, 4d, 4d, 5d, 6d, 7d, 7, numFunction); bars.add(bar0); bars.add(bar1); bars.add(bar2); final BarSeries barSeries = new BaseBarSeries("name", bars); final BarSeries aggregated = baseBarSeriesAggregator.aggregate(barSeries); assertEquals("name", aggregated.getName()); assertEquals(2, aggregated.getBarCount()); assertSame(bar0, aggregated.getBar(0)); assertSame(bar2, aggregated.getBar(1)); }
private KsqlScalarFunction createFunction( final Class theClass, final UdfDescription udfDescriptionAnnotation, final Udf udfAnnotation, final Method method, final String path, final String sensorName, final Class<? extends Kudf> udfClass ) { // sanity check FunctionLoaderUtils .instantiateFunctionInstance(method.getDeclaringClass(), udfDescriptionAnnotation.name()); final FunctionInvoker invoker = FunctionLoaderUtils.createFunctionInvoker(method); final String functionName = udfDescriptionAnnotation.name(); LOGGER.info("Adding function " + functionName + " for method " + method); final List<ParameterInfo> parameters = FunctionLoaderUtils .createParameters(method, functionName, typeParser); final ParamType javaReturnSchema = FunctionLoaderUtils .getReturnType(method, udfAnnotation.schema(), typeParser); final SchemaProvider schemaProviderFunction = FunctionLoaderUtils .handleUdfReturnSchema( theClass, javaReturnSchema, udfAnnotation.schema(), typeParser, udfAnnotation.schemaProvider(), udfDescriptionAnnotation.name(), method.isVarArgs() ); return KsqlScalarFunction.create( schemaProviderFunction, javaReturnSchema, parameters, FunctionName.of(functionName.toUpperCase()), udfClass, getUdfFactory(method, udfDescriptionAnnotation, functionName, invoker, sensorName), udfAnnotation.description(), path, method.isVarArgs() ); }
@Test @SuppressWarnings("unchecked") public void shouldPassSqlInputTypesToUdafs() throws Exception { final UdafFactoryInvoker creator = createUdafLoader().createUdafFactoryInvoker( TestUdaf.class.getMethod("createSumT"), FunctionName.of("test-udf"), "desc", new String[]{""}, "", ""); final KsqlAggregateFunction<Long, Long, Long> executable = creator.createFunction(AggregateFunctionInitArguments.EMPTY_ARGS, Collections.singletonList(SqlArgument.of(SqlTypes.BIGINT))); executable.aggregate(1L, 1L); Long agg = executable.aggregate(1L, 1L); assertThat(agg, equalTo(2L)); }
@Override public DiagnosticsBuilder getDiagnosticsInfo() { return diagnostics() .withTitle("Concurrent modified jobs:") .with(concurrentJobModificationResolveResults, (resolveResult, diagnosticsBuilder) -> appendDiagnosticsInfo(diagnosticsBuilder, resolveResult)); }
@Test void canGenerateCorrectDiagnosticsInfoEvenWithOnly1State() { final Job localJob = anEnqueuedJob().build(); final Job jobFromStorage = aFailedJob().build(); final ConcurrentJobModificationResolveResult resolveResult = ConcurrentJobModificationResolveResult.failed(localJob, jobFromStorage); final UnresolvableConcurrentJobModificationException unresolvableConcurrentJobModificationException = new UnresolvableConcurrentJobModificationException(singletonList(resolveResult), new Exception()); final String markDown = unresolvableConcurrentJobModificationException.getDiagnosticsInfo().asMarkDown(); assertThat(markDown) .containsPattern("ENQUEUED") .containsPattern("FAILED (.*) ← PROCESSING (.*) ← ENQUEUED"); }
public boolean isValid(String value) { if (value == null) { return false; } URI uri; // ensure value is a valid URI try { uri = new URI(value); } catch (URISyntaxException e) { return false; } // OK, perfom additional validation String scheme = uri.getScheme(); if (!isValidScheme(scheme)) { return false; } String authority = uri.getRawAuthority(); if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority return true; // this is a local file - nothing more to do here } else if ("file".equals(scheme) && authority != null && authority.contains(":")) { return false; } else { // Validate the authority if (!isValidAuthority(authority)) { return false; } } if (!isValidPath(uri.getRawPath())) { return false; } if (!isValidQuery(uri.getRawQuery())) { return false; } if (!isValidFragment(uri.getRawFragment())) { return false; } return true; }
@Test public void testValidator361() { UrlValidator validator = new UrlValidator(); assertTrue(validator.isValid("http://hello.tokyo/")); }
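A short usage sketch of the validator above (the first URL is taken from the test; the others exercise the null guard and the scheme check visible in the method):

import org.apache.commons.validator.routines.UrlValidator;

// Usage sketch for the validator shown above.
public class UrlValidatorDemo {
    public static void main(String[] args) {
        UrlValidator validator = new UrlValidator();
        System.out.println(validator.isValid("http://hello.tokyo/")); // true
        System.out.println(validator.isValid("hello.tokyo"));         // false: no scheme
        System.out.println(validator.isValid(null));                  // false: guarded up front
    }
}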
@Override @CheckForNull public EmailMessage format(Notification notif) { if (!(notif instanceof ChangesOnMyIssuesNotification)) { return null; } ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif; if (notification.getChange() instanceof AnalysisChange) { checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty"); return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification); } return formatMultiProject(notification); }
@Test public void formats_returns_html_message_for_single_issue_on_branch_when_analysis_change() { String branchName = randomAlphabetic(6); Project project = newBranch("1", branchName); String ruleName = randomAlphabetic(8); String host = randomAlphabetic(15); String key = "key"; ChangedIssue changedIssue = newChangedIssue(key, randomValidStatus(), project, ruleName, randomRuleTypeHotspotExcluded()); AnalysisChange analysisChange = newAnalysisChange(); when(emailSettings.getServerBaseURL()).thenReturn(host); EmailMessage emailMessage = underTest.format(new ChangesOnMyIssuesNotification(analysisChange, ImmutableSet.of(changedIssue))); HtmlFragmentAssert.assertThat(emailMessage.getMessage()) .hasParagraph().hasParagraph() // skip header .hasParagraph()// skip title based on status .hasList("Rule " + ruleName + " - See the single issue") .withLink("See the single issue", host + "/project/issues?id=" + project.getKey() + "&branch=" + branchName + "&issues=" + changedIssue.getKey() + "&open=" + changedIssue.getKey()) .hasParagraph().hasParagraph() // skip footer .noMoreBlock(); }
public static InternalFactHandle[] orderFacts(ObjectStore objectStore) {
    // this method is just needed for testing purposes, to allow round tripping
    int size = objectStore.size();
    InternalFactHandle[] handles = new InternalFactHandle[size];
    int i = 0;
    for (Iterator<InternalFactHandle> it = objectStore.iterateFactHandles(); it.hasNext(); ) {
        handles[i++] = it.next();
    }

    Arrays.sort(handles, new HandleSorter());

    return handles;
}

@Test
public void testOrderFacts() throws Exception {
    List<InternalFactHandle> list = new ArrayList<InternalFactHandle>();
    List<Integer> ids = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 30, 31, 32, -2147483640, 7, 8, 9, 10, 11, 12, 13,
            14, 15, 28, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27);
    for (Integer i : ids) {
        list.add(new DefaultFactHandle(i.intValue(), i));
    }

    InternalFactHandle first = ProtobufOutputMarshaller.orderFacts(list)[0];
    assertThat(first.getId()).isEqualTo(-2147483640);
}
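HandleSorter itself is not shown in this excerpt. Judging from the order the test expects (the most negative id first, then ascending), a plain ascending-id comparator would reproduce it; this is an assumption, not the Drools implementation:

import java.util.Comparator;

static class HandleSorter implements Comparator<InternalFactHandle> {
    @Override
    public int compare(InternalFactHandle h1, InternalFactHandle h2) {
        return Long.compare(h1.getId(), h2.getId()); // ascending by handle id
    }
}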
@InvokeOnHeader(Web3jConstants.ETH_GET_COMPILERS)
void ethGetCompilers(Message message) throws IOException {
    Request<?, EthGetCompilers> request = web3j.ethGetCompilers();
    setRequestId(message, request);
    EthGetCompilers response = request.send();
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.getCompilers());
    }
}

@Test
public void ethGetCompilersTest() throws Exception {
    EthGetCompilers response = Mockito.mock(EthGetCompilers.class);
    Mockito.when(mockWeb3j.ethGetCompilers()).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getCompilers()).thenReturn(Collections.EMPTY_LIST);

    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_COMPILERS);
    template.send(exchange);
    List body = exchange.getIn().getBody(List.class);
    assertTrue(body.isEmpty());
}
public static String prepareUrl(@NonNull String url) {
    url = url.trim();
    String lowerCaseUrl = url.toLowerCase(Locale.ROOT); // protocol names are case insensitive
    if (lowerCaseUrl.startsWith("feed://")) {
        Log.d(TAG, "Replacing feed:// with http://");
        return prepareUrl(url.substring("feed://".length()));
    } else if (lowerCaseUrl.startsWith("pcast://")) {
        Log.d(TAG, "Removing pcast://");
        return prepareUrl(url.substring("pcast://".length()));
    } else if (lowerCaseUrl.startsWith("pcast:")) {
        Log.d(TAG, "Removing pcast:");
        return prepareUrl(url.substring("pcast:".length()));
    } else if (lowerCaseUrl.startsWith("itpc")) {
        Log.d(TAG, "Replacing itpc:// with http://");
        return prepareUrl(url.substring("itpc://".length()));
    } else if (lowerCaseUrl.startsWith(AP_SUBSCRIBE)) {
        Log.d(TAG, "Removing antennapod-subscribe://");
        return prepareUrl(url.substring(AP_SUBSCRIBE.length()));
    } else if (lowerCaseUrl.contains(AP_SUBSCRIBE_DEEPLINK)) {
        Log.d(TAG, "Removing " + AP_SUBSCRIBE_DEEPLINK);
        String query = Uri.parse(url).getQueryParameter("url");
        try {
            return prepareUrl(URLDecoder.decode(query, "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            return prepareUrl(query);
        }
    } else if (!(lowerCaseUrl.startsWith("http://") || lowerCaseUrl.startsWith("https://"))) {
        Log.d(TAG, "Adding http:// at the beginning of the URL");
        return "http://" + url;
    } else {
        return url;
    }
}

@Test
public void testItpcProtocol() {
    final String in = "itpc://example.com";
    final String out = UrlChecker.prepareUrl(in);
    assertEquals("http://example.com", out);
}
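A few illustrative inputs and the normalisation the branches above produce (hypothetical URLs, not taken from the test suite):

UrlChecker.prepareUrl("feed://example.com/rss");       // -> "http://example.com/rss"
UrlChecker.prepareUrl("pcast:feed://example.com/rss"); // -> "http://example.com/rss", handled recursively
UrlChecker.prepareUrl("example.com/rss");              // -> "http://example.com/rss", scheme prepended
UrlChecker.prepareUrl("HTTPS://example.com/rss");      // returned unchanged: the scheme check is case-insensitive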
@Override
public Optional<Instant> getStartedAt() {
    return Optional.ofNullable(startedAt);
}

@Test
void getStartedAt_whenComponentIsCreated_shouldNotBePresent() {
    assertThat(underTest.getStartedAt()).isEmpty();
}
public static BadRequestException create(String... errorMessages) {
    return create(asList(errorMessages));
}

@Test
public void fail_when_creating_exception_with_empty_list() {
    assertThatThrownBy(() -> BadRequestException.create(Collections.emptyList()))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("At least one error message is required");
}
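A small sketch of how the varargs factory relates to the list overload it delegates to; the empty-input behaviour is inferred from the test above, not from the method body:

BadRequestException e = BadRequestException.create("first error", "second error");
// equivalent to BadRequestException.create(asList("first error", "second error"));
// BadRequestException.create() with no arguments is expected to fail
// with the same IllegalArgumentException as the empty-list call in the test.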
@ScalarFunction
@SqlType(StandardTypes.BOOLEAN)
public static boolean isJsonScalar(@SqlType(StandardTypes.JSON) Slice json) {
    try (JsonParser parser = createJsonParser(JSON_FACTORY, json)) {
        JsonToken nextToken = parser.nextToken();
        if (nextToken == null) {
            throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: " + truncateIfNecessaryForErrorMessage(json));
        }

        if (nextToken == START_ARRAY || nextToken == START_OBJECT) {
            parser.skipChildren();
            if (parser.nextToken() != null) {
                // extra trailing token after json array/object
                throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: " + truncateIfNecessaryForErrorMessage(json));
            }
            return false;
        }

        if (parser.nextToken() != null) {
            // extra trailing token after json scalar
            throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: " + truncateIfNecessaryForErrorMessage(json));
        }
        return true;
    } catch (IOException e) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: " + truncateIfNecessaryForErrorMessage(json));
    }
}

@Test
public void testIsJsonScalar() {
    assertFunction("IS_JSON_SCALAR(null)", BOOLEAN, null);

    assertFunction("IS_JSON_SCALAR(JSON 'null')", BOOLEAN, true);
    assertFunction("IS_JSON_SCALAR(JSON 'true')", BOOLEAN, true);
    assertFunction("IS_JSON_SCALAR(JSON '1')", BOOLEAN, true);
    assertFunction("IS_JSON_SCALAR(JSON '\"str\"')", BOOLEAN, true);
    assertFunction("IS_JSON_SCALAR('null')", BOOLEAN, true);
    assertFunction("IS_JSON_SCALAR('true')", BOOLEAN, true);
    assertFunction("IS_JSON_SCALAR('1')", BOOLEAN, true);
    assertFunction("IS_JSON_SCALAR('\"str\"')", BOOLEAN, true);

    assertFunction("IS_JSON_SCALAR(JSON '[1, 2, 3]')", BOOLEAN, false);
    assertFunction("IS_JSON_SCALAR(JSON '{\"a\": 1, \"b\": 2}')", BOOLEAN, false);
    assertFunction("IS_JSON_SCALAR('[1, 2, 3]')", BOOLEAN, false);
    assertFunction("IS_JSON_SCALAR('{\"a\": 1, \"b\": 2}')", BOOLEAN, false);

    assertInvalidFunction("IS_JSON_SCALAR('')", INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: ");
    assertInvalidFunction("IS_JSON_SCALAR('[1')", INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: [1");
    assertInvalidFunction("IS_JSON_SCALAR('1 trailing')", INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: 1 trailing");
    assertInvalidFunction("IS_JSON_SCALAR('[1, 2] trailing')", INVALID_FUNCTION_ARGUMENT, "Invalid JSON value: [1, 2] trailing");
}
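The essence of the scalar check, restated as a standalone Jackson snippet. This is illustrative only; the Presto version additionally handles Slice input and truncates the value in error messages:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import java.io.IOException;

static boolean isScalar(String json) throws IOException {
    try (JsonParser parser = new JsonFactory().createParser(json)) {
        JsonToken first = parser.nextToken();
        if (first == null) {
            throw new IllegalArgumentException("empty JSON");
        }
        boolean scalar = first != JsonToken.START_ARRAY && first != JsonToken.START_OBJECT;
        if (!scalar) {
            parser.skipChildren(); // consume the whole array/object
        }
        if (parser.nextToken() != null) {
            throw new IllegalArgumentException("trailing tokens after JSON value");
        }
        return scalar;
    }
}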
@Override
public List<Port> getPorts(DeviceId deviceId) {
    checkNotNull(deviceId, DEVICE_NULL);
    return manager.getVirtualPorts(this.networkId, deviceId)
            .stream()
            .collect(Collectors.toList());
}

@Test
public void testGetPorts() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    VirtualDevice virtualDevice = manager.createVirtualDevice(virtualNetwork.id(), DID1);
    manager.createVirtualDevice(virtualNetwork.id(), DID2);

    DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class);

    ConnectPoint cp = new ConnectPoint(virtualDevice.id(), PortNumber.portNumber(1));
    manager.createVirtualPort(virtualNetwork.id(), virtualDevice.id(), PortNumber.portNumber(1), cp);
    manager.createVirtualPort(virtualNetwork.id(), virtualDevice.id(), PortNumber.portNumber(2), cp);

    // test the getPorts() method
    assertEquals("The port set size did not match.", 2, deviceService.getPorts(DID1).size());
    assertEquals("The port set size did not match.", 0, deviceService.getPorts(DID2).size());
}
public TaskRunScheduler getTaskRunScheduler() {
    return taskRunScheduler;
}

@Test
public void testTaskRunMergeTimeFirst() {
    TaskRunManager taskRunManager = new TaskRunManager();
    Task task = new Task("test");
    task.setDefinition("select 1");

    long taskId = 1;
    TaskRun taskRun1 = TaskRunBuilder
            .newBuilder(task)
            .setExecuteOption(DEFAULT_MERGE_OPTION)
            .build();
    long now = System.currentTimeMillis();
    taskRun1.setTaskId(taskId);
    taskRun1.initStatus("1", now + 10);
    taskRun1.getStatus().setPriority(0);

    TaskRun taskRun2 = TaskRunBuilder
            .newBuilder(task)
            .setExecuteOption(DEFAULT_MERGE_OPTION)
            .build();
    taskRun2.setTaskId(taskId);
    taskRun2.initStatus("2", now);
    taskRun2.getStatus().setPriority(0);

    taskRunManager.arrangeTaskRun(taskRun1, false);
    taskRunManager.arrangeTaskRun(taskRun2, false);

    TaskRunScheduler taskRunScheduler = taskRunManager.getTaskRunScheduler();
    List<TaskRun> taskRuns = Lists.newArrayList(taskRunScheduler.getPendingTaskRunsByTaskId(taskId));
    Assert.assertTrue(taskRuns != null);
    Assert.assertEquals(1, taskRuns.size());

    TaskRun taskRun = taskRuns.get(0);
    Assert.assertEquals(now, taskRun.getStatus().getCreateTime());
}
void commitOffsetsOrTransaction(final Map<Task, Map<TopicPartition, OffsetAndMetadata>> offsetsPerTask) {
    log.debug("Committing task offsets {}", offsetsPerTask.entrySet().stream()
            .collect(Collectors.toMap(t -> t.getKey().id(), Entry::getValue))); // avoid logging actual Task objects

    final Set<TaskId> corruptedTasks = new HashSet<>();

    if (executionMetadata.processingMode() == EXACTLY_ONCE_ALPHA) {
        for (final Task task : taskManager.activeRunningTaskIterable()) {
            final Map<TopicPartition, OffsetAndMetadata> taskOffsetsToCommit = offsetsPerTask.getOrDefault(task, emptyMap());
            if (!taskOffsetsToCommit.isEmpty() || taskManager.streamsProducerForTask(task.id()).transactionInFlight()) {
                try {
                    taskManager.streamsProducerForTask(task.id())
                            .commitTransaction(taskOffsetsToCommit, taskManager.consumerGroupMetadata());
                    updateTaskCommitMetadata(taskOffsetsToCommit);
                } catch (final TimeoutException timeoutException) {
                    log.error(
                            String.format("Committing task %s failed.", task.id()),
                            timeoutException
                    );
                    corruptedTasks.add(task.id());
                }
            }
        }
    } else if (executionMetadata.processingMode() == EXACTLY_ONCE_V2) {
        if (!offsetsPerTask.isEmpty() || taskManager.threadProducer().transactionInFlight()) {
            final Map<TopicPartition, OffsetAndMetadata> allOffsets = offsetsPerTask.values().stream()
                    .flatMap(e -> e.entrySet().stream())
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

            try {
                taskManager.threadProducer().commitTransaction(allOffsets, taskManager.consumerGroupMetadata());
                updateTaskCommitMetadata(allOffsets);
            } catch (final TimeoutException timeoutException) {
                log.error(
                        String.format("Committing task(s) %s failed.",
                                offsetsPerTask
                                        .keySet()
                                        .stream()
                                        .map(t -> t.id().toString())
                                        .collect(Collectors.joining(", "))),
                        timeoutException
                );
                offsetsPerTask
                        .keySet()
                        .forEach(task -> corruptedTasks.add(task.id()));
            }
        }
    } else {
        // processingMode == ALOS
        if (!offsetsPerTask.isEmpty()) {
            final Map<TopicPartition, OffsetAndMetadata> allOffsets = offsetsPerTask.values().stream()
                    .flatMap(e -> e.entrySet().stream())
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

            try {
                taskManager.consumerCommitSync(allOffsets);
                updateTaskCommitMetadata(allOffsets);
            } catch (final CommitFailedException error) {
                throw new TaskMigratedException("Consumer committing offsets failed, " +
                        "indicating the corresponding thread is no longer part of the group", error);
            } catch (final TimeoutException timeoutException) {
                log.error(
                        String.format("Committing task(s) %s failed.",
                                offsetsPerTask
                                        .keySet()
                                        .stream()
                                        .map(t -> t.id().toString())
                                        .collect(Collectors.joining(", "))),
                        timeoutException
                );
                throw timeoutException;
            } catch (final KafkaException error) {
                throw new StreamsException("Error encountered committing offsets via consumer", error);
            }
        }
    }

    if (!corruptedTasks.isEmpty()) {
        throw new TaskCorruptedException(corruptedTasks);
    }
}

@Test
public void testCommitWithOpenTransactionButNoOffsetsEOSV1() {
    final TaskId taskId = new TaskId(0, 0);
    final Task task = mock(Task.class);
    when(task.id()).thenReturn(taskId);

    final Tasks tasks = mock(Tasks.class);
    final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
    final TaskManager taskManager = mock(TaskManager.class);
    when(taskManager.activeRunningTaskIterable()).thenReturn(Collections.singletonList(task));
    when(taskManager.consumerGroupMetadata()).thenReturn(groupMetadata);

    final StreamsProducer producer = mock(StreamsProducer.class);
    final TaskExecutionMetadata metadata = mock(TaskExecutionMetadata.class);
    when(metadata.processingMode()).thenReturn(EXACTLY_ONCE_ALPHA);
    when(taskManager.streamsProducerForTask(taskId)).thenReturn(producer);
    when(producer.transactionInFlight()).thenReturn(true);

    final TaskExecutor taskExecutor = new TaskExecutor(tasks, taskManager, metadata, new LogContext());
    taskExecutor.commitOffsetsOrTransaction(Collections.emptyMap());

    verify(producer).commitTransaction(Collections.emptyMap(), groupMetadata);
}
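A companion sketch for the at-least-once branch, reusing the mock setup style of the test above and assuming the usual Mockito/JUnit static imports plus StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE; names and values are illustrative. It shows a CommitFailedException from the consumer being rethrown as TaskMigratedException:

when(metadata.processingMode()).thenReturn(AT_LEAST_ONCE);
final Map<TopicPartition, OffsetAndMetadata> offsets =
        Collections.singletonMap(new TopicPartition("topic", 0), new OffsetAndMetadata(1L));
doThrow(new CommitFailedException()).when(taskManager).consumerCommitSync(offsets);
assertThrows(TaskMigratedException.class,
        () -> taskExecutor.commitOffsetsOrTransaction(Collections.singletonMap(task, offsets)));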
@Override
public Server build(Environment environment) {
    printBanner(environment.getName());
    final ThreadPool threadPool = createThreadPool(environment.metrics());
    final Server server = buildServer(environment.lifecycle(), threadPool);
    final Handler applicationHandler = createAppServlet(server,
            environment.jersey(),
            environment.getObjectMapper(),
            environment.getValidator(),
            environment.getApplicationContext(),
            environment.getJerseyServletContainer(),
            environment.metrics());
    final Handler adminHandler = createAdminServlet(server,
            environment.getAdminContext(),
            environment.metrics(),
            environment.healthChecks(),
            environment.admin());
    final RoutingHandler routingHandler = buildRoutingHandler(environment.metrics(), server, applicationHandler, adminHandler);
    final Handler gzipHandler = buildGzipHandler(routingHandler);
    server.setHandler(addStatsHandler(addRequestLog(server, gzipHandler, environment.getName())));
    return server;
}

@Test
void doesNotDefaultExceptionMappers() {
    http.setRegisterDefaultExceptionMappers(false);
    assertThat(http.getRegisterDefaultExceptionMappers()).isFalse();
    Environment environment = new Environment("test");
    http.build(environment);
    assertThat(environment.jersey().getResourceConfig().getSingletons())
            .filteredOn(x -> x instanceof ExceptionMapperBinder).isEmpty();
}
@Override
public boolean validateTree(ValidationContext validationContext) {
    validate(validationContext);
    return (onCancelConfig.validateTree(validationContext) && errors.isEmpty() && !configuration.hasErrors());
}

@Test
public void validateTreeShouldVerifyIfOnCancelTasksHasErrors() {
    PluggableTask pluggableTask = new PluggableTask(new PluginConfiguration(), new Configuration());
    pluggableTask.onCancelConfig = mock(OnCancelConfig.class);
    com.thoughtworks.go.domain.Task cancelTask = mock(com.thoughtworks.go.domain.Task.class);
    when(pluggableTask.onCancelConfig.getTask()).thenReturn(cancelTask);
    when(cancelTask.hasCancelTask()).thenReturn(false);
    when(pluggableTask.onCancelConfig.validateTree(null)).thenReturn(false);

    assertFalse(pluggableTask.validateTree(null));
}
@UdafFactory(description = "collect distinct values of a Bigint field into a single Array")
public static <T> Udaf<T, List<T>, List<T>> createCollectSetT() {
    return new Collect<>();
}

@Test
public void shouldCollectDistinctInts() {
    final Udaf<Integer, List<Integer>, List<Integer>> udaf = CollectSetUdaf.createCollectSetT();
    final Integer[] values = new Integer[] {3, 4, 5, 3};
    List<Integer> runningList = udaf.initialize();
    for (final Integer i : values) {
        runningList = udaf.aggregate(i, runningList);
    }
    assertThat(runningList, contains(3, 4, 5));
}
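A minimal usage sketch assuming the standard Udaf contract (initialize/aggregate/merge); the values are illustrative:

final Udaf<String, List<String>, List<String>> udaf = CollectSetUdaf.createCollectSetT();
List<String> left = udaf.aggregate("a", udaf.initialize());
List<String> right = udaf.aggregate("b", udaf.aggregate("a", udaf.initialize()));
List<String> merged = udaf.merge(left, right); // expected ["a", "b"]: the duplicate "a" is dropped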
public static JSONObject checkOrSetChannelCallbackEvent(String eventName, JSONObject properties, Context context) {
    if (properties == null) {
        properties = new JSONObject();
    }
    try {
        boolean isFirst = isFirstChannelEvent(eventName);
        properties.put("$is_channel_callback_event", isFirst);
        if (context != null && !ChannelUtils.hasUtmProperties(properties)) {
            ChannelUtils.mergeUtmByMetaData(context, properties);
        }
        properties.put("$channel_device_info", "1");
    } catch (JSONException e) {
        SALog.printStackTrace(e);
    }
    return properties;
}

@Test
public void checkOrSetChannelCallbackEvent() {
    final String eventName = "eventName";
    JSONObject property = new JSONObject();
    try {
        property.put("a", "a");
        property.put("b", "b");
    } catch (JSONException e) {
        e.printStackTrace();
    }
    try {
        ChannelUtils.checkOrSetChannelCallbackEvent(eventName, property, mApplication);
    } catch (Exception e) {
        e.printStackTrace();
    }
    Assert.assertTrue(property.optBoolean("$is_channel_callback_event"));
}
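The visible null check means a null properties argument yields a fresh JSONObject; a minimal sketch reusing the test's mApplication context (the event name is hypothetical):

JSONObject result = ChannelUtils.checkOrSetChannelCallbackEvent("AppInstall", null, mApplication);
// result is a newly created JSONObject carrying $is_channel_callback_event and $channel_device_info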
public List<Column> value() {
    return byNamespace()
            .get(VALUE);
}

@Test
public void shouldExposeValueColumns() {
    assertThat(SOME_SCHEMA.value(), contains(
            valueColumn(F0, STRING),
            valueColumn(F1, BIGINT)
    ));
}
T init(Object value) {
    return (T) value;
}

@Test
public void testHllUnionAggregator() {
    HllUnionAggregator aggregator = new HllUnionAggregator();
    Hll value = aggregator.init(null);
    Assert.assertEquals(Hll.HLL_DATA_EMPTY, value.getType());
}
protected HttpUriRequest setupConnection(final String method, final String bucketName,
                                         final String objectKey, final Map<String, String> requestParameters)
        throws S3ServiceException {
    return this.setupConnection(HTTP_METHOD.valueOf(method), bucketName, objectKey, requestParameters);
}

@Test
public void testSetupConnection() throws Exception {
    final RequestEntityRestStorageService service = new RequestEntityRestStorageService(session,
            new HttpConnectionPoolBuilder(session.getHost(),
                    new ThreadLocalHostnameDelegatingTrustManager(new DisabledX509TrustManager(), session.getHost().getHostname()),
                    new DefaultX509KeyManager(),
                    new DisabledProxyFinder())
                    .build(new DisabledProxyFinder(), new DisabledTranscriptListener(), new DisabledLoginCallback()));
    final RegionEndpointCache cache = service.getRegionEndpointCache();
    cache.clear();
    final String key = new AlphanumericRandomStringService().random();
    {
        final HttpUriRequest request = service.setupConnection("GET", "test-eu-central-1-cyberduck", key, Collections.emptyMap());
        assertEquals(String.format("https://test-eu-central-1-cyberduck.s3.dualstack.eu-central-1.amazonaws.com:443/%s", key),
                request.getURI().toString());
    }
    cache.clear();
    {
        final HttpUriRequest request = service.setupConnection("HEAD", "test-eu-central-1-cyberduck", key,
                Collections.singletonMap("location", ""));
        assertEquals(String.format("https://test-eu-central-1-cyberduck.s3.amazonaws.com:443/%s?location=", key),
                request.getURI().toString());
    }
}
public int computeIfAbsent(final int key, final IntUnaryOperator mappingFunction) {
    requireNonNull(mappingFunction);
    final int missingValue = this.missingValue;
    final int[] entries = this.entries;
    @DoNotSub final int mask = entries.length - 1;
    @DoNotSub int index = Hashing.evenHash(key, mask);

    int value;
    while (missingValue != (value = entries[index + 1])) {
        if (key == entries[index]) {
            break;
        }
        index = next(index, mask);
    }

    if (missingValue == value && missingValue != (value = mappingFunction.applyAsInt(key))) {
        entries[index] = key;
        entries[index + 1] = value;
        size++;
        increaseCapacity();
    }

    return value;
}

@Test
void shouldComputeIfAbsentUsingInterface() {
    final Map<Integer, Integer> map = new Int2IntHashMap(-1);
    final int key = 0;

    final int result = map.computeIfAbsent(key, (k) -> k);

    assertEquals(key, result);
}
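The missing-value semantics above, spelled out (assuming Agrona's Int2IntHashMap, as in the test):

final Int2IntHashMap map = new Int2IntHashMap(-1); // -1 is the missing value
map.computeIfAbsent(7, k -> k * 2);                // absent: stores and returns 14
map.computeIfAbsent(7, k -> 99);                   // present: returns 14, mapper not invoked
map.computeIfAbsent(3, k -> -1);                   // mapper returned the missing value: nothing stored
map.containsKey(3);                                // false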
@Override
public int mkdir(String path, long mode) {
    return AlluxioFuseUtils.call(LOG, () -> mkdirInternal(path, mode),
            FuseConstants.FUSE_MKDIR, "path=%s,mode=%o,", path, mode);
}

@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu")
@Ignore
public void mkDir() throws Exception {
    long mode = 0755L;
    mFuseFs.mkdir("/foo/bar", mode);
    verify(mFileSystem).createDirectory(BASE_EXPECTED_URI.join("/foo/bar"),
            CreateDirectoryPOptions.newBuilder()
                    .setMode(new alluxio.security.authorization.Mode((short) mode).toProto())
                    .setRecursive(true)
                    .build());
}
private <T> T newPlugin(Class<T> klass) {
    // KAFKA-8340: The thread classloader is used during static initialization and must be
    // set to the plugin's classloader during instantiation
    try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) {
        return Utils.newInstance(klass);
    } catch (Throwable t) {
        throw new ConnectException("Instantiation error", t);
    }
}

@Test
public void newPluginShouldServiceLoadWithPluginClassLoader() {
    Converter plugin = plugins.newPlugin(
            TestPlugin.SERVICE_LOADER.className(),
            new AbstractConfig(new ConfigDef(), Collections.emptyMap()),
            Converter.class
    );

    assertInstanceOf(SamplingTestPlugin.class, plugin, "Cannot collect samples");
    Map<String, SamplingTestPlugin> samples = ((SamplingTestPlugin) plugin).flatten();
    // Assert that the service loaded subclass is found in both environments
    assertTrue(samples.containsKey("ServiceLoadedSubclass.static"));
    assertTrue(samples.containsKey("ServiceLoadedSubclass.dynamic"));
    assertPluginClassLoaderAlwaysActive(plugin);
}
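The classloader discipline above, in isolation: a generic sketch of the swap-and-restore pattern using plain reflection (not the Connect LoaderSwap/Utils API):

static <T> T newInstanceWithPluginLoader(Class<T> klass) throws ReflectiveOperationException {
    ClassLoader saved = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(klass.getClassLoader());
    try {
        return klass.getDeclaredConstructor().newInstance();
    } finally {
        Thread.currentThread().setContextClassLoader(saved); // always restore the original loader
    }
}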