Dataset columns: focal_method (string, lengths 13 to 60.9k), test_case (string, lengths 25 to 109k)
public void init(String keyId, String applicationKey, String exportService)
        throws BackblazeCredentialsException, IOException {
    // Fetch all the available buckets and use that to find which region the user is in
    ListBucketsResponse listBucketsResponse = null;
    String userRegion = null;

    // The Key ID starts with the region identifier number, so reorder the regions such that
    // the first region is most likely the user's region
    String regionId = keyId.substring(0, 3);
    BACKBLAZE_REGIONS.sort(
        (String region1, String region2) -> {
            if (region1.endsWith(regionId)) {
                return -1;
            }
            return 0;
        });

    Throwable s3Exception = null;
    for (String region : BACKBLAZE_REGIONS) {
        try {
            s3Client = backblazeS3ClientFactory.createS3Client(keyId, applicationKey, region);
            listBucketsResponse = s3Client.listBuckets();
            userRegion = region;
            break;
        } catch (S3Exception e) {
            s3Exception = e;
            if (s3Client != null) {
                s3Client.close();
            }
            if (e.statusCode() == 403) {
                monitor.debug(() -> String.format("User is not in region %s", region));
            }
        }
    }

    if (listBucketsResponse == null || userRegion == null) {
        throw new BackblazeCredentialsException(
            "User's credentials or permissions are not valid for any regions available", s3Exception);
    }

    bucketName = getOrCreateBucket(s3Client, listBucketsResponse, userRegion, exportService);
}
@Test public void testInitErrorCreatingBucket() throws BackblazeCredentialsException, IOException { createEmptyBucketList(); when(s3Client.createBucket(any(CreateBucketRequest.class))) .thenThrow(AwsServiceException.builder().build()); BackblazeDataTransferClient client = createDefaultClient(); assertThrows(IOException.class, () -> { client.init(KEY_ID, APP_KEY, EXPORT_SERVICE); }); }
public FloatArrayAsIterable usingTolerance(double tolerance) { return new FloatArrayAsIterable(tolerance(tolerance), iterableSubject()); }
@Test
public void usingTolerance_contains_otherTypes() {
    // Expected value is Double
    assertThat(array(1.0f, 2.0f + 0.5f * DEFAULT_TOLERANCE, 3.0f))
        .usingTolerance(DEFAULT_TOLERANCE)
        .contains(2.0);
    // Expected value is Integer
    assertThat(array(1.0f, 2.0f + 0.5f * DEFAULT_TOLERANCE, 3.0f))
        .usingTolerance(DEFAULT_TOLERANCE)
        .contains(2);
    // Expected value is Integer.MIN_VALUE. This is -1*2^31, which has an exact float
    // representation. For the actual value we use the next value down, which is 2^8 smaller
    // (because the resolution of floats with absolute values between 2^31 and 2^32 is 2^8). So
    // we'll make the assertion with a tolerance of 2^9.
    assertThat(array(1.0f, Integer.MIN_VALUE + 0.5f * DEFAULT_TOLERANCE, 3.0f))
        .usingTolerance(1 << 9)
        .contains(Integer.MIN_VALUE);
    // Expected value is Long
    assertThat(array(1.0f, 2.0f + 0.5f * DEFAULT_TOLERANCE, 3.0f))
        .usingTolerance(DEFAULT_TOLERANCE)
        .contains(2L);
    // Expected value is Long.MIN_VALUE. This is -1*2^63, which has an exact float representation.
    // For the actual value we use the next value down, which is 2^40 smaller (because the
    // resolution of floats with absolute values between 2^63 and 2^64 is 2^40). So we'll make the
    // assertion with a tolerance of 2^41.
    assertThat(array(1.0f, UNDER_LONG_MIN, 3.0f)).usingTolerance(1L << 41).contains(Long.MIN_VALUE);
    // Expected value is BigInteger
    assertThat(array(1.0f, 2.0f + 0.5f * DEFAULT_TOLERANCE, 3.0f))
        .usingTolerance(DEFAULT_TOLERANCE)
        .contains(BigInteger.valueOf(2));
    // Expected value is BigDecimal
    assertThat(array(1.0f, 2.0f + 0.5f * DEFAULT_TOLERANCE, 3.0f))
        .usingTolerance(DEFAULT_TOLERANCE)
        .contains(BigDecimal.valueOf(2.0));
}
public static Dish createDish(Recipe recipe) { Map<Product, BigDecimal> calculatedRecipeToGram = new HashMap<>(); recipe.getIngredientsProportion().forEach(((product, proportion) -> { calculatedRecipeToGram.put(product, recipe.getBasePortionInGrams() .multiply(proportion.divide(BigDecimal.valueOf(100), 2, RoundingMode.FLOOR))); })); return new Dish(calculatedRecipeToGram, recipe); }
@Test void testCreateDish() { }
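The test above is empty in this dataset entry. A minimal sketch of what it could assert, under the assumption (not shown in the source) that Recipe and Product expose constructors like the ones below and that Dish exposes a getter for the calculated map; all of those names are hypothetical:

@Test
void testCreateDish() {
    // Hypothetical fixture: a 200 g base portion where a single product makes up 50%.
    Product flour = new Product("flour"); // assumed constructor
    Recipe recipe = new Recipe(Map.of(flour, BigDecimal.valueOf(50)), BigDecimal.valueOf(200)); // assumed constructor
    Dish dish = createDish(recipe);
    // 200 * (50 / 100, floored to 2 decimals) = 100.00 grams expected for the product.
    assertEquals(new BigDecimal("100.00"), dish.getCalculatedRecipeToGram().get(flour)); // getter assumed
}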
public static <T extends Comparable<? super T>> T max(T[] numberArray) { return ArrayUtil.max(numberArray); }
@Test public void maxTest() { final int max = NumberUtil.max(5,4,3,6,1); assertEquals(6, max); }
public static <T> AvroSchema<T> of(SchemaDefinition<T> schemaDefinition) { if (schemaDefinition.getSchemaReaderOpt().isPresent() && schemaDefinition.getSchemaWriterOpt().isPresent()) { return new AvroSchema<>(schemaDefinition.getSchemaReaderOpt().get(), schemaDefinition.getSchemaWriterOpt().get(), parseSchemaInfo(schemaDefinition, SchemaType.AVRO)); } ClassLoader pojoClassLoader = null; if (schemaDefinition.getClassLoader() != null) { pojoClassLoader = schemaDefinition.getClassLoader(); } else if (schemaDefinition.getPojo() != null) { pojoClassLoader = schemaDefinition.getPojo().getClassLoader(); } return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader); }
@Test public void testAllowNullSchema() throws JSONException { AvroSchema<Foo> avroSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build()); assertEquals(avroSchema.getSchemaInfo().getType(), SchemaType.AVRO); Schema.Parser parser = new Schema.Parser(); parser.setValidateDefaults(false); String schemaJson = new String(avroSchema.getSchemaInfo().getSchema()); assertJSONEquals(schemaJson, SCHEMA_AVRO_ALLOW_NULL); Schema schema = parser.parse(schemaJson); for (String fieldName : FOO_FIELDS) { Schema.Field field = schema.getField(fieldName); Assert.assertNotNull(field); if (field.name().equals("field4")) { Assert.assertNotNull(field.schema().getTypes().get(1).getField("field1")); } if (field.name().equals("fieldUnableNull")) { Assert.assertNotNull(field.schema().getType()); } } }
@Override
public void startBundle(Receiver... receivers) throws Exception {
    checkArgument(
        receivers.length == outputTupleTagsToReceiverIndices.size(),
        "unexpected number of receivers for DoFn");
    this.receivers = receivers;
    if (hasStreamingSideInput) {
        // There is non-trivial setup that needs to be performed for watermark propagation
        // even on empty bundles.
        reallyStartBundle();
    }
}
@Test @SuppressWarnings("AssertionFailureIgnored") public void testUnexpectedNumberOfReceivers() throws Exception { TestDoFn fn = new TestDoFn(Collections.emptyList()); DoFnInfo<?, ?> fnInfo = DoFnInfo.forFn( fn, WindowingStrategy.globalDefault(), null /* side input views */, null /* input coder */, MAIN_OUTPUT, DoFnSchemaInformation.create(), Collections.emptyMap()); TestReceiver receiver = new TestReceiver(); ParDoFn userParDoFn = new SimpleParDoFn<>( options, DoFnInstanceManagers.singleInstance(fnInfo), new EmptySideInputReader(), MAIN_OUTPUT, ImmutableMap.of(MAIN_OUTPUT, 0), BatchModeExecutionContext.forTesting(options, "testStage") .getStepContext(operationContext), operationContext, DoFnSchemaInformation.create(), Collections.emptyMap(), SimpleDoFnRunnerFactory.INSTANCE); try { userParDoFn.startBundle(); fail("should have failed"); } catch (Throwable exn) { assertThat(exn.toString(), containsString("unexpected number of receivers")); } try { userParDoFn.startBundle(receiver, receiver); fail("should have failed"); } catch (Throwable exn) { assertThat(exn.toString(), containsString("unexpected number of receivers")); } }
public static TypeInformation<?> readTypeInfo(String typeString) { final List<Token> tokens = tokenize(typeString); final TokenConverter converter = new TokenConverter(typeString, tokens); return converter.convert(); }
@Test
void testSyntaxError3() {
    assertThatThrownBy(() -> TypeStringUtils.readTypeInfo("ROW<f0 INVALID, f1 TINYINT>"))
        .isInstanceOf(ValidationException.class); // invalid type
}
public static void checkTopic(String topic) throws MQClientException { if (UtilAll.isBlank(topic)) { throw new MQClientException("The specified topic is blank", null); } if (topic.length() > TOPIC_MAX_LENGTH) { throw new MQClientException( String.format("The specified topic is longer than topic max length %d.", TOPIC_MAX_LENGTH), null); } if (isTopicOrGroupIllegal(topic)) { throw new MQClientException(String.format( "The specified topic[%s] contains illegal characters, allowing only %s", topic, "^[%|a-zA-Z0-9_-]+$"), null); } }
@Test public void testCheckTopic_HasIllegalCharacters() { String illegalTopic = "TOPIC&*^"; try { Validators.checkTopic(illegalTopic); failBecauseExceptionWasNotThrown(MQClientException.class); } catch (MQClientException e) { assertThat(e).hasMessageStartingWith(String.format("The specified topic[%s] contains illegal characters, allowing only %s", illegalTopic, "^[%|a-zA-Z0-9_-]+$")); } }
public static byte[] generateScalarCallStub(Class<?> clazz, Method method) { final BatchCallEvaluateGenerator generator = new BatchCallEvaluateGenerator(clazz, method); generator.declareCallStubClazz(); generator.genBatchUpdateSingle(); generator.finish(); return generator.getByteCode(); }
@Test public void testScalarCallStub() throws NoSuchMethodException, ClassNotFoundException, InvocationTargetException, IllegalAccessException { Class<?> clazz = ScalarAdd.class; final String genClassName = CallStubGenerator.CLAZZ_NAME.replace("/", "."); Method m = clazz.getMethod("evaluate", String.class, Integer.class); final byte[] updates = CallStubGenerator.generateScalarCallStub(clazz, m); ClassLoader classLoader = new TestClassLoader(genClassName, updates); final Class<?> stubClazz = classLoader.loadClass(genClassName); Method batchCall = getFirstMethod(stubClazz, "batchCallV"); ScalarAdd concat = new ScalarAdd(); int testSize = 1000; String[] inputs1 = new String[testSize]; Integer[] inputs2 = new Integer[testSize]; String[] expects = new String[testSize]; for (int i = 0; i < testSize; i++) { inputs1[i] = i + ""; inputs2[i] = i; expects[i] = inputs1[i] + inputs2[i]; } final String[] res = (String[])batchCall.invoke(null, testSize, concat, inputs1, inputs2); for (int i = 0; i < testSize; i++) { Assertions.assertEquals(expects[i], res[i]); } }
@Override
public void unbindSocialUser(Long userId, Integer userType, Integer socialType, String openid) {
    // Get the SocialUserDO social user corresponding to the openid
    SocialUserDO socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, openid);
    if (socialUser == null) {
        throw exception(SOCIAL_USER_NOT_FOUND);
    }
    // Delete the corresponding social binding relation
    socialUserBindMapper.deleteByUserTypeAndUserIdAndSocialType(userType, userId, socialUser.getType());
}
@Test
public void testUnbindSocialUser_notFound() {
    // Invoke, and assert the expected service exception
    assertServiceException(
        () -> socialUserService.unbindSocialUser(randomLong(), UserTypeEnum.ADMIN.getValue(),
            SocialTypeEnum.GITEE.getType(), "test_openid"),
        SOCIAL_USER_NOT_FOUND);
}
public static Duration parse(final String text) { try { final String[] parts = text.split("\\s"); if (parts.length != 2) { throw new IllegalArgumentException("Expected 2 tokens, got: " + parts.length); } final long size = parseNumeric(parts[0]); return buildDuration(size, parts[1]); } catch (final Exception e) { throw new IllegalArgumentException("Invalid duration: '" + text + "'. " + e.getMessage(), e); } }
@Test
public void shouldThrowOnUnknownTimeUnit() {
    // When:
    final Exception e = assertThrows(
        IllegalArgumentException.class,
        () -> parse("10 Green_Bottles")
    );

    // Then:
    assertThat(e.getMessage(), containsString("Unknown time unit: 'GREEN_BOTTLES'"));
}
@Override public void open(ExecutionContext ctx) throws Exception { super.open(ctx); equaliser = genRecordEqualiser.newInstance(ctx.getRuntimeContext().getUserCodeClassLoader()); }
@Test
public void testWithGenerateUpdateBeforeAndStateTtl() throws Exception {
    ProcTimeMiniBatchDeduplicateKeepLastRowFunction func =
        createFunction(true, true, minTime.toMilliseconds());
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(func);
    testHarness.setup();
    testHarness.open();
    testHarness.processElement(insertRecord("book", 1L, 10));
    testHarness.processElement(insertRecord("book", 2L, 11));
    // output is empty because the bundle has not been triggered yet.
    assertThat(testHarness.getOutput()).isEmpty();
    testHarness.processElement(insertRecord("book", 1L, 13));

    testHarness.setStateTtlProcessingTime(30);
    testHarness.processElement(insertRecord("book", 1L, 17));
    testHarness.processElement(insertRecord("book", 2L, 18));
    testHarness.processElement(insertRecord("book", 1L, 19));

    List<Object> expectedOutput = new ArrayList<>();
    expectedOutput.add(insertRecord("book", 2L, 11));
    expectedOutput.add(insertRecord("book", 1L, 13));
    // because (2L,11) and (1L,13) have expired, no UPDATE_BEFORE message is sent downstream
    expectedOutput.add(insertRecord("book", 1L, 19));
    expectedOutput.add(insertRecord("book", 2L, 18));
    assertor.assertOutputEqualsSorted("output wrong.", expectedOutput, testHarness.getOutput());
}
public String toHumanReadableString() { if (humanReadableStr == null) { humanReadableStr = formatToHumanReadableString(); } return humanReadableStr; }
@Test void testToHumanReadableString() { assertThat(new MemorySize(0L).toHumanReadableString()).isEqualTo("0 bytes"); assertThat(new MemorySize(1L).toHumanReadableString()).isEqualTo("1 bytes"); assertThat(new MemorySize(1024L).toHumanReadableString()).isEqualTo("1024 bytes"); assertThat(new MemorySize(1025L).toHumanReadableString()).isEqualTo("1.001kb (1025 bytes)"); assertThat(new MemorySize(1536L).toHumanReadableString()).isEqualTo("1.500kb (1536 bytes)"); assertThat(new MemorySize(1_000_000L).toHumanReadableString()) .isEqualTo("976.563kb (1000000 bytes)"); assertThat(new MemorySize(1_000_000_000L).toHumanReadableString()) .isEqualTo("953.674mb (1000000000 bytes)"); assertThat(new MemorySize(1_000_000_000_000L).toHumanReadableString()) .isEqualTo("931.323gb (1000000000000 bytes)"); assertThat(new MemorySize(1_000_000_000_000_000L).toHumanReadableString()) .isEqualTo("909.495tb (1000000000000000 bytes)"); }
@Override public KsMaterializedQueryResult<WindowedRow> get( final GenericKey key, final int partition, final Range<Instant> windowStartBounds, final Range<Instant> windowEndBounds, final Optional<Position> position ) { try { final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds); final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds); final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper); StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request = inStore(stateStore.getStateStoreName()).withQuery(query); if (position.isPresent()) { request = request.withPositionBound(PositionBound.at(position.get())); } final KafkaStreams streams = stateStore.getKafkaStreams(); final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result = streams.query(request); final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult = result.getPartitionResults().get(partition); if (queryResult.isFailure()) { throw failedQueryException(queryResult); } if (queryResult.getResult() == null) { return KsMaterializedQueryResult.rowIteratorWithPosition( Collections.emptyIterator(), queryResult.getPosition()); } try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) { final Builder<WindowedRow> builder = ImmutableList.builder(); while (it.hasNext()) { final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next(); final Instant windowStart = Instant.ofEpochMilli(next.key); if (!windowStartBounds.contains(windowStart)) { continue; } final Instant windowEnd = windowStart.plus(windowSize); if (!windowEndBounds.contains(windowEnd)) { continue; } final TimeWindow window = new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli()); final WindowedRow row = WindowedRow.of( stateStore.schema(), new Windowed<>(key, window), next.value.value(), next.value.timestamp() ); builder.add(row); } return KsMaterializedQueryResult.rowIteratorWithPosition( builder.build().iterator(), queryResult.getPosition()); } } catch (final NotUpToBoundException | MaterializationException e) { throw e; } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
@Test
@SuppressWarnings("unchecked")
public void shouldReturnValuesForOpenStartBounds_fetchAll() {
    // Given:
    final Range<Instant> start = Range.open(
        NOW,
        NOW.plusSeconds(10)
    );
    final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>>
        partitionResult = new StateQueryResult<>();
    final QueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>>
        queryResult = QueryResult.forResult(keyValueIterator);
    queryResult.setPosition(POSITION);
    partitionResult.addResult(PARTITION, queryResult);
    when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
    when(keyValueIterator.hasNext())
        .thenReturn(true, true, true, false);
    when(keyValueIterator.next())
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY,
            new TimeWindow(start.lowerEndpoint().toEpochMilli(),
                start.lowerEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_1))
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY2,
            new TimeWindow(start.lowerEndpoint().plusMillis(1).toEpochMilli(),
                start.lowerEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis() + 1)), VALUE_2))
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY3,
            new TimeWindow(start.upperEndpoint().toEpochMilli(),
                start.upperEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_3))
        .thenThrow(new AssertionError());

    // When:
    final KsMaterializedQueryResult<WindowedRow> result =
        table.get(PARTITION, start, Range.all());

    // Then:
    final Iterator<WindowedRow> rowIterator = result.getRowIterator();
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(WindowedRow.of(
        SCHEMA,
        windowedKey(A_KEY2, start.lowerEndpoint().plusMillis(1)),
        VALUE_2.value(),
        VALUE_2.timestamp())));
    assertThat(rowIterator.hasNext(), is(false));
    assertThat(result.getPosition(), not(Optional.empty()));
    assertThat(result.getPosition().get(), is(POSITION));
}
@Override public long estimateCount(Sketch<?> sketch) { if (sketch instanceof NormalSketch) { return estimateCount((NormalSketch) sketch); } else { return estimateCount((SparseSketch) sketch); } }
@Test
public void requireThatEstimateIsReasonableForFullNormalSketch() {
    HyperLogLogEstimator estimator = new HyperLogLogEstimator(10);
    NormalSketch sketch = new NormalSketch(10);
    // Fill sketch with 23 - the highest possible zero prefix for precision 10.
    Arrays.fill(sketch.data(), (byte) 23);
    long estimate = estimator.estimateCount(sketch);
    assertTrue(estimate > 6_000_000_000L);
}
public CompletableFuture<QueryRouteResponse> queryRoute(ProxyContext ctx, QueryRouteRequest request) { CompletableFuture<QueryRouteResponse> future = new CompletableFuture<>(); try { validateTopic(request.getTopic()); List<org.apache.rocketmq.proxy.common.Address> addressList = this.convertToAddressList(request.getEndpoints()); String topicName = request.getTopic().getName(); ProxyTopicRouteData proxyTopicRouteData = this.messagingProcessor.getTopicRouteDataForProxy( ctx, addressList, topicName); List<MessageQueue> messageQueueList = new ArrayList<>(); Map<String, Map<Long, Broker>> brokerMap = buildBrokerMap(proxyTopicRouteData.getBrokerDatas()); TopicMessageType topicMessageType = messagingProcessor.getMetadataService().getTopicMessageType(ctx, topicName); for (QueueData queueData : proxyTopicRouteData.getQueueDatas()) { String brokerName = queueData.getBrokerName(); Map<Long, Broker> brokerIdMap = brokerMap.get(brokerName); if (brokerIdMap == null) { break; } for (Broker broker : brokerIdMap.values()) { messageQueueList.addAll(this.genMessageQueueFromQueueData(queueData, request.getTopic(), topicMessageType, broker)); } } QueryRouteResponse response = QueryRouteResponse.newBuilder() .setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name())) .addAllMessageQueues(messageQueueList) .build(); future.complete(response); } catch (Throwable t) { future.completeExceptionally(t); } return future; }
@Test public void testQueryRoute() throws Throwable { ConfigurationManager.getProxyConfig().setGrpcServerPort(8080); ArgumentCaptor<List<org.apache.rocketmq.proxy.common.Address>> addressListCaptor = ArgumentCaptor.forClass(List.class); when(this.messagingProcessor.getTopicRouteDataForProxy(any(), addressListCaptor.capture(), anyString())) .thenReturn(createProxyTopicRouteData(2, 2, 6)); MetadataService metadataService = Mockito.mock(LocalMetadataService.class); when(this.messagingProcessor.getMetadataService()).thenReturn(metadataService); when(metadataService.getTopicMessageType(any(), anyString())).thenReturn(TopicMessageType.NORMAL); QueryRouteResponse response = this.routeActivity.queryRoute( createContext(), QueryRouteRequest.newBuilder() .setEndpoints(grpcEndpoints) .setTopic(Resource.newBuilder().setName(TOPIC).build()) .build() ).get(); assertEquals(Code.OK, response.getStatus().getCode()); assertEquals(4, response.getMessageQueuesCount()); for (MessageQueue messageQueue : response.getMessageQueuesList()) { assertEquals(grpcEndpoints, messageQueue.getBroker().getEndpoints()); assertEquals(Permission.READ_WRITE, messageQueue.getPermission()); } }
public static String createApiFileUrl(String baseUrl, ExtensionVersion extVersion, String fileName) { var extension = extVersion.getExtension(); var namespaceName = extension.getNamespace().getName(); return createApiFileUrl(baseUrl, namespaceName, extension.getName(), extVersion.getTargetPlatform(), extVersion.getVersion(), fileName); }
@Test public void testCreateApiFileUrlUniversalTarget() throws Exception { var baseUrl = "http://localhost/"; assertThat(UrlUtil.createApiFileUrl(baseUrl, "foo", "bar", "universal", "0.1.0", "foo.bar-0.1.0.vsix")) .isEqualTo("http://localhost/api/foo/bar/0.1.0/file/foo.bar-0.1.0.vsix"); }
public void addToIfExists(EnvironmentVariableContext context) { if (context.hasProperty(getName())) { addTo(context); } }
@Test void addToIfExists_shouldNotAddEnvironmentVariableToEnvironmentVariableContextWhenVariableDoesNotExistInContext() { final EnvironmentVariableContext environmentVariableContext = mock(EnvironmentVariableContext.class); final EnvironmentVariable environmentVariable = new EnvironmentVariable("foo", "bar"); when(environmentVariableContext.hasProperty("foo")).thenReturn(false); environmentVariable.addToIfExists(environmentVariableContext); verify(environmentVariableContext, times(0)).setProperty("foo", "bar", false); }
@Override public void profilePushId(String pushTypeKey, String pushId) { }
@Test public void profilePushId() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); mSensorsAPI.profilePushId("jpush_id", "eacsdilkjiads123"); }
@ApiOperation(value = "Update a user’s info", tags = { "Users" }, nickname = "updateUserInfo") @ApiResponses(value = { @ApiResponse(code = 200, message = "Indicates the user was found and the info has been updated."), @ApiResponse(code = 400, message = "Indicates the value was missing from the request body."), @ApiResponse(code = 404, message = "Indicates the requested user was not found or the user does not have info for the given key. Status description contains additional information about the error.") }) @PutMapping(value = "/identity/users/{userId}/info/{key}", produces = "application/json") public UserInfoResponse setUserInfo(@ApiParam(name = "userId") @PathVariable("userId") String userId, @ApiParam(name = "key") @PathVariable("key") String key, @RequestBody UserInfoRequest userRequest) { User user = getUserFromRequest(userId); String validKey = getValidKeyFromRequest(user, key); if (userRequest.getValue() == null) { throw new FlowableIllegalArgumentException("The value cannot be null."); } if (userRequest.getKey() == null || validKey.equals(userRequest.getKey())) { identityService.setUserInfo(user.getId(), key, userRequest.getValue()); } else { throw new FlowableIllegalArgumentException("Key provided in request body does not match the key in the resource URL."); } return restResponseFactory.createUserInfoResponse(key, userRequest.getValue(), user.getId()); }
@Test
public void testCreateUserInfoExceptions() throws Exception {
    User savedUser = null;
    try {
        User newUser = identityService.newUser("testuser");
        newUser.setFirstName("Fred");
        newUser.setLastName("McDonald");
        newUser.setEmail("no-reply@flowable.org");
        identityService.saveUser(newUser);
        savedUser = newUser;

        // Test creating without value
        ObjectNode requestNode = objectMapper.createObjectNode();
        requestNode.put("key", "key1");
        HttpPost httpPost = new HttpPost(SERVER_URL_PREFIX
            + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER_INFO_COLLECTION, "testuser"));
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_BAD_REQUEST));

        // Test creating without key
        requestNode = objectMapper.createObjectNode();
        requestNode.put("value", "The value");
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_BAD_REQUEST));

        // Test creating an already existing info
        identityService.setUserInfo(newUser.getId(), "key1", "The value");
        requestNode = objectMapper.createObjectNode();
        requestNode.put("key", "key1");
        requestNode.put("value", "The value");
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_CONFLICT));

        // Test creating info for a nonexistent user
        httpPost = new HttpPost(SERVER_URL_PREFIX
            + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER_INFO_COLLECTION, "unexistinguser"));
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_NOT_FOUND));
    } finally {
        // Delete user after test passes or fails
        if (savedUser != null) {
            identityService.deleteUser(savedUser.getId());
        }
    }
}
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test public void createForumTopic() { Integer color = 7322096; String emoji = "5434144690511290129"; CreateForumTopicResponse createResponse = bot.execute( new CreateForumTopic(forum, "test_topic").iconColor(color).iconCustomEmojiId(emoji) ); assertTrue(createResponse.isOk()); ForumTopic topic = createResponse.forumTopic(); assertNotNull(topic); assertEquals(color, topic.iconColor()); assertEquals(emoji, topic.iconCustomEmojiId()); String name = "test_topic_edit"; BaseResponse response = bot.execute( new EditForumTopic(forum, topic.messageThreadId()).name(name).iconCustomEmojiId("") ); assertTrue(response.isOk()); response = bot.execute(new CloseForumTopic(forum, topic.messageThreadId())); assertTrue(response.isOk()); response = bot.execute(new ReopenForumTopic(forum, topic.messageThreadId())); assertTrue(response.isOk()); response = bot.execute(new DeleteForumTopic(forum, topic.messageThreadId())); assertTrue(response.isOk()); }
@Override
public void handle(final RoutingContext routingContext) {
    // We must set it to allow chunked encoding if we're using http1.1
    if (routingContext.request().version() == HttpVersion.HTTP_1_1) {
        routingContext.response().putHeader(TRANSFER_ENCODING, CHUNKED_ENCODING);
    } else if (routingContext.request().version() == HttpVersion.HTTP_2) {
        // Nothing required
    } else {
        routingContext.fail(BAD_REQUEST.code(),
            new KsqlApiException("This endpoint is only available when using HTTP1.1 or HTTP2",
                ERROR_CODE_BAD_REQUEST));
    }

    final CommonRequest request = getRequest(routingContext);
    if (request == null) {
        return;
    }

    final Optional<Boolean> internalRequest = ServerVerticle.isInternalRequest(routingContext);
    final MetricsCallbackHolder metricsCallbackHolder = new MetricsCallbackHolder();
    final long startTimeNanos = Time.SYSTEM.nanoseconds();
    endpoints.createQueryPublisher(
            request.sql, request.configOverrides, request.sessionProperties,
            request.requestProperties, context, server.getWorkerExecutor(),
            DefaultApiSecurityContext.create(routingContext, server),
            metricsCallbackHolder, internalRequest)
        .thenAccept(publisher -> {
            if (publisher instanceof BlockingPrintPublisher) {
                handlePrintPublisher(routingContext, (BlockingPrintPublisher) publisher);
            } else {
                handleQueryPublisher(routingContext, (QueryPublisher) publisher,
                    metricsCallbackHolder, startTimeNanos);
            }
        })
        .exceptionally(t ->
            ServerUtils.handleEndpointException(t, routingContext, "Failed to execute query"));
}
@Test
public void shouldSucceed_pullQuery() {
    // Given:
    final QueryStreamArgs req = new QueryStreamArgs("select * from foo;",
        Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
    givenRequest(req);

    // When:
    handler.handle(routingContext);
    endHandler.getValue().handle(null);

    // Then:
    assertThat(subscriber.getValue(), notNullValue());
    verify(publisher).close();
}
public @Nullable DirectoryEntry get(Name name) { int index = bucketIndex(name, table.length); DirectoryEntry entry = table[index]; while (entry != null) { if (name.equals(entry.name())) { return entry; } entry = entry.next; } return null; }
@Test public void testGet() { assertThat(root.get(Name.simple("foo"))).isEqualTo(entry(root, "foo", dir)); assertThat(dir.get(Name.simple("foo"))).isNull(); assertThat(root.get(Name.simple("Foo"))).isNull(); }
public static boolean isBlank(String str) { return str == null || isEmpty(str.trim()); }
@Test public void testIsBlank() { assertTrue(StringUtil.isBlank(null)); assertTrue(StringUtil.isBlank("")); assertTrue(StringUtil.isBlank(" ")); assertFalse(StringUtil.isBlank("A String")); }
@Override public void decorate(final ShardingRule shardingRule, final ConfigurationProperties props, final SQLRewriteContext sqlRewriteContext, final RouteContext routeContext) { SQLStatementContext sqlStatementContext = sqlRewriteContext.getSqlStatementContext(); if (!isAlterOrDropIndexStatement(sqlStatementContext) && !isCursorAvailableStatement(sqlStatementContext) && !containsShardingTable(shardingRule, sqlStatementContext)) { return; } if (!sqlRewriteContext.getParameters().isEmpty()) { Collection<ParameterRewriter> parameterRewriters = new ShardingParameterRewriterBuilder(routeContext, sqlRewriteContext.getDatabase().getSchemas(), sqlStatementContext).getParameterRewriters(); rewriteParameters(sqlRewriteContext, parameterRewriters); } sqlRewriteContext.addSQLTokenGenerators(new ShardingTokenGenerateBuilder(shardingRule, routeContext, sqlStatementContext).getSQLTokenGenerators()); }
@Test void assertDecorateWhenInsertStatementNotContainsShardingTable() { SQLRewriteContext sqlRewriteContext = mock(SQLRewriteContext.class); InsertStatementContext insertStatementContext = mock(InsertStatementContext.class, RETURNS_DEEP_STUBS); when(insertStatementContext.getTablesContext().getTableNames()).thenReturn(Collections.singleton("t_order")); when(sqlRewriteContext.getSqlStatementContext()).thenReturn(insertStatementContext); ShardingRule shardingRule = mock(ShardingRule.class); when(shardingRule.findShardingTable("t_order")).thenReturn(Optional.empty()); new ShardingSQLRewriteContextDecorator().decorate(shardingRule, mock(ConfigurationProperties.class), sqlRewriteContext, mock(RouteContext.class)); assertTrue(sqlRewriteContext.getSqlTokens().isEmpty()); }
public static String getTypeName(final int type) { switch (type) { case START_EVENT_V3: return "Start_v3"; case STOP_EVENT: return "Stop"; case QUERY_EVENT: return "Query"; case ROTATE_EVENT: return "Rotate"; case INTVAR_EVENT: return "Intvar"; case LOAD_EVENT: return "Load"; case NEW_LOAD_EVENT: return "New_load"; case SLAVE_EVENT: return "Slave"; case CREATE_FILE_EVENT: return "Create_file"; case APPEND_BLOCK_EVENT: return "Append_block"; case DELETE_FILE_EVENT: return "Delete_file"; case EXEC_LOAD_EVENT: return "Exec_load"; case RAND_EVENT: return "RAND"; case XID_EVENT: return "Xid"; case USER_VAR_EVENT: return "User var"; case FORMAT_DESCRIPTION_EVENT: return "Format_desc"; case TABLE_MAP_EVENT: return "Table_map"; case PRE_GA_WRITE_ROWS_EVENT: return "Write_rows_event_old"; case PRE_GA_UPDATE_ROWS_EVENT: return "Update_rows_event_old"; case PRE_GA_DELETE_ROWS_EVENT: return "Delete_rows_event_old"; case WRITE_ROWS_EVENT_V1: return "Write_rows_v1"; case UPDATE_ROWS_EVENT_V1: return "Update_rows_v1"; case DELETE_ROWS_EVENT_V1: return "Delete_rows_v1"; case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; case INCIDENT_EVENT: return "Incident"; case HEARTBEAT_LOG_EVENT: case HEARTBEAT_LOG_EVENT_V2: return "Heartbeat"; case IGNORABLE_LOG_EVENT: return "Ignorable"; case ROWS_QUERY_LOG_EVENT: return "Rows_query"; case WRITE_ROWS_EVENT: return "Write_rows"; case UPDATE_ROWS_EVENT: return "Update_rows"; case DELETE_ROWS_EVENT: return "Delete_rows"; case GTID_LOG_EVENT: return "Gtid"; case ANONYMOUS_GTID_LOG_EVENT: return "Anonymous_Gtid"; case PREVIOUS_GTIDS_LOG_EVENT: return "Previous_gtids"; case PARTIAL_UPDATE_ROWS_EVENT: return "Update_rows_partial"; case TRANSACTION_CONTEXT_EVENT : return "Transaction_context"; case VIEW_CHANGE_EVENT : return "view_change"; case XA_PREPARE_LOG_EVENT : return "Xa_prepare"; case TRANSACTION_PAYLOAD_EVENT : return "transaction_payload"; default: return "Unknown type:" + type; } }
@Test
public void getTypeNameInputPositiveOutputNotNull22() {
    // Arrange
    final int type = 27;

    // Act
    final String actual = LogEvent.getTypeName(type);

    // Assert result
    Assert.assertEquals("Heartbeat", actual);
}
public double bps() { return bps; }
@Test public void testBps() { Bandwidth expected = Bandwidth.bps(one); assertEquals(one, expected.bps(), 0.0); }
public Optional<PluginMatchingResult<ServiceFingerprinter>> getServiceFingerprinter( NetworkService networkService) { return tsunamiPlugins.entrySet().stream() .filter(entry -> entry.getKey().type().equals(PluginType.SERVICE_FINGERPRINT)) .filter(entry -> hasMatchingServiceName(networkService, entry.getKey())) .map( entry -> PluginMatchingResult.<ServiceFingerprinter>builder() .setPluginDefinition(entry.getKey()) .setTsunamiPlugin((ServiceFingerprinter) entry.getValue().get()) .addMatchedService(networkService) .build()) .findFirst(); }
@Test public void getServiceFingerprinter_whenFingerprinterHasMatch_returnsMatch() { NetworkService httpService = NetworkService.newBuilder() .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80)) .setTransportProtocol(TransportProtocol.TCP) .setServiceName("http") .build(); PluginManager pluginManager = Guice.createInjector( new FakePortScannerBootstrapModule(), new FakeServiceFingerprinterBootstrapModule()) .getInstance(PluginManager.class); Optional<PluginMatchingResult<ServiceFingerprinter>> fingerprinter = pluginManager.getServiceFingerprinter(httpService); assertThat(fingerprinter).isPresent(); assertThat(fingerprinter.get().matchedServices()).containsExactly(httpService); }
public static Map<String, Map<String, Field>> rowListToMap(List<Row> rowList, List<String> primaryKeyList) {
    // {value of primaryKey, value of all columns}
    Map<String, Map<String, Field>> rowMap = new HashMap<>();
    for (Row row : rowList) {
        // ensure the order of columns
        List<Field> rowFieldList = row.getFields().stream()
                .sorted(Comparator.comparing(Field::getName))
                .collect(Collectors.toList());
        // {uppercase fieldName : field}
        Map<String, Field> colsMap = new HashMap<>();
        StringBuilder rowKey = new StringBuilder();
        boolean firstUnderline = false;
        for (int j = 0; j < rowFieldList.size(); j++) {
            Field field = rowFieldList.get(j);
            if (primaryKeyList.stream().anyMatch(e -> field.getName().equals(e))) {
                if (firstUnderline && j > 0) {
                    rowKey.append("_");
                }
                rowKey.append(String.valueOf(field.getValue()));
                firstUnderline = true;
            }
            colsMap.put(field.getName().trim().toUpperCase(), field);
        }
        rowMap.put(rowKey.toString(), colsMap);
    }
    return rowMap;
}
@Test public void testRowListToMapWithMultiplePk() { List<String> primaryKeyList = new ArrayList<>(); primaryKeyList.add("id1"); primaryKeyList.add("id2"); List<Row> rows = new ArrayList<>(); Field field1 = new Field("id1", 1, "1"); Field field11 = new Field("id2", 1, "2"); Row row = new Row(); row.add(field1); row.add(field11); rows.add(row); Field field2 = new Field("id1", 1, "3"); Field field22 = new Field("id2", 1, "4"); Row row2 = new Row(); row2.add(field2); row2.add(field22); rows.add(row2); Field field3 = new Field("id1", 1, "5"); Field field33 = new Field("id2", 1, "6"); Row row3 = new Row(); row3.add(field3); row3.add(field33); rows.add(row3); Map<String, Map<String, Field>> result = DataCompareUtils.rowListToMap(rows, primaryKeyList); Assertions.assertEquals(3, result.size()); Assertions.assertEquals("1_2", result.keySet().iterator().next()); }
public static <T> Partition<T> of( int numPartitions, PartitionWithSideInputsFn<? super T> partitionFn, Requirements requirements) { Contextful ctfFn = Contextful.fn( (T element, Contextful.Fn.Context c) -> partitionFn.partitionFor(element, numPartitions, c), requirements); return new Partition<>(new PartitionDoFn<T>(numPartitions, ctfFn, partitionFn)); }
@Test @Category(NeedsRunner.class) public void testPartitionFnOutputTypeDescriptorRaw() throws Exception { PCollectionList<String> output = pipeline.apply(Create.of("hello")).apply(Partition.of(1, (element, numPartitions) -> 0)); thrown.expect(CannotProvideCoderException.class); pipeline.getCoderRegistry().getCoder(output.get(0).getTypeDescriptor()); }
public static String validateSubject(String claimName, String claimValue) throws ValidateException { return validateString(claimName, claimValue); }
@Test public void testValidateSubject() { String expected = "jdoe"; String actual = ClaimValidationUtils.validateSubject("sub", expected); assertEquals(expected, actual); }
@Override public boolean next() throws SQLException { if (getCurrentQueryResult().next()) { return true; } if (!queryResults.hasNext()) { return false; } setCurrentQueryResult(queryResults.next()); boolean hasNext = getCurrentQueryResult().next(); if (hasNext) { return true; } while (!hasNext && queryResults.hasNext()) { setCurrentQueryResult(queryResults.next()); hasNext = getCurrentQueryResult().next(); } return hasNext; }
@Test void assertNextForResultSetsAllEmpty() throws SQLException { List<QueryResult> queryResults = Arrays.asList(mock(QueryResult.class, RETURNS_DEEP_STUBS), mock(QueryResult.class, RETURNS_DEEP_STUBS), mock(QueryResult.class, RETURNS_DEEP_STUBS)); ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL")); ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME); MergedResult actual = resultMerger.merge(queryResults, selectStatementContext, database, mock(ConnectionContext.class)); assertFalse(actual.next()); }
public Command create( final ConfiguredStatement<? extends Statement> statement, final KsqlExecutionContext context) { return create(statement, context.getServiceContext(), context); }
@Test
public void shouldValidateResumeQuery() {
    // Given:
    givenResume();

    // When:
    commandFactory.create(configuredStatement, executionContext);

    // Then:
    verify(executionContext).getPersistentQuery(QUERY_ID);
    verify(query1).resume();
}
@Override public Optional<Code> remove(String code) { return Optional.ofNullable(store.asMap().remove(code)); }
@Test
void remove_nonExisting() {
    Cache<String, Code> cache = Caffeine.newBuilder().build();
    var sut = new CaffeineCodeRepo(cache);

    // when
    var c1 = sut.remove("x");

    // then
    assertTrue(c1.isEmpty());
}
protected void handleInboundMessage(Object msg) { inboundMessages().add(msg); }
@Test public void testHandleInboundMessage() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); EmbeddedChannel channel = new EmbeddedChannel() { @Override protected void handleInboundMessage(Object msg) { latch.countDown(); } }; channel.writeOneInbound("Hello, Netty!"); if (!latch.await(1L, TimeUnit.SECONDS)) { fail("Nobody called #handleInboundMessage() in time."); } }
public static void checkArgument(boolean expression, Object errorMessage) { if (Objects.isNull(errorMessage)) { throw new IllegalArgumentException("errorMessage cannot be null."); } if (!expression) { throw new IllegalArgumentException(String.valueOf(errorMessage)); } }
@Test void testCheckArgument3Args1false3null() { assertThrows(IllegalArgumentException.class, () -> { Preconditions.checkArgument(false, ERRORMSG, null); }); }
@Override public boolean addTopicConfig(final String topicName, final Map<String, ?> overrides) { final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName); final Map<String, String> stringConfigs = toStringConfigs(overrides); try { final Map<String, String> existingConfig = topicConfig(topicName, false); final boolean changed = stringConfigs.entrySet().stream() .anyMatch(e -> !Objects.equals(existingConfig.get(e.getKey()), e.getValue())); if (!changed) { return false; } final Set<AlterConfigOp> entries = stringConfigs.entrySet().stream() .map(e -> new ConfigEntry(e.getKey(), e.getValue())) .map(ce -> new AlterConfigOp(ce, AlterConfigOp.OpType.SET)) .collect(Collectors.toSet()); final Map<ConfigResource, Collection<AlterConfigOp>> request = Collections.singletonMap(resource, entries); ExecutorUtil.executeWithRetries( () -> adminClient.get().incrementalAlterConfigs(request).all().get(), ExecutorUtil.RetryBehaviour.ON_RETRYABLE); return true; } catch (final UnsupportedVersionException e) { return addTopicConfigLegacy(topicName, stringConfigs); } catch (final Exception e) { throw new KafkaResponseGetFailedException( "Failed to set config for Kafka Topic " + topicName, e); } }
@Test
public void shouldRetryAddingTopicConfig() {
    // Given:
    givenTopicConfigs(
        "peter",
        overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "12345"),
        defaultConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy")
    );

    final Map<String, ?> overrides = ImmutableMap.of(
        CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT
    );

    when(adminClient.incrementalAlterConfigs(any()))
        .thenAnswer(alterConfigsResult(new DisconnectException()))
        .thenAnswer(alterConfigsResult());

    // When:
    kafkaTopicClient.addTopicConfig("peter", overrides);

    // Then:
    verify(adminClient, times(2)).incrementalAlterConfigs(any());
}
@CanIgnoreReturnValue public final Ordered containsExactly() { return containsExactlyEntriesIn(ImmutableMap.of()); }
@Test public void containsExactlyExtraKeyAndMissingKey_failsWithSameToStringForKeys() { expectFailureWhenTestingThat(ImmutableMap.of(1L, "jan", 2, "feb")) .containsExactly(1, "jan", 2, "feb"); assertFailureKeys( "missing keys", "for key", "expected value", "unexpected keys", "for key", "unexpected value", "---", "expected", "but was"); assertFailureValueIndexed("for key", 0, "1 (java.lang.Integer)"); assertFailureValue("expected value", "jan"); assertFailureValueIndexed("for key", 1, "1 (java.lang.Long)"); assertFailureValue("unexpected value", "jan"); }
public static HostAndPort toHostAndPort(NetworkEndpoint networkEndpoint) { switch (networkEndpoint.getType()) { case IP: return HostAndPort.fromHost(networkEndpoint.getIpAddress().getAddress()); case IP_PORT: return HostAndPort.fromParts( networkEndpoint.getIpAddress().getAddress(), networkEndpoint.getPort().getPortNumber()); case HOSTNAME: case IP_HOSTNAME: return HostAndPort.fromHost(networkEndpoint.getHostname().getName()); case HOSTNAME_PORT: case IP_HOSTNAME_PORT: return HostAndPort.fromParts( networkEndpoint.getHostname().getName(), networkEndpoint.getPort().getPortNumber()); case UNRECOGNIZED: case TYPE_UNSPECIFIED: throw new AssertionError("Type for NetworkEndpoint must be specified."); } throw new AssertionError( String.format( "Should never happen. Unchecked NetworkEndpoint type: %s", networkEndpoint.getType())); }
@Test public void toHostAndPort_withIpAddress_returnsHostWithIp() { NetworkEndpoint ipV4Endpoint = NetworkEndpoint.newBuilder() .setType(NetworkEndpoint.Type.IP) .setIpAddress( IpAddress.newBuilder().setAddress("1.2.3.4").setAddressFamily(AddressFamily.IPV4)) .build(); assertThat(NetworkEndpointUtils.toHostAndPort(ipV4Endpoint)) .isEqualTo(HostAndPort.fromHost("1.2.3.4")); }
public static int getVCores(Configuration conf) {
    if (!isHardwareDetectionEnabled(conf)) {
        return getConfiguredVCores(conf);
    }
    // Is this an OS for which we can determine the number of cores?
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
    if (plugin == null) {
        return getConfiguredVCores(conf);
    }
    return getVCoresInternal(plugin, conf);
}
@Test public void testGetVCores() { ResourceCalculatorPlugin plugin = new TestResourceCalculatorPlugin(); YarnConfiguration conf = new YarnConfiguration(); conf.setFloat(YarnConfiguration.NM_PCORES_VCORES_MULTIPLIER, 1.25f); int ret = NodeManagerHardwareUtils.getVCores(plugin, conf); Assert.assertEquals(YarnConfiguration.DEFAULT_NM_VCORES, ret); conf.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION, true); ret = NodeManagerHardwareUtils.getVCores(plugin, conf); Assert.assertEquals(5, ret); conf.setBoolean(YarnConfiguration.NM_COUNT_LOGICAL_PROCESSORS_AS_CORES, true); ret = NodeManagerHardwareUtils.getVCores(plugin, conf); Assert.assertEquals(10, ret); conf.setInt(YarnConfiguration.NM_VCORES, 10); ret = NodeManagerHardwareUtils.getVCores(plugin, conf); Assert.assertEquals(10, ret); YarnConfiguration conf1 = new YarnConfiguration(); conf1.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION, false); conf1.setInt(YarnConfiguration.NM_VCORES, 10); ret = NodeManagerHardwareUtils.getVCores(plugin, conf1); Assert.assertEquals(10, ret); }
@Override public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext, final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException { SQLStatement dalStatement = sqlStatementContext.getSqlStatement(); if (dalStatement instanceof MySQLShowDatabasesStatement) { return new LocalDataMergedResult(Collections.singleton(new LocalDataQueryResultRow(databaseName))); } ShardingSphereSchema schema = getSchema(sqlStatementContext, database); if (dalStatement instanceof MySQLShowTablesStatement) { return new LogicTablesMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } if (dalStatement instanceof MySQLShowTableStatusStatement) { return new ShowTableStatusMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } if (dalStatement instanceof MySQLShowIndexStatement) { return new ShowIndexMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } if (dalStatement instanceof MySQLShowCreateTableStatement) { return new ShowCreateTableMergedResult(shardingRule, sqlStatementContext, schema, queryResults); } return new TransparentMergedResult(queryResults.get(0)); }
@Test void assertMergeForShowCreateTableStatement() throws SQLException { DALStatement dalStatement = new MySQLShowCreateTableStatement(); SQLStatementContext sqlStatementContext = mockSQLStatementContext(dalStatement); ShardingDALResultMerger resultMerger = new ShardingDALResultMerger(DefaultDatabase.LOGIC_NAME, null); ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME); assertThat(resultMerger.merge(queryResults, sqlStatementContext, database, mock(ConnectionContext.class)), instanceOf(ShowCreateTableMergedResult.class)); }
@Override
public HttpResponse validateAndCreate(@JsonBody JSONObject request) {
    String accessToken = (String) request.get("accessToken");
    if (accessToken == null) {
        throw new ServiceException.BadRequestException("accessToken is required");
    }
    accessToken = accessToken.trim();
    try {
        User authenticatedUser = getAuthenticatedUser();
        checkPermission();
        HttpURLConnection connection = connect(String.format("%s/%s", getUri(), "user"), accessToken);
        validateAccessTokenScopes(connection);
        String data = IOUtils.toString(HttpRequest.getInputStream(connection), Charset.defaultCharset());
        GHUser user = GithubScm.getMappingObjectReader().forType(GHUser.class).readValue(data);
        if (user.getEmail() != null) {
            Mailer.UserProperty p = authenticatedUser.getProperty(Mailer.UserProperty.class);
            // XXX: If there is already an email address for this user, should we update it with
            // the one from GitHub?
            if (p == null) {
                authenticatedUser.addProperty(new Mailer.UserProperty(user.getEmail()));
            }
        }
        // Now we know the token is valid. Let's find the credential
        String credentialId = createCredentialId(getUri());
        StandardUsernamePasswordCredentials githubCredential =
            CredentialsUtils.findCredential(credentialId, StandardUsernamePasswordCredentials.class,
                new BlueOceanDomainRequirement());
        final StandardUsernamePasswordCredentials credential =
            new UsernamePasswordCredentialsImpl(CredentialsScope.USER, credentialId,
                getCredentialDescription(), authenticatedUser.getId(), accessToken);
        if (githubCredential == null) {
            CredentialsUtils.createCredentialsInUserStore(
                credential, authenticatedUser, getCredentialDomainName(),
                Collections.singletonList(new BlueOceanDomainSpecification()));
        } else {
            CredentialsUtils.updateCredentialsInUserStore(
                githubCredential, credential, authenticatedUser, getCredentialDomainName(),
                Collections.singletonList(new BlueOceanDomainSpecification()));
        }
        return createResponse(credential.getId());
    } catch (IOException e) {
        if (e instanceof MalformedURLException || e instanceof UnknownHostException) {
            throw new ServiceException.BadRequestException(
                new ErrorMessage(400, "Invalid apiUrl").add(
                    new ErrorMessage.Error("apiUrl",
                        ErrorMessage.Error.ErrorCodes.INVALID.toString(), e.getMessage())));
        }
        throw new ServiceException.UnexpectedErrorException(e.getMessage());
    }
}
@Test public void validateAndCreate() throws Exception { validateAndCreate("12345"); }
@Override public Map<String, String> discoverLocalMetadata() { if (memberMetadata.isEmpty()) { memberMetadata.put(PartitionGroupMetaData.PARTITION_GROUP_ZONE, gcpClient.getAvailabilityZone()); } return memberMetadata; }
@Test
public void discoverLocalMetadata() {
    // given
    given(gcpClient.getAvailabilityZone()).willReturn(ZONE);

    // when
    Map<String, String> result1 = gcpDiscoveryStrategy.discoverLocalMetadata();
    Map<String, String> result2 = gcpDiscoveryStrategy.discoverLocalMetadata();

    // then
    assertEquals(ZONE, result1.get(PartitionGroupMetaData.PARTITION_GROUP_ZONE));
    assertEquals(ZONE, result2.get(PartitionGroupMetaData.PARTITION_GROUP_ZONE));
    verify(gcpClient).getAvailabilityZone();
}
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) {
    if (!feedBlockEnabled) {
        return null;
    }
    var nodeInfos = cluster.getNodeInfos();
    var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos);
    if (exhaustions.isEmpty()) {
        return null;
    }
    int maxDescriptions = 3;
    String description = exhaustions.stream()
            .limit(maxDescriptions)
            .map(NodeResourceExhaustion::toExhaustionAddedDescription)
            .collect(Collectors.joining(", "));
    if (exhaustions.size() > maxDescriptions) {
        description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions);
    }
    description = decoratedMessage(cluster, description);
    // FIXME we currently will trigger a cluster state recomputation even if the number of
    // exhaustions is greater than what is returned as part of the description. Though at
    // that point, cluster state recomputations will be the least of your worries...!
    return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions);
}
@Test void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster()); assertNull(feedBlock); }
public Map<String, Object> getContext(Map<String, Object> modelMap, Class<? extends SparkController> controller, String viewName) { Map<String, Object> context = new HashMap<>(modelMap); context.put("currentGoCDVersion", CurrentGoCDVersion.getInstance().getGocdDistVersion()); context.put("railsAssetsService", railsAssetsService); context.put("webpackAssetsService", webpackAssetsService); context.put("securityService", securityService); context.put("maintenanceModeService", maintenanceModeService); context.put("currentUser", SessionUtils.currentUsername()); context.put("controllerName", humanizedControllerName(controller)); context.put("viewName", viewName); context.put("currentVersion", CurrentGoCDVersion.getInstance()); context.put("toggles", Toggles.class); context.put("goUpdate", versionInfoService.getGoUpdate()); context.put("goUpdateCheckEnabled", versionInfoService.isGOUpdateCheckEnabled()); context.put("serverTimezoneUTCOffset", TimeZone.getDefault().getOffset(new Date().getTime())); context.put("spaRefreshInterval", SystemEnvironment.goSpaRefreshInterval()); context.put("spaTimeout", SystemEnvironment.goSpaTimeout()); context.put("showAnalyticsDashboard", showAnalyticsDashboard()); context.put("devMode", !new SystemEnvironment().useCompressedJs()); context.put("serverSiteUrls", GSON.toJson(serverConfigService.getServerSiteUrls())); return context; }
@Test void shouldShowAnalyticsDashboard() { Map<String, Object> modelMap = new HashMap<>(); when(securityService.isUserAdmin(any(Username.class))).thenReturn(true); CombinedPluginInfo combinedPluginInfo = new CombinedPluginInfo(analyticsPluginInfo()); when(pluginInfoFinder.allPluginInfos(PluginConstants.ANALYTICS_EXTENSION)).thenReturn(List.of(combinedPluginInfo)); Map<String, Object> context = initialContextProvider.getContext(modelMap, dummySparkController.getClass(), "viewName"); assertThat(context.get("showAnalyticsDashboard")).isEqualTo(true); }
@Override public void validateSmsCode(SmsCodeValidateReqDTO reqDTO) { validateSmsCode0(reqDTO.getMobile(), reqDTO.getCode(), reqDTO.getScene()); }
@Test
public void validateSmsCode_success() {
    // Prepare parameters
    SmsCodeValidateReqDTO reqDTO = randomPojo(SmsCodeValidateReqDTO.class, o -> {
        o.setMobile("15601691300");
        o.setScene(randomEle(SmsSceneEnum.values()).getScene());
    });
    // Mock data
    SqlConstants.init(DbType.MYSQL);
    smsCodeMapper.insert(randomPojo(SmsCodeDO.class, o -> o.setMobile(reqDTO.getMobile())
        .setScene(reqDTO.getScene()).setCode(reqDTO.getCode()).setUsed(false)));
    // Invoke
    smsCodeService.validateSmsCode(reqDTO);
}
Map<Long, Map<Integer, InformationElementDefinition>> buildPenToIedsMap(JsonNode jsonNode) { final long enterpriseNumber = jsonNode.get("enterprise_number").asLong(); ImmutableMap.Builder<Integer, InformationElementDefinition> iedBuilder = ImmutableMap.builder(); jsonNode.path("information_elements").elements() .forEachRemaining(ied -> { final int elementId = ied.get("element_id").asInt(); final String dataType = ied.get("data_type").asText(); final String fieldName = ied.get("name").asText(); iedBuilder.put(elementId, InformationElementDefinition.create(dataType, fieldName, elementId)); }); penToIedsMap.put(enterpriseNumber, iedBuilder.build()); return penToIedsMap; }
@Test
public void buildPenToIedsMap() throws IOException {
    // Standard definition file
    InformationElementDefinitions definitions = new InformationElementDefinitions(
            Resources.getResource("ipfix-iana-elements.json")
    );
    // Load the custom definition file
    String custDefStr = "{ \"enterprise_number\": 3054, \"information_elements\": [ { \"element_id\": 110, \"name\": \"l7ApplicationId\", \"data_type\": \"unsigned32\" }, { \"element_id\": 111, \"name\": \"l7ApplicationName\", \"data_type\": \"string\" }, { \"element_id\": 120, \"name\": \"sourceIpCountryCode\", \"data_type\": \"string\" }, { \"element_id\": 121, \"name\": \"sourceIpCountryName\", \"data_type\": \"string\" }, { \"element_id\": 122, \"name\": \"sourceIpRegionCode\", \"data_type\": \"string\" }, { \"element_id\": 123, \"name\": \"sourceIpRegionName\", \"data_type\": \"string\" }, { \"element_id\": 125, \"name\": \"sourceIpCityName\", \"data_type\": \"string\" }, { \"element_id\": 126, \"name\": \"sourceIpLatitude\", \"data_type\": \"float32\" }, { \"element_id\": 127, \"name\": \"sourceIpLongitude\", \"data_type\": \"float32\" }, { \"element_id\": 140, \"name\": \"destinationIpCountryCode\", \"data_type\": \"string\" }, { \"element_id\": 141, \"name\": \"destinationIpCountryName\", \"data_type\": \"string\" }, { \"element_id\": 142, \"name\": \"destinationIpRegionCode\", \"data_type\": \"string\" }, { \"element_id\": 143, \"name\": \"destinationIpRegionName\", \"data_type\": \"string\" }, { \"element_id\": 145, \"name\": \"destinationIpCityName\", \"data_type\": \"string\" }, { \"element_id\": 146, \"name\": \"destinationIpLatitude\", \"data_type\": \"float32\" }, { \"element_id\": 147, \"name\": \"destinationIpLongitude\", \"data_type\": \"float32\" }, { \"element_id\": 160, \"name\": \"osDeviceId\", \"data_type\": \"unsigned8\" }, { \"element_id\": 161, \"name\": \"osDeviceName\", \"data_type\": \"string\" }, { \"element_id\": 162, \"name\": \"browserId\", \"data_type\": \"unsigned8\" }, { \"element_id\": 163, \"name\": \"browserName\", \"data_type\": \"string\" }, { \"element_id\": 176, \"name\": \"reverseOctetDeltaCount\", \"data_type\": \"unsigned64\" }, { \"element_id\": 177, \"name\": \"reversePacketDeltaCount\", \"data_type\": \"unsigned64\" }, { \"element_id\": 178, \"name\": \"sslConnectionEncryptionType\", \"data_type\": \"string\" }, { \"element_id\": 179, \"name\": \"sslEncryptionCipherName\", \"data_type\": \"string\" }, { \"element_id\": 180, \"name\": \"sslEncryptionKeyLength\", \"data_type\": \"unsigned16\" }, { \"element_id\": 182, \"name\": \"userAgent\", \"data_type\": \"string\" }, { \"element_id\": 183, \"name\": \"hostName\", \"data_type\": \"string\" }, { \"element_id\": 184, \"name\": \"uri\", \"data_type\": \"string\" }, { \"element_id\": 185, \"name\": \"dnsText\", \"data_type\": \"string\" }, { \"element_id\": 186, \"name\": \"sourceAsName\", \"data_type\": \"string\" }, { \"element_id\": 187, \"name\": \"destinationAsName\", \"data_type\": \"string\" }, { \"element_id\": 188, \"name\": \"transactionLatency\", \"data_type\": \"unsigned32\" }, { \"element_id\": 189, \"name\": \"dnsQueryHostName\", \"data_type\": \"string\" }, { \"element_id\": 190, \"name\": \"dnsResponseHostName\", \"data_type\": \"string\" }, { \"element_id\": 191, \"name\": \"dnsClasses\", \"data_type\": \"string\" }, { \"element_id\": 192, \"name\": \"threatType\", \"data_type\": \"string\" }, { \"element_id\": 193, \"name\": \"threatIpv4\", \"data_type\": \"ipv4address\" }, { \"element_id\": 194, \"name\": \"threatIpv6\", \"data_type\": \"ipv6address\" }, { \"element_id\": 195, \"name\": \"httpSession\", \"data_type\": \"subtemplatelist\" }, { \"element_id\": 196, \"name\": \"requestTime\", \"data_type\": \"unsigned32\" }, { \"element_id\": 197, \"name\": \"dnsRecord\", \"data_type\": \"subtemplatelist\" }, { \"element_id\": 198, \"name\": \"dnsName\", \"data_type\": \"string\" }, { \"element_id\": 199, \"name\": \"dnsIpv4Address\", \"data_type\": \"ipv4address\" }, { \"element_id\": 200, \"name\": \"dnsIpv6Address\", \"data_type\": \"ipv6address\" }, { \"element_id\": 201, \"name\": \"sni\", \"data_type\": \"string\" }, { \"element_id\": 457, \"name\": \"httpStatusCode\", \"data_type\": \"unsigned16\" }, { \"element_id\": 459, \"name\": \"httpRequestMethod\", \"data_type\": \"string\" }, { \"element_id\": 462, \"name\": \"httpMessageVersion\", \"data_type\": \"string\" } ] }\n";
    // Create a temporary json file and write custDefStr to it.
    final File tempFile = tempFolder.newFile("tempFile.json");
    FileUtils.writeStringToFile(tempFile, custDefStr, StandardCharsets.UTF_8);
    ObjectMapper objectMapper = new ObjectMapper();
    JsonNode custDefJsonNode = objectMapper.readTree(tempFile);
    Map<Long, Map<Integer, InformationElementDefinition>> map = definitions.buildPenToIedsMap(custDefJsonNode);
    // The enterprise number holds the key, so the map now contains the IANA and custom PENs.
    assertEquals(2, map.size());
}
public void close() {
    try {
        httpTlsClient.close();
    } catch (Exception ignore) {
        // Ignore
    }
    try {
        httpNonTlsClient.close();
    } catch (Exception ignore) {
        // Ignore
    }
    if (vertx != null && ownedVertx) {
        vertx.close();
    }
}
@Test
public void shouldFailToStartClientRequestWithNullKeystorePassword() throws Exception {
    ksqlClient.close();
    stopServer();

    // Given:
    startServerWithTls();

    // When:
    final KsqlRestClientException e = assertThrows(
        KsqlRestClientException.class,
        () -> startClientWithTlsAndTruststorePassword(null)
    );

    // Then:
    assertThat(e.getCause().getMessage(), containsString(
        "java.io.IOException: Keystore was tampered with, or password was incorrect"
    ));
}
public static void processEnvVariables(Map<String, String> inputProperties) {
    processEnvVariables(inputProperties, System.getenv());
}
@Test
void jsonEnvVariablesShouldNotOverrideGenericEnv() {
    var inputProperties = new HashMap<String, String>();

    EnvironmentConfig.processEnvVariables(inputProperties, Map.of(
        "SONAR_SCANNER_FOO", "value1",
        "SONAR_SCANNER_JSON_PARAMS", "{\"sonar.scanner.foo\":\"should not override\", \"key2\":\"value2\"}"));

    assertThat(inputProperties).containsOnly(
        entry("sonar.scanner.foo", "value1"),
        entry("key2", "value2"));
    assertThat(logTester.logs(Level.WARN)).containsOnly(
        "Ignoring property 'sonar.scanner.foo' from env variable 'SONAR_SCANNER_JSON_PARAMS' because it is already defined");
}
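The precedence rule exercised above — a generic SONAR_SCANNER_* variable wins over the same key arriving via SONAR_SCANNER_JSON_PARAMS — can be sketched with a two-pass merge. This is a minimal sketch, not the scanner's implementation: the class and method names are hypothetical, and the real mapping of env-variable names to property keys is more involved than the simple lowercase suffix used here.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;

final class EnvMergeSketch {
    private static final String GENERIC_PREFIX = "SONAR_SCANNER_";
    private static final String JSON_PARAMS = "SONAR_SCANNER_JSON_PARAMS";

    static Map<String, String> mergeEnv(Map<String, String> env) throws Exception {
        Map<String, String> props = new HashMap<>();
        // Pass 1: generic variables first, e.g. SONAR_SCANNER_FOO -> sonar.scanner.foo
        for (Map.Entry<String, String> e : env.entrySet()) {
            if (e.getKey().startsWith(GENERIC_PREFIX) && !e.getKey().equals(JSON_PARAMS)) {
                String key = "sonar.scanner." + e.getKey().substring(GENERIC_PREFIX.length()).toLowerCase();
                props.put(key, e.getValue());
            }
        }
        // Pass 2: JSON params may only fill keys that are not already defined
        String json = env.get(JSON_PARAMS);
        if (json != null) {
            JsonNode root = new ObjectMapper().readTree(json);
            root.fields().forEachRemaining(f -> props.putIfAbsent(f.getKey(), f.getValue().asText()));
        }
        return props;
    }
}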
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    return new StreamingJobGraphGenerator(
                    Thread.currentThread().getContextClassLoader(), streamGraph, null, Runnable::run)
            .createJobGraph();
}
@Test
void testCoordinatedSerializationException() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStreamSource<Integer> source = env.fromData(1, 2, 3);
    env.addOperator(
            new OneInputTransformation<>(
                    source.getTransformation(),
                    "serializationTestOperator",
                    new SerializationTestOperatorFactory(true),
                    Types.INT,
                    1));

    StreamGraph streamGraph = env.getStreamGraph();
    assertThatThrownBy(() -> StreamingJobGraphGenerator.createJobGraph(streamGraph))
            .hasRootCauseInstanceOf(IOException.class)
            .hasRootCauseMessage("This provider is not serializable.");
}
public boolean isAbsolute() {
    return isUriPathAbsolute();
}
@Test(timeout = 30000)
public void testIsAbsolute() {
    assertTrue(new Path("/").isAbsolute());
    assertTrue(new Path("/foo").isAbsolute());
    assertFalse(new Path("foo").isAbsolute());
    assertFalse(new Path("foo/bar").isAbsolute());
    assertFalse(new Path(".").isAbsolute());
    if (Path.WINDOWS) {
        assertTrue(new Path("c:/a/b").isAbsolute());
        assertFalse(new Path("c:a/b").isAbsolute());
    }
}
public Promise<T> fulfillInAsync(final Callable<T> task, Executor executor) {
    executor.execute(() -> {
        try {
            fulfill(task.call());
        } catch (Exception ex) {
            fulfillExceptionally(ex);
        }
    });
    return this;
}
@Test
void promiseIsFulfilledWithTheResultantValueOfExecutingTheTask() throws InterruptedException, ExecutionException {
    promise.fulfillInAsync(new NumberCrunchingTask(), executor);
    assertEquals(NumberCrunchingTask.CRUNCHED_NUMBER, promise.get());
    assertTrue(promise.isDone());
    assertFalse(promise.isCancelled());
}
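The same async-fulfilment pattern exists in the JDK. As a point of comparison (not the Promise type above), CompletableFuture.supplyAsync runs the task on the given executor and completes the future with its result, or exceptionally with the thrown exception:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncFulfilDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // supplyAsync plays the role of fulfillInAsync: the task runs on the executor,
        // and the future is completed with the result (or the exception) of task.call().
        CompletableFuture<Integer> promise = CompletableFuture.supplyAsync(() -> 6 * 7, executor);
        System.out.println(promise.get()); // 42
        executor.shutdown();
    }
}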
@VisibleForTesting
static BlobKey createKey(BlobType type) {
    if (type == PERMANENT_BLOB) {
        return new PermanentBlobKey();
    } else {
        return new TransientBlobKey();
    }
}
@Test
void testToFromStringTransientKey() {
    testToFromString(BlobKey.createKey(TRANSIENT_BLOB));
}
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
@Test
public void testReadCommittedWithCompactedTopic() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(),
        Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);

    long pid1 = 1L;
    long pid2 = 2L;
    long pid3 = 3L;

    appendTransactionalRecords(buffer, pid3, 3L,
        new SimpleRecord("3".getBytes(), "value".getBytes()),
        new SimpleRecord("4".getBytes(), "value".getBytes()));

    appendTransactionalRecords(buffer, pid2, 15L,
        new SimpleRecord("15".getBytes(), "value".getBytes()),
        new SimpleRecord("16".getBytes(), "value".getBytes()),
        new SimpleRecord("17".getBytes(), "value".getBytes()));

    appendTransactionalRecords(buffer, pid1, 22L,
        new SimpleRecord("22".getBytes(), "value".getBytes()),
        new SimpleRecord("23".getBytes(), "value".getBytes()));

    abortTransaction(buffer, pid2, 28L);

    appendTransactionalRecords(buffer, pid3, 30L,
        new SimpleRecord("30".getBytes(), "value".getBytes()),
        new SimpleRecord("31".getBytes(), "value".getBytes()),
        new SimpleRecord("32".getBytes(), "value".getBytes()));

    commitTransaction(buffer, pid3, 35L);

    appendTransactionalRecords(buffer, pid1, 39L,
        new SimpleRecord("39".getBytes(), "value".getBytes()),
        new SimpleRecord("40".getBytes(), "value".getBytes()));

    // transaction from pid1 is aborted, but the marker is not included in the fetch
    buffer.flip();

    // send the fetch
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, sendFetches());

    // prepare the response. the aborted transactions begin at offsets which are no longer in the log
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Arrays.asList(
        new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(6),
        new FetchResponseData.AbortedTransaction().setProducerId(pid1).setFirstOffset(0)
    );

    client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer),
        abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<String, String>>> allFetchedRecords = fetchRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<String, String>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(5, fetchedRecords.size());
    assertEquals(Arrays.asList(3L, 4L, 30L, 31L, 32L), collectRecordOffsets(fetchedRecords));
}
public DLQEntry pollEntry(long timeout) throws IOException, InterruptedException {
    byte[] bytes = pollEntryBytes(timeout);
    if (bytes == null) {
        return null;
    }
    return DLQEntry.deserialize(bytes);
}
@Test
public void testReaderFindSegmentHoleAfterSimulatingRetentionPolicyClean() throws IOException, InterruptedException {
    final int eventsPerSegment = prepareFilledSegmentFiles(3);
    assertEquals(319, eventsPerSegment);

    int remainingEventsInSegment = eventsPerSegment;

    try (DeadLetterQueueReader reader = new DeadLetterQueueReader(dir)) {
        // read the first event to initialize reader structures
        final DLQEntry dlqEntry = reader.pollEntry(1_000);
        assertEquals("00000", dlqEntry.getReason());
        remainingEventsInSegment--;

        // simulate a storage policy clean, drop the middle segment file
        final List<Path> allSegments = listSegmentsSorted(dir);
        assertThat(allSegments.size(), greaterThanOrEqualTo(2));
        Files.delete(allSegments.remove(0)); // tail segment
        Files.delete(allSegments.remove(0)); // the segment after

        // consume the first segment
        for (int i = 0; i < remainingEventsInSegment; i++) {
            reader.pollEntry(1_000);
        }

        // Exercise
        // consume the first event after the hole
        final DLQEntry entryAfterHole = reader.pollEntry(1_000);
        assertEquals(String.format("%05d", eventsPerSegment * 2), entryAfterHole.getReason());
    }
}
public void setMaxFetchKBSec(int rate) {
    kp.put("maxFetchKBSec", rate);
}
@Test
public void testMaxFetchKBSec() throws Exception {
    CrawlURI curi = makeCrawlURI("http://localhost:7777/200k");
    fetcher().setMaxFetchKBSec(100);

    // If the wire logger is enabled, it can slow things down enough to make
    // this test fail, so disable it temporarily.
    Level savedWireLevel = Logger.getLogger("org.apache.http.wire").getLevel();
    Logger.getLogger("org.apache.http.wire").setLevel(Level.INFO);

    fetcher().process(curi);

    Logger.getLogger("org.apache.http.wire").setLevel(savedWireLevel);

    assertEquals(200000, curi.getContentLength());
    assertTrue(curi.getFetchDuration() > 1800 && curi.getFetchDuration() < 2200);
}
public PrimitiveValue maxValue() {
    return maxValue;
}
@Test
void shouldReturnDefaultMaxValueWhenUnspecified() throws Exception {
    final String testXmlString =
        "<types>" +
        "    <type name=\"testTypeDefaultCharMaxValue\" primitiveType=\"char\"/>" +
        "</types>";

    final Map<String, Type> map = parseTestXmlWithMap("/types/type", testXmlString);
    assertNull(((EncodedDataType)map.get("testTypeDefaultCharMaxValue")).maxValue());
}
@Override
public void destroy() {
    super.destroy();
    try {
        ExecutorUtil.cancelScheduledFuture(cleanFuture);
    } catch (Throwable t) {
        logger.warn(REGISTRY_SOCKET_EXCEPTION, "", "", t.getMessage(), t);
    }
    try {
        multicastSocket.leaveGroup(multicastAddress);
        multicastSocket.close();
    } catch (Throwable t) {
        logger.warn(REGISTRY_SOCKET_EXCEPTION, "", "", t.getMessage(), t);
    }
    ExecutorUtil.gracefulShutdown(cleanExecutor, cleanPeriod);
}
@Test
void testDestroy() {
    MulticastSocket socket = registry.getMulticastSocket();
    assertFalse(socket.isClosed());

    // after destroy, the multicast socket should be closed
    registry.destroy();
    socket = registry.getMulticastSocket();
    assertTrue(socket.isClosed());
}
public String toBaseMessageIdString(Object messageId) {
    if (messageId == null) {
        return null;
    } else if (messageId instanceof String) {
        String stringId = (String) messageId;

        // If the given string has a type encoding prefix,
        // we need to escape it as an encoded string (even if
        // the existing encoding prefix was also for string)
        if (hasTypeEncodingPrefix(stringId)) {
            return AMQP_STRING_PREFIX + stringId;
        } else {
            return stringId;
        }
    } else if (messageId instanceof UUID) {
        return AMQP_UUID_PREFIX + messageId.toString();
    } else if (messageId instanceof UnsignedLong) {
        return AMQP_ULONG_PREFIX + messageId.toString();
    } else if (messageId instanceof Binary) {
        ByteBuffer dup = ((Binary) messageId).asByteBuffer();

        byte[] bytes = new byte[dup.remaining()];
        dup.get(bytes);

        String hex = convertBinaryToHexString(bytes);

        return AMQP_BINARY_PREFIX + hex;
    } else {
        throw new IllegalArgumentException("Unsupported type provided: " + messageId.getClass());
    }
}
@Test
public void testToBaseMessageIdStringThrowsIAEWithUnexpectedType() {
    try {
        messageIdHelper.toBaseMessageIdString(new Object());
        fail("expected exception not thrown");
    } catch (IllegalArgumentException iae) {
        // expected
    }
}
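The interesting part of the focal method is the escaping rule: a plain string that happens to start with a type-encoding prefix must itself be re-prefixed, otherwise a later decode would misread the payload as a typed id. A reduced sketch of that rule, covering only the string case and using a local prefix constant rather than the real AMQP_* constants:

public class MessageIdPrefixSketch {
    private static final String STRING_PREFIX = "AMQP_STRING:";

    static String toBaseId(String id) {
        // Escape values that look like typed ids by re-prefixing them as strings.
        // (A stand-in for hasTypeEncodingPrefix; the real check matches each known prefix.)
        return id.startsWith("AMQP_") ? STRING_PREFIX + id : id;
    }

    public static void main(String[] args) {
        System.out.println(toBaseId("plain-id"));      // plain-id
        System.out.println(toBaseId("AMQP_UUID:abc")); // AMQP_STRING:AMQP_UUID:abc
    }
}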
public void evaluate(List<AuthorizationContext> contexts) {
    if (CollectionUtils.isEmpty(contexts)) {
        return;
    }
    contexts.forEach(this.authorizationStrategy::evaluate);
}
@Test
public void evaluate6() {
    if (MixAll.isMac()) {
        return;
    }
    this.authConfig.setAuthorizationWhitelist("10");
    this.evaluator = new AuthorizationEvaluator(this.authConfig);
    Subject subject = Subject.of("User:test");
    Resource resource = Resource.ofTopic("test");
    Action action = Action.PUB;
    String sourceIp = "192.168.0.1";
    DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, action, sourceIp);
    context.setRpcCode("10");
    this.evaluator.evaluate(Collections.singletonList(context));
}
public static Env transformEnv(String envName) {
    final String envWellFormName = getWellFormName(envName);
    if (Env.exists(envWellFormName)) {
        return Env.valueOf(envWellFormName);
    }
    // cannot be found or blank name
    return Env.UNKNOWN;
}
@Test
public void testTransformEnvNotExist() {
    assertEquals(Env.UNKNOWN, Env.transformEnv("notexisting"));
    assertEquals(Env.LOCAL, Env.transformEnv("LOCAL"));
}
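The behaviour asserted above is the classic null-safe enum lookup: normalize the name, fall back to a sentinel constant instead of throwing. A self-contained sketch under assumed constants (the real Env enum and its getWellFormName normalization differ):

enum EnvSketch {
    LOCAL, DEV, PRO, UNKNOWN; // hypothetical subset of the real constants

    static EnvSketch transform(String name) {
        if (name == null) return UNKNOWN;
        try {
            return EnvSketch.valueOf(name.trim().toUpperCase());
        } catch (IllegalArgumentException e) {
            return UNKNOWN; // unknown or blank names fall back instead of throwing
        }
    }
}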
@Override
public String put(String key, String value) {
    if (value == null)
        throw new IllegalArgumentException("Null value not allowed as an environment variable: " + key);
    return super.put(key, value);
}
@Test
public void overrideOrderCalculatorMultiple() {
    EnvVars env = new EnvVars();
    EnvVars overrides = new EnvVars();
    overrides.put("A", "Noreference");
    overrides.put("B", "${A}");
    overrides.put("C", "${A}${B}");

    OverrideOrderCalculator calc = new OverrideOrderCalculator(env, overrides);
    List<String> order = calc.getOrderedVariableNames();
    assertEquals(Arrays.asList("A", "B", "C"), order);
}
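The ordering the test demands — resolve a variable only after every variable it references — is a dependency ordering over ${...} references. A naive fixed-point sketch of that idea, assuming no cyclic references (this is not Jenkins' OverrideOrderCalculator, which also has to handle cycles and existing env values):

import java.util.*;
import java.util.regex.*;

public class OverrideOrderSketch {
    private static final Pattern REF = Pattern.compile("\\$\\{(\\w+)}");

    // Repeatedly place every variable whose references are all placed already.
    // Pass a LinkedHashMap for deterministic tie-breaking; cycles would loop forever here.
    static List<String> order(Map<String, String> overrides) {
        List<String> ordered = new ArrayList<>();
        Set<String> placed = new HashSet<>();
        while (placed.size() < overrides.size()) {
            for (Map.Entry<String, String> e : overrides.entrySet()) {
                if (placed.contains(e.getKey())) continue;
                Matcher m = REF.matcher(e.getValue());
                boolean ready = true;
                while (m.find()) {
                    String ref = m.group(1);
                    if (overrides.containsKey(ref) && !placed.contains(ref)) ready = false;
                }
                if (ready) { ordered.add(e.getKey()); placed.add(e.getKey()); }
            }
        }
        return ordered; // {A=Noreference, B=${A}, C=${A}${B}} -> [A, B, C]
    }
}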
public static <T> ClassPluginDocumentation<T> of(JsonSchemaGenerator jsonSchemaGenerator, RegisteredPlugin plugin,
                                                 Class<? extends T> cls, Class<T> baseCls) {
    return new ClassPluginDocumentation<>(jsonSchemaGenerator, plugin, cls, baseCls, null);
}
@SuppressWarnings("unchecked") @Test void trigger() throws URISyntaxException { Helpers.runApplicationContext(throwConsumer((applicationContext) -> { JsonSchemaGenerator jsonSchemaGenerator = applicationContext.getBean(JsonSchemaGenerator.class); PluginScanner pluginScanner = new PluginScanner(ClassPluginDocumentationTest.class.getClassLoader()); RegisteredPlugin scan = pluginScanner.scan(); ClassPluginDocumentation<? extends AbstractTrigger> doc = ClassPluginDocumentation.of(jsonSchemaGenerator, scan, Schedule.class, null); assertThat(doc.getDefs().size(), is(2)); assertThat(doc.getDocLicense(), nullValue()); assertThat(((Map<String, Object>) doc.getDefs().get("io.kestra.core.models.tasks.WorkerGroup")).get("type"), is("object")); assertThat(((Map<String, Object>) ((Map<String, Object>) doc.getDefs().get("io.kestra.core.models.tasks.WorkerGroup")).get("properties")).size(), is(1)); })); }
@Override
public boolean cleanExpiredConsumerQueue(String cluster)
    throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException,
    MQClientException, InterruptedException {
    return defaultMQAdminExtImpl.cleanExpiredConsumerQueue(cluster);
}
@Test
public void testCleanExpiredConsumerQueue() throws InterruptedException, RemotingTimeoutException,
    MQClientException, RemotingSendRequestException, RemotingConnectException {
    boolean result = defaultMQAdminExt.cleanExpiredConsumerQueue("default-cluster");
    assertThat(result).isFalse();
}
@Override
public ServerId create() {
    return ServerId.of(computeDatabaseId(), serverIdGenerator.generate());
}
@Test
public void create_from_scratch_fails_with_ISE_if_JDBC_property_not_set() {
    expectMissingJdbcUrlISE(() -> underTest.create());
}
@Override
public CucumberOptionsAnnotationParser.CucumberOptions getOptions(Class<?> clazz) {
    CucumberOptions annotation = clazz.getAnnotation(CucumberOptions.class);
    if (annotation != null) {
        return new JunitCucumberOptions(annotation);
    }
    warnWhenTestNGCucumberOptionsAreUsed(clazz);
    return null;
}
@Test
void testObjectFactory() {
    io.cucumber.core.options.CucumberOptionsAnnotationParser.CucumberOptions options = this.optionsProvider
            .getOptions(ClassWithCustomObjectFactory.class);
    assertNotNull(options);
    assertEquals(TestObjectFactory.class, options.objectFactory());
}
public static File getPluginFile(final String path) {
    String pluginPath = getPluginPath(path);
    return new File(pluginPath);
}
@Test
public void testGetPluginPathByCustomPath() {
    File jarFile = ShenyuPluginPathBuilder.getPluginFile("/testpath");
    assertNotNull(jarFile);
}
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
@Test
public void shouldThrowOnMapOfNullValues() {
    // Given:
    Expression expression = new CreateMapExpression(
        ImmutableMap.of(
            new StringLiteral("foo"),
            new NullLiteral()
        )
    );

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> expressionTypeManager.getExpressionSqlType(expression)
    );

    // Then:
    assertThat(e.getMessage(), containsString(
        "Cannot construct a map with all NULL values"));
}
@Override
public String ping(RedisClusterNode node) {
    return execute(node, RedisCommands.PING);
}
@Test
public void testClusterPing() {
    RedisClusterNode master = getFirstMaster();
    String res = connection.ping(master);
    assertThat(res).isEqualTo("PONG");
}
public static boolean isNotEmpty(Collection<?> collection) {
    return !isEmpty(collection);
}
@SuppressWarnings("ConstantValue") @Test public void isNotEmptyTest() { assertFalse(CollUtil.isNotEmpty((Collection<?>) null)); }
public static void setPort(int port) {
    XID.port = port;
}
@Test
public void testSetPort() {
    XID.setPort(8080);
    assertThat(XID.getPort()).isEqualTo(8080);
}
@POST @Path("getByPath") @ZeppelinApi public Response getNoteByPath(String message, @QueryParam("reload") boolean reload) throws IOException { // notePath may contains special character like space. // it should be in http body instead of in url // to avoid problem of url conversion by external service like knox GetNoteByPathRequest request = GSON.fromJson(message, GetNoteByPathRequest.class); String notePath = request.getNotePath(); return notebookService.getNoteByPath(notePath, reload, getServiceContext(), new RestServiceCallback<>(), note -> new JsonResponse<>(Status.OK, "", note).build()); }
@Test
void testGetNoteByPath() throws IOException {
    LOG.info("Running testGetNoteByPath");
    String note1Id = null;
    try {
        String notePath = "dir1/note1";
        note1Id = notebook.createNote(notePath, anonymous);
        notebook.processNote(note1Id, note1 -> {
            note1.addNewParagraph(AuthenticationInfo.ANONYMOUS);
            notebook.saveNote(note1, anonymous);
            return null;
        });

        CloseableHttpResponse post = httpPost("/notebook/getByPath", "{\"notePath\":\"" + notePath + "\"}");
        assertThat(post, isAllowed());
        Map<String, Object> resp = gson.fromJson(EntityUtils.toString(post.getEntity(), StandardCharsets.UTF_8),
            new TypeToken<Map<String, Object>>() {}.getType());
        Map<String, Object> noteObject = (Map<String, Object>) resp.get("body");
        assertEquals(notePath, ((String) noteObject.get("path")).substring(1));
        post.close();
    } finally {
        // cleanup
        if (null != note1Id) {
            notebook.removeNote(note1Id, anonymous);
        }
    }
}
@Override
public CodegenTableDO getCodegenTable(Long id) {
    return codegenTableMapper.selectById(id);
}
@Test
public void testGetCodegenTable() {
    // mock data
    CodegenTableDO tableDO = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()));
    codegenTableMapper.insert(tableDO);
    // prepare parameters
    Long id = tableDO.getId();

    // invoke
    CodegenTableDO result = codegenService.getCodegenTable(id);
    // assert
    assertPojoEquals(tableDO, result);
}
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
    lock.lock();
    try {
        checkArgument(!req.completed, () -> "given SendRequest has already been completed");

        log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(),
                req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());

        // Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
        // with the actual outputs that'll be used to gather the required amount of value. In this way, users
        // can customize coin selection policies. The call below will ignore immature coinbases and outputs
        // we don't have the keys for.
        List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);

        // Connect (add a value amount) unconnected inputs
        List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
        req.tx.clearInputs();
        inputs.forEach(req.tx::addInput);

        // Warn if there are remaining unconnected inputs whose value we do not know
        // TODO: Consider throwing if there are inputs that we don't have a value for
        if (req.tx.getInputs().stream()
                .map(TransactionInput::getValue)
                .anyMatch(Objects::isNull))
            log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");

        // If any inputs have already been added, we don't need to get their value from wallet
        Coin totalInput = req.tx.getInputSum();
        // Calculate the amount of value we need to import.
        Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);

        // Enforce the OP_RETURN limit
        if (req.tx.getOutputs().stream()
                .filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
                .count() > 1) // Only 1 OP_RETURN per transaction allowed.
            throw new MultipleOpReturnRequested();

        // Check for dusty sends
        if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
            if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
                throw new DustySendRequested();
        }

        // Filter out candidates that are already included in the transaction inputs
        List<TransactionOutput> candidates = prelimCandidates.stream()
                .filter(output -> !alreadyIncluded(req.tx.getInputs(), output))
                .collect(StreamUtils.toUnmodifiableList());

        CoinSelection bestCoinSelection;
        TransactionOutput bestChangeOutput = null;
        List<Coin> updatedOutputValues = null;
        if (!req.emptyWallet) {
            // This can throw InsufficientMoneyException.
            FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
            bestCoinSelection = feeCalculation.bestCoinSelection;
            bestChangeOutput = feeCalculation.bestChangeOutput;
            updatedOutputValues = feeCalculation.updatedOutputValues;
        } else {
            // We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
            // of the total value we can currently spend as determined by the selector, and then subtracting the fee.
            checkState(req.tx.getOutputs().size() == 1, () -> "empty wallet TX must have a single output only");
            CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
            bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
            candidates = null;  // Selector took ownership and might have changed candidates. Don't access again.
            req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
            log.info("  emptying {}", bestCoinSelection.totalValue().toFriendlyString());
        }

        bestCoinSelection.outputs()
                .forEach(req.tx::addInput);

        if (req.emptyWallet) {
            if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
                throw new CouldNotAdjustDownwards();
        }

        if (updatedOutputValues != null) {
            for (int i = 0; i < updatedOutputValues.size(); i++) {
                req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
            }
        }

        if (bestChangeOutput != null) {
            req.tx.addOutput(bestChangeOutput);
            log.info("  with {} change", bestChangeOutput.getValue().toFriendlyString());
        }

        // Now shuffle the outputs to obfuscate which is the change.
        if (req.shuffleOutputs)
            req.tx.shuffleOutputs();

        // Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
        if (req.signInputs)
            signTransaction(req);

        // Check size.
        final int size = req.tx.messageSize();
        if (size > Transaction.MAX_STANDARD_TX_SIZE)
            throw new ExceededMaxTransactionSize();

        // Label the transaction as being self created. We can use this later to spend its change output even before
        // the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
        // point - the user isn't interested in a confidence transition they made themselves.
        getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
        // Label the transaction as being a user requested payment. This can be used to render GUI wallet
        // transaction lists more appropriately, especially when the wallet starts to generate transactions itself
        // for internal purposes.
        req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
        // Record the exchange rate that was valid when the transaction was completed.
        req.tx.setExchangeRate(req.exchangeRate);
        req.tx.setMemo(req.memo);
        req.completed = true;
        log.info("  completed: {}", req.tx);
    } finally {
        lock.unlock();
    }
}
@Test
public void opReturnOneOutputWithValueTest() throws Exception {
    // Tests basic send of a transaction with one output that destroys coins and has an OP_RETURN.
    receiveATransaction(wallet, myAddress);
    Transaction tx = new Transaction();
    Coin messagePrice = CENT;
    Script script = ScriptBuilder.createOpReturnScript("hello world!".getBytes());
    tx.addOutput(messagePrice, script);
    SendRequest request = SendRequest.forTx(tx);
    wallet.completeTx(request);
}
@Override
public long checksum() {
    if (this.checksum == 0) {
        this.checksum = CrcUtil.crc64(AsciiStringUtil.unsafeEncode(toString()));
    }
    return this.checksum;
}
@Test
public void testChecksum() {
    PeerId peer = new PeerId("192.168.1.1", 8081, 1);
    long c = peer.checksum();
    assertTrue(c != 0);
    assertEquals(c, peer.checksum());
}
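The focal method is a lazy-caching idiom: 0 doubles as the "not computed yet" sentinel, so the hash is computed at most once per instance (assuming the real value is never 0). A sketch of the same idiom using the JDK's CRC32 in place of the project's CRC64 helper — the caching logic, not the hash function, is the point:

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

class ChecksumCaching {
    private long checksum; // 0 means "not computed yet"
    private final String endpoint;

    ChecksumCaching(String endpoint) { this.endpoint = endpoint; }

    long checksum() {
        if (checksum == 0) {
            CRC32 crc = new CRC32();
            crc.update(endpoint.getBytes(StandardCharsets.US_ASCII));
            checksum = crc.getValue(); // cached; later calls return the same value
        }
        return checksum;
    }
}

One caveat of the sentinel approach: an input whose true checksum is 0 would be recomputed on every call, a trade-off the original appears to accept as benign.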
public DirectoryEntry lookUp(
        File workingDirectory, JimfsPath path, Set<? super LinkOption> options) throws IOException {
    checkNotNull(path);
    checkNotNull(options);

    DirectoryEntry result = lookUp(workingDirectory, path, options, 0);
    if (result == null) {
        // an intermediate file in the path did not exist or was not a directory
        throw new NoSuchFileException(path.toString());
    }
    return result;
}
@Test
public void testLookup_absolute_nonDirectoryIntermediateFile() throws IOException {
    try {
        lookup("/work/one/eleven/twelve");
        fail();
    } catch (NoSuchFileException expected) {
    }

    try {
        lookup("/work/one/eleven/twelve/thirteen/fourteen");
        fail();
    } catch (NoSuchFileException expected) {
    }
}
public static InternalLogger getInstance(Class<?> clazz) {
    return getInstance(clazz.getName());
}
@Test
public void testInfoWithException() {
    final InternalLogger logger = InternalLoggerFactory.getInstance("mock");
    logger.info("a", e);
    verify(mockLogger).info("a", e);
}
@Override
public IMetaverseNode createResourceNode( IExternalResourceInfo resource ) throws MetaverseException {
    return createFileNode( resource.getName(), getDescriptor() );
}
@Test
public void testCreateResourceNode() throws Exception {
    IExternalResourceInfo res = mock( IExternalResourceInfo.class );
    when( res.getName() ).thenReturn( "file:///Users/home/tmp/xyz.xml" );
    IMetaverseNode resourceNode = analyzer.createResourceNode( res );
    assertNotNull( resourceNode );
    assertEquals( DictionaryConst.NODE_TYPE_FILE, resourceNode.getType() );
}
public static <T> AvroSchema<T> of(SchemaDefinition<T> schemaDefinition) {
    if (schemaDefinition.getSchemaReaderOpt().isPresent() && schemaDefinition.getSchemaWriterOpt().isPresent()) {
        return new AvroSchema<>(schemaDefinition.getSchemaReaderOpt().get(),
                schemaDefinition.getSchemaWriterOpt().get(),
                parseSchemaInfo(schemaDefinition, SchemaType.AVRO));
    }
    ClassLoader pojoClassLoader = null;
    if (schemaDefinition.getClassLoader() != null) {
        pojoClassLoader = schemaDefinition.getClassLoader();
    } else if (schemaDefinition.getPojo() != null) {
        pojoClassLoader = schemaDefinition.getPojo().getClassLoader();
    }
    return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader);
}
@Test
public void testNotAllowNullSchema() throws JSONException {
    AvroSchema<Foo> avroSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class)
        .withAlwaysAllowNull(false).build());
    assertEquals(avroSchema.getSchemaInfo().getType(), SchemaType.AVRO);
    Schema.Parser parser = new Schema.Parser();
    String schemaJson = new String(avroSchema.getSchemaInfo().getSchema());
    assertJSONEquals(schemaJson, SCHEMA_AVRO_NOT_ALLOW_NULL);
    Schema schema = parser.parse(schemaJson);

    for (String fieldName : FOO_FIELDS) {
        Schema.Field field = schema.getField(fieldName);
        Assert.assertNotNull(field);

        if (field.name().equals("field4")) {
            Assert.assertNotNull(field.schema().getTypes().get(1).getField("field1"));
        }
        if (field.name().equals("fieldUnableNull")) {
            Assert.assertNotNull(field.schema().getType());
        }
    }
}
@Override
public void isNotEqualTo(@Nullable Object expected) {
    super.isNotEqualTo(expected);
}
@Test
public void isNotEqualTo_WithoutToleranceParameter_Success_Shorter() {
    assertThat(array(2.2d, 3.3d)).isNotEqualTo(array(2.2d));
}
protected boolean isListEmpty(ArrayNode json) {
    for (JsonNode node : json) {
        if (!isNodeEmpty(node)) {
            return false;
        }
    }
    return true;
}
@Test
public void isListEmpty_noNode() {
    ArrayNode json = new ArrayNode(factory);
    assertThat(expressionEvaluator.isListEmpty(json)).isTrue();
}
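Note that isListEmpty is vacuously true for an array with no elements, which is exactly what the test checks. A self-contained Jackson sketch, assuming a node counts as "empty" when it is null, missing, or an empty container — the original's isNodeEmpty is not shown here and may define emptiness differently:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;

final class JsonEmptinessSketch {
    static boolean isListEmpty(ArrayNode json) {
        for (JsonNode node : json) {
            if (!isNodeEmpty(node)) {
                return false; // one non-empty element makes the whole list non-empty
            }
        }
        return true; // vacuously true when the array has no elements
    }

    static boolean isNodeEmpty(JsonNode node) {
        return node == null || node.isNull() || node.isMissingNode()
                || (node.isContainerNode() && node.size() == 0);
    }
}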
@Override
public KeyValueIterator<Windowed<K>, V> backwardFetch(final K key) {
    Objects.requireNonNull(key, "key cannot be null");
    return new MeteredWindowedKeyValueIterator<>(
        wrapped().backwardFetch(keyBytes(key)),
        fetchSensor,
        iteratorDurationSensor,
        streamsMetrics,
        serdes::keyFrom,
        serdes::valueFrom,
        time,
        numOpenIterators,
        openIterators
    );
}
@Test
public void shouldThrowNullPointerOnBackwardFetchIfKeyIsNull() {
    setUpWithoutContext();
    assertThrows(NullPointerException.class, () -> store.backwardFetch(null));
}
@Override
public BroadcastRuleConfiguration swapToObject(final YamlBroadcastRuleConfiguration yamlConfig) {
    return new BroadcastRuleConfiguration(yamlConfig.getTables());
}
@Test
void assertSwapToObject() {
    YamlBroadcastRuleConfiguration yamlRuleConfig = new YamlBroadcastRuleConfiguration();
    yamlRuleConfig.getTables().add("t_address");
    YamlBroadcastRuleConfigurationSwapper swapper = new YamlBroadcastRuleConfigurationSwapper();
    BroadcastRuleConfiguration ruleConfig = swapper.swapToObject(yamlRuleConfig);
    assertThat(ruleConfig.getTables().size(), is(1));
    assertThat(ruleConfig.getTables().iterator().next(), is("t_address"));
}
@Override
public void setFlushNetworkPolicy(int networkType) {
}
@Test
public void setFlushNetworkPolicy() {
    mSensorsAPI.setFlushNetworkPolicy(SensorsDataAPI.NetworkType.TYPE_5G);
}
@Override
public PipelineConfig get(int i) {
    if (i < 0)
        throw new IndexOutOfBoundsException();

    int start = 0;
    for (PipelineConfigs part : this.parts) {
        int end = start + part.size();
        if (i < end)
            return part.get(i - start);
        start = end;
    }
    throw new IndexOutOfBoundsException();
}
@Test
public void shouldReturnPipelinesInOrder() {
    PipelineConfig pipeline1 = PipelineConfigMother.pipelineConfig("pipeline1");
    PipelineConfig pipeline3 = PipelineConfigMother.pipelineConfig("pipeline3");
    PipelineConfig pipeline5 = PipelineConfigMother.pipelineConfig("pipeline5");
    PipelineConfig pipeline2 = PipelineConfigMother.pipelineConfig("pipeline2");
    PipelineConfig pipeline4 = PipelineConfigMother.pipelineConfig("pipeline4");
    PipelineConfigs group = new MergePipelineConfigs(
        new BasicPipelineConfigs(pipeline1, pipeline2),
        new BasicPipelineConfigs(pipeline3),
        new BasicPipelineConfigs(pipeline4, pipeline5));

    assertThat(group.get(0), is(pipeline1));
    assertThat(group.get(1), is(pipeline2));
    assertThat(group.get(2), is(pipeline3));
    assertThat(group.get(3), is(pipeline4));
    assertThat(group.get(4), is(pipeline5));
}
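The indexing scheme under test is the standard "concatenated view" translation: walk the parts keeping a running offset, and delegate to the part whose range contains the global index. A generic sketch of just that translation, stripped of the pipeline types:

import java.util.List;

final class ConcatIndexSketch {
    static <T> T get(List<List<T>> parts, int i) {
        if (i < 0) throw new IndexOutOfBoundsException();
        int start = 0;
        for (List<T> part : parts) {
            int end = start + part.size();
            if (i < end) {
                return part.get(i - start); // translate the global index to a part-local index
            }
            start = end; // this part's range is [start, end)
        }
        throw new IndexOutOfBoundsException();
    }
}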
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final byte[] payload = rawMessage.getPayload();
    final JsonNode event;
    try {
        event = objectMapper.readTree(payload);
        if (event == null || event.isMissingNode()) {
            throw new IOException("null result");
        }
    } catch (IOException e) {
        LOG.error("Couldn't decode raw message {}", rawMessage);
        return null;
    }

    return parseEvent(event);
}
@Test
public void decodeMessagesHandlesGenericBeatWithCloudDigitalOcean() throws Exception {
    final Message message = codec.decode(messageFromJson("generic-with-cloud-digital-ocean.json"));
    assertThat(message).isNotNull();
    assertThat(message.getMessage()).isEqualTo("-");
    assertThat(message.getSource()).isEqualTo("unknown");
    assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
    assertThat(message.getField("beats_type")).isEqualTo("beat");
    assertThat(message.getField("beat_foo")).isEqualTo("bar");
    assertThat(message.getField("beat_meta_cloud_provider")).isEqualTo("digitalocean");
    assertThat(message.getField("beat_meta_cloud_instance_id")).isEqualTo("1234567");
    assertThat(message.getField("beat_meta_cloud_region")).isEqualTo("nyc2");
}
public Timestamp insert(PartitionMetadata row) {
    final TransactionResult<Void> transactionResult =
        runInTransaction(transaction -> transaction.insert(row), "InsertsPartitionMetadata");
    return transactionResult.getCommitTimestamp();
}
@Test
public void testInsert() {
    when(databaseClient.readWriteTransaction(anyObject())).thenReturn(readWriteTransactionRunner);
    when(databaseClient.readWriteTransaction()).thenReturn(readWriteTransactionRunner);
    when(readWriteTransactionRunner.run(any())).thenReturn(null);
    when(readWriteTransactionRunner.getCommitTimestamp())
        .thenReturn(Timestamp.ofTimeMicroseconds(1L));
    Timestamp commitTimestamp = partitionMetadataDao.insert(ROW);
    verify(databaseClient, times(1)).readWriteTransaction(anyObject());
    verify(readWriteTransactionRunner, times(1)).run(any());
    verify(readWriteTransactionRunner, times(1)).getCommitTimestamp();
    assertEquals(Timestamp.ofTimeMicroseconds(1L), commitTimestamp);
}
public static HoodieWriteConfig.Builder newBuilder() {
    return new Builder();
}
@Test
public void testAutoAdjustCleanPolicyForNonBlockingConcurrencyControl() {
    TypedProperties props = new TypedProperties();
    props.setProperty(HoodieTableConfig.TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
    props.setProperty(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid");
    HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder()
        .withPath("/tmp")
        .withIndexConfig(
            HoodieIndexConfig.newBuilder()
                .fromProperties(props)
                .withIndexType(HoodieIndex.IndexType.BUCKET)
                .withBucketIndexEngineType(HoodieIndex.BucketIndexEngineType.SIMPLE)
                .build())
        .withWriteConcurrencyMode(WriteConcurrencyMode.NON_BLOCKING_CONCURRENCY_CONTROL)
        .build();

    // Verify that hoodie.clean.failed.writes.policy=LAZY is set automatically for non-blocking concurrency control
    verifyConcurrencyControlRelatedConfigs(writeConfig,
        true, true, true,
        WriteConcurrencyMode.NON_BLOCKING_CONCURRENCY_CONTROL,
        HoodieFailedWritesCleaningPolicy.LAZY,
        HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.defaultValue());
}
public <T> void notifyReadyAsync(Callable<T> callable, BiConsumer<T, Throwable> handler) {
    workerExecutor.execute(
            () -> {
                try {
                    T result = callable.call();
                    executorToNotify.execute(() -> handler.accept(result, null));
                } catch (Throwable t) {
                    executorToNotify.execute(() -> handler.accept(null, t));
                }
            });
}
@Test
public void testBasic() throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger result = new AtomicInteger(0);
    notifier.notifyReadyAsync(
        () -> 1234,
        (v, e) -> {
            result.set(v);
            latch.countDown();
        });
    latch.await();
    assertEquals(1234, result.get());
}
List<Cell> adjacentCells(int y, int x) {
    var adjacent = new ArrayList<Cell>();
    if (y == 0) {
        adjacent.add(this.cells[1][x]);
    }
    if (x == 0) {
        adjacent.add(this.cells[y][1]);
    }
    if (y == cells.length - 1) {
        adjacent.add(this.cells[cells.length - 2][x]);
    }
    if (x == cells.length - 1) {
        adjacent.add(this.cells[y][cells.length - 2]);
    }
    if (y > 0 && y < cells.length - 1) {
        adjacent.add(this.cells[y - 1][x]);
        adjacent.add(this.cells[y + 1][x]);
    }
    if (x > 0 && x < cells.length - 1) {
        adjacent.add(this.cells[y][x - 1]);
        adjacent.add(this.cells[y][x + 1]);
    }
    return adjacent;
}
@Test
void adjacentCellsTest() {
    var cg = new CandyGame(3, new CellPool(9));
    var arr1 = cg.adjacentCells(0, 0);
    var arr2 = cg.adjacentCells(1, 2);
    var arr3 = cg.adjacentCells(1, 1);
    assertTrue(arr1.size() == 2 && arr2.size() == 3 && arr3.size() == 4);
}
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
    if ( point == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
    }
    if ( range == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
    }
    try {
        boolean result = ( range.getHighBoundary() == Range.RangeBoundary.CLOSED
                && point.compareTo( range.getHighEndPoint() ) == 0 );
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
    }
}
@Test
void invokeParamSingleAndRange() {
    FunctionTestUtil.assertResult( finishesFunction.invoke( "f",
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.TRUE );
    FunctionTestUtil.assertResult( finishesFunction.invoke( "a",
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.FALSE );
    FunctionTestUtil.assertResult( finishesFunction.invoke( "f",
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.OPEN ) ),
        Boolean.FALSE );
    FunctionTestUtil.assertResult( finishesFunction.invoke( "g",
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.FALSE );
}
public static Schema getPinotSchemaFromPinotSchemaWithComplexTypeHandling(Descriptors.Descriptor protoSchema,
    @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit,
    List<String> fieldsToUnnest, String delimiter) {
    Schema pinotSchema = new Schema();

    for (Descriptors.FieldDescriptor field : protoSchema.getFields()) {
        extractSchemaWithComplexTypeHandling(field, fieldsToUnnest, delimiter, field.getName(), pinotSchema,
            fieldTypeMap, timeUnit);
    }
    return pinotSchema;
}
@Test(dataProvider = "scalarCases") public void testExtractSchemaWithCompositeTypeHandling( String fieldName, FieldSpec.DataType type, boolean isSingleValue) { Descriptors.Descriptor desc = CompositeTypes.CompositeMessage.getDescriptor(); FieldSpec schema = ProtoBufSchemaUtils.getPinotSchemaFromPinotSchemaWithComplexTypeHandling( desc, new HashMap<>(), TimeUnit.SECONDS, Collections.emptyList(), ".").getFieldSpecFor("test_message." + fieldName); FieldSpec expectedSchema = new DimensionFieldSpec("test_message." + fieldName, type, isSingleValue); assertEquals(expectedSchema, schema); }
public static KTableHolder<GenericKey> build(
    final KGroupedTableHolder groupedTable,
    final TableAggregate aggregate,
    final RuntimeBuildContext buildContext,
    final MaterializedFactory materializedFactory) {
    return build(
        groupedTable,
        aggregate,
        buildContext,
        materializedFactory,
        new AggregateParamsFactory()
    );
}
@Test
public void shouldBuildAggregateCorrectly() {
    // When:
    final KTableHolder<GenericKey> result = aggregate.build(planBuilder, planInfo);

    // Then:
    assertThat(result.getTable(), is(aggregatedWithResults));
    final InOrder inOrder = Mockito.inOrder(groupedTable, aggregated, aggregatedWithResults);
    inOrder.verify(groupedTable).aggregate(initializer, aggregator, undoAggregator, materialized);
    inOrder.verify(aggregated).transformValues(any(), any(Named.class));
    inOrder.verifyNoMoreInteractions();
}
public static String prettyMethodSignature(ClassSymbol origin, MethodSymbol m) {
    StringBuilder sb = new StringBuilder();
    if (m.isConstructor()) {
        Name name = m.owner.enclClass().getSimpleName();
        if (name.isEmpty()) {
            // use the superclass name of anonymous classes
            name = m.owner.enclClass().getSuperclass().asElement().getSimpleName();
        }
        sb.append(name);
    } else {
        if (!m.owner.equals(origin)) {
            sb.append(m.owner.getSimpleName()).append('.');
        }
        sb.append(m.getSimpleName());
    }
    sb.append(
        m.getParameters().stream()
            .map(v -> v.type.accept(PRETTY_TYPE_VISITOR, null))
            .collect(joining(", ", "(", ")")));
    return sb.toString();
}
@Test
public void prettyMethodSignature() throws Exception {
    FileSystem fileSystem = Jimfs.newFileSystem(Configuration.unix());
    Path source = fileSystem.getPath("Test.java");
    Files.write(
        source,
        ImmutableList.of(
            "class Test {", //
            "  void f() {",
            "    new Test();",
            "    new Test() {};",
            "  }",
            "}"),
        UTF_8);
    JavacFileManager fileManager = new JavacFileManager(new Context(), false, UTF_8);
    List<String> signatures = new ArrayList<>();
    JavacTask task =
        JavacTool.create()
            .getTask(
                /* out= */ null,
                fileManager,
                /* diagnosticListener= */ null,
                /* options= */ ImmutableList.of(),
                /* classes= */ ImmutableList.of(),
                fileManager.getJavaFileObjects(source));
    task.addTaskListener(
        new TaskListener() {
            @Override
            public void finished(TaskEvent e) {
                if (e.getKind() != Kind.ANALYZE) {
                    return;
                }
                new TreePathScanner<Void, Void>() {
                    @Override
                    public Void visitNewClass(NewClassTree node, Void unused) {
                        signatures.add(
                            Signatures.prettyMethodSignature(
                                (ClassSymbol) e.getTypeElement(), ASTHelpers.getSymbol(node)));
                        return super.visitNewClass(node, null);
                    }
                }.scan(e.getCompilationUnit(), null);
            }
        });
    assertThat(task.call()).isTrue();
    assertThat(signatures).containsExactly("Test()", "Test()");
}
static void encodeSubscriptionRemoval(
    final UnsafeBuffer encodingBuffer,
    final int offset,
    final int captureLength,
    final int length,
    final String channel,
    final int streamId,
    final long id) {
    int encodedLength = encodeLogHeader(encodingBuffer, offset, captureLength, length);

    encodingBuffer.putInt(offset + encodedLength, streamId, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_INT;

    encodingBuffer.putLong(offset + encodedLength, id, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_LONG;

    encodeTrailingString(
        encodingBuffer, offset + encodedLength, captureLength - SIZE_OF_INT - SIZE_OF_LONG, channel);
}
@Test
void encodeSubscriptionRemovalShouldTruncateChannelIfItExceedsMaxMessageLength() {
    final char[] data = new char[MAX_EVENT_LENGTH * 3 + 5];
    fill(data, 'a');
    final int offset = 0;
    final int length = SIZE_OF_INT * 2 + SIZE_OF_LONG + data.length;
    final int captureLength = captureLength(length);
    final String channel = new String(data);
    final int streamId = 1;
    final long id = -1;

    encodeSubscriptionRemoval(buffer, offset, captureLength, length, channel, streamId, id);

    assertEquals(captureLength, buffer.getInt(offset, LITTLE_ENDIAN));
    assertEquals(length, buffer.getInt(offset + SIZE_OF_INT, LITTLE_ENDIAN));
    assertNotEquals(0, buffer.getLong(offset + SIZE_OF_INT * 2, LITTLE_ENDIAN));
    assertEquals(streamId, buffer.getInt(offset + LOG_HEADER_LENGTH, LITTLE_ENDIAN));
    assertEquals(id, buffer.getLong(offset + LOG_HEADER_LENGTH + SIZE_OF_INT, LITTLE_ENDIAN));
    assertEquals(channel.substring(0, captureLength - SIZE_OF_INT * 2 - SIZE_OF_LONG - 3) + "...",
        buffer.getStringAscii(offset + LOG_HEADER_LENGTH + SIZE_OF_INT + SIZE_OF_LONG, LITTLE_ENDIAN));
}
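The assertion on the trailing string captures the general truncation rule: when the value exceeds the capture budget, keep a prefix and append "..." so the stored string fits exactly. A tiny standalone sketch of that rule (assuming maxLength is at least 3; the real encodeTrailingString also writes the length prefix into the buffer):

final class TruncateSketch {
    static String truncate(String value, int maxLength) {
        return value.length() <= maxLength
                ? value
                : value.substring(0, maxLength - 3) + "..."; // reserve 3 chars for the ellipsis
    }

    public static void main(String[] args) {
        // "aeron:udp?endpoin..." -- exactly 20 characters
        System.out.println(truncate("aeron:udp?endpoint=localhost:5555", 20));
    }
}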
@Override
public Stream<HoodieBaseFile> getAllBaseFiles(String partitionPath) {
    return execute(partitionPath, preferredView::getAllBaseFiles, (path) -> getSecondaryView().getAllBaseFiles(path));
}
@Test
public void testGetAllBaseFiles() {
    Stream<HoodieBaseFile> actual;
    Stream<HoodieBaseFile> expected = testBaseFileStream;
    String partitionPath = "/table2";

    when(primary.getAllBaseFiles(partitionPath)).thenReturn(testBaseFileStream);
    actual = fsView.getAllBaseFiles(partitionPath);
    assertEquals(expected, actual);
    verify(secondaryViewSupplier, never()).get();

    resetMocks();
    when(secondaryViewSupplier.get()).thenReturn(secondary);
    when(primary.getAllBaseFiles(partitionPath)).thenThrow(new RuntimeException());
    when(secondary.getAllBaseFiles(partitionPath)).thenReturn(testBaseFileStream);
    actual = fsView.getAllBaseFiles(partitionPath);
    assertEquals(expected, actual);

    resetMocks();
    when(secondary.getAllBaseFiles(partitionPath)).thenReturn(testBaseFileStream);
    actual = fsView.getAllBaseFiles(partitionPath);
    assertEquals(expected, actual);

    resetMocks();
    when(secondary.getAllBaseFiles(partitionPath)).thenThrow(new RuntimeException());
    assertThrows(RuntimeException.class, () -> {
        fsView.getAllBaseFiles(partitionPath);
    });
}
public static Builder builder() {
    return new Builder();
}
@Test
public void testFailures() {
    assertThatThrownBy(() -> LoadTableResponse.builder().build())
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Invalid metadata: null");
}
public BufferPool get() {
    WeakReference<BufferPool> ref = threadLocal.get();
    if (ref == null) {
        BufferPool pool = bufferPoolFactory.create(serializationService);
        ref = new WeakReference<>(pool);
        strongReferences.put(Thread.currentThread(), pool);
        threadLocal.set(ref);
        return pool;
    } else {
        BufferPool pool = ref.get();
        if (pool == null) {
            throw notActiveExceptionSupplier.get();
        }
        return pool;
    }
}
@Test
public void get_whenSameThread_samePoolInstance() {
    BufferPool pool1 = bufferPoolThreadLocal.get();
    BufferPool pool2 = bufferPoolThreadLocal.get();
    assertSame(pool1, pool2);
}
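The focal method combines a ThreadLocal of WeakReferences with a side map of strong references: the ThreadLocal alone must not pin the pool (so the owner can release all pools at once by clearing the strong map, after which ref.get() returns null and callers get the not-active exception), while the side map keeps the pool alive per thread in the meantime. A condensed sketch of the pattern with hypothetical names:

import java.lang.ref.WeakReference;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

final class ThreadLocalPool<P> {
    private final ThreadLocal<WeakReference<P>> threadLocal = new ThreadLocal<>();
    private final Map<Thread, P> strongReferences = new ConcurrentHashMap<>();
    private final Supplier<P> factory;

    ThreadLocalPool(Supplier<P> factory) { this.factory = factory; }

    P get() {
        WeakReference<P> ref = threadLocal.get();
        if (ref == null) {
            P pool = factory.get();
            threadLocal.set(new WeakReference<>(pool));
            strongReferences.put(Thread.currentThread(), pool); // pins the pool for this thread
            return pool; // same thread, same instance on later calls
        }
        P pool = ref.get();
        if (pool == null) {
            // the strong references were cleared, e.g. the owning service shut down
            throw new IllegalStateException("pool owner no longer active");
        }
        return pool;
    }
}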
@Override
public List<DictDataDO> getDictDataList(Integer status, String dictType) {
    List<DictDataDO> list = dictDataMapper.selectListByStatusAndDictType(status, dictType);
    list.sort(COMPARATOR_TYPE_AND_SORT);
    return list;
}
@Test
public void testGetDictDataList() {
    // mock data
    DictDataDO dictDataDO01 = randomDictDataDO().setDictType("yunai").setSort(2)
        .setStatus(CommonStatusEnum.ENABLE.getStatus());
    dictDataMapper.insert(dictDataDO01);
    DictDataDO dictDataDO02 = randomDictDataDO().setDictType("yunai").setSort(1)
        .setStatus(CommonStatusEnum.ENABLE.getStatus());
    dictDataMapper.insert(dictDataDO02);
    DictDataDO dictDataDO03 = randomDictDataDO().setDictType("yunai").setSort(3)
        .setStatus(CommonStatusEnum.DISABLE.getStatus());
    dictDataMapper.insert(dictDataDO03);
    DictDataDO dictDataDO04 = randomDictDataDO().setDictType("yunai2").setSort(3)
        .setStatus(CommonStatusEnum.DISABLE.getStatus());
    dictDataMapper.insert(dictDataDO04);
    // prepare parameters
    Integer status = CommonStatusEnum.ENABLE.getStatus();
    String dictType = "yunai";

    // invoke
    List<DictDataDO> dictDataDOList = dictDataService.getDictDataList(status, dictType);
    // assert
    assertEquals(2, dictDataDOList.size());
    assertPojoEquals(dictDataDO02, dictDataDOList.get(0));
    assertPojoEquals(dictDataDO01, dictDataDOList.get(1));
}
@Override
public MetadataStore create(String metadataURL, MetadataStoreConfig metadataStoreConfig,
                            boolean enableSessionWatcher) throws MetadataStoreException {
    return new LocalMemoryMetadataStore(metadataURL, metadataStoreConfig);
}
@Test
public void testNotifyEvent() throws Exception {
    TestMetadataEventSynchronizer sync = new TestMetadataEventSynchronizer();
    @Cleanup
    MetadataStore store1 = MetadataStoreFactory.create("memory:local",
            MetadataStoreConfig.builder().synchronizer(sync).build());
    String path = "/test";
    byte[] value = "value".getBytes(StandardCharsets.UTF_8);
    store1.put(path, value, Optional.empty()).join();
    assertTrue(store1.exists(path).join());
    // poll the map itself rather than a captured local, so the await can observe updates
    Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> sync.notifiedEvents.get(path) != null);
    MetadataEvent event = sync.notifiedEvents.get(path);
    assertNotNull(event);
    assertEquals(event.getPath(), path);
    assertEquals(event.getValue(), value);
    assertEquals(event.getOptions(), EMPTY_SET);
    assertEquals(event.getType(), NotificationType.Modified);
    assertEquals(event.getSourceCluster(), sync.clusterName);
    assertNull(event.getExpectedVersion());

    // (2) with expected version
    long expectedVersion = 0L;
    for (; expectedVersion < 4; expectedVersion++) {
        sync.notifiedEvents.remove(path);
        store1.put(path, value, Optional.of(expectedVersion)).join();
        Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> sync.notifiedEvents.get(path) != null);
        MetadataEvent event2 = sync.notifiedEvents.get(path);
        assertNotNull(event2);
        assertEquals(event2.getPath(), path);
        assertEquals((long) event2.getExpectedVersion(), expectedVersion);
        assertEquals(event2.getType(), NotificationType.Modified);
    }

    // (3) delete node
    sync.notifiedEvents.remove(path);
    store1.delete(path, Optional.of(expectedVersion)).join();
    Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> sync.notifiedEvents.get(path) != null);
    MetadataEvent event2 = sync.notifiedEvents.get(path);
    assertNotNull(event2);
    assertEquals(event2.getPath(), path);
    assertEquals((long) event2.getExpectedVersion(), expectedVersion);
    assertEquals(event2.getType(), NotificationType.Deleted);
    assertEquals(event2.getSourceCluster(), sync.clusterName);
    assertEquals(event2.getOptions(), EMPTY_SET);
}