focal_method: string (length 13 to 60.9k)
test_case: string (length 25 to 109k)
@Override public void containerAllocated(S schedulableEntity, RMContainer r) { entityRequiresReordering(schedulableEntity); }
@Test public void testIterators() { OrderingPolicy<MockSchedulableEntity> schedOrder = new FairOrderingPolicy<MockSchedulableEntity>(); MockSchedulableEntity msp1 = new MockSchedulableEntity(); MockSchedulableEntity msp2 = new MockSchedulableEntity(); MockSchedulableEntity msp3 = new MockSchedulableEntity(); msp1.setId("1"); msp2.setId("2"); msp3.setId("3"); msp1.setUsed(Resources.createResource(3)); msp2.setUsed(Resources.createResource(2)); msp3.setUsed(Resources.createResource(1)); AbstractComparatorOrderingPolicy.updateSchedulingResourceUsage( msp1.getSchedulingResourceUsage()); AbstractComparatorOrderingPolicy.updateSchedulingResourceUsage( msp2.getSchedulingResourceUsage()); AbstractComparatorOrderingPolicy.updateSchedulingResourceUsage( msp3.getSchedulingResourceUsage()); schedOrder.addSchedulableEntity(msp1); schedOrder.addSchedulableEntity(msp2); schedOrder.addSchedulableEntity(msp3); //Assignment, least to greatest consumption checkIds(schedOrder.getAssignmentIterator( IteratorSelector.EMPTY_ITERATOR_SELECTOR), new String[]{"3", "2", "1"}); //Preemption, greatest to least checkIds(schedOrder.getPreemptionIterator(), new String[]{"1", "2", "3"}); //Change value without inform, should see no change msp2.setUsed(Resources.createResource(6)); checkIds(schedOrder.getAssignmentIterator( IteratorSelector.EMPTY_ITERATOR_SELECTOR), new String[]{"3", "2", "1"}); checkIds(schedOrder.getPreemptionIterator(), new String[]{"1", "2", "3"}); //Do inform, will reorder schedOrder.containerAllocated(msp2, null); checkIds(schedOrder.getAssignmentIterator( IteratorSelector.EMPTY_ITERATOR_SELECTOR), new String[]{"3", "1", "2"}); checkIds(schedOrder.getPreemptionIterator(), new String[]{"2", "1", "3"}); }
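The test depends on a checkIds helper that this row does not include. A minimal sketch, assuming the assignment and preemption iterators yield MockSchedulableEntity instances exposing getId() (an assumption, not the Hadoop source):
private void checkIds(Iterator<MockSchedulableEntity> it, String[] ids) { /* walk the iterator and compare each entity's id against the expected ordering */ for (String expected : ids) { Assert.assertEquals(expected, it.next().getId()); } }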
@Override public void logout() { }
@Test public void logout() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); mSensorsAPI.login("abcde"); mSensorsAPI.logout(); Assert.assertNull(mSensorsAPI.getLoginId()); }
static Schema getSchema(Class<? extends Message> clazz) { return getSchema(ProtobufUtil.getDescriptorForClass(clazz)); }
@Test public void testOptionalPrimitiveSchema() { assertEquals( TestProtoSchemas.OPTIONAL_PRIMITIVE_SCHEMA, ProtoSchemaTranslator.getSchema(Proto2SchemaMessages.OptionalPrimitive.class)); }
public GoPluginBundleDescriptor build(BundleOrPluginFileDetails bundleOrPluginJarFile) { if (!bundleOrPluginJarFile.exists()) { throw new RuntimeException(format("Plugin or bundle jar does not exist: %s", bundleOrPluginJarFile.file())); } String defaultId = bundleOrPluginJarFile.file().getName(); GoPluginBundleDescriptor goPluginBundleDescriptor = new GoPluginBundleDescriptor(GoPluginDescriptor.builder() .version("1") .id(defaultId) .bundleLocation(bundleOrPluginJarFile.extractionLocation()) .pluginJarFileLocation(bundleOrPluginJarFile.file().getAbsolutePath()) .isBundledPlugin(bundleOrPluginJarFile.isBundledPlugin()) .build()); try { if (bundleOrPluginJarFile.isBundleJar()) { return GoPluginBundleDescriptorParser.parseXML(bundleOrPluginJarFile.getBundleXml(), bundleOrPluginJarFile); } if (bundleOrPluginJarFile.isPluginJar()) { return GoPluginDescriptorParser.parseXML(bundleOrPluginJarFile.getPluginXml(), bundleOrPluginJarFile); } goPluginBundleDescriptor.markAsInvalid(List.of(format("Plugin with ID (%s) is not valid. The plugin does not seem to contain plugin.xml or gocd-bundle.xml", defaultId)), new RuntimeException("The plugin does not seem to contain plugin.xml or gocd-bundle.xml")); } catch (Exception e) { log.warn("Unable to load the jar file {}", bundleOrPluginJarFile.file(), e); final String message = requireNonNullElse(e.getMessage(), e.getClass().getCanonicalName()); String cause = e.getCause() != null ? format("%s. Cause: %s", message, e.getCause().getMessage()) : message; goPluginBundleDescriptor.markAsInvalid(List.of(format("Plugin with ID (%s) is not valid: %s", defaultId, cause)), e); } return goPluginBundleDescriptor; }
@Test void shouldCreateInvalidPluginDescriptorEvenIfPluginXMLIsNotFound() throws Exception { String pluginJarName = "descriptor-aware-test-plugin-with-no-plugin-xml.jar"; copyPluginToThePluginDirectory(pluginDirectory, pluginJarName); File pluginJarFile = new File(pluginDirectory, pluginJarName); BundleOrPluginFileDetails bundleOrPluginFileDetails = new BundleOrPluginFileDetails(pluginJarFile, true, pluginDirectory); final GoPluginBundleDescriptor bundleDescriptor = goPluginBundleDescriptorBuilder.build(bundleOrPluginFileDetails); final List<GoPluginDescriptor> descriptors = bundleDescriptor.descriptors(); assertThat(descriptors.size()).isEqualTo(1); assertThat(descriptors.get(0).isInvalid()).isTrue(); assertThat(descriptors.get(0).id()).isEqualTo(pluginJarName); }
public void receiveMessage(ProxyContext ctx, ReceiveMessageRequest request, StreamObserver<ReceiveMessageResponse> responseObserver) { ReceiveMessageResponseStreamWriter writer = createWriter(ctx, responseObserver); try { Settings settings = this.grpcClientSettingsManager.getClientSettings(ctx); Subscription subscription = settings.getSubscription(); boolean fifo = subscription.getFifo(); int maxAttempts = settings.getBackoffPolicy().getMaxAttempts(); ProxyConfig config = ConfigurationManager.getProxyConfig(); Long timeRemaining = ctx.getRemainingMs(); long pollingTime; if (request.hasLongPollingTimeout()) { pollingTime = Durations.toMillis(request.getLongPollingTimeout()); } else { pollingTime = timeRemaining - Durations.toMillis(settings.getRequestTimeout()) / 2; } if (pollingTime < config.getGrpcClientConsumerMinLongPollingTimeoutMillis()) { pollingTime = config.getGrpcClientConsumerMinLongPollingTimeoutMillis(); } if (pollingTime > config.getGrpcClientConsumerMaxLongPollingTimeoutMillis()) { pollingTime = config.getGrpcClientConsumerMaxLongPollingTimeoutMillis(); } if (pollingTime > timeRemaining) { if (timeRemaining >= config.getGrpcClientConsumerMinLongPollingTimeoutMillis()) { pollingTime = timeRemaining; } else { final String clientVersion = ctx.getClientVersion(); Code code = null == clientVersion || ILLEGAL_POLLING_TIME_INTRODUCED_CLIENT_VERSION.compareTo(clientVersion) > 0 ? Code.BAD_REQUEST : Code.ILLEGAL_POLLING_TIME; writer.writeAndComplete(ctx, code, "The deadline time remaining is not enough" + " for polling, please check network condition"); return; } } validateTopicAndConsumerGroup(request.getMessageQueue().getTopic(), request.getGroup()); String topic = request.getMessageQueue().getTopic().getName(); String group = request.getGroup().getName(); long actualInvisibleTime = Durations.toMillis(request.getInvisibleDuration()); ProxyConfig proxyConfig = ConfigurationManager.getProxyConfig(); if (proxyConfig.isEnableProxyAutoRenew() && request.getAutoRenew()) { actualInvisibleTime = proxyConfig.getDefaultInvisibleTimeMills(); } else { validateInvisibleTime(actualInvisibleTime, ConfigurationManager.getProxyConfig().getMinInvisibleTimeMillsForRecv()); } FilterExpression filterExpression = request.getFilterExpression(); SubscriptionData subscriptionData; try { subscriptionData = FilterAPI.build(topic, filterExpression.getExpression(), GrpcConverter.getInstance().buildExpressionType(filterExpression.getType())); } catch (Exception e) { writer.writeAndComplete(ctx, Code.ILLEGAL_FILTER_EXPRESSION, e.getMessage()); return; } this.messagingProcessor.popMessage( ctx, new ReceiveMessageQueueSelector( request.getMessageQueue().getBroker().getName() ), group, topic, request.getBatchSize(), actualInvisibleTime, pollingTime, ConsumeInitMode.MAX, subscriptionData, fifo, new PopMessageResultFilterImpl(maxAttempts), request.hasAttemptId() ? 
request.getAttemptId() : null, timeRemaining ).thenAccept(popResult -> { if (proxyConfig.isEnableProxyAutoRenew() && request.getAutoRenew()) { if (PopStatus.FOUND.equals(popResult.getPopStatus())) { List<MessageExt> messageExtList = popResult.getMsgFoundList(); for (MessageExt messageExt : messageExtList) { String receiptHandle = messageExt.getProperty(MessageConst.PROPERTY_POP_CK); if (receiptHandle != null) { MessageReceiptHandle messageReceiptHandle = new MessageReceiptHandle(group, topic, messageExt.getQueueId(), receiptHandle, messageExt.getMsgId(), messageExt.getQueueOffset(), messageExt.getReconsumeTimes()); messagingProcessor.addReceiptHandle(ctx, grpcChannelManager.getChannel(ctx.getClientID()), group, messageExt.getMsgId(), messageReceiptHandle); } } } } writer.writeAndComplete(ctx, request, popResult); }) .exceptionally(t -> { writer.writeAndComplete(ctx, request, t); return null; }); } catch (Throwable t) { writer.writeAndComplete(ctx, request, t); } }
@Test public void testReceiveMessageWithIllegalPollingTime() { StreamObserver<ReceiveMessageResponse> receiveStreamObserver = mock(ServerCallStreamObserver.class); ArgumentCaptor<ReceiveMessageResponse> responseArgumentCaptor0 = ArgumentCaptor.forClass(ReceiveMessageResponse.class); doNothing().when(receiveStreamObserver).onNext(responseArgumentCaptor0.capture()); when(this.grpcClientSettingsManager.getClientSettings(any())).thenReturn(Settings.newBuilder().getDefaultInstanceForType()); final ProxyContext context = createContext(); context.setClientVersion("5.0.2"); context.setRemainingMs(-1L); final ReceiveMessageRequest request = ReceiveMessageRequest.newBuilder() .setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build()) .setMessageQueue(MessageQueue.newBuilder().setTopic(Resource.newBuilder().setName(TOPIC).build()).build()) .setAutoRenew(false) .setLongPollingTimeout(Duration.newBuilder().setSeconds(20).build()) .setFilterExpression(FilterExpression.newBuilder() .setType(FilterType.TAG) .setExpression("*") .build()) .build(); this.receiveMessageActivity.receiveMessage( context, request, receiveStreamObserver ); assertEquals(Code.BAD_REQUEST, getResponseCodeFromReceiveMessageResponseList(responseArgumentCaptor0.getAllValues())); ArgumentCaptor<ReceiveMessageResponse> responseArgumentCaptor1 = ArgumentCaptor.forClass(ReceiveMessageResponse.class); doNothing().when(receiveStreamObserver).onNext(responseArgumentCaptor1.capture()); context.setClientVersion("5.0.3"); this.receiveMessageActivity.receiveMessage( context, request, receiveStreamObserver ); assertEquals(Code.ILLEGAL_POLLING_TIME, getResponseCodeFromReceiveMessageResponseList(responseArgumentCaptor1.getAllValues())); }
@Override @Nullable public String readUTF(@Nonnull String fieldName) throws IOException { return readString(fieldName); }
@Test public void testReadUTF() throws Exception { String aString = reader.readUTF("string"); assertEquals("test", aString); assertNull(reader.readUTF("NO SUCH FIELD")); }
@Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { return inject(statement, new TopicProperties.Builder()); }
@Test public void shouldThrowIfRetentionConfigPresentInCreateTableAs() { // Given: givenStatement("CREATE TABLE foo_bar WITH (kafka_topic='doesntexist', partitions=2, format='avro', retention_ms=30000) AS SELECT * FROM SOURCE;"); // When: final Exception e = assertThrows( KsqlException.class, () -> injector.inject(statement, builder) ); // Then: assertThat( e.getMessage(), containsString("Invalid config variable in the WITH clause: RETENTION_MS." + " Non-windowed tables do not support retention.")); }
public void onChange(Multimap<QProfileName, ActiveRuleChange> changedProfiles, long startDate, long endDate) { if (config.getBoolean(DISABLE_NOTIFICATION_ON_BUILT_IN_QPROFILES).orElse(false)) { return; } BuiltInQPChangeNotificationBuilder builder = new BuiltInQPChangeNotificationBuilder(); changedProfiles.keySet().stream() .map(changedProfile -> { String profileName = changedProfile.getName(); Language language = languages.get(changedProfile.getLanguage()); Collection<ActiveRuleChange> activeRuleChanges = changedProfiles.get(changedProfile); int newRules = (int) activeRuleChanges.stream().map(ActiveRuleChange::getType).filter(ACTIVATED::equals).count(); int updatedRules = (int) activeRuleChanges.stream().map(ActiveRuleChange::getType).filter(UPDATED::equals).count(); int removedRules = (int) activeRuleChanges.stream().map(ActiveRuleChange::getType).filter(DEACTIVATED::equals).count(); return Profile.newBuilder() .setProfileName(profileName) .setLanguageKey(language.getKey()) .setLanguageName(language.getName()) .setNewRules(newRules) .setUpdatedRules(updatedRules) .setRemovedRules(removedRules) .setStartDate(startDate) .setEndDate(endDate) .build(); }) .forEach(builder::addProfile); notificationManager.scheduleForSending(builder.build()); }
@Test public void add_profile_to_notification_for_updated_rules() { enableNotificationInGlobalSettings(); Multimap<QProfileName, ActiveRuleChange> profiles = ArrayListMultimap.create(); Languages languages = new Languages(); Tuple expectedTuple = addProfile(profiles, languages, UPDATED); BuiltInQualityProfilesUpdateListener underTest = new BuiltInQualityProfilesUpdateListener(notificationManager, languages, settings.asConfig()); underTest.onChange(profiles, 0, 1); ArgumentCaptor<Notification> notificationArgumentCaptor = ArgumentCaptor.forClass(Notification.class); verify(notificationManager).scheduleForSending(notificationArgumentCaptor.capture()); verifyNoMoreInteractions(notificationManager); assertThat(BuiltInQPChangeNotificationBuilder.parse(notificationArgumentCaptor.getValue()).getProfiles()) .extracting(Profile::getProfileName, Profile::getLanguageKey, Profile::getLanguageName, Profile::getUpdatedRules) .containsExactlyInAnyOrder(expectedTuple); }
public @CheckForNull V start() throws Exception { V result = null; int currentAttempt = 0; boolean success = false; while (currentAttempt < attempts && !success) { currentAttempt++; try { if (LOGGER.isLoggable(Level.INFO)) { LOGGER.log(Level.INFO, Messages.Retrier_Attempt(currentAttempt, action)); } result = callable.call(); } catch (Exception e) { if (duringActionExceptions == null || Stream.of(duringActionExceptions).noneMatch(exception -> exception.isAssignableFrom(e.getClass()))) { // if the raised exception is not one of the controlled exceptions for this action, rethrow it LOGGER.log(Level.WARNING, Messages.Retrier_ExceptionThrown(currentAttempt, action), e); throw e; } else { // if the exception counts as a failed attempt of the action, notify the listener LOGGER.log(Level.INFO, Messages.Retrier_ExceptionFailed(currentAttempt, action), e); if (duringActionExceptionListener != null) { LOGGER.log(Level.INFO, Messages.Retrier_CallingListener(e.getLocalizedMessage(), currentAttempt, action)); result = duringActionExceptionListener.apply(currentAttempt, e); } } } // After the call and the call to the listener, which can change the result, test the result success = checkResult.test(currentAttempt, result); if (!success) { if (currentAttempt < attempts) { LOGGER.log(Level.WARNING, Messages.Retrier_AttemptFailed(currentAttempt, action)); LOGGER.log(Level.FINE, Messages.Retrier_Sleeping(delay, action)); try { Thread.sleep(delay); } catch (InterruptedException ie) { LOGGER.log(Level.FINE, Messages.Retrier_Interruption(action)); Thread.currentThread().interrupt(); // flag this thread as interrupted currentAttempt = attempts; // finish } } else { // Failed to perform the action LOGGER.log(Level.INFO, Messages.Retrier_NoSuccess(action, attempts)); } } else { LOGGER.log(Level.INFO, Messages.Retrier_Success(action, currentAttempt)); } } return result; }
@Test public void performedAtThirdAttemptTest() throws Exception { final int SUCCESSFUL_ATTEMPT = 3; final String ACTION = "print"; RingBufferLogHandler handler = new RingBufferLogHandler(20); Logger.getLogger(Retrier.class.getName()).addHandler(handler); // Set the required params Retrier<Boolean> r = new Retrier.Builder<>( // action to perform () -> { LOG.info("action performed"); return true; }, // check the result and return true if success (currentAttempt, result) -> currentAttempt == SUCCESSFUL_ATTEMPT, //name of the action ACTION ) // Set the optional parameters .withAttempts(SUCCESSFUL_ATTEMPT + 1) .withDelay(100) // Construct the object .build(); // Begin the process Boolean finalResult = r.start(); Assert.assertTrue(finalResult != null && finalResult); String text = Messages.Retrier_Success(ACTION, SUCCESSFUL_ATTEMPT); assertTrue(String.format("The log should contain '%s'", text), handler.getView().stream().anyMatch(m -> m.getMessage().contains(text))); }
@Override public Header getHeaders() { if (this.responseHeader == null) { this.responseHeader = Header.newInstance(); org.apache.http.Header[] allHeaders = response.getAllHeaders(); for (org.apache.http.Header header : allHeaders) { this.responseHeader.addParam(header.getName(), header.getValue()); } } return this.responseHeader; }
@Test void testGetHeaders() { assertEquals(3, clientHttpResponse.getHeaders().getHeader().size()); assertEquals("testValue", clientHttpResponse.getHeaders().getValue("testName")); }
@Override public Cursor<Tuple> zScan(byte[] key, ScanOptions options) { return new KeyBoundCursor<Tuple>(key, 0, options) { private RedisClient client; @Override protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) { if (isQueueing() || isPipelined()) { throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode."); } List<Object> args = new ArrayList<Object>(); args.add(key); args.add(Long.toUnsignedString(cursorId)); if (options.getPattern() != null) { args.add("MATCH"); args.add(options.getPattern()); } if (options.getCount() != null) { args.add("COUNT"); args.add(options.getCount()); } RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray()); ListScanResult<Tuple> res = syncFuture(f); client = res.getRedisClient(); return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues()); } }.open(); }
@Test public void testZScan() { connection.zAdd("key".getBytes(), 1, "value1".getBytes()); connection.zAdd("key".getBytes(), 2, "value2".getBytes()); Cursor<Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build()); assertThat(t.hasNext()).isTrue(); assertThat(t.next().getValue()).isEqualTo("value1".getBytes()); assertThat(t.hasNext()).isTrue(); assertThat(t.next().getValue()).isEqualTo("value2".getBytes()); }
@Override public Column convert(BasicTypeDefine typeDefine) { PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder() .name(typeDefine.getName()) .sourceType(typeDefine.getColumnType()) .nullable(typeDefine.isNullable()) .defaultValue(typeDefine.getDefaultValue()) .comment(typeDefine.getComment()); String oracleType = typeDefine.getDataType().toUpperCase(); switch (oracleType) { case ORACLE_INTEGER: builder.dataType(new DecimalType(DEFAULT_PRECISION, 0)); builder.columnLength((long) DEFAULT_PRECISION); break; case ORACLE_NUMBER: Long precision = typeDefine.getPrecision(); if (precision == null || precision == 0 || precision > DEFAULT_PRECISION) { precision = Long.valueOf(DEFAULT_PRECISION); } Integer scale = typeDefine.getScale(); if (scale == null) { scale = 127; } if (scale <= 0) { int newPrecision = (int) (precision - scale); if (newPrecision == 1) { builder.dataType(BasicType.BOOLEAN_TYPE); } else if (newPrecision <= 9) { builder.dataType(BasicType.INT_TYPE); } else if (newPrecision <= 18) { builder.dataType(BasicType.LONG_TYPE); } else if (newPrecision < 38) { builder.dataType(new DecimalType(newPrecision, 0)); builder.columnLength((long) newPrecision); } else { builder.dataType(new DecimalType(DEFAULT_PRECISION, 0)); builder.columnLength((long) DEFAULT_PRECISION); } } else if (scale <= DEFAULT_SCALE) { builder.dataType(new DecimalType(precision.intValue(), scale)); builder.columnLength(precision); builder.scale(scale); } else { builder.dataType(new DecimalType(precision.intValue(), DEFAULT_SCALE)); builder.columnLength(precision); builder.scale(DEFAULT_SCALE); } break; case ORACLE_FLOAT: // The float type will be converted to DecimalType(10, -127), // which will lose precision in the spark engine DecimalType floatDecimal = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE); builder.dataType(floatDecimal); builder.columnLength((long) floatDecimal.getPrecision()); builder.scale(floatDecimal.getScale()); break; case ORACLE_BINARY_FLOAT: case ORACLE_REAL: builder.dataType(BasicType.FLOAT_TYPE); break; case ORACLE_BINARY_DOUBLE: builder.dataType(BasicType.DOUBLE_TYPE); break; case ORACLE_CHAR: case ORACLE_VARCHAR: case ORACLE_VARCHAR2: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(typeDefine.getLength()); break; case ORACLE_NCHAR: case ORACLE_NVARCHAR2: builder.dataType(BasicType.STRING_TYPE); builder.columnLength( TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength())); break; case ORACLE_ROWID: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(MAX_ROWID_LENGTH); break; case ORACLE_XML: case ORACLE_SYS_XML: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(typeDefine.getLength()); break; case ORACLE_LONG: builder.dataType(BasicType.STRING_TYPE); // The maximum length of the column is 2GB-1 builder.columnLength(BYTES_2GB - 1); break; case ORACLE_CLOB: case ORACLE_NCLOB: builder.dataType(BasicType.STRING_TYPE); // The maximum length of the column is 4GB-1 builder.columnLength(BYTES_4GB - 1); break; case ORACLE_BLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); // The maximum length of the column is 4GB-1 builder.columnLength(BYTES_4GB - 1); break; case ORACLE_RAW: builder.dataType(PrimitiveByteArrayType.INSTANCE); if (typeDefine.getLength() == null || typeDefine.getLength() == 0) { builder.columnLength(MAX_RAW_LENGTH); } else { builder.columnLength(typeDefine.getLength()); } break; case ORACLE_LONG_RAW: builder.dataType(PrimitiveByteArrayType.INSTANCE); // The maximum length of the column is 2GB-1 
builder.columnLength(BYTES_2GB - 1); break; case ORACLE_DATE: builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); break; case ORACLE_TIMESTAMP: case ORACLE_TIMESTAMP_WITH_TIME_ZONE: case ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE: builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); if (typeDefine.getScale() == null) { builder.scale(TIMESTAMP_DEFAULT_SCALE); } else { builder.scale(typeDefine.getScale()); } break; default: throw CommonError.convertToSeaTunnelTypeError( DatabaseIdentifier.ORACLE, oracleType, typeDefine.getName()); } return builder.build(); }
@Test public void testNumberWithNegativeScale() { BasicTypeDefine<Object> typeDefine = BasicTypeDefine.builder() .name("test") .columnType("number(38,-1)") .dataType("number") .precision(38L) .scale(-1) .build(); Column column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(new DecimalType(38, 0), column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = BasicTypeDefine.builder() .name("test") .columnType("number(5,-2)") .dataType("number") .precision(5L) .scale(-2) .build(); column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(BasicType.INT_TYPE, column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = BasicTypeDefine.builder() .name("test") .columnType("number(9,-2)") .dataType("number") .precision(9L) .scale(-2) .build(); column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(BasicType.LONG_TYPE, column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = BasicTypeDefine.builder() .name("test") .columnType("number(14,-11)") .dataType("number") .precision(14L) .scale(-11) .build(); column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(new DecimalType(25, 0), column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); }
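A worked pass through the non-positive-scale branch above, reading DEFAULT_PRECISION as 38 from the fallback assertions: for number(14,-11) the effective integer digit count is precision - scale = 14 - (-11) = 25; 25 exceeds 18 but is below 38, so the column maps to DecimalType(25, 0), matching the final assertion. Likewise number(5,-2) yields 7 digits (INT_TYPE), number(9,-2) yields 11 digits (LONG_TYPE), and number(38,-1) yields 39, which falls through to DecimalType(38, 0).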
public static <T extends SpecificRecordBase> T dataMapToSpecificRecord(DataMap map, RecordDataSchema dataSchema, Schema avroSchema) throws DataTranslationException { DataMapToSpecificRecordTranslator translator = new DataMapToSpecificRecordTranslator(); try { T avroRecord = translator.translate(map, dataSchema, avroSchema); translator.checkMessageListForErrorsAndThrowDataTranslationException(); return avroRecord; } catch (RuntimeException e) { throw translator.dataTranslationException(e); } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) { throw translator.dataTranslationException(new RuntimeException(e)); } }
@Test public void testDataMapToSpecificRecordTranslatorInnerRecord() throws IOException { RecordDataSchema recordDataSchema = (RecordDataSchema) TestUtil.dataSchemaFromString(TestEventRecordOfRecord.TEST_SCHEMA.toString()); RecordDataSchema innerRecordDataSchema = (RecordDataSchema) TestUtil.dataSchemaFromString(TestEventWithUnionAndEnum.TEST_SCHEMA.toString()); Schema avroSchema = TestEventRecordOfRecord.TEST_SCHEMA; Schema innerAvroSchema = TestEventWithUnionAndEnum.TEST_SCHEMA; DataMap innerMap2 = new DataMap(); innerMap2.put("fieldName", "field"); innerMap2.put("eventData", new DataMap(ImmutableMap.of("long", 1L))); innerMap2.put("enumData", EnumData.APPROVED.toString()); TestEventRecordOfRecord testEventRecordOfRecord = DataTranslator.dataMapToSpecificRecord(new DataMap(ImmutableMap.of("innerField", innerMap2, "stringArray", new DataList(Arrays.asList("val1")))), recordDataSchema, avroSchema); TestEventWithUnionAndEnum innerEvent = DataTranslator.dataMapToSpecificRecord(innerMap2, innerRecordDataSchema, innerAvroSchema); Assert.assertEquals(testEventRecordOfRecord.get(0), innerEvent); Assert.assertEquals(testEventRecordOfRecord.get(1), Arrays.asList("val1")); }
static int linuxMinorVersion0(String version, boolean isLinux) { if (!isLinux) { return -1; } String[] versionTokens = version.split("\\."); try { return Integer.parseInt(versionTokens[1]); } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) { // a version string without a dot, or with a non-numeric minor part, yields -1 return -1; } }
@Test public void test_linuxMinorVersion0_whenNotIsLinux() { assertEquals(-1, OS.linuxMinorVersion0("5.16.12-200.fc35.x86_64", false)); }
@Override public Object read(final MySQLPacketPayload payload, final boolean unsigned) throws SQLException { int length = payload.readInt1(); switch (length) { case 0: throw new SQLFeatureNotSupportedException("Unsupported date format: year, month and day are absent."); case 4: return getTimestampForDate(payload); case 7: return getTimestampForDatetime(payload); case 11: Timestamp result = getTimestampForDatetime(payload); result.setNanos(payload.readInt4() * 1000); return result; default: throw new SQLFeatureNotSupportedException(String.format("Wrong length `%d` of MYSQL_TYPE_DATE", length)); } }
@Test void assertReadWithSevenBytes() throws SQLException { when(payload.readInt1()).thenReturn(7, 12, 31, 10, 59, 0); when(payload.readInt2()).thenReturn(2018); LocalDateTime actual = LocalDateTime.ofInstant(Instant.ofEpochMilli(((Timestamp) new MySQLDateBinaryProtocolValue().read(payload, false)).getTime()), ZoneId.systemDefault()); assertThat(actual.getYear(), is(2018)); assertThat(actual.getMonthValue(), is(12)); assertThat(actual.getDayOfMonth(), is(31)); assertThat(actual.getHour(), is(10)); assertThat(actual.getMinute(), is(59)); assertThat(actual.getSecond(), is(0)); }
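For context on the stubbed reads: in the MySQL binary protocol a length of 7 means the value carries year (2 bytes) plus month, day, hour, minute and second (1 byte each), while a length of 11 appends a 4-byte microsecond field, which is why the 11-byte branch above multiplies readInt4() by 1000 to obtain nanoseconds. The mocked readInt1() values 7, 12, 31, 10, 59, 0 and readInt2() value 2018 therefore decode to 2018-12-31 10:59:00, exactly what the assertions check.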
@Override public void setConfiguration(final Path file, final LoggingConfiguration configuration) throws BackgroundException { // Logging target bucket final Path bucket = containerService.getContainer(file); try { final S3BucketLoggingStatus status = new S3BucketLoggingStatus( StringUtils.isNotBlank(configuration.getLoggingTarget()) ? configuration.getLoggingTarget() : bucket.isRoot() ? RequestEntityRestStorageService.findBucketInHostname(session.getHost()) : bucket.getName(), null); if(configuration.isEnabled()) { status.setLogfilePrefix(new HostPreferences(session.getHost()).getProperty("s3.logging.prefix")); } session.getClient().setBucketLoggingStatus(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), status, true); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, file); } }
@Test(expected = NotfoundException.class) public void testWriteNotFound() throws Exception { new S3LoggingFeature(session).setConfiguration( new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)), new LoggingConfiguration(false) ); }
public static String formatToSimpleDate(Date date) { SimpleDateFormat simpleDate = new SimpleDateFormat("dd MMM yyyy"); return simpleDate.format(date); }
@Test public void shouldFormatDateToDisplayOnUI() { Calendar instance = Calendar.getInstance(); instance.set(2009, Calendar.NOVEMBER, 5); Date date = instance.getTime(); String formattedDate = DateUtils.formatToSimpleDate(date); assertThat(formattedDate, is("05 Nov 2009")); }
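One caveat: new SimpleDateFormat("dd MMM yyyy") uses the JVM default locale, so the expected "05 Nov 2009" only holds where month abbreviations render in English. A locale-pinned variant (a sketch assuming java.util.Locale is imported, not the project's actual code):
public static String formatToSimpleDate(Date date) { /* pin the locale so "MMM" always renders English month abbreviations */ SimpleDateFormat simpleDate = new SimpleDateFormat("dd MMM yyyy", Locale.ENGLISH); return simpleDate.format(date); }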
@Override public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) { Predicate<Map.Entry<TextBlock, Integer>> containsLine = new TextBlockContainsLine(lineBuilder.getLine()); // list is sorted to cope with the non-guaranteed order of Map entries which would trigger false detection of changes // in {@link DbFileSources.Line#getDuplicationList()} duplicatedTextBlockIndexByTextBlock.entrySet().stream() .filter(containsLine) .map(Map.Entry::getValue) .sorted(Comparator.naturalOrder()) .forEach(lineBuilder::addDuplication); return Optional.empty(); }
@Test public void read_nothing() { DuplicationLineReader reader = new DuplicationLineReader(Collections.emptySet()); assertThat(reader.read(line1)).isEmpty(); assertThat(line1.getDuplicationList()).isEmpty(); }
public ConvertedTime getConvertedTime(long duration) { Set<Seconds> keys = RULES.keySet(); for (Seconds seconds : keys) { if (duration <= seconds.getSeconds()) { return RULES.get(seconds).getConvertedTime(duration); } } return new TimeConverter.OverTwoYears().getConvertedTime(duration); }
@Test public void testShouldReportAbout1MonthFor29Days23Hours59Minutes30Seconds() throws Exception { assertEquals(TimeConverter.ABOUT_1_MONTH_AGO, timeConverter.getConvertedTime(29 * TimeConverter.DAY_IN_SECONDS + 23 * 60 * 60 + 59 * 60 + 30)); }
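Checking the test's arithmetic, assuming DAY_IN_SECONDS is 86,400: 29 * 86,400 + 23 * 3,600 + 59 * 60 + 30 = 2,505,600 + 82,800 + 3,540 + 30 = 2,591,970 seconds, which is 30 seconds short of 30 full days (2,592,000 s), so the duration still falls into the rule bucket that reports "about 1 month".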
public static boolean containsSkinTone( @NonNull CharSequence text, @NonNull JavaEmojiUtils.SkinTone skinTone) { return JavaEmojiUtils.containsSkinTone(text, skinTone); }
@Test public void testContainsSkinTone() { Assert.assertFalse( EmojiUtils.containsSkinTone("\uD83D\uDC4D", JavaEmojiUtils.SkinTone.Fitzpatrick_2)); Assert.assertTrue( EmojiUtils.containsSkinTone( "\uD83D\uDC4D\uD83C\uDFFB", JavaEmojiUtils.SkinTone.Fitzpatrick_2)); Assert.assertFalse( EmojiUtils.containsSkinTone( "\uD83D\uDC4D\uD83C\uDFFC", JavaEmojiUtils.SkinTone.Fitzpatrick_2)); Assert.assertFalse( EmojiUtils.containsSkinTone("\uD83D\uDC4D\uD83C", JavaEmojiUtils.SkinTone.Fitzpatrick_2)); Assert.assertFalse( EmojiUtils.containsSkinTone("\uD83D", JavaEmojiUtils.SkinTone.Fitzpatrick_2)); Assert.assertFalse( EmojiUtils.containsSkinTone("\uDFFB", JavaEmojiUtils.SkinTone.Fitzpatrick_2)); Assert.assertFalse(EmojiUtils.containsSkinTone("", JavaEmojiUtils.SkinTone.Fitzpatrick_2)); }
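The escape sequences here are UTF-16 surrogate pairs: "\uD83D\uDC4D" encodes U+1F44D (thumbs up), "\uD83C\uDFFB" encodes U+1F3FB, the Fitzpatrick type-1-2 skin tone modifier, and "\uD83C\uDFFC" is the type-3 modifier U+1F3FC. The remaining cases feed the emoji followed by a dangling high surrogate, lone surrogates, or an empty string, checking that malformed input is simply reported as containing no skin tone.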
@SuppressWarnings({"unchecked", "rawtypes"}) @Override public @Nullable <InputT> TransformEvaluator<InputT> forApplication( AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) throws IOException { return createEvaluator((AppliedPTransform) application); }
@Test public void boundedSourceInMemoryTransformEvaluatorShardsOfSource() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); List<? extends BoundedSource<Long>> splits = source.split(source.getEstimatedSizeBytes(options) / 2, options); UncommittedBundle<BoundedSourceShard<Long>> rootBundle = bundleFactory.createRootBundle(); for (BoundedSource<Long> split : splits) { BoundedSourceShard<Long> shard = BoundedSourceShard.of(split); rootBundle.add(WindowedValue.valueInGlobalWindow(shard)); } CommittedBundle<BoundedSourceShard<Long>> shards = rootBundle.commit(Instant.now()); TransformEvaluator<BoundedSourceShard<Long>> evaluator = factory.forApplication(longsProducer, shards); for (WindowedValue<BoundedSourceShard<Long>> shard : shards.getElements()) { UncommittedBundle<Long> outputBundle = bundleFactory.createBundle(longs); when(context.createBundle(longs)).thenReturn(outputBundle); evaluator.processElement(shard); } TransformResult<?> result = evaluator.finishBundle(); assertThat(Iterables.size(result.getOutputBundles()), equalTo(splits.size())); List<WindowedValue<?>> outputElems = new ArrayList<>(); for (UncommittedBundle<?> outputBundle : result.getOutputBundles()) { CommittedBundle<?> outputs = outputBundle.commit(Instant.now()); for (WindowedValue<?> outputElem : outputs.getElements()) { outputElems.add(outputElem); } } assertThat( outputElems, containsInAnyOrder( gw(1L), gw(2L), gw(4L), gw(8L), gw(9L), gw(7L), gw(6L), gw(5L), gw(3L), gw(0L))); }
public void incQueuePutNums(final String topic, final Integer queueId) { if (enableQueueStat) { this.statsTable.get(Stats.QUEUE_PUT_NUMS).addValue(buildStatsKey(topic, queueId), 1, 1); } }
@Test public void testIncQueuePutNums() { brokerStatsManager.incQueuePutNums(TOPIC, QUEUE_ID); String statsKey = brokerStatsManager.buildStatsKey(TOPIC, String.valueOf(QUEUE_ID)); assertThat(brokerStatsManager.getStatsItem(QUEUE_PUT_NUMS, statsKey).getTimes().doubleValue()).isEqualTo(1L); brokerStatsManager.incQueuePutNums(TOPIC, QUEUE_ID, 2, 2); assertThat(brokerStatsManager.getStatsItem(QUEUE_PUT_NUMS, statsKey).getValue().doubleValue()).isEqualTo(3L); }
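The second call exercises an incQueuePutNums overload that this row does not show. By symmetry with the single-increment version above, and given the test's expectation that the value grows by num and the times counter by times, it presumably resembles this sketch (an assumption, not confirmed RocketMQ source):
public void incQueuePutNums(final String topic, final Integer queueId, int num, int times) { if (enableQueueStat) { /* add num to the stat value and times to the invocation counter */ this.statsTable.get(Stats.QUEUE_PUT_NUMS).addValue(buildStatsKey(topic, queueId), num, times); } }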
@Override public int getOrder() { return PluginEnum.SENTINEL.getCode(); }
@Test public void testGetOrder() { final int result = sentinelPlugin.getOrder(); assertEquals(PluginEnum.SENTINEL.getCode(), result); }
@SuppressWarnings("WeakerAccess") public Map<String, Object> getMainConsumerConfigs(final String groupId, final String clientId, final int threadIdx) { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); // Get main consumer override configs final Map<String, Object> mainConsumerProps = originalsWithPrefix(MAIN_CONSUMER_PREFIX); consumerProps.putAll(mainConsumerProps); // this is a hack to work around StreamsConfig constructor inside StreamsPartitionAssignor to avoid casting consumerProps.put(APPLICATION_ID_CONFIG, groupId); // add group id, client id with stream client id prefix, and group instance id consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId); final String groupInstanceId = (String) consumerProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG); // Suffix each thread consumer with thread.id to enforce uniqueness of group.instance.id. if (groupInstanceId != null) { consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId + "-" + threadIdx); } // add configs required for stream partition assignor consumerProps.put(UPGRADE_FROM_CONFIG, getString(UPGRADE_FROM_CONFIG)); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ACCEPTABLE_RECOVERY_LAG_CONFIG, getLong(ACCEPTABLE_RECOVERY_LAG_CONFIG)); consumerProps.put(MAX_WARMUP_REPLICAS_CONFIG, getInt(MAX_WARMUP_REPLICAS_CONFIG)); consumerProps.put(PROBING_REBALANCE_INTERVAL_MS_CONFIG, getLong(PROBING_REBALANCE_INTERVAL_MS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamsPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, getString(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, getList(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG)); consumerProps.put(TASK_ASSIGNOR_CLASS_CONFIG, getString(TASK_ASSIGNOR_CLASS_CONFIG)); // disable auto topic creation consumerProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false"); // verify that producer batch config is no larger than segment size, then add topic configs required for creating topics final Map<String, Object> topicProps = originalsWithPrefix(TOPIC_PREFIX, false); final Map<String, Object> producerProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (topicProps.containsKey(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)) && producerProps.containsKey(ProducerConfig.BATCH_SIZE_CONFIG)) { final int segmentSize = Integer.parseInt(topicProps.get(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)).toString()); final int batchSize = Integer.parseInt(producerProps.get(ProducerConfig.BATCH_SIZE_CONFIG).toString()); if (segmentSize < batchSize) { throw new IllegalArgumentException(String.format("Specified topic segment size %d is is smaller than the configured producer batch size %d, this will cause produced batch not able to be appended to the topic", segmentSize, 
batchSize)); } } consumerProps.putAll(topicProps); return consumerProps; }
@Test public void shouldNotSetInternalThrowOnFetchStableOffsetUnsupportedConfigToFalseInConsumerForEosDisabled() { final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx); assertThat(consumerConfigs.get("internal.throw.on.fetch.stable.offset.unsupported"), is(nullValue())); }
@Override public ObjectNode encode(MappingInstruction instruction, CodecContext context) { checkNotNull(instruction, "Mapping instruction cannot be null"); return new EncodeMappingInstructionCodecHelper(instruction, context).encode(); }
@Test public void multicastWeightInstructionTest() { final MulticastMappingInstruction.WeightMappingInstruction instruction = (MulticastMappingInstruction.WeightMappingInstruction) MappingInstructions.multicastWeight(MULTICAST_WEIGHT); final ObjectNode instructionJson = instructionCodec.encode(instruction, context); assertThat(instructionJson, matchesInstruction(instruction)); }
public static FailoverStrategy.Factory loadFailoverStrategyFactory(final Configuration config) { checkNotNull(config); final String strategyParam = config.get(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY); switch (strategyParam.toLowerCase()) { case FULL_RESTART_STRATEGY_NAME: return new RestartAllFailoverStrategy.Factory(); case PIPELINED_REGION_RESTART_STRATEGY_NAME: return new RestartPipelinedRegionFailoverStrategy.Factory(); default: throw new IllegalConfigurationException( "Unknown failover strategy: " + strategyParam); } }
@Test void testLoadRestartPipelinedRegionStrategyFactory() { final Configuration config = new Configuration(); config.set( JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, FailoverStrategyFactoryLoader.PIPELINED_REGION_RESTART_STRATEGY_NAME); assertThat(FailoverStrategyFactoryLoader.loadFailoverStrategyFactory(config)) .isInstanceOf(RestartPipelinedRegionFailoverStrategy.Factory.class); }
public String getBaseUrl() { String url = config.get(SERVER_BASE_URL).orElse(""); if (isEmpty(url)) { url = computeBaseUrl(); } // Remove trailing slashes return StringUtils.removeEnd(url, "/"); }
@Test public void base_url_is_http_specified_host_9000_when_host_is_set() { settings.setProperty(HOST_PROPERTY, "my_host"); assertThat(underTest().getBaseUrl()).isEqualTo("http://my_host:9000"); }
public static Sessions withGapDuration(Duration gapDuration) { return new Sessions(gapDuration); }
@Test public void testDisplayData() { Duration gapDuration = Duration.standardMinutes(234); Sessions session = Sessions.withGapDuration(gapDuration); assertThat(DisplayData.from(session), hasDisplayItem("gapDuration", gapDuration)); }
@Override public boolean isReusable() { return true; }
@Test public void testIsReusable() { assertThat(parser.isReusable(), CoreMatchers.is(true)); }
public CompletableFuture<Triple<MessageExt, String, Boolean>> getMessageAsync(String topic, long offset, int queueId, String brokerName, boolean deCompressBody) { MessageStore messageStore = brokerController.getMessageStoreByBrokerName(brokerName); if (messageStore != null) { return messageStore.getMessageAsync(innerConsumerGroupName, topic, queueId, offset, 1, null) .thenApply(result -> { if (result == null) { LOG.warn("getMessageResult is null , innerConsumerGroupName {}, topic {}, offset {}, queueId {}", innerConsumerGroupName, topic, offset, queueId); return Triple.of(null, "getMessageResult is null", false); // local store, so no retry } List<MessageExt> list = decodeMsgList(result, deCompressBody); if (list == null || list.isEmpty()) { // OFFSET_FOUND_NULL returned by TieredMessageStore indicates exception occurred boolean needRetry = GetMessageStatus.OFFSET_FOUND_NULL.equals(result.getStatus()) && messageStore instanceof TieredMessageStore; LOG.warn("Can not get msg , topic {}, offset {}, queueId {}, needRetry {}, result is {}", topic, offset, queueId, needRetry, result); return Triple.of(null, "Can not get msg", needRetry); } return Triple.of(list.get(0), "", false); }); } else { return getMessageFromRemoteAsync(topic, offset, queueId, brokerName); } }
@Test public void getMessageAsyncTest_localStore_decodeNothing_DefaultMessageStore() throws Exception { when(brokerController.getMessageStoreByBrokerName(any())).thenReturn(defaultMessageStore); for (GetMessageStatus status : GetMessageStatus.values()) { GetMessageResult getMessageResult = mockGetMessageResult(0, TEST_TOPIC, null); getMessageResult.setStatus(status); when(defaultMessageStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())) .thenReturn(CompletableFuture.completedFuture(getMessageResult)); Triple<MessageExt, String, Boolean> rst = escapeBridge.getMessageAsync(TEST_TOPIC, 0, DEFAULT_QUEUE_ID, BROKER_NAME, false).join(); Assert.assertNull(rst.getLeft()); Assert.assertEquals("Can not get msg", rst.getMiddle()); Assert.assertFalse(rst.getRight()); // DefaultMessageStore, no retry } }
@Override public Status check() { Runtime runtime = Runtime.getRuntime(); long freeMemory = runtime.freeMemory(); long totalMemory = runtime.totalMemory(); long maxMemory = runtime.maxMemory(); boolean ok = (maxMemory - (totalMemory - freeMemory) > 2 * 1024 * 1024); // Warn when spare memory falls below 2MB String msg = "max:" + (maxMemory / 1024 / 1024) + "M,total:" + (totalMemory / 1024 / 1024) + "M,used:" + ((totalMemory / 1024 / 1024) - (freeMemory / 1024 / 1024)) + "M,free:" + (freeMemory / 1024 / 1024) + "M"; return new Status(ok ? Status.Level.OK : Status.Level.WARN, msg); }
@Test void test() { MemoryStatusChecker statusChecker = new MemoryStatusChecker(); Status status = statusChecker.check(); assertThat(status.getLevel(), anyOf(is(OK), is(WARN))); logger.info("memory status level: " + status.getLevel()); logger.info("memory status message: " + status.getMessage()); }
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
@Test public void shouldThrowIfCanNotCoerceToTimestamp() { // Given: final KsqlJsonDeserializer<java.sql.Timestamp> deserializer = givenDeserializerForSchema(Timestamp.SCHEMA, java.sql.Timestamp.class); final byte[] bytes = serializeJson(BooleanNode.valueOf(true)); // When: final Exception e = assertThrows( SerializationException.class, () -> deserializer.deserialize(SOME_TOPIC, bytes) ); // Then: assertThat(e.getCause(), (hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: TIMESTAMP")))); }
@SuppressWarnings("unchecked") @Override public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnException, IOException { NodeStatus remoteNodeStatus = request.getNodeStatus(); /** * Here is the node heartbeat sequence... * 1. Check if it's a valid (i.e. not excluded) node * 2. Check if it's a registered node * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat * 4. Send healthStatus to RMNode * 5. Update node's labels if distributed Node Labels configuration is enabled */ NodeId nodeId = remoteNodeStatus.getNodeId(); // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is // in decommissioning. if (!this.nodesListManager.isValidNode(nodeId.getHost()) && !isNodeInDecommissioning(nodeId)) { String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: " + nodeId.getHost(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } // 2. Check if it's a registered node RMNode rmNode = this.rmContext.getRMNodes().get(nodeId); if (rmNode == null) { /* node does not exist */ String message = "Node not found resyncing " + remoteNodeStatus.getNodeId(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Send ping this.nmLivelinessMonitor.receivedPing(nodeId); this.decommissioningWatcher.update(rmNode, remoteNodeStatus); // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse(); if (getNextResponseId( remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse .getResponseId()) { LOG.info("Received duplicate heartbeat from node " + rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId()); return lastNodeHeartbeatResponse; } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse .getResponseId()) { String message = "Too far behind rm response id:" + lastNodeHeartbeatResponse.getResponseId() + " nm response id:" + remoteNodeStatus.getResponseId(); LOG.info(message); // TODO: Just sending reboot is not enough. Think more. this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING)); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED. if (rmNode.getState() == NodeState.DECOMMISSIONING && decommissioningWatcher.checkReadyToBeDecommissioned( rmNode.getNodeID())) { String message = "DECOMMISSIONING " + nodeId + " is ready to be decommissioned"; LOG.info(message); this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION)); this.nmLivelinessMonitor.unregister(nodeId); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } if (timelineServiceV2Enabled) { // Check & update collectors info from request. 
updateAppCollectorsMap(request); } // Heartbeat response long newInterval = nextHeartBeatInterval; if (heartBeatIntervalScalingEnable) { newInterval = rmNode.calculateHeartBeatInterval( nextHeartBeatInterval, heartBeatIntervalMin, heartBeatIntervalMax, heartBeatIntervalSpeedupFactor, heartBeatIntervalSlowdownFactor); } NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils.newNodeHeartbeatResponse( getNextResponseId(lastNodeHeartbeatResponse.getResponseId()), NodeAction.NORMAL, null, null, null, null, newInterval); rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse); populateKeys(request, nodeHeartBeatResponse); populateTokenSequenceNo(request, nodeHeartBeatResponse); if (timelineServiceV2Enabled) { // Return collectors' map that NM needs to know setAppCollectorsMapToResponse(rmNode.getRunningApps(), nodeHeartBeatResponse); } // 4. Send status to RMNode, saving the latest response. RMNodeStatusEvent nodeStatusEvent = new RMNodeStatusEvent(nodeId, remoteNodeStatus); if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) { nodeStatusEvent.setLogAggregationReportsForApps(request .getLogAggregationReportsForApps()); } this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent); // 5. Update node's labels to RM's NodeLabelManager. if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) { try { updateNodeLabelsFromNMReport( NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage()); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false); } } // 6. check if node's capacity is load from dynamic-resources.xml // if so, send updated resource back to NM. String nid = nodeId.toString(); Resource capability = loadNodeResourceFromDRConfiguration(nid); // sync back with new resource if not null. if (capability != null) { nodeHeartBeatResponse.setResource(capability); } // Check if we got an event (AdminService) that updated the resources if (rmNode.isUpdatedCapability()) { nodeHeartBeatResponse.setResource(rmNode.getTotalCapability()); rmNode.resetUpdatedCapability(); } // 7. Send Container Queuing Limits back to the Node. This will be used by // the node to truncate the number of Containers queued for execution. if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) { nodeHeartBeatResponse.setContainerQueuingLimit( this.rmContext.getNodeManagerQueueLimitCalculator() .createContainerQueuingLimit()); } // 8. Get node's attributes and update node-to-attributes mapping // in RMNodeAttributeManager. if (request.getNodeAttributes() != null) { try { // update node attributes if necessary then update heartbeat response updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes()); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response String errorMsg = nodeHeartBeatResponse.getDiagnosticsMessage() == null ? ex.getMessage() : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex .getMessage(); nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false); } } return nodeHeartBeatResponse; }
@Test public void testDecommissionWithExcludeHosts() throws Exception { Configuration conf = new Configuration(); conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile .getAbsolutePath()); writeToHostsFile(""); rm = new MockRM(conf); rm.start(); MockNM nm1 = rm.registerNode("host1:1234", 5120); MockNM nm2 = rm.registerNode("host2:5678", 10240); MockNM nm3 = rm.registerNode("localhost:4433", 1024); int metricCount = ClusterMetrics.getMetrics().getNumDecommisionedNMs(); NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat = nm2.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); rm.drainEvents(); // To test that IPs also work String ip = NetUtils.normalizeHostName("localhost"); writeToHostsFile("host2", ip); rm.getNodesListManager().refreshNodes(conf); checkDecommissionedNMCount(rm, metricCount + 2); nodeHeartbeat = nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat = nm2.nodeHeartbeat(true); Assert.assertTrue("The decommissioned metrics are not updated", NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat = nm3.nodeHeartbeat(true); Assert.assertTrue("The decommissioned metrics are not updated", NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction())); rm.drainEvents(); writeToHostsFile(""); rm.getNodesListManager().refreshNodes(conf); nm3 = rm.registerNode("localhost:4433", 1024); nodeHeartbeat = nm3.nodeHeartbeat(true); rm.drainEvents(); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); // decommissioned node count is 1 since 1 node rejoined after updating the exclude file checkDecommissionedNMCount(rm, metricCount + 1); }
@Override public HashSlotCursor16byteKey cursor() { return new CursorLongKey2(); }
@Test(expected = AssertionError.class) public void testCursor_key1_whenDisposed() { HashSlotCursor16byteKey cursor = hsa.cursor(); hsa.dispose(); cursor.key1(); }
public Optional<Search> getForUser(String id, SearchUser searchUser) { final Optional<Search> search = dbService.get(id); search.ifPresent(s -> checkPermission(searchUser, s)); return search; }
@Test public void throwsPermissionExceptionIfNeitherOwnedNorPermittedFromViews() { final Search search = mockSearchWithOwner("someone else"); final SearchUser searchUser = mock(SearchUser.class); when(viewService.forSearch(anyString())).thenReturn(ImmutableList.of()); assertThatExceptionOfType(PermissionException.class) .isThrownBy(() -> sut.getForUser(search.id(), searchUser)); }
public AcceptState getAcceptState() { return acceptState; }
@Test public void testConstructor() { Verifier verifier = new VerifierNone(); RpcAcceptedReply reply = new RpcAcceptedReply(0, ReplyState.MSG_ACCEPTED, verifier, AcceptState.SUCCESS); assertEquals(0, reply.getXid()); assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType()); assertEquals(ReplyState.MSG_ACCEPTED, reply.getState()); assertEquals(verifier, reply.getVerifier()); assertEquals(AcceptState.SUCCESS, reply.getAcceptState()); }
public void replayCreateCatalog(Catalog catalog) throws DdlException { String type = catalog.getType(); String catalogName = catalog.getName(); Map<String, String> config = catalog.getConfig(); CatalogConnector catalogConnector = null; try { if (Strings.isNullOrEmpty(type)) { throw new DdlException("Missing properties 'type'"); } // skip unsupported connector type if (!ConnectorType.isSupport(type)) { LOG.error("Replay catalog [{}] encounter unknown catalog type [{}], ignore it", catalogName, type); return; } readLock(); try { Preconditions.checkState(!catalogs.containsKey(catalogName), "Catalog '%s' already exists", catalogName); } finally { readUnlock(); } Map<String, String> properties = catalog.getConfig(); String serviceName = properties.get("ranger.plugin.hive.service.name"); if (serviceName == null || serviceName.isEmpty()) { if (Config.access_control.equals("ranger")) { Authorizer.getInstance().setAccessControl(catalogName, new RangerStarRocksAccessController()); } else { Authorizer.getInstance().setAccessControl(catalogName, new NativeAccessController()); } } else { Authorizer.getInstance().setAccessControl(catalogName, new RangerHiveAccessController(serviceName)); } try { catalogConnector = connectorMgr.createConnector( new ConnectorContext(catalogName, type, config), true); if (catalogConnector == null) { LOG.error("{} connector [{}] create failed.", type, catalogName); throw new DdlException("connector create failed"); } } catch (StarRocksConnectorException e) { LOG.error("connector create failed [{}], reason {}", catalogName, e.getMessage()); throw new DdlException(String.format("connector create failed: %s", e.getMessage())); } writeLock(); try { catalogs.put(catalogName, catalog); } finally { writeUnLock(); } } catch (Exception e) { if (catalogConnector != null && connectorMgr.connectorExists(catalogName)) { connectorMgr.removeConnector(catalogName); } catalogs.remove(catalogName); throw e; } }
@Test public void testCreate() throws DdlException { CatalogMgr catalogMgr = GlobalStateMgr.getCurrentState().getCatalogMgr(); Map<String, String> config = new HashMap<>(); config.put("type", "paimon"); final ExternalCatalog catalog = new ExternalCatalog(10000, "catalog_0", "", config); catalogMgr.replayCreateCatalog(catalog); }
public static double validateLatitude(double latitude) { if (Double.isNaN(latitude) || latitude < LATITUDE_MIN || latitude > LATITUDE_MAX) { throw new IllegalArgumentException("invalid latitude: " + latitude); } return latitude; }
@Test public void validateLatitudeTest() { LatLongUtils.validateLatitude(LatLongUtils.LATITUDE_MAX); LatLongUtils.validateLatitude(LatLongUtils.LATITUDE_MIN); verifyInvalidLatitude(Double.NaN); verifyInvalidLatitude(Math.nextAfter(LatLongUtils.LATITUDE_MAX, Double.POSITIVE_INFINITY)); verifyInvalidLatitude(Math.nextAfter(LatLongUtils.LATITUDE_MIN, Double.NEGATIVE_INFINITY)); }
@Override public String deserialize(Asn1ObjectInputStream in) { final String oid = Asn1Utils.decodeObjectIdentifier(in.buffer(), in.position(), in.length); in.advanceToEnd(); return oid; }
@Test public void shouldDeserialize() { assertEquals("1.2.3", deserialize( new ObjectIdentifierConverter(), String.class, new byte[] { 0x2a, 0x03 } )); }
@Override public AnalysisPhase getAnalysisPhase() { return ANALYSIS_PHASE; }
@Test public void testGetAnalysisPhase() { FalsePositiveAnalyzer instance = new FalsePositiveAnalyzer(); AnalysisPhase expResult = AnalysisPhase.POST_IDENTIFIER_ANALYSIS; AnalysisPhase result = instance.getAnalysisPhase(); assertEquals(expResult, result); }
public String getLabel(String labelKey) { Map<String, String> routerLabels = labels.get(RouterConstant.ROUTER_LABELS); if (CollectionUtils.isEmpty(routerLabels)) { return StringUtils.EMPTY; } return routerLabels.get(labelKey); }
@Test public void testGetLabel() { Map<String, String> labels = new HashMap<>(); labels.put("k1", "v1"); labels.put("k2", "v2"); labels.put("k3", "v3"); PolarisRouterContext routerContext = new PolarisRouterContext(); routerContext.putLabels(RouterConstant.ROUTER_LABELS, labels); String resolvedLabel = routerContext.getLabel("k1"); assertThat(resolvedLabel).isEqualTo("v1"); }
@Override public void refreshPluginDataAll() { BaseDataCache.getInstance().cleanPluginData(); }
@Test public void testRefreshPluginDataAll() { baseDataCache.cleanPluginData(); PluginData firstCachedPluginData = PluginData.builder().name(mockName1).build(); PluginData secondCachedPluginData = PluginData.builder().name(mockName2).build(); baseDataCache.cachePluginData(firstCachedPluginData); baseDataCache.cachePluginData(secondCachedPluginData); assertNotNull(baseDataCache.obtainPluginData(firstCachedPluginData.getName())); assertNotNull(baseDataCache.obtainPluginData(secondCachedPluginData.getName())); commonPluginDataSubscriber.refreshPluginDataAll(); assertNull(baseDataCache.obtainPluginData(firstCachedPluginData.getName())); assertNull(baseDataCache.obtainPluginData(secondCachedPluginData.getName())); }
@Override public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo, List<String> partNames, boolean areAllPartsFound) throws MetaException { checkStatisticsList(colStatsWithSourceInfo); ColumnStatisticsObj statsObj = null; String colType; String colName; BinaryColumnStatsData aggregateData = null; for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) { ColumnStatisticsObj cso = csp.getColStatsObj(); if (statsObj == null) { colName = cso.getColName(); colType = cso.getColType(); statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField()); } BinaryColumnStatsData newData = cso.getStatsData().getBinaryStats(); if (aggregateData == null) { aggregateData = newData.deepCopy(); } else { aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); } } ColumnStatisticsData columnStatisticsData = initColumnStatisticsData(); columnStatisticsData.setBinaryStats(aggregateData); statsObj.setStatsData(columnStatisticsData); return statsObj; }
@Test public void testAggregateMultiStatsWhenAllAvailable() throws MetaException { List<String> partitions = Arrays.asList("part1", "part2", "part3"); ColumnStatisticsData data1 = new ColStatsBuilder<>(byte[].class).numNulls(1).avgColLen(20.0 / 3).maxColLen(13).build(); ColumnStatisticsData data2 = new ColStatsBuilder<>(byte[].class).numNulls(2).avgColLen(14).maxColLen(18).build(); ColumnStatisticsData data3 = new ColStatsBuilder<>(byte[].class).numNulls(3).avgColLen(17.5).maxColLen(18).build(); List<ColStatsObjWithSourceInfo> statsList = Arrays.asList( createStatsWithInfo(data1, TABLE, COL, partitions.get(0)), createStatsWithInfo(data2, TABLE, COL, partitions.get(1)), createStatsWithInfo(data3, TABLE, COL, partitions.get(2))); BinaryColumnStatsAggregator aggregator = new BinaryColumnStatsAggregator(); ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true); ColumnStatisticsData expectedStats = new ColStatsBuilder<>(byte[].class).numNulls(6).avgColLen(17.5).maxColLen(18).build(); Assert.assertEquals(expectedStats, computedStatsObj.getStatsData()); }
public static void checkArgument(boolean expression, String errorMessage) { checkArgument(expression, () -> errorMessage); }
@Test(expected = IllegalArgumentException.class) public void testCheckingIncorrectArgument() { Utils.checkArgument(false, "Error"); }
@Override public Data getValueData() { return serializationService.toData(value); }
@Test public void getValueData_caching() { QueryableEntry entry = createEntry("key", "value"); assertThat(entry.getValueData()).isSameAs(entry.getValueData()); }
public <E extends Enum<E>> void logStateChange( final ClusterEventCode eventCode, final int memberId, final E oldState, final E newState) { final int length = stateChangeLength(oldState, newState); final int captureLength = captureLength(length); final int encodedLength = encodedLength(captureLength); final ManyToOneRingBuffer ringBuffer = this.ringBuffer; final int index = ringBuffer.tryClaim(eventCode.toEventCodeId(), encodedLength); if (index > 0) { try { encodeStateChange( (UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, memberId, oldState, newState ); } finally { ringBuffer.commit(index); } } }
@Test void logStateChange() { final int offset = ALIGNMENT * 11; logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset); final TimeUnit from = MINUTES; final TimeUnit to = SECONDS; final int memberId = 42; final String payload = from.name() + STATE_SEPARATOR + to.name(); final int captureLength = SIZE_OF_INT * 2 + payload.length(); logger.logStateChange(STATE_CHANGE, memberId, from, to); verifyLogHeader(logBuffer, offset, STATE_CHANGE.toEventCodeId(), captureLength, captureLength); final int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH; assertEquals(memberId, logBuffer.getInt(index, LITTLE_ENDIAN)); assertEquals(payload, logBuffer.getStringAscii(index + SIZE_OF_INT)); final StringBuilder sb = new StringBuilder(); ClusterEventDissector.dissectStateChange( ClusterEventCode.STATE_CHANGE, logBuffer, encodedMsgOffset(offset), sb); final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: STATE_CHANGE " + "\\[26/26]: memberId=42 MINUTES -> SECONDS"; assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern)); }
@Subscribe public void publishClusterEvent(Object event) { if (event instanceof DeadEvent) { LOG.debug("Skipping DeadEvent on cluster event bus"); return; } final String className = AutoValueUtils.getCanonicalName(event.getClass()); final ClusterEvent clusterEvent = ClusterEvent.create(nodeId.getNodeId(), className, Collections.singleton(nodeId.getNodeId()), event); try { final String id = dbCollection.save(clusterEvent, WriteConcern.JOURNALED).getSavedId(); // We are handling a locally generated event, so we can speed up processing by posting it to the local event // bus immediately. Due to having added the local node id to its list of consumers, it will not be picked up // by the db cursor again, avoiding double processing of the event. See #11263 for details. serverEventBus.post(event); LOG.debug("Published cluster event with ID <{}> and type <{}>", id, className); } catch (MongoException e) { LOG.error("Couldn't publish cluster event of type <" + className + ">", e); } }
@Test public void testPublishClusterEvent() throws Exception { @SuppressWarnings("deprecation") DBCollection collection = mongoConnection.getDatabase().getCollection(ClusterEventPeriodical.COLLECTION_NAME); SimpleEvent event = new SimpleEvent("test"); assertThat(collection.count()).isEqualTo(0L); clusterEventPeriodical.publishClusterEvent(event); verify(clusterEventBus, never()).post(any()); assertThat(collection.count()).isEqualTo(1L); DBObject dbObject = collection.findOne(); assertThat((String) dbObject.get("producer")).isEqualTo(nodeId.getNodeId()); assertThat((String) dbObject.get("event_class")).isEqualTo(SimpleEvent.class.getCanonicalName()); @SuppressWarnings("unchecked") Map<String, Object> payload = (Map<String, Object>) dbObject.get("payload"); assertThat(payload).containsEntry("payload", "test"); }
@Override public double mean() { return mean; }
@Test public void testMean() { System.out.println("mean"); KernelDensity instance = new KernelDensity(x); double expResult = 3.55417; double result = instance.mean(); assertEquals(expResult, result, 1E-5); }
@Override public ChannelBuffer copy() { return copy(readerIndex, readableBytes()); }
@Test void copyBoundaryCheck3() { Assertions.assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(buffer.capacity() + 1, 0)); }
@Override public Collection<String> getXADriverClassNames() { return Collections.singletonList("oracle.jdbc.xa.client.OracleXADataSource"); }
@Test void assertGetXADriverClassName() { assertThat(new OracleXADataSourceDefinition().getXADriverClassNames(), is(Collections.singletonList("oracle.jdbc.xa.client.OracleXADataSource"))); }
public static boolean isValidOrigin(String sourceHost, ZeppelinConfiguration zConf) throws UnknownHostException, URISyntaxException { String sourceUriHost = ""; if (sourceHost != null && !sourceHost.isEmpty()) { sourceUriHost = new URI(sourceHost).getHost(); sourceUriHost = (sourceUriHost == null) ? "" : sourceUriHost.toLowerCase(); } sourceUriHost = sourceUriHost.toLowerCase(); String currentHost = InetAddress.getLocalHost().getHostName().toLowerCase(); return zConf.getAllowedOrigins().contains("*") || currentHost.equals(sourceUriHost) || "localhost".equals(sourceUriHost) || zConf.getAllowedOrigins().contains(sourceHost); }
@Test void isValidFromConfig() throws URISyntaxException, UnknownHostException { assertTrue(CorsUtils.isValidOrigin("http://otherhost.com", ZeppelinConfiguration.load("zeppelin-site.xml"))); }
@Override public Map<String, Long> queuesDetail() { Map<String, Long> detail = new HashMap<>(); queues.forEach((k, v) -> detail.put(k, (long) v.size())); return detail; }
@Test public void testQueuesDetail() { String queueName = "test-queue"; String id = "abcd-1234-defg-5678"; queueDao.pushIfNotExists(queueName, id, 123); assertEquals(Collections.singletonMap(queueName, 1L), queueDao.queuesDetail()); }
@Override public Boolean isUsedInFetchArtifact(PipelineConfig pipelineConfig) { List<FetchTask> fetchTasks = pipelineConfig.getFetchTasks(); for (FetchTask fetchTask : fetchTasks) { if (pipelineName.equals(fetchTask.getDirectParentInAncestorPath())) return true; } return false; }
@Test void shouldDetectDependencyMaterialUsedInFetchArtifact() { DependencyMaterial material = new DependencyMaterial(new CaseInsensitiveString("pipeline-foo"), new CaseInsensitiveString("stage-bar")); PipelineConfig pipelineConfig = mock(PipelineConfig.class); ArrayList<FetchTask> fetchTasks = new ArrayList<>(); fetchTasks.add(new FetchTask(new CaseInsensitiveString("something"), new CaseInsensitiveString("new"), "src", "dest")); fetchTasks.add(new FetchTask(new CaseInsensitiveString("pipeline-foo"), new CaseInsensitiveString("stage-bar"), new CaseInsensitiveString("job"), "src", "dest")); when(pipelineConfig.getFetchTasks()).thenReturn(fetchTasks); assertThat(material.isUsedInFetchArtifact(pipelineConfig)).isTrue(); }
public int validate( final ServiceContext serviceContext, final List<ParsedStatement> statements, final SessionProperties sessionProperties, final String sql ) { requireSandbox(serviceContext); final KsqlExecutionContext ctx = requireSandbox(snapshotSupplier.apply(serviceContext)); final Injector injector = injectorFactory.apply(ctx, serviceContext); final KsqlConfig ksqlConfig = ctx.getKsqlConfig(); int numPersistentQueries = 0; for (final ParsedStatement parsed : statements) { final PreparedStatement<?> prepared = ctx.prepare( parsed, (isVariableSubstitutionEnabled(sessionProperties, ksqlConfig) ? sessionProperties.getSessionVariables() : Collections.emptyMap()) ); final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared, SessionConfig.of(ksqlConfig, sessionProperties.getMutableScopedProperties()) ); final int currNumPersistentQueries = validate( serviceContext, configured, sessionProperties, ctx, injector ); numPersistentQueries += currNumPersistentQueries; if (currNumPersistentQueries > 0 && QueryCapacityUtil.exceedsPersistentQueryCapacity(ctx, ksqlConfig)) { QueryCapacityUtil.throwTooManyActivePersistentQueriesException(ctx, ksqlConfig, sql); } } return numPersistentQueries; }
@Test public void shouldThrowIfTooManyPersistentQueries() { // Given: when(ksqlConfig.getInt(KsqlConfig.KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_CONFIG)).thenReturn(1); givenPersistentQueryCount(2); final List<ParsedStatement> statements = givenParsed( "CREATE STREAM sink AS SELECT * FROM source;" + "CREATE STREAM sink2 as SELECT * FROM sink;" ); // When: final Exception e = assertThrows( KsqlException.class, () -> validator.validate(serviceContext, statements, sessionProperties, "sql") ); // Then: assertThat(e.getMessage(), containsString( "persistent queries to exceed the configured limit")); }
@Override @CacheEvict(cacheNames = RedisKeyConstants.MAIL_TEMPLATE, allEntries = true) // allEntries evicts the whole cache, because the code field may be modified and targeted eviction is impractical public void updateMailTemplate(@Valid MailTemplateSaveReqVO updateReqVO) { // validate that the template exists validateMailTemplateExists(updateReqVO.getId()); // validate that the code is unique validateCodeUnique(updateReqVO.getId(), updateReqVO.getCode()); // update MailTemplateDO updateObj = BeanUtils.toBean(updateReqVO, MailTemplateDO.class) .setParams(parseTemplateContentParams(updateReqVO.getContent())); mailTemplateMapper.updateById(updateObj); }
@Test public void testUpdateMailTemplate_success() { // mock data MailTemplateDO dbMailTemplate = randomPojo(MailTemplateDO.class); mailTemplateMapper.insert(dbMailTemplate); // @Sql: insert an existing record first // prepare parameters MailTemplateSaveReqVO reqVO = randomPojo(MailTemplateSaveReqVO.class, o -> { o.setId(dbMailTemplate.getId()); // set the ID of the record to update }); // invoke mailTemplateService.updateMailTemplate(reqVO); // verify the update is correct MailTemplateDO mailTemplate = mailTemplateMapper.selectById(reqVO.getId()); // fetch the latest record assertPojoEquals(reqVO, mailTemplate); }
@Override public void execute(String commandName, BufferedReader reader, BufferedWriter writer) throws Py4JException, IOException { char subCommand = safeReadLine(reader).charAt(0); String returnCommand = null; if (subCommand == CREATE_VIEW_SUB_COMMAND_NAME) { returnCommand = createJVMView(reader); } else if (subCommand == IMPORT_SUB_COMMAND_NAME) { returnCommand = doImport(reader); } else if (subCommand == REMOVE_IMPORT_SUB_COMMAND_NAME) { returnCommand = removeImport(reader); } else if (subCommand == SEARCH_SUB_COMMAND_NAME) { returnCommand = search(reader); } else { returnCommand = Protocol.getOutputErrorCommand("Unknown JVM View SubCommand Name: " + subCommand); } logger.finest("Returning command: " + returnCommand); writer.write(returnCommand); writer.flush(); }
@Test public void testSubCommands() { String inputCommand1 = JVMViewCommand.CREATE_VIEW_SUB_COMMAND_NAME + "\n" + "custom" + "\ne\n"; String inputCommand2 = JVMViewCommand.IMPORT_SUB_COMMAND_NAME + "\nro0\n" + "java.util.*" + "\ne\n"; String inputCommand3 = JVMViewCommand.IMPORT_SUB_COMMAND_NAME + "\nro0\n" + "java.io.File" + "\ne\n"; String inputCommand4 = JVMViewCommand.REMOVE_IMPORT_SUB_COMMAND_NAME + "\nro0\n" + "java.io.File" + "\ne\n"; String inputCommand5 = JVMViewCommand.REMOVE_IMPORT_SUB_COMMAND_NAME + "\nro0\n" + "java.lang.*" + "\ne\n"; String inputCommand6 = JVMViewCommand.IMPORT_SUB_COMMAND_NAME + "\nrj\n" + "java.util.*" + "\ne\n"; try { command.execute("r", new BufferedReader(new StringReader(inputCommand1)), writer); assertEquals("!yro0\n", sWriter.toString()); JVMView view = (JVMView) gateway.getObject("o0"); command.execute("r", new BufferedReader(new StringReader(inputCommand2)), writer); assertEquals("!yro0\n!yv\n", sWriter.toString()); assertEquals(2, view.getStarImports().size()); // 1 for java.lang, 1 for java.util assertTrue(view.getStarImports().contains("java.util")); command.execute("r", new BufferedReader(new StringReader(inputCommand3)), writer); assertEquals("!yro0\n!yv\n!yv\n", sWriter.toString()); assertTrue(view.getSingleImportsMap().containsKey("File")); assertEquals(1, view.getSingleImportsMap().size()); // 1 for java.io.File // Duplicate command.execute("r", new BufferedReader(new StringReader(inputCommand2)), writer); assertEquals("!yro0\n!yv\n!yv\n!yv\n", sWriter.toString()); assertEquals(2, view.getStarImports().size()); command.execute("r", new BufferedReader(new StringReader(inputCommand3)), writer); assertEquals("!yro0\n!yv\n!yv\n!yv\n!yv\n", sWriter.toString()); assertTrue(view.getSingleImportsMap().containsKey("File")); assertEquals(1, view.getSingleImportsMap().size()); // 1 for java.io.File command.execute("r", new BufferedReader(new StringReader(inputCommand4)), writer); assertEquals("!yro0\n!yv\n!yv\n!yv\n!yv\n!ybtrue\n", sWriter.toString()); assertFalse(view.getSingleImportsMap().containsKey("File")); assertEquals(0, view.getSingleImportsMap().size()); // 1 for java.io.File command.execute("r", new BufferedReader(new StringReader(inputCommand4)), writer); assertEquals("!yro0\n!yv\n!yv\n!yv\n!yv\n!ybtrue\n!ybfalse\n", sWriter.toString()); assertFalse(view.getSingleImportsMap().containsKey("File")); assertEquals(0, view.getSingleImportsMap().size()); // 1 for java.io.File command.execute("r", new BufferedReader(new StringReader(inputCommand5)), writer); assertEquals("!yro0\n!yv\n!yv\n!yv\n!yv\n!ybtrue\n!ybfalse\n!ybtrue\n", sWriter.toString()); assertFalse(view.getStarImports().contains("java.lang.*")); assertEquals(1, view.getStarImports().size()); // 1 for java.io.File command.execute("r", new BufferedReader(new StringReader(inputCommand5)), writer); assertEquals("!yro0\n!yv\n!yv\n!yv\n!yv\n!ybtrue\n!ybfalse\n!ybtrue\n!ybfalse\n", sWriter.toString()); assertFalse(view.getStarImports().contains("java.lang.*")); assertEquals(1, view.getStarImports().size()); // 1 for java.io.File command.execute("r", new BufferedReader(new StringReader(inputCommand6)), writer); assertEquals("!yro0\n!yv\n!yv\n!yv\n!yv\n!ybtrue\n!ybfalse\n!ybtrue\n!ybfalse\n!yv\n", sWriter.toString()); assertFalse(gateway.getDefaultJVMView().getStarImports().contains("java.util.*")); assertEquals(2, gateway.getDefaultJVMView().getStarImports().size()); // 1 for java.io.File } catch (Exception e) { e.printStackTrace(); fail(); } }
@Input @Optional public String getContainerizingMode() { String property = System.getProperty(PropertyNames.CONTAINERIZING_MODE); return property != null ? property : containerizingMode.get(); }
@Test public void testContainerizingMode() { assertThat(testJibExtension.getContainerizingMode()).isEqualTo("exploded"); }
public String destinationURL(File rootPath, File file) { return destinationURL(rootPath, file, getSrc(), getDest()); }
@Test public void shouldProvideAppendFilePathToDestWhenPathMatchingAtTheRoot() { ArtifactPlan artifactPlan = new ArtifactPlan(ArtifactPlanType.file, "*.jar", "logs"); assertThat(artifactPlan.destinationURL(new File("pipelines/pipelineA"), new File("pipelines/pipelineA/a.jar"))).isEqualTo("logs"); }
public static <T> T toObj(byte[] json, Class<T> cls) { try { return mapper.readValue(json, cls); } catch (Exception e) { throw new NacosDeserializationException(cls, e); } }
@Test void testToObjFromBytes() { String json = "{\"code\":0,\"data\":{\"string\":\"你好,中国!\",\"integer\":999}}"; RestResult<Map<String, Object>> restResult = JacksonUtils.toObj(json, RestResult.class); assertEquals(0, restResult.getCode()); assertEquals("你好,中国!", restResult.getData().get("string")); assertEquals(999, restResult.getData().get("integer")); restResult = JacksonUtils.toObj(json, new TypeReference<RestResult<Map<String, Object>>>() { }); assertEquals(0, restResult.getCode()); assertEquals("你好,中国!", restResult.getData().get("string")); assertEquals(999, restResult.getData().get("integer")); }
public DoubleArrayAsIterable usingTolerance(double tolerance) { return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject()); }
@Test public void usingTolerance_containsExactly_primitiveDoubleArray_inOrder_failure() { expectFailureWhenTestingThat(array(1.1, TOLERABLE_2POINT2, 3.3)) .usingTolerance(DEFAULT_TOLERANCE) .containsExactly(array(2.2, 1.1, 3.3)) .inOrder(); assertFailureKeys( "value of", "contents match, but order was wrong", "expected", "testing whether", "but was"); assertFailureValue("expected", "[2.2, 1.1, 3.3]"); }
public Set<GsonUser> getAllGroupMembers(String gitlabUrl, String token, String groupId) { return getMembers(gitlabUrl, token, format(GITLAB_GROUPS_MEMBERS_ENDPOINT + "/all", groupId)); }
@Test public void getAllGroupMembers_whenCallIsSuccessful_deserializesAndReturnsCorrectlyGroupMembers() throws IOException { ArgumentCaptor<Function<String, List<GsonUser>>> deserializerCaptor = ArgumentCaptor.forClass(Function.class); String token = "token-toto"; GitlabToken gitlabToken = new GitlabToken(token); List<GsonUser> expectedGroupMembers = expectedGroupMembers(); when(gitlabPaginatedHttpClient.get(eq(gitlabUrl), eq(gitlabToken), eq("/groups/42/members/all"), deserializerCaptor.capture())).thenReturn(expectedGroupMembers); Set<GsonUser> actualGroupMembers = underTest.getAllGroupMembers(gitlabUrl, token, "42"); assertThat(actualGroupMembers).containsExactlyInAnyOrderElementsOf(expectedGroupMembers); String responseContent = getResponseContent("group-members-full-response.json"); List<GsonUser> deserializedUsers = deserializerCaptor.getValue().apply(responseContent); assertThat(deserializedUsers).usingRecursiveComparison().isEqualTo(expectedGroupMembers); }
public Page<Organization> searchAllOrganizations(Organization org, int pageIndex, int pageSize) { OrganizationRole orgRole = org.getOrganizationRoles().isEmpty() ? new OrganizationRole() : org.getOrganizationRoles().get(0); return organizationRepository.searchAll(org, orgRole, PageRequest.of(pageIndex, pageSize)); }
@Test public void searchAllOrganizations() { when(repositoryMock.searchAll(any(Organization.class), any(OrganizationRole.class), any(Pageable.class))).thenReturn(getPageOrganizations()); Page<Organization> result = organizationServiceMock.searchAllOrganizations(newOrganization(), 1, 10); verify(repositoryMock, times(1)).searchAll(any(Organization.class), any(OrganizationRole.class), any(Pageable.class)); assertNotNull(result); }
public static void preserve(FileSystem targetFS, Path path, CopyListingFileStatus srcFileStatus, EnumSet<FileAttribute> attributes, boolean preserveRawXattrs) throws IOException { // strip out those attributes we don't need any more attributes.remove(FileAttribute.BLOCKSIZE); attributes.remove(FileAttribute.CHECKSUMTYPE); // If not preserving anything from FileStatus, don't bother fetching it. FileStatus targetFileStatus = attributes.isEmpty() ? null : targetFS.getFileStatus(path); String group = targetFileStatus == null ? null : targetFileStatus.getGroup(); String user = targetFileStatus == null ? null : targetFileStatus.getOwner(); boolean chown = false; if (attributes.contains(FileAttribute.ACL)) { List<AclEntry> srcAcl = srcFileStatus.getAclEntries(); List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus); if (!srcAcl.equals(targetAcl)) { targetFS.removeAcl(path); targetFS.setAcl(path, srcAcl); } // setAcl doesn't preserve sticky bit, so also call setPermission if needed. if (srcFileStatus.getPermission().getStickyBit() != targetFileStatus.getPermission().getStickyBit()) { targetFS.setPermission(path, srcFileStatus.getPermission()); } } else if (attributes.contains(FileAttribute.PERMISSION) && !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) { targetFS.setPermission(path, srcFileStatus.getPermission()); } final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR); if (preserveXAttrs || preserveRawXattrs) { final String rawNS = StringUtils.toLowerCase(XAttr.NameSpace.RAW.name()); Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs(); Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path); if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) { for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) { String xattrName = entry.getKey(); if (xattrName.startsWith(rawNS) || preserveXAttrs) { targetFS.setXAttr(path, xattrName, entry.getValue()); } } } } // The replication factor can only be preserved for replicated files. // It is ignored when either the source or target file are erasure coded. if (attributes.contains(FileAttribute.REPLICATION) && !targetFileStatus.isDirectory() && !targetFileStatus.isErasureCoded() && !srcFileStatus.isErasureCoded() && srcFileStatus.getReplication() != targetFileStatus.getReplication()) { targetFS.setReplication(path, srcFileStatus.getReplication()); } if (attributes.contains(FileAttribute.GROUP) && !group.equals(srcFileStatus.getGroup())) { group = srcFileStatus.getGroup(); chown = true; } if (attributes.contains(FileAttribute.USER) && !user.equals(srcFileStatus.getOwner())) { user = srcFileStatus.getOwner(); chown = true; } if (chown) { targetFS.setOwner(path, user, group); } if (attributes.contains(FileAttribute.TIMES)) { targetFS.setTimes(path, srcFileStatus.getModificationTime(), srcFileStatus.getAccessTime()); } }
@Test public void testPreserveTimestampOnDirectory() throws IOException { FileSystem fs = FileSystem.get(config); EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES); Path dst = new Path("/tmp/abc"); Path src = new Path("/tmp/src"); createDirectory(fs, src); createDirectory(fs, dst); fs.setPermission(src, fullPerm); fs.setOwner(src, "somebody", "somebody-group"); fs.setTimes(src, 0, 0); fs.setPermission(dst, noPerm); fs.setOwner(dst, "nobody", "nobody-group"); fs.setTimes(dst, 100, 100); CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src)); DistCpUtils.preserve(fs, dst, srcStatus, attributes, false); CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst)); // FileStatus.equals only compares path field, must explicitly compare all fields Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission())); Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner())); Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup())); Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime()); Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime()); }
@Override public Set<StoreBuilder<?>> stores() { return transformerSupplier.stores(); }
@Test public void shouldCallStoresOfAdaptedTransformerSupplier() { when(transformerSupplier.stores()).thenReturn(stores); final TransformerSupplierAdapter<String, String, Integer, Integer> adapter = new TransformerSupplierAdapter<>(transformerSupplier); adapter.stores(); }
@Override public BasicTypeDefine<MysqlType> reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.<MysqlType>builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case NULL: builder.nativeType(MysqlType.NULL); builder.columnType(MYSQL_NULL); builder.dataType(MYSQL_NULL); break; case BOOLEAN: builder.nativeType(MysqlType.BOOLEAN); builder.columnType(String.format("%s(%s)", MYSQL_TINYINT, 1)); builder.dataType(MYSQL_TINYINT); builder.length(1L); break; case TINYINT: builder.nativeType(MysqlType.TINYINT); builder.columnType(MYSQL_TINYINT); builder.dataType(MYSQL_TINYINT); break; case SMALLINT: builder.nativeType(MysqlType.SMALLINT); builder.columnType(MYSQL_SMALLINT); builder.dataType(MYSQL_SMALLINT); break; case INT: builder.nativeType(MysqlType.INT); builder.columnType(MYSQL_INT); builder.dataType(MYSQL_INT); break; case BIGINT: builder.nativeType(MysqlType.BIGINT); builder.columnType(MYSQL_BIGINT); builder.dataType(MYSQL_BIGINT); break; case FLOAT: builder.nativeType(MysqlType.FLOAT); builder.columnType(MYSQL_FLOAT); builder.dataType(MYSQL_FLOAT); break; case DOUBLE: builder.nativeType(MysqlType.DOUBLE); builder.columnType(MYSQL_DOUBLE); builder.dataType(MYSQL_DOUBLE); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.nativeType(MysqlType.DECIMAL); builder.columnType(String.format("%s(%s,%s)", MYSQL_DECIMAL, precision, scale)); builder.dataType(MYSQL_DECIMAL); builder.precision(precision); builder.scale(scale); break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.nativeType(MysqlType.VARBINARY); builder.columnType( String.format("%s(%s)", MYSQL_VARBINARY, MAX_VARBINARY_LENGTH / 2)); builder.dataType(MYSQL_VARBINARY); } else if (column.getColumnLength() < MAX_VARBINARY_LENGTH) { builder.nativeType(MysqlType.VARBINARY); builder.columnType( String.format("%s(%s)", MYSQL_VARBINARY, column.getColumnLength())); builder.dataType(MYSQL_VARBINARY); } else if (column.getColumnLength() < POWER_2_24) { builder.nativeType(MysqlType.MEDIUMBLOB); builder.columnType(MYSQL_MEDIUMBLOB); builder.dataType(MYSQL_MEDIUMBLOB); } else { builder.nativeType(MysqlType.LONGBLOB); builder.columnType(MYSQL_LONGBLOB); builder.dataType(MYSQL_LONGBLOB); } break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.nativeType(MysqlType.LONGTEXT); builder.columnType(MYSQL_LONGTEXT); builder.dataType(MYSQL_LONGTEXT); } else if (column.getColumnLength() < POWER_2_8) { builder.nativeType(MysqlType.VARCHAR); builder.columnType( String.format("%s(%s)", MYSQL_VARCHAR, column.getColumnLength())); builder.dataType(MYSQL_VARCHAR); } else if (column.getColumnLength() < POWER_2_16) { builder.nativeType(MysqlType.TEXT); builder.columnType(MYSQL_TEXT); builder.dataType(MYSQL_TEXT); } else if (column.getColumnLength() < POWER_2_24) { builder.nativeType(MysqlType.MEDIUMTEXT); builder.columnType(MYSQL_MEDIUMTEXT); builder.dataType(MYSQL_MEDIUMTEXT); } else { builder.nativeType(MysqlType.LONGTEXT); builder.columnType(MYSQL_LONGTEXT); builder.dataType(MYSQL_LONGTEXT); } break; case DATE: builder.nativeType(MysqlType.DATE); builder.columnType(MYSQL_DATE); builder.dataType(MYSQL_DATE); break; case TIME: builder.nativeType(MysqlType.TIME); builder.dataType(MYSQL_TIME); if (version.isAtOrBefore(MySqlVersion.V_5_5)) { builder.columnType(MYSQL_TIME); } else if (column.getScale() != null && column.getScale() > 0) { int timeScale = column.getScale(); if (timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to time({})", column.getName(), column.getScale(), MAX_SCALE, timeScale); } builder.columnType(String.format("%s(%s)", MYSQL_TIME, timeScale)); builder.scale(timeScale); } else { builder.columnType(MYSQL_TIME); } break; case TIMESTAMP: builder.nativeType(MysqlType.DATETIME); builder.dataType(MYSQL_DATETIME); if (version.isAtOrBefore(MySqlVersion.V_5_5)) { builder.columnType(MYSQL_DATETIME); } else if (column.getScale() != null && column.getScale() > 0) { int timestampScale = column.getScale(); if (timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType(String.format("%s(%s)", MYSQL_DATETIME, timestampScale)); builder.scale(timestampScale); } else { builder.columnType(MYSQL_DATETIME); } break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.MYSQL, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
@Test public void testReconvertString() { Column column = PhysicalColumn.builder() .name("test") .dataType(BasicType.STRING_TYPE) .columnLength(null) .build(); BasicTypeDefine<MysqlType> typeDefine = MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(MysqlType.LONGTEXT, typeDefine.getNativeType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_LONGTEXT, typeDefine.getColumnType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_LONGTEXT, typeDefine.getDataType()); column = PhysicalColumn.builder() .name("test") .dataType(BasicType.STRING_TYPE) .columnLength(255L) .build(); typeDefine = MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(MysqlType.VARCHAR, typeDefine.getNativeType()); Assertions.assertEquals( String.format("%s(%s)", MySqlTypeConverter.MYSQL_VARCHAR, column.getColumnLength()), typeDefine.getColumnType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_VARCHAR, typeDefine.getDataType()); column = PhysicalColumn.builder() .name("test") .dataType(BasicType.STRING_TYPE) .columnLength(65535L) .build(); typeDefine = MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(MysqlType.TEXT, typeDefine.getNativeType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_TEXT, typeDefine.getColumnType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_TEXT, typeDefine.getDataType()); column = PhysicalColumn.builder() .name("test") .dataType(BasicType.STRING_TYPE) .columnLength(16777215L) .build(); typeDefine = MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(MysqlType.MEDIUMTEXT, typeDefine.getNativeType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_MEDIUMTEXT, typeDefine.getColumnType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_MEDIUMTEXT, typeDefine.getDataType()); column = PhysicalColumn.builder() .name("test") .dataType(BasicType.STRING_TYPE) .columnLength(4294967295L) .build(); typeDefine = MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(MysqlType.LONGTEXT, typeDefine.getNativeType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_LONGTEXT, typeDefine.getColumnType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_LONGTEXT, typeDefine.getDataType()); }
@PutMapping @Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE, signType = SignType.CONSOLE) public Result<Boolean> editNamespace(NamespaceForm namespaceForm) throws NacosException { namespaceForm.validate(); // contains illegal chars if (!namespaceNameCheckPattern.matcher(namespaceForm.getNamespaceName()).matches()) { throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE, "namespaceName [" + namespaceForm.getNamespaceName() + "] contains illegal char"); } return Result.success(namespaceOperationService .editNamespace(namespaceForm.getNamespaceId(), namespaceForm.getNamespaceName(), namespaceForm.getNamespaceDesc())); }
@Test void testEditNamespace() throws NacosException { when(namespaceOperationService.editNamespace(TEST_NAMESPACE_ID, TEST_NAMESPACE_NAME, TEST_NAMESPACE_DESC)).thenReturn(true); Result<Boolean> result = namespaceControllerV2.editNamespace( new NamespaceForm(TEST_NAMESPACE_ID, TEST_NAMESPACE_NAME, TEST_NAMESPACE_DESC)); verify(namespaceOperationService).editNamespace(TEST_NAMESPACE_ID, TEST_NAMESPACE_NAME, TEST_NAMESPACE_DESC); assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode()); assertTrue(result.getData()); }
@Override public List<IndexSegment> getSegments() { List<IndexSegment> segments = new ArrayList<>(_segmentDataManagers.size()); for (SegmentDataManager segmentDataManager : _segmentDataManagers) { if (segmentDataManager.getReferenceCount() > 0) { segments.add(segmentDataManager.getSegment()); } } return segments; }
@Test public void testGetSegments() { SegmentDataManager sdm1 = mockSegmentDataManager("seg01", false, 1); SegmentDataManager sdm2 = mockSegmentDataManager("seg01", true, 1); DuoSegmentDataManager dsdm = new DuoSegmentDataManager(sdm1, sdm2); assertTrue(dsdm.hasMultiSegments()); assertSame(dsdm.getSegment(), sdm1.getSegment()); assertEquals(dsdm.getSegments(), Arrays.asList(sdm1.getSegment(), sdm2.getSegment())); when(sdm1.getReferenceCount()).thenReturn(0); assertTrue(dsdm.hasMultiSegments()); assertSame(dsdm.getSegment(), sdm1.getSegment()); assertEquals(dsdm.getSegments(), Collections.singletonList(sdm2.getSegment())); when(sdm2.getReferenceCount()).thenReturn(0); assertTrue(dsdm.hasMultiSegments()); assertSame(dsdm.getSegment(), sdm1.getSegment()); assertTrue(dsdm.getSegments().isEmpty()); }
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) { return ConfigInstanceUtil.getNewInstance(clazz, configId, this); }
@Test public void test_map_of_struct() { Slime slime = new Slime(); Cursor map = slime.setObject().setObject("innermap"); map.setObject("one").setLong("foo", 1); map.setObject("two").setLong("foo", 2); MaptypesConfig config = new ConfigPayload(slime).toInstance(MaptypesConfig.class, ""); assertThat(config.innermap("one").foo(), is(1)); assertThat(config.innermap("two").foo(), is(2)); }
@VisibleForTesting List<String> getEntityTypes() throws IOException { LeveldbIterator iterator = null; try { iterator = getDbIterator(false); List<String> entityTypes = new ArrayList<String>(); iterator.seek(ENTITY_ENTRY_PREFIX); while (iterator.hasNext()) { byte[] key = iterator.peekNext().getKey(); if (key[0] != ENTITY_ENTRY_PREFIX[0]) { break; } KeyParser kp = new KeyParser(key, ENTITY_ENTRY_PREFIX.length); String entityType = kp.getNextString(); entityTypes.add(entityType); byte[] lookupKey = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX) .add(entityType).getBytesForLookup(); if (lookupKey[lookupKey.length - 1] != 0x0) { throw new IOException("Found unexpected end byte in lookup key"); } lookupKey[lookupKey.length - 1] = 0x1; iterator.seek(lookupKey); } return entityTypes; } catch(DBException e) { throw new IOException(e); } finally { IOUtils.cleanupWithLogger(LOG, iterator); } }
@Test void testGetEntityTypes() throws IOException { List<String> entityTypes = ((LeveldbTimelineStore) store).getEntityTypes(); assertEquals(7, entityTypes.size()); assertEquals("ACL_ENTITY_TYPE_1", entityTypes.get(0)); assertEquals("OLD_ENTITY_TYPE_1", entityTypes.get(1)); assertEquals(entityType1, entityTypes.get(2)); assertEquals(entityType2, entityTypes.get(3)); assertEquals(entityType4, entityTypes.get(4)); assertEquals(entityType5, entityTypes.get(5)); }
public static StructType convert(Schema schema) { return (StructType) TypeUtil.visit(schema, new TypeToSparkType()); }
@Test public void testSchemaConversionWithMetaDataColumnSchema() { StructType structType = SparkSchemaUtil.convert(TEST_SCHEMA_WITH_METADATA_COLS); List<AttributeReference> attrRefs = scala.collection.JavaConverters.seqAsJavaList(structType.toAttributes()); for (AttributeReference attrRef : attrRefs) { if (MetadataColumns.isMetadataColumn(attrRef.name())) { Assert.assertTrue( "metadata columns should have __metadata_col in attribute metadata", MetadataAttribute.unapply(attrRef).isDefined()); } else { Assert.assertFalse( "non metadata columns should not have __metadata_col in attribute metadata", MetadataAttribute.unapply(attrRef).isDefined()); } } }
@Override public List<String> getServices() { if (!isRegistered.get()) { LOGGER.warning("Query instance must be at the stage that finish registry!"); return Collections.emptyList(); } return RegisterManager.INSTANCE.getServices(); }
@Test public void getServices() { Assert.assertTrue(registerCenterService.getServices().isEmpty()); }
@Override public Long zLexCount(byte[] key, org.springframework.data.domain.Range range) { String min = value(range.getLowerBound(), "-"); String max = value(range.getUpperBound(), "+"); return read(key, StringCodec.INSTANCE, ZLEXCOUNT, key, min, max); }
@Test public void testZLexCount() { StringRedisTemplate redisTemplate = new StringRedisTemplate(); redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson)); redisTemplate.afterPropertiesSet(); redisTemplate.boundZSetOps("test").add("1", 10); redisTemplate.boundZSetOps("test").add("2", 20); redisTemplate.boundZSetOps("test").add("3", 30); Long size = redisTemplate.boundZSetOps("test").lexCount(Range.closed("1", "2")); assertThat(size).isEqualTo(2); }
@Override public List<E> subList(int fromIndex, int toIndex) { return Collections.unmodifiableList(underlying).subList(fromIndex, toIndex); }
@Test public void testSubList() { BoundedList<Integer> list = BoundedList.newArrayBacked(3); list.add(1); list.add(2); list.add(3); assertEquals(Arrays.asList(2), list.subList(1, 2)); assertThrows(UnsupportedOperationException.class, () -> list.subList(1, 2).remove(2)); }
@Override public String pluginNamed() { return PluginEnum.DUBBO.getName(); }
@Test public void pluginNamed() { assertThat(handler.pluginNamed(), is(PluginEnum.DUBBO.getName())); }
public T runWithDelay() throws Exception { try { return execute(); } catch (Exception e) { if (e.getClass().equals(retryExceptionType)) { tries++; if (MAX_RETRIES == tries) { throw e; } else { Thread.sleep((long) DELAY * tries); return runWithDelay(); } } else { throw e; } } }
@Test public void testRetryFailureWithDelay() { Retry<Void> retriable = new Retry<Void>(NullPointerException.class) { @Override public Void execute() { throw new RuntimeException(); } }; try { retriable.runWithDelay(); Assert.fail(); } catch (Exception e) { Assert.assertEquals(RuntimeException.class, e.getClass()); } }
@Override public void start() { boolean hasExternalPlugins = pluginRepository.getPlugins().stream().anyMatch(plugin -> plugin.getType().equals(PluginType.EXTERNAL)); try (DbSession session = dbClient.openSession(false)) { PropertyDto property = Optional.ofNullable(dbClient.propertiesDao().selectGlobalProperty(session, PLUGINS_RISK_CONSENT)) .orElse(defaultPluginRiskConsentProperty()); if (hasExternalPlugins && NOT_ACCEPTED == PluginRiskConsent.valueOf(property.getValue())) { addWarningInSonarDotLog(); property.setValue(REQUIRED.name()); dbClient.propertiesDao().saveProperty(session, property); session.commit(); } else if (!hasExternalPlugins && REQUIRED == PluginRiskConsent.valueOf(property.getValue())) { dbClient.propertiesDao().deleteGlobalProperty(PLUGINS_RISK_CONSENT, session); session.commit(); } } }
@Test public void consent_does_not_change_when_value_is_required() { setupExternalPluginConsent(REQUIRED); setupExternalPlugin(); underTest.start(); assertThat(dbClient.propertiesDao().selectGlobalProperty(PLUGINS_RISK_CONSENT)) .extracting(PropertyDto::getValue) .isEqualTo(REQUIRED.name()); }
@Override public boolean skip(final ServerWebExchange exchange) { return skipExcept(exchange, RpcTypeEnum.MOTAN); }
@Test public void testSkip() { final boolean result = motanPlugin.skip(exchange); Assertions.assertTrue(result); }
public final Strictness getStrictness() { return strictness; }
@Test public void testDefaultStrictness() { JsonReader reader = new JsonReader(reader("{}")); assertThat(reader.getStrictness()).isEqualTo(Strictness.LEGACY_STRICT); }
public static ComputeStepSyntaxElement<SplitDataset> splitDataset( final Collection<Dataset> parents, final EventCondition condition) { final ClassFields fields = new ClassFields(); final ValueSyntaxElement ifData = fields.add(new ArrayList<>()); final ValueSyntaxElement elseData = fields.add(new ArrayList<>()); final ValueSyntaxElement right = fields.add(DatasetCompiler.Complement.class); final VariableDefinition event = new VariableDefinition(JrubyEventExtLibrary.RubyEvent.class, "event"); fields.addAfterInit( Closure.wrap( SyntaxFactory.assignment( right, SyntaxFactory.cast( DatasetCompiler.Complement.class, SyntaxFactory.constant( DatasetCompiler.class, DatasetCompiler.Complement.class.getSimpleName() ).call("from", SyntaxFactory.identifier("this"), elseData) ) ) ) ); final ValueSyntaxElement conditionField = fields.add(condition); final DatasetCompiler.ComputeAndClear compute; if (parents.isEmpty()) { compute = withOutputBuffering( conditionalLoop(event, BATCH_ARG, conditionField, ifData, elseData), Closure.wrap(clear(elseData)), ifData, fields ); } else { final Collection<ValueSyntaxElement> parentFields = parents.stream().map(fields::add).collect(Collectors.toList()); final ValueSyntaxElement inputBuffer = fields.add(new ArrayList<>()); compute = withOutputBuffering( withInputBuffering( conditionalLoop(event, inputBuffer, conditionField, ifData, elseData), parentFields, inputBuffer ), clearSyntax(parentFields).add(clear(elseData)), ifData, fields ); } return ComputeStepSyntaxElement.create( Arrays.asList(compute.compute(), compute.clear(), MethodSyntaxElement.right(right)), compute.fields(), SplitDataset.class ); }
@Test public void compilesSplitDataset() { final FieldReference key = FieldReference.from("foo"); final SplitDataset left = DatasetCompiler.splitDataset( Collections.emptyList(), event -> event.getEvent().includes(key) ).instantiate(); final Event trueEvent = new Event(); trueEvent.setField(key, "val"); final JrubyEventExtLibrary.RubyEvent falseEvent = JrubyEventExtLibrary.RubyEvent.newRubyEvent(RubyUtil.RUBY, new Event()); final Dataset right = left.right(); @SuppressWarnings("rawtypes") final RubyArray batch = RubyUtil.RUBY.newArray( JrubyEventExtLibrary.RubyEvent.newRubyEvent(RubyUtil.RUBY, trueEvent), falseEvent ); assertThat(left.compute(batch, false, false).size(), is(1)); assertThat(right.compute(batch, false, false).size(), is(1)); }
@Override public String getString(int rowIndex, int columnIndex) { if (columnIndex != 0) { throw new IllegalArgumentException("Column index must always be 0 for aggregation result sets"); } if (rowIndex != 0) { throw new IllegalArgumentException("Row index must always be 0 for aggregation result sets"); } return _jsonObject.get("value").asText(); }
@Test(expectedExceptions = IllegalArgumentException.class) public void testGetStringForNonZeroRow() { // Run the test _aggregationResultSetUnderTest.getString(1, 0); }
@Override protected String toHtmlDisplay(Element element, String query) { String label = element.getLabel(); int index = label.toLowerCase().indexOf(query.toLowerCase()); String before = label.substring(0, index); String match = label.substring(index, index + query.length()); String after = label.substring(index + query.length()); return NbBundle.getMessage(FuzzyElementLabelSearchProvider.class, "FuzzyElementLabelSearchProvider.result", before, match, after); }
@Test public void testHtmlMiddle() { Mockito.when(node.getLabel()).thenReturn("foobar"); Assert.assertTrue( new FuzzyElementLabelSearchProvider().toHtmlDisplay(node, "oo").contains("f<b>oo</b>bar")); }
public static <T> Write<T> write() { return new AutoValue_SnsIO_Write.Builder<T>() .setClientConfiguration(ClientConfiguration.builder().build()) .build(); }
@Test public void testWriteWithoutTopicArn() { List<String> input = ImmutableList.of("message1", "message2"); when(sns.publish(any(PublishRequest.class))) .thenReturn(PublishResponse.builder().messageId("id").build()); Write<String> snsWrite = SnsIO.<String>write().withPublishRequestBuilder(msg -> requestBuilder(msg, topicArn)); PCollection<PublishResponse> results = p.apply(Create.of(input)).apply(snsWrite); PAssert.that(results.apply(Count.globally())).containsInAnyOrder(2L); p.run(); verify(sns, times(0)).getTopicAttributes(any(Consumer.class)); for (String msg : input) { verify(sns).publish(requestBuilder(msg, topicArn).build()); } }
public static void copyBody(Message source, Message target) { // Preserve the DataType if both messages are DataTypeAware if (source.hasTrait(MessageTrait.DATA_AWARE)) { target.setBody(source.getBody()); target.setPayloadForTrait(MessageTrait.DATA_AWARE, source.getPayloadForTrait(MessageTrait.DATA_AWARE)); return; } target.setBody(source.getBody()); }
@Test void shouldCopyBodyIfBothNotDataTypeAware() { Object body = new Object(); Message m1 = new MyMessageType(body); Message m2 = new MyMessageType(new Object()); copyBody(m1, m2); assertSame(body, m2.getBody()); }
public static String computeQueryHash(String query) { requireNonNull(query, "query is null"); if (query.isEmpty()) { return ""; } byte[] queryBytes = query.getBytes(UTF_8); long queryHash = new XxHash64().update(queryBytes).hash(); return toHexString(queryHash); }
@Test public void testComputeQueryHash() { String query = "SELECT * FROM CUSTOMER LIMIT 5"; assertEquals(computeQueryHash(query), "7f3325a942b43504"); }
@Override public void upgrade() { if (hasBeenRunSuccessfully()) { LOG.debug("Migration already completed."); return; } final Map<String, String> savedSearchToViewsMap = new HashMap<>(); final Map<View, Search> newViews = this.savedSearchService.streamAll() .map(savedSearch -> { final Map.Entry<View, Search> newView = migrateSavedSearch(savedSearch); savedSearchToViewsMap.put(savedSearch.id(), newView.getKey().id()); return newView; }) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); newViews.forEach((view, search) -> { viewService.save(view); searchService.save(search); }); final MigrationCompleted migrationCompleted = MigrationCompleted.create(savedSearchToViewsMap); writeMigrationCompleted(migrationCompleted); }
@Test @MongoDBFixtures("sample_saved_search_keyword_with_interval_field.json") public void migrateSavedSearchKeywordWithIntervalField() throws Exception { this.migration.upgrade(); final MigrationCompleted migrationCompleted = captureMigrationCompleted(); assertThat(migrationCompleted.savedSearchIds()) .containsExactly(new AbstractMap.SimpleEntry<>("5de660c6b2d44b5813c1d806", "000000020000000000000000")); assertViewServiceCreatedViews(1, resourceFile("sample_saved_search_keyword_with_interval_field-expected_views.json")); assertSearchServiceCreated(1, resourceFile("sample_saved_search_keyword_with_interval_field-expected_searches.json")); }
static boolean apply(@Nullable HttpStatus httpStatus) { if (Objects.isNull(httpStatus)) { return false; } RpcEnhancementReporterProperties reportProperties; try { reportProperties = ApplicationContextAwareUtils.getApplicationContext() .getBean(RpcEnhancementReporterProperties.class); } catch (BeansException e) { LOG.error("get RpcEnhancementReporterProperties bean err", e); reportProperties = new RpcEnhancementReporterProperties(); } // statuses > series List<HttpStatus> status = reportProperties.getStatuses(); if (status.isEmpty()) { List<HttpStatus.Series> series = reportProperties.getSeries(); // Check INTERNAL_SERVER_ERROR (500) status. if (reportProperties.isIgnoreInternalServerError() && Objects.equals(httpStatus, INTERNAL_SERVER_ERROR)) { return false; } if (series.isEmpty()) { return HTTP_STATUSES.contains(httpStatus); } return series.contains(httpStatus.series()); } // Use the user-specified fuse status code. return status.contains(httpStatus); }
@Test public void testApplyWithIgnoreInternalServerError() { RpcEnhancementReporterProperties properties = new RpcEnhancementReporterProperties(); // Mock Condition properties.getStatuses().clear(); properties.setIgnoreInternalServerError(true); ApplicationContext applicationContext = mock(ApplicationContext.class); doReturn(properties) .when(applicationContext).getBean(RpcEnhancementReporterProperties.class); mockedApplicationContextAwareUtils.when(ApplicationContextAwareUtils::getApplicationContext) .thenReturn(applicationContext); // Assert assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.OK)).isEqualTo(false); assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.INTERNAL_SERVER_ERROR)).isEqualTo(false); assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.BAD_GATEWAY)).isEqualTo(true); }
static void setStaticGetter(final RegressionCompilationDTO compilationDTO, final LinkedHashMap<String, KiePMMLTableSourceCategory> regressionTablesMap, final MethodDeclaration staticGetterMethod, final String variableName) { final BlockStmt classificationTableBody = staticGetterMethod.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, staticGetterMethod))); final VariableDeclarator variableDeclarator = getVariableDeclarator(classificationTableBody, TO_RETURN).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, TO_RETURN, classificationTableBody))); final BlockStmt newBody = new BlockStmt(); final Map<String, Expression> regressionTableCategoriesMap = new LinkedHashMap<>(); regressionTablesMap.forEach((className, tableSourceCategory) -> { MethodCallExpr methodCallExpr = new MethodCallExpr(); methodCallExpr.setScope(new NameExpr(className)); methodCallExpr.setName(KiePMMLRegressionTableFactory.GETKIEPMML_TABLE); regressionTableCategoriesMap.put(tableSourceCategory.getCategory(), methodCallExpr); }); // populate map String categoryTableMapName = String.format(VARIABLE_NAME_TEMPLATE, CATEGORICAL_TABLE_MAP, variableName); createPopulatedLinkedHashMap(newBody, categoryTableMapName, Arrays.asList(String.class.getSimpleName(), KiePMMLRegressionTable.class.getName()), regressionTableCategoriesMap); final MethodCallExpr initializer = variableDeclarator.getInitializer() .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, TO_RETURN, classificationTableBody))) .asMethodCallExpr(); final MethodCallExpr builder = getChainedMethodCallExprFrom("builder", initializer); builder.setArgument(0, new StringLiteralExpr(variableName)); final REGRESSION_NORMALIZATION_METHOD regressionNormalizationMethod = compilationDTO.getDefaultREGRESSION_NORMALIZATION_METHOD(); getChainedMethodCallExprFrom("withRegressionNormalizationMethod", initializer).setArgument(0, new NameExpr(regressionNormalizationMethod.getClass().getSimpleName() + "." + regressionNormalizationMethod.name())); OP_TYPE opType = compilationDTO.getOP_TYPE(); getChainedMethodCallExprFrom("withOpType", initializer).setArgument(0, new NameExpr(opType.getClass().getSimpleName() + "." + opType.name())); getChainedMethodCallExprFrom("withCategoryTableMap", initializer).setArgument(0, new NameExpr(categoryTableMapName)); boolean isBinary = compilationDTO.isBinary(regressionTablesMap.size()); final Expression probabilityMapFunctionExpression = getProbabilityMapFunctionExpression(compilationDTO.getModelNormalizationMethod(), isBinary); getChainedMethodCallExprFrom("withProbabilityMapFunction", initializer).setArgument(0, probabilityMapFunctionExpression); getChainedMethodCallExprFrom("withIsBinary", initializer).setArgument(0, getExpressionForObject(isBinary)); getChainedMethodCallExprFrom("withTargetField", initializer).setArgument(0, getExpressionForObject(compilationDTO.getTargetFieldName())); getChainedMethodCallExprFrom("withTargetCategory", initializer).setArgument(0, getExpressionForObject(null)); classificationTableBody.getStatements().forEach(newBody::addStatement); staticGetterMethod.setBody(newBody); }
@Test
void setStaticGetter() throws IOException {
    String variableName = "variableName";
    RegressionTable regressionTableProf = getRegressionTable(3.5, "professional");
    RegressionTable regressionTableCler = getRegressionTable(27.4, "clerical");
    OutputField outputFieldCat = getOutputField("CAT-1", ResultFeature.PROBABILITY, "CatPred-1");
    OutputField outputFieldNum = getOutputField("NUM-1", ResultFeature.PROBABILITY, "NumPred-0");
    OutputField outputFieldPrev = getOutputField("PREV", ResultFeature.PREDICTED_VALUE, null);
    String targetField = "targetField";
    DataField dataField = new DataField();
    dataField.setName(targetField);
    dataField.setOpType(OpType.CATEGORICAL);
    DataDictionary dataDictionary = new DataDictionary();
    dataDictionary.addDataFields(dataField);
    RegressionModel regressionModel = new RegressionModel();
    regressionModel.setNormalizationMethod(RegressionModel.NormalizationMethod.CAUCHIT);
    regressionModel.addRegressionTables(regressionTableProf, regressionTableCler);
    regressionModel.setModelName(getGeneratedClassName("RegressionModel"));
    Output output = new Output();
    output.addOutputFields(outputFieldCat, outputFieldNum, outputFieldPrev);
    regressionModel.setOutput(output);
    MiningField miningField = new MiningField();
    miningField.setUsageType(MiningField.UsageType.TARGET);
    miningField.setName(dataField.getName());
    MiningSchema miningSchema = new MiningSchema();
    miningSchema.addMiningFields(miningField);
    regressionModel.setMiningSchema(miningSchema);
    PMML pmml = new PMML();
    pmml.setDataDictionary(dataDictionary);
    pmml.addModels(regressionModel);
    final CommonCompilationDTO<RegressionModel> source =
            CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, regressionModel,
                                                                   new PMMLCompilationContextMock(), "FILENAME");
    final RegressionCompilationDTO compilationDTO =
            RegressionCompilationDTO.fromCompilationDTORegressionTablesAndNormalizationMethod(source,
                                                                                              regressionModel.getRegressionTables(),
                                                                                              regressionModel.getNormalizationMethod());
    final LinkedHashMap<String, KiePMMLTableSourceCategory> regressionTablesMap = new LinkedHashMap<>();
    regressionModel.getRegressionTables().forEach(regressionTable -> {
        String key = "defpack." + regressionTable.getTargetCategory().toString().toUpperCase();
        KiePMMLTableSourceCategory value = new KiePMMLTableSourceCategory("", regressionTable.getTargetCategory().toString());
        regressionTablesMap.put(key, value);
    });
    final MethodDeclaration staticGetterMethod = STATIC_GETTER_METHOD.clone();
    KiePMMLClassificationTableFactory.setStaticGetter(compilationDTO, regressionTablesMap, staticGetterMethod, variableName);
    String text = getFileContent(TEST_02_SOURCE);
    MethodDeclaration expected = JavaParserUtils.parseMethod(text);
    assertThat(JavaParserUtils.equalsNode(expected, staticGetterMethod)).isTrue();
}
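For readers unfamiliar with the JavaParser idiom that setStaticGetter relies on, here is a minimal, self-contained sketch of the same rewriting pattern: parse a builder-style chained expression, locate an inner call, and replace one of its arguments. getChainedMethodCallExprFrom is project-specific, so this sketch substitutes JavaParser's generic findFirst; the expression string is purely illustrative, not taken from the project.

import com.github.javaparser.StaticJavaParser;
import com.github.javaparser.ast.expr.MethodCallExpr;
import com.github.javaparser.ast.expr.StringLiteralExpr;

class ChainedCallRewriteSketch {
    public static void main(String[] args) {
        // Parse a builder-style chained expression (illustrative only).
        MethodCallExpr initializer = StaticJavaParser
                .parseExpression("builder(\"placeholder\").withTargetField(null).build()")
                .asMethodCallExpr();
        // Locate the inner builder(...) call and swap its first argument,
        // mirroring what setStaticGetter does for each with*(...) call.
        MethodCallExpr builderCall = initializer
                .findFirst(MethodCallExpr.class, m -> "builder".equals(m.getNameAsString()))
                .orElseThrow(IllegalStateException::new);
        builderCall.setArgument(0, new StringLiteralExpr("variableName"));
        System.out.println(initializer); // builder("variableName").withTargetField(null).build()
    }
}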
@Override
public int channel(CommittableMessage<MultiTableCommittable> committableMessage) {
    if (committableMessage instanceof CommittableWithLineage) {
        MultiTableCommittable multiTableCommittable =
                ((CommittableWithLineage<MultiTableCommittable>) committableMessage).getCommittable();
        return Math.floorMod(
                Objects.hash(multiTableCommittable.getDatabase(), multiTableCommittable.getTable()),
                numChannels);
    } else {
        // A CommittableSummary is inaccurate after shuffling and needs to be recreated.
        return Math.floorMod(Objects.hash(committableMessage), numChannels);
    }
}
@Test
public void testChannel() {
    MultiTableCommittableChannelComputer computer = new MultiTableCommittableChannelComputer();
    computer.setup(4);
    List<MultiTableCommittable> commits =
            Arrays.asList(
                    new MultiTableCommittable("database", "table1", 1L, null, null),
                    new MultiTableCommittable("database", "table2", 1L, null, null),
                    new MultiTableCommittable("database", "table1", 1L, null, null),
                    new MultiTableCommittable("database", "table5", 1L, null, null),
                    new MultiTableCommittable("database", "table3", 1L, null, null),
                    new MultiTableCommittable("database", "table8", 1L, null, null),
                    new MultiTableCommittable("database", "table5", 1L, null, null),
                    new MultiTableCommittable("database", "table1", 1L, null, null),
                    new MultiTableCommittable("database", "table9", 1L, null, null),
                    new MultiTableCommittable("database", "table5", 1L, null, null),
                    new MultiTableCommittable("database", "table3", 1L, null, null),
                    new MultiTableCommittable("database", "table8", 1L, null, null));
    Map<Integer, Set<String>> map = new HashMap<>();
    commits.forEach(
            (commit) -> {
                int channel = computer.channel(new CommittableWithLineage<>(commit, 1L, 0));
                Set<String> set = map.getOrDefault(channel, new HashSet<>());
                set.add(commit.getTable());
                map.put(channel, set);
            });
    Set<String> actualTables = new HashSet<>();
    for (Map.Entry<Integer, Set<String>> entry : map.entrySet()) {
        actualTables.addAll(entry.getValue());
    }
    Set<String> expectedTables =
            new HashSet<>(Arrays.asList("table1", "table2", "table3", "table5", "table8", "table9"));
    // Each table hashes to exactly one channel, so the union of tables across
    // all channels must equal the full set of committed tables.
    Assertions.assertEquals(actualTables, expectedTables);
}
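One detail worth calling out in the channel computation above: Objects.hash can return a negative int, and Java's % operator keeps the sign of the dividend, so Math.floorMod is what guarantees an index in [0, numChannels). A tiny standalone illustration with made-up values:

public class FloorModDemo {
    public static void main(String[] args) {
        int numChannels = 4;
        int hash = -7; // Objects.hash(...) may well be negative
        System.out.println(hash % numChannels);               // -3, not a valid channel index
        System.out.println(Math.floorMod(hash, numChannels)); // 1, always within [0, numChannels)
    }
}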
public static SslProvider getSslProvider(String provider) {
    if (SslProvider.OPENSSL.name().equalsIgnoreCase(provider)) {
        return SslProvider.OPENSSL;
    }
    if (SslProvider.JDK.name().equalsIgnoreCase(provider)) {
        return SslProvider.JDK;
    }
    if (SslProvider.OPENSSL_REFCNT.name().equalsIgnoreCase(provider)) {
        return SslProvider.OPENSSL_REFCNT;
    }
    return SslProvider.OPENSSL;
}
@Test
void test() {
    SslProvider openssl = TlsTypeResolve.getSslProvider("openssl");
    assertEquals(SslProvider.OPENSSL, openssl);
    SslProvider openSsL = TlsTypeResolve.getSslProvider("openSSL");
    assertEquals(SslProvider.OPENSSL, openSsL);
    SslProvider jdk = TlsTypeResolve.getSslProvider("JDK");
    assertEquals(SslProvider.JDK, jdk);
    SslProvider anySsl = TlsTypeResolve.getSslProvider("anySSL");
    assertEquals(SslProvider.OPENSSL, anySsl);
    SslProvider refcnt = TlsTypeResolve.getSslProvider("openSSL_refcnt");
    assertEquals(SslProvider.OPENSSL_REFCNT, refcnt);
}
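Note that getSslProvider deliberately falls back to OPENSSL for any unrecognized name, which the "anySSL" case in the test exercises. A plausible, purely hypothetical call site wiring the resolved provider into Netty's SslContextBuilder (the wrapper class and method are assumptions, not part of the project shown above):

import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import javax.net.ssl.SSLException;

class TlsContextFactory {
    // configuredProvider would typically come from user configuration.
    static SslContext clientContext(String configuredProvider) throws SSLException {
        return SslContextBuilder.forClient()
                .sslProvider(TlsTypeResolve.getSslProvider(configuredProvider))
                .build();
    }
}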
@Udf(description = "Converts an INT value in degrees to a value in radians")
public Double radians(
    @UdfParameter(
        value = "value",
        description = "The value in degrees to convert to radians."
    ) final Integer value
) {
    return radians(value == null ? null : value.doubleValue());
}
@Test
public void shouldHandleNull() {
    assertThat(udf.radians((Integer) null), is(nullValue()));
    assertThat(udf.radians((Long) null), is(nullValue()));
    assertThat(udf.radians((Double) null), is(nullValue()));
}
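The Integer variant above delegates to a Double overload that is not shown in this entry. Assuming it follows the same null-propagating pattern, it presumably reduces to the JDK's Math.toRadians; a sketch of that assumed shape, with the UDF annotations omitted:

// Assumed shape of the Double overload: null propagates, otherwise
// degrees are converted with the standard JDK helper.
public Double radians(final Double value) {
    return value == null ? null : Math.toRadians(value);
}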
@Override
public int getInt(final int columnIndex) throws SQLException {
    return (int) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, int.class), int.class);
}
@Test
void assertGetIntWithColumnLabel() throws SQLException {
    when(mergeResultSet.getValue(1, int.class)).thenReturn((short) 1);
    assertThat(shardingSphereResultSet.getInt("label"), is(1));
}
public static Builder route() {
    return new RouterFunctionBuilder();
}
@Test
void andOther() {
    HandlerFunction<ServerResponse> handlerFunction = request -> ServerResponse.ok().body("42");
    RouterFunction<?> routerFunction1 = request -> Optional.empty();
    RouterFunction<ServerResponse> routerFunction2 = request -> Optional.of(handlerFunction);
    RouterFunction<?> result = routerFunction1.andOther(routerFunction2);
    assertThat(result).isNotNull();
    Optional<? extends HandlerFunction<?>> resultHandlerFunction = result.route(request);
    assertThat(resultHandlerFunction).isPresent();
    assertThat(resultHandlerFunction.get()).isEqualTo(handlerFunction);
}
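Since route() is just the entry point to RouterFunctionBuilder, a short usage sketch may help. This assumes Spring's servlet-based functional API (org.springframework.web.servlet.function); the WebFlux variant is analogous:

import org.springframework.web.servlet.function.RouterFunction;
import org.springframework.web.servlet.function.RouterFunctions;
import org.springframework.web.servlet.function.ServerResponse;

class RouteBuilderSketch {
    // Builds a router that answers GET /answer with "42", echoing the
    // handler used in the andOther test above.
    static RouterFunction<ServerResponse> routes() {
        return RouterFunctions.route()
                .GET("/answer", request -> ServerResponse.ok().body("42"))
                .build();
    }
}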
@Override
public String description() {
    return "ChronoLocalDateTime.timeLineOrder()";
}
@Test
void should_have_description() {
    assertThat(comparator.description()).isEqualTo("ChronoLocalDateTime.timeLineOrder()");
}
public static <T extends DataflowWorkerHarnessOptions> T initializeGlobalStateAndPipelineOptions(
    Class<?> workerHarnessClass, Class<T> harnessOptionsClass) throws Exception {
  /* Extract pipeline options. */
  T pipelineOptions = WorkerPipelineOptionsFactory.createFromSystemProperties(harnessOptionsClass);
  pipelineOptions.setAppName(workerHarnessClass.getSimpleName());

  /* Configure logging with job-specific properties. */
  DataflowWorkerLoggingMDC.setJobId(pipelineOptions.getJobId());
  DataflowWorkerLoggingMDC.setWorkerId(pipelineOptions.getWorkerId());

  ExperimentContext ec = ExperimentContext.parseFrom(pipelineOptions);
  String experimentName = Experiment.EnableConscryptSecurityProvider.getName();
  if (ec.isEnabled(Experiment.EnableConscryptSecurityProvider)) {
    /* Enable fast SSL provider. */
    LOG.info(
        "Dataflow runner is using conscrypt SSL. To disable this feature, "
            + "remove the pipeline option --experiments={}",
        experimentName);
    Security.insertProviderAt(new OpenSSLProvider(), 1);
  } else {
    LOG.info(
        "Not using conscrypt SSL. Note this is the default Java behavior, but may "
            + "have reduced performance. To use conscrypt SSL pass pipeline option "
            + "--experiments={}",
        experimentName);
  }
  return pipelineOptions;
}
@Test
public void testStreamingStreamingConfiguration() throws Exception {
  DataflowWorkerHarnessOptions pipelineOptions =
      PipelineOptionsFactory.as(DataflowWorkerHarnessOptions.class);
  pipelineOptions.setJobId(JOB_ID);
  pipelineOptions.setWorkerId(WORKER_ID);
  int activeWorkRefreshPeriodMillis = 12345;
  pipelineOptions.setActiveWorkRefreshPeriodMillis(activeWorkRefreshPeriodMillis);
  int stuckCommitDurationMillis = 23456;
  pipelineOptions.setStuckCommitDurationMillis(stuckCommitDurationMillis);
  String serializedOptions = new ObjectMapper().writeValueAsString(pipelineOptions);
  File file = tmpFolder.newFile();
  Files.write(Paths.get(file.getPath()), serializedOptions.getBytes(StandardCharsets.UTF_8));
  System.setProperty("sdk_pipeline_options_file", file.getPath());

  DataflowWorkerHarnessOptions generatedOptions =
      DataflowWorkerHarnessHelper.initializeGlobalStateAndPipelineOptions(
          DataflowBatchWorkerHarnessTest.class, DataflowWorkerHarnessOptions.class);

  // Assert that the returned options are correct.
  assertThat(generatedOptions.getJobId(), equalTo(JOB_ID));
  assertThat(generatedOptions.getWorkerId(), equalTo(WORKER_ID));
  assertThat(
      generatedOptions.getActiveWorkRefreshPeriodMillis(),
      equalTo(activeWorkRefreshPeriodMillis));
  assertThat(generatedOptions.getStuckCommitDurationMillis(), equalTo(stuckCommitDurationMillis));
}
public void to(Action<? super TargetImageParameters> action) {
    action.execute(to);
}
@Test
public void testTo() {
    assertThat(testJibExtension.getTo().getImage()).isNull();
    assertThat(testJibExtension.getTo().getCredHelper().getHelper()).isNull();

    testJibExtension.to(
        to -> {
            to.setImage("some image");
            to.setCredHelper("some cred helper");
            to.auth(auth -> auth.setUsername("some username"));
            to.auth(auth -> auth.setPassword("some password"));
        });
    assertThat(testJibExtension.getTo().getImage()).isEqualTo("some image");
    assertThat(testJibExtension.getTo().getCredHelper().getHelper()).isEqualTo("some cred helper");
    assertThat(testJibExtension.getTo().getAuth().getUsername()).isEqualTo("some username");
    assertThat(testJibExtension.getTo().getAuth().getPassword()).isEqualTo("some password");
}
@SuppressWarnings("unchecked") public <IN, OUT> AvroDatumConverter<IN, OUT> create(Class<IN> inputClass) { boolean isMapOnly = ((JobConf) getConf()).getNumReduceTasks() == 0; if (AvroKey.class.isAssignableFrom(inputClass)) { Schema schema; if (isMapOnly) { schema = AvroJob.getMapOutputKeySchema(getConf()); if (null == schema) { schema = AvroJob.getOutputKeySchema(getConf()); } } else { schema = AvroJob.getOutputKeySchema(getConf()); } if (null == schema) { throw new IllegalStateException("Writer schema for output key was not set. Use AvroJob.setOutputKeySchema()."); } return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema); } if (AvroValue.class.isAssignableFrom(inputClass)) { Schema schema; if (isMapOnly) { schema = AvroJob.getMapOutputValueSchema(getConf()); if (null == schema) { schema = AvroJob.getOutputValueSchema(getConf()); } } else { schema = AvroJob.getOutputValueSchema(getConf()); } if (null == schema) { throw new IllegalStateException( "Writer schema for output value was not set. Use AvroJob.setOutputValueSchema()."); } return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema); } if (BooleanWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new BooleanWritableConverter(); } if (BytesWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new BytesWritableConverter(); } if (ByteWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new ByteWritableConverter(); } if (DoubleWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new DoubleWritableConverter(); } if (FloatWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new FloatWritableConverter(); } if (IntWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new IntWritableConverter(); } if (LongWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new LongWritableConverter(); } if (NullWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new NullWritableConverter(); } if (Text.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new TextConverter(); } throw new UnsupportedOperationException("Unsupported input type: " + inputClass.getName()); }
@Test
void convertLongWritable() {
    AvroDatumConverter<LongWritable, Long> converter = mFactory.create(LongWritable.class);
    assertEquals(123L, converter.convert(new LongWritable(123L)).longValue());
}
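The factory's IllegalStateException branches hint at the required setup: writer schemas must be registered on the job before create is called with AvroKey or AvroValue inputs. A hedged sketch of that configuration — the schema choices are arbitrary examples, not taken from the test fixture:

import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.hadoop.mapreduce.Job;

class AvroJobSetupSketch {
    static Job configuredJob() throws IOException {
        Job job = Job.getInstance();
        // Without these, create(AvroKey.class) / create(AvroValue.class)
        // above would throw IllegalStateException.
        AvroJob.setOutputKeySchema(job, Schema.create(Schema.Type.LONG));
        AvroJob.setOutputValueSchema(job, Schema.create(Schema.Type.STRING));
        return job;
    }
}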
@Override
public Collection<String> doSharding(final Collection<String> availableTargetNames,
                                     final HintShardingValue<Comparable<?>> shardingValue) {
    return shardingValue.getValues().isEmpty()
            ? availableTargetNames
            : shardingValue.getValues().stream().map(this::doSharding).collect(Collectors.toList());
}
@Test
void assertDoShardingWithSingleValue() {
    List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
    HintShardingValue<Comparable<?>> shardingValue = new HintShardingValue<>("t_order", "order_id", Collections.singleton(4));
    Collection<String> actual = hintInlineShardingAlgorithm.doSharding(availableTargetNames, shardingValue);
    assertTrue(actual.contains("t_order_0"));
}
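The test fixture's algorithm expression is not shown here, but for the assertion to hold it presumably resolves the hint value 4 to t_order_0, e.g. via an inline expression like t_order_$->{value % 4} (since 4 % 4 == 0). Both the property key and the expression below are assumptions about that configuration, not facts from the entry:

import java.util.Properties;

class HintInlineFixtureSketch {
    // Hypothetical fixture configuration: "algorithm-expression" is assumed to
    // be the property key, and the Groovy-style expression maps 4 % 4 == 0
    // onto t_order_0, matching the assertion above.
    static Properties algorithmProps() {
        Properties props = new Properties();
        props.setProperty("algorithm-expression", "t_order_$->{value % 4}");
        return props;
    }
}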
public void undelete() {
  // Make a copy, because the set of selected trash items changes as soon as trashService.undelete is called.
  List<UIDeletedObject> selectedTrashFileItemsSnapshot =
      new ArrayList<UIDeletedObject>( selectedTrashFileItems );
  if ( selectedTrashFileItemsSnapshot != null && selectedTrashFileItemsSnapshot.size() > 0 ) {
    List<ObjectId> ids = new ArrayList<ObjectId>();
    for ( UIDeletedObject uiObj : selectedTrashFileItemsSnapshot ) {
      ids.add( uiObj.getId() );
    }
    try {
      trashService.undelete( ids );
      setTrash( trashService.getTrash() );
      for ( UIDeletedObject uiObj : selectedTrashFileItemsSnapshot ) {
        // Find the closest UIRepositoryDirectory that is in the dirMap.
        RepositoryDirectoryInterface dir = repository.findDirectory( uiObj.getOriginalParentPath() );
        while ( dir != null && dirMap.get( dir.getObjectId() ) == null ) {
          dir = dir.getParent();
        }
        // Now refresh that UIRepositoryDirectory so that the file/folders deck refreshes instantly on undelete.
        if ( dir != null ) {
          dirMap.get( dir.getObjectId() ).refresh();
        }
        // If it is a transformation, or a directory containing transformations, call the extension
        // point to restore data service references.
        if ( RepositoryObjectType.TRANSFORMATION.name().equals( uiObj.getType() ) ) {
          TransMeta transMeta = repository.loadTransformation( uiObj.getId(), null );
          ExtensionPointHandler
              .callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.TransAfterOpen.id, transMeta );
          transMeta.clearChanged();
        } else if ( !RepositoryObjectType.JOB.name().equals( uiObj.getType() ) ) {
          // Not a transformation and not a job, so it is a directory.
          RepositoryDirectoryInterface actualDir = repository.findDirectory(
              uiObj.getOriginalParentPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + uiObj.getName() );
          if ( actualDir != null ) {
            List<RepositoryElementMetaInterface> transformations = new ArrayList<>();
            getAllTransformations( actualDir, transformations );
            for ( RepositoryElementMetaInterface repositoryElementMetaInterface : transformations ) {
              TransMeta transMeta =
                  repository.loadTransformation( repositoryElementMetaInterface.getObjectId(), null );
              ExtensionPointHandler
                  .callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.TransAfterOpen.id, transMeta );
              transMeta.clearChanged();
            }
          } else {
            displayExceptionMessage( BaseMessages.getString( PKG, "TrashBrowseController.UnableToRestoreDirectory",
                uiObj.getOriginalParentPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + uiObj.getName() ) );
          }
        }
      }
      deck.setSelectedIndex( 1 );
    } catch ( Throwable th ) {
      if ( mainController == null || !mainController.handleLostRepository( th ) ) {
        displayExceptionMessage( BaseMessages.getString( PKG, "TrashBrowseController.UnableToRestoreFile",
            th.getLocalizedMessage() ) ); //$NON-NLS-1$
      }
    }
  } else {
    // The UI probably allowed the button to be enabled when it should not have been.
    throw new RuntimeException();
  }
}
@Test
public void testUnDeleteJob() throws Exception {
  testUnDelete( RepositoryObjectType.JOB.name(), true );
  verify( trashServiceMock, times( 1 ) ).undelete( anyList() );
  verify( transMetaMock, never() ).clearChanged();
  verify( repositoryMock, never() ).loadTransformation( objectIdMock, null );
  verify( deckMock, times( 1 ) ).setSelectedIndex( 1 );
}
public EBPFProfilingAnalyzation analyze(List<String> scheduleIdList,
                                        List<EBPFProfilingAnalyzeTimeRange> ranges,
                                        EBPFProfilingAnalyzeAggregateType aggregateType) throws IOException {
    EBPFProfilingAnalyzation analyzation = new EBPFProfilingAnalyzation();

    // query data
    long queryDataMaxTimestamp = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(maxQueryTimeoutInSecond);
    final Stream<EBPFProfilingStack> stackStream = buildTimeRanges(ranges).parallelStream().map(r -> {
        try {
            return fetchDataThreadPool.submit(() -> getDataDAO().queryData(scheduleIdList, r.getMinTime(), r.getMaxTime()))
                    .get(queryDataMaxTimestamp - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            log.warn(e.getMessage(), e);
            return Collections.<EBPFProfilingDataRecord>emptyList();
        }
    }).flatMap(Collection::stream).map(e -> {
        try {
            return EBPFProfilingStack.deserialize(e, aggregateType);
        } catch (Exception ex) {
            log.warn("could not deserialize the stack", ex);
            return null;
        }
    }).filter(Objects::nonNull).distinct();

    // analyze tree
    generateTrees(analyzation, stackStream);
    return analyzation;
}
@Test
public void testAnalyze() throws IOException {
    EBPFProfilingAnalyzerHolder holder = loadYaml("ebpf-profiling-data.yml", EBPFProfilingAnalyzerHolder.class);
    for (int c = 0; c < holder.getList().size(); c++) {
        try {
            holder.getList().get(c).analyzeAssert();
        } catch (Error e) {
            throw new AssertionError("validate case " + c + " failure", e);
        }
    }
}
@Override
public String ping(RedisClusterNode node) {
    return execute(node, RedisCommands.PING);
}
@Test
public void testClusterPing() {
    RedisClusterNode master = getFirstMaster();
    String res = connection.ping(master);
    assertThat(res).isEqualTo("PONG");
}