focal_method — string (lengths 13 to 60.9k)
test_case — string (lengths 25 to 109k)
@Override public void sessionWillPassivate(HttpSessionEvent event) { if (!instanceEnabled) { return; } // for getSessionCount SESSION_COUNT.decrementAndGet(); // for invalidateAllSession removeSession(event.getSession()); }
@Test public void testSessionWillPassivate() { sessionListener.sessionDidActivate(createSessionEvent()); sessionListener.sessionWillPassivate(createSessionEvent()); if (SessionListener.getSessionCount() != 0) { fail("sessionWillPassivate"); } if (!SessionListener.getAllSessionsInformations().isEmpty()) { fail("sessionWillPassivate"); } }
public static int CCITT_FALSE(@NonNull final byte[] data, final int offset, final int length) { // Other implementation of the same algorithm: // int crc = 0xFFFF; // // for (int i = offset; i < offset + length && i < data.length; ++i) { // crc = (((crc & 0xFFFF) >> 8) | (crc << 8)); // crc ^= data[i]; // crc ^= (crc & 0xFF) >> 4; // crc ^= (crc << 8) << 4; // crc ^= ((crc & 0xFF) << 4) << 1; // } return CRC(0x1021, 0xFFFF, data, offset, length, false, false, 0x0000); }
@Test public void CCITT_FALSE_123456789() { final byte[] data = "123456789".getBytes(); assertEquals(0x29B1, CRC16.CCITT_FALSE(data, 0, 9)); }
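The focal method above delegates to a generic `CRC` helper whose body is not shown. The following is a minimal sketch of what such a parameterized CRC-16 routine could look like — the name and parameter order are taken from the call site, but the body and the reflection semantics are assumptions based on the standard bit-by-bit CRC algorithm, not the library's actual implementation:

public static int CRC(final int polynomial, final int initialValue, final byte[] data, final int offset,
                      final int length, final boolean reflectIn, final boolean reflectOut, final int xorOut) {
    int crc = initialValue;
    for (int i = offset; i < offset + length && i < data.length; ++i) {
        int b = data[i] & 0xFF;
        if (reflectIn) { b = Integer.reverse(b) >>> 24; } // reflect the input byte (assumed semantics)
        crc ^= b << 8; // feed the byte into the high-order bits of the 16-bit register
        for (int bit = 0; bit < 8; ++bit) {
            crc = ((crc & 0x8000) != 0) ? ((crc << 1) ^ polynomial) : (crc << 1);
        }
        crc &= 0xFFFF; // keep the register at 16 bits
    }
    if (reflectOut) { crc = Integer.reverse(crc) >>> 16; } // reflect the 16-bit result (assumed semantics)
    return (crc ^ xorOut) & 0xFFFF;
}

With polynomial 0x1021, initial value 0xFFFF, no reflection, and xorOut 0x0000, this loop is equivalent to the commented-out implementation inside CCITT_FALSE and yields 0x29B1 for "123456789", matching the test above.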
@Internal public boolean isCompatible(Trigger other) { if (!getClass().equals(other.getClass())) { return false; } if (subTriggers == null) { return other.subTriggers == null; } else if (other.subTriggers == null) { return false; } else if (subTriggers.size() != other.subTriggers.size()) { return false; } for (int i = 0; i < subTriggers.size(); i++) { if (!subTriggers.get(i).isCompatible(other.subTriggers.get(i))) { return false; } } return true; }
@Test public void testIsCompatible() throws Exception { assertTrue(new Trigger1(null).isCompatible(new Trigger1(null))); assertTrue( new Trigger1(Arrays.asList(new Trigger2(null))) .isCompatible(new Trigger1(Arrays.asList(new Trigger2(null))))); assertFalse(new Trigger1(null).isCompatible(new Trigger2(null))); assertFalse( new Trigger1(Arrays.asList(new Trigger1(null))) .isCompatible(new Trigger1(Arrays.asList(new Trigger2(null))))); }
private void removeNote(Note note, AuthenticationInfo subject) throws IOException { LOGGER.info("Remove note: {}", note.getId()); // Set Remove to true to cancel saving this note note.setRemoved(true); noteManager.removeNote(note.getId(), subject); authorizationService.removeNoteAuth(note.getId()); fireNoteRemoveEvent(note, subject); }
@Test void testRemoveNote() throws IOException, InterruptedException { LOGGER.info("--------------- Test testRemoveNote ---------------"); // create a note and a paragraph String noteId = notebook.createNote("note1", anonymous); int mock1ProcessNum = interpreterSettingManager.getByName("mock1").getAllInterpreterGroups().size(); Paragraph p = notebook.processNote(noteId, note -> { return note.addNewParagraph(AuthenticationInfo.ANONYMOUS); }); Map<String, Object> config = new HashMap<>(); p.setConfig(config); p.setText("%mock1 sleep 100000"); p.execute(false); // wait until it is running while (!p.isRunning()) { Thread.sleep(1000); } assertEquals(mock1ProcessNum + 1, interpreterSettingManager.getByName("mock1").getAllInterpreterGroups().size()); notebook.removeNote(noteId, anonymous); // stopping the interpreter process is async, so wait for 5 seconds here Thread.sleep(5 * 1000); assertEquals(mock1ProcessNum, interpreterSettingManager.getByName("mock1").getAllInterpreterGroups().size()); LOGGER.info("--------------- Finish Test testRemoveNote ---------------"); }
@PublicEvolving public static AIMDScalingStrategyBuilder builder(int rateThreshold) { return new AIMDScalingStrategyBuilder(rateThreshold); }
@Test void testInvalidIncreaseRate() { assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy( () -> AIMDScalingStrategy.builder(100) .setIncreaseRate(0) .setDecreaseFactor(0.5) .build()) .withMessageContaining("increaseRate must be positive integer."); }
@Override public final void collect(T record) { collect(record, TimestampAssigner.NO_TIMESTAMP); }
@Test void eventsAreBeforeWatermarks() { final CollectingDataOutput<Integer> dataOutput = new CollectingDataOutput<>(); final SourceOutputWithWatermarks<Integer> out = createWithSameOutputs( dataOutput, new RecordTimestampAssigner<>(), new TestWatermarkGenerator<>()); out.collect(42, 12345L); assertThat(dataOutput.events) .contains( new StreamRecord<>(42, 12345L), new org.apache.flink.streaming.api.watermark.Watermark(12345L)); }
private void overlay(Properties to, Properties from) { synchronized (from) { for (Entry<Object, Object> entry : from.entrySet()) { to.put(entry.getKey(), entry.getValue()); } } }
@Test public void testOverlay() throws IOException { out = new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("a", "b"); appendProperty("b", "c"); appendProperty("d", "e"); appendProperty("e", "f", true); endConfig(); out = new BufferedWriter(new FileWriter(CONFIG2)); startConfig(); appendProperty("a", "b"); appendProperty("b", "d"); appendProperty("e", "e"); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); // set something dynamically conf.set("c", "d"); conf.set("a", "d"); Configuration clone = new Configuration(conf); clone.addResource(new Path(CONFIG2)); assertEquals("d", clone.get("a")); assertEquals("d", clone.get("b")); assertEquals("d", clone.get("c")); assertEquals("e", clone.get("d")); assertEquals("f", clone.get("e")); }
public GenericRecordBuilder set(String fieldName, Object value) { return set(schema().getField(fieldName), value); }
@Test void attemptToSetNonNullableFieldToNull() { assertThrows(org.apache.avro.AvroRuntimeException.class, () -> { new GenericRecordBuilder(recordSchema()).set("intField", null); }); }
@Override public void upgrade() { if (clusterConfigService.get(MigrationCompleted.class) != null) { LOG.debug("Migration already completed."); return; } final LegacyAWSPluginConfiguration legacyConfiguration = clusterConfigService.get( CLUSTER_CONFIG_TYPE, LegacyAWSPluginConfiguration.class ); if (legacyConfiguration != null && !Strings.isNullOrEmpty(legacyConfiguration.secretKey())) { final AWSPluginConfiguration migratedPluginConfiguration = AWSPluginConfiguration.fromLegacyConfig(legacyConfiguration, systemConfiguration); clusterConfigService.write(CLUSTER_CONFIG_TYPE, migratedPluginConfiguration); } clusterConfigService.write(MigrationCompleted.create()); }
@Test public void doesNotDoAnyThingForMissingPluginConfig() { mockExistingConfig(null); this.migration.upgrade(); verify(clusterConfigService, never()).write(anyString(), any()); verify(clusterConfigService, times(1)).write(any(V20200505121200_EncryptAWSSecretKey.MigrationCompleted.class)); }
public void enrichWorkflowDefinition(WorkflowDefinition workflowDefinition) { WorkflowDefinitionExtras enrichedWorkflowDefinition = new WorkflowDefinitionExtras(); enrichParams(workflowDefinition, enrichedWorkflowDefinition); enrichNextRunDate(workflowDefinition, enrichedWorkflowDefinition); workflowDefinition.setEnrichedExtras(enrichedWorkflowDefinition); }
@Test public void testEnrichWorkflowDefinitionParams() { Map<String, ParamDefinition> stepParams = Collections.singletonMap("sp1", ParamDefinition.buildParamDefinition("sp1", "sv1")); Map<String, ParamDefinition> workflowParams = Collections.singletonMap("wp1", ParamDefinition.buildParamDefinition("wp1", "wv1")); when(paramsManager.generateStaticStepParamDefs(any(), any(), any())).thenReturn(stepParams); when(paramsManager.generatedStaticWorkflowParamDefs(any())).thenReturn(workflowParams); workflowEnrichmentHelper.enrichWorkflowDefinition(definition); Assert.assertNotNull(definition.getEnrichedExtras()); Assert.assertNull(definition.getEnrichedExtras().getNextExecutionTime()); WorkflowDefinitionExtras enriched = definition.getEnrichedExtras(); Assert.assertEquals("wv1", enriched.getWorkflowParams().get("wp1").getValue()); Assert.assertEquals("sv1", enriched.getStepParams().get("job1").get("sp1").getValue()); }
public static long memorySize2Byte(final long memorySize, @MemoryConst.Unit final int unit) { if (memorySize < 0) return -1; return memorySize * unit; }
@Test public void memorySize2ByteInputNegativeZeroOutputNegative() { Assert.assertEquals( -1L, ConvertKit.memorySize2Byte(-9_223_372_036_854_775_807L, 0) ); }
public static void writeStringToFile(File file, String data, String encoding) throws IOException { try (OutputStream os = new FileOutputStream(file)) { os.write(data.getBytes(encoding)); os.flush(); } }
@Test public void testWriteStringToFile() throws IOException { File tempFile = new File(tempDir.toFile(), "testWriteStringToFile.txt"); String testString = "test string"; IoUtil.writeStringToFile(tempFile, testString, "UTF-8"); BufferedReader reader = new BufferedReader(new FileReader(tempFile)); String fileContent = reader.readLine(); reader.close(); Assert.assertEquals(testString, fileContent); }
@Override public boolean isLeader() { return isLeader; }
@Test void postsEventWhenLeaderChanges() { when(lockService.lock(any(), isNull())).thenReturn(Optional.of(mock(Lock.class))); leaderElectionService.startAsync().awaitRunning(); verify(eventBus, timeout(10_000)).post(any(LeaderChangedEvent.class)); assertThat(leaderElectionService.isLeader()).isTrue(); when(lockService.lock(any(), isNull())).thenReturn(Optional.empty()); verify(eventBus, timeout(10_000).times(2)).post(any(LeaderChangedEvent.class)); assertThat(leaderElectionService.isLeader()).isFalse(); }
public long getLinesOfCode(){ try (DbSession dbSession = dbClient.openSession(false)) { return dbClient.projectDao().getNclocSum(dbSession); } }
@Test public void should_return_metric_from_liveMeasureDao() { when(dbClient.projectDao().getNclocSum(any(DbSession.class))).thenReturn(1800999L); long linesOfCode = statisticsSupport.getLinesOfCode(); assertThat(linesOfCode).isEqualTo(1800999L); }
public static NameStep newBuilder() { return new CharacterSteps(); }
@Test void testBuildWeakWizard() { final var character = CharacterStepBuilder.newBuilder() .name("Merlin") .wizardClass("alchemist") .withSpell("poison") .noAbilities() .build(); assertEquals("Merlin", character.getName()); assertEquals("alchemist", character.getWizardClass()); assertEquals("poison", character.getSpell()); assertNull(character.getAbilities()); assertNotNull(character.toString()); }
public static String getFullElapsedTime(final long delta) { if (delta < Duration.ofSeconds(1).toMillis()) { return String.format("%d %s", delta, delta == 1 ? LocaleUtils.getLocalizedString("global.millisecond") : LocaleUtils.getLocalizedString("global.milliseconds")); } else if (delta < Duration.ofMinutes(1).toMillis()) { final long millis = delta % Duration.ofSeconds(1).toMillis(); final long seconds = delta / Duration.ofSeconds(1).toMillis(); final String secondsString = String.format("%d %s", seconds, seconds == 1 ? LocaleUtils.getLocalizedString("global.second") : LocaleUtils.getLocalizedString("global.seconds")); if (millis > 0) { return secondsString + ", " + getFullElapsedTime(millis); } else { return secondsString; } } else if (delta < Duration.ofHours(1).toMillis()) { final long millis = delta % Duration.ofMinutes(1).toMillis(); final long minutes = delta / Duration.ofMinutes(1).toMillis(); final String minutesString = String.format("%d %s", minutes, minutes == 1 ? LocaleUtils.getLocalizedString("global.minute") : LocaleUtils.getLocalizedString("global.minutes")); if (millis > 0) { return minutesString + ", " + getFullElapsedTime(millis); } else { return minutesString; } } else if (delta < Duration.ofDays(1).toMillis()) { final long millis = delta % Duration.ofHours(1).toMillis(); final long hours = delta / Duration.ofHours(1).toMillis(); final String hoursString = String.format("%d %s", hours, hours == 1 ? LocaleUtils.getLocalizedString("global.hour") : LocaleUtils.getLocalizedString("global.hours")); if (millis > 0) { return hoursString + ", " + getFullElapsedTime(millis); } else { return hoursString; } } else { final long millis = delta % Duration.ofDays(1).toMillis(); final long days = delta / Duration.ofDays(1).toMillis(); final String daysString = String.format("%d %s", days, days == 1 ? LocaleUtils.getLocalizedString("global.day") : LocaleUtils.getLocalizedString("global.days")); if (millis > 0) { return daysString + ", " + getFullElapsedTime(millis); } else { return daysString; } } }
@Test public void testElapsedTimeInSeconds() throws Exception { assertThat(StringUtils.getFullElapsedTime(Duration.ofSeconds(1)), is("1 second")); assertThat(StringUtils.getFullElapsedTime(Duration.ofMillis(1001)), is("1 second, 1 ms")); assertThat(StringUtils.getFullElapsedTime(Duration.ofSeconds(30).plus(Duration.ofMillis(30))), is("30 seconds, 30 ms")); }
@VisibleForTesting @Nonnull Map<String, Object> prepareContextForPaginatedResponse(@Nonnull List<RuleDao> rules) { final Map<String, RuleDao> ruleTitleMap = rules .stream() .collect(Collectors.toMap(RuleDao::title, dao -> dao)); final Map<String, List<PipelineCompactSource>> result = new HashMap<>(); rules.forEach(r -> result.put(r.id(), new ArrayList<>())); pipelineServiceHelper.groupByRuleName( pipelineService::loadAll, ruleTitleMap.keySet()) .forEach((ruleTitle, pipelineDaos) -> { result.put( ruleTitleMap.get(ruleTitle).id(), pipelineDaos.stream() .map(dao -> PipelineCompactSource.builder() .id(dao.id()) .title(dao.title()) .build()) .toList() ); }); return Map.of("used_in_pipelines", result); }
@Test public void prepareContextForPaginatedResponse_returnsRuleUsageMapIfRulesUsedByPipelines() { final List<RuleDao> rules = List.of( ruleDao("rule-1", "Rule 1"), ruleDao("rule-2", "Rule 2"), ruleDao("rule-3", "Rule 3"), ruleDao("rule-4", "Rule 4") ); when(pipelineServiceHelper.groupByRuleName(any(), eq(ImmutableSet.of("Rule 1", "Rule 2", "Rule 3", "Rule 4")))) .thenReturn(Map.of( "Rule 1", List.of(pipelineDao("pipeline-1", "Pipeline 1")), "Rule 2", List.of(pipelineDao("pipeline-2", "Pipeline 2")), "Rule 3", List.of( pipelineDao("pipeline-1", "Pipeline 1"), pipelineDao("pipeline-2", "Pipeline 2"), pipelineDao("pipeline-3", "Pipeline 3") ), "Rule 4", List.of() )); assertThat(underTest.prepareContextForPaginatedResponse(rules)) .isEqualTo(Map.of("used_in_pipelines", Map.of( "rule-1", List.of(PipelineCompactSource.create("pipeline-1", "Pipeline 1")), "rule-2", List.of(PipelineCompactSource.create("pipeline-2", "Pipeline 2")), "rule-3", List.of( PipelineCompactSource.create("pipeline-1", "Pipeline 1"), PipelineCompactSource.create("pipeline-2", "Pipeline 2"), PipelineCompactSource.create("pipeline-3", "Pipeline 3") ), "rule-4", List.of() ))); }
public boolean tryEnableBinlog(Database db, long tableId, long binlogTtL, long binlogMaxSize) { // passing properties instead of a BinlogConfig unifies the logic of manual ALTER TABLE and the ALTER TABLE triggered by materialized views HashMap<String, String> properties = new HashMap<>(); properties.put(PropertyAnalyzer.PROPERTIES_BINLOG_ENABLE, "true"); if (binlogTtL != INVALID) { properties.put(PropertyAnalyzer.PROPERTIES_BINLOG_TTL, String.valueOf(binlogTtL)); } if (binlogMaxSize != INVALID) { properties.put(PropertyAnalyzer.PROPERTIES_BINLOG_MAX_SIZE, String.valueOf(binlogMaxSize)); } SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); return schemaChangeHandler.updateBinlogConfigMeta(db, tableId, properties, TTabletMetaType.BINLOG_CONFIG); }
@Test public void testTryEnableBinlog() { Database db = GlobalStateMgr.getCurrentState().getDb("test"); OlapTable table = (OlapTable) db.getTable("binlog_test"); boolean result = binlogManager.tryEnableBinlog(db, table.getId(), 200L, -1L); Assert.assertTrue(result); Assert.assertEquals(1, table.getBinlogVersion()); Assert.assertEquals(200, table.getCurBinlogConfig().getBinlogTtlSecond()); Assert.assertEquals(100, table.getCurBinlogConfig().getBinlogMaxSize()); }
@Udf(description = "Converts the number of days since 1970-01-01 00:00:00 UTC/GMT to a date " + "string using the given format pattern. The format pattern should be in the format" + " expected by java.time.format.DateTimeFormatter") public String formatDate( @UdfParameter( description = "The date to convert") final Date date, @UdfParameter( description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) { if (date == null || formatPattern == null) { return null; } try { final DateTimeFormatter formatter = formatters.get(formatPattern); return LocalDate.ofEpochDay(TimeUnit.MILLISECONDS.toDays(date.getTime())).format(formatter); } catch (final ExecutionException | RuntimeException e) { throw new KsqlFunctionException("Failed to format date " + date + " with formatter '" + formatPattern + "': " + e.getMessage(), e); } }
@Test public void shouldThrowOnUnsupportedFields() { // When: final Exception e = assertThrows( KsqlFunctionException.class, () -> udf.formatDate(Date.valueOf("2014-11-09"), "yyyy-MM-dd HH:mm")); // Then: assertThat(e.getMessage(), is("Failed to format date 2014-11-09 with formatter 'yyyy-MM-dd HH:mm': Unsupported field: HourOfDay")); }
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException { ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null")); if (null == value) { return convertNullValue(convertType); } if (value.getClass() == convertType) { return value; } if (value instanceof LocalDateTime) { return convertLocalDateTimeValue((LocalDateTime) value, convertType); } if (value instanceof Timestamp) { return convertTimestampValue((Timestamp) value, convertType); } if (URL.class.equals(convertType)) { return convertURL(value); } if (value instanceof Number) { return convertNumberValue(value, convertType); } if (value instanceof Date) { return convertDateValue((Date) value, convertType); } if (value instanceof byte[]) { return convertByteArrayValue((byte[]) value, convertType); } if (boolean.class.equals(convertType)) { return convertBooleanValue(value); } if (String.class.equals(convertType)) { return value.toString(); } try { return convertType.cast(value); } catch (final ClassCastException ignored) { throw new SQLFeatureNotSupportedException("getObject with type"); } }
@Test void assertConvertNullValue() throws SQLException { assertFalse((boolean) ResultSetUtils.convertValue(null, boolean.class)); assertThat(ResultSetUtils.convertValue(null, byte.class), is((byte) 0)); assertThat(ResultSetUtils.convertValue(null, short.class), is((short) 0)); assertThat(ResultSetUtils.convertValue(null, int.class), is(0)); assertThat(ResultSetUtils.convertValue(null, long.class), is(0L)); assertThat(ResultSetUtils.convertValue(null, double.class), is(0.0D)); assertThat(ResultSetUtils.convertValue(null, float.class), is(0.0F)); assertThat(ResultSetUtils.convertValue(null, String.class), is((Object) null)); assertThat(ResultSetUtils.convertValue(null, Object.class), is((Object) null)); assertThat(ResultSetUtils.convertValue(null, BigDecimal.class), is((Object) null)); assertThat(ResultSetUtils.convertValue(null, Date.class), is((Object) null)); }
@Override public GrokPattern load(String patternId) throws NotFoundException { final GrokPattern pattern = dbCollection.findOneById(new ObjectId(patternId)); if (pattern == null) { throw new NotFoundException("Couldn't find Grok pattern with ID " + patternId); } return pattern; }
@Test public void loadNonExistentGrokPatternThrowsNotFoundException() { assertThatThrownBy(() -> service.load("cafebabe00000000deadbeef")) .isInstanceOf(NotFoundException.class); }
@Override public void set(V value) { get(setAsync(value)); }
@Test public void testTouch() { RBucket<String> bucket = redisson.getBucket("test"); bucket.set("someValue"); assertThat(bucket.touch()).isTrue(); RBucket<String> bucket2 = redisson.getBucket("test2"); assertThat(bucket2.touch()).isFalse(); }
@Override @SuppressWarnings("deprecation") public HttpRoute determineRoute(HttpHost target, HttpContext context) { if ( ! target.getSchemeName().equals("http") && ! target.getSchemeName().equals("https")) throw new IllegalArgumentException("Scheme must be 'http' or 'https' when using HttpToHttpsRoutePlanner, was '" + target.getSchemeName() + "'"); if (HttpClientContext.adapt(context).getRequestConfig().getProxy() != null) throw new IllegalArgumentException("Proxies are not supported with HttpToHttpsRoutePlanner"); int port = DefaultSchemePortResolver.INSTANCE.resolve(target); return new HttpRoute(new HttpHost("https", target.getAddress(), target.getHostName(), port)); }
@Test @SuppressWarnings("deprecation") void verifyProxyIsDisallowed() { HttpClientContext context = new HttpClientContext(); context.setRequestConfig(RequestConfig.custom().setProxy(new HttpHost("proxy")).build()); try { planner.determineRoute(new HttpHost("http", "host", 1), context); } catch (IllegalArgumentException e) { assertEquals("Proxies are not supported with HttpToHttpsRoutePlanner", e.getMessage()); } }
@Override public void validateJoinRequest(JoinMessage joinMessage) { // check joining member's major.minor version is same as current cluster version's major.minor numbers MemberVersion memberVersion = joinMessage.getMemberVersion(); Version clusterVersion = node.getClusterService().getClusterVersion(); if (!memberVersion.asVersion().equals(clusterVersion)) { String msg = "Joining node's version " + memberVersion + " is not compatible with cluster version " + clusterVersion; if (clusterVersion.getMajor() != memberVersion.getMajor()) { msg += " (Rolling Member Upgrades are only supported for the same major version)"; } if (clusterVersion.getMinor() > memberVersion.getMinor()) { msg += " (Rolling Member Upgrades are only supported for the next minor version)"; } if (!BuildInfoProvider.getBuildInfo().isEnterprise()) { msg += " (Rolling Member Upgrades are only supported in Hazelcast Enterprise)"; } throw new VersionMismatchException(msg); } }
@Test public void test_joinRequestFails_whenNextMinorVersion() { MemberVersion nextMinorVersion = MemberVersion.of(nodeVersion.getMajor(), nodeVersion.getMinor() + 1, nodeVersion.getPatch()); JoinRequest joinRequest = new JoinRequest(Packet.VERSION, buildNumber, nextMinorVersion, joinAddress, newUnsecureUUID(), false, null, null, null, null, null); assertThatThrownBy(() -> nodeExtension.validateJoinRequest(joinRequest)) .isInstanceOf(VersionMismatchException.class) .hasMessageContaining("Rolling Member Upgrades are only supported in Hazelcast Enterprise"); }
public static NamenodeRole convert(NamenodeRoleProto role) { switch (role) { case NAMENODE: return NamenodeRole.NAMENODE; case BACKUP: return NamenodeRole.BACKUP; case CHECKPOINT: return NamenodeRole.CHECKPOINT; } return null; }
@Test public void testAclStatusProto() { AclEntry e = new AclEntry.Builder().setName("test") .setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT) .setType(AclEntryType.OTHER).build(); AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e) .build(); Assert.assertEquals(s, PBHelperClient.convert(PBHelperClient.convert(s))); }
@Override public void validate(final Analysis analysis) { try { RULES.forEach(rule -> rule.check(analysis)); } catch (final KsqlException e) { throw new KsqlException(e.getMessage() + PULL_QUERY_SYNTAX_HELP, e); } QueryValidatorUtil.validateNoUserColumnsWithSameNameAsPseudoColumns(analysis); }
@Test public void shouldThrowOnPullQueryThatIsWindowed() { // Given: when(analysis.getWindowExpression()).thenReturn(Optional.of(windowExpression)); // When: final Exception e = assertThrows( KsqlException.class, () -> validator.validate(analysis) ); // Then: assertThat(e.getMessage(), containsString("Pull queries don't support WINDOW clauses.")); }
public static PemAuthIdentity clusterOperator(Secret secret) { return new PemAuthIdentity(secret, "cluster-operator"); }
@Test public void testMissingSecret() { Exception e = assertThrows(NullPointerException.class, () -> PemAuthIdentity.clusterOperator(null)); assertThat(e.getMessage(), is("Cannot extract auth identity from null secret.")); }
public NearCachePreloaderConfig setStoreIntervalSeconds(int storeIntervalSeconds) { this.storeIntervalSeconds = checkPositive("storeIntervalSeconds", storeIntervalSeconds); return this; }
@Test(expected = IllegalArgumentException.class) public void setStoreIntervalSeconds_withNegative() { config.setStoreIntervalSeconds(-1); }
public boolean traced(String clientIp) { ConnectionControlRule connectionControlRule = ControlManagerCenter.getInstance().getConnectionControlManager() .getConnectionLimitRule(); return connectionControlRule != null && connectionControlRule.getMonitorIpList() != null && connectionControlRule.getMonitorIpList().contains(clientIp); }
@Test void testTraced() { assertFalse(connectionManager.traced(clientIp)); }
public void put(K key, V value) { if (value == null) { throw new IllegalArgumentException("Null values are disallowed"); } ValueAndTimestamp<V> oldValue = cache.put(key, new ValueAndTimestamp<>(value)); if (oldValue == null && cache.size() > cleanupThreshold) { doCleanup(); } }
@Test public void test_cleanup() { assertEntries(entry(1, 1), entry(2, 2), entry(3, 3)); put(4, 4); assertEntries(entry(3, 3), entry(4, 4)); }
static void valueMustBeValid(EvaluationContext ctx, Object value) { if (!(value instanceof BigDecimal) && !(value instanceof LocalDate)) { ctx.notifyEvt(() -> new ASTEventBase(FEELEvent.Severity.ERROR, Msg.createMessage(Msg.VALUE_X_NOT_A_VALID_ENDPOINT_FOR_RANGE_BECAUSE_NOT_A_NUMBER_NOT_A_DATE, value), null)); throw new EndpointOfRangeNotValidTypeException(); } }
@Test void valueMustBeValidFalseTest() { try { valueMustBeValid(ctx, "INVALID"); fail("Expected EndpointOfRangeNotValidTypeException"); } catch (Exception e) { assertTrue(e instanceof EndpointOfRangeNotValidTypeException); final ArgumentCaptor<FEELEvent> captor = ArgumentCaptor.forClass(FEELEvent.class); verify(listener, times(1)).onEvent(captor.capture()); } }
public boolean checkAndRefresh() { FileTime newLastModifiedTime = updateLastModifiedTime(); if (newLastModifiedTime != null && !newLastModifiedTime.equals(lastModifiedTime)) { this.lastModifiedTime = newLastModifiedTime; return true; } return false; }
@Test(dataProvider = "files") public void testFileModified(String fileName) throws IOException, InterruptedException { Path path = Paths.get(fileName); createFile(path); FileModifiedTimeUpdater fileModifiedTimeUpdater = new FileModifiedTimeUpdater(fileName); Thread.sleep(2000); Files.setLastModifiedTime(path, FileTime.fromMillis(System.currentTimeMillis())); FileTime fileTime = fileModifiedTimeUpdater.getLastModifiedTime(); Assert.assertTrue(fileModifiedTimeUpdater.checkAndRefresh()); Assert.assertNotEquals(fileTime, fileModifiedTimeUpdater.getLastModifiedTime()); }
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"}) void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas, MigrationDecisionCallback callback) { assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: " + Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas); if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas)); logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas)); } initState(oldReplicas); assertNoDuplicate(partitionId, oldReplicas, newReplicas); // fix cyclic partition replica movements if (fixCycle(oldReplicas, newReplicas)) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId, Arrays.toString(newReplicas)); } } int currentIndex = 0; while (currentIndex < oldReplicas.length) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex, Arrays.toString(state)); } assertNoDuplicate(partitionId, oldReplicas, newReplicas); if (newReplicas[currentIndex] == null) { if (state[currentIndex] != null) { // replica owner is removed and no one will own this replica logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1); state[currentIndex] = null; } currentIndex++; continue; } if (state[currentIndex] == null) { int i = getReplicaIndex(state, newReplicas[currentIndex]); if (i == -1) { // fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex); state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (i > currentIndex) { // SHIFT UP replica from i to currentIndex, copy data from partition owner logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId, state[i], i, currentIndex); callback.migrate(null, -1, -1, state[i], i, currentIndex); state[currentIndex] = state[i]; state[i] = null; continue; } throw new AssertionError("partitionId=" + partitionId + "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas)); } if (newReplicas[currentIndex].equals(state[currentIndex])) { // no change, no action needed currentIndex++; continue; } if (getReplicaIndex(newReplicas, state[currentIndex]) == -1 && getReplicaIndex(state, newReplicas[currentIndex]) == -1) { // MOVE partition replica from its old owner to new owner logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) { int newIndex = getReplicaIndex(newReplicas, state[currentIndex]); assert newIndex > currentIndex : "partitionId=" + partitionId + ", Migration decision algorithm failed during SHIFT DOWN! 
INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); if (state[newIndex] == null) { // it is a SHIFT DOWN logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId, state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex); state[newIndex] = state[currentIndex]; } else { logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); } state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex); } assert Arrays.equals(state, newReplicas) : "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas) + " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); }
@Test public void test_SHIFT_UPS_performedBy_MOVE() throws UnknownHostException { final PartitionReplica[] oldReplicas = { new PartitionReplica(new Address("localhost", 5701), uuids[0]), new PartitionReplica(new Address("localhost", 5702), uuids[1]), new PartitionReplica(new Address("localhost", 5703), uuids[2]), new PartitionReplica(new Address("localhost", 5704), uuids[3]), null, null, null, }; final PartitionReplica[] newReplicas = { new PartitionReplica(new Address("localhost", 5701), uuids[0]), new PartitionReplica(new Address("localhost", 5703), uuids[2]), new PartitionReplica(new Address("localhost", 5704), uuids[3]), new PartitionReplica(new Address("localhost", 5705), uuids[4]), null, null, null, }; migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5704), uuids[3]), 3, -1, new PartitionReplica(new Address("localhost", 5705), uuids[4]), -1, 3); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5703), uuids[2]), 2, -1, new PartitionReplica(new Address("localhost", 5704), uuids[3]), -1, 2); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, -1, new PartitionReplica(new Address("localhost", 5703), uuids[2]), -1, 1); }
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final SDSApiClient client = session.getClient(); final DownloadTokenGenerateResponse token = new NodesApi(session.getClient()).generateDownloadUrl(Long.valueOf(nodeid.getVersionId(file)), StringUtils.EMPTY); final HttpUriRequest request = new HttpGet(token.getDownloadUrl()); request.addHeader("X-Sds-Auth-Token", StringUtils.EMPTY); if(status.isAppend()) { final HttpRange range = HttpRange.withStatus(status); final String header; if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) { header = String.format("bytes=%d-", range.getStart()); } else { header = String.format("bytes=%d-%d", range.getStart(), range.getEnd()); } if(log.isDebugEnabled()) { log.debug(String.format("Add range header %s for file %s", header, file)); } request.addHeader(new BasicHeader(HttpHeaders.RANGE, header)); // Disable compression request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity")); } final HttpResponse response = client.getClient().execute(request); switch(response.getStatusLine().getStatusCode()) { case HttpStatus.SC_OK: case HttpStatus.SC_PARTIAL_CONTENT: return new HttpMethodReleaseInputStream(response, status); case HttpStatus.SC_NOT_FOUND: nodeid.cache(file, null); // Break through default: throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException( response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file); } } catch(ApiException e) { throw new SDSExceptionMappingService(nodeid).map("Download {0} failed", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file); } }
@Test public void testReadCloseReleaseEntity() throws Exception { final TransferStatus status = new TransferStatus(); final byte[] content = RandomUtils.nextBytes(32769); final TransferStatus writeStatus = new TransferStatus(); writeStatus.setLength(content.length); final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path test = new Path(room, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); final SDSDirectS3MultipartWriteFeature writer = new SDSDirectS3MultipartWriteFeature(session, nodeid); final HttpResponseOutputStream<Node> out = writer.write(test, writeStatus, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(writeStatus, writeStatus).transfer(new ByteArrayInputStream(content), out); final CountingInputStream in = new CountingInputStream(new SDSReadFeature(session, nodeid).read(test, status, new DisabledConnectionCallback())); in.close(); assertEquals(0L, in.getByteCount(), 0L); new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
NewExternalIssue mapResult(String driverName, @Nullable Result.Level ruleSeverity, @Nullable Result.Level ruleSeverityForNewTaxonomy, Result result) { NewExternalIssue newExternalIssue = sensorContext.newExternalIssue(); newExternalIssue.type(DEFAULT_TYPE); newExternalIssue.engineId(driverName); newExternalIssue.severity(toSonarQubeSeverity(ruleSeverity)); newExternalIssue.ruleId(requireNonNull(result.getRuleId(), "No ruleId found for issue thrown by driver " + driverName)); newExternalIssue.cleanCodeAttribute(DEFAULT_CLEAN_CODE_ATTRIBUTE); newExternalIssue.addImpact(DEFAULT_SOFTWARE_QUALITY, toSonarQubeImpactSeverity(ruleSeverityForNewTaxonomy)); mapLocations(result, newExternalIssue); return newExternalIssue; }
@Test public void mapResult_useResultMessageForIssue() { Location location = new Location(); result.withLocations(List.of(location)); resultMapper.mapResult(DRIVER_NAME, WARNING, WARNING, result); verify(newExternalIssueLocation).message("Result message"); }
@Override public PMML_MODEL getPMMLModelType() { logger.trace("getPMMLModelType"); return PMML_MODEL.SCORECARD_MODEL; }
@Test void getPMMLModelType() { assertThat(PROVIDER.getPMMLModelType()).isEqualTo(PMML_MODEL.SCORECARD_MODEL); }
public EnvironmentVariableContext initialEnvironmentVariableContext() { return initialContext; }
@Test void shouldSetUpGoGeneratedEnvironmentContextCorrectly() throws Exception { BuildAssignment buildAssigment = createAssignment(null); EnvironmentVariableContext environmentVariableContext = buildAssigment.initialEnvironmentVariableContext(); assertThat(environmentVariableContext.getProperty("GO_REVISION")).isEqualTo("3"); assertThat(environmentVariableContext.getProperty("GO_PIPELINE_NAME")).isEqualTo(PIPELINE_NAME); assertThat(environmentVariableContext.getProperty("GO_PIPELINE_LABEL")).isEqualTo("1"); assertThat(environmentVariableContext.getProperty("GO_STAGE_NAME")).isEqualTo(STAGE_NAME); assertThat(environmentVariableContext.getProperty("GO_STAGE_COUNTER")).isEqualTo("1"); assertThat(environmentVariableContext.getProperty("GO_JOB_NAME")).isEqualTo(JOB_NAME); assertThat(environmentVariableContext.getProperty("GO_TRIGGER_USER")).isEqualTo(TRIGGERED_BY_USER); }
public MemoryLRUCacheBytesIterator reverseRange(final String namespace, final Bytes from, final Bytes to) { final NamedCache cache = getCache(namespace); if (cache == null) { return new MemoryLRUCacheBytesIterator(Collections.emptyIterator(), new NamedCache(namespace, this.metrics)); } return new MemoryLRUCacheBytesIterator(cache.reverseKeyRange(from, to), cache); }
@Test public void shouldReturnFalseIfNoNextKeyReverseRange() { final ThreadCache cache = setupThreadCache(-1, 0, 10000L, true); final ThreadCache.MemoryLRUCacheBytesIterator iterator = cache.reverseRange(namespace, Bytes.wrap(new byte[]{0}), Bytes.wrap(new byte[]{1})); assertFalse(iterator.hasNext()); }
@Override public void purgeFlowRules(DeviceId deviceId) { checkPermission(FLOWRULE_WRITE); checkNotNull(deviceId, DEVICE_ID_NULL); store.purgeFlowRule(deviceId); }
@Test public void purgeFlowRules() { FlowRule f1 = addFlowRule(1); FlowRule f2 = addFlowRule(2); FlowRule f3 = addFlowRule(3); assertEquals("3 rules should exist", 3, flowCount()); FlowEntry fe1 = new DefaultFlowEntry(f1); FlowEntry fe2 = new DefaultFlowEntry(f2); FlowEntry fe3 = new DefaultFlowEntry(f3); providerService.pushFlowMetrics(DID, ImmutableList.of(fe1, fe2, fe3)); validateEvents(RULE_ADD_REQUESTED, RULE_ADD_REQUESTED, RULE_ADD_REQUESTED, RULE_ADDED, RULE_ADDED, RULE_ADDED); mgr.purgeFlowRules(DID); assertEquals("0 rule should exist", 0, flowCount()); }
public JerseyClientBuilder using(JerseyClientConfiguration configuration) { this.configuration = configuration; apacheHttpClientBuilder.using(configuration); return this; }
@Test void usesACustomHttpClientMetricNameStrategy() { final HttpClientMetricNameStrategy customStrategy = HttpClientMetricNameStrategies.HOST_AND_METHOD; builder.using(customStrategy); verify(apacheHttpClientBuilder).using(customStrategy); }
@Override public Class<SingleRuleConfiguration> getType() { return SingleRuleConfiguration.class; }
@Test void assertGetType() { SingleRuleConfigurationToDistSQLConverter singleRuleConfigurationToDistSQLConverter = new SingleRuleConfigurationToDistSQLConverter(); assertThat(singleRuleConfigurationToDistSQLConverter.getType().getName(), is("org.apache.shardingsphere.single.config.SingleRuleConfiguration")); }
int getDefaultCollationStrength() { return getDefaultCollationStrength( Locale.getDefault() ); }
@Test public void testGetDefaultStrength() { SortRowsMeta srm = new SortRowsMeta(); int usStrength = srm.getDefaultCollationStrength( Locale.US ); assertEquals( Collator.TERTIARY, usStrength ); assertEquals( Collator.IDENTICAL, srm.getDefaultCollationStrength( null ) ); }
public static Status unblock( final UnsafeBuffer logMetaDataBuffer, final UnsafeBuffer termBuffer, final int blockedOffset, final int tailOffset, final int termId) { Status status = NO_ACTION; int frameLength = frameLengthVolatile(termBuffer, blockedOffset); if (frameLength < 0) { resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, -frameLength); status = UNBLOCKED; } else if (0 == frameLength) { int currentOffset = blockedOffset + FRAME_ALIGNMENT; while (currentOffset < tailOffset) { frameLength = frameLengthVolatile(termBuffer, currentOffset); if (frameLength != 0) { if (scanBackToConfirmZeroed(termBuffer, currentOffset, blockedOffset)) { final int length = currentOffset - blockedOffset; resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length); status = UNBLOCKED; } break; } currentOffset += FRAME_ALIGNMENT; } if (currentOffset == termBuffer.capacity()) { if (0 == frameLengthVolatile(termBuffer, blockedOffset)) { final int length = currentOffset - blockedOffset; resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length); status = UNBLOCKED_TO_END; } } } return status; }
@Test void shouldTakeNoActionWhenNoUnblockedMessage() { final int termOffset = 0; final int tailOffset = TERM_BUFFER_CAPACITY / 2; assertEquals( NO_ACTION, TermUnblocker.unblock(mockLogMetaDataBuffer, mockTermBuffer, termOffset, tailOffset, TERM_ID)); }
@Override public ChannelFuture writePing(ChannelHandlerContext ctx, boolean ack, long data, ChannelPromise promise) { // Only apply the limit to ping acks. if (ack) { ChannelPromise newPromise = handleOutstandingControlFrames(ctx, promise); if (newPromise == null) { return promise; } return super.writePing(ctx, ack, data, newPromise); } return super.writePing(ctx, ack, data, promise); }
@Test public void testNotLimitPing() { assertTrue(encoder.writePing(ctx, false, 8, newPromise()).isSuccess()); assertTrue(encoder.writePing(ctx, false, 8, newPromise()).isSuccess()); assertTrue(encoder.writePing(ctx, false, 8, newPromise()).isSuccess()); assertTrue(encoder.writePing(ctx, false, 8, newPromise()).isSuccess()); verifyFlushAndClose(0, false); }
public void clearAllParagraphOutput() { for (Paragraph p : paragraphs) { p.setReturn(null, null); } }
@Test void clearAllParagraphOutputTest() throws InterpreterNotFoundException { Note note = new Note("test", "", interpreterFactory, interpreterSettingManager, paragraphJobListener, credentials, noteEventListener, zConf, noteParser); Paragraph p1 = note.addNewParagraph(AuthenticationInfo.ANONYMOUS); InterpreterResult result = new InterpreterResult(InterpreterResult.Code.SUCCESS, InterpreterResult.Type.TEXT, "result"); p1.setResult(result); Paragraph p2 = note.addNewParagraph(AuthenticationInfo.ANONYMOUS); p2.setReturn(result, new Throwable()); note.clearAllParagraphOutput(); assertNull(p1.getReturn()); assertNull(p2.getReturn()); }
@ExecuteOn(TaskExecutors.IO) @Get(uri = "/{executionId}/flow") @Operation(tags = {"Executions"}, summary = "Get flow information for an execution") public FlowForExecution getFlowForExecutionById( @Parameter(description = "The execution that you want flow information for") String executionId ) { Execution execution = executionRepository.findById(tenantService.resolveTenant(), executionId).orElseThrow(); return FlowForExecution.of(flowRepository.findByExecutionWithoutAcl(execution)); }
@SuppressWarnings("DataFlowIssue") @Test void getFlowForExecutionById() { Execution execution = client.toBlocking().retrieve( HttpRequest .POST( "/api/v1/executions/webhook/" + TESTS_FLOW_NS + "/webhook/" + TESTS_WEBHOOK_KEY + "?name=john&age=12&age=13", ImmutableMap.of("a", 1, "b", true) ), Execution.class ); FlowForExecution result = client.toBlocking().retrieve( GET("/api/v1/executions/" + execution.getId() + "/flow"), FlowForExecution.class ); assertThat(result.getId(), is(execution.getFlowId())); assertThat(result.getTriggers(), hasSize(1)); assertThat((result.getTriggers().getFirst() instanceof AbstractTriggerForExecution), is(true)); }
@Override public Map<String, String> getAddresses() { AwsCredentials credentials = awsCredentialsProvider.credentials(); Map<String, String> instances = Collections.emptyMap(); if (!awsConfig.anyOfEcsPropertiesConfigured()) { instances = awsEc2Api.describeInstances(credentials); } if (awsConfig.anyOfEc2PropertiesConfigured()) { return instances; } if (instances.isEmpty() && DiscoveryMode.Client == awsConfig.getDiscoveryMode()) { return getEcsAddresses(credentials); } return instances; }
@Test public void doNotGetEcsAddressesWhenEc2Configured() { AwsCredentials credentials = AwsCredentials.builder() .setAccessKey("access-key") .setSecretKey("secret-key") .setToken("token") .build(); AwsConfig awsConfig = AwsConfig.builder() .setDiscoveryMode(DiscoveryMode.Client) .setSecurityGroupName("my-security-group") .build(); awsEc2Client = new AwsEc2Client(awsEc2Api, awsEcsApi, awsMetadataApi, awsCredentialsProvider, awsConfig); given(awsCredentialsProvider.credentials()).willReturn(credentials); given(awsEc2Api.describeInstances(credentials)).willReturn(emptyMap()); // when Map<String, String> result = awsEc2Client.getAddresses(); // then then(awsEcsApi).should(never()).listTaskPrivateAddresses("CLUSTER", credentials); then(awsEc2Api).should(never()).describeNetworkInterfaces(anyList(), any(AwsCredentials.class)); assertEquals(emptyMap(), result); }
static Version parseVersion(String versionString) { final StringTokenizer st = new StringTokenizer(versionString, "."); int majorVersion = Integer.parseInt(st.nextToken()); int minorVersion; if (st.hasMoreTokens()) minorVersion = Integer.parseInt(st.nextToken()); else minorVersion = 0; return new Version(majorVersion, minorVersion); }
@Test public void testJavaVersion() { Java.Version v = Java.parseVersion("9"); assertEquals(9, v.majorVersion); assertEquals(0, v.minorVersion); assertTrue(v.isJava9Compatible()); v = Java.parseVersion("9.0.1"); assertEquals(9, v.majorVersion); assertEquals(0, v.minorVersion); assertTrue(v.isJava9Compatible()); v = Java.parseVersion("9.0.0.15"); // Azul Zulu assertEquals(9, v.majorVersion); assertEquals(0, v.minorVersion); assertTrue(v.isJava9Compatible()); v = Java.parseVersion("9.1"); assertEquals(9, v.majorVersion); assertEquals(1, v.minorVersion); assertTrue(v.isJava9Compatible()); v = Java.parseVersion("1.8.0_152"); assertEquals(1, v.majorVersion); assertEquals(8, v.minorVersion); assertFalse(v.isJava9Compatible()); v = Java.parseVersion("1.7.0_80"); assertEquals(1, v.majorVersion); assertEquals(7, v.minorVersion); assertFalse(v.isJava9Compatible()); }
@Override public String toString(final RouteUnit routeUnit) { Map<String, String> logicAndActualTables = getLogicAndActualTables(routeUnit); StringBuilder result = new StringBuilder(); int index = 0; for (Projection each : projections) { if (index > 0) { result.append(COLUMN_NAME_SPLITTER); } result.append(getColumnExpression(each, logicAndActualTables)); index++; } return result.toString(); }
@Test void assertToStringWithOwnerQuote() { Collection<Projection> projectionsWithOwnerQuote = Collections.singletonList(new ColumnProjection(new IdentifierValue("temp", QuoteCharacter.BACK_QUOTE), new IdentifierValue("id", QuoteCharacter.BACK_QUOTE), new IdentifierValue("id", QuoteCharacter.BACK_QUOTE), mock(DatabaseType.class))); assertThat(new SubstitutableColumnNameToken(0, 1, projectionsWithOwnerQuote, TypedSPILoader.getService(DatabaseType.class, "MySQL")).toString(mock(RouteUnit.class)), is("`temp`.`id` AS `id`")); Collection<Projection> projectionsWithoutOwnerQuote = Collections.singletonList(new ColumnProjection(new IdentifierValue("temp", QuoteCharacter.NONE), new IdentifierValue("id", QuoteCharacter.BACK_QUOTE), new IdentifierValue("id", QuoteCharacter.BACK_QUOTE), mock(DatabaseType.class))); assertThat(new SubstitutableColumnNameToken(0, 1, projectionsWithoutOwnerQuote, TypedSPILoader.getService(DatabaseType.class, "MySQL")).toString(mock(RouteUnit.class)), is("temp.`id` AS `id`")); }
@Override public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant, Option<HoodieInstant> lastSuccessfulInstant) { HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline(); // To find which instants are conflicting, we apply the following logic // 1. Get completed instants timeline only for commits that have happened since the last successful write. // 2. Get any scheduled or completed compaction or clustering operations that have started and/or finished // after the current instant. We need to check for write conflicts since they may have mutated the same files // that are being newly created by the current write. Stream<HoodieInstant> completedCommitsInstantStream = activeTimeline .getCommitsTimeline() .filterCompletedInstants() .findInstantsAfter(lastSuccessfulInstant.isPresent() ? lastSuccessfulInstant.get().getTimestamp() : HoodieTimeline.INIT_INSTANT_TS) .getInstantsAsStream(); Stream<HoodieInstant> compactionAndClusteringPendingTimeline = activeTimeline .filterPendingReplaceClusteringAndCompactionTimeline() .filter(instant -> ClusteringUtils.isClusteringInstant(activeTimeline, instant) || HoodieTimeline.COMPACTION_ACTION.equals(instant.getAction())) .findInstantsAfter(currentInstant.getTimestamp()) .getInstantsAsStream(); return Stream.concat(completedCommitsInstantStream, compactionAndClusteringPendingTimeline); }
@Test public void testConcurrentWritesWithPendingInsertOverwriteReplace() throws Exception { createCommit(metaClient.createNewInstantTime(), metaClient); HoodieActiveTimeline timeline = metaClient.getActiveTimeline(); // consider commits before this are all successful Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant(); // writer 1 starts String currentWriterInstant = metaClient.createNewInstantTime(); createInflightCommit(currentWriterInstant, metaClient); // insert_overwrite 1 gets scheduled and inflighted String newInstantTime = metaClient.createNewInstantTime(); createPendingInsertOverwrite(newInstantTime, WriteOperationType.INSERT_OVERWRITE, metaClient); Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant)); SimpleConcurrentFileWritesConflictResolutionStrategy strategy = new SimpleConcurrentFileWritesConflictResolutionStrategy(); HoodieCommitMetadata currentMetadata = createCommitMetadata(currentWriterInstant); metaClient.reloadActiveTimeline(); List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect( Collectors.toList()); // writer 1 will not conflict with insert_overwrite 1 Assertions.assertEquals(0, candidateInstants.size()); }
public static DataflowRunner fromOptions(PipelineOptions options) { DataflowPipelineOptions dataflowOptions = PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options); ArrayList<String> missing = new ArrayList<>(); if (dataflowOptions.getAppName() == null) { missing.add("appName"); } if (Strings.isNullOrEmpty(dataflowOptions.getRegion()) && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) { missing.add("region"); } if (missing.size() > 0) { throw new IllegalArgumentException( "Missing required pipeline options: " + Joiner.on(',').join(missing)); } validateWorkerSettings( PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options)); PathValidator validator = dataflowOptions.getPathValidator(); String gcpTempLocation; try { gcpTempLocation = dataflowOptions.getGcpTempLocation(); } catch (Exception e) { throw new IllegalArgumentException( "DataflowRunner requires gcpTempLocation, " + "but failed to retrieve a value from PipelineOptions", e); } validator.validateOutputFilePrefixSupported(gcpTempLocation); String stagingLocation; try { stagingLocation = dataflowOptions.getStagingLocation(); } catch (Exception e) { throw new IllegalArgumentException( "DataflowRunner requires stagingLocation, " + "but failed to retrieve a value from PipelineOptions", e); } validator.validateOutputFilePrefixSupported(stagingLocation); if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) { validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs()); } if (dataflowOptions.getFilesToStage() != null) { // The user specifically requested these files, so fail now if they do not exist. // (automatically detected classpath elements are permitted to not exist, so later // staging will not fail on nonexistent files) dataflowOptions.getFilesToStage().stream() .forEach( stagedFileSpec -> { File localFile; if (stagedFileSpec.contains("=")) { String[] components = stagedFileSpec.split("=", 2); localFile = new File(components[1]); } else { localFile = new File(stagedFileSpec); } if (!localFile.exists()) { // should be FileNotFoundException, but for build-time backwards compatibility // cannot add checked exception throw new RuntimeException( String.format("Non-existent files specified in filesToStage: %s", localFile)); } }); } else { dataflowOptions.setFilesToStage( detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options)); if (dataflowOptions.getFilesToStage().isEmpty()) { throw new IllegalArgumentException("No files to stage have been found."); } else { LOG.info( "PipelineOptions.filesToStage was not specified. " + "Defaulting to files from the classpath: will stage {} files. " + "Enable logging at DEBUG level to see which files will be staged.", dataflowOptions.getFilesToStage().size()); LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage()); } } // Verify jobName according to service requirements, truncating and converting to lowercase if // necessary. String jobName = dataflowOptions.getJobName().toLowerCase(); checkArgument( jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"), "JobName invalid; the name must consist of only the characters " + "[-a-z0-9], starting with a letter and ending with a letter " + "or number"); if (!jobName.equals(dataflowOptions.getJobName())) { LOG.info( "PipelineOptions.jobName did not match the service requirements. " + "Using {} instead of {}.", jobName, dataflowOptions.getJobName()); } dataflowOptions.setJobName(jobName); // Verify project String project = dataflowOptions.getProject(); if (project.matches("[0-9]*")) { throw new IllegalArgumentException( "Project ID '" + project + "' invalid. Please make sure you specified the Project ID, not project number."); } else if (!project.matches(PROJECT_ID_REGEXP)) { throw new IllegalArgumentException( "Project ID '" + project + "' invalid. Please make sure you specified the Project ID, not project" + " description."); } DataflowPipelineDebugOptions debugOptions = dataflowOptions.as(DataflowPipelineDebugOptions.class); // Verify the number of worker threads is a valid value if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) { throw new IllegalArgumentException( "Number of worker harness threads '" + debugOptions.getNumberOfWorkerHarnessThreads() + "' invalid. Please make sure the value is non-negative."); } // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11 if (dataflowOptions.getRecordJfrOnGcThrashing() && Environments.getJavaVersion() == Environments.JavaVersion.java8) { throw new IllegalArgumentException( "recordJfrOnGcThrashing is only supported on java 9 and up."); } if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) { dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT); } // Adding the Java version to the SDK name for users' and support convenience. String agentJavaVer = "(JRE 8 environment)"; if (Environments.getJavaVersion() != Environments.JavaVersion.java8) { agentJavaVer = String.format("(JRE %s environment)", Environments.getJavaVersion().specification()); } DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String userAgentName = dataflowRunnerInfo.getName(); Preconditions.checkArgument( !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty."); String userAgentVersion = dataflowRunnerInfo.getVersion(); Preconditions.checkArgument( !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty."); String userAgent = String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_"); dataflowOptions.setUserAgent(userAgent); return new DataflowRunner(dataflowOptions); }
@Test public void testValidJobName() throws IOException { List<String> names = Arrays.asList("ok", "Ok", "A-Ok", "ok-123", "this-one-is-fairly-long-01234567890123456789"); for (String name : names) { DataflowPipelineOptions options = buildPipelineOptions(); options.setJobName(name); DataflowRunner runner = DataflowRunner.fromOptions(options); assertNotNull(runner); } }
public static String getTypeName(final int type) { switch (type) { case START_EVENT_V3: return "Start_v3"; case STOP_EVENT: return "Stop"; case QUERY_EVENT: return "Query"; case ROTATE_EVENT: return "Rotate"; case INTVAR_EVENT: return "Intvar"; case LOAD_EVENT: return "Load"; case NEW_LOAD_EVENT: return "New_load"; case SLAVE_EVENT: return "Slave"; case CREATE_FILE_EVENT: return "Create_file"; case APPEND_BLOCK_EVENT: return "Append_block"; case DELETE_FILE_EVENT: return "Delete_file"; case EXEC_LOAD_EVENT: return "Exec_load"; case RAND_EVENT: return "RAND"; case XID_EVENT: return "Xid"; case USER_VAR_EVENT: return "User var"; case FORMAT_DESCRIPTION_EVENT: return "Format_desc"; case TABLE_MAP_EVENT: return "Table_map"; case PRE_GA_WRITE_ROWS_EVENT: return "Write_rows_event_old"; case PRE_GA_UPDATE_ROWS_EVENT: return "Update_rows_event_old"; case PRE_GA_DELETE_ROWS_EVENT: return "Delete_rows_event_old"; case WRITE_ROWS_EVENT_V1: return "Write_rows_v1"; case UPDATE_ROWS_EVENT_V1: return "Update_rows_v1"; case DELETE_ROWS_EVENT_V1: return "Delete_rows_v1"; case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; case INCIDENT_EVENT: return "Incident"; case HEARTBEAT_LOG_EVENT: case HEARTBEAT_LOG_EVENT_V2: return "Heartbeat"; case IGNORABLE_LOG_EVENT: return "Ignorable"; case ROWS_QUERY_LOG_EVENT: return "Rows_query"; case WRITE_ROWS_EVENT: return "Write_rows"; case UPDATE_ROWS_EVENT: return "Update_rows"; case DELETE_ROWS_EVENT: return "Delete_rows"; case GTID_LOG_EVENT: return "Gtid"; case ANONYMOUS_GTID_LOG_EVENT: return "Anonymous_Gtid"; case PREVIOUS_GTIDS_LOG_EVENT: return "Previous_gtids"; case PARTIAL_UPDATE_ROWS_EVENT: return "Update_rows_partial"; case TRANSACTION_CONTEXT_EVENT : return "Transaction_context"; case VIEW_CHANGE_EVENT : return "view_change"; case XA_PREPARE_LOG_EVENT : return "Xa_prepare"; case TRANSACTION_PAYLOAD_EVENT : return "transaction_payload"; default: return "Unknown type:" + type; } }
@Test public void getTypeNameInputPositiveOutputNotNull9() { // Arrange final int type = 11; // Act final String actual = LogEvent.getTypeName(type); // Assert result Assert.assertEquals("Delete_file", actual); }
static BlockStmt getComplexPartialScoreVariableDeclaration(final String variableName, final ComplexPartialScore complexPartialScore) { final MethodDeclaration methodDeclaration = COMPLEX_PARTIAL_SCORE_TEMPLATE.getMethodsByName(GETKIEPMMLCOMPLEXPARTIALSCORE).get(0).clone(); final BlockStmt complexPartialScoreBody = methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration))); final VariableDeclarator variableDeclarator = getVariableDeclarator(complexPartialScoreBody, COMPLEX_PARTIAL_SCORE) .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, COMPLEX_PARTIAL_SCORE, complexPartialScoreBody))); variableDeclarator.setName(variableName); final BlockStmt toReturn = new BlockStmt(); String nestedVariableName = String.format(VARIABLE_NAME_TEMPLATE, variableName, 0); BlockStmt toAdd = getKiePMMLExpressionBlockStmt(nestedVariableName, complexPartialScore.getExpression()); toAdd.getStatements().forEach(toReturn::addStatement); final ObjectCreationExpr objectCreationExpr = variableDeclarator.getInitializer() .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, COMPLEX_PARTIAL_SCORE, toReturn))) .asObjectCreationExpr(); objectCreationExpr.getArguments().set(0, new StringLiteralExpr(variableName)); objectCreationExpr.getArguments().set(2, new NameExpr(nestedVariableName)); complexPartialScoreBody.getStatements().forEach(toReturn::addStatement); return toReturn; }
@Test void getComplexPartialScoreVariableDeclarationWithApply() throws IOException { final String variableName = "variableName"; Constant constant = new Constant(); constant.setValue(value1); FieldRef fieldRef = new FieldRef(); fieldRef.setField("FIELD_REF"); Apply apply = new Apply(); apply.setFunction("/"); apply.addExpressions(constant, fieldRef); ComplexPartialScore complexPartialScore = new ComplexPartialScore(); complexPartialScore.setExpression(apply); BlockStmt retrieved = KiePMMLComplexPartialScoreFactory.getComplexPartialScoreVariableDeclaration(variableName, complexPartialScore); String text = getFileContent(TEST_03_SOURCE); Statement expected = JavaParserUtils.parseBlock(String.format(text, constant.getValue(),fieldRef.getField(), apply.getFunction(), apply.getInvalidValueTreatment().value(), variableName)); assertThat(retrieved).isEqualTo(expected); List<Class<?>> imports = Arrays.asList(KiePMMLConstant.class, KiePMMLFieldRef.class, KiePMMLApply.class, KiePMMLComplexPartialScore.class, Arrays.class, Collections.class); commonValidateCompilationWithImports(retrieved, imports); }
@Override public Object[] toArray() { return list.toArray(); }
@Test public void testToArray() { Set<String> set = redisson.getSortedSet("set"); set.add("1"); set.add("4"); set.add("2"); set.add("5"); set.add("3"); assertThat(set.toArray()).contains("1", "4", "2", "5", "3"); String[] strs = set.toArray(new String[0]); assertThat(strs).contains("1", "4", "2", "5", "3"); }
static FileFilter ignoredFilesFilter() { var ioFileFilters = IGNORED_FILES.stream() .map(NameFileFilter::new) .map(IOFileFilter.class::cast) .toList(); return new NotFileFilter(new OrFileFilter(ioFileFilters)); }
@Test public void it_does_not_include_files_to_be_ignored() { var dsStore = new File("target/classes/.DS_Store"); assertFalse(ignoredFilesFilter().accept(dsStore)); }
public static byte[] parseMAC(String value) {
    final byte[] machineId;
    final char separator;
    switch (value.length()) {
        case 17:
            separator = value.charAt(2);
            validateMacSeparator(separator);
            machineId = new byte[EUI48_MAC_ADDRESS_LENGTH];
            break;
        case 23:
            separator = value.charAt(2);
            validateMacSeparator(separator);
            machineId = new byte[EUI64_MAC_ADDRESS_LENGTH];
            break;
        default:
            throw new IllegalArgumentException("value is not supported [MAC-48, EUI-48, EUI-64]");
    }

    final int end = machineId.length - 1;
    int j = 0;
    for (int i = 0; i < end; ++i, j += 3) {
        final int sIndex = j + 2;
        machineId[i] = StringUtil.decodeHexByte(value, j);
        if (value.charAt(sIndex) != separator) {
            throw new IllegalArgumentException("expected separator '" + separator + "' but got '"
                + value.charAt(sIndex) + "' at index: " + sIndex);
        }
    }
    machineId[end] = StringUtil.decodeHexByte(value, j);
    return machineId;
}
@Test public void testParseMacInvalidEUI48TrailingSeparatorA() { assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() { parseMAC("00-AA-11-BB-22-CC-"); } }); }
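The suite above only exercises a failure path. A passing-path sketch follows; it assumes the same test class, a statically imported parseMAC, a JUnit assertArrayEquals import, and an EUI48_MAC_ADDRESS_LENGTH of six bytes, none of which are shown in the original record.

@Test
public void testParseMacValidEUI48() {
    // Sketch only: "00-AA-11-BB-22-CC" is 17 characters, so the EUI-48 branch runs,
    // '-' is validated as the separator, and each hex pair decodes to one byte.
    byte[] expected = {0x00, (byte) 0xAA, 0x11, (byte) 0xBB, 0x22, (byte) 0xCC};
    assertArrayEquals(expected, parseMAC("00-AA-11-BB-22-CC"));
}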
@Override
public void fetchSegmentToLocal(URI downloadURI, File dest) throws Exception {
    // Create a RoundRobinURIProvider to round-robin IP addresses when retrying the download.
    // Otherwise we may keep trying the same broken host, because: 1) DNS may not round-robin the
    // IP addresses, and 2) the OS caches DNS resolution results.
    RoundRobinURIProvider uriProvider = new RoundRobinURIProvider(List.of(downloadURI), true);

    int retryCount = getRetryCount(uriProvider);

    _logger.info("Retry downloading for {} times. retryCount from pinot server config: {}, number of IP addresses for "
        + "download URI: {}", retryCount, _retryCount, uriProvider.numAddresses());
    RetryPolicies.exponentialBackoffRetryPolicy(retryCount, _retryWaitMs, _retryDelayScaleFactor).attempt(() -> {
        URI uri = uriProvider.next();
        try {
            String hostName = downloadURI.getHost();
            int port = downloadURI.getPort();
            // If the original download address is specified as a host name, add a "HOST" HTTP header to the
            // HTTP request. Otherwise, if the download address is an LB address and the LB is configured to
            // "disallow direct access by IP address", downloading will fail.
            List<Header> httpHeaders = new LinkedList<>();
            if (!InetAddresses.isInetAddress(hostName)) {
                httpHeaders.add(new BasicHeader(HttpHeaders.HOST, hostName + ":" + port));
            }
            int statusCode = _httpClient.downloadFile(uri, dest, _authProvider, httpHeaders);
            _logger.info("Downloaded segment from: {} to: {} of size: {}; Response status code: {}", uri, dest,
                dest.length(), statusCode);
            return true;
        } catch (HttpErrorStatusException e) {
            int statusCode = e.getStatusCode();
            if (statusCode == HttpStatus.SC_NOT_FOUND || statusCode >= 500) {
                // Temporary exception
                // 404 is treated as a temporary exception because the downloadURI may be backed by multiple
                // hosts; if a single host is down, we can retry with another host.
                _logger.warn("Got temporary error status code: {} while downloading segment from: {} to: {}",
                    statusCode, uri, dest, e);
                return false;
            } else {
                // Permanent exception
                _logger.error("Got permanent error status code: {} while downloading segment from: {} to: {}, won't retry",
                    statusCode, uri, dest, e);
                throw e;
            }
        } catch (Exception e) {
            _logger.warn("Caught exception while downloading segment from: {} to: {}", uri, dest, e);
            return false;
        }
    });
}
@Test(expectedExceptions = AttemptsExceededException.class) public void testFetchSegmentToLocalFailureWithNoPeerServers() throws Exception { FileUploadDownloadClient client = mock(FileUploadDownloadClient.class); // The download always succeeds when(client.downloadFile(any(), any(), any())).thenReturn(200); HttpSegmentFetcher segmentFetcher = getSegmentFetcher(client); List<URI> uris = List.of(); segmentFetcher.fetchSegmentToLocal(SEGMENT_NAME, () -> uris, SEGMENT_FILE); }
public static void main(String[] args) { // Getting the bar series BarSeries series = CsvTradesLoader.loadBitstampSeries(); // Building the trading strategy Strategy strategy = MovingMomentumStrategy.buildStrategy(series); /* * Building chart datasets */ TimeSeriesCollection dataset = new TimeSeriesCollection(); dataset.addSeries(buildChartTimeSeries(series, new ClosePriceIndicator(series), "Bitstamp Bitcoin (BTC)")); /* * Creating the chart */ JFreeChart chart = ChartFactory.createTimeSeriesChart("Bitstamp BTC", // title "Date", // x-axis label "Price", // y-axis label dataset, // data true, // create legend? true, // generate tooltips? false // generate URLs? ); XYPlot plot = (XYPlot) chart.getPlot(); DateAxis axis = (DateAxis) plot.getDomainAxis(); axis.setDateFormatOverride(new SimpleDateFormat("MM-dd HH:mm")); /* * Running the strategy and adding the buy and sell signals to plot */ addBuySellSignals(series, strategy, plot); /* * Displaying the chart */ displayChart(chart); }
@Test public void test() { BuyAndSellSignalsToChart.main(null); }
protected Permission toPermission(final Node node) { final Permission permission = new Permission(); if(node.getPermissions() != null) { switch(node.getType()) { case FOLDER: case ROOM: if(node.getPermissions().isCreate() // For existing files the delete role is also required to overwrite && node.getPermissions().isDelete()) { permission.setUser(Permission.Action.all); } else { permission.setUser(Permission.Action.read.or(Permission.Action.execute)); } break; case FILE: if(node.isIsEncrypted() != null && node.isIsEncrypted()) { try { if(null != session.keyPair()) { permission.setUser(Permission.Action.none.or(Permission.Action.read)); } else { log.warn(String.format("Missing read permission for node %s with missing key pair", node)); } } catch(BackgroundException e) { log.warn(String.format("Ignore failure %s retrieving key pair", e)); } } else { if(node.getPermissions().isRead()) { permission.setUser(Permission.Action.read); } } if(node.getPermissions().isChange() && node.getPermissions().isDelete()) { permission.setUser(permission.getUser().or(Permission.Action.write)); } break; } if(log.isDebugEnabled()) { log.debug(String.format("Map node permissions %s to %s", node.getPermissions(), permission)); } } return permission; }
@Test public void testPermissionsFile() throws Exception { final SDSAttributesAdapter f = new SDSAttributesAdapter(session); final Node node = new Node(); node.setIsEncrypted(false); node.setType(Node.TypeEnum.FILE); final NodePermissions permissions = new NodePermissions().read(false).delete(false).change(false).create(false); node.setPermissions(permissions); assertFalse(f.toPermission(node).isReadable()); assertFalse(f.toPermission(node).isWritable()); assertFalse(f.toPermission(node).isExecutable()); permissions.setRead(true); assertTrue(f.toPermission(node).isReadable()); permissions.setChange(true); assertTrue(f.toPermission(node).isReadable()); assertFalse(f.toPermission(node).isWritable()); permissions.setDelete(true); assertTrue(f.toPermission(node).isReadable()); assertTrue(f.toPermission(node).isWritable()); permissions.setCreate(true); assertTrue(f.toPermission(node).isReadable()); assertTrue(f.toPermission(node).isWritable()); f.toPermission(node); }
public void runPickle(Pickle pickle) { try { StepTypeRegistry stepTypeRegistry = createTypeRegistryForPickle(pickle); snippetGenerators = createSnippetGeneratorsForPickle(stepTypeRegistry); // Java8 step definitions will be added to the glue here buildBackendWorlds(); glue.prepareGlue(stepTypeRegistry); TestCase testCase = createTestCaseForPickle(pickle); testCase.run(bus); } finally { glue.removeScenarioScopedGlue(); disposeBackendWorlds(); } }
@Test void steps_are_executed() { StubStepDefinition stepDefinition = new StubStepDefinition("some step"); Pickle pickleMatchingStepDefinitions = createPickleMatchingStepDefinitions(stepDefinition); TestRunnerSupplier runnerSupplier = new TestRunnerSupplier(bus, runtimeOptions) { @Override public void loadGlue(Glue glue, List<URI> gluePaths) { glue.addStepDefinition(stepDefinition); } }; runnerSupplier.get().runPickle(pickleMatchingStepDefinitions); assertThat(stepDefinition.getArgs(), is(equalTo(emptyList()))); }
public void loadPlugin(GoPluginBundleDescriptor bundleDescriptor) { for (GoPluginDescriptor pluginDescriptor : bundleDescriptor.descriptors()) { if (idToDescriptorMap.containsKey(pluginDescriptor.id().toLowerCase())) { throw new RuntimeException("Found another plugin with ID: " + pluginDescriptor.id()); } } for (GoPluginDescriptor pluginDescriptor : bundleDescriptor.descriptors()) { idToDescriptorMap.put(pluginDescriptor.id().toLowerCase(), pluginDescriptor); } }
@Test void shouldNotLoadPluginIfThereIsOneMorePluginWithTheSameIDAndDifferentCase() { GoPluginBundleDescriptor descriptor = new GoPluginBundleDescriptor(GoPluginDescriptor.builder().id("id1").isBundledPlugin(true).build()); registry.loadPlugin(descriptor); GoPluginBundleDescriptor secondPluginBundleDescriptor = new GoPluginBundleDescriptor(GoPluginDescriptor.builder().id("iD1").isBundledPlugin(true).build()); assertThatCode(() -> registry.loadPlugin(secondPluginBundleDescriptor)) .isInstanceOf(RuntimeException.class); }
public void set(final Object bean, final Object value) { set(bean, this.patternParts, lastIsNumber(this.patternParts), value); }
@Test public void setTest() { final BeanPath pattern = BeanPath.create("userInfo.examInfoDict[0].id"); pattern.set(tempMap, 2); final Object result = pattern.get(tempMap); assertEquals(2, result); }
@Override List<DiscoveryNode> resolveNodes() { if (serviceName != null && !serviceName.isEmpty()) { logger.fine("Using service name to discover nodes."); return getSimpleDiscoveryNodes(client.endpointsByName(serviceName)); } else if (serviceLabel != null && !serviceLabel.isEmpty()) { logger.fine("Using service label to discover nodes."); return getSimpleDiscoveryNodes(client.endpointsByServiceLabel(serviceLabel, serviceLabelValue)); } else if (podLabel != null && !podLabel.isEmpty()) { logger.fine("Using pod label to discover nodes."); return getSimpleDiscoveryNodes(client.endpointsByPodLabel(podLabel, podLabelValue)); } return getSimpleDiscoveryNodes(client.endpoints()); }
@Test public void resolveWithServiceNameWhenNotReadyAddressesAndNotReadyDisabled() { // given List<Endpoint> endpoints = createNotReadyEndpoints(2); given(client.endpointsByName(SERVICE_NAME)).willReturn(endpoints); KubernetesApiEndpointResolver sut = new KubernetesApiEndpointResolver(LOGGER, SERVICE_NAME, 0, null, null, null, null, null, client); // when List<DiscoveryNode> nodes = sut.resolveNodes(); // then assertEquals(0, nodes.size()); }
public boolean checkStateUpdater(final long now, final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) { addTasksToStateUpdater(); if (stateUpdater.hasExceptionsAndFailedTasks()) { handleExceptionsFromStateUpdater(); } if (stateUpdater.restoresActiveTasks()) { handleRestoredTasksFromStateUpdater(now, offsetResetter); } return !stateUpdater.restoresActiveTasks() && !tasks.hasPendingTasksToInit(); }
@Test public void shouldAddTasksToStateUpdater() { final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions) .withInputPartitions(taskId00Partitions) .inState(State.RESTORING).build(); final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions) .withInputPartitions(taskId01Partitions) .inState(State.RUNNING).build(); final TasksRegistry tasks = mock(TasksRegistry.class); when(tasks.drainPendingTasksToInit()).thenReturn(mkSet(task00, task01)); taskManager = setUpTaskManager(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, tasks, true); taskManager.checkStateUpdater(time.milliseconds(), noOpResetter); verify(task00).initializeIfNeeded(); verify(task01).initializeIfNeeded(); verify(stateUpdater).add(task00); verify(stateUpdater).add(task01); }
public static String substringAfter(String str, String separator) { if ((str == null) || (str.length() == 0)) { return str; } if (separator == null) { return EMPTY; } int pos = str.indexOf(separator); if (pos < 0) { return EMPTY; } return str.substring(pos + separator.length()); }
@Test public void testSubstringAfter() { Assert.assertEquals(null, StringUtils.substringAfter(null, "*")); Assert.assertEquals("", StringUtils.substringAfter("", "*")); Assert.assertEquals("", StringUtils.substringAfter("*", null)); Assert.assertEquals("bc", StringUtils.substringAfter("abc", "a")); Assert.assertEquals("cba", StringUtils.substringAfter("abcba", "b")); Assert.assertEquals("", StringUtils.substringAfter("abc", "c")); Assert.assertEquals("", StringUtils.substringAfter("abc", "d")); Assert.assertEquals("abc", StringUtils.substringAfter("abc", "")); }
void processSingleResource(FileStatus resource) { Path path = resource.getPath(); // indicates the processing status of the resource ResourceStatus resourceStatus = ResourceStatus.INIT; // first, if the path ends with the renamed suffix, it indicates the // directory was moved (as stale) but somehow not deleted (probably due to // SCM failure); delete the directory if (path.toString().endsWith(RENAMED_SUFFIX)) { LOG.info("Found a renamed directory that was left undeleted at " + path.toString() + ". Deleting."); try { if (fs.delete(path, true)) { resourceStatus = ResourceStatus.DELETED; } } catch (IOException e) { LOG.error("Error while processing a shared cache resource: " + path, e); } } else { // this is the path to the cache resource directory // the directory name is the resource key (i.e. a unique identifier) String key = path.getName(); try { store.cleanResourceReferences(key); } catch (YarnException e) { LOG.error("Exception thrown while removing dead appIds.", e); } if (store.isResourceEvictable(key, resource)) { try { /* * TODO See YARN-2663: There is a race condition between * store.removeResource(key) and * removeResourceFromCacheFileSystem(path) operations because they do * not happen atomically and resources can be uploaded with different * file names by the node managers. */ // remove the resource from scm (checks for appIds as well) if (store.removeResource(key)) { // remove the resource from the file system boolean deleted = removeResourceFromCacheFileSystem(path); if (deleted) { resourceStatus = ResourceStatus.DELETED; } else { LOG.error("Failed to remove path from the file system." + " Skipping this resource: " + path); resourceStatus = ResourceStatus.ERROR; } } else { // we did not delete the resource because it contained application // ids resourceStatus = ResourceStatus.PROCESSED; } } catch (IOException e) { LOG.error( "Failed to remove path from the file system. Skipping this resource: " + path, e); resourceStatus = ResourceStatus.ERROR; } } else { resourceStatus = ResourceStatus.PROCESSED; } } // record the processing switch (resourceStatus) { case DELETED: metrics.reportAFileDelete(); break; case PROCESSED: metrics.reportAFileProcess(); break; case ERROR: metrics.reportAFileError(); break; default: LOG.error("Cleaner encountered an invalid status (" + resourceStatus + ") while processing resource: " + path.getName()); } }
@Test void testResourceIsInUseHasAnActiveApp() throws Exception { FileSystem fs = mock(FileSystem.class); CleanerMetrics metrics = mock(CleanerMetrics.class); SCMStore store = mock(SCMStore.class); FileStatus resource = mock(FileStatus.class); when(resource.getPath()).thenReturn(new Path(ROOT + "/a/b/c/abc")); // resource is stale when(store.isResourceEvictable(isA(String.class), isA(FileStatus.class))) .thenReturn(true); // but still has appIds when(store.removeResource(isA(String.class))).thenReturn(false); CleanerTask task = createSpiedTask(fs, store, metrics, new ReentrantLock()); // process the resource task.processSingleResource(resource); // metrics should record a processed file (but not delete) verify(metrics).reportAFileProcess(); verify(metrics, never()).reportAFileDelete(); }
public List<BlameLine> blame(Path baseDir, String fileName) throws Exception { BlameOutputProcessor outputProcessor = new BlameOutputProcessor(); try { this.processWrapperFactory.create( baseDir, outputProcessor::process, gitCommand, GIT_DIR_FLAG, String.format(GIT_DIR_ARGUMENT, baseDir), GIT_DIR_FORCE_FLAG, baseDir.toString(), BLAME_COMMAND, BLAME_LINE_PORCELAIN_FLAG, IGNORE_WHITESPACES, FILENAME_SEPARATOR_FLAG, fileName) .execute(); } catch (UncommittedLineException e) { LOG.debug("Unable to blame file '{}' - it has uncommitted changes", fileName); return emptyList(); } return outputProcessor.getBlameLines(); }
@Test public void blame_different_author_and_committer() throws Exception { File projectDir = createNewTempFolder(); javaUnzip("dummy-git-different-committer.zip", projectDir); File baseDir = new File(projectDir, "dummy-git"); List<BlameLine> blame = blameCommand.blame(baseDir.toPath(), DUMMY_JAVA); Date revisionDate1 = DateUtils.parseDateTime("2012-07-17T16:12:48+0200"); String revision1 = "6b3aab35a3ea32c1636fee56f996e677653c48ea"; String author1 = "david@gageot.net"; // second commit, which has a commit date different than the author date Date revisionDate2 = DateUtils.parseDateTime("2022-10-11T14:14:26+0200"); String revision2 = "7609f824d5ff7018bebf107cdbe4edcc901b574f"; String author2 = "duarte.meneses@sonarsource.com"; List<BlameLine> expectedBlame = new LinkedList<>(); for (int i = 0; i < 25; i++) { expectedBlame.add(new BlameLine().revision(revision1).date(revisionDate1).author(author1)); } for (int i = 0; i < 3; i++) { expectedBlame.add(new BlameLine().revision(revision2).date(revisionDate2).author(author2)); } for (int i = 0; i < 1; i++) { expectedBlame.add(new BlameLine().revision(revision1).date(revisionDate1).author(author1)); } assertThat(blame).isEqualTo(expectedBlame); }
@Override public String getMethod() { return PATH; }
@Test public void testSetMyDefaultAdministratorRightsWithNone() { SetMyDefaultAdministratorRights setMyDefaultAdministratorRights = SetMyDefaultAdministratorRights .builder() .build(); assertEquals("setMyDefaultAdministratorRights", setMyDefaultAdministratorRights.getMethod()); assertDoesNotThrow(setMyDefaultAdministratorRights::validate); }
@Operation(summary = "Receive SAML AuthnRequest") @PostMapping(value = {"/frontchannel/saml/v4/entrance/request_authentication", "/frontchannel/saml/v4/idp/request_authentication"}) public RedirectView requestAuthenticationService(HttpServletRequest request) throws SamlValidationException, SharedServiceClientException, DienstencatalogusException, UnsupportedEncodingException, ComponentInitializationException, MessageDecodingException, SamlSessionException, SamlParseException { logger.info("Receive SAML AuthnRequest"); if (request.getParameter("SAMLRequest") != null) { AuthenticationRequest authenticationRequest = authenticationService.startAuthenticationProcess(request); return new RedirectView(authenticationRequest.getProtocolType().equals(ProtocolType.SAML_ROUTERINGSDIENST) ? authenticationIdpService.redirectWithCorrectAttributesForAd(request, authenticationRequest) : authenticationEntranceService.redirectWithCorrectAttributesForAd(request, authenticationRequest) ); } else { RedirectView redirectView = new RedirectView("/saml/v4/idp/redirect_with_artifact"); redirectView.setStatusCode(HttpStatus.BAD_REQUEST); return redirectView; } }
@Test public void failedRequestAuthenticationEntranceServiceTest() throws UnsupportedEncodingException, SamlSessionException, DienstencatalogusException, SharedServiceClientException, SamlValidationException, MessageDecodingException, ComponentInitializationException, SamlParseException { RedirectView result = authenticationControllerMock.requestAuthenticationService(request); assertNotNull(result); verify(authenticationEntranceServiceMock, times(0)).startAuthenticationProcess(any(HttpServletRequest.class)); verify(authenticationEntranceServiceMock, times(0)).redirectWithCorrectAttributesForAd(any(HttpServletRequest.class), any(AuthenticationRequest.class)); }
@Override public MetricsContext.Unit unit() { return unit; }
@Test public void noop() { assertThat(DefaultCounter.NOOP.unit()).isEqualTo(Unit.UNDEFINED); assertThat(DefaultCounter.NOOP.isNoop()).isTrue(); assertThatThrownBy(DefaultCounter.NOOP::value) .isInstanceOf(UnsupportedOperationException.class) .hasMessage("NOOP counter has no value"); }
private static String getActualValue(String configVal) { if (configVal != null && configVal.matches("^.*\\$\\{[\\w.]+(:.*)?}.*$")) { final int startIndex = configVal.indexOf("${") + 2; final int endIndex = configVal.indexOf('}', startIndex); final String envKey = configVal.substring(startIndex, endIndex); final int separatorIndex = envKey.indexOf(':'); final String key = separatorIndex >= 0 ? envKey.substring(0, separatorIndex) : envKey; final String defaultValue = separatorIndex >= 0 ? envKey.substring(separatorIndex + 1) : ""; // The priority is environment variables > system variables if (!StringUtils.isBlank(System.getenv(key))) { return System.getenv(key); } if (!StringUtils.isBlank(System.getProperty(key))) { return System.getProperty(key); } return defaultValue; } return configVal; }
@Test public void testGetActualValue() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, NoSuchFieldException { Method method = BootArgsBuilder.class.getDeclaredMethod("getActualValue", String.class); method.setAccessible(true); assertEquals("demo", method.invoke(BootArgsBuilder.class, "${serviceAName:A}")); assertEquals("1.0", method.invoke(BootArgsBuilder.class, "${serviceAVersion:2.0}")); assertEquals("B", method.invoke(BootArgsBuilder.class, "${serviceBName:B}")); assertEquals("2.0", method.invoke(BootArgsBuilder.class, "${serviceBVersion:2.0}")); }
public void validate(ExternalIssueReport report, Path reportPath) { if (report.rules != null && report.issues != null) { Set<String> ruleIds = validateRules(report.rules, reportPath); validateIssuesCctFormat(report.issues, ruleIds, reportPath); } else if (report.rules == null && report.issues != null) { String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX); LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. " + "Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink); validateIssuesDeprecatedFormat(report.issues, reportPath); } else { throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath)); } }
@Test public void validate_whenInvalidReport_shouldThrowException() throws IOException { ExternalIssueReport report = readInvalidReport(DEPRECATED_REPORTS_LOCATION); assertThatThrownBy(() -> validator.validate(report, reportPath)) .isInstanceOf(IllegalStateException.class) .hasMessage("Failed to parse report 'report-path': invalid report detected."); }
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
    if (message == null) {
        return null;
    }

    long messageFormat = 0;
    Header header = null;
    Properties properties = null;
    Map<Symbol, Object> daMap = null;
    Map<Symbol, Object> maMap = null;
    Map<String, Object> apMap = null;
    Map<Object, Object> footerMap = null;

    Section body = convertBody(message);

    if (message.isPersistent()) {
        if (header == null) {
            header = new Header();
        }
        header.setDurable(true);
    }

    byte priority = message.getPriority();
    if (priority != Message.DEFAULT_PRIORITY) {
        if (header == null) {
            header = new Header();
        }
        header.setPriority(UnsignedByte.valueOf(priority));
    }

    String type = message.getType();
    if (type != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setSubject(type);
    }

    MessageId messageId = message.getMessageId();
    if (messageId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setMessageId(getOriginalMessageId(message));
    }

    ActiveMQDestination destination = message.getDestination();
    if (destination != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setTo(destination.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
    }

    ActiveMQDestination replyTo = message.getReplyTo();
    if (replyTo != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setReplyTo(replyTo.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
    }

    String correlationId = message.getCorrelationId();
    if (correlationId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        try {
            properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
        } catch (AmqpProtocolException e) {
            properties.setCorrelationId(correlationId);
        }
    }

    long expiration = message.getExpiration();
    if (expiration != 0) {
        long ttl = expiration - System.currentTimeMillis();
        if (ttl < 0) {
            ttl = 1;
        }
        if (header == null) {
            header = new Header();
        }
        header.setTtl(new UnsignedInteger((int) ttl));

        if (properties == null) {
            properties = new Properties();
        }
        properties.setAbsoluteExpiryTime(new Date(expiration));
    }

    long timeStamp = message.getTimestamp();
    if (timeStamp != 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setCreationTime(new Date(timeStamp));
    }

    // JMSX Message Properties
    int deliveryCount = message.getRedeliveryCounter();
    if (deliveryCount > 0) {
        if (header == null) {
            header = new Header();
        }
        header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
    }
    String userId = message.getUserID();
    if (userId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
    }
    String groupId = message.getGroupID();
    if (groupId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupId(groupId);
    }
    int groupSequence = message.getGroupSequence();
    if (groupSequence > 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
    }

    final Map<String, Object> entries;
    try {
        entries = message.getProperties();
    } catch (IOException e) {
        throw JMSExceptionSupport.create(e);
    }

    for (Map.Entry<String, Object> entry : entries.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();

        if (key.startsWith(JMS_AMQP_PREFIX)) {
            if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
                messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
                continue;
            } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                continue;
            } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                continue;
            } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (maMap == null) {
                    maMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
                maMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
                continue;
            } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
                continue;
            } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (daMap == null) {
                    daMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
                daMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (footerMap == null) {
                    footerMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
                footerMap.put(Symbol.valueOf(name), value);
                continue;
            }
        } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX)) {
            // strip off the scheduled message properties
            continue;
        }

        // The property didn't map into any other slot so we store it in the
        // Application Properties section of the message.
        if (apMap == null) {
            apMap = new HashMap<>();
        }
        apMap.put(key, value);

        int messageType = message.getDataStructureType();
        if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
            // Type of command to recognize advisory message
            Object data = message.getDataStructure();
            if (data != null) {
                apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
            }
        }
    }

    final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
    encoder.setByteBuffer(buffer);

    if (header != null) {
        encoder.writeObject(header);
    }
    if (daMap != null) {
        encoder.writeObject(new DeliveryAnnotations(daMap));
    }
    if (maMap != null) {
        encoder.writeObject(new MessageAnnotations(maMap));
    }
    if (properties != null) {
        encoder.writeObject(properties);
    }
    if (apMap != null) {
        encoder.writeObject(new ApplicationProperties(apMap));
    }
    if (body != null) {
        encoder.writeObject(body);
    }
    if (footerMap != null) {
        encoder.writeObject(new Footer(footerMap));
    }

    return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
}
@Test public void testConvertTextMessageCreatesBodyUsingOriginalEncodingWithDataSection() throws Exception { String contentString = "myTextMessageContent"; ActiveMQTextMessage outbound = createTextMessage(contentString); outbound.setShortProperty(JMS_AMQP_ORIGINAL_ENCODING, AMQP_DATA); outbound.onSend(); outbound.storeContent(); JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer(); EncodedMessage encoded = transformer.transform(outbound); assertNotNull(encoded); Message amqp = encoded.decode(); assertNotNull(amqp.getBody()); assertTrue(amqp.getBody() instanceof Data); assertTrue(((Data) amqp.getBody()).getValue() instanceof Binary); Binary data = ((Data) amqp.getBody()).getValue(); String contents = new String(data.getArray(), data.getArrayOffset(), data.getLength(), StandardCharsets.UTF_8); assertEquals(contentString, contents); }
public CruiseConfig deserializeConfig(String content) throws Exception { String md5 = md5Hex(content); Element element = parseInputStream(new ByteArrayInputStream(content.getBytes())); LOGGER.debug("[Config Save] Updating config cache with new XML"); CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse(); setMd5(configForEdit, md5); configForEdit.setOrigins(new FileConfigOrigin()); return configForEdit; }
@Test void shouldAllowBothCounterAndMaterialNameInLabelTemplate() throws Exception { CruiseConfig cruiseConfig = xmlLoader.deserializeConfig(LABEL_TEMPLATE_WITH_LABEL_TEMPLATE("1.3.0-${COUNT}-${git}")); assertThat(cruiseConfig.pipelineConfigByName(new CaseInsensitiveString("cruise")).getLabelTemplate()).isEqualTo("1.3.0-${COUNT}-${git}"); }
protected static void validateTimestampColumnType( final Optional<String> timestampColumnName, final Schema avroSchema ) { if (timestampColumnName.isPresent()) { if (avroSchema.getField(timestampColumnName.get()) == null) { throw new IllegalArgumentException("The indicated timestamp field does not exist: " + timestampColumnName.get()); } if (avroSchema.getField(timestampColumnName.get()).schema().getType() != Type.LONG) { throw new IllegalArgumentException("The timestamp column type should be bigint/long. " + timestampColumnName.get() + " type is " + avroSchema.getField(timestampColumnName.get()).schema().getType()); } } }
@Test public void shouldThrowIfTimestampColumnDoesNotExist() throws IOException { // When final IllegalArgumentException illegalArgumentException = assertThrows( IllegalArgumentException.class, () -> DataGenProducer.validateTimestampColumnType(Optional.of("page__id"), getAvroSchema()) ); // Then assertThat(illegalArgumentException.getMessage(), CoreMatchers.equalTo("The indicated timestamp field does not exist: page__id") ); }
@Override public DescriptiveUrlBag toUrl(final Path file) { final DescriptiveUrlBag list = new DescriptiveUrlBag(); if(file.attributes().getLink() != DescriptiveUrl.EMPTY) { list.add(file.attributes().getLink()); } list.add(new DescriptiveUrl(URI.create(String.format("%s%s", new HostUrlProvider().withUsername(false).get(host), URIEncoder.encode(file.getAbsolute()))), DescriptiveUrl.Type.provider, MessageFormat.format(LocaleFactory.localizedString("{0} URL"), host.getProtocol().getScheme().toString().toUpperCase(Locale.ROOT)))); list.addAll(new HostWebUrlProvider(host).toUrl(file)); return list; }
@Test public void testAbsoluteDocumentRoot() { Host host = new Host(new TestProtocol(), "localhost"); host.setDefaultPath("/usr/home/dkocher/public_html"); Path path = new Path( "/usr/home/dkocher/public_html/file", EnumSet.of(Path.Type.directory)); assertEquals("http://localhost/file", new DefaultUrlProvider(host).toUrl(path).find(DescriptiveUrl.Type.http).getUrl()); host.setWebURL("http://127.0.0.1/~dkocher"); assertEquals("http://127.0.0.1/~dkocher/file", new DefaultUrlProvider(host).toUrl(path).find(DescriptiveUrl.Type.http).getUrl()); }
public Certificate add(CvCertificate cert) { final Certificate db = Certificate.from(cert); if (repository.countByIssuerAndSubject(db.getIssuer(), db.getSubject()) > 0) { throw new ClientException(String.format( "Certificate of subject %s and issuer %s already exists", db.getSubject(), db.getIssuer())); } // Special case for first CVCA certificate for this document type if (db.getType() == Certificate.Type.CVCA && repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) { signatureService.verify(cert, cert.getBody().getPublicKey(), cert.getBody().getPublicKey().getParams()); logger.warn("Added first CVCA certificate for {}, set trusted flag manually", db.getDocumentType()); } else { verify(cert); if (db.getType() == Certificate.Type.AT) { verifyPublicKey(cert); } } return repository.saveAndFlush(db); }
@Test public void shouldNotAddATIfPublicKeyIsNotFound() throws Exception { Mockito.doThrow(new nl.logius.digid.sharedlib.exception.ClientException( "Not Found", 404 )).when(hsmClient).keyInfo(Mockito.eq("AT"), Mockito.eq("SSSSSSSSSSSSSSSS")); certificateRepo.save(loadCvCertificate("rdw/acc/cvca.cvcert", true)); certificateRepo.save(loadCvCertificate("rdw/acc/dvca.cvcert", false)); ClientException thrown = assertThrows(ClientException.class, () -> service.add(readCvCertificate("rdw/acc/at001.cvcert"))); assertEquals("Private key of certificate is not inside hsm", thrown.getMessage()); }
@Override public void setParameters(Collection<CompoundVariable> parameters) throws InvalidVariableException { checkParameterCount(parameters, 0, 2); Object []values = parameters.toArray(); int count = values.length; if (count > 0) { format = ((CompoundVariable) values[0]).execute(); } if (count > 1) { variable = ((CompoundVariable)values[1]).execute().trim(); } }
@Test void testTooMany() throws Exception { params.add(new CompoundVariable("YMD")); params.add(new CompoundVariable("NAME")); params.add(new CompoundVariable("YMD")); assertThrows(InvalidVariableException.class, () -> variable.setParameters(params)); }
@Override public void run() { JobRunrMetadata metadata = storageProvider.getMetadata("database_version", "cluster"); if (metadata != null && "6.0.0".equals(metadata.getValue())) return; migrateScheduledJobsIfNecessary(); storageProvider.saveMetadata(new JobRunrMetadata("database_version", "cluster", "6.0.0")); }
@Test void doesMigrationsOfScheduledJobsIfStorageProviderIsAnSqlStorageProvider() { doReturn(PostgresStorageProvider.class).when(storageProviderInfo).getImplementationClass(); task.run(); verify(storageProvider).getScheduledJobs(any(), any()); }
@Override public boolean isOperational() { if (nodeOperational) { return true; } boolean flag = false; try { flag = checkOperational(); } catch (InterruptedException e) { LOG.trace("Interrupted while checking ES node is operational", e); Thread.currentThread().interrupt(); } finally { if (flag) { esConnector.stop(); nodeOperational = true; } } return nodeOperational; }
@Test public void isOperational_should_return_false_if_ElasticsearchException_thrown() { EsConnector esConnector = mock(EsConnector.class); when(esConnector.getClusterHealthStatus()) .thenThrow(new ElasticsearchException("test")); EsManagedProcess underTest = new EsManagedProcess(mock(Process.class), ProcessId.ELASTICSEARCH, esConnector, WAIT_FOR_UP_TIMEOUT); assertThat(underTest.isOperational()).isFalse(); }
public static int divide(int num1, int num2) {
    // Returns the quotient as a percentage: (num1 / num2) * PERCENTAGE, truncated to an int.
    return (int) (Double.parseDouble(num1 + "") / Double.parseDouble(num2 + "") * PERCENTAGE);
}
@Test public void assertDivide() { Assert.isTrue(CalculateUtil.divide(200, 100) == 200); Assert.isTrue(CalculateUtil.divide(100, 200) == 50); Assert.isTrue(CalculateUtil.divide(100, 100) == 100); }
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
    SinkConfig mergedConfig = clone(existingConfig);
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Sink Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName())
            && !newConfig.getSourceSubscriptionName().equals(existingConfig.getSourceSubscriptionName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach(topicName -> {
            newConfig.getInputSpecs().putIfAbsent(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        });
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getTopicToSerdeClassName() != null) {
        newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getTopicToSchemaType() != null) {
        newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (!newConfig.getInputSpecs().isEmpty()) {
        SinkConfig finalMergedConfig = mergedConfig;
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (newConfig.getProcessingGuarantees() != null
            && !newConfig.getProcessingGuarantees().equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getRetainOrdering() != null
            && !newConfig.getRetainOrdering().equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null
            && !newConfig.getRetainKeyOrdering().equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getResources() != null) {
        mergedConfig.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getTransformFunction() != null) {
        mergedConfig.setTransformFunction(newConfig.getTransformFunction());
    }
    if (newConfig.getTransformFunctionClassName() != null) {
        mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
    }
    if (newConfig.getTransformFunctionConfig() != null) {
        mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
    }
    return mergedConfig;
}
@Test public void testMergeDifferentInputSpec() { SinkConfig sinkConfig = createSinkConfig(); sinkConfig.getInputSpecs().put("test-input", ConsumerConfig.builder().isRegexPattern(true).receiverQueueSize(1000).build()); Map<String, ConsumerConfig> inputSpecs = new HashMap<>(); inputSpecs.put("test-input", ConsumerConfig.builder().isRegexPattern(true).serdeClassName("test-serde").receiverQueueSize(58).build()); SinkConfig newSinkConfig = createUpdatedSinkConfig("inputSpecs", inputSpecs); SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig); assertEquals(mergedConfig.getInputSpecs().get("test-input"), newSinkConfig.getInputSpecs().get("test-input")); // make sure original sinkConfig was not modified assertEquals(sinkConfig.getInputSpecs().get("test-input").getReceiverQueueSize().intValue(), 1000); }
Map<String, Object> offsetSyncsTopicProducerConfig() { return SOURCE_CLUSTER_ALIAS_DEFAULT.equals(offsetSyncsTopicLocation()) ? sourceProducerConfig(OFFSET_SYNCS_SOURCE_PRODUCER_ROLE) : targetProducerConfig(OFFSET_SYNCS_TARGET_PRODUCER_ROLE); }
@Test public void testProducerConfigsForOffsetSyncsTopic() { Map<String, String> connectorProps = makeProps( "source.producer.batch.size", "1", "target.producer.acks", "1", "producer.max.poll.interval.ms", "1", "fetch.min.bytes", "1" ); MirrorSourceConfig config = new MirrorSourceConfig(connectorProps); Map<String, Object> sourceProducerConfig = config.sourceProducerConfig("test"); Map<String, Object> offsetSyncsTopicSourceProducerConfig = config.offsetSyncsTopicProducerConfig(); assertEqualsExceptClientId(sourceProducerConfig, offsetSyncsTopicSourceProducerConfig); assertEquals("source1->target2|ConnectorName|test", sourceProducerConfig.get("client.id")); assertEquals("source1->target2|ConnectorName|" + MirrorSourceConfig.OFFSET_SYNCS_SOURCE_PRODUCER_ROLE, offsetSyncsTopicSourceProducerConfig.get("client.id")); connectorProps.put("offset-syncs.topic.location", "target"); config = new MirrorSourceConfig(connectorProps); Map<String, Object> targetProducerConfig = config.targetProducerConfig("test"); Map<String, Object> offsetSyncsTopicTargetProducerConfig = config.offsetSyncsTopicProducerConfig(); assertEqualsExceptClientId(targetProducerConfig, offsetSyncsTopicTargetProducerConfig); assertEquals("source1->target2|ConnectorName|test", targetProducerConfig.get("client.id")); assertEquals("source1->target2|ConnectorName|" + MirrorSourceConfig.OFFSET_SYNCS_TARGET_PRODUCER_ROLE, offsetSyncsTopicTargetProducerConfig.get("client.id")); }
@Override public Object[] getRowFromCache( RowMetaInterface lookupMeta, Object[] lookupRow ) throws KettleException { if ( stepData.hasDBCondition ) { // actually, there was no sense in executing SELECT from db in this case, // should be reported as improvement return null; } SearchingContext context = new SearchingContext(); context.init( keys.length ); for ( Index index : indexes ) { int column = index.getColumn(); // IS (NOT) NULL operation does not require second argument // hence, lookupValue can be absent // basically, the index ignores both meta and value, so we can pass everything there Object lookupValue = ( column < lookupRow.length ) ? lookupRow[ column ] : null; index.applyRestrictionsTo( context, lookupMeta.getValueMeta( column ), lookupValue ); if ( context.isEmpty() ) { // if nothing matches, break the search return null; } } // iterate through all elements survived after filtering stage // and find the first matching BitSet candidates = context.getCandidates(); int candidate = candidates.nextSetBit( 0 ); while ( candidate != -1 ) { Object[] dataKeys = keys[ candidate ]; boolean matches = true; int lookupShift = 0; for ( int i = 0, len = otherConditions.length; i < len && matches; i++ ) { int[] columnConditionPair = otherConditions[ i ]; final int column = columnConditionPair[ 0 ]; Object keyData = dataKeys[ column ]; ValueMetaInterface keyMeta = keysMeta.getValueMeta( column ); int lookupIndex = column + lookupShift; Object cmpData = lookupRow[ lookupIndex ]; ValueMetaInterface cmpMeta = lookupMeta.getValueMeta( lookupIndex ); int condition = columnConditionPair[ 1 ]; if ( condition == DatabaseLookupMeta.CONDITION_BETWEEN ) { // BETWEEN is a special condition demanding two arguments // technically there are no obstacles to implement it, // as it is just a short form of: (a <= b) && (b <= c) // however, let it be so for now matches = ( keyMeta.compare( keyData, cmpMeta, cmpData ) >= 0 ); if ( matches ) { lookupShift++; lookupIndex++; ValueMetaInterface cmpMeta2 = lookupMeta.getValueMeta( lookupIndex ); Object cmpData2 = lookupRow[ lookupIndex ]; matches = ( keyMeta.compare( keyData, cmpMeta2, cmpData2 ) <= 0 ); } } else { // if not BETWEEN, than it is LIKE (or some new operator) // for now, LIKE is not supported here matches = false; stepData.hasDBCondition = true; } } if ( matches ) { return data[ candidate ]; } else { candidate = candidates.nextSetBit( candidate + 1 ); } } return null; }
@Test public void lookup_DoesNotFind_WithBetweenOperator() throws Exception { RowMeta meta = keysMeta.clone(); meta.setValueMeta( 3, new ValueMetaDate() ); meta.addValueMeta( new ValueMetaInteger() ); ReadAllCache cache = buildCache( "<>,IS NOT NULL,BETWEEN,IS NULL" ); Object[] found = cache.getRowFromCache( meta, new Object[] { -1L, null, new Date( 1000 ), new Date( 2000 ), null } ); assertNull( "(1000 <= keys[2] <= 2000) --> none", found ); }
public static Interval of(String interval, TimeRange timeRange) { switch (timeRange.type()) { case TimeRange.KEYWORD: return timestampInterval(interval); case TimeRange.ABSOLUTE: return ofAbsoluteRange(interval, (AbsoluteRange)timeRange); case TimeRange.RELATIVE: return ofRelativeRange(interval, (RelativeRange)timeRange); } throw new RuntimeException("Unable to parse time range type: " + timeRange.type()); }
@Test public void returnsParsedIntervalIfAbsoluteRangeButAboveLimit() { final AbsoluteRange absoluteRange = AbsoluteRange.create( DateTime.parse("2019-12-01T14:50:23Z"), DateTime.parse("2019-12-02T14:50:23Z") ); final Interval interval = ApproximatedAutoIntervalFactory.of("minute", absoluteRange); assertThat(interval).isEqualTo(TimeUnitInterval.create(TimeUnitInterval.IntervalUnit.MINUTES, 1)); }
@VisibleForTesting static OptionalDouble calculateAverageRowsPerPartition(Collection<PartitionStatistics> statistics) { return statistics.stream() .map(PartitionStatistics::getBasicStatistics) .map(HiveBasicStatistics::getRowCount) .filter(OptionalLong::isPresent) .mapToLong(OptionalLong::getAsLong) .peek(count -> verify(count >= 0, "count must be greater than or equal to zero")) .average(); }
@Test public void testCalculateAverageRowsPerPartition() { assertThat(calculateAverageRowsPerPartition(ImmutableList.of())).isEmpty(); assertThat(calculateAverageRowsPerPartition(ImmutableList.of(PartitionStatistics.empty()))).isEmpty(); assertThat(calculateAverageRowsPerPartition(ImmutableList.of(PartitionStatistics.empty(), PartitionStatistics.empty()))).isEmpty(); assertEquals(calculateAverageRowsPerPartition(ImmutableList.of(rowsCount(10))), OptionalDouble.of(10)); assertEquals(calculateAverageRowsPerPartition(ImmutableList.of(rowsCount(10), PartitionStatistics.empty())), OptionalDouble.of(10)); assertEquals(calculateAverageRowsPerPartition(ImmutableList.of(rowsCount(10), rowsCount(20))), OptionalDouble.of(15)); assertEquals(calculateAverageRowsPerPartition(ImmutableList.of(rowsCount(10), rowsCount(20), PartitionStatistics.empty())), OptionalDouble.of(15)); }
public int validate( final ServiceContext serviceContext, final List<ParsedStatement> statements, final SessionProperties sessionProperties, final String sql ) { requireSandbox(serviceContext); final KsqlExecutionContext ctx = requireSandbox(snapshotSupplier.apply(serviceContext)); final Injector injector = injectorFactory.apply(ctx, serviceContext); final KsqlConfig ksqlConfig = ctx.getKsqlConfig(); int numPersistentQueries = 0; for (final ParsedStatement parsed : statements) { final PreparedStatement<?> prepared = ctx.prepare( parsed, (isVariableSubstitutionEnabled(sessionProperties, ksqlConfig) ? sessionProperties.getSessionVariables() : Collections.emptyMap()) ); final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared, SessionConfig.of(ksqlConfig, sessionProperties.getMutableScopedProperties()) ); final int currNumPersistentQueries = validate( serviceContext, configured, sessionProperties, ctx, injector ); numPersistentQueries += currNumPersistentQueries; if (currNumPersistentQueries > 0 && QueryCapacityUtil.exceedsPersistentQueryCapacity(ctx, ksqlConfig)) { QueryCapacityUtil.throwTooManyActivePersistentQueriesException(ctx, ksqlConfig, sql); } } return numPersistentQueries; }
@Test
public void shouldNotThrowIfNotQueryDespiteTooManyPersistentQueries() {
    // Given:
    givenPersistentQueryCount(2);
    givenRequestValidator(ImmutableMap.of(ListStreams.class, StatementValidator.NO_VALIDATION));
    final List<ParsedStatement> statements = givenParsed("SHOW STREAMS;");

    // When/Then:
    validator.validate(serviceContext, statements, sessionProperties, "sql");
}
@Override
public PageData<Device> findDevicesByTenantId(UUID tenantId, PageLink pageLink) {
    if (StringUtils.isEmpty(pageLink.getTextSearch())) {
        return DaoUtil.toPageData(
                deviceRepository.findByTenantId(
                        tenantId,
                        DaoUtil.toPageable(pageLink)));
    } else {
        return DaoUtil.toPageData(
                deviceRepository.findByTenantId(
                        tenantId,
                        pageLink.getTextSearch(),
                        DaoUtil.toPageable(pageLink)));
    }
}
@Test
public void testFindDevicesByTenantId() {
    PageLink pageLink = new PageLink(15, 0, PREFIX_FOR_DEVICE_NAME);
    PageData<Device> devices1 = deviceDao.findDevicesByTenantId(tenantId1, pageLink);
    assertEquals(15, devices1.getData().size());

    pageLink = pageLink.nextPageLink();
    PageData<Device> devices2 = deviceDao.findDevicesByTenantId(tenantId1, pageLink);
    assertEquals(5, devices2.getData().size());
}
public FEELFnResult<List<Object>> invoke(@ParameterName( "ctx" ) EvaluationContext ctx,
                                         @ParameterName( "list" ) List list,
                                         @ParameterName( "precedes" ) FEELFunction function) {
    if ( function == null ) {
        return invoke( list );
    } else {
        return invoke( list, (a, b) -> {
            final Object result = function.invokeReflectively( ctx, new Object[]{a, b} );
            // A non-Boolean result is treated the same as true: a sorts before b.
            if ( !(result instanceof Boolean) || ((Boolean) result) ) {
                return -1;
            } else {
                return 1;
            }
        } );
    }
}
@Test
void invokeExceptionInSortFunction() {
    FunctionTestUtil.assertResultError(
            sortFunction.invoke(null, Arrays.asList(10, 4, 5, 12), getFunctionThrowingException()),
            InvalidParametersEvent.class);
}
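A plain-Java sketch of the comparator the focal method builds from "precedes": true maps to -1 (a sorts before b), anything else to 1. The names here are illustrative:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

class PrecedesComparatorSketch {
    public static void main(String[] args) {
        // Stands in for the reflective FEEL call: 'a precedes b' iff a < b.
        // Like the focal lambda, this never returns 0.
        Comparator<Integer> precedes = (a, b) -> (a < b) ? -1 : 1;
        List<Integer> list = Arrays.asList(10, 4, 5, 12); // same input as the test
        list.sort(precedes);
        System.out.println(list); // [4, 5, 10, 12]
    }
}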
public static GaussianMixture fit(int k, double[] x) {
    if (k < 2) throw new IllegalArgumentException("Invalid number of components in the mixture.");

    double min = MathEx.min(x);
    double max = MathEx.max(x);
    double step = (max - min) / (k + 1);

    Component[] components = new Component[k];
    for (int i = 0; i < k; i++) {
        components[i] = new Component(1.0 / k, new GaussianDistribution(min += step, step));
    }

    ExponentialFamilyMixture model = fit(x, components);
    return new GaussianMixture(model.L, x.length, model.components);
}
@Test
public void testMixture5() {
    System.out.println("Mixture5");

    double[] data = new double[30000];

    GaussianDistribution g1 = new GaussianDistribution(1.0, 1.0);
    for (int i = 0; i < 5000; i++) data[i] = g1.rand();

    GaussianDistribution g2 = new GaussianDistribution(4.0, 1.0);
    for (int i = 5000; i < 10000; i++) data[i] = g2.rand();

    GaussianDistribution g3 = new GaussianDistribution(8.0, 1.0);
    for (int i = 10000; i < 20000; i++) data[i] = g3.rand();

    GaussianDistribution g4 = new GaussianDistribution(-2.0, 1.0);
    for (int i = 20000; i < 25000; i++) data[i] = g4.rand();

    GaussianDistribution g5 = new GaussianDistribution(-5.0, 1.0);
    for (int i = 25000; i < 30000; i++) data[i] = g5.rand();

    GaussianMixture mixture = GaussianMixture.fit(data);
    System.out.println(mixture);
}
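A minimal sketch of the explicit-k overload shown in the focal method, written as a test body using only the types already visible above; the sample is synthetic:

// k must be >= 2, or the focal method throws IllegalArgumentException,
// so 2 is the smallest legal component count even for unimodal data.
double[] sample = new double[1000];
GaussianDistribution g = new GaussianDistribution(0.0, 1.0);
for (int i = 0; i < sample.length; i++) {
    sample[i] = g.rand();
}
GaussianMixture mixture = GaussianMixture.fit(2, sample);
System.out.println(mixture);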
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
    checkArgument(
        OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
    return new AutoValue_UBinary(binaryOp, lhs, rhs);
}
@Test
public void lessThanOrEqual() {
    assertUnifiesAndInlines(
        "4 <= 17",
        UBinary.create(Kind.LESS_THAN_EQUAL, ULiteral.intLit(4), ULiteral.intLit(17)));
}
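A sketch of the same factory with a different operator; whether a given Kind is accepted depends on OP_CODES, which is not shown here, so treat LESS_THAN's membership as an assumption:

// Assumes Kind.LESS_THAN is present in OP_CODES (not shown above);
// an unsupported Kind would trip the checkArgument instead.
UBinary lessThan = UBinary.create(Kind.LESS_THAN, ULiteral.intLit(4), ULiteral.intLit(17));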
public Collection<? extends Comparable<?>> generateKeys(final AlgorithmSQLContext algorithmSQLContext, final int keyGenerateCount) {
    return getKeyGenerateAlgorithm(algorithmSQLContext.getTableName()).generateKeys(algorithmSQLContext, keyGenerateCount);
}
@Test
void assertGenerateKeyFailure() {
    AlgorithmSQLContext generateContext = mock(AlgorithmSQLContext.class);
    when(generateContext.getTableName()).thenReturn("table_0");
    assertThrows(ShardingTableRuleNotFoundException.class, () -> createMaximumShardingRule().generateKeys(generateContext, 1));
}
@Override
public boolean hasLeadership(UUID leaderSessionId) {
    synchronized (lock) {
        return this.leaderContender != null && this.sessionID.equals(leaderSessionId);
    }
}
@Test
void testHasLeadershipWithoutContender() throws Exception {
    try (final LeaderElection testInstance = new StandaloneLeaderElection(SESSION_ID)) {
        assertThat(testInstance.hasLeadership(SESSION_ID)).isFalse();

        final UUID differentSessionID = UUID.randomUUID();
        assertThat(testInstance.hasLeadership(differentSessionID)).isFalse();
    }
}
@Override
public Collection<String> allClientId() {
    Collection<String> result = new HashSet<>();
    result.addAll(connectionBasedClientManager.allClientId());
    result.addAll(ephemeralIpPortClientManager.allClientId());
    result.addAll(persistentIpPortClientManager.allClientId());
    return result;
}
@Test
void testAllClientId() {
    Collection<String> actual = delegate.allClientId();
    assertTrue(actual.contains(connectionId));
    assertTrue(actual.contains(ephemeralIpPortId));
    assertTrue(actual.contains(persistentIpPortId));
}
public JobRunrConfiguration useDashboard() {
    return useDashboardIf(true);
}
@Test
void dashboardThrowsExceptionIfNoStorageProviderIsAvailable() {
    assertThatThrownBy(() -> JobRunr.configure()
            .useDashboard())
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("A StorageProvider is required to use a JobRunrDashboardWebServer. Please see the documentation on how to setup a job StorageProvider.");
}
@Override
public void doLimitForSelectRequest(SelectRequest selectRequest) throws SQLException {
    if (null == selectRequest || !enabledLimit) {
        return;
    }
    doLimit(selectRequest.getSql());
}
@Test
void testDoLimitForSelectRequestInvalid() throws SQLException {
    SelectRequest selectRequest = SelectRequest.builder().sql("select * from test").build();
    SelectRequest invalid = SelectRequest.builder().sql("CALL SALES.TOTAL_REVENUES()").build();
    List<SelectRequest> selectRequests = new LinkedList<>();
    selectRequests.add(selectRequest);
    selectRequests.add(invalid);
    assertThrows(SQLException.class, () -> sqlLimiter.doLimitForSelectRequest(selectRequests));
}
@Override
public String toJSONString(Object object) {
    try {
        return this.jsonParser.toJSONString(object);
    } catch (Exception e) {
        throw new JsonParseException(e);
    }
}
@Test
public void testToJSONString() {
    TwoPhaseBusinessActionParam actionParam = new TwoPhaseBusinessActionParam();
    actionParam.setActionName("business_action");
    actionParam.setBranchType(BranchType.TCC);
    String resultString = parserWrap.toJSONString(actionParam);
    assertEquals(jsonString, resultString);
}
Object getCellValue(Cell cell, Schema.FieldType type) {
    ByteString cellValue = cell.getValue();
    int valueSize = cellValue.size();
    switch (type.getTypeName()) {
        case BOOLEAN:
            checkArgument(valueSize == 1, message("Boolean", 1));
            return cellValue.toByteArray()[0] != 0;
        case BYTE:
            checkArgument(valueSize == 1, message("Byte", 1));
            return cellValue.toByteArray()[0];
        case INT16:
            checkArgument(valueSize == 2, message("Int16", 2));
            return Shorts.fromByteArray(cellValue.toByteArray());
        case INT32:
            checkArgument(valueSize == 4, message("Int32", 4));
            return Ints.fromByteArray(cellValue.toByteArray());
        case INT64:
            checkArgument(valueSize == 8, message("Int64", 8));
            return Longs.fromByteArray(cellValue.toByteArray());
        case FLOAT:
            checkArgument(valueSize == 4, message("Float", 4));
            return Float.intBitsToFloat(Ints.fromByteArray(cellValue.toByteArray()));
        case DOUBLE:
            checkArgument(valueSize == 8, message("Double", 8));
            return Double.longBitsToDouble(Longs.fromByteArray(cellValue.toByteArray()));
        case DATETIME:
            return DateTime.parse(cellValue.toStringUtf8());
        case STRING:
            return cellValue.toStringUtf8();
        case BYTES:
            return cellValue.toByteArray();
        case LOGICAL_TYPE:
            String identifier = checkArgumentNotNull(type.getLogicalType()).getIdentifier();
            throw new IllegalStateException("Unsupported logical type: " + identifier);
        default:
            throw new IllegalArgumentException(
                String.format("Unsupported cell value type '%s'.", type.getTypeName()));
    }
}
@Test
public void shouldFailParseFloatTypeTooLong() {
    byte[] value = new byte[10];

    IllegalArgumentException exception =
        assertThrows(IllegalArgumentException.class, () -> PARSER.getCellValue(cell(value), FLOAT));
    checkMessage(exception.getMessage(), "Float has to be 4-bytes long bytearray");
}
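The fixed-width checks in getCellValue line up with Guava's big-endian codecs; a standalone round-trip sketch, with an illustrative class name:

import com.google.common.primitives.Ints;

class Int32CellSketch {
    public static void main(String[] args) {
        // Exactly the 4-byte big-endian form getCellValue expects for INT32.
        byte[] encoded = Ints.toByteArray(42);     // {0, 0, 0, 42}
        int decoded = Ints.fromByteArray(encoded); // 42
        System.out.println(decoded);
    }
}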
@Override
public NativeEntity<PipelineDao> createNativeEntity(Entity entity,
                                                    Map<String, ValueReference> parameters,
                                                    Map<EntityDescriptor, Object> nativeEntities,
                                                    String username) {
    if (entity instanceof EntityV1) {
        return decode((EntityV1) entity, parameters, nativeEntities);
    } else {
        throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
    }
}
@Test
public void createNativeEntity() throws NotFoundException {
    final Entity entity = EntityV1.builder()
            .id(ModelId.of("1"))
            .type(ModelTypes.PIPELINE_V1)
            .data(objectMapper.convertValue(PipelineEntity.create(
                    ValueReference.of("Title"),
                    ValueReference.of("Description"),
                    ValueReference.of("pipeline \"Title\"\nstage 0 match either\nrule \"debug\"\nrule \"no-op\"\nend"),
                    Collections.singleton(ValueReference.of("5adf23894b900a0f00000001"))), JsonNode.class))
            .build();

    final EntityDescriptor streamDescriptor = EntityDescriptor.create("5adf23894b900a0f00000001", ModelTypes.STREAM_V1);
    final Stream stream = mock(Stream.class);
    when(stream.getId()).thenReturn("5adf23894b900a0f00000001");
    final Map<EntityDescriptor, Object> nativeEntities = Collections.singletonMap(streamDescriptor, stream);

    final NativeEntity<PipelineDao> nativeEntity = facade.createNativeEntity(entity, Collections.emptyMap(), nativeEntities, "username");

    assertThat(nativeEntity.descriptor().type()).isEqualTo(ModelTypes.PIPELINE_V1);
    assertThat(nativeEntity.entity().title()).isEqualTo("Title");
    assertThat(nativeEntity.entity().description()).isEqualTo("Description");
    assertThat(nativeEntity.entity().source()).startsWith("pipeline \"Title\"");

    assertThat(connectionsService.load("5adf23894b900a0f00000001").pipelineIds())
            .containsOnly(nativeEntity.entity().id());
}
public static String getDatabaseRuleActiveVersionNode(final String databaseName, final String ruleName, final String key) {
    return String.join("/", getDatabaseRuleNode(databaseName, ruleName), key, ACTIVE_VERSION);
}
@Test
void assertGetDatabaseRuleActiveVersionNode() {
    assertThat(DatabaseRuleMetaDataNode.getDatabaseRuleActiveVersionNode("foo_db", "foo_rule", "foo_tables"),
            is("/metadata/foo_db/rules/foo_rule/foo_tables/active_version"));
}
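Reading the join against the test's expectation, getDatabaseRuleNode("foo_db", "foo_rule") must resolve to "/metadata/foo_db/rules/foo_rule" and ACTIVE_VERSION to "active_version"; both are inferred from the assertion above, not shown here:

// Inferred worked example, not library code:
String node = String.join("/", "/metadata/foo_db/rules/foo_rule", "foo_tables", "active_version");
// -> "/metadata/foo_db/rules/foo_rule/foo_tables/active_version"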