Columns: focal_method (string, lengths 13 to 60.9k), test_case (string, lengths 25 to 109k)
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    if (!stringsPresent) {
        return;
    }
    StringsConfig stringsConfig = context.get(StringsConfig.class, defaultStringsConfig);

    try (TemporaryResources tmp = new TemporaryResources()) {
        TikaInputStream tis = TikaInputStream.get(stream, tmp, metadata);
        File input = tis.getFile();

        // Metadata
        metadata.set("strings:min-len", "" + stringsConfig.getMinLength());
        metadata.set("strings:encoding", stringsConfig.toString());
        metadata.set("strings:file_output", doFile(tis));

        int totalBytes = 0;

        // Content
        XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);

        xhtml.startDocument();
        totalBytes = doStrings(input, stringsConfig, xhtml);
        xhtml.endDocument();

        // Metadata
        metadata.set("strings:length", "" + totalBytes);
    }
}
@Test
public void testParse() throws Exception {
    assumeTrue(canRun());
    String resource = "/test-documents/testOCTET_header.dbase3";
    String[] content = {"CLASSNO", "TITLE", "ITEMNO", "LISTNO", "LISTDATE"};
    // These are the metadata keys the parser actually writes
    String[] met_attributes = {"strings:min-len", "strings:encoding", "strings:file_output"};

    StringsConfig stringsConfig = new StringsConfig();

    Parser parser = new StringsParser();
    ((Initializable) parser).initialize(Collections.emptyMap());
    ContentHandler handler = new BodyContentHandler();
    Metadata metadata = new Metadata();

    ParseContext context = new ParseContext();
    context.set(StringsConfig.class, stringsConfig);

    // Let parse failures propagate so the test fails instead of silently passing
    try (InputStream stream = StringsParserTest.class.getResourceAsStream(resource)) {
        parser.parse(stream, handler, metadata, context);
    }

    // Content
    for (String word : content) {
        assertTrue(handler.toString().contains(word), "can't find " + word);
    }

    // Metadata
    for (String attribute : met_attributes) {
        assertTrue(Arrays.asList(metadata.names()).contains(attribute), "missing metadata attribute " + attribute);
    }
}
@Udf(description = "Returns the inverse (arc) sine of an INT value") public Double asin( @UdfParameter( value = "value", description = "The value to get the inverse sine of." ) final Integer value ) { return asin(value == null ? null : value.doubleValue()); }
@Test public void shouldHandleLessThanNegativeOne() { assertThat(Double.isNaN(udf.asin(-1.1)), is(true)); assertThat(Double.isNaN(udf.asin(-6.0)), is(true)); assertThat(Double.isNaN(udf.asin(-2)), is(true)); assertThat(Double.isNaN(udf.asin(-2L)), is(true)); }
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }

        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
                ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
                : MAPPER.readTree(bytes);

        final Object coerced = enforceFieldType(
                "$",
                new JsonValueContext(value, schema)
        );

        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }

        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }

        throw new SerializationException(
                "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
@Test
public void shouldDeserializeScientificNotation() {
    // Given:
    final KsqlJsonDeserializer<BigDecimal> deserializer =
            givenDeserializerForSchema(DecimalUtil.builder(3, 1).build(), BigDecimal.class);

    final byte[] bytes = addMagic("1E+1".getBytes(UTF_8));

    // When:
    final Object result = deserializer.deserialize(SOME_TOPIC, bytes);

    // Then:
    assertThat(result, is(new BigDecimal("10.0")));
}
Future<Boolean> canRoll(int podId) { LOGGER.debugCr(reconciliation, "Determining whether broker {} can be rolled", podId); return canRollBroker(descriptions, podId); }
@Test public void testBelowMinIsr(VertxTestContext context) { KSB ksb = new KSB() .addNewTopic("A", false) .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2") .addNewPartition(0) .replicaOn(0, 1, 3) .leader(0) .isr(0, 1) .endPartition() .endTopic() .addNewTopic("B", false) .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2") .addNewPartition(0) .replicaOn(0, 1, 3) .leader(1) .isr(1) .endPartition() .endTopic() .addBroker(4); KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> { if (brokerId == 4) { assertTrue(canRoll, "broker " + brokerId + " should be rollable, having no partitions"); } else { assertFalse(canRoll, "broker " + brokerId + " should not be rollable, being minisr = 2 and it's only replicated on two brokers"); } a.flag(); }))); } }
@GET
@Produces(MediaTypeRestconf.APPLICATION_YANG_DATA_JSON)
@Path("data/{identifier : .+}")
public Response handleGetRequest(@PathParam("identifier") String uriString) {
    log.debug("handleGetRequest: {}", uriString);

    URI uri = uriInfo.getRequestUri();

    try {
        ObjectNode node = service.runGetOperationOnDataResource(uri);
        if (node == null) {
            RestconfError error =
                    RestconfError.builder(RestconfError.ErrorType.PROTOCOL, RestconfError.ErrorTag.INVALID_VALUE)
                            .errorMessage("Resource not found")
                            .errorPath(uriString)
                            .errorAppTag("handleGetRequest")
                            .build();
            return Response.status(NOT_FOUND)
                    .entity(RestconfError.wrapErrorAsJson(Arrays.asList(error)))
                    .build();
        }
        return Response.ok(node)
                .build();
    } catch (RestconfException e) {
        log.error("ERROR: handleGetRequest: {}", e.getMessage());
        log.debug("Exception in handleGetRequest:", e);
        return Response.status(e.getResponse().getStatus())
                .entity(e.toRestconfErrorJson())
                .build();
    } catch (Exception e) {
        RestconfError error = RestconfError
                .builder(RestconfError.ErrorType.APPLICATION, RestconfError.ErrorTag.OPERATION_FAILED)
                .errorMessage(e.getMessage())
                .errorAppTag("handleGetRequest")
                .build();
        return Response.status(INTERNAL_SERVER_ERROR)
                .entity(RestconfError.wrapErrorAsJson(Arrays.asList(error)))
                .build();
    }
}
@Test public void testHandleGetRequest() { ObjectMapper mapper = new ObjectMapper(); ObjectNode node = mapper.createObjectNode(); expect(restconfService .runGetOperationOnDataResource(URI.create(getBaseUri() + DATA_IETF_SYSTEM_SYSTEM))) .andReturn(node).anyTimes(); replay(restconfService); WebTarget wt = target(); String response = wt.path("/" + DATA_IETF_SYSTEM_SYSTEM).request().get(String.class); assertNotNull(response); }
@Activate public void activate(ComponentContext context) { providerService = providerRegistry.register(this); appId = coreService.registerApplication(APP_NAME); netCfgService.registerConfigFactory(factory); netCfgService.addListener(cfgLister); connectDevices(); modified(context); log.info("Started"); }
@Test public void testActivate() { assertEquals("Incorrect provider service", deviceProviderService, provider.providerService); assertEquals("Incorrect application id", applicationId, provider.appId); assertTrue("Incorrect config factories", cfgFactories.contains(provider.factory)); assertTrue("Incorrect network config listener", netCfgListeners.contains(provider.cfgLister)); }
@Nullable public static TimeHandlerConfig getTimeHandlerConfig(TableConfig tableConfig, Schema schema, Map<String, String> taskConfig) { String timeColumn = tableConfig.getValidationConfig().getTimeColumnName(); if (timeColumn == null) { return null; } DateTimeFieldSpec fieldSpec = schema.getSpecForTimeColumn(timeColumn); Preconditions.checkState(fieldSpec != null, "No valid spec found for time column: %s in schema for table: %s", timeColumn, tableConfig.getTableName()); TimeHandlerConfig.Builder timeHandlerConfigBuilder = new TimeHandlerConfig.Builder(TimeHandler.Type.EPOCH); String windowStartMs = taskConfig.get(MergeTask.WINDOW_START_MS_KEY); String windowEndMs = taskConfig.get(MergeTask.WINDOW_END_MS_KEY); if (windowStartMs != null && windowEndMs != null) { timeHandlerConfigBuilder.setTimeRange(Long.parseLong(windowStartMs), Long.parseLong(windowEndMs)) .setNegateWindowFilter(Boolean.parseBoolean(taskConfig.get(MergeTask.NEGATE_WINDOW_FILTER))); } String roundBucketTimePeriod = taskConfig.get(MergeTask.ROUND_BUCKET_TIME_PERIOD_KEY); if (roundBucketTimePeriod != null) { timeHandlerConfigBuilder.setRoundBucketMs(TimeUtils.convertPeriodToMillis(roundBucketTimePeriod)); } String partitionBucketTimePeriod = taskConfig.get(MergeTask.PARTITION_BUCKET_TIME_PERIOD_KEY); if (partitionBucketTimePeriod != null) { timeHandlerConfigBuilder.setPartitionBucketMs(TimeUtils.convertPeriodToMillis(partitionBucketTimePeriod)); } return timeHandlerConfigBuilder.build(); }
@Test
public void testGetTimeHandlerConfig() {
    TableConfig tableConfig =
            new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").setTimeColumnName("dateTime").build();
    Schema schema = new Schema.SchemaBuilder()
            .addDateTime("dateTime", DataType.LONG, "1:SECONDS:SIMPLE_DATE_FORMAT:yyyyMMddHHmmss", "1:SECONDS").build();
    Map<String, String> taskConfig = new HashMap<>();
    long expectedWindowStartMs = 1625097600000L;
    long expectedWindowEndMs = 1625184000000L;
    taskConfig.put(MergeTask.WINDOW_START_MS_KEY, Long.toString(expectedWindowStartMs));
    taskConfig.put(MergeTask.WINDOW_END_MS_KEY, Long.toString(expectedWindowEndMs));
    long expectedRoundBucketMs = 6 * 3600 * 1000;
    taskConfig.put(MergeTask.ROUND_BUCKET_TIME_PERIOD_KEY, "6h");
    long expectedPartitionBucketMs = 24 * 3600 * 1000;
    taskConfig.put(MergeTask.PARTITION_BUCKET_TIME_PERIOD_KEY, "1d");

    TimeHandlerConfig timeHandlerConfig = MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, taskConfig);
    assertNotNull(timeHandlerConfig);
    assertEquals(timeHandlerConfig.getType(), TimeHandler.Type.EPOCH);
    assertEquals(timeHandlerConfig.getStartTimeMs(), expectedWindowStartMs);
    assertEquals(timeHandlerConfig.getEndTimeMs(), expectedWindowEndMs);
    assertEquals(timeHandlerConfig.getRoundBucketMs(), expectedRoundBucketMs);
    assertEquals(timeHandlerConfig.getPartitionBucketMs(), expectedPartitionBucketMs);

    // No time column in table config
    TableConfig tableConfigWithoutTimeColumn =
            new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build();
    assertNull(MergeTaskUtils.getTimeHandlerConfig(tableConfigWithoutTimeColumn, schema, taskConfig));

    // Time column does not exist in schema
    Schema schemaWithoutTimeColumn = new Schema.SchemaBuilder().build();
    try {
        MergeTaskUtils.getTimeHandlerConfig(tableConfig, schemaWithoutTimeColumn, taskConfig);
        fail();
    } catch (IllegalStateException e) {
        // Expected
    }
}
public static Future<Integer> authTlsHash(SecretOperator secretOperations, String namespace, KafkaClientAuthentication auth, List<CertSecretSource> certSecretSources) {
    Future<Integer> tlsFuture;
    if (certSecretSources == null || certSecretSources.isEmpty()) {
        tlsFuture = Future.succeededFuture(0);
    } else {
        // get all TLS trusted certs, compute hash from each of them, sum hashes
        tlsFuture = Future.join(certSecretSources.stream().map(certSecretSource ->
                        getCertificateAsync(secretOperations, namespace, certSecretSource)
                                .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList()))
                .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
    }

    if (auth == null) {
        return tlsFuture;
    } else {
        // compute hash from Auth
        if (auth instanceof KafkaClientAuthenticationScram) {
            // only passwordSecret can be changed
            return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
                    .compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
        } else if (auth instanceof KafkaClientAuthenticationPlain) {
            // only passwordSecret can be changed
            return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
                    .compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
        } else if (auth instanceof KafkaClientAuthenticationTls) {
            // custom cert can be used (and changed)
            return ((KafkaClientAuthenticationTls) auth).getCertificateAndKey() == null ? tlsFuture :
                    tlsFuture.compose(tlsHash -> getCertificateAndKeyAsync(secretOperations, namespace, (KafkaClientAuthenticationTls) auth)
                            .compose(crtAndKey -> Future.succeededFuture(crtAndKey.certAsBase64String().hashCode() + crtAndKey.keyAsBase64String().hashCode() + tlsHash)));
        } else if (auth instanceof KafkaClientAuthenticationOAuth) {
            List<Future<Integer>> futureList = ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates() == null ?
                    new ArrayList<>() :
                    ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates().stream().map(certSecretSource ->
                            getCertificateAsync(secretOperations, namespace, certSecretSource)
                                    .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList());
            futureList.add(tlsFuture);
            futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getAccessToken()));
            futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getClientSecret()));
            futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getRefreshToken()));
            return Future.join(futureList)
                    .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
        } else {
            // unknown Auth type
            return tlsFuture;
        }
    }
}
@Test void testAuthTlsPlainSecretFoundAndPasswordNotFound() { SecretOperator secretOperator = mock(SecretOperator.class); Map<String, String> data = new HashMap<>(); data.put("passwordKey", "my-password"); Secret secret = new Secret(); secret.setData(data); CompletionStage<Secret> cf = CompletableFuture.supplyAsync(() -> secret); when(secretOperator.getAsync(anyString(), anyString())).thenReturn(Future.fromCompletionStage(cf)); KafkaClientAuthenticationPlain auth = new KafkaClientAuthenticationPlain(); PasswordSecretSource passwordSecretSource = new PasswordSecretSource(); passwordSecretSource.setSecretName("my-secret"); passwordSecretSource.setPassword("password1"); auth.setPasswordSecret(passwordSecretSource); Future<Integer> result = VertxUtil.authTlsHash(secretOperator, "anyNamespace", auth, List.of()); result.onComplete(handler -> { assertTrue(handler.failed()); assertEquals("Items with key(s) [password1] are missing in Secret my-secret", handler.cause().getMessage()); }); }
@Override public void notifyRequiredSegmentId(int subpartitionId, int segmentId) { if (segmentId > requiredSegmentId) { requiredSegmentId = segmentId; stopSendingData = false; availabilityListener.notifyDataAvailable(this); } }
@Test void testNotifyRequiredSegmentId() { tieredStorageResultSubpartitionView.notifyRequiredSegmentId(0, 1); assertThat(availabilityListener).isDone(); }
public Span nextSpan(TraceContextOrSamplingFlags extracted) {
    if (extracted == null) throw new NullPointerException("extracted == null");
    TraceContext context = extracted.context();
    if (context != null) return newChild(context);

    TraceIdContext traceIdContext = extracted.traceIdContext();
    if (traceIdContext != null) {
        return _toSpan(null, decorateContext(
            InternalPropagation.instance.flags(extracted.traceIdContext()),
            traceIdContext.traceIdHigh(),
            traceIdContext.traceId(),
            0L,
            0L,
            0L,
            extracted.extra()
        ));
    }

    SamplingFlags samplingFlags = extracted.samplingFlags();
    List<Object> extra = extracted.extra();

    TraceContext parent = currentTraceContext.get();
    int flags;
    long traceIdHigh = 0L, traceId = 0L, localRootId = 0L, spanId = 0L;
    if (parent != null) {
        // At this point, we didn't extract trace IDs, but do have a trace in progress. Since typical
        // trace sampling is up front, we retain the decision from the parent.
        flags = InternalPropagation.instance.flags(parent);
        traceIdHigh = parent.traceIdHigh();
        traceId = parent.traceId();
        localRootId = parent.localRootId();
        spanId = parent.spanId();
        extra = concat(extra, parent.extra());
    } else {
        flags = InternalPropagation.instance.flags(samplingFlags);
    }
    return _toSpan(parent, decorateContext(flags, traceIdHigh, traceId, localRootId, spanId, 0L, extra));
}
@Test void localRootId_nextSpan_flags_debug() { TraceContextOrSamplingFlags flags = TraceContextOrSamplingFlags.DEBUG; localRootId(flags, flags, ctx -> tracer.nextSpan(ctx)); }
public StringSubject factValue(String key) { return doFactValue(key, null); }
@Test public void factValueFailWrongValue() { expectFailureWhenTestingThat(fact("foo", "the foo")).factValue("foo").isEqualTo("the bar"); assertFailureValue("value of", "failure.factValue(foo)"); }
@Override
void execute() throws HiveMetaException {
    // Need to confirm unless it's a dry run or specified -yes
    if (!schemaTool.isDryRun() && !this.yes) {
        boolean confirmed = promptToConfirm();
        if (!confirmed) {
            System.out.println("Operation cancelled, exiting.");
            return;
        }
    }

    Connection conn = schemaTool.getConnectionToMetastore(true);
    try {
        try (Statement stmt = conn.createStatement()) {
            final String def = Warehouse.DEFAULT_DATABASE_NAME;

            // List databases
            List<String> databases = new ArrayList<>();
            try (ResultSet rs = stmt.executeQuery("SHOW DATABASES")) {
                while (rs.next()) {
                    databases.add(rs.getString(1));
                }
            }

            // Drop databases
            for (String database : databases) {
                // Don't try to drop 'default' database as it's not allowed
                if (!def.equalsIgnoreCase(database)) {
                    if (schemaTool.isDryRun()) {
                        System.out.println("would drop database " + database);
                    } else {
                        logIfVerbose("dropping database " + database);
                        stmt.execute(String.format("DROP DATABASE `%s` CASCADE", database));
                    }
                }
            }

            // List tables in 'default' database
            List<String> tables = new ArrayList<>();
            try (ResultSet rs = stmt.executeQuery(String.format("SHOW TABLES IN `%s`", def))) {
                while (rs.next()) {
                    tables.add(rs.getString(1));
                }
            }

            // Drop tables in 'default' database
            for (String table : tables) {
                if (schemaTool.isDryRun()) {
                    System.out.println("would drop table " + table);
                } else {
                    logIfVerbose("dropping table " + table);
                    stmt.execute(String.format("DROP TABLE `%s`.`%s`", def, table));
                }
            }
        }
    } catch (SQLException se) {
        throw new HiveMetaException("Failed to drop databases.", se);
    }
}
@Test public void testExecuteDryRun() throws Exception { setUpTwoDatabases(); when(uut.schemaTool.isDryRun()).thenReturn(true); uut.execute(); Mockito.verify(stmtMock, times(0)).execute(anyString()); }
static void populateSchemaWithConstraints(Schema toPopulate, SimpleTypeImpl t) { if (t.getAllowedValues() != null && !t.getAllowedValues().isEmpty()) { parseSimpleType(DMNOASConstants.X_DMN_ALLOWED_VALUES, toPopulate, t.getAllowedValuesFEEL(), t.getAllowedValues()); } if (t.getTypeConstraint() != null && !t.getTypeConstraint().isEmpty()) { parseSimpleType(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS, toPopulate, t.getTypeConstraintFEEL(), t.getTypeConstraint()); } }
@Test void populateSchemaWithConstraintsForTypeConstraints() { List<String> enumBase = Arrays.asList("DMN", "PMML", "JBPMN", "DRL"); List<Object> toEnum = enumBase.stream().map(toMap -> String.format("\"%s\"", toMap)).collect(Collectors.toUnmodifiableList()); String typeConstraintsString = String.join(",", toEnum.stream().map(toMap -> String.format("%s", toMap)).toList()); SimpleTypeImpl toRead = getSimpleType(null, typeConstraintsString, FEEL_STRING, BuiltInType.STRING); AtomicReference<Schema> toPopulate = new AtomicReference<>(getSchemaForSimpleType(toRead)); DMNTypeSchemas.populateSchemaWithConstraints(toPopulate.get(), toRead); assertEquals(enumBase.size(), toPopulate.get().getEnumeration().size()); enumBase.forEach(en -> assertTrue(toPopulate.get().getEnumeration().contains(en))); assertTrue(toPopulate.get().getExtensions().containsKey(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS)); String retrieved = ((String) toPopulate.get().getExtensions().get(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS)).replace(" ", ""); assertEquals(typeConstraintsString, retrieved); toEnum = Arrays.asList(1, 3, 6, 78); typeConstraintsString = String.join(",", toEnum.stream().map(toMap -> String.format("%s", toMap)).toList()); toRead = getSimpleType(null, typeConstraintsString, FEEL_NUMBER, BuiltInType.NUMBER); toPopulate.set(getSchemaForSimpleType(toRead)); DMNTypeSchemas.populateSchemaWithConstraints(toPopulate.get(), toRead); assertEquals(toEnum.size(), toPopulate.get().getEnumeration().size()); toEnum.stream().map(i -> BigDecimal.valueOf((int) i)).forEach(en -> assertTrue(toPopulate.get().getEnumeration().contains(en))); assertTrue(toPopulate.get().getExtensions().containsKey(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS)); retrieved = ((String) toPopulate.get().getExtensions().get(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS)).replace( " ", ""); assertEquals(typeConstraintsString, retrieved); }
boolean acceptClass(String classname) { if (inclusions.isEmpty() && exclusions.isEmpty()) { return true; } return acceptResource(classToResource(classname)); }
@Test void ALL_accepts_everything() throws Exception { assertThat(Mask.ALL.acceptClass("org.sonar.Bar")).isTrue(); assertThat(Mask.ALL.acceptClass("Bar")).isTrue(); }
static ExecutorService getConfiguredExecutorService(
        CamelContext camelContext, String name, DynamicRouterConfiguration cfg, boolean useDefault)
        throws IllegalArgumentException {
    ExecutorServiceManager manager = camelContext.getExecutorServiceManager();
    ObjectHelper.notNull(manager, ESM_NAME, camelContext);
    String exSvcRef = cfg.getExecutorService();
    ExecutorService exSvcBean = cfg.getExecutorServiceBean();
    String errorMessage = "ExecutorServiceRef '" + exSvcRef + "' not found in registry as an ExecutorService "
            + "instance or as a thread pool profile";
    // The first (preferred) option is to use an explicitly-configured executor if the configuration has it
    return Optional.ofNullable(exSvcBean)
            // The second preference is to check for an executor service reference
            .or(() -> Optional.ofNullable(exSvcRef)
                    // Try to get the referenced executor service
                    .map(r -> lookupExecutorServiceRef(camelContext, name, cfg, r)
                            // But, if the reference is specified in the config,
                            // and could not be obtained, this is an error
                            .orElseThrow(() -> new IllegalArgumentException(errorMessage))))
            // The third and final option is to create a new "default" thread pool if the parameter
            // specifies that the default thread pool should be used as a fallback
            .or(() -> useDefault ? Optional.of(manager.newDefaultThreadPool(cfg, name)) : Optional.empty())
            // failing the above options, then no executor service is configured
            .orElse(null);
}
@Test void testGetConfiguredExecutorServiceWithoutBeanAndServiceRefAndUseDefaultFalse() { when(mockConfig.getExecutorServiceBean()).thenReturn(null); when(mockConfig.getExecutorService()).thenReturn(null); when(camelContext.getExecutorServiceManager()).thenReturn(manager); ExecutorService result = DynamicRouterRecipientListHelper.getConfiguredExecutorService(camelContext, "someName", mockConfig, false); assertNull(result); }
protected static int findSequence(byte[] sequence, byte[] buffer) { int pos = -1; for (int i = 0; i < buffer.length - sequence.length + 1; i++) { if (buffer[i] == sequence[0] && testRemaining(sequence, buffer, i)) { pos = i; break; } } return pos; }
@Test public void testFindSequence() throws IOException { byte[] sequence = "project".getBytes(StandardCharsets.UTF_8); byte[] buffer = "my big project".getBytes(StandardCharsets.UTF_8); int expResult = 7; int result = PomProjectInputStream.findSequence(sequence, buffer); assertEquals(expResult, result); sequence = "<project".getBytes(StandardCharsets.UTF_8); buffer = "my big project".getBytes(StandardCharsets.UTF_8); expResult = -1; result = PomProjectInputStream.findSequence(sequence, buffer); assertEquals(expResult, result); sequence = "bigger sequence".getBytes(StandardCharsets.UTF_8); buffer = "buffer".getBytes(StandardCharsets.UTF_8); expResult = -1; result = PomProjectInputStream.findSequence(sequence, buffer); assertEquals(expResult, result); sequence = "fff".getBytes(StandardCharsets.UTF_8); buffer = "buffer".getBytes(StandardCharsets.UTF_8); expResult = -1; result = PomProjectInputStream.findSequence(sequence, buffer); assertEquals(expResult, result); }
public static void checkArgument(boolean expression, String errorMessage) { checkArgument(expression, () -> errorMessage); }
@Test public void testCheckingCorrectArgument() { try { Utils.checkArgument(true, "Error"); } catch (Throwable t) { fail("Checking argument should not fail"); } }
void executeWork(DataflowWorkExecutor worker, DataflowWorkProgressUpdater progressUpdater)
        throws Exception {
    progressUpdater.startReportingProgress();
    // Blocks while executing the work.
    try {
        worker.execute();
    } finally {
        // stopReportingProgress can throw an exception if the final progress
        // update fails. For correctness, the task must then be marked as failed.
        progressUpdater.stopReportingProgress();
    }
}
@Test public void testStopProgressReportInCaseOfFailure() throws Exception { doThrow(new WorkerException()).when(mockWorkExecutor).execute(); BatchDataflowWorker worker = new BatchDataflowWorker( mockWorkUnitClient, IntrinsicMapTaskExecutorFactory.defaultFactory(), options); try { worker.executeWork(mockWorkExecutor, mockProgressUpdater); } catch (WorkerException e) { /* Expected - ignore. */ } verify(mockProgressUpdater, times(1)).stopReportingProgress(); }
public static RelDataType create(HazelcastIntegerType type, boolean nullable) { if (type.isNullable() == nullable) { return type; } return create0(type.getSqlTypeName(), nullable, type.getBitWidth()); }
@Test public void testNullableIntegerTypeOfTypeName() { assertType(TINYINT, Byte.SIZE - 1, false, HazelcastIntegerType.create(TINYINT, false)); assertType(SMALLINT, Short.SIZE - 1, false, HazelcastIntegerType.create(SMALLINT, false)); assertType(INTEGER, Integer.SIZE - 1, false, HazelcastIntegerType.create(INTEGER, false)); assertType(BIGINT, Long.SIZE - 1, false, HazelcastIntegerType.create(BIGINT, false)); assertType(TINYINT, Byte.SIZE - 1, true, HazelcastIntegerType.create(TINYINT, true)); assertType(SMALLINT, Short.SIZE - 1, true, HazelcastIntegerType.create(SMALLINT, true)); assertType(INTEGER, Integer.SIZE - 1, true, HazelcastIntegerType.create(INTEGER, true)); assertType(BIGINT, Long.SIZE - 1, true, HazelcastIntegerType.create(BIGINT, true)); }
@Override public void setConfigAttributes(Object attributes) { clear(); if (attributes == null) { return; } Map attributeMap = (Map) attributes; String materialType = (String) attributeMap.get(AbstractMaterialConfig.MATERIAL_TYPE); if (SvnMaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getSvnMaterial(), (Map) attributeMap.get(SvnMaterialConfig.TYPE)); } else if (HgMaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getHgMaterial(), (Map) attributeMap.get(HgMaterialConfig.TYPE)); } else if (GitMaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getGitMaterial(), (Map) attributeMap.get(GitMaterialConfig.TYPE)); } else if (P4MaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getP4Material(), (Map) attributeMap.get(P4MaterialConfig.TYPE)); } else if (DependencyMaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getDependencyMaterial(), (Map) attributeMap.get(DependencyMaterialConfig.TYPE)); } else if (TfsMaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getTfsMaterial(), (Map) attributeMap.get(TfsMaterialConfig.TYPE)); } else if (PackageMaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getPackageMaterial(), (Map) attributeMap.get(PackageMaterialConfig.TYPE)); } else if (PluggableSCMMaterialConfig.TYPE.equals(materialType)) { addMaterialConfig(getSCMMaterial(), (Map) attributeMap.get(PluggableSCMMaterialConfig.TYPE)); } }
@Test public void shouldClearExistingAndSetHgConfigAttributesForMaterial() { MaterialConfigs materialConfigs = new MaterialConfigs(); materialConfigs.add(hg("", null)); materialConfigs.add(svn("", "", "", false)); Map<String, String> hashMap = new HashMap<>(); hashMap.put(HgMaterialConfig.URL, "foo"); Map<String, Object> attributeMap = new HashMap<>(); attributeMap.put(AbstractMaterialConfig.MATERIAL_TYPE, HgMaterialConfig.TYPE); attributeMap.put(HgMaterialConfig.TYPE, hashMap); materialConfigs.setConfigAttributes(attributeMap); assertThat(materialConfigs).hasSize(1); assertThat(materialConfigs.first()).isEqualTo(hg("foo", null)); }
@Override public CompletableFuture<Void> closeAsync() { synchronized (lock) { if (isShutdown) { return terminationFuture; } else { isShutdown = true; final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3); final Time gracePeriod = Time.seconds(1L); if (metricQueryServiceRpcService != null) { final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture = metricQueryServiceRpcService.closeAsync(); terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture); } Throwable throwable = null; for (ReporterAndSettings reporterAndSettings : reporters) { try { reporterAndSettings.getReporter().close(); } catch (Throwable t) { throwable = ExceptionUtils.firstOrSuppressed(t, throwable); } } reporters.clear(); if (throwable != null) { terminationFutures.add( FutureUtils.completedExceptionally( new FlinkException( "Could not shut down the metric reporters properly.", throwable))); } final CompletableFuture<Void> reporterExecutorShutdownFuture = ExecutorUtils.nonBlockingShutdown( gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS, reporterScheduledExecutor); terminationFutures.add(reporterExecutorShutdownFuture); final CompletableFuture<Void> viewUpdaterExecutorShutdownFuture = ExecutorUtils.nonBlockingShutdown( gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS, viewUpdaterScheduledExecutor); terminationFutures.add(viewUpdaterExecutorShutdownFuture); FutureUtils.completeAll(terminationFutures) .whenComplete( (Void ignored, Throwable error) -> { if (error != null) { terminationFuture.completeExceptionally(error); } else { terminationFuture.complete(null); } }); return terminationFuture; } } }
@Test void testConfigurableDelimiter() throws Exception { Configuration config = new Configuration(); config.set(MetricOptions.SCOPE_DELIMITER, "_"); config.set(MetricOptions.SCOPE_NAMING_TM, "A.B.C.D.E"); MetricRegistryImpl registry = new MetricRegistryImpl( MetricRegistryTestUtils.fromConfiguration(config), ReporterSetup.fromConfiguration(config, null)); TaskManagerMetricGroup tmGroup = TaskManagerMetricGroup.createTaskManagerMetricGroup( registry, "host", new ResourceID("id")); assertThat(tmGroup.getMetricIdentifier("name")).isEqualTo("A_B_C_D_E_name"); registry.closeAsync().get(); }
public static PlanNodeStatsEstimate computeSemiJoin(PlanNodeStatsEstimate sourceStats, PlanNodeStatsEstimate filteringSourceStats, VariableReferenceExpression sourceJoinVariable, VariableReferenceExpression filteringSourceJoinVariable) { return compute(sourceStats, filteringSourceStats, sourceJoinVariable, filteringSourceJoinVariable, (sourceJoinSymbolStats, filteringSourceJoinSymbolStats) -> min(filteringSourceJoinSymbolStats.getDistinctValuesCount(), sourceJoinSymbolStats.getDistinctValuesCount())); }
@Test
public void testSemiJoin() {
    // overlapping ranges
    assertThat(computeSemiJoin(inputStatistics, inputStatistics, x, w))
            .variableStats(x, stats -> stats
                    .lowValue(xStats.getLowValue())
                    .highValue(xStats.getHighValue())
                    .nullsFraction(0)
                    .distinctValuesCount(wStats.getDistinctValuesCount()))
            .variableStats(w, stats -> stats.isEqualTo(wStats))
            .variableStats(z, stats -> stats.isEqualTo(zStats))
            .outputRowsCount(inputStatistics.getOutputRowCount() * xStats.getValuesFraction() * (wStats.getDistinctValuesCount() / xStats.getDistinctValuesCount()));

    // overlapping ranges, nothing filtered out
    assertThat(computeSemiJoin(inputStatistics, inputStatistics, x, u))
            .variableStats(x, stats -> stats
                    .lowValue(xStats.getLowValue())
                    .highValue(xStats.getHighValue())
                    .nullsFraction(0)
                    .distinctValuesCount(xStats.getDistinctValuesCount()))
            .variableStats(u, stats -> stats.isEqualTo(uStats))
            .variableStats(z, stats -> stats.isEqualTo(zStats))
            .outputRowsCount(inputStatistics.getOutputRowCount() * xStats.getValuesFraction());

    // source stats are unknown
    assertThat(computeSemiJoin(inputStatistics, inputStatistics, unknown, u))
            .variableStats(unknown, stats -> stats
                    .nullsFraction(0)
                    .distinctValuesCountUnknown()
                    .unknownRange())
            .variableStats(u, stats -> stats.isEqualTo(uStats))
            .variableStats(z, stats -> stats.isEqualTo(zStats))
            .outputRowsCountUnknown();

    // filtering stats are unknown
    assertThat(computeSemiJoin(inputStatistics, inputStatistics, x, unknown))
            .variableStats(x, stats -> stats
                    .nullsFraction(0)
                    .lowValue(xStats.getLowValue())
                    .highValue(xStats.getHighValue())
                    .distinctValuesCountUnknown())
            .variableStatsUnknown(unknown)
            .variableStats(z, stats -> stats.isEqualTo(zStats))
            .outputRowsCountUnknown();

    // zero distinct values
    assertThat(computeSemiJoin(inputStatistics, inputStatistics, emptyRange, emptyRange))
            .outputRowsCount(0);

    // fractional distinct values
    assertThat(computeSemiJoin(inputStatistics, inputStatistics, fractionalNdv, fractionalNdv))
            .outputRowsCount(1000)
            .variableStats(fractionalNdv, stats -> stats
                    .nullsFraction(0)
                    .distinctValuesCount(0.1));
}
@Override protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) { CasdoorAuthService casdoorAuthService = Singleton.INST.get(CasdoorAuthService.class); ServerHttpRequest request = exchange.getRequest(); String token = exchange.getRequest().getHeaders().getFirst(HttpHeaders.AUTHORIZATION); if (Objects.nonNull(token)) { CasdoorUser casdoorUser = casdoorAuthService.parseJwtToken(token); if (Objects.nonNull(casdoorUser)) { return chain.execute(handleToken(exchange, casdoorUser)); } } MultiValueMap<String, String> queryParams = request.getQueryParams(); String code = queryParams.getFirst("code"); String state = queryParams.getFirst("state"); if (Objects.nonNull(code) || Objects.nonNull(state)) { token = casdoorAuthService.getOAuthToken(code, state); CasdoorUser casdoorUser = casdoorAuthService.parseJwtToken(token); if (Objects.nonNull(casdoorUser)) { return chain.execute(handleToken(exchange, casdoorUser)); } } Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.ERROR_TOKEN); return WebFluxResultUtils.result(exchange, error); }
@Test void doExecute() { final PluginData pluginData = new PluginData("pluginId", "pluginName", "{\"organization-name\":\"test\",\"application-name\":\"app-test\",\"endpoint\":\"http://localhost:8000\",\"client_secrect\":\"a4209d412a33a842b7a9c05a3446e623cbb7262d\",\"client_id\":\"6e3a84154e73d1fb156a\",\"certificate\":\"-----BEGIN CERTIFICATE-----\\n\"}", "0", false, null); casdoorPluginDateHandlerTest.handlerPlugin(pluginData); try { CasdoorAuthService casdoorAuthService = Singleton.INST.get(CasdoorAuthService.class); casdoorPluginTest.doExecute(exchange, chain, selector, rule); } catch (Exception e) { Assumptions.assumeTrue(e instanceof CasdoorAuthException); } CasdoorAuthService casdoorAuthService = mock(CasdoorAuthService.class); String token = exchange.getRequest().getHeaders().getFirst(HttpHeaders.AUTHORIZATION); CasdoorUser casdoorUser = new CasdoorUser(); Mockito.when(casdoorAuthService.parseJwtToken(token)).thenReturn(casdoorUser); Singleton.INST.single(CasdoorAuthService.class, casdoorAuthService); when(this.chain.execute(any())).thenReturn(Mono.empty()); Mono<Void> mono = casdoorPluginTest.doExecute(exchange, chain, selector, rule); StepVerifier.create(mono).expectSubscription().verifyComplete(); exchange = MockServerWebExchange.from(MockServerHttpRequest .get("localshost") .queryParam("state", "state") .queryParam("code", "code") .build()); Mockito.when(casdoorAuthService.getOAuthToken("code", "state")).thenReturn(token); Singleton.INST.single(CasdoorAuthService.class, casdoorAuthService); mono = casdoorPluginTest.doExecute(exchange, chain, selector, rule); StepVerifier.create(mono).expectSubscription().verifyComplete(); exchange = MockServerWebExchange.from(MockServerHttpRequest .get("localshost") .build()); mono = casdoorPluginTest.doExecute(exchange, chain, selector, rule); StepVerifier.create(mono).expectSubscription().verifyComplete(); }
public ControlledFragmentHandler.Action onExtensionMessage( final int actingBlockLength, final int templateId, final int schemaId, final int actingVersion, final DirectBuffer buffer, final int offset, final int length, final Header header) { if (null != consensusModuleExtension) { return consensusModuleExtension.onIngressExtensionMessage( actingBlockLength, templateId, schemaId, actingVersion, buffer, offset, length, header); } throw new ClusterException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId); }
@Test void shouldThrowExceptionOnUnknownSchemaAndNoAdapter() { final TestClusterClock clock = new TestClusterClock(TimeUnit.MILLISECONDS); ctx.epochClock(clock).clusterClock(clock); final ConsensusModuleAgent agent = new ConsensusModuleAgent(ctx); assertThrows(ClusterException.class, () -> agent.onExtensionMessage(0, 0, SCHEMA_ID, 0, null, 0, 0, null)); }
static <T> ByteBuddyProxyInvoker<T> newInstance(T proxy, Class<T> type, URL url) { return new ByteBuddyProxyInvoker<>(proxy, type, url, MethodInvoker.newInstance(proxy.getClass())); }
@Test void testNewInstance() throws Throwable { URL url = URL.valueOf("test://test:11/test?group=dubbo&version=1.1"); RemoteService proxy = Mockito.mock(RemoteService.class); ByteBuddyProxyInvoker<RemoteService> invoker = ByteBuddyProxyInvoker.newInstance(proxy, RemoteService.class, url); invoker.doInvoke(proxy, "sayHello", new Class[] {String.class}, new Object[] {"test"}); Mockito.verify(proxy, Mockito.times(1)).sayHello("test"); Assertions.assertThrows( IllegalArgumentException.class, () -> invoker.doInvoke(proxy, "equals", new Class[] {String.class}, new Object[] {"test", "test2"})); }
public ExecutorService getExecutorService() { return executorService; }
@Test public void testTikaExecutorServiceFromConfig() throws Exception { URL url = getResourceAsUrl("TIKA-1762-executors.xml"); TikaConfig config = new TikaConfig(url); ThreadPoolExecutor executorService = (ThreadPoolExecutor) config.getExecutorService(); assertTrue((executorService instanceof DummyExecutor), "Should use Dummy Executor"); assertEquals(3, executorService.getCorePoolSize(), "Should have configured Core Threads"); assertEquals(10, executorService.getMaximumPoolSize(), "Should have configured Max Threads"); }
@Override public int getMediumLE(int index) { int value = getUnsignedMediumLE(index); if ((value & 0x800000) != 0) { value |= 0xff000000; } return value; }
@Test public void testGetMediumLEAfterRelease() { assertThrows(IllegalReferenceCountException.class, new Executable() { @Override public void execute() { releasedBuffer().getMediumLE(0); } }); }
@Override public boolean match(Message msg, StreamRule rule) { if (msg.getField(rule.getField()) == null) { return rule.getInverted(); } final String value = msg.getField(rule.getField()).toString(); return rule.getInverted() ^ value.trim().equals(rule.getValue()); }
@Test public void testMissedMatch() { StreamRule rule = getSampleRule(); Message msg = getSampleMessage(); msg.addField("something", "nonono"); StreamRuleMatcher matcher = getMatcher(rule); assertFalse(matcher.match(msg, rule)); }
@Override public void unsubscribe(URL url, NotifyListener listener) { super.unsubscribe(url, listener); received.remove(url); }
@Test
void testUnsubscribe() {
    // subscribe first
    registry.subscribe(consumerUrl, new NotifyListener() {
        @Override
        public void notify(List<URL> urls) {
            // do nothing
        }
    });
    // then unsubscribe
    registry.unsubscribe(consumerUrl, new NotifyListener() {
        @Override
        public void notify(List<URL> urls) {
            Map<URL, Set<NotifyListener>> subscribed = registry.getSubscribed();
            Set<NotifyListener> listeners = subscribed.get(consumerUrl);
            assertTrue(listeners.isEmpty());
            Map<URL, Set<URL>> received = registry.getReceived();
            assertTrue(received.get(consumerUrl).isEmpty());
        }
    });
}
@Override public void report(SortedMap<MetricName, Gauge> gauges, SortedMap<MetricName, Counter> counters, SortedMap<MetricName, Histogram> histograms, SortedMap<MetricName, Meter> meters, SortedMap<MetricName, Timer> timers) { if (loggerProxy.isEnabled(marker)) { for (Entry<MetricName, Gauge> entry : gauges.entrySet()) { logGauge(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Counter> entry : counters.entrySet()) { logCounter(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Histogram> entry : histograms.entrySet()) { logHistogram(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Meter> entry : meters.entrySet()) { logMeter(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Timer> entry : timers.entrySet()) { logTimer(entry.getKey(), entry.getValue()); } } }
@Test public void reportsMeterValues() throws Exception { final Meter meter = mock(Meter.class); when(meter.getCount()).thenReturn(1L); when(meter.getMeanRate()).thenReturn(2.0); when(meter.getOneMinuteRate()).thenReturn(3.0); when(meter.getFiveMinuteRate()).thenReturn(4.0); when(meter.getFifteenMinuteRate()).thenReturn(5.0); when(logger.isInfoEnabled(marker)).thenReturn(true); infoReporter.report(this.map(), this.map(), this.map(), map("test.meter", meter), this.map()); verify(logger).info(marker, "type={}, name={}, count={}, mean_rate={}, m1={}, m5={}, m15={}, rate_unit={}", "METER", "prefix.test.meter", 1L, 2.0, 3.0, 4.0, 5.0, "events/second"); }
@Override public AllocatedSlotsAndReservationStatus removeSlots(ResourceID owner) { final Set<AllocationID> slotsOfTaskExecutor = slotsPerTaskExecutor.remove(owner); if (slotsOfTaskExecutor != null) { final Collection<AllocatedSlot> removedSlots = new ArrayList<>(); final Map<AllocationID, ReservationStatus> removedSlotsReservationStatus = new HashMap<>(); for (AllocationID allocationId : slotsOfTaskExecutor) { final ReservationStatus reservationStatus = containsFreeSlot(allocationId) ? ReservationStatus.FREE : ReservationStatus.RESERVED; final AllocatedSlot removedSlot = Preconditions.checkNotNull(removeSlotInternal(allocationId)); removedSlots.add(removedSlot); removedSlotsReservationStatus.put(removedSlot.getAllocationId(), reservationStatus); } return new DefaultAllocatedSlotsAndReservationStatus( removedSlots, removedSlotsReservationStatus); } else { return new DefaultAllocatedSlotsAndReservationStatus( Collections.emptyList(), Collections.emptyMap()); } }
@Test void testRemoveSlotsOfUnknownOwnerIsIgnored() { final DefaultAllocatedSlotPool slotPool = new DefaultAllocatedSlotPool(); slotPool.removeSlots(ResourceID.generate()); }
public CompletableFuture<Long> compact(String topic) { return RawReader.create(pulsar, topic, COMPACTION_SUBSCRIPTION, false).thenComposeAsync( this::compactAndCloseReader, scheduler); }
@Test public void testCompactedWithConcurrentSend() throws Exception { String topic = "persistent://my-property/use/my-ns/testCompactedWithConcurrentSend"; @Cleanup Producer<byte[]> producer = pulsarClient.newProducer().topic(topic) .enableBatching(false) .messageRoutingMode(MessageRoutingMode.SinglePartition) .create(); var future = CompletableFuture.runAsync(() -> { for (int i = 0; i < 100; i++) { try { producer.newMessage().key(String.valueOf(i)).value(String.valueOf(i).getBytes()).send(); } catch (PulsarClientException e) { throw new RuntimeException(e); } } }); PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic).get(); PulsarTopicCompactionService topicCompactionService = (PulsarTopicCompactionService) persistentTopic.getTopicCompactionService(); Awaitility.await().untilAsserted(() -> { long compactedLedgerId = compact(topic); Thread.sleep(300); Optional<CompactedTopicContext> compactedTopicContext = topicCompactionService.getCompactedTopic() .getCompactedTopicContext(); Assert.assertTrue(compactedTopicContext.isPresent()); Assert.assertEquals(compactedTopicContext.get().ledger.getId(), compactedLedgerId); }); Position lastCompactedPosition = topicCompactionService.getLastCompactedPosition().get(); Entry lastCompactedEntry = topicCompactionService.readLastCompactedEntry().get(); Assert.assertTrue(PositionFactory.create(lastCompactedPosition.getLedgerId(), lastCompactedPosition.getEntryId()) .compareTo(lastCompactedEntry.getLedgerId(), lastCompactedEntry.getEntryId()) >= 0); future.join(); }
String getSubstitutionVariable(String key) { return substitutionVariables.get(key); }
@Test public void testMavenFormat() { assertThat(new LoggingConfiguration(new EnvironmentInformation("maven", "1.0")) .getSubstitutionVariable(LoggingConfiguration.PROPERTY_FORMAT)).isEqualTo(LoggingConfiguration.FORMAT_MAVEN); }
public int getUpdateAppQueueFailedRetrieved() { return numUpdateAppQueueFailedRetrieved.value(); }
@Test public void testUpdateAppQueueRetrievedFailed() { long totalBadBefore = metrics.getUpdateAppQueueFailedRetrieved(); badSubCluster.getUpdateQueueFailed(); Assert.assertEquals(totalBadBefore + 1, metrics.getUpdateAppQueueFailedRetrieved()); }
DatanodeDescriptor getDatanodeByHost(String ipAddr) {
    if (ipAddr == null) {
        return null;
    }

    hostmapLock.readLock().lock();
    try {
        DatanodeDescriptor[] nodes = map.get(ipAddr);
        // no entry
        if (nodes == null) {
            return null;
        }
        // one node
        if (nodes.length == 1) {
            return nodes[0];
        }
        // more than one node
        return nodes[ThreadLocalRandom.current().nextInt(nodes.length)];
    } finally {
        hostmapLock.readLock().unlock();
    }
}
@Test public void testGetDatanodeByHost() throws Exception { assertEquals(map.getDatanodeByHost("1.1.1.1"), dataNodes[0]); assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]); DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3"); assertTrue(node == dataNodes[2] || node == dataNodes[3]); assertNull(map.getDatanodeByHost("4.4.4.4")); }
@Override public Set<Class<? extends BaseStepMeta>> getSupportedSteps() { return new HashSet<Class<? extends BaseStepMeta>>() { { add( ExcelOutputMeta.class ); } }; }
@Test public void testGetSupportedSteps() { ExcelOutputStepAnalyzer analyzer = new ExcelOutputStepAnalyzer(); Set<Class<? extends BaseStepMeta>> types = analyzer.getSupportedSteps(); assertNotNull( types ); assertEquals( types.size(), 1 ); assertTrue( types.contains( ExcelOutputMeta.class ) ); }
public static Schema schemaFromPojoClass( TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) { return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier); }
@Test public void testNestedPOJOWithSimplePOJO() { Schema schema = POJOUtils.schemaFromPojoClass( new TypeDescriptor<TestPOJOs.NestedPOJOWithSimplePOJO>() {}, JavaFieldTypeSupplier.INSTANCE); SchemaTestUtils.assertSchemaEquivalent(NESTED_POJO_WITH_SIMPLE_POJO_SCHEMA, schema); }
public void changeToSlave(final String newMasterAddress, final int newMasterEpoch, Long newMasterBrokerId) {
    synchronized (this) {
        if (newMasterEpoch > this.masterEpoch) {
            LOGGER.info("Begin to change to slave, brokerName={}, brokerId={}, newMasterBrokerId={}, newMasterAddress={}, newMasterEpoch={}",
                this.brokerConfig.getBrokerName(), this.brokerControllerId, newMasterBrokerId, newMasterAddress, newMasterEpoch);

            this.masterEpoch = newMasterEpoch;

            if (newMasterBrokerId.equals(this.masterBrokerId)) {
                // if master doesn't change
                this.haService.changeToSlaveWhenMasterNotChange(newMasterAddress, newMasterEpoch);
                this.brokerController.getTopicConfigManager().getDataVersion().nextVersion(newMasterEpoch);
                registerBrokerWhenRoleChange();
                return;
            }

            // Stop checking syncStateSet because only master is able to check
            stopCheckSyncStateSet();

            // Change config(compatibility problem)
            this.brokerController.getMessageStoreConfig().setBrokerRole(BrokerRole.SLAVE);
            this.brokerController.changeSpecialServiceStatus(false);
            // The brokerId in brokerConfig just means its role(master[0] or slave[>=1])
            this.brokerConfig.setBrokerId(brokerControllerId);

            // Change record
            this.masterAddress = newMasterAddress;
            this.masterBrokerId = newMasterBrokerId;

            // Handle the slave synchronise
            handleSlaveSynchronize(BrokerRole.SLAVE);

            // Notify ha service, change to slave
            this.haService.changeToSlave(newMasterAddress, newMasterEpoch, brokerControllerId);

            this.brokerController.getTopicConfigManager().getDataVersion().nextVersion(newMasterEpoch);

            registerBrokerWhenRoleChange();
        }
    }
}
@Test public void changeToSlaveTest() { Assertions.assertThatCode(() -> replicasManager.changeToSlave(NEW_MASTER_ADDRESS, NEW_MASTER_EPOCH, BROKER_ID_2)) .doesNotThrowAnyException(); }
public static <T> T getItemAtPositionOrNull(T[] array, int position) { if (position >= 0 && array.length > position) { return array[position]; } return null; }
@Test public void getItemAtPositionOrNull_whenSmallerArray_thenReturnNull() { Object obj = new Object(); Object[] src = new Object[1]; src[0] = obj; Object result = ArrayUtils.getItemAtPositionOrNull(src, 1); assertNull(result); }
@Override
public V load(K key) {
    awaitSuccessfulInit();

    try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
        Iterator<SqlRow> it = queryResult.iterator();

        V value = null;
        if (it.hasNext()) {
            SqlRow sqlRow = it.next();
            if (it.hasNext()) {
                throw new IllegalStateException("multiple matching rows for a key " + key);
            }
            // If there is a single column as the value, return that column as the value
            if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
                value = sqlRow.getObject(1);
            } else {
                //noinspection unchecked
                value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
            }
        }
        return value;
    }
}
@Test public void givenDefaultTypeName_whenLoad_thenReturnGenericRecordMapNameAsTypeName() { ObjectSpec spec = objectProvider.createObject(mapName, false); objectProvider.insertItems(spec, 1); mapLoader = createMapLoader(); CompactGenericRecord genericRecord = (CompactGenericRecord) mapLoader.load(0); assertThat(genericRecord.getSchema().getTypeName()).isEqualTo(mapName); }
@SuppressWarnings("unchecked")
public static <T> T newInstanceIfPossible(Class<T> type) {
    Assert.notNull(type);

    // Primitive types
    if (type.isPrimitive()) {
        return (T) ClassUtil.getPrimitiveDefaultValue(type);
    }

    // Some special interfaces are instantiated using their default implementations
    if (type.isAssignableFrom(AbstractMap.class)) {
        type = (Class<T>) HashMap.class;
    } else if (type.isAssignableFrom(List.class)) {
        type = (Class<T>) ArrayList.class;
    } else if (type.isAssignableFrom(Set.class)) {
        type = (Class<T>) HashSet.class;
    }

    try {
        return newInstance(type);
    } catch (Exception e) {
        // ignore
        // If there is no default constructor, fall through and try the other constructors
    }

    // Enums
    if (type.isEnum()) {
        return type.getEnumConstants()[0];
    }

    // Arrays
    if (type.isArray()) {
        return (T) Array.newInstance(type.getComponentType(), 0);
    }

    final Constructor<T>[] constructors = getConstructors(type);
    Class<?>[] parameterTypes;
    for (Constructor<T> constructor : constructors) {
        parameterTypes = constructor.getParameterTypes();
        if (0 == parameterTypes.length) {
            continue;
        }
        setAccessible(constructor);
        try {
            return constructor.newInstance(ClassUtil.getDefaultValues(parameterTypes));
        } catch (Exception ignore) {
            // If this constructor fails, keep trying the next one
        }
    }
    return null;
}
@Test public void noneStaticInnerClassTest() { final NoneStaticClass testAClass = ReflectUtil.newInstanceIfPossible(NoneStaticClass.class); assertNotNull(testAClass); assertEquals(2, testAClass.getA()); }
static MetricsConfig loadFirst(String prefix, String... fileNames) {
    for (String fname : fileNames) {
        try {
            PropertiesConfiguration pcf = new PropertiesConfiguration();
            pcf.setListDelimiterHandler(new DefaultListDelimiterHandler(','));
            FileHandler fh = new FileHandler(pcf);
            fh.setFileName(fname);
            fh.load();
            Configuration cf = pcf.interpolatedConfiguration();
            LOG.info("Loaded properties from {}", fname);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Properties: {}", toString(cf));
            }
            MetricsConfig mc = new MetricsConfig(cf, prefix);
            LOG.debug("Metrics Config: {}", mc);
            return mc;
        } catch (ConfigurationException e) {
            // Commons Configuration defines the message text when file not found
            if (e.getMessage().startsWith("Could not locate")) {
                LOG.debug("Could not locate file {}", fname, e);
                continue;
            }
            throw new MetricsConfigException(e);
        }
    }
    LOG.warn("Cannot locate configuration: tried " + Joiner.on(",").join(fileNames));
    // default to an empty configuration
    return new MetricsConfig(new PropertiesConfiguration(), prefix);
}
@Test public void testLoadFirst() throws Exception { String filename = getTestFilename("hadoop-metrics2-p1"); new ConfigBuilder().add("p1.foo", "p1foo").save(filename); MetricsConfig mc = MetricsConfig.create("p1"); MetricsConfig mc2 = MetricsConfig.create("p1", "na1", "na2", filename); Configuration expected = new ConfigBuilder().add("foo", "p1foo").config; assertEq(expected, mc); assertEq(expected, mc2); }
@Override public String getType() { return POST_DOCUMENT_TYPE; }
@Test void ensureTypeNotModified() { assertEquals("post.content.halo.run", provider.getType()); }
public String metricsJson(Reconciliation reconciliation, ConfigMap configMap) { if (isEnabled) { if (configMap == null) { LOGGER.warnCr(reconciliation, "ConfigMap {} does not exist.", configMapName); throw new InvalidConfigurationException("ConfigMap " + configMapName + " does not exist"); } else { String data = configMap.getData().get(configMapKey); if (data == null) { LOGGER.warnCr(reconciliation, "ConfigMap {} does not contain specified key {}.", configMapName, configMapKey); throw new InvalidConfigurationException("ConfigMap " + configMapName + " does not contain specified key " + configMapKey); } else { if (data.isEmpty()) { return "{}"; } try { ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory()); Object yaml = yamlReader.readValue(data, Object.class); ObjectMapper jsonWriter = new ObjectMapper(); return jsonWriter.writeValueAsString(yaml); } catch (JsonProcessingException e) { throw new InvalidConfigurationException("Failed to parse metrics configuration", e); } } } } else { return null; } }
@Test public void testProblemWithConfigMap() { MetricsConfig metricsConfig = new JmxPrometheusExporterMetricsBuilder() .withNewValueFrom() .withConfigMapKeyRef(new ConfigMapKeySelector("my-key", "my-name", false)) .endValueFrom() .build(); MetricsModel metrics = new MetricsModel(new KafkaConnectSpecBuilder().withMetricsConfig(metricsConfig).build()); InvalidConfigurationException ex = assertThrows(InvalidConfigurationException.class, () -> metrics.metricsJson(Reconciliation.DUMMY_RECONCILIATION, null)); assertThat(ex.getMessage(), is("ConfigMap my-name does not exist")); ex = assertThrows(InvalidConfigurationException.class, () -> metrics.metricsJson(Reconciliation.DUMMY_RECONCILIATION, new ConfigMapBuilder().withData(Map.of("other-key", "foo: bar")).build())); assertThat(ex.getMessage(), is("ConfigMap my-name does not contain specified key my-key")); ex = assertThrows(InvalidConfigurationException.class, () -> metrics.metricsJson(Reconciliation.DUMMY_RECONCILIATION, new ConfigMapBuilder().withData(Map.of("my-key", "foo: -")).build())); assertThat(ex.getMessage(), startsWith("Failed to parse metrics configuration")); }
public static List<AclEntry> filterAclEntriesByAclSpec( List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException { ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class); EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class); EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class); for (AclEntry existingEntry: existingAcl) { if (aclSpec.containsKey(existingEntry)) { scopeDirty.add(existingEntry.getScope()); if (existingEntry.getType() == MASK) { maskDirty.add(existingEntry.getScope()); } } else { if (existingEntry.getType() == MASK) { providedMask.put(existingEntry.getScope(), existingEntry); } else { aclBuilder.add(existingEntry); } } } copyDefaultsIfNeeded(aclBuilder); calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); return buildAndValidateAcl(aclBuilder); }
@Test(expected=AclException.class) public void testFilterAclEntriesByAclSpecRemoveDefaultMaskRequired() throws AclException { List<AclEntry> existing = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, GROUP, READ)) .add(aclEntry(ACCESS, OTHER, NONE)) .add(aclEntry(DEFAULT, USER, ALL)) .add(aclEntry(DEFAULT, USER, "bruce", READ)) .add(aclEntry(DEFAULT, GROUP, READ)) .add(aclEntry(DEFAULT, MASK, ALL)) .add(aclEntry(DEFAULT, OTHER, NONE)) .build(); List<AclEntry> aclSpec = Lists.newArrayList( aclEntry(DEFAULT, MASK)); filterAclEntriesByAclSpec(existing, aclSpec); }
public void setTimeoutSeconds(int timeout) { kp.put("timeoutSeconds",timeout); }
@Test
public void testFetchTimeout() throws Exception {
    CrawlURI curi = makeCrawlURI("http://localhost:7777/slow.txt");
    fetcher().setTimeoutSeconds(2);
    fetcher().process(curi);

    // logger.info('\n' + httpRequestString(curi) + "\n\n" + rawResponseString(curi));

    assertTrue(curi.getAnnotations().contains("timeTrunc"));
    assertTrue(curi.getFetchDuration() >= 2000 && curi.getFetchDuration() < 2200);
}
@Override public ValidationResponse validate(ValidationRequest req) { if (req.isEmptyQuery()) { return ValidationResponse.ok(); } try { final ParsedQuery parsedQuery = luceneQueryParser.parse(req.rawQuery()); final ValidationContext context = ValidationContext.builder() .request(req) .query(parsedQuery) .availableFields(fields.fieldTypesByStreamIds(req.streams(), req.timerange())) .build(); final List<ValidationMessage> explanations = validators.stream() .flatMap(val -> val.validate(context).stream()) .collect(Collectors.toList()); return ValidationResponse.withDetectedStatus(explanations); } catch (Exception e) { return ValidationResponse.error(ValidationErrors.create(e)); } }
@Test void validateMixedTypes() { // validator returns one error final QueryValidator errorValidator = context -> Collections.singletonList( ValidationMessage.builder(ValidationStatus.ERROR, ValidationType.QUERY_PARSING_ERROR) .errorMessage("Query can't be parsed") .build()); // validator returns one warning final QueryValidator warningValidator = context -> Collections.singletonList( ValidationMessage.builder(ValidationStatus.WARNING, ValidationType.UNKNOWN_FIELD) .errorMessage("Unknown field") .build()); final QueryValidationServiceImpl service = new QueryValidationServiceImpl( new LuceneQueryParser(false), FIELD_TYPES_SERVICE, ImmutableSet.of(warningValidator, errorValidator)); final ValidationResponse validationResponse = service.validate(req()); assertThat(validationResponse.status()).isEqualTo(ValidationStatus.ERROR); assertThat(validationResponse.explanations()) .hasSize(2) .extracting(ValidationMessage::validationStatus) .containsOnly(ValidationStatus.ERROR, ValidationStatus.WARNING); }
@Override public void getErrors(ErrorCollection errors, String parentLocation) { String location = getLocation(parentLocation); errors.checkMissing(location, "link", link); errors.checkMissing(location, "regex", regex); validateLink(errors, location); }
@Test public void shouldDeserializeFromAPILikeObject() { String json = """ { "link": "https://github.com/gocd/api.go.cd/issues/${ID}", "regex": "##(d+)" }"""; CRTrackingTool deserializedValue = gson.fromJson(json, CRTrackingTool.class); assertThat(deserializedValue.getLink(), is("https://github.com/gocd/api.go.cd/issues/${ID}")); assertThat(deserializedValue.getRegex(), is("##(d+)")); ErrorCollection errors = deserializedValue.getErrors(); assertTrue(errors.isEmpty()); }
@Override public Future<RestResponse> restRequest(RestRequest request) { return restRequest(request, new RequestContext()); }
@Test(retryAnalyzer = ThreeRetries.class) // Known to be flaky in CI public void testRestRetryExceedsClientRetryRatio() throws Exception { SimpleLoadBalancer balancer = prepareLoadBalancer(Arrays.asList("http://test.linkedin.com/retry1", "http://test.linkedin.com/good"), HttpClientFactory.DEFAULT_MAX_CLIENT_REQUEST_RETRY_RATIO); SettableClock clock = new SettableClock(); DynamicClient dynamicClient = new DynamicClient(balancer, null); RetryClient client = new RetryClient( dynamicClient, balancer, D2ClientConfig.DEFAULT_RETRY_LIMIT, RetryClient.DEFAULT_UPDATE_INTERVAL_MS, RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM, clock, true, false); URI uri1 = URI.create("d2://retryService1?arg1=empty&arg2=empty"); RestRequest restRequest1 = new RestRequestBuilder(uri1).build(); URI uri2 = URI.create("d2://retryService2?arg1=empty&arg2=empty"); RestRequest restRequest2 = new RestRequestBuilder(uri2).build(); // This request will be retried and route to the good host DegraderTrackerClientTest.TestCallback<RestResponse> restCallback = new DegraderTrackerClientTest.TestCallback<>(); client.restRequest(restRequest1, restCallback); assertNull(restCallback.e); assertNotNull(restCallback.t); // This request will not be retried because the retry ratio is exceeded clock.addDuration(RetryClient.DEFAULT_UPDATE_INTERVAL_MS); restCallback = new DegraderTrackerClientTest.TestCallback<>(); client.restRequest(restRequest1, restCallback); assertNull(restCallback.t); assertNotNull(restCallback.e); assertTrue(restCallback.e.getMessage().contains("Data not available")); // If the client sends request to a different service endpoint, the retry ratio should not interfere restCallback = new DegraderTrackerClientTest.TestCallback<>(); client.restRequest(restRequest2, restCallback); assertNull(restCallback.e); assertNotNull(restCallback.t); // After 5s interval, retry counter is reset and this request will be retried again clock.addDuration(RetryClient.DEFAULT_UPDATE_INTERVAL_MS * RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM); restCallback = new DegraderTrackerClientTest.TestCallback<>(); client.restRequest(restRequest1, restCallback); assertNull(restCallback.e); assertNotNull(restCallback.t); }
@Override public NSImage folderIcon(final Integer size) { NSImage folder = this.iconNamed("NSFolder", size); if(null == folder) { return this.iconNamed("NSFolder", size); } return folder; }
@Test public void testFolderIcon128() { final NSImage icon = new NSImageIconCache().folderIcon(128); assertNotNull(icon); assertTrue(icon.isValid()); assertFalse(icon.isTemplate()); assertEquals(128, icon.size().width.intValue()); assertEquals(128, icon.size().height.intValue()); assertTrue(icon.representations().count().intValue() >= 1); }
public synchronized long getSplitPointsConsumed() { if (position == null) { return 0; } else if (isDone()) { return splitPointsSeen; } else { // There is a current split point, and it has not finished processing. checkState( splitPointsSeen > 0, "A started rangeTracker should have seen > 0 split points (is %s)", splitPointsSeen); return splitPointsSeen - 1; } }
@Test public void testGetSplitPointsConsumed() { ByteKeyRangeTracker tracker = ByteKeyRangeTracker.of(INITIAL_RANGE); assertEquals(0, tracker.getSplitPointsConsumed()); // Started, 0 split points consumed assertTrue(tracker.tryReturnRecordAt(true, INITIAL_START_KEY)); assertEquals(0, tracker.getSplitPointsConsumed()); // Processing new split point, 1 split point consumed assertTrue(tracker.tryReturnRecordAt(true, AFTER_START_KEY)); assertEquals(1, tracker.getSplitPointsConsumed()); // Processing new non-split point, 1 split point consumed assertTrue(tracker.tryReturnRecordAt(false, INITIAL_MIDDLE_KEY)); assertEquals(1, tracker.getSplitPointsConsumed()); // Processing new split point, 2 split points consumed assertTrue(tracker.tryReturnRecordAt(true, BEFORE_END_KEY)); assertEquals(2, tracker.getSplitPointsConsumed()); // Mark tracker as done, 3 split points consumed tracker.markDone(); assertEquals(3, tracker.getSplitPointsConsumed()); }
public static Object convertFromJs( Object value, int type, String fieldName ) throws KettleValueException { String classType = value.getClass().getName(); switch ( type ) { case ValueMetaInterface.TYPE_NUMBER: return jsToNumber( value, classType ); case ValueMetaInterface.TYPE_INTEGER: return jsToInteger( value, value.getClass() ); case ValueMetaInterface.TYPE_STRING: return jsToString( value, classType ); case ValueMetaInterface.TYPE_DATE: return jsToDate( value, classType ); case ValueMetaInterface.TYPE_BOOLEAN: return value; case ValueMetaInterface.TYPE_BIGNUMBER: return jsToBigNumber( value, classType ); case ValueMetaInterface.TYPE_BINARY: return Context.jsToJava( value, byte[].class ); case ValueMetaInterface.TYPE_NONE: throw new KettleValueException( "No data output data type was specified for new field [" + fieldName + "]" ); default: return Context.jsToJava( value, Object.class ); } }
@Test( expected = RuntimeException.class ) public void convertFromJs_TypeNone() throws Exception { JavaScriptUtils.convertFromJs( null, ValueMetaInterface.TYPE_NONE, "qwerty" ); }
public List<String> toMnemonic(byte[] entropy) { checkArgument(entropy.length % 4 == 0, () -> "entropy length not multiple of 32 bits"); checkArgument(entropy.length > 0, () -> "entropy is empty"); // We take initial entropy of ENT bits and compute its // checksum by taking first ENT / 32 bits of its SHA256 hash. byte[] hash = Sha256Hash.hash(entropy); boolean[] hashBits = bytesToBits(hash); boolean[] entropyBits = bytesToBits(entropy); int checksumLengthBits = entropyBits.length / 32; // We append these bits to the end of the initial entropy. boolean[] concatBits = new boolean[entropyBits.length + checksumLengthBits]; System.arraycopy(entropyBits, 0, concatBits, 0, entropyBits.length); System.arraycopy(hashBits, 0, concatBits, entropyBits.length, checksumLengthBits); // Next we take these concatenated bits and split them into // groups of 11 bits. Each group encodes number from 0-2047 // which is a position in a wordlist. We convert numbers into // words and use joined words as mnemonic sentence. ArrayList<String> words = new ArrayList<>(); int nwords = concatBits.length / 11; for (int i = 0; i < nwords; ++i) { int index = 0; for (int j = 0; j < 11; ++j) { index <<= 1; if (concatBits[(i * 11) + j]) index |= 0x1; } words.add(this.wordList.get(index)); } return words; }
@Test(expected = RuntimeException.class) public void testEmptyEntropy() throws Exception { byte[] entropy = {}; mc.toMnemonic(entropy); }
@Override public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return PathAttributes.EMPTY; } if(new DefaultPathContainerService().isContainer(file)) { return PathAttributes.EMPTY; } final Path query; if(file.isPlaceholder()) { query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes()); } else { query = file; } final AttributedList<Path> list; if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) { list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener); } else { list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener); } final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file)); if(null == found) { throw new NotfoundException(file.getAbsolute()); } return found.attributes(); }
@Test @Ignore public void testFindSharedDriveAsDefaultPath() throws Exception { final DriveAttributesFinderFeature f = new DriveAttributesFinderFeature(session, new DriveFileIdProvider(session)); assertNotEquals(PathAttributes.EMPTY, f.find(new Path(DriveHomeFinderService.SHARED_DRIVES_NAME, "iterate", EnumSet.of(Path.Type.directory)))); }
public String parseToPlainText() throws IOException, SAXException, TikaException { BodyContentHandler handler = new BodyContentHandler(); AutoDetectParser parser = new AutoDetectParser(); Metadata metadata = new Metadata(); try (InputStream stream = ContentHandlerExample.class.getResourceAsStream("test.doc")) { parser.parse(stream, handler, metadata); return handler.toString(); } }
@Test public void testParseToPlainText() throws IOException, SAXException, TikaException { String result = example .parseToPlainText() .trim(); assertEquals("test", result, "Expected 'test', but got '" + result + "'"); }
@Override public List<Column> getPartitionColumns(Map<ColumnId, Column> idToColumn) { List<Column> columns = MetaUtils.getColumnsByColumnIds(idToColumn, partitionColumnIds); for (int i = 0; i < columns.size(); i++) { Expr expr = partitionExprs.get(i).convertToColumnNameExpr(idToColumn); Column column = columns.get(i); if (expr.getType().getPrimitiveType() != PrimitiveType.INVALID_TYPE && expr.getType().getPrimitiveType() != column.getType().getPrimitiveType()) { Column newColumn = new Column(column); newColumn.setType(expr.getType()); columns.set(i, newColumn); } } return columns; }
@Test public void testInitHybrid() { Column k1 = new Column("k1", new ScalarType(PrimitiveType.DATETIME), true, null, "", ""); SlotRef slotRef = new SlotRef(tableName, "k1"); partitionExprs.add(ColumnIdExpr.create(slotRef)); partitionExprs.add(ColumnIdExpr.create(functionCallExpr)); List<Column> schema = Arrays.asList(k1, k2); ExpressionRangePartitionInfo expressionRangePartitionInfo = new ExpressionRangePartitionInfo(partitionExprs, schema, PartitionType.RANGE); List<Column> partitionColumns = expressionRangePartitionInfo.getPartitionColumns( MetaUtils.buildIdToColumn(schema)); Assert.assertEquals(partitionColumns.size(), 2); Assert.assertEquals(partitionColumns.get(0), k1); Assert.assertEquals(partitionColumns.get(1), k2); }
@Override public StatusOutputStream<ObjStat> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { try { final IRODSFileSystemAO fs = session.getClient(); final IRODSFileOutputStream out = fs.getIRODSFileFactory().instanceIRODSFileOutputStream( file.getAbsolute(), status.isAppend() ? DataObjInp.OpenFlags.READ_WRITE : DataObjInp.OpenFlags.WRITE_TRUNCATE); return new StatusOutputStream<ObjStat>(new PackingIrodsOutputStream(out)) { @Override public ObjStat getStatus() throws BackgroundException { // No remote attributes from server returned after upload try { return fs.getObjStat(file.getAbsolute()); } catch(JargonException e) { throw new IRODSExceptionMappingService().map("Failure to read attributes of {0}", e, file); } } }; } catch(JargonRuntimeException e) { if(e.getCause() instanceof JargonException) { throw (JargonException) e.getCause(); } throw new DefaultExceptionMappingService().map(e); } } catch(JargonException e) { throw new IRODSExceptionMappingService().map("Uploading {0} failed", e, file); } }
@Test public void testWrite() throws Exception { final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol()))); final Profile profile = new ProfilePlistReader(factory).read( this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile")); final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials( PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret") )); final IRODSSession session = new IRODSSession(host); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); assertFalse(session.getFeature(Find.class).find(test)); final byte[] content = RandomUtils.nextBytes(100); final IRODSWriteFeature feature = new IRODSWriteFeature(session); { final TransferStatus status = new TransferStatus(); status.setAppend(false); status.setLength(content.length); assertEquals(0L, new IRODSUploadFeature(session).append(test, status).offset, 0L); final StatusOutputStream<ObjStat> out = feature.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out); assertTrue(session.getFeature(Find.class).find(test)); final PathAttributes attributes = new IRODSAttributesFinderFeature(session).find(test); assertEquals(content.length, attributes.getSize()); final InputStream in = session.getFeature(Read.class).read(test, new TransferStatus(), new DisabledConnectionCallback()); final byte[] buffer = new byte[content.length]; IOUtils.readFully(in, buffer); in.close(); assertArrayEquals(content, buffer); } { final byte[] newcontent = RandomUtils.nextBytes(10); final TransferStatus status = new TransferStatus(); status.setAppend(false); status.setLength(newcontent.length); status.setRemote(new IRODSAttributesFinderFeature(session).find(test)); assertTrue(new IRODSUploadFeature(session).append(test, status).append); assertEquals(content.length, new IRODSUploadFeature(session).append(test, status).offset, 0L); final StatusOutputStream<ObjStat> out = feature.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(newcontent), out); assertTrue(session.getFeature(Find.class).find(test)); final PathAttributes attributes = new IRODSAttributesFinderFeature(session).find(test); assertEquals(newcontent.length, attributes.getSize()); assertEquals(new IRODSAttributesFinderFeature(session).toAttributes(out.getStatus()), attributes); final InputStream in = session.getFeature(Read.class).read(test, new TransferStatus(), new DisabledConnectionCallback()); final byte[] buffer = new byte[newcontent.length]; IOUtils.readFully(in, buffer); in.close(); assertArrayEquals(newcontent, buffer); } session.getFeature(Delete.class).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(session.getFeature(Find.class).find(test)); session.close(); }
static final String addFunctionParameter(ParameterDescriptor descriptor, RuleBuilderStep step) { final String parameterName = descriptor.name(); // parameter name needed by function final Map<String, Object> parameters = step.parameters(); if (Objects.isNull(parameters)) { return null; } final Object value = parameters.get(parameterName); // parameter value set by rule definition String syntax = " " + parameterName + " : "; if (value == null) { return null; } else if (value instanceof String valueString) { if (StringUtils.isEmpty(valueString)) { return null; } else if (valueString.startsWith("$")) { // value set as variable syntax += valueString.substring(1); } else { syntax += "\"" + StringEscapeUtils.escapeJava(valueString) + "\""; // value set as string } } else { syntax += value; } return syntax; }
@Test public void addFunctionParameterNull_WhenNoParametersAreSet() { RuleBuilderStep step = mock(RuleBuilderStep.class); when(step.parameters()).thenReturn(null); ParameterDescriptor descriptor = mock(ParameterDescriptor.class); assertThat(ParserUtil.addFunctionParameter(descriptor, step)).isNull(); }
public List<RuleData> obtainRuleData(final String selectorId) { return RULE_MAP.get(selectorId); }
@Test public void testObtainRuleData() throws NoSuchFieldException, IllegalAccessException { RuleData ruleData = RuleData.builder().id("1").selectorId(mockSelectorId1).build(); ConcurrentHashMap<String, List<RuleData>> ruleMap = getFieldByName(ruleMapStr); ruleMap.put(mockSelectorId1, Lists.newArrayList(ruleData)); List<RuleData> ruleDataList = BaseDataCache.getInstance().obtainRuleData(mockSelectorId1); assertEquals(Lists.newArrayList(ruleData), ruleDataList); }
protected GelfMessage toGELFMessage(final Message message) { final DateTime timestamp; final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP); if (fieldTimeStamp instanceof DateTime) { timestamp = (DateTime) fieldTimeStamp; } else { timestamp = Tools.nowUTC(); } final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL)); final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE); final String forwarder = GelfOutput.class.getCanonicalName(); final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource()) .timestamp(timestamp.getMillis() / 1000.0d) .additionalField("_forwarder", forwarder) .additionalFields(message.getFields()); if (messageLevel != null) { builder.level(messageLevel); } if (fullMessage != null) { builder.fullMessage(fullMessage); } return builder.build(); }
@Test public void testToGELFMessageWithInvalidStringLevel() throws Exception { final GelfTransport transport = mock(GelfTransport.class); final GelfOutput gelfOutput = new GelfOutput(transport); final DateTime now = DateTime.now(DateTimeZone.UTC); final Message message = messageFactory.createMessage("Test", "Source", now); message.addField("level", "BOOM"); final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message); assertEquals(GelfMessageLevel.ALERT, gelfMessage.getLevel()); }
@Override public boolean tryLock(String name) { return tryLock(name, DEFAULT_LOCK_DURATION_SECONDS); }
@Test public void tryLock_fails_with_IAE_if_name_is_empty() { String badLockName = ""; expectBadLockNameIAE(() -> underTest.tryLock(badLockName), badLockName); }
public static int checkGreaterThanOrEqual(int n, int expected, String name) { if (n < expected) { throw new IllegalArgumentException(name + ": " + n + " (expected: >= " + expected + ')'); } return n; }
@Test public void checkGreaterThanOrEqualMustPassIfArgumentIsEqualToExpected() { final int n = 1; final int actual = RangeUtil.checkGreaterThanOrEqual(n, 1, "var"); assertThat(actual, is(equalTo(n))); }
@Override public String getValue(EvaluationContext context) { String result = null; if (expressions.length > 0) { // Execute first expression for getting result. result = expressions[0].getValue(context); for (int i = 1; i < expressions.length; i++) { Expression exp = expressions[i]; if (exp instanceof FunctionExpression) { // Clone this expression, enriching args with previous result. FunctionExpression functionExp = (FunctionExpression) exp; String[] clonedArgs = Arrays.copyOf(functionExp.getFunctionArgs(), functionExp.getFunctionArgs().length + 1); clonedArgs[clonedArgs.length - 1] = result; FunctionExpression clonedExp = new FunctionExpression(functionExp.getFunction(), clonedArgs); clonedExp.getValue(context); } } } return result; }
@Test void testLiteralRedirectToMultiContext() { EvaluationContext context = new EvaluationContext(); Expression[] expressions = new Expression[] { new LiteralExpression("hello"), new FunctionExpression(new PutInContextELFunction(), new String[] { "greeting1" }), new FunctionExpression(new PutInContextELFunction(), new String[] { "greeting2" }) }; RedirectExpression exp = new RedirectExpression(expressions); String result = exp.getValue(context); assertEquals("hello", result); assertEquals("hello", context.lookupVariable("greeting1")); assertEquals("hello", context.lookupVariable("greeting2")); }
@Override public GroupingShuffleReaderIterator<K, V> iterator() throws IOException { ApplianceShuffleEntryReader entryReader = new ApplianceShuffleEntryReader( shuffleReaderConfig, executionContext, operationContext, true); initCounter(entryReader.getDatasetId()); return iterator(entryReader); }
@Test public void testReadFromShuffleAndDynamicSplit() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); BatchModeExecutionContext context = BatchModeExecutionContext.forTesting(options, "testStage"); TestOperationContext operationContext = TestOperationContext.create(); GroupingShuffleReader<Integer, Integer> groupingShuffleReader = new GroupingShuffleReader<>( options, null, null, null, WindowedValue.getFullCoder( KvCoder.of( BigEndianIntegerCoder.of(), IterableCoder.of(BigEndianIntegerCoder.of())), IntervalWindow.getCoder()), context, operationContext, ShuffleReadCounterFactory.INSTANCE, false /* do not sort values */); groupingShuffleReader.perOperationPerDatasetBytesCounter = operationContext .counterFactory() .longSum(CounterName.named("dax-shuffle-test-wf-read-bytes")); TestShuffleReader shuffleReader = new TestShuffleReader(); final int kNumRecords = 10; final int kFirstShard = 0; final int kSecondShard = 1; // Setting up two shards with kNumRecords each; keys are unique // (hence groups of values for the same key are singletons) // therefore each record comes with a unique position constructed. for (int i = 0; i < kNumRecords; ++i) { byte[] keyByte = CoderUtils.encodeToByteArray(BigEndianIntegerCoder.of(), i); ShuffleEntry entry = newShuffleEntry( fabricatePosition(kFirstShard, keyByte), keyByte, EMPTY_BYTE_ARRAY, keyByte); shuffleReader.addEntry(entry); } for (int i = kNumRecords; i < 2 * kNumRecords; ++i) { byte[] keyByte = CoderUtils.encodeToByteArray(BigEndianIntegerCoder.of(), i); ShuffleEntry entry = newShuffleEntry( fabricatePosition(kSecondShard, keyByte), keyByte, EMPTY_BYTE_ARRAY, keyByte); shuffleReader.addEntry(entry); } int i = 0; assertFalse(shuffleReader.isClosed()); try (GroupingShuffleReaderIterator<Integer, Integer> iter = groupingShuffleReader.iterator(shuffleReader)) { // Poke the iterator so we can test dynamic splitting. assertTrue(iter.start()); ++i; assertNull(iter.requestDynamicSplit(splitRequestAtPosition(new Position()))); // Split at the shard boundary NativeReader.DynamicSplitResult dynamicSplitResult = iter.requestDynamicSplit(splitRequestAtPosition(makeShufflePosition(kSecondShard, null))); assertNotNull(dynamicSplitResult); assertEquals( encodeBase64URLSafeString(fabricatePosition(kSecondShard).getPosition().toByteArray()), positionFromSplitResult(dynamicSplitResult).getShufflePosition()); for (; iter.advance(); ++i) { // iter.getCurrent() is supposed to be side-effect-free and give the same result if called // repeatedly. Test that this is indeed the case. iter.getCurrent(); iter.getCurrent(); KV<Integer, Reiterable<Integer>> elem = iter.getCurrent().getValue(); int key = elem.getKey(); assertEquals(key, i); Reiterable<Integer> valuesIterable = elem.getValue(); Reiterator<Integer> valuesIterator = valuesIterable.iterator(); int j = 0; while (valuesIterator.hasNext()) { assertTrue(valuesIterator.hasNext()); assertTrue(valuesIterator.hasNext()); int value = valuesIterator.next(); assertEquals(value, i); ++j; } assertFalse(valuesIterator.hasNext()); assertFalse(valuesIterator.hasNext()); assertEquals(1, j); } assertFalse(iter.advance()); } assertTrue(shuffleReader.isClosed()); assertEquals(i, kNumRecords); // There are 10 Shuffle records that each encode an integer key (4 bytes) and integer value (4 // bytes). We therefore expect to read 80 bytes. assertEquals( 80L, (long) groupingShuffleReader.perOperationPerDatasetBytesCounter.getAggregate()); }
Boolean processPayment() { try { ResponseEntity<Boolean> paymentProcessResult = restTemplateBuilder .build() .postForEntity("http://localhost:30301/payment/process", "processing payment", Boolean.class); LOGGER.info("Payment processing result: {}", paymentProcessResult.getBody()); return paymentProcessResult.getBody(); } catch (ResourceAccessException | HttpClientErrorException e) { LOGGER.error("Error communicating with payment service: {}", e.getMessage()); return false; } }
@Test void testProcessPayment_HttpClientErrorException() { // Arrange when(restTemplate.postForEntity(eq("http://localhost:30301/payment/process"), anyString(), eq(Boolean.class))) .thenThrow(new HttpClientErrorException(org.springframework.http.HttpStatus.BAD_REQUEST, "Bad request")); // Act Boolean result = orderService.processPayment(); // Assert assertEquals(false, result); }
@Override public String getName() { return ANALYZER_NAME; }
@Test public void testGetName() { assertEquals("Pipfile.lock Analyzer", analyzer.getName()); }
boolean isModified(Namespace namespace) { Release release = releaseService.findLatestActiveRelease(namespace); List<Item> items = itemService.findItemsWithoutOrdered(namespace.getId()); if (release == null) { return hasNormalItems(items); } Map<String, String> releasedConfiguration = GSON.fromJson(release.getConfigurations(), GsonType.CONFIG); Map<String, String> configurationFromItems = generateConfigurationFromItems(namespace, items); MapDifference<String, String> difference = Maps.difference(releasedConfiguration, configurationFromItems); return !difference.areEqual(); }
@Test public void testNamespaceModifyItem() { long namespaceId = 1; Namespace namespace = createNamespace(namespaceId); Release release = createRelease("{\"k1\":\"v1\"}"); List<Item> items = Collections.singletonList(createItem("k1", "v2")); when(releaseService.findLatestActiveRelease(namespace)).thenReturn(release); when(itemService.findItemsWithoutOrdered(namespaceId)).thenReturn(items); when(namespaceService.findParentNamespace(namespace)).thenReturn(null); boolean isModified = namespaceUnlockAspect.isModified(namespace); Assert.assertTrue(isModified); }
@SuppressWarnings("MethodLength") static void dissectControlRequest( final ArchiveEventCode eventCode, final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder); HEADER_DECODER.wrap(buffer, offset + encodedLength); encodedLength += MessageHeaderDecoder.ENCODED_LENGTH; switch (eventCode) { case CMD_IN_CONNECT: CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendConnect(builder); break; case CMD_IN_CLOSE_SESSION: CLOSE_SESSION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendCloseSession(builder); break; case CMD_IN_START_RECORDING: START_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording(builder); break; case CMD_IN_STOP_RECORDING: STOP_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecording(builder); break; case CMD_IN_REPLAY: REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplay(builder); break; case CMD_IN_STOP_REPLAY: STOP_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplay(builder); break; case CMD_IN_LIST_RECORDINGS: LIST_RECORDINGS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordings(builder); break; case CMD_IN_LIST_RECORDINGS_FOR_URI: LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingsForUri(builder); break; case CMD_IN_LIST_RECORDING: LIST_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecording(builder); break; case CMD_IN_EXTEND_RECORDING: EXTEND_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording(builder); break; case CMD_IN_RECORDING_POSITION: RECORDING_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendRecordingPosition(builder); break; case CMD_IN_TRUNCATE_RECORDING: TRUNCATE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTruncateRecording(builder); break; case CMD_IN_STOP_RECORDING_SUBSCRIPTION: STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingSubscription(builder); break; case CMD_IN_STOP_POSITION: STOP_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopPosition(builder); break; case CMD_IN_FIND_LAST_MATCHING_RECORD: FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendFindLastMatchingRecord(builder); break; case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS: LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); 
appendListRecordingSubscriptions(builder); break; case CMD_IN_START_BOUNDED_REPLAY: BOUNDED_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartBoundedReplay(builder); break; case CMD_IN_STOP_ALL_REPLAYS: STOP_ALL_REPLAYS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopAllReplays(builder); break; case CMD_IN_REPLICATE: REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate(builder); break; case CMD_IN_STOP_REPLICATION: STOP_REPLICATION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplication(builder); break; case CMD_IN_START_POSITION: START_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartPosition(builder); break; case CMD_IN_DETACH_SEGMENTS: DETACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDetachSegments(builder); break; case CMD_IN_DELETE_DETACHED_SEGMENTS: DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDeleteDetachedSegments(builder); break; case CMD_IN_PURGE_SEGMENTS: PURGE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeSegments(builder); break; case CMD_IN_ATTACH_SEGMENTS: ATTACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAttachSegments(builder); break; case CMD_IN_MIGRATE_SEGMENTS: MIGRATE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendMigrateSegments(builder); break; case CMD_IN_AUTH_CONNECT: AUTH_CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAuthConnect(builder); break; case CMD_IN_KEEP_ALIVE: KEEP_ALIVE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendKeepAlive(builder); break; case CMD_IN_TAGGED_REPLICATE: TAGGED_REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTaggedReplicate(builder); break; case CMD_IN_START_RECORDING2: START_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording2(builder); break; case CMD_IN_EXTEND_RECORDING2: EXTEND_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording2(builder); break; case CMD_IN_STOP_RECORDING_BY_IDENTITY: STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingByIdentity(builder); break; case CMD_IN_PURGE_RECORDING: PURGE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeRecording(builder); break; case CMD_IN_REPLICATE2: REPLICATE_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); 
appendReplicate2(builder); break; case CMD_IN_REQUEST_REPLAY_TOKEN: REPLAY_TOKEN_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplayToken(builder); break; default: builder.append(": unknown command"); } }
@Test void controlRequestListRecordingSubscriptions() { internalEncodeLogHeader(buffer, 0, 90, 90, () -> 10_325_000_000L); final ListRecordingSubscriptionsRequestEncoder requestEncoder = new ListRecordingSubscriptionsRequestEncoder(); requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder) .controlSessionId(1) .correlationId(2) .pseudoIndex(1111111) .applyStreamId(BooleanType.TRUE) .subscriptionCount(777) .streamId(555) .channel("ch2"); dissectControlRequest(CMD_IN_LIST_RECORDING_SUBSCRIPTIONS, buffer, 0, builder); assertEquals("[10.325000000] " + CONTEXT + ": " + CMD_IN_LIST_RECORDING_SUBSCRIPTIONS.name() + " [90/90]:" + " controlSessionId=1" + " correlationId=2" + " pseudoIndex=1111111" + " applyStreamId=" + BooleanType.TRUE + " subscriptionCount=777" + " streamId=555" + " channel=ch2", builder.toString()); }
public ASN1Sequence signedPipFromPplist(List<PolymorphicPseudonymType> response) { for (PolymorphicPseudonymType polymorphicPseudonymType : response) { ASN1Sequence sequence; try { sequence = (ASN1Sequence) ASN1Sequence.fromByteArray(polymorphicPseudonymType.getValue()); } catch (Exception e) { logger.error(String.format("PolymorphicPseudonymType not a valid ASN1 Sequence. Exception: '%s'", e.getMessage())); continue; } if (sequence.getObjectAt(0) instanceof ASN1ObjectIdentifier) { ASN1ObjectIdentifier objectIdentifier = (ASN1ObjectIdentifier) sequence.getObjectAt(0); if (objectIdentifier.getId().equals(SIGNED_PIP_OID)) { return sequence; } } } throw new IllegalArgumentException("No signed pip found in PolymorphicPseudonymType list"); }
@Test public void signedPipFromPplistTest() throws IOException, BsnkException { List<PolymorphicPseudonymType> pplist = new ArrayList<>(); pplist.add(new PolymorphicPseudonymType() { { value = signedPip.getEncoded(); } }); ASN1Sequence result = bsnkUtil.signedPipFromPplist(pplist); assertEquals(Base64.getEncoder().encodeToString(result.getEncoded()), signedPipBase64); }
@Override public Table getTable(String dbName, String tblName) { try { return deltaOps.getTable(dbName, tblName); } catch (Exception e) { LOG.error("Failed to get table {}.{}", dbName, tblName, e); return null; } }
@Test public void testGetTable() { new MockUp<DeltaUtils>() { @mockit.Mock public DeltaLakeTable convertDeltaToSRTable(String catalog, String dbName, String tblName, String path, Engine deltaEngine, long createTime) { return new DeltaLakeTable(1, "delta0", "db1", "table1", Lists.newArrayList(), Lists.newArrayList("col1"), null, "path/to/table", null, 0); } }; DeltaLakeTable deltaTable = (DeltaLakeTable) deltaLakeMetadata.getTable("db1", "table1"); Assert.assertNotNull(deltaTable); Assert.assertEquals("table1", deltaTable.getName()); Assert.assertEquals(Table.TableType.DELTALAKE, deltaTable.getType()); Assert.assertEquals("path/to/table", deltaTable.getTableLocation()); }
public MijnDigidSessionStatus sessionStatus(String mijnDigiDSessionId) { Optional<MijnDigidSession> optionalSession = mijnDigiDSessionRepository.findById(mijnDigiDSessionId); if( optionalSession.isEmpty()) { return MijnDigidSessionStatus.INVALID; } MijnDigidSession session = optionalSession.get(); if( session.isAuthenticated() ) { return MijnDigidSessionStatus.VALID; } return MijnDigidSessionStatus.INVALID; }
@Test void testStatusAuthenticatedExistingSession() { MijnDigidSession session = new MijnDigidSession(1L); session.setAuthenticated(true); when(mijnDigiDSessionRepository.findById(eq(session.getId()))).thenReturn(Optional.of(session)); MijnDigidSessionStatus status = mijnDigiDSessionService.sessionStatus(session.getId()); verify(mijnDigiDSessionRepository, times(1)).findById(eq(session.getId())); assertEquals(status, MijnDigidSessionStatus.VALID); }
public static PluginOption parse(String pluginSpecification) { Matcher pluginWithFile = PLUGIN_WITH_ARGUMENT_PATTERN.matcher(pluginSpecification); if (!pluginWithFile.matches()) { Class<? extends Plugin> pluginClass = parsePluginName(pluginSpecification, pluginSpecification); return new PluginOption(pluginSpecification, pluginClass, null); } Class<? extends Plugin> pluginClass = parsePluginName(pluginSpecification, pluginWithFile.group(1)); return new PluginOption(pluginSpecification, pluginClass, pluginWithFile.group(2)); }
@Test void throws_for_plugins_that_do_not_implement_plugin() { IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> PluginOption.parse(String.class.getName())); assertThat(exception.getMessage(), is("The plugin specification 'java.lang.String' has a problem:\n" + "\n" + "'java.lang.String' does not implement 'io.cucumber.plugin.Plugin'.\n" + "\n" + "Plugin specifications should have the format of PLUGIN[:[PATH|[URI [OPTIONS]]]\n" + "\n" + "Valid values for PLUGIN are: html, json, junit, message, pretty, progress, rerun, summary, teamcity, testng, timeline, unused, usage\n" + "\n" + "PLUGIN can also be a fully qualified class name, allowing registration of 3rd party plugins. The 3rd party plugin must implement io.cucumber.plugin.Plugin")); }
@Override public E intern(E sample) { E canonical = map.get(sample); if (canonical != null) { return canonical; } var value = map.putIfAbsent(sample, sample); return (value == null) ? sample : value; }
@Test(dataProvider = "interners") public void intern(Interner<Int> interner) { var canonical = new Int(1); var other = new Int(1); assertThat(interner.intern(canonical)).isSameInstanceAs(canonical); assertThat(interner.intern(other)).isSameInstanceAs(canonical); checkSize(interner, 1); var next = new Int(2); assertThat(interner.intern(next)).isSameInstanceAs(next); checkSize(interner, 2); checkState(interner); }
public static AmountRequest fromString(String amountRequestAsString) { if (isNullOrEmpty(amountRequestAsString)) return null; return new AmountRequest( lenientSubstringBetween(amountRequestAsString, "order=", "&"), Integer.parseInt(lenientSubstringBetween(amountRequestAsString, "limit=", "&")) ); }
@Test void testAmountRequestWithEmptyString() { AmountRequest amountRequest = AmountRequest.fromString(""); assertThat(amountRequest).isNull(); }
public void afterTaskCompletion(MigrationRunnable task) { if (migrateTaskCount.decrementAndGet() < 0) { throw new IllegalStateException(); } }
@Test(expected = IllegalStateException.class) public void test_migrateTaskCount_notDecremented_belowZero() { migrationQueue.afterTaskCompletion(mock(MigrationRunnable.class)); }
public LinkedHashMap<String, String> getKeyPropertyList(ObjectName mbeanName) { LinkedHashMap<String, String> keyProperties = keyPropertiesPerBean.get(mbeanName); if (keyProperties == null) { keyProperties = new LinkedHashMap<>(); String properties = mbeanName.getKeyPropertyListString(); Matcher match = PROPERTY_PATTERN.matcher(properties); while (match.lookingAt()) { keyProperties.put(match.group(1), match.group(2)); properties = properties.substring(match.end()); if (properties.startsWith(",")) { properties = properties.substring(1); } match.reset(properties); } keyPropertiesPerBean.put(mbeanName, keyProperties); } return keyProperties; }
@Test public void testQuotedObjectNameWithEquals() throws Throwable { JmxMBeanPropertyCache testCache = new JmxMBeanPropertyCache(); LinkedHashMap<String, String> parameterList = testCache.getKeyPropertyList( new ObjectName("com.organisation:name=\"value=more\",name2=value2")); assertSameElementsAndOrder(parameterList, "name", "\"value=more\"", "name2", "value2"); }
@Override public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(Map<String, BrokerLookupData> brokers, ServiceUnitId serviceUnit, LoadManagerContext context) { ServiceConfiguration conf = context.brokerConfiguration(); if (!conf.isPreferLaterVersions() || brokers.isEmpty()) { return CompletableFuture.completedFuture(brokers); } Version latestVersion; try { latestVersion = getLatestVersionNumber(brokers); if (log.isDebugEnabled()) { log.debug("Latest broker version found was [{}]", latestVersion); } } catch (Exception ex) { log.warn("Disabling PreferLaterVersions feature; reason: " + ex.getMessage()); return FutureUtil.failedFuture( new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage())); } int numBrokersLatestVersion = 0; int numBrokersOlderVersion = 0; Iterator<Map.Entry<String, BrokerLookupData>> brokerIterator = brokers.entrySet().iterator(); while (brokerIterator.hasNext()) { Map.Entry<String, BrokerLookupData> next = brokerIterator.next(); String brokerId = next.getKey(); String version = next.getValue().brokerVersion(); Version brokerVersionVersion = Version.valueOf(version); if (brokerVersionVersion.equals(latestVersion)) { log.debug("Broker [{}] is running the latest version ([{}])", brokerId, version); numBrokersLatestVersion++; } else { log.info("Broker [{}] is running an older version ([{}]); latest version is [{}]", brokerId, version, latestVersion); numBrokersOlderVersion++; brokerIterator.remove(); } } if (numBrokersOlderVersion == 0) { log.info("All {} brokers are running the latest version [{}]", numBrokersLatestVersion, latestVersion); } return CompletableFuture.completedFuture(brokers); }
@Test public void testFilter() throws BrokerFilterException, ExecutionException, InterruptedException { Map<String, BrokerLookupData> originalBrokers = Map.of( "localhost:6650", getLookupData("2.10.0"), "localhost:6651", getLookupData("2.10.1"), "localhost:6652", getLookupData("2.10.1"), "localhost:6653", getLookupData("2.10.1") ); BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter(); Map<String, BrokerLookupData> result = brokerVersionFilter.filterAsync( new HashMap<>(originalBrokers), null, getContext()).get(); assertEquals(result, Map.of( "localhost:6651", getLookupData("2.10.1"), "localhost:6652", getLookupData("2.10.1"), "localhost:6653", getLookupData("2.10.1") )); originalBrokers = Map.of( "localhost:6650", getLookupData("2.10.0"), "localhost:6651", getLookupData("2.10.1-SNAPSHOT"), "localhost:6652", getLookupData("2.10.1"), "localhost:6653", getLookupData("2.10.1") ); result = brokerVersionFilter.filterAsync(new HashMap<>(originalBrokers), null, getContext()).get(); assertEquals(result, Map.of( "localhost:6652", getLookupData("2.10.1"), "localhost:6653", getLookupData("2.10.1") )); originalBrokers = Map.of( "localhost:6650", getLookupData("2.10.0"), "localhost:6651", getLookupData("2.10.1-SNAPSHOT"), "localhost:6652", getLookupData("2.10.1"), "localhost:6653", getLookupData("2.10.2-SNAPSHOT") ); result = brokerVersionFilter.filterAsync(new HashMap<>(originalBrokers), null, getContext()).get(); assertEquals(result, Map.of( "localhost:6653", getLookupData("2.10.2-SNAPSHOT") )); }
public boolean sendRequest(AfnemersberichtAanDGL request) { Map<String, Object> extraHeaders = new HashMap<>(); extraHeaders.put(Headers.X_AUX_SENDER_ID, digidOIN); extraHeaders.put(Headers.X_AUX_RECEIVER_ID, digileveringOIN); try { digileveringSender.sendMessage(request, HeaderUtil.createAfnemersberichtAanDGLHeaders(extraHeaders)); if( MessageUtil.getBerichttype(request).equals("Av01")){ digidXClient.remoteLogWithoutRelatingToAccount(Log.SEND_SUCCESS, "Av01"); } else { digidXClient.remoteLogBericht(Log.SEND_SUCCESS, request); } return true; } catch (JmsException jmsException) { logger.error(jmsException.getMessage()); digidXClient.remoteLogBericht(Log.SEND_FAILURE, request); return false; } }
@Test public void testSendAp01Correct(){ AfnemersberichtAanDGLFactory afnemersberichtAanDGLFactory = new AfnemersberichtAanDGLFactory("oin1", "oin2"); AfnemersberichtAanDGL message = afnemersberichtAanDGLFactory.createAfnemersberichtAanDGL(TestDglMessagesUtil.createTestAp01("bsn")); classUnderTest.sendRequest(message); verify(digileveringSender, times(1)).sendMessage(any(), any()); verify(digidXClient, times(1)).remoteLogBericht(Log.SEND_SUCCESS, message); }
public void process() throws Exception { if (_segmentMetadata.getTotalDocs() == 0) { LOGGER.info("Skip preprocessing empty segment: {}", _segmentMetadata.getName()); return; } // Segment processing has to be done with a local directory. File indexDir = new File(_indexDirURI); // This fixes the issue of temporary files not getting deleted after creating new inverted indexes. removeInvertedIndexTempFiles(indexDir); try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) { // Update default columns according to the schema. if (_schema != null) { DefaultColumnHandler defaultColumnHandler = DefaultColumnHandlerFactory .getDefaultColumnHandler(indexDir, _segmentMetadata, _indexLoadingConfig, _schema, segmentWriter); defaultColumnHandler.updateDefaultColumns(); _segmentMetadata = new SegmentMetadataImpl(indexDir); _segmentDirectory.reloadMetadata(); } else { LOGGER.warn("Skip creating default columns for segment: {} without schema", _segmentMetadata.getName()); } // Update single-column indices, like inverted index, json index etc. List<IndexHandler> indexHandlers = new ArrayList<>(); // We cannot just create all the index handlers in a random order. // Specifically, ForwardIndexHandler needs to be executed first. This is because it modifies the segment metadata // while rewriting forward index to create a dictionary. Some other handlers (like the range one) assume that // metadata was already been modified by ForwardIndexHandler. IndexHandler forwardHandler = createHandler(StandardIndexes.forward()); indexHandlers.add(forwardHandler); forwardHandler.updateIndices(segmentWriter); // Now that ForwardIndexHandler.updateIndices has been updated, we can run all other indexes in any order _segmentMetadata = new SegmentMetadataImpl(indexDir); _segmentDirectory.reloadMetadata(); for (IndexType<?, ?, ?> type : IndexService.getInstance().getAllIndexes()) { if (type != StandardIndexes.forward()) { IndexHandler handler = createHandler(type); indexHandlers.add(handler); handler.updateIndices(segmentWriter); // Other IndexHandler classes may modify the segment metadata while creating a temporary forward // index to generate their respective indexes from if the forward index was disabled. This new metadata is // needed to construct other indexes like RangeIndex. _segmentMetadata = _segmentDirectory.getSegmentMetadata(); } } // Perform post-cleanup operations on the index handlers. for (IndexHandler handler : indexHandlers) { handler.postUpdateIndicesCleanup(segmentWriter); } // Add min/max value to column metadata according to the prune mode. ColumnMinMaxValueGeneratorMode columnMinMaxValueGeneratorMode = _indexLoadingConfig.getColumnMinMaxValueGeneratorMode(); if (columnMinMaxValueGeneratorMode != ColumnMinMaxValueGeneratorMode.NONE) { ColumnMinMaxValueGenerator columnMinMaxValueGenerator = new ColumnMinMaxValueGenerator(_segmentMetadata, segmentWriter, columnMinMaxValueGeneratorMode); columnMinMaxValueGenerator.addColumnMinMaxValue(); // NOTE: This step may modify the segment metadata. When adding new steps after this, un-comment the next line. // _segmentMetadata = new SegmentMetadataImpl(indexDir); } segmentWriter.save(); } // Startree creation will load the segment again, so we need to close and re-open the segment writer to make sure // that the other required indices (e.g. forward index) are up-to-date. try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) { // Create/modify/remove star-trees if required. 
processStarTrees(indexDir); _segmentDirectory.reloadMetadata(); segmentWriter.save(); } }
@Test public void testV1UpdateDefaultColumns() throws Exception { constructV1Segment(Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); IngestionConfig ingestionConfig = new IngestionConfig(); ingestionConfig.setTransformConfigs( ImmutableList.of( new TransformConfig(NEW_INT_SV_DIMENSION_COLUMN_NAME, "plus(column1, 1)"), new TransformConfig(NEW_RAW_STRING_SV_DIMENSION_COLUMN_NAME, "reverse(column3)"), // Ensure that null values returned by transform functions for derived columns are handled appropriately // during segment reload new TransformConfig(NEW_NULL_RETURN_STRING_SV_DIMENSION_COLUMN_NAME, "json_path_string(column21, 'non-existent-path', null)"), // Ensure that any transform function failures result in a null value if error on failure is false new TransformConfig(NEW_WRONG_ARG_DATE_TRUNC_DERIVED_COLUMN_NAME, "dateTrunc('abcd', column1)") )); _tableConfig.setIngestionConfig(ingestionConfig); _indexLoadingConfig.addInvertedIndexColumns(NEW_COLUMN_INVERTED_INDEX); _indexLoadingConfig.addNoDictionaryColumns(NEW_RAW_STRING_SV_DIMENSION_COLUMN_NAME); _indexLoadingConfig.setErrorOnColumnBuildFailure(false); checkUpdateDefaultColumns(); // Try to use the third schema and update default value again. // For the third schema, we changed the default value for column 'newStringMVDimension' to 'notSameLength', // which is not the same length as before. This should be fine for segment format v1. // We added two new columns and also removed the NEW_INT_SV_DIMENSION_COLUMN_NAME from schema. // NEW_INT_SV_DIMENSION_COLUMN_NAME exists before processing but removed afterwards. SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(_indexDir); assertNotNull(segmentMetadata.getColumnMetadataFor(NEW_INT_SV_DIMENSION_COLUMN_NAME)); try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader() .load(_indexDir.toURI(), new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build()); SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, _indexLoadingConfig, _newColumnsSchema3)) { processor.process(); } segmentMetadata = new SegmentMetadataImpl(_indexDir); assertNull(segmentMetadata.getColumnMetadataFor(NEW_INT_SV_DIMENSION_COLUMN_NAME)); ColumnMetadata hllMetricMetadata = segmentMetadata.getColumnMetadataFor(NEW_HLL_BYTE_METRIC_COLUMN_NAME); FieldSpec expectedHllMetricFieldSpec = _newColumnsSchema3.getFieldSpecFor(NEW_HLL_BYTE_METRIC_COLUMN_NAME); assertEquals(hllMetricMetadata.getFieldSpec(), expectedHllMetricFieldSpec); ByteArray expectedDefaultValue = new ByteArray((byte[]) expectedHllMetricFieldSpec.getDefaultNullValue()); assertEquals(hllMetricMetadata.getMinValue(), expectedDefaultValue); assertEquals(hllMetricMetadata.getMaxValue(), expectedDefaultValue); ColumnMetadata tDigestMetricMetadata = segmentMetadata.getColumnMetadataFor(NEW_TDIGEST_BYTE_METRIC_COLUMN_NAME); FieldSpec expectedTDigestMetricFieldSpec = _newColumnsSchema3.getFieldSpecFor(NEW_TDIGEST_BYTE_METRIC_COLUMN_NAME); assertEquals(tDigestMetricMetadata.getFieldSpec(), expectedTDigestMetricFieldSpec); expectedDefaultValue = new ByteArray((byte[]) expectedTDigestMetricFieldSpec.getDefaultNullValue()); assertEquals(tDigestMetricMetadata.getMinValue(), expectedDefaultValue); assertEquals(tDigestMetricMetadata.getMaxValue(), expectedDefaultValue); }
public String compile(final String xls, final String template, int startRow, int startCol) { return compile( xls, template, InputType.XLS, startRow, startCol ); }
@Test public void testPricing() throws Exception { final List<DataListener> listeners = new ArrayList<>(); TemplateDataListener l1 = new TemplateDataListener(10, 3, "/templates/test_pricing1.drl"); listeners.add(l1); TemplateDataListener l2 = new TemplateDataListener(30, 3, "/templates/test_pricing2.drl"); listeners.add(l2); converter.compile("/data/ExamplePolicyPricing.drl.xls", InputType.XLS, listeners); //COMPILE KnowledgeBuilder kbuilder = KnowledgeBuilderFactory.newKnowledgeBuilder(); kbuilder.add(ResourceFactory.newByteArrayResource(l1.renderDRL().getBytes()), ResourceType.DRL); kbuilder.add(ResourceFactory.newByteArrayResource(l2.renderDRL().getBytes()), ResourceType.DRL); assertThat(kbuilder.hasErrors()).isFalse(); InternalKnowledgeBase kbase = KnowledgeBaseFactory.newKnowledgeBase(); kbase.addPackages(kbuilder.getKnowledgePackages()); KieSession kSession = kbase.newKieSession(); //now create some test data Driver driver = new Driver(); Policy policy = new Policy(); kSession.insert(driver); kSession.insert(policy); kSession.fireAllRules(); int basePrice = policy.getBasePrice(); assertThat(basePrice).isEqualTo(120); }
@Override public void updateRewardActivity(RewardActivityUpdateReqVO updateReqVO) { // Validate that the activity exists RewardActivityDO dbRewardActivity = validateRewardActivityExists(updateReqVO.getId()); if (dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) { // A closed activity cannot be modified throw exception(REWARD_ACTIVITY_UPDATE_FAIL_STATUS_CLOSED); } // Validate that the activity's products do not conflict validateRewardActivitySpuConflicts(updateReqVO.getId(), updateReqVO.getProductSpuIds()); // Update the activity RewardActivityDO updateObj = RewardActivityConvert.INSTANCE.convert(updateReqVO) .setStatus(PromotionUtils.calculateActivityStatus(updateReqVO.getEndTime())); rewardActivityMapper.updateById(updateObj); }
@Test public void testUpdateRewardActivity_success() { // Mock data RewardActivityDO dbRewardActivity = randomPojo(RewardActivityDO.class, o -> o.setStatus(PromotionActivityStatusEnum.WAIT.getStatus())); rewardActivityMapper.insert(dbRewardActivity); // @Sql: first insert an existing record // Prepare parameters RewardActivityUpdateReqVO reqVO = randomPojo(RewardActivityUpdateReqVO.class, o -> { o.setId(dbRewardActivity.getId()); // Set the ID of the record to update o.setConditionType(randomEle(PromotionConditionTypeEnum.values()).getType()); o.setProductScope(randomEle(PromotionProductScopeEnum.values()).getScope()); // Used to trigger the in-progress status o.setStartTime(addTime(Duration.ofDays(1))).setEndTime(addTime(Duration.ofDays(2))); }); // Invoke rewardActivityService.updateRewardActivity(reqVO); // Verify the update is correct RewardActivityDO rewardActivity = rewardActivityMapper.selectById(reqVO.getId()); // Get the latest record assertPojoEquals(reqVO, rewardActivity, "rules"); assertEquals(rewardActivity.getStatus(), PromotionActivityStatusEnum.WAIT.getStatus()); for (int i = 0; i < reqVO.getRules().size(); i++) { assertPojoEquals(reqVO.getRules().get(i), rewardActivity.getRules().get(i)); } }
public static <E extends Enum<E>> Map<String, E> mapEnumNamesToValues( final String prefix, final Class<E> enumClass) { final E[] constants = enumClass.getEnumConstants(); Map<String, E> mapping = new HashMap<>(constants.length); for (E constant : constants) { final String lc = constant.name().toLowerCase(Locale.ROOT); final E orig = mapping.put(prefix + lc, constant); checkArgument(orig == null, "Enum %s " + ERROR_MULTIPLE_ELEMENTS_MATCHING_TO_LOWER_CASE_VALUE + " %s", enumClass, lc); } return mapping; }
@Test public void testEmptyEnumMap() { Assertions.assertThat(mapEnumNamesToValues("", EmptyEnum.class)) .isEmpty(); }
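The prefix-plus-lower-cased-name mapping above is straightforward to reproduce with plain JDK collections. Below is a minimal sketch of the same idea; the Compression enum, the lookup helper, and the "fs.option." prefix are hypothetical and only illustrate how such a map is built and queried.

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public class EnumLookupSketch {
    // Hypothetical enum used purely for illustration.
    enum Compression { NONE, GZIP, SNAPPY }

    // Builds "<prefix><lower-cased name>" -> constant, failing fast on case collisions.
    static <E extends Enum<E>> Map<String, E> lookup(String prefix, Class<E> enumClass) {
        Map<String, E> mapping = new HashMap<>();
        for (E constant : enumClass.getEnumConstants()) {
            String key = prefix + constant.name().toLowerCase(Locale.ROOT);
            if (mapping.put(key, constant) != null) {
                throw new IllegalArgumentException("Duplicate lower-cased name: " + key);
            }
        }
        return mapping;
    }

    public static void main(String[] args) {
        Map<String, Compression> byName = lookup("fs.option.", Compression.class);
        System.out.println(byName.get("fs.option.gzip")); // prints GZIP
    }
}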
public RuntimeOptionsBuilder parse(String... args) { return parse(Arrays.asList(args)); }
@Test void creates_no_formatter_by_default() { RuntimeOptions options = parser .parse() .build(); Plugins plugins = new Plugins(new PluginFactory(), options); plugins.setEventBusOnEventListenerPlugins(new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID)); assertThat(plugins.getPlugins(), is(empty())); }
@Override public void reloadSegment(String segmentName, IndexLoadingConfig indexLoadingConfig, SegmentZKMetadata zkMetadata, SegmentMetadata localMetadata, @Nullable Schema schema, boolean forceDownload) throws Exception { Preconditions.checkState(!_shutDown, "Table data manager is already shut down, cannot reload segment: %s of table: %s", segmentName, _tableNameWithType); _logger.info("Reloading segment: {}", segmentName); String segmentTier = getSegmentCurrentTier(segmentName); indexLoadingConfig.setSegmentTier(segmentTier); indexLoadingConfig.setTableDataDir(_tableDataDir); indexLoadingConfig.setInstanceTierConfigs(_instanceDataManagerConfig.getTierConfigs()); File indexDir = getSegmentDataDir(segmentName, segmentTier, indexLoadingConfig.getTableConfig()); Lock segmentLock = getSegmentLock(segmentName); segmentLock.lock(); try { // Download segment from deep store if CRC changes or forced to download; // otherwise, copy backup directory back to the original index directory. // And then continue to load the segment from the index directory. boolean shouldDownload = forceDownload || !hasSameCRC(zkMetadata, localMetadata); if (shouldDownload) { // Create backup directory to handle failure of segment reloading. createBackup(indexDir); if (forceDownload) { _logger.info("Force downloading segment: {}", segmentName); } else { _logger.info("Downloading segment: {} because its CRC has changed from: {} to: {}", segmentName, localMetadata.getCrc(), zkMetadata.getCrc()); } indexDir = downloadSegment(zkMetadata); } else { _logger.info("Reloading existing segment: {} on tier: {}", segmentName, TierConfigUtils.normalizeTierName(segmentTier)); SegmentDirectory segmentDirectory = initSegmentDirectory(segmentName, String.valueOf(zkMetadata.getCrc()), indexLoadingConfig); // We should first try to reuse existing segment directory if (canReuseExistingDirectoryForReload(zkMetadata, segmentTier, segmentDirectory, indexLoadingConfig, schema)) { _logger.info("Reloading segment: {} using existing segment directory as no reprocessing needed", segmentName); // No reprocessing needed, reuse the same segment ImmutableSegment segment = ImmutableSegmentLoader.load(segmentDirectory, indexLoadingConfig, schema); addSegment(segment); return; } // Create backup directory to handle failure of segment reloading. createBackup(indexDir); // The indexDir is empty after calling createBackup, as it's renamed to a backup directory. // The SegmentDirectory should initialize accordingly. Like for SegmentLocalFSDirectory, it // doesn't load anything from an empty indexDir, but gets the info to complete the copyTo. try { segmentDirectory.copyTo(indexDir); } finally { segmentDirectory.close(); } } // Load from indexDir and replace the old segment in memory. What's inside indexDir // may come from SegmentDirectory.copyTo() or the segment downloaded from deep store. indexLoadingConfig.setSegmentTier(zkMetadata.getTier()); _logger.info("Loading segment: {} from indexDir: {} to tier: {}", segmentName, indexDir, TierConfigUtils.normalizeTierName(zkMetadata.getTier())); ImmutableSegment segment = ImmutableSegmentLoader.load(indexDir, indexLoadingConfig, schema); addSegment(segment); // Remove backup directory to mark the completion of segment reloading. removeBackup(indexDir); } catch (Exception reloadFailureException) { try { LoaderUtils.reloadFailureRecovery(indexDir); } catch (Exception recoveryFailureException) { _logger.error("Failed to recover segment: {} after reload failure", segmentName, recoveryFailureException); reloadFailureException.addSuppressed(recoveryFailureException); } addSegmentError(segmentName, new SegmentErrorInfo(System.currentTimeMillis(), "Caught exception while reloading segment", reloadFailureException)); throw reloadFailureException; } finally { segmentLock.unlock(); } _logger.info("Reloaded segment: {}", segmentName); }
@Test public void testReloadSegmentForceDownload() throws Exception { File indexDir = createSegment(SegmentVersion.v3, 5); SegmentZKMetadata zkMetadata = makeRawSegment(indexDir, new File(TEMP_DIR, SEGMENT_NAME + TarCompressionUtils.TAR_GZ_FILE_EXTENSION), false); // Same CRC, but force the download. BaseTableDataManager tableDataManager = createTableManager(); SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir); assertEquals(Long.parseLong(segmentMetadata.getCrc()), zkMetadata.getCrc()); // Remove the local segment dir. Segment reloading fails unless forced to download. FileUtils.deleteQuietly(indexDir); try { tableDataManager.reloadSegment(SEGMENT_NAME, new IndexLoadingConfig(), zkMetadata, segmentMetadata, null, false); fail(); } catch (Exception e) { // As expected, segment reloading fails because the local segment dir is missing. } tableDataManager.reloadSegment(SEGMENT_NAME, new IndexLoadingConfig(), zkMetadata, segmentMetadata, null, true); assertTrue(indexDir.exists()); segmentMetadata = new SegmentMetadataImpl(indexDir); assertEquals(Long.parseLong(segmentMetadata.getCrc()), zkMetadata.getCrc()); assertEquals(segmentMetadata.getTotalDocs(), 5); }
@Override public Optional<Lock> unlock(@Nonnull String resource, @Nullable String lockContext) { return doUnlock(resource, getLockedByString(lockContext)); }
@Test void unlock(MongoDBTestService mongodb) { final MongoLockService otherNodesLockService = new MongoLockService(otherNodeId, mongodb.mongoConnection(), MongoLockService.MIN_LOCK_TTL); final Lock orig = otherNodesLockService.lock("test-resource", null) .orElseThrow(() -> new IllegalStateException("Unable to create original lock.")); assertThat(lockService.lock("test-resource", null)).isEmpty(); final Optional<Lock> deletedLock = otherNodesLockService.unlock("test-resource", null); assertThat(deletedLock).hasValueSatisfying(l -> { assertThat(l.resource()).isEqualTo(orig.resource()); assertThat(l.lockedBy()).isEqualTo(orig.lockedBy()); }); assertThat(lockService.lock("test-resource", null)).isNotEmpty(); }
public void checkLimit() { if (!rateLimiter.tryAcquire()) { rejectSensor.record(); throw new KsqlRateLimitException("Host is at rate limit for pull queries. Currently set to " + rateLimiter.getRate() + " qps."); } }
@Test public void shouldError_atLimit() { final Metrics metrics = new Metrics(); final Map<String, String> tags = Collections.emptyMap(); // It doesn't look like the underlying guava rate limiter has a way to control time, so we're // just going to have to hope that these tests reliably run in under a second. final RateLimiter limiter = new RateLimiter(1, METRIC_NAMESPACE, metrics, tags); assertThat(getReject(metrics, tags), is(0.0)); limiter.checkLimit(); final KsqlException ksqlException = assertThrows(KsqlException.class, limiter::checkLimit); assertThat( ksqlException.getMessage(), is("Host is at rate limit for pull queries. Currently set to 1.0 qps.") ); assertThat(getReject(metrics, tags), is(1.0)); }
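The checkLimit/test pair above follows a common reject-above-rate pattern. Here is a stripped-down sketch of that pattern, assuming Guava's com.google.common.util.concurrent.RateLimiter (create, tryAcquire, getRate); the QueryGate class name and the plain IllegalStateException stand in for the original's KsqlRateLimitException, and the metrics sensor is left out.

import com.google.common.util.concurrent.RateLimiter;

public class QueryGate {
    private final RateLimiter rateLimiter;

    public QueryGate(double permitsPerSecond) {
        this.rateLimiter = RateLimiter.create(permitsPerSecond);
    }

    // Throws instead of blocking when the caller is over the configured rate.
    public void checkLimit() {
        if (!rateLimiter.tryAcquire()) {
            throw new IllegalStateException(
                "Host is at rate limit. Currently set to " + rateLimiter.getRate() + " qps.");
        }
    }

    public static void main(String[] args) {
        QueryGate gate = new QueryGate(1.0);
        gate.checkLimit();            // first permit is available immediately
        try {
            gate.checkLimit();        // second call within the same second is rejected
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        }
    }
}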
@Override public NearCacheStats getNearCacheStats() { throw new UnsupportedOperationException("Replicated map has no Near Cache!"); }
@Test(expected = UnsupportedOperationException.class) public void testNearCacheStats() { localReplicatedMapStats.getNearCacheStats(); }
@SuppressWarnings("deprecation") public boolean setSocketOpt(int option, Object optval) { final ValueReference<Boolean> result = new ValueReference<>(false); switch (option) { case ZMQ.ZMQ_SNDHWM: sendHwm = (Integer) optval; if (sendHwm < 0) { throw new IllegalArgumentException("sendHwm " + optval); } return true; case ZMQ.ZMQ_RCVHWM: recvHwm = (Integer) optval; if (recvHwm < 0) { throw new IllegalArgumentException("recvHwm " + optval); } return true; case ZMQ.ZMQ_AFFINITY: affinity = (Long) optval; return true; case ZMQ.ZMQ_IDENTITY: byte[] val = parseBytes(option, optval); if (val == null || val.length > 255) { throw new IllegalArgumentException("identity must not be null or longer than 255 bytes " + optval); } identity = Arrays.copyOf(val, val.length); identitySize = (short) identity.length; return true; case ZMQ.ZMQ_RATE: rate = (Integer) optval; return true; case ZMQ.ZMQ_RECOVERY_IVL: recoveryIvl = (Integer) optval; return true; case ZMQ.ZMQ_SNDBUF: sndbuf = (Integer) optval; return true; case ZMQ.ZMQ_RCVBUF: rcvbuf = (Integer) optval; return true; case ZMQ.ZMQ_TOS: tos = (Integer) optval; return true; case ZMQ.ZMQ_LINGER: linger = (Integer) optval; return true; case ZMQ.ZMQ_RECONNECT_IVL: reconnectIvl = (Integer) optval; if (reconnectIvl < -1) { throw new IllegalArgumentException("reconnectIvl " + optval); } return true; case ZMQ.ZMQ_RECONNECT_IVL_MAX: reconnectIvlMax = (Integer) optval; if (reconnectIvlMax < 0) { throw new IllegalArgumentException("reconnectIvlMax " + optval); } return true; case ZMQ.ZMQ_BACKLOG: backlog = (Integer) optval; return true; case ZMQ.ZMQ_MAXMSGSIZE: maxMsgSize = (Long) optval; return true; case ZMQ.ZMQ_MULTICAST_HOPS: multicastHops = (Integer) optval; return true; case ZMQ.ZMQ_RCVTIMEO: recvTimeout = (Integer) optval; return true; case ZMQ.ZMQ_SNDTIMEO: sendTimeout = (Integer) optval; return true; /* Deprecated in favor of ZMQ_IPV6 */ case ZMQ.ZMQ_IPV4ONLY: return setSocketOpt(ZMQ.ZMQ_IPV6, !parseBoolean(option, optval)); /* To replace the somewhat surprising IPV4ONLY */ case ZMQ.ZMQ_IPV6: ipv6 = parseBoolean(option, optval); return true; case ZMQ.ZMQ_SOCKS_PROXY: socksProxyAddress = parseString(option, optval); return true; case ZMQ.ZMQ_TCP_KEEPALIVE: tcpKeepAlive = ((Number) optval).intValue(); if (tcpKeepAlive != -1 && tcpKeepAlive != 0 && tcpKeepAlive != 1) { throw new IllegalArgumentException("tcpKeepAlive only accepts one of -1,0,1 " + optval); } return true; case ZMQ.ZMQ_TCP_KEEPALIVE_CNT: this.tcpKeepAliveCnt = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_TCP_KEEPALIVE_IDLE: this.tcpKeepAliveIdle = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_TCP_KEEPALIVE_INTVL: this.tcpKeepAliveIntvl = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_IMMEDIATE: immediate = parseBoolean(option, optval); return true; case ZMQ.ZMQ_DELAY_ATTACH_ON_CONNECT: immediate = !parseBoolean(option, optval); return true; case ZMQ.ZMQ_TCP_ACCEPT_FILTER: String filterStr = parseString(option, optval); if (filterStr == null) { tcpAcceptFilters.clear(); } else if (filterStr.isEmpty() || filterStr.length() > 255) { throw new IllegalArgumentException("tcp_accept_filter " + optval); } else { TcpAddressMask filter = new TcpAddressMask(filterStr, ipv6); tcpAcceptFilters.add(filter); } return true; case ZMQ.ZMQ_PLAIN_SERVER: asServer = parseBoolean(option, optval); mechanism = (asServer ? Mechanisms.PLAIN : Mechanisms.NULL); return true; case ZMQ.ZMQ_PLAIN_USERNAME: if (optval == null) { mechanism = Mechanisms.NULL; asServer = false; return true; } plainUsername = parseString(option, optval); asServer = false; mechanism = Mechanisms.PLAIN; return true; case ZMQ.ZMQ_PLAIN_PASSWORD: if (optval == null) { mechanism = Mechanisms.NULL; asServer = false; return true; } plainPassword = parseString(option, optval); asServer = false; mechanism = Mechanisms.PLAIN; return true; case ZMQ.ZMQ_ZAP_DOMAIN: String domain = parseString(option, optval); if (domain != null && domain.length() < 256) { zapDomain = domain; return true; } throw new IllegalArgumentException("zap domain length shall be < 256 : " + optval); case ZMQ.ZMQ_CURVE_SERVER: asServer = parseBoolean(option, optval); mechanism = (asServer ? Mechanisms.CURVE : Mechanisms.NULL); return true; case ZMQ.ZMQ_CURVE_PUBLICKEY: curvePublicKey = setCurveKey(option, optval, result); return result.get(); case ZMQ.ZMQ_CURVE_SECRETKEY: curveSecretKey = setCurveKey(option, optval, result); return result.get(); case ZMQ.ZMQ_CURVE_SERVERKEY: curveServerKey = setCurveKey(option, optval, result); if (curveServerKey == null) { asServer = false; } return result.get(); case ZMQ.ZMQ_CONFLATE: conflate = parseBoolean(option, optval); return true; case ZMQ.ZMQ_GSSAPI_SERVER: asServer = parseBoolean(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_PRINCIPAL: gssPrincipal = parseString(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_SERVICE_PRINCIPAL: gssServicePrincipal = parseString(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_PLAINTEXT: gssPlaintext = parseBoolean(option, optval); return true; case ZMQ.ZMQ_HANDSHAKE_IVL: handshakeIvl = (Integer) optval; if (handshakeIvl < 0) { throw new IllegalArgumentException("handshakeIvl only accepts non-negative values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_IVL: heartbeatInterval = (Integer) optval; if (heartbeatInterval < 0) { throw new IllegalArgumentException("heartbeatInterval only accepts non-negative values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_TIMEOUT: heartbeatTimeout = (Integer) optval; if (heartbeatTimeout < 0) { throw new IllegalArgumentException("heartbeatTimeout only accepts non-negative values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_TTL: Integer value = (Integer) optval; // Convert this to deciseconds from milliseconds value /= 100; if (value >= 0 && value <= 6553) { heartbeatTtl = value; } else { throw new IllegalArgumentException("heartbeatTtl is out of range [0..655399]" + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_CONTEXT: heartbeatContext = (byte[]) optval; if (heartbeatContext == null) { throw new IllegalArgumentException("heartbeatContext cannot be null"); } return true; case ZMQ.ZMQ_DECODER: decoder = checkCustomCodec(optval, IDecoder.class); rawSocket = true; // failure throws ZError.InstantiationException // if that line is reached, everything is fine return true; case ZMQ.ZMQ_ENCODER: encoder = checkCustomCodec(optval, IEncoder.class); rawSocket = true; // failure throws ZError.InstantiationException // if that line is reached, everything is fine return true; case ZMQ.ZMQ_MSG_ALLOCATOR: if (optval instanceof String) { try { allocator = allocator(Class.forName((String) optval)); return true; } catch (ClassNotFoundException e) { throw new IllegalArgumentException(e); } } else if (optval instanceof Class) { allocator = allocator((Class<?>) optval); return true; } else if (optval instanceof MsgAllocator) { allocator = (MsgAllocator) optval; return true; } return false; case ZMQ.ZMQ_MSG_ALLOCATION_HEAP_THRESHOLD: Integer allocationHeapThreshold = (Integer) optval; allocator = new MsgAllocatorThreshold(allocationHeapThreshold); return true; case ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER: if (optval instanceof String) { try { selectorChooser = chooser(Class.forName((String) optval)); return true; } catch (ClassNotFoundException e) { throw new IllegalArgumentException(e); } } else if (optval instanceof Class) { selectorChooser = chooser((Class<?>) optval); return true; } else if (optval instanceof SelectorProviderChooser) { selectorChooser = (SelectorProviderChooser) optval; return true; } return false; case ZMQ.ZMQ_HELLO_MSG: if (optval == null) { helloMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { helloMsg = null; } else { helloMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_DISCONNECT_MSG: if (optval == null) { disconnectMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { disconnectMsg = null; } else { disconnectMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_HICCUP_MSG: if (optval == null) { hiccupMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { hiccupMsg = null; } else { hiccupMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_AS_TYPE: this.asType = (Integer) optval; return true; case ZMQ.ZMQ_SELFADDR_PROPERTY_NAME: this.selfAddressPropertyName = parseString(option, optval); return true; default: throw new IllegalArgumentException("Unknown Option " + option); } }
@Test(expected = IllegalArgumentException.class) public void testHeartbeatTtlUnderflow() { options.setSocketOpt(ZMQ.ZMQ_HEARTBEAT_TTL, -100); }
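The ZMQ_HEARTBEAT_TTL branch above converts a millisecond value to deciseconds and range-checks it, which is exactly what the underflow test exercises. A tiny self-contained sketch of that conversion, with hypothetical class and method names:

public final class HeartbeatTtl {
    // The option takes milliseconds, divides by 100 (deciseconds) and rejects anything
    // outside [0..6553] deciseconds, so a negative input such as -100 is rejected.
    static int toDeciseconds(int ttlMillis) {
        int deciseconds = ttlMillis / 100;
        if (deciseconds < 0 || deciseconds > 6553) {
            throw new IllegalArgumentException("heartbeat TTL out of range: " + ttlMillis);
        }
        return deciseconds;
    }

    public static void main(String[] args) {
        System.out.println(toDeciseconds(1500));  // prints 15
        try {
            toDeciseconds(-100);                  // mirrors the underflow test: rejected
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}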
RegistryEndpointProvider<Optional<URL>> initializer() { return new Initializer(); }
@Test public void testInitializer_handleResponse_created() throws IOException, RegistryException { Mockito.when(mockResponse.getStatusCode()).thenReturn(201); // Created Assert.assertFalse(testBlobPusher.initializer().handleResponse(mockResponse).isPresent()); }
public static String formatExpression(final Expression expression) { return formatExpression(expression, FormatOptions.of(s -> false)); }
@Test public void shouldFormatCreateMapExpression() { assertThat(ExpressionFormatter.formatExpression( new CreateMapExpression(ImmutableMap.<Expression, Expression>builder() .put(new StringLiteral("foo"), new SubscriptExpression(new UnqualifiedColumnReferenceExp(ColumnName.of("abc")), new IntegerLiteral(1))) .put(new StringLiteral("bar"), new StringLiteral("val")) .build() )), equalTo("MAP('foo':=abc[1], 'bar':='val')") ); }
public static Builder custom() { return new Builder(); }
@Test public void shouldCreatePercentCutoff() { HedgeConfig config = HedgeConfig.custom() .averagePlusPercentageDuration(100, false).build(); then(HedgeDurationSupplier.fromConfig(config)).isInstanceOf(AverageDurationSupplier.class); }
public Future<KafkaVersionChange> reconcile() { return getVersionFromController() .compose(i -> getPods()) .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
@Test public void testUpgradeFromUnsupportedKafkaVersionWithAllVersions(VertxTestContext context) { String oldKafkaVersion = "2.8.0"; String oldInterBrokerProtocolVersion = "2.8"; String oldLogMessageFormatVersion = "2.8"; String kafkaVersion = VERSIONS.defaultVersion().version(); String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), mockNewCluster( null, mockSps(kafkaVersion), mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) ) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); assertThat(c.to(), is(VERSIONS.defaultVersion())); assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); async.flag(); }))); }
public void offerToQueue(Point p) { // @todo -- THIS IS FLAWED, this call WILL drop point data when the queue is // full // WE CANNOT (???) BLOCK (i.e. swap to queue.put(p)) BECAUSE THE "DATA LOADING // THREAD" IS THE SAME AS THE "DATA PROCESSING THREAD" // ARE YOU SURE? The one data pulling task would block at offerToQueue while the // other threads would continue processing data and emptying queues boolean addedToQueue = queue.offer(p); numPointsIngested++; if (!addedToQueue) { long curCount = failCounter.incrementAndGet(); if (curCount == failBreadCrumbTrigger) { makeOverflowBreadcrumb(failBreadCrumbTrigger); failBreadCrumbTrigger *= 10; throw new IllegalStateException("Failed to add Point to SwimLane " + failBreadCrumbTrigger / 10); } } }
@Test public void swimLaneEmitsBreadcrumbsOnOverflow() { SwimLane lane = new SwimLane(simpleStreamingKpi(), 1); Point point = Point.builder().latLong(0.0, 1.0).time(EPOCH).build(); //these files should be made when the swim lane rejects data because the queue was full File warn1 = new File(OVER_FLOW_FILEPREFIX + 1 + ".txt"); File warn10 = new File(OVER_FLOW_FILEPREFIX + 10 + ".txt"); File warn100 = new File(OVER_FLOW_FILEPREFIX + 100 + ".txt"); int exceptionCount = 0; assertThat(warn1.exists(), is(false)); assertThat(warn10.exists(), is(false)); assertThat(warn100.exists(), is(false)); assertThat(exceptionCount, is(0)); lane.offerToQueue(point); //accept 1st point assertThat(warn1.exists(), is(false)); assertThat(warn10.exists(), is(false)); assertThat(warn100.exists(), is(false)); assertThat(exceptionCount, is(0)); try { lane.offerToQueue(point); //drop this point } catch (IllegalStateException ise) { exceptionCount++; } assertThat(warn1.exists(), is(true)); assertThat(warn10.exists(), is(false)); assertThat(warn100.exists(), is(false)); assertThat(exceptionCount, is(1)); for (int i = 0; i < 10; i++) { try { lane.offerToQueue(point); //drop 10 more points } catch (IllegalStateException ise) { exceptionCount++; } } assertThat(warn1.exists(), is(true)); assertThat(warn10.exists(), is(true)); assertThat(warn100.exists(), is(false)); assertThat(exceptionCount, is(2)); for (int i = 0; i < 100; i++) { try { lane.offerToQueue(point); //drop 100 more points } catch (IllegalStateException ise) { exceptionCount++; } } assertThat(warn1.exists(), is(true)); assertThat(warn10.exists(), is(true)); assertThat(warn100.exists(), is(true)); assertThat(exceptionCount, is(3)); warn1.delete(); warn10.delete(); warn100.delete(); }
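The SwimLane test above exercises a non-blocking offer that counts dropped items and warns at growing thresholds. A self-contained sketch of that pattern using only java.util.concurrent; the DropCountingQueue class is hypothetical, and a log line stands in for the breadcrumb files and the exception.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

public class DropCountingQueue<T> {
    private final ArrayBlockingQueue<T> queue;
    private final AtomicLong dropped = new AtomicLong();
    private long nextWarningAt = 1;  // warn after 1, 10, 100, ... dropped items

    public DropCountingQueue(int capacity) {
        this.queue = new ArrayBlockingQueue<>(capacity);
    }

    // Never blocks the producer; counts drops and emits a warning at powers of ten.
    public boolean offer(T item) {
        if (queue.offer(item)) {
            return true;
        }
        long count = dropped.incrementAndGet();
        if (count == nextWarningAt) {
            System.err.println("Dropped " + count + " items, queue is full");
            nextWarningAt *= 10;
        }
        return false;
    }

    public static void main(String[] args) {
        DropCountingQueue<Integer> q = new DropCountingQueue<>(1);
        q.offer(1);          // accepted
        q.offer(2);          // dropped, prints the first warning
    }
}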
@Override public int getSessionIntervalTime() { return 30 * 1000; }
@Test public void getSessionIntervalTime() { Assert.assertEquals(30 * 1000, mSensorsAPI.getSessionIntervalTime()); }
@Override public void run() { if (processor != null) { processor.execute(); } else { if (!beforeHook()) { logger.info("before-feature hook returned [false], aborting: {}", this); } else { scenarios.forEachRemaining(this::processScenario); } afterFeature(); } }
@Test void testSchemaRead() { run("schema-read.feature"); }
public LockedInodePath lockFinalEdgeWrite() throws InvalidPathException { Preconditions.checkState(!fullPathExists()); LockedInodePath newPath = new LockedInodePath(mUri, this, mPathComponents, LockPattern.WRITE_EDGE, mUseTryLock); newPath.traverse(); return newPath; }
@Test public void lockFinalEdgeWrite() throws Exception { mInodeStore.removeChild(mRootDir.getId(), "a"); mPath = create("/a", LockPattern.READ); mPath.traverse(); LockedInodePath writeLocked = mPath.lockFinalEdgeWrite(); assertFalse(writeLocked.fullPathExists()); assertEquals(Arrays.asList(mRootDir), writeLocked.getInodeList()); checkOnlyNodesReadLocked(mRootDir); checkOnlyNodesWriteLocked(); checkOnlyIncomingEdgesReadLocked(mRootDir); checkOnlyIncomingEdgesWriteLocked(mDirA); writeLocked.close(); checkOnlyNodesReadLocked(mRootDir); checkOnlyNodesWriteLocked(); checkOnlyIncomingEdgesReadLocked(mRootDir); checkOnlyIncomingEdgesWriteLocked(); }
@Override public List<T> getResults() { return multiResult.getResults(); }
@Test public void testGetResults() { List<Integer> results = immutableMultiResult.getResults(); assertEquals(2, results.size()); assertTrue(results.contains(23)); assertTrue(results.contains(42)); }
@Override public String getHostname(final String alias) { if(StringUtils.isBlank(alias)) { return alias; } final String hostname = configuration.lookup(alias).getHostName(); if(StringUtils.isBlank(hostname)) { return alias; } if(log.isInfoEnabled()) { log.info(String.format("Determined hostname %s from alias %s from %s", hostname, alias, configuration)); } return hostname; }
@Test public void testLookup() { OpenSSHHostnameConfigurator c = new OpenSSHHostnameConfigurator( new OpenSshConfig( new Local("src/test/resources", "openssh/config"))); assertEquals("cyberduck.ch", c.getHostname("alias")); }
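The hostname lookup above delegates alias resolution to an OpenSSH-style config. As a rough, JDK-only sketch of what that resolution involves (a hypothetical resolve helper, not the library's actual parser), assuming a config file with Host/HostName entries:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class SshConfigHostnameSketch {
    // Returns the HostName configured for the given alias, or the alias itself if none is found.
    static String resolve(Path config, String alias) throws IOException {
        List<String> lines = Files.readAllLines(config);
        boolean inBlock = false;
        for (String raw : lines) {
            String line = raw.trim();
            if (line.regionMatches(true, 0, "Host ", 0, 5)) {
                inBlock = line.substring(5).trim().equals(alias);
            } else if (inBlock && line.regionMatches(true, 0, "HostName ", 0, 9)) {
                return line.substring(9).trim();
            }
        }
        return alias;
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical config containing: "Host alias" followed by "  HostName cyberduck.ch"
        System.out.println(resolve(Path.of("src/test/resources/openssh/config"), "alias"));
    }
}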
@Override public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) { if (client.getId() != null) { // if it's not null, it's already been saved, this is an error throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId()); } if (client.getRegisteredRedirectUri() != null) { for (String uri : client.getRegisteredRedirectUri()) { if (blacklistedSiteService.isBlacklisted(uri)) { throw new IllegalArgumentException("Client URI is blacklisted: " + uri); } } } // assign a random clientid if it's empty // NOTE: don't assign a random client secret without asking, since public clients have no secret if (Strings.isNullOrEmpty(client.getClientId())) { client = generateClientId(client); } // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa ensureRefreshTokenConsistency(client); // make sure we don't have both a JWKS and a JWKS URI ensureKeyConsistency(client); // check consistency when using HEART mode checkHeartMode(client); // timestamp this to right now client.setCreatedAt(new Date()); // check the sector URI checkSectorIdentifierUri(client); ensureNoReservedScopes(client); ClientDetailsEntity c = clientRepository.saveClient(client); statsService.resetCache(); return c; }
@Test(expected = IllegalArgumentException.class) public void saveNewClient_badId() { // Set up a mock client. ClientDetailsEntity client = Mockito.mock(ClientDetailsEntity.class); Mockito.when(client.getId()).thenReturn(12345L); // any non-null ID will work service.saveNewClient(client); }
public static <K, E, V> Collector<E, ImmutableSetMultimap.Builder<K, V>, ImmutableSetMultimap<K, V>> unorderedFlattenIndex( Function<? super E, K> keyFunction, Function<? super E, Stream<V>> valueFunction) { verifyKeyAndValueFunctions(keyFunction, valueFunction); BiConsumer<ImmutableSetMultimap.Builder<K, V>, E> accumulator = (map, element) -> { K key = requireNonNull(keyFunction.apply(element), KEY_FUNCTION_CANT_RETURN_NULL_MESSAGE); Stream<V> valueStream = requireNonNull(valueFunction.apply(element), VALUE_FUNCTION_CANT_RETURN_NULL_MESSAGE); valueStream.forEach(value -> map.put(key, value)); }; BinaryOperator<ImmutableSetMultimap.Builder<K, V>> merger = (m1, m2) -> { for (Map.Entry<K, V> entry : m2.build().entries()) { m1.put(entry.getKey(), entry.getValue()); } return m1; }; return Collector.of( ImmutableSetMultimap::builder, accumulator, merger, ImmutableSetMultimap.Builder::build); }
@Test public void unorderedFlattenIndex_with_valueFunction_fails_if_value_function_is_null() { assertThatThrownBy(() -> unorderedFlattenIndex(MyObj2::getId, null)) .isInstanceOf(NullPointerException.class) .hasMessage("Value function can't be null"); }
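The flatten-and-index collector above groups elements by a key and flattens a stream of values per element. A JDK-only sketch of the same idea using Collectors.flatMapping (available since Java 9; the record syntax needs Java 16+); the Article record and its field names are hypothetical.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class FlattenIndexSketch {
    // Hypothetical element type: one author, several tags.
    record Article(String author, List<String> tags) {}

    public static void main(String[] args) {
        List<Article> articles = List.of(
            new Article("ada", List.of("java", "streams")),
            new Article("ada", List.of("collectors")),
            new Article("bob", List.of("java")));

        // author -> all tags from all of that author's articles
        Map<String, List<String>> tagsByAuthor = articles.stream()
            .collect(Collectors.groupingBy(
                Article::author,
                Collectors.flatMapping(a -> a.tags().stream(), Collectors.toList())));

        // e.g. {ada=[java, streams, collectors], bob=[java]} (key order may vary)
        System.out.println(tagsByAuthor);
    }
}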