Dataset columns: focal_method (string, lengths 13 to 60.9k) and test_case (string, lengths 25 to 109k).
@Override public String getRomOAID() { return null; }
@Test
public void getRomOAID() {
    DefaultImpl impl = new DefaultImpl();
    // if (impl.isSupported()) {
    //     Assert.assertNull(impl.getRomOAID());
    // }
}
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
    try {
        final String prefix = this.createPrefix(directory);
        if(log.isDebugEnabled()) {
            log.debug(String.format("List with prefix %s", prefix));
        }
        final Path bucket = containerService.getContainer(directory);
        final AttributedList<Path> objects = new AttributedList<>();
        String priorLastKey = null;
        String priorLastVersionId = null;
        long revision = 0L;
        String lastKey = null;
        boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory);
        do {
            final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
                bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER),
                new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"),
                priorLastKey, priorLastVersionId, false);
            // Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first.
            for(BaseVersionOrDeleteMarker marker : chunk.getItems()) {
                final String key = URIEncoder.decode(marker.getKey());
                if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) {
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Skip placeholder key %s", key));
                    }
                    hasDirectoryPlaceholder = true;
                    continue;
                }
                final PathAttributes attr = new PathAttributes();
                attr.setVersionId(marker.getVersionId());
                if(!StringUtils.equals(lastKey, key)) {
                    // Reset revision for next file
                    revision = 0L;
                }
                attr.setRevision(++revision);
                attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest());
                if(marker.isDeleteMarker()) {
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true)));
                }
                attr.setModificationDate(marker.getLastModified().getTime());
                attr.setRegion(bucket.attributes().getRegion());
                if(marker instanceof S3Version) {
                    final S3Version object = (S3Version) marker;
                    attr.setSize(object.getSize());
                    if(StringUtils.isNotBlank(object.getEtag())) {
                        attr.setETag(StringUtils.remove(object.getEtag(), "\""));
                        // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
                        // using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
                        // not the MD5 of the object data.
                        attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\"")));
                    }
                    if(StringUtils.isNotBlank(object.getStorageClass())) {
                        attr.setStorageClass(object.getStorageClass());
                    }
                }
                final Path f = new Path(directory.isDirectory() ? directory : directory.getParent(),
                    PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr);
                if(metadata) {
                    f.withAttributes(attributes.find(f));
                }
                objects.add(f);
                lastKey = key;
            }
            final String[] prefixes = chunk.getCommonPrefixes();
            final List<Future<Path>> folders = new ArrayList<>();
            for(String common : prefixes) {
                if(new SimplePathPredicate(PathNormalizer.compose(bucket, URIEncoder.decode(common))).test(directory)) {
                    continue;
                }
                folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common)));
            }
            for(Future<Path> f : folders) {
                try {
                    objects.add(Uninterruptibles.getUninterruptibly(f));
                }
                catch(ExecutionException e) {
                    log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
                    for(Throwable cause : ExceptionUtils.getThrowableList(e)) {
                        Throwables.throwIfInstanceOf(cause, BackgroundException.class);
                    }
                    throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e));
                }
            }
            priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null;
            priorLastVersionId = chunk.getNextVersionIdMarker();
            listener.chunk(directory, objects);
        }
        while(priorLastKey != null);
        if(!hasDirectoryPlaceholder && objects.isEmpty()) {
            // Only for AWS
            if(S3Session.isAwsHostname(session.getHost().getHostname())) {
                if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
                    if(log.isWarnEnabled()) {
                        log.warn(String.format("No placeholder found for directory %s", directory));
                    }
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
            else {
                // Handle missing prefix for directory placeholders in Minio
                final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(),
                    String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()),
                    String.valueOf(Path.DELIMITER), 1, null, null, false);
                if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) {
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
        }
        return objects;
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
@Test public void testListCommonPrefixSlashOnly() throws Exception { final Path container = new Path("test-eu-central-1-cyberduck-unsupportedprefix", EnumSet.of(Path.Type.directory, Path.Type.volume)); final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); assertTrue(new S3ObjectListService(session, acl).list(container, new DisabledListProgressListener()).isEmpty()); }
@Override public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap, String serviceInterface) { if (!shouldHandle(invokers)) { return invokers; } List<Object> result = getTargetInvokersByRules(invokers, targetService); return super.handle(targetService, result, invocation, queryMap, serviceInterface); }
@Test
public void testGetTargetInvokerByTagRulesWithPolicySceneThree() {
    // initialize the routing rule
    RuleInitializationUtils.initAZTagMatchTriggerThresholdMinAllInstancesPolicyRule();
    // Scenario 1: The downstream provider has instances that meet the requirements
    List<Object> invokers = new ArrayList<>();
    ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0", "az1");
    invokers.add(invoker1);
    ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.0", "az2");
    invokers.add(invoker2);
    ApacheInvoker<Object> invoker3 = new ApacheInvoker<>("1.0.1", "az1");
    invokers.add(invoker3);
    ApacheInvoker<Object> invoker4 = new ApacheInvoker<>("1.0.1", "az2");
    invokers.add(invoker4);
    ApacheInvoker<Object> invoker5 = new ApacheInvoker<>("1.0.2", "az1");
    invokers.add(invoker5);
    Invocation invocation = new ApacheInvocation();
    Map<String, String> queryMap = new HashMap<>();
    queryMap.put("zone", "az1");
    queryMap.put("interface", "io.sermant.foo.FooTest");
    Map<String, String> parameters = new HashMap<>();
    parameters.putIfAbsent(RouterConstant.META_ZONE_KEY, "az1");
    DubboCache.INSTANCE.setParameters(parameters);
    DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo");
    List<Object> targetInvokers = (List<Object>) tagRouteHandler.handle(
        DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
        invokers, invocation, queryMap, "io.sermant.foo.FooTest");
    Assert.assertEquals(3, targetInvokers.size());
    ConfigCache.getLabel(RouterConstant.DUBBO_CACHE_NAME).resetRouteRule(Collections.emptyMap());
}
public static ConnectToSqlTypeConverter connectToSqlConverter() { return CONNECT_TO_SQL_CONVERTER; }
@Test
public void shouldThrowOnUnsupportedConnectSchemaType() {
    // Given:
    final Schema unsupported = SchemaBuilder.int8().build();

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> SchemaConverters.connectToSqlConverter().toSqlType(unsupported)
    );

    // Then:
    assertThat(e.getMessage(), containsString("Unexpected schema type: Schema{INT8}"));
}
public static Object[] getArguments(Invocation invocation) { if (($INVOKE.equals(invocation.getMethodName()) || $INVOKE_ASYNC.equals(invocation.getMethodName())) && invocation.getArguments() != null && invocation.getArguments().length > 2 && invocation.getArguments()[2] instanceof Object[]) { return (Object[]) invocation.getArguments()[2]; } return invocation.getArguments(); }
@Test void testGet_$invoke_Arguments() { Object[] args = new Object[] {"hello", "dubbo", 520}; Class<?> demoServiceClass = DemoService.class; String serviceName = demoServiceClass.getName(); Invoker invoker = createMockInvoker(); RpcInvocation inv = new RpcInvocation( "$invoke", serviceName, "", new Class<?>[] {String.class, String[].class, Object[].class}, new Object[] {"method", new String[] {}, args}, null, invoker, null); Object[] arguments = RpcUtils.getArguments(inv); for (int i = 0; i < args.length; i++) { Assertions.assertNotNull(arguments[i]); Assertions.assertEquals( args[i].getClass().getName(), arguments[i].getClass().getName()); Assertions.assertEquals(args[i], arguments[i]); } }
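The fallthrough branch of getArguments (a plain, non-generic invocation) is not exercised above; a minimal sketch reusing the same RpcInvocation constructor follows — the method name "echo" and its single argument are illustrative, not from the source.
@Test
void testGetArgumentsFallthrough() {
    // a normal (non-$invoke) invocation falls through to invocation.getArguments()
    Invoker invoker = createMockInvoker();
    RpcInvocation inv = new RpcInvocation("echo", DemoService.class.getName(), "",
        new Class<?>[] {String.class}, new Object[] {"hello"}, null, invoker, null);
    Assertions.assertArrayEquals(new Object[] {"hello"}, RpcUtils.getArguments(inv));
}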
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) { return Optional.ofNullable(HANDLERS.get(step.getClass())) .map(h -> h.handle(this, schema, step)) .orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass())); }
@Test
public void shouldResolveSchemaForStreamSelect() {
    // Given:
    final StreamSelect<?> step = new StreamSelect<>(
        PROPERTIES,
        streamSource,
        ImmutableList.of(),
        Optional.empty(),
        ImmutableList.of(
            add("JUICE", "ORANGE", "APPLE"),
            ref("PLANTAIN", "BANANA"),
            ref("CITRUS", "ORANGE"))
    );

    // When:
    final LogicalSchema result = resolver.resolve(step, SCHEMA);

    // Then:
    assertThat(result, is(
        LogicalSchema.builder()
            .keyColumn(ColumnName.of("K0"), SqlTypes.INTEGER)
            .valueColumn(ColumnName.of("JUICE"), SqlTypes.BIGINT)
            .valueColumn(ColumnName.of("PLANTAIN"), SqlTypes.STRING)
            .valueColumn(ColumnName.of("CITRUS"), SqlTypes.INTEGER)
            .build())
    );
}
public Certificate add(CvCertificate cert) {
    final Certificate db = Certificate.from(cert);
    if (repository.countByIssuerAndSubject(db.getIssuer(), db.getSubject()) > 0) {
        throw new ClientException(String.format(
            "Certificate of subject %s and issuer %s already exists", db.getSubject(), db.getIssuer()));
    }
    // Special case for first CVCA certificate for this document type
    if (db.getType() == Certificate.Type.CVCA
            && repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) {
        signatureService.verify(cert, cert.getBody().getPublicKey(), cert.getBody().getPublicKey().getParams());
        logger.warn("Added first CVCA certificate for {}, set trusted flag manually", db.getDocumentType());
    } else {
        verify(cert);
        if (db.getType() == Certificate.Type.AT) {
            verifyPublicKey(cert);
        }
    }
    return repository.saveAndFlush(db);
}
@Test public void shouldNotAddATIfHSMIsUnavailable() throws Exception { Mockito.doThrow(new nl.logius.digid.sharedlib.exception.ClientException( "Bad Gateway", 503 )).when(hsmClient).keyInfo(Mockito.eq("AT"), Mockito.eq("SSSSSSSSSSSSSSSS")); certificateRepo.save(loadCvCertificate("rdw/acc/cvca.cvcert", true)); certificateRepo.save(loadCvCertificate("rdw/acc/dvca.cvcert", false)); assertThrows(ServerException.class, () -> service.add(readCvCertificate("rdw/acc/at001.cvcert"))); }
@Operation(summary = "updateUser", description = "UPDATE_USER_NOTES") @Parameters({ @Parameter(name = "id", description = "USER_ID", required = true, schema = @Schema(implementation = int.class, example = "100")), @Parameter(name = "userName", description = "USER_NAME", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "userPassword", description = "USER_PASSWORD", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "tenantId", description = "TENANT_ID", required = true, schema = @Schema(implementation = int.class, example = "100")), @Parameter(name = "queue", description = "QUEUE", schema = @Schema(implementation = String.class)), @Parameter(name = "email", description = "EMAIL", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "phone", description = "PHONE", schema = @Schema(implementation = String.class)), @Parameter(name = "state", description = "STATE", schema = @Schema(implementation = int.class, example = "1")) }) @PostMapping(value = "/update") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_USER_ERROR) public Result<User> updateUser(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "id") int id, @RequestParam(value = "userName") String userName, @RequestParam(value = "userPassword") String userPassword, @RequestParam(value = "queue", required = false, defaultValue = "") String queue, @RequestParam(value = "email") String email, @RequestParam(value = "tenantId") int tenantId, @RequestParam(value = "phone", required = false) String phone, @RequestParam(value = "state", required = false) int state, @RequestParam(value = "timeZone", required = false) String timeZone) throws Exception { User user = usersService.updateUser(loginUser, id, userName, userPassword, email, tenantId, phone, queue, state, timeZone); return Result.success(user); }
@Test public void testUpdateUser() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("id", "32"); paramsMap.add("userName", "user_test"); paramsMap.add("userPassword", "123456qwe?"); paramsMap.add("tenantId", "9"); paramsMap.add("queue", "1"); paramsMap.add("email", "12343534@qq.com"); paramsMap.add("phone", "15800000000"); MvcResult mvcResult = mockMvc.perform(post("/users/update") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertEquals(Status.UPDATE_USER_ERROR.getCode(), result.getCode().intValue()); }
@Override
public void log(Level logLevel, String message) {
    if (!messageConsumers.containsKey(logLevel)) {
        return;
    }
    Consumer<String> messageConsumer = messageConsumers.get(logLevel);
    // remove the color from the message
    final String plainMessage = message.replaceAll("\u001B\\[[0-9;]{1,5}m", "");
    singleThreadedExecutor.execute(() -> messageConsumer.accept(plainMessage));
}
@Test public void testLog_ignoreIfNoMessageConsumer() { testPlainConsoleLogger = new PlainConsoleLogger( ImmutableMap.of(Level.WARN, createMessageConsumer(Level.WARN)), singleThreadedExecutor); testPlainConsoleLogger.log(Level.LIFECYCLE, "lifecycle"); testPlainConsoleLogger.log(Level.PROGRESS, "progress"); testPlainConsoleLogger.log(Level.INFO, "info"); testPlainConsoleLogger.log(Level.DEBUG, "debug"); testPlainConsoleLogger.log(Level.WARN, "warn"); testPlainConsoleLogger.log(Level.ERROR, "error"); singleThreadedExecutor.shutDownAndAwaitTermination(SHUTDOWN_TIMEOUT); Assert.assertEquals(Collections.singletonList(Level.WARN), levels); Assert.assertEquals(Collections.singletonList("warn"), messages); }
@Override
@SuppressWarnings("unchecked")
public boolean delDir(String dirPath) {
    if (false == cd(dirPath)) {
        return false;
    }
    final ChannelSftp channel = getClient();
    Vector<LsEntry> list;
    try {
        list = channel.ls(channel.pwd());
    } catch (SftpException e) {
        throw new JschRuntimeException(e);
    }
    String fileName;
    for (LsEntry entry : list) {
        fileName = entry.getFilename();
        if (false == ".".equals(fileName) && false == "..".equals(fileName)) {
            if (entry.getAttrs().isDir()) {
                delDir(fileName);
            } else {
                delFile(fileName);
            }
        }
    }
    if (false == cd("..")) {
        return false;
    }
    // Delete the now-empty directory
    try {
        channel.rmdir(dirPath);
        return true;
    } catch (SftpException e) {
        throw new JschRuntimeException(e);
    }
}
@Test @Disabled public void delDirTest() { sshjSftp.delDir("/home/test/temp"); }
@SuppressWarnings("unchecked") protected Set<PathSpec> getFields() { Object fields = _queryParams.get(RestConstants.FIELDS_PARAM); if (fields == null) { return Collections.emptySet(); } if (fields instanceof Set) { return (Set<PathSpec>) fields; } else if (fields instanceof String) { try { MaskTree tree = URIMaskUtil.decodeMaskUriFormat((String) fields); return tree.getOperations().keySet(); } catch (IllegalMaskException e) { throw new IllegalArgumentException("Field param was a string and it did not represent a serialized mask tree", e); } } else if (fields instanceof DataMap) { MaskTree tree = new MaskTree((DataMap) fields); return tree.getOperations().keySet(); } throw new IllegalArgumentException("Fields param is of unrecognized type: " + fields.getClass()); }
@Test public void testMaskTreeFieldsParam() { DataMap fields = new DataMap(); fields.put("id", MaskMap.POSITIVE_MASK); GetRequest<TestRecord> getRequest = generateDummyRequestBuilder().setParam(RestConstants.FIELDS_PARAM, fields).build(); assertEquals(getRequest.getFields(), Collections.singleton(new PathSpec("id"))); }
@Override public Endpoint<Http2RemoteFlowController> remote() { return remoteEndpoint; }
@Test public void clientCreatePushShouldFailOnRemoteEndpointWhenMaxAllowedStreamsExceeded() throws Http2Exception { client = new DefaultHttp2Connection(false, 0); client.remote().maxActiveStreams(1); final Http2Stream requestStream = client.remote().createStream(2, false); assertThrows(Http2Exception.class, new Executable() { @Override public void execute() throws Throwable { client.remote().reservePushStream(4, requestStream); } }); }
@Override public List<RoleDO> getRoleList() { return roleMapper.selectList(); }
@Test
public void testGetRoleList_ids() {
    // mock data
    RoleDO dbRole01 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    roleMapper.insert(dbRole01);
    RoleDO dbRole02 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
    roleMapper.insert(dbRole02);
    // prepare parameters
    Collection<Long> ids = singleton(dbRole01.getId());
    // invoke
    List<RoleDO> list = roleService.getRoleList(ids);
    // assert
    assertEquals(1, list.size());
    assertPojoEquals(dbRole01, list.get(0));
}
public static <T> List<List<T>> partition(Collection<T> values, int partitionSize) { if (values == null) { return null; } else if (values.isEmpty()) { return Collections.emptyList(); } List<T> valuesList; if (values instanceof List) { valuesList = (List<T>) values; } else { valuesList = new ArrayList<>(values); } int valuesSize = values.size(); if (valuesSize <= partitionSize) { return Collections.singletonList(valuesList); } List<List<T>> safeValuesList = new ArrayList<>(); consumePartitions(values, partitionSize, safeValuesList::add); return safeValuesList; }
@Test void partitionSet() { Collection<String> set = new LinkedHashSet<>(); set.add("1"); set.add("2"); set.add("3"); set.add("4"); set.add("5"); assertThat(CollectionUtil.partition(set, 3)) .containsExactly( Arrays.asList("1", "2", "3"), Arrays.asList("4", "5") ); assertThat(CollectionUtil.partition(set, 5)) .containsExactly( Arrays.asList("1", "2", "3", "4", "5") ); }
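The two boundary branches of partition (null input returns null, empty input returns an empty list) are not exercised by the set test above; a minimal sketch of those cases in the same AssertJ style, with the test-class placement assumed:
@Test
void partitionNullAndEmpty() {
    // null input short-circuits to null (first branch of the focal method)
    assertThat(CollectionUtil.partition((Collection<String>) null, 3)).isNull();
    // empty input short-circuits to an empty list (second branch)
    assertThat(CollectionUtil.partition(Collections.<String>emptyList(), 3)).isEmpty();
}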
public Span handleReceive(RpcServerRequest request) { Span span = nextSpan(extractor.extract(request), request); return handleStart(request, span); }
@Test void handleReceive_samplerSeesRpcServerRequest() { SamplerFunction<RpcRequest> serverSampler = mock(SamplerFunction.class); init(httpTracingBuilder(tracingBuilder()).serverSampler(serverSampler)); handler.handleReceive(request); verify(serverSampler).trySample(request); }
@Override public void close() { }
@Test
public void shouldSucceed_gapDetectedRemote_noRetry() throws ExecutionException, InterruptedException {
    // Given:
    final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>(
        ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote));
    final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, false);

    // When:
    final PushConnectionsHandle handle = handlePushRouting(routing);
    context.runOnContext(v -> {
        remotePublisher.accept(REMOTE_ROW1);
        remotePublisher.accept(REMOTE_CONTINUATION_TOKEN1);
        remotePublisher.accept(REMOTE_ROW2);
        remotePublisher.accept(REMOTE_CONTINUATION_TOKEN_GAP);
    });
    Set<List<?>> rows = waitOnRows(2);
    waitOnNodeStatus(handle, ksqlNodeRemote, RoutingResultStatus.OFFSET_GAP_FOUND);
    handle.close();

    // Then:
    assertThat(rows.contains(REMOTE_ROW1.getRow().get().getColumns()), is(true));
    assertThat(rows.contains(REMOTE_ROW2.getRow().get().getColumns()), is(true));
    assertThat(handle.get(ksqlNodeRemote).get().getStatus(), is(RoutingResultStatus.OFFSET_GAP_FOUND));
}
public OpenConfigChannelHandler addLogicalChannelAssignments( OpenConfigLogicalChannelAssignmentsHandler logicalChannelAssignments) { modelObject.logicalChannelAssignments(logicalChannelAssignments.getModelObject()); return this; }
@Test
public void testAddLogicalChannelAssignments() {
    // test Handler
    OpenConfigChannelHandler channel = new OpenConfigChannelHandler(1, parent);

    // call addLogicalChannelAssignments
    OpenConfigLogicalChannelAssignmentsHandler logicalChannelAssignments =
        new OpenConfigLogicalChannelAssignmentsHandler(channel);

    // expected ModelObject
    DefaultChannel modelObject = new DefaultChannel();
    modelObject.index(1);
    DefaultLogicalChannelAssignments logicalChannel = new DefaultLogicalChannelAssignments();
    modelObject.logicalChannelAssignments(logicalChannel);

    assertEquals(
        "[NG]addLogicalChannelAssignments:ModelObject(LogicalChannelAssignments added) is not an expected one.\n",
        modelObject, channel.getModelObject());
}
public boolean verifySignature(String jwksUri, SignedJWT signedJwt) throws JOSEException, InvalidSignatureException, IOException, ParseException { var publicKeys = getPublicKeys(jwksUri); var kid = signedJwt.getHeader().getKeyID(); if (kid != null) { var key = ((RSAKey) publicKeys.getKeyByKeyId(kid)); if (key != null) { RSASSAVerifier rsaSSAVerifier = new RSASSAVerifier(key.toRSAPublicKey()); if (signedJwt.verify(rsaSSAVerifier)) return true; } } for (JWK jwk : publicKeys.getKeys()) { if (signedJwt.verify(new RSASSAVerifier(((RSAKey) jwk).toRSAPublicKey()))) return true; } throw new InvalidSignatureException("Could not validate signature of JWT token"); }
@Test void verifyValidSignatureTest() throws ParseException, InvalidSignatureException, IOException, JOSEException { provider.verifySignature("jwskUri", SignedJWT.parse(client.generateRequest())); }
@Override public FSDataOutputStream create(Path path, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { String confUmask = mAlluxioConf.getString(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK); Mode mode = ModeUtils.applyFileUMask(Mode.defaults(), confUmask); return this.create(path, new FsPermission(mode.toShort()), overwrite, bufferSize, replication, blockSize, progress); }
@Test public void initializeWithZookeeperSystemProperties() throws Exception { HashMap<String, String> sysProps = new HashMap<>(); sysProps.put(PropertyKey.ZOOKEEPER_ENABLED.getName(), "true"); sysProps.put(PropertyKey.ZOOKEEPER_ADDRESS.getName(), "zkHost:2181"); try (Closeable p = new SystemPropertyRule(sysProps).toResource()) { alluxio.conf.Configuration.reloadProperties(); URI uri = URI.create("alluxio:///"); FileSystem fs = getHadoopFilesystem(org.apache.hadoop.fs.FileSystem.get(uri, getConf())); assertTrue(fs.mFileSystem.getConf().getBoolean(PropertyKey.ZOOKEEPER_ENABLED)); assertEquals("zkHost:2181", fs.mFileSystem.getConf().get(PropertyKey.ZOOKEEPER_ADDRESS)); } }
@Override public boolean contains(Object object) { return get(containsAsync(object)); }
@Test public void testContains() { RScoredSortedSet<TestObject> set = redisson.getScoredSortedSet("simple"); set.add(0, new TestObject("1", "2")); set.add(1, new TestObject("1", "2")); set.add(2, new TestObject("2", "3")); set.add(3, new TestObject("3", "4")); set.add(4, new TestObject("5", "6")); Assertions.assertTrue(set.contains(new TestObject("2", "3"))); Assertions.assertTrue(set.contains(new TestObject("1", "2"))); Assertions.assertFalse(set.contains(new TestObject("1", "9"))); }
@Override public String serializeToString() { return REPLACE_RESOLVED_DST_PATH.getConfigName() + RegexMountPoint.INTERCEPTOR_INTERNAL_SEP + srcRegexString + RegexMountPoint.INTERCEPTOR_INTERNAL_SEP + replaceString; }
@Test public void testSerialization() { String srcRegex = "word1"; String replaceString = "word2"; String serializedString = createSerializedString(srcRegex, replaceString); RegexMountPointResolvedDstPathReplaceInterceptor interceptor = new RegexMountPointResolvedDstPathReplaceInterceptor(srcRegex, replaceString); Assert.assertEquals(interceptor.serializeToString(), serializedString); }
public boolean couldHoldIgnoringSharedMemory(NormalizedResources other, double thisTotalMemoryMb, double otherTotalMemoryMb) { if (this.cpu < other.getTotalCpu()) { return false; } return couldHoldIgnoringSharedMemoryAndCpu(other, thisTotalMemoryMb, otherTotalMemoryMb); }
@Test public void testCouldHoldWithTooFewResource() { NormalizedResources resources = new NormalizedResources(normalize(Collections.singletonMap(gpuResourceName, 1))); NormalizedResources resourcesToCheck = new NormalizedResources(normalize(Collections.singletonMap(gpuResourceName, 2))); boolean couldHold = resources.couldHoldIgnoringSharedMemory(resourcesToCheck, 100, 1); assertThat(couldHold, is(false)); }
public static String servicePath(String basePath) { return servicePath(basePath, SERVICE_PATH); }
@Test(dataProvider = "servicePaths") public void testZKFSUtilServicePath(String basePath, String servicePath, String resultServicePath) { Assert.assertEquals(ZKFSUtil.servicePath(basePath, servicePath), resultServicePath); }
public static RecordBuilder<Schema> record(String name) { return builder().record(name); }
@Test void fields() { Schema rec = SchemaBuilder.record("Rec").fields().name("documented").doc("documented").type().nullType().noDefault() .name("ascending").orderAscending().type().booleanType().noDefault().name("descending").orderDescending().type() .floatType().noDefault().name("ignored").orderIgnore().type().doubleType().noDefault().name("aliased") .aliases("anAlias").type().stringType().noDefault().endRecord(); assertEquals("documented", rec.getField("documented").doc()); assertEquals(Order.ASCENDING, rec.getField("ascending").order()); assertEquals(Order.DESCENDING, rec.getField("descending").order()); assertEquals(Order.IGNORE, rec.getField("ignored").order()); assertTrue(rec.getField("aliased").aliases().contains("anAlias")); }
@ConstantFunction.List(list = { @ConstantFunction(name = "divide", argTypes = {DECIMALV2, DECIMALV2}, returnType = DECIMALV2), @ConstantFunction(name = "divide", argTypes = {DECIMAL32, DECIMAL32}, returnType = DECIMAL32), @ConstantFunction(name = "divide", argTypes = {DECIMAL64, DECIMAL64}, returnType = DECIMAL64), @ConstantFunction(name = "divide", argTypes = {DECIMAL128, DECIMAL128}, returnType = DECIMAL128) }) public static ConstantOperator divideDecimal(ConstantOperator first, ConstantOperator second) { if (BigDecimal.ZERO.compareTo(second.getDecimal()) == 0) { return ConstantOperator.createNull(second.getType()); } return createDecimalConstant(first.getDecimal().divide(second.getDecimal())); }
@Test public void divideDecimal() { assertEquals("1", ScalarOperatorFunctions.divideDecimal(O_DECIMAL_100, O_DECIMAL_100).getDecimal().toString()); assertEquals("1", ScalarOperatorFunctions.divideDecimal(O_DECIMAL32P7S2_100, O_DECIMAL32P7S2_100).getDecimal() .toString()); assertEquals("1", ScalarOperatorFunctions.divideDecimal(O_DECIMAL32P9S0_100, O_DECIMAL32P9S0_100).getDecimal() .toString()); assertEquals("1", ScalarOperatorFunctions.divideDecimal(O_DECIMAL64P15S10_100, O_DECIMAL64P15S10_100).getDecimal() .toString()); assertEquals("1", ScalarOperatorFunctions.divideDecimal(O_DECIMAL64P18S15_100, O_DECIMAL64P18S15_100).getDecimal() .toString()); assertEquals("1", ScalarOperatorFunctions.divideDecimal(O_DECIMAL128P30S2_100, O_DECIMAL128P30S2_100).getDecimal() .toString()); assertEquals("1", ScalarOperatorFunctions.divideDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getDecimal() .toString()); assertTrue(ScalarOperatorFunctions.divideDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getType() .isDecimalV3()); }
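divideDecimal guards against a zero divisor by returning a typed null constant rather than throwing, and the test above never hits that branch. A sketch of that case follows; the ConstantOperator.createDecimal factory and the isNull() accessor are assumptions not shown in the source:
@Test
public void divideDecimalByZeroReturnsNull() {
    // assumed factory: a decimal constant holding zero, typed like the fixture
    ConstantOperator zero = ConstantOperator.createDecimal(BigDecimal.ZERO, O_DECIMAL_100.getType());
    ConstantOperator result = ScalarOperatorFunctions.divideDecimal(O_DECIMAL_100, zero);
    // the focal method returns ConstantOperator.createNull(...) for a zero divisor
    assertTrue(result.isNull());
}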
public FEELFnResult<Boolean> invoke(@ParameterName( "string" ) String string, @ParameterName( "match" ) String match) { if ( string == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null")); } if ( match == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "match", "cannot be null")); } return FEELFnResult.ofResult( string.endsWith( match ) ); }
@Test void invokeNotEndsWith() { FunctionTestUtil.assertResult(endsWithFunction.invoke("test", "es"), false); FunctionTestUtil.assertResult(endsWithFunction.invoke("test", "ttttt"), false); FunctionTestUtil.assertResult(endsWithFunction.invoke("test", "estt"), false); FunctionTestUtil.assertResult(endsWithFunction.invoke("test", "tt"), false); }
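The focal method returns an InvalidParametersEvent error for null arguments, which the tests above never exercise; a sketch of both null guards, assuming FunctionTestUtil offers an assertResultError helper (not shown here):
@Test
void invokeNullParams() {
    // both branches of the null guard in the focal method (assertResultError is assumed)
    FunctionTestUtil.assertResultError(endsWithFunction.invoke(null, "test"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(endsWithFunction.invoke("test", null), InvalidParametersEvent.class);
}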
public NodeStatsResponse nodesStats() { return execute(() -> { Request request = new Request("GET", "/_nodes/stats/fs,process,jvm,indices,breaker"); Response response = restHighLevelClient.getLowLevelClient().performRequest(request); return NodeStatsResponse.toNodeStatsResponse(gson.fromJson(EntityUtils.toString(response.getEntity()), JsonObject.class)); }); }
@Test public void should_call_node_stats_api() throws Exception { HttpEntity entity = mock(HttpEntity.class); when(entity.getContent()).thenReturn(new ByteArrayInputStream(EXAMPLE_NODE_STATS_JSON.getBytes())); Response response = mock(Response.class); when(response.getEntity()).thenReturn(entity); when(restClient.performRequest(argThat(new RawRequestMatcher( "GET", "/_nodes/stats/fs,process,jvm,indices,breaker")))) .thenReturn(response); assertThat(underTest.nodesStats()).isNotNull(); }
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(String.format("%s(%s)", ORACLE_NUMBER, 1)); builder.dataType(ORACLE_NUMBER); builder.length(1L); break; case TINYINT: case SMALLINT: case INT: case BIGINT: builder.columnType(ORACLE_INTEGER); builder.dataType(ORACLE_INTEGER); break; case FLOAT: builder.columnType(ORACLE_BINARY_FLOAT); builder.dataType(ORACLE_BINARY_FLOAT); break; case DOUBLE: builder.columnType(ORACLE_BINARY_DOUBLE); builder.dataType(ORACLE_BINARY_DOUBLE); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", ORACLE_NUMBER, precision, scale)); builder.dataType(ORACLE_NUMBER); builder.precision(precision); builder.scale(scale); break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(ORACLE_BLOB); builder.dataType(ORACLE_BLOB); } else if (column.getColumnLength() <= MAX_RAW_LENGTH) { builder.columnType( String.format("%s(%s)", ORACLE_RAW, column.getColumnLength())); builder.dataType(ORACLE_RAW); } else { builder.columnType(ORACLE_BLOB); builder.dataType(ORACLE_BLOB); } break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType( String.format("%s(%s)", ORACLE_VARCHAR2, MAX_VARCHAR_LENGTH)); builder.dataType(ORACLE_VARCHAR2); } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", ORACLE_VARCHAR2, column.getColumnLength())); builder.dataType(ORACLE_VARCHAR2); } else { builder.columnType(ORACLE_CLOB); builder.dataType(ORACLE_CLOB); } break; case DATE: builder.columnType(ORACLE_DATE); builder.dataType(ORACLE_DATE); break; case TIMESTAMP: if (column.getScale() == null || column.getScale() <= 0) { builder.columnType(ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE); } else { int timestampScale = column.getScale(); if 
(column.getScale() > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType( String.format("TIMESTAMP(%s) WITH LOCAL TIME ZONE", timestampScale)); builder.scale(timestampScale); } builder.dataType(ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.ORACLE, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
@Test public void testReconvertFloat() { Column column = PhysicalColumn.builder().name("test").dataType(BasicType.FLOAT_TYPE).build(); BasicTypeDefine typeDefine = OracleTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals( OracleTypeConverter.ORACLE_BINARY_FLOAT, typeDefine.getColumnType()); Assertions.assertEquals(OracleTypeConverter.ORACLE_BINARY_FLOAT, typeDefine.getDataType()); }
@VisibleForTesting Optional<Xpp3Dom> getSpringBootRepackageConfiguration() { Plugin springBootPlugin = project.getPlugin("org.springframework.boot:spring-boot-maven-plugin"); if (springBootPlugin != null) { for (PluginExecution execution : springBootPlugin.getExecutions()) { if (execution.getGoals().contains("repackage")) { Xpp3Dom configuration = (Xpp3Dom) execution.getConfiguration(); if (configuration == null) { return Optional.of(new Xpp3Dom("configuration")); } boolean skip = Boolean.parseBoolean(getChildValue(configuration, "skip").orElse("false")); return skip ? Optional.empty() : Optional.of(configuration); } } } return Optional.empty(); }
@Test public void testGetSpringBootRepackageConfiguration_noExecutions() { when(mockMavenProject.getPlugin("org.springframework.boot:spring-boot-maven-plugin")) .thenReturn(mockPlugin); when(mockPlugin.getExecutions()).thenReturn(Collections.emptyList()); assertThat(mavenProjectProperties.getSpringBootRepackageConfiguration()).isEmpty(); }
@Override public <T> T attach(T detachedObject) { addExpireListener(commandExecutor); validateDetached(detachedObject); Class<T> entityClass = (Class<T>) detachedObject.getClass(); String idFieldName = getRIdFieldName(detachedObject.getClass()); Object id = ClassUtils.getField(detachedObject, idFieldName); return createLiveObject(entityClass, id); }
@Test public void test() { RLiveObjectService service = redisson.getLiveObjectService(); MyObject object = new MyObject(20L); try { service.attach(object); } catch (Exception e) { assertEquals("Non-null value is required for the field with RId annotation.", e.getMessage()); } }
public void registerWithStream(final long workerId, final List<String> storageTierAliases,
        final Map<String, Long> totalBytesOnTiers, final Map<String, Long> usedBytesOnTiers,
        final Map<BlockStoreLocation, List<Long>> currentBlocksOnLocation,
        final Map<String, List<String>> lostStorage, final List<ConfigProperty> configList)
        throws IOException {
    AtomicReference<IOException> ioe = new AtomicReference<>();
    // The retry logic only takes care of connection issues.
    // If the master side sends back an error,
    // no retry will be attempted and the worker will quit.
    retryRPC(() -> {
        // The gRPC stream lifecycle is managed internal to the RegisterStreamer
        // When an exception is thrown, the stream has been closed and error propagated
        // to the other side, so no extra handling is required here.
        RegisterStreamer stream = new RegisterStreamer(mAsyncClient,
            workerId, storageTierAliases, totalBytesOnTiers, usedBytesOnTiers,
            currentBlocksOnLocation, lostStorage, configList);
        try {
            stream.registerWithMaster();
        } catch (IOException e) {
            ioe.set(e);
        } catch (InterruptedException e) {
            ioe.set(new IOException(e));
        }
        return null;
    }, LOG, "Register", "workerId=%d", workerId);
    if (ioe.get() != null) {
        throw ioe.get();
    }
}
@Test public void registerWithStream() throws Exception { register(true); }
public <T extends AwsSyncClientBuilder> void applyHttpClientConfigurations(T builder) { if (Strings.isNullOrEmpty(httpClientType)) { httpClientType = CLIENT_TYPE_DEFAULT; } switch (httpClientType) { case CLIENT_TYPE_URLCONNECTION: UrlConnectionHttpClientConfigurations urlConnectionHttpClientConfigurations = loadHttpClientConfigurations(UrlConnectionHttpClientConfigurations.class.getName()); urlConnectionHttpClientConfigurations.configureHttpClientBuilder(builder); break; case CLIENT_TYPE_APACHE: ApacheHttpClientConfigurations apacheHttpClientConfigurations = loadHttpClientConfigurations(ApacheHttpClientConfigurations.class.getName()); apacheHttpClientConfigurations.configureHttpClientBuilder(builder); break; default: throw new IllegalArgumentException("Unrecognized HTTP client type " + httpClientType); } }
@Test public void testInvalidHttpClientType() { Map<String, String> properties = Maps.newHashMap(); properties.put(HttpClientProperties.CLIENT_TYPE, "test"); HttpClientProperties httpClientProperties = new HttpClientProperties(properties); S3ClientBuilder s3ClientBuilder = S3Client.builder(); assertThatThrownBy(() -> httpClientProperties.applyHttpClientConfigurations(s3ClientBuilder)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Unrecognized HTTP client type test"); }
@Override
protected File getFile(HandlerRequest<EmptyRequestBody> handlerRequest) {
    if (logDir == null) {
        return null;
    }
    // wrapping around another File instantiation is a simple way to remove any path information -
    // we're solely interested in the filename
    String filename =
        new File(handlerRequest.getPathParameter(LogFileNamePathParameter.class)).getName();
    return new File(logDir, filename);
}
@Test void testGetJobManagerCustomLogsValidFilenameWithInvalidPath() throws Exception { File actualFile = testInstance.getFile( createHandlerRequest(String.format("../%s", VALID_LOG_FILENAME))); assertThat(actualFile).isNotNull(); String actualContent = String.join("", Files.readAllLines(actualFile.toPath())); assertThat(actualContent).isEqualTo(VALID_LOG_CONTENT); }
@Override
void handle(Connection connection, DatabaseCharsetChecker.State state) throws SQLException {
    // PostgreSQL does not have concept of case-sensitive collation. Only charset ("encoding" in postgresql terminology)
    // must be verified.
    expectUtf8AsDefault(connection);

    if (state == DatabaseCharsetChecker.State.UPGRADE || state == DatabaseCharsetChecker.State.STARTUP) {
        // no need to check columns on fresh installs... as they are not supposed to exist!
        expectUtf8Columns(connection);
    }
}
@Test
public void schema_is_taken_into_account_when_selecting_columns() throws Exception {
    answerDefaultCharset("utf8");
    answerSchema("test-schema");
    answerColumns(asList(
        new String[] {TABLE_ISSUES, COLUMN_KEE, "utf8"},
        new String[] {TABLE_PROJECTS, COLUMN_NAME, "" /* unset -> uses db collation */}));

    // no error
    assertThatCode(() -> underTest.handle(connection, DatabaseCharsetChecker.State.UPGRADE))
        .doesNotThrowAnyException();

    verify(sqlExecutor).select(same(connection), eq("select table_name, column_name," +
        " collation_name " +
        "from information_schema.columns " +
        "where table_schema='test-schema' " +
        "and table_name in (" + SqTables.TABLES.stream().map(s -> "'" + s + "'").collect(Collectors.joining(",")) + ") " +
        "and udt_name='varchar' order by table_name, column_name"), any(SqlExecutor.StringsConverter.class));
}
public FEELFnResult<List> invoke(@ParameterName("list") List list, @ParameterName("start position") BigDecimal start) { return invoke( list, start, null ); }
@Test void invokeStartNegative() { FunctionTestUtil.assertResult(sublistFunction.invoke(Arrays.asList(1, 2, 3), BigDecimal.valueOf(-2)), Arrays.asList(2, 3)); FunctionTestUtil.assertResult(sublistFunction.invoke(Arrays.asList(1, "test", 3), BigDecimal.valueOf(-2)), Arrays.asList("test", 3)); FunctionTestUtil.assertResult(sublistFunction.invoke(Arrays.asList(1, "test", 3), BigDecimal.valueOf(-2), BigDecimal.ONE), Collections.singletonList("test")); }
public static Builder builder() { return new Builder(); }
@Test public void testBuilder() { StreamDataProducer producer = Mockito.mock(StreamDataProducer.class); PinotSourceDataGenerator generator = Mockito.mock(PinotSourceDataGenerator.class); PinotRealtimeSource realtimeSource = PinotRealtimeSource.builder().setTopic("mytopic").setProducer(producer).setGenerator(generator).build(); Assert.assertNotNull(realtimeSource); PinotStreamRateLimiter limiter = Mockito.mock(PinotStreamRateLimiter.class); ExecutorService executorService = Mockito.mock(ExecutorService.class); realtimeSource = PinotRealtimeSource.builder().setRateLimiter(limiter).setProducer(producer).setGenerator(generator) .setTopic("mytopic").setExecutor(executorService).setMaxMessagePerSecond(9527).build(); Assert.assertEquals(realtimeSource._executor, executorService); Assert.assertEquals(realtimeSource._producer, producer); Assert.assertEquals(realtimeSource._topicName, "mytopic"); String qps = realtimeSource._properties.get(PinotRealtimeSource.KEY_OF_MAX_MESSAGE_PER_SECOND).toString(); Assert.assertNotNull(qps); Assert.assertEquals(qps, "9527"); Assert.assertEquals(realtimeSource._rateLimiter, limiter); }
public MpUnReachNlri(List<BgpLSNlri> mpUnReachNlri, short afi, byte safi, int length) { this.mpUnReachNlri = mpUnReachNlri; this.isMpUnReachNlri = true; this.afi = afi; this.safi = safi; this.length = length; }
@Test
public void mpUnReachNlriTest() throws BgpParseException {
    // BGP flow spec Message
    byte[] flowSpecMsg = new byte[] {(byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
        0x00, 0x2b, 0x02, 0x00, 0x00, 0x00, 0x14, (byte) 0x90,
        0x0f, 0x00, 0x10, 0x00, 0x01, (byte) 0x85, 0x0c, 0x02,
        0x20, (byte) 0xc0, (byte) 0xa8, 0x07, 0x36, 0x03, (byte) 0x81,
        0x67, 0x04, (byte) 0x81, 0x01};

    byte[] testFsMsg;
    ChannelBuffer buffer = ChannelBuffers.dynamicBuffer();
    buffer.writeBytes(flowSpecMsg);

    BgpMessageReader<BgpMessage> reader = BgpFactories.getGenericReader();
    BgpMessage message;
    BgpHeader bgpHeader = new BgpHeader();
    message = reader.readFrom(buffer, bgpHeader);
    assertThat(message, instanceOf(BgpUpdateMsgVer4.class));

    ChannelBuffer buf = ChannelBuffers.dynamicBuffer();
    message.writeTo(buf);
    int readLen = buf.writerIndex();
    testFsMsg = new byte[readLen];
    buf.readBytes(testFsMsg, 0, readLen);
    assertThat(testFsMsg, is(flowSpecMsg));
}
@Override public void init(InitContext context) { String state = context.generateCsrfState(); OAuth20Service scribe = newScribeBuilder(context).build(scribeApi); String url = scribe.getAuthorizationUrl(state); context.redirectTo(url); }
@Test public void fail_to_init_when_disabled() { enableBitbucketAuthentication(false); OAuth2IdentityProvider.InitContext context = mock(OAuth2IdentityProvider.InitContext.class); assertThatThrownBy(() -> underTest.init(context)) .isInstanceOf(IllegalStateException.class) .hasMessage("Bitbucket authentication is disabled"); }
public static boolean isJsonValid(String schemaText, String jsonText) throws IOException { return isJsonValid(schemaText, jsonText, null); }
@Test
void testValidateJsonSuccess() {
    boolean valid = false;
    String schemaText = null;
    String jsonText = "{\"name\": \"307\", \"model\": \"Peugeot 307\", \"year\": 2003}";

    try {
        // Load schema from file.
        schemaText = FileUtils
            .readFileToString(new File("target/test-classes/io/github/microcks/util/openapi/car-schema.json"));
        // Validate Json according schema.
        valid = OpenAPISchemaValidator.isJsonValid(schemaText, jsonText);
    } catch (Exception e) {
        fail("Exception should not be thrown");
    }

    // Assert Json object is valid.
    assertTrue(valid);
}
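A complementary negative case makes the validator's failure mode visible. This sketch reuses the same car schema file; the assumption is that the schema types "year" as an integer, so a string value yields false rather than an exception:
@Test
void testValidateJsonFailure() {
    boolean valid = true;
    // "year" carries a string where the schema presumably expects an integer
    String jsonText = "{\"name\": \"307\", \"model\": \"Peugeot 307\", \"year\": \"not-a-number\"}";
    try {
        String schemaText = FileUtils
            .readFileToString(new File("target/test-classes/io/github/microcks/util/openapi/car-schema.json"));
        valid = OpenAPISchemaValidator.isJsonValid(schemaText, jsonText);
    } catch (Exception e) {
        fail("Exception should not be thrown");
    }
    assertFalse(valid);
}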
@Override
protected boolean isNan(Long number) {
    // NaN never applies here because only types like Float and Double have NaN
    return false;
}
@Test
void testIsNan() {
    LongSummaryAggregator ag = new LongSummaryAggregator();
    // always false for Long
    assertThat(ag.isNan(-1L)).isFalse();
    assertThat(ag.isNan(0L)).isFalse();
    assertThat(ag.isNan(23L)).isFalse();
    assertThat(ag.isNan(Long.MAX_VALUE)).isFalse();
    assertThat(ag.isNan(Long.MIN_VALUE)).isFalse();
    assertThat(ag.isNan(null)).isFalse();
}
@Override
public <T> T clone(T object) {
    if (object instanceof String) {
        return object;
    } else if (object instanceof Collection) {
        Object firstElement = findFirstNonNullElement((Collection) object);
        if (firstElement != null && !(firstElement instanceof Serializable)) {
            JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
            return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
        }
    } else if (object instanceof Map) {
        Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
        if (firstEntry != null) {
            Object key = firstEntry.getKey();
            Object value = firstEntry.getValue();
            if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
                JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
                return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
            }
        }
    } else if (object instanceof JsonNode) {
        return (T) ((JsonNode) object).deepCopy();
    }
    if (object instanceof Serializable) {
        try {
            return (T) SerializationHelper.clone((Serializable) object);
        } catch (SerializationException e) {
            // it is possible that object itself implements java.io.Serializable, but underlying structure does not
            // in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
        }
    }
    return jsonClone(object);
}
@Test public void should_clone_serializable_complex_object_with_serializable_nested_object() { Map<String, List<SerializableObject>> map = new LinkedHashMap<>(); map.put("key1", Lists.newArrayList(new SerializableObject("name1"))); map.put("key2", Lists.newArrayList( new SerializableObject("name2"), new SerializableObject("name3") )); Object original = new SerializableComplexObject(map); Object cloned = serializer.clone(original); assertEquals(original, cloned); assertNotSame(original, cloned); }
public Optional<Map<String, ParamDefinition>> getDefaultParamsForType(StepType stepType) { Map<String, ParamDefinition> defaults = defaultTypeParams.get(stepType.toString().toLowerCase(Locale.US)); if (defaults != null) { return Optional.of(preprocessParams(defaults)); } else { return Optional.empty(); } }
@Test public void testStepTypeParamsMutate() { defaultParamManager .getDefaultParamsForType(StepType.FOREACH) .get() .put("TEST", ParamDefinition.buildParamDefinition("TEST", "123")); assertNull(defaultParamManager.getDefaultParamsForType(StepType.FOREACH).get().get("TEST")); }
@SuppressWarnings("unchecked") @Override public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnException, IOException { NodeStatus remoteNodeStatus = request.getNodeStatus(); /** * Here is the node heartbeat sequence... * 1. Check if it's a valid (i.e. not excluded) node * 2. Check if it's a registered node * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat * 4. Send healthStatus to RMNode * 5. Update node's labels if distributed Node Labels configuration is enabled */ NodeId nodeId = remoteNodeStatus.getNodeId(); // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is // in decommissioning. if (!this.nodesListManager.isValidNode(nodeId.getHost()) && !isNodeInDecommissioning(nodeId)) { String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: " + nodeId.getHost(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } // 2. Check if it's a registered node RMNode rmNode = this.rmContext.getRMNodes().get(nodeId); if (rmNode == null) { /* node does not exist */ String message = "Node not found resyncing " + remoteNodeStatus.getNodeId(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Send ping this.nmLivelinessMonitor.receivedPing(nodeId); this.decommissioningWatcher.update(rmNode, remoteNodeStatus); // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse(); if (getNextResponseId( remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse .getResponseId()) { LOG.info("Received duplicate heartbeat from node " + rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId()); return lastNodeHeartbeatResponse; } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse .getResponseId()) { String message = "Too far behind rm response id:" + lastNodeHeartbeatResponse.getResponseId() + " nm response id:" + remoteNodeStatus.getResponseId(); LOG.info(message); // TODO: Just sending reboot is not enough. Think more. this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING)); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED. if (rmNode.getState() == NodeState.DECOMMISSIONING && decommissioningWatcher.checkReadyToBeDecommissioned( rmNode.getNodeID())) { String message = "DECOMMISSIONING " + nodeId + " is ready to be decommissioned"; LOG.info(message); this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION)); this.nmLivelinessMonitor.unregister(nodeId); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } if (timelineServiceV2Enabled) { // Check & update collectors info from request. 
updateAppCollectorsMap(request); } // Heartbeat response long newInterval = nextHeartBeatInterval; if (heartBeatIntervalScalingEnable) { newInterval = rmNode.calculateHeartBeatInterval( nextHeartBeatInterval, heartBeatIntervalMin, heartBeatIntervalMax, heartBeatIntervalSpeedupFactor, heartBeatIntervalSlowdownFactor); } NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils.newNodeHeartbeatResponse( getNextResponseId(lastNodeHeartbeatResponse.getResponseId()), NodeAction.NORMAL, null, null, null, null, newInterval); rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse); populateKeys(request, nodeHeartBeatResponse); populateTokenSequenceNo(request, nodeHeartBeatResponse); if (timelineServiceV2Enabled) { // Return collectors' map that NM needs to know setAppCollectorsMapToResponse(rmNode.getRunningApps(), nodeHeartBeatResponse); } // 4. Send status to RMNode, saving the latest response. RMNodeStatusEvent nodeStatusEvent = new RMNodeStatusEvent(nodeId, remoteNodeStatus); if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) { nodeStatusEvent.setLogAggregationReportsForApps(request .getLogAggregationReportsForApps()); } this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent); // 5. Update node's labels to RM's NodeLabelManager. if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) { try { updateNodeLabelsFromNMReport( NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage()); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false); } } // 6. check if node's capacity is load from dynamic-resources.xml // if so, send updated resource back to NM. String nid = nodeId.toString(); Resource capability = loadNodeResourceFromDRConfiguration(nid); // sync back with new resource if not null. if (capability != null) { nodeHeartBeatResponse.setResource(capability); } // Check if we got an event (AdminService) that updated the resources if (rmNode.isUpdatedCapability()) { nodeHeartBeatResponse.setResource(rmNode.getTotalCapability()); rmNode.resetUpdatedCapability(); } // 7. Send Container Queuing Limits back to the Node. This will be used by // the node to truncate the number of Containers queued for execution. if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) { nodeHeartBeatResponse.setContainerQueuingLimit( this.rmContext.getNodeManagerQueueLimitCalculator() .createContainerQueuingLimit()); } // 8. Get node's attributes and update node-to-attributes mapping // in RMNodeAttributeManager. if (request.getNodeAttributes() != null) { try { // update node attributes if necessary then update heartbeat response updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes()); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response String errorMsg = nodeHeartBeatResponse.getDiagnosticsMessage() == null ? ex.getMessage() : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex .getMessage(); nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false); } } return nodeHeartBeatResponse; }
@Test
public void testReconnectNode() throws Exception {
    rm = new MockRM() {
        @Override
        protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
            return new EventDispatcher<SchedulerEvent>(this.scheduler, this.scheduler.getClass().getName()) {
                @Override
                public void handle(SchedulerEvent event) {
                    scheduler.handle(event);
                }
            };
        }
    };
    rm.start();

    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    MockNM nm2 = rm.registerNode("host2:5678", 5120);
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(false);
    rm.drainEvents();
    checkUnhealthyNMCount(rm, nm2, true, 1);
    final int expectedNMs = ClusterMetrics.getMetrics().getNumActiveNMs();
    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
    // TODO Metrics incorrect in case of the FifoScheduler
    Assert.assertEquals(5120, metrics.getAvailableMB());

    // reconnect of healthy node
    nm1 = rm.registerNode("host1:1234", 5120);
    NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
    Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
    rm.drainEvents();
    Assert.assertEquals(expectedNMs, ClusterMetrics.getMetrics().getNumActiveNMs());
    checkUnhealthyNMCount(rm, nm2, true, 1);

    // reconnect of unhealthy node
    nm2 = rm.registerNode("host2:5678", 5120);
    response = nm2.nodeHeartbeat(false);
    Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
    rm.drainEvents();
    Assert.assertEquals(expectedNMs, ClusterMetrics.getMetrics().getNumActiveNMs());
    checkUnhealthyNMCount(rm, nm2, true, 1);

    // unhealthy node changed back to healthy
    nm2 = rm.registerNode("host2:5678", 5120);
    response = nm2.nodeHeartbeat(true);
    response = nm2.nodeHeartbeat(true);
    rm.drainEvents();
    Assert.assertEquals(5120 + 5120, metrics.getAvailableMB());

    // reconnect of node with changed capability
    nm1 = rm.registerNode("host2:5678", 10240);
    response = nm1.nodeHeartbeat(true);
    rm.drainEvents();
    Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
    Assert.assertEquals(5120 + 10240, metrics.getAvailableMB());

    // reconnect of node with changed capability and running applications
    List<ApplicationId> runningApps = new ArrayList<ApplicationId>();
    runningApps.add(ApplicationId.newInstance(1, 0));
    nm1 = rm.registerNode("host2:5678", 15360, 2, runningApps);
    response = nm1.nodeHeartbeat(true);
    rm.drainEvents();
    Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
    Assert.assertEquals(5120 + 15360, metrics.getAvailableMB());

    // reconnect healthy node changing http port
    nm1 = new MockNM("host1:1234", 5120, rm.getResourceTrackerService());
    nm1.setHttpPort(3);
    nm1.registerNode();
    response = nm1.nodeHeartbeat(true);
    response = nm1.nodeHeartbeat(true);
    rm.drainEvents();
    RMNode rmNode = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    Assert.assertEquals(3, rmNode.getHttpPort());
    Assert.assertEquals(5120, rmNode.getTotalCapability().getMemorySize());
    Assert.assertEquals(5120 + 15360, metrics.getAvailableMB());
}
public static byte[] readFileBytes(File file) { if (file.exists()) { String result = readFile(file); if (result != null) { return ByteUtils.toBytes(result); } } return null; }
@Test void testReadFileBytesWithPath() { assertNotNull(DiskUtils.readFileBytes(testFile.getParent(), testFile.getName())); }
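A hedged companion sketch for the single-File overload shown in the focal method above; it reuses the testFile fixture from the previous test, and the missing-file name is illustrative.

@Test void testReadFileBytesWithFile() { // Per the focal method, a non-existent file yields null. assertNull(DiskUtils.readFileBytes(new java.io.File("no-such-file-" + System.nanoTime()))); // And an existing fixture file yields non-null content (assumes testFile exists on disk). assertNotNull(DiskUtils.readFileBytes(testFile)); }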
public HtmlCreator() { html.append("<!DOCTYPE html>"); html.append("<head>"); html.append("<meta charset=\"utf-8\">"); html.append("<style type='text/css'>.version{padding:10px;text-decoration-line: none;}.message-header{" + "background-color: #900C3F;\n" + "color: #fff;\n" + "font-weight: bold;" + "-webkit-box-align: center;\n" + "-ms-flex-align: center;\n" + "align-items: center;\n" + "color: #fff;\n" + "display: -webkit-box;\n" + "display: -ms-flexbox;\n" + "display: flex;\n" + "-webkit-box-pack: justify;\n" + "-ms-flex-pack: justify;\n" + "justify-content: space-between;\n" + "line-height: 1.25;\n" + "padding: 0.5em 0.75em;\n" + "position: relative;" + "}" + ".message-body{" + "background-color: #fff5f7;" + "font-size:1rem;" + "border-color: #ff3860;\n" + "color: #900C3F;\n" + "border-top-left-radius: 0;\n" + "border-top-right-radius: 0;\n" + "border-top: none;\n" + "border: 1px solid #dbdbdb;\n" + "border-radius: 3px;\n" + "padding: 1em 1.25em;" + "}</style>"); }
@Test public void testHtmlCreator() { String html = htmlCreator.html(); Assert.assertTrue(html.contains("Blade")); }
@Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } PurgeableAnalysisDto that = (PurgeableAnalysisDto) o; return analysisUuid.equals(that.analysisUuid); }
@Test void testEquals() { PurgeableAnalysisDto dto1 = new PurgeableAnalysisDto().setAnalysisUuid("u3"); PurgeableAnalysisDto dto2 = new PurgeableAnalysisDto().setAnalysisUuid("u4"); assertThat(dto1.equals(dto2)).isFalse(); assertThat(dto2.equals(dto1)).isFalse(); assertThat(dto1.equals(dto1)).isTrue(); assertThat(dto1.equals(new PurgeableAnalysisDto().setAnalysisUuid("u3"))).isTrue(); assertThat(dto1.equals("bi_bop_a_lou_la")).isFalse(); assertThat(dto1.equals(null)).isFalse(); }
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext, final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) { SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext(); SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof TCLStatement) { return new ShardingDatabaseBroadcastRoutingEngine(); } if (sqlStatement instanceof DDLStatement) { if (sqlStatementContext instanceof CursorAvailable) { return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props); } return getDDLRoutingEngine(shardingRule, database, sqlStatementContext); } if (sqlStatement instanceof DALStatement) { return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext); } if (sqlStatement instanceof DCLStatement) { return getDCLRoutingEngine(shardingRule, database, sqlStatementContext); } return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext); }
@Test void assertNewInstanceForOptimizeTableWithShardingTable() { MySQLOptimizeTableStatement optimizeTableStatement = mock(MySQLOptimizeTableStatement.class); when(sqlStatementContext.getSqlStatement()).thenReturn(optimizeTableStatement); tableNames.add("table_1"); when(shardingRule.getShardingRuleTableNames(tableNames)).thenReturn(tableNames); QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)); ShardingRouteEngine actual = ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet)); assertThat(actual, instanceOf(ShardingTableBroadcastRoutingEngine.class)); }
@Override public Result apply(ApplyNode applyNode, Captures captures, Context context) { if (applyNode.getMayParticipateInAntiJoin()) { return Result.empty(); } Assignments subqueryAssignments = applyNode.getSubqueryAssignments(); if (subqueryAssignments.size() != 1) { return Result.empty(); } RowExpression expression = getOnlyElement(subqueryAssignments.getExpressions()); if (!(expression instanceof InSubqueryExpression)) { return Result.empty(); } InSubqueryExpression inPredicate = (InSubqueryExpression) expression; VariableReferenceExpression inPredicateOutputVariable = getOnlyElement(subqueryAssignments.getVariables()); PlanNode leftInput = applyNode.getInput(); // Add a unique id column if the set of columns does not already form a unique key if (!((GroupReference) leftInput).getLogicalProperties().isPresent() || !((GroupReference) leftInput).getLogicalProperties().get().isDistinct(ImmutableSet.copyOf(leftInput.getOutputVariables()))) { VariableReferenceExpression uniqueKeyVariable = context.getVariableAllocator().newVariable("unique", BIGINT); leftInput = new AssignUniqueId( applyNode.getSourceLocation(), context.getIdAllocator().getNextId(), leftInput, uniqueKeyVariable); } VariableReferenceExpression leftVariableReference = inPredicate.getValue(); VariableReferenceExpression rightVariableReference = inPredicate.getSubquery(); JoinNode innerJoin = new JoinNode( applyNode.getSourceLocation(), context.getIdAllocator().getNextId(), JoinType.INNER, leftInput, applyNode.getSubquery(), ImmutableList.of(new EquiJoinClause( leftVariableReference, rightVariableReference)), ImmutableList.<VariableReferenceExpression>builder() .addAll(leftInput.getOutputVariables()) .build(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), ImmutableMap.of()); AggregationNode distinctNode = new AggregationNode( innerJoin.getSourceLocation(), context.getIdAllocator().getNextId(), innerJoin, ImmutableMap.of(), singleGroupingSet(ImmutableList.<VariableReferenceExpression>builder() .addAll(innerJoin.getOutputVariables()) .build()), ImmutableList.of(), SINGLE, Optional.empty(), Optional.empty(), Optional.empty()); ImmutableList<VariableReferenceExpression> referencedOutputs = ImmutableList.<VariableReferenceExpression>builder() .addAll(applyNode.getInput().getOutputVariables()) .add(inPredicateOutputVariable) .build(); ProjectNode finalProjectNode = new ProjectNode( context.getIdAllocator().getNextId(), distinctNode, Assignments.builder() .putAll(identityAssignments(distinctNode.getOutputVariables())) .put(inPredicateOutputVariable, TRUE_CONSTANT) .build() .filter(referencedOutputs)); return Result.ofPlanNode(finalProjectNode); }
@Test public void testDoesNotFiresForInPredicateThatMayParticipateInAntiJoin() { tester().assertThat(new TransformUncorrelatedInPredicateSubqueryToDistinctInnerJoin()) .on(p -> p.apply( assignment( p.variable("x"), inSubquery(p.variable("y"), p.variable("z"))), emptyList(), p.values(p.variable("y")), p.values(p.variable("z")), true)) .doesNotFire(); }
@Override public String named() { return PluginEnum.GRPC.getName(); }
@Test public void testNamed() { final String result = grpcPlugin.named(); assertEquals(PluginEnum.GRPC.getName(), result); }
static void createCompactedTopic(String topicName, short partitions, short replicationFactor, Admin admin) { NewTopic topicDescription = TopicAdmin.defineTopic(topicName). compacted(). partitions(partitions). replicationFactor(replicationFactor). build(); CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false); try { admin.createTopics(singleton(topicDescription), args).values().get(topicName).get(); log.info("Created topic '{}'", topicName); } catch (InterruptedException e) { Thread.interrupted(); throw new ConnectException("Interrupted while attempting to create/find topic '" + topicName + "'", e); } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof TopicExistsException) { log.debug("Unable to create topic '{}' since it already exists.", topicName); return; } if (cause instanceof UnsupportedVersionException) { log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." + " Falling back to assume topic exists or will be auto-created by the broker.", topicName); return; } if (cause instanceof TopicAuthorizationException) { log.debug("Not authorized to create topic(s) '{}' upon the brokers." + " Falling back to assume topic(s) exist or will be auto-created by the broker.", topicName); return; } if (cause instanceof ClusterAuthorizationException) { log.debug("Not authorized to create topic '{}'." + " Falling back to assume topic exists or will be auto-created by the broker.", topicName); return; } if (cause instanceof InvalidConfigurationException) { throw new ConnectException("Unable to create topic '" + topicName + "': " + cause.getMessage(), cause); } if (cause instanceof TimeoutException) { // Timed out waiting for the operation to complete throw new ConnectException("Timed out while checking for or creating topic '" + topicName + "'." + " This could indicate a connectivity issue, unavailable topic partitions, or if" + " this is your first use of the topic it may have taken too long to create.", cause); } throw new ConnectException("Error while attempting to create/find topic '" + topicName + "'", e); } }
@Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithTopicAuthorizationException() throws Exception { Map<String, KafkaFuture<Void>> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new TopicAuthorizationException("not authorised"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); MirrorUtils.createCompactedTopic(TOPIC, (short) 1, (short) 1, admin); verify(future).get(); verify(ctr).values(); verify(admin).createTopics(any(), any()); }
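A hedged companion sketch mirroring the test above for the ClusterAuthorizationException branch of the focal method; it assumes the same mock fixture fields (future, ctr, admin, TOPIC).

@Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithClusterAuthorizationException() throws Exception { // The focal method swallows ClusterAuthorizationException the same way it swallows // TopicAuthorizationException, so no exception should propagate here. Map<String, KafkaFuture<Void>> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new ClusterAuthorizationException("not authorised"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); MirrorUtils.createCompactedTopic(TOPIC, (short) 1, (short) 1, admin); verify(future).get(); verify(ctr).values(); verify(admin).createTopics(any(), any()); }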
@Override public CloseableIterator<String> readScannerLogs() { ensureInitialized(); File file = delegate.getFileStructure().analysisLog(); if (!file.exists()) { return CloseableIterator.emptyCloseableIterator(); } try { InputStreamReader reader = new InputStreamReader(FileUtils.openInputStream(file), UTF_8); return new LineReaderIterator(reader); } catch (IOException e) { throw new IllegalStateException("Fail to open file " + file, e); } }
@Test public void readScannerLogs() throws IOException { File scannerLogFile = writer.getFileStructure().analysisLog(); FileUtils.write(scannerLogFile, "log1\nlog2"); CloseableIterator<String> logs = underTest.readScannerLogs(); assertThat(logs).toIterable().containsExactly("log1", "log2"); }
public Result parse(final String string) throws DateNotParsableException { return this.parse(string, new Date()); }
@Test public void testParseToday() throws Exception { DateTime reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 09:45:23"); DateTime startOfToday = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 00:00:00"); NaturalDateParser.Result result = naturalDateParser.parse("today", reference.toDate()); assertThat(result.getFrom()).as("should be equal to").isEqualTo(startOfToday); assertThat(result.getTo()).as("should differ from").isNotEqualTo(startOfToday); }
@Override public boolean isAdded(Component component) { checkComponent(component); if (analysisMetadataHolder.isFirstAnalysis()) { return true; } return addedComponents.contains(component); }
@Test public void isAdded_returns_true_for_any_component_type_on_first_analysis() { when(analysisMetadataHolder.isFirstAnalysis()).thenReturn(true); Arrays.stream(Component.Type.values()).forEach(type -> { Component component = newComponent(type); assertThat(underTest.isAdded(component)).isTrue(); }); }
@Override public final void remove() { try { doRemove(); } catch (RuntimeException e) { close(); throw e; } }
@Test public void remove_is_not_supported_by_default() { SimpleCloseableIterator it = new SimpleCloseableIterator(); try { it.remove(); fail(); } catch (UnsupportedOperationException expected) { assertThat(it.isClosed).isTrue(); } }
public static boolean isJavaVersionAtLeast(int version) { return JAVA_SPEC_VER >= version; }
@Test public void testIsJavaVersionAtLeast() { assertTrue(Shell.isJavaVersionAtLeast(8)); }
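A minimal boundary sketch for the focal method above; no released JVM reports a spec version as high as Integer.MAX_VALUE, so the comparison against JAVA_SPEC_VER must be false.

@Test public void testIsJavaVersionAtLeastUpperBound() { // The running JVM's spec version can never reach this bound. assertFalse(Shell.isJavaVersionAtLeast(Integer.MAX_VALUE)); }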
public void createGroupTombstoneRecords( String groupId, List<CoordinatorRecord> records ) { // At this point, we have already validated the group id, so we know that the group exists and that no exception will be thrown. createGroupTombstoneRecords(group(groupId), records); }
@Test public void testConsumerGroupDelete() { String groupId = "group-id"; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)) .build(); List<CoordinatorRecord> expectedRecords = Arrays.asList( GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId) ); List<CoordinatorRecord> records = new ArrayList<>(); context.groupMetadataManager.createGroupTombstoneRecords("group-id", records); assertEquals(expectedRecords, records); }
@Override public void replayOnAborted(TransactionState txnState) { writeLock(); try { replayTxnAttachment(txnState); failMsg = new FailMsg(FailMsg.CancelType.LOAD_RUN_FAIL, txnState.getReason()); finishTimestamp = txnState.getFinishTime(); state = JobState.CANCELLED; if (retryTime <= 0 || !failMsg.getMsg().contains("timeout") || !isTimeout()) { GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().getCallbackFactory().removeCallback(id); return; } retryTime--; } finally { writeUnlock(); } }
@Test public void testReplayOnAbortedAfterFailure(@Injectable TransactionState txnState, @Injectable LoadJobFinalOperation attachment, @Injectable FailMsg failMsg) { BrokerLoadJob brokerLoadJob = new BrokerLoadJob(); brokerLoadJob.setId(1); GlobalTransactionMgr globalTxnMgr = GlobalStateMgr.getCurrentState().getGlobalTransactionMgr(); globalTxnMgr.getCallbackFactory().addCallback(brokerLoadJob); // 1. The job will be kept when the failure is a timeout new Expectations() { { txnState.getTxnCommitAttachment(); minTimes = 0; result = attachment; txnState.getReason(); minTimes = 0; result = "load timeout"; } }; brokerLoadJob.replayOnAborted(txnState); TxnStateChangeCallback callback = globalTxnMgr.getCallbackFactory().getCallback(1); Assert.assertNotNull(callback); // 2. The job will be discarded when the failure isn't a timeout new Expectations() { { txnState.getTxnCommitAttachment(); minTimes = 0; result = attachment; txnState.getReason(); minTimes = 0; result = "load_run_fail"; } }; brokerLoadJob.replayOnAborted(txnState); callback = globalTxnMgr.getCallbackFactory().getCallback(1); Assert.assertNull(callback); }
@Override public void updatePort(K8sPort port) { checkNotNull(port, ERR_NULL_PORT); checkArgument(!Strings.isNullOrEmpty(port.portId()), ERR_NULL_PORT_ID); checkArgument(!Strings.isNullOrEmpty(port.networkId()), ERR_NULL_PORT_NET_ID); k8sNetworkStore.updatePort(port); log.info(String.format(MSG_PORT, port.portId(), MSG_UPDATED)); }
@Test(expected = NullPointerException.class) public void testUpdateNullPort() { target.updatePort(null); }
@Override public void deleteFile(String location) { Preconditions.checkState(!closed, "Cannot call deleteFile after calling close()"); if (null == IN_MEMORY_FILES.remove(location)) { throw new NotFoundException("No in-memory file found for location: %s", location); } }
@Test public void testDeleteFileNotFound() { InMemoryFileIO fileIO = new InMemoryFileIO(); assertThatExceptionOfType(NotFoundException.class) .isThrownBy(() -> fileIO.deleteFile("s3://nonexistent/file")); }
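A hedged sketch for the closed-state guard in the focal method, assuming InMemoryFileIO.close() sets the internal closed flag that the Preconditions check reads.

@Test public void testDeleteFileAfterClose() { // Assumption: close() flips the 'closed' flag guarded by Preconditions.checkState. InMemoryFileIO fileIO = new InMemoryFileIO(); fileIO.close(); assertThatIllegalStateException() .isThrownBy(() -> fileIO.deleteFile("s3://foo/bar")) .withMessageContaining("close()"); }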
@Override public AppAttemptInfo getAppAttempt(HttpServletRequest req, HttpServletResponse res, String appId, String appAttemptId) { // Check that the appId/appAttemptId format is accurate try { RouterServerUtil.validateApplicationAttemptId(appAttemptId); } catch (IllegalArgumentException e) { routerMetrics.incrAppAttemptReportFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APP_ATTEMPT, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); throw e; } // Call the getAppAttempt method try { long startTime = Time.now(); DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByAppId(appId); AppAttemptInfo appAttemptInfo = interceptor.getAppAttempt(req, res, appId, appAttemptId); if (appAttemptInfo != null) { long stopTime = Time.now(); RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_APP_ATTEMPT, TARGET_WEB_SERVICE); routerMetrics.succeededAppAttemptReportRetrieved(stopTime - startTime); return appAttemptInfo; } } catch (IllegalArgumentException e) { routerMetrics.incrAppAttemptReportFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APP_ATTEMPT, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); RouterServerUtil.logAndThrowRunTimeException(e, "Unable to getAppAttempt by appId: %s, appAttemptId: %s.", appId, appAttemptId); } catch (YarnException e) { routerMetrics.incrAppAttemptReportFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APP_ATTEMPT, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); RouterServerUtil.logAndThrowRunTimeException(e, "getAppAttempt error, appId: %s, appAttemptId: %s.", appId, appAttemptId); } routerMetrics.incrAppAttemptReportFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APP_ATTEMPT, UNKNOWN, TARGET_WEB_SERVICE, "getAppAttempt failed."); throw RouterServerUtil.logAndReturnRunTimeException( "getAppAttempt failed, appId: %s, appAttemptId: %s.", appId, appAttemptId); }
@Test public void testGetAppAttempt() throws IOException, InterruptedException { // Generate ApplicationId information ApplicationId appId = ApplicationId.newInstance(Time.now(), 1); ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo(); context.setApplicationId(appId.toString()); // Generate ApplicationAttemptId information Assert.assertNotNull(interceptor.submitApplication(context, null)); ApplicationAttemptId expectAppAttemptId = ApplicationAttemptId.newInstance(appId, 1); String appAttemptId = expectAppAttemptId.toString(); org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo appAttemptInfo = interceptor.getAppAttempt(null, null, appId.toString(), appAttemptId); Assert.assertNotNull(appAttemptInfo); Assert.assertEquals(expectAppAttemptId.toString(), appAttemptInfo.getAppAttemptId()); Assert.assertEquals("url", appAttemptInfo.getTrackingUrl()); Assert.assertEquals("oUrl", appAttemptInfo.getOriginalTrackingUrl()); Assert.assertEquals(124, appAttemptInfo.getRpcPort()); Assert.assertEquals("host", appAttemptInfo.getHost()); }
public static String calculateSha256Hex(@Nonnull byte[] data) throws NoSuchAlgorithmException { return calculateSha256Hex(data, data.length); }
@Test public void testLeadingZero() throws Exception { byte[] data = {-103, -109, 6, 90, -72, 68, 41, 7, -45, 42, 12, -38, -50, 123, -100, 102, 95, 65, 5, 30, 64, 85, 126, -26, 5, 54, 18, -98, -85, -101, 109, -91}; String result = Sha256Util.calculateSha256Hex(data); assertEquals("07b18fecd4bcb1a726fbab1bd4c017e57e20f6f962a342789c57e531667f603b", result); }
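A hedged companion sketch using the well-known SHA-256 digest of the empty byte sequence; it assumes the helper emits lowercase hex, as the test above suggests.

@Test public void testEmptyInput() throws Exception { // SHA-256 of zero bytes is a fixed, published value. String result = Sha256Util.calculateSha256Hex(new byte[0]); assertEquals("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", result); }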
public static void flip(Buffer buffer) { buffer.flip(); }
@Test public void testFlip() { ByteBuffer byteBuffer = ByteBuffer.allocate(4); byteBuffer.putInt(1); Assertions.assertDoesNotThrow(() -> BufferUtils.flip(byteBuffer)); }
public static void publishPriceUpdate(String topicArn, String payload, String groupId) { try { // Create and publish a message that updates the wholesale price. String subject = "Price Update"; String dedupId = UUID.randomUUID().toString(); String attributeName = "business"; String attributeValue = "wholesale"; MessageAttributeValue msgAttValue = MessageAttributeValue.builder() .dataType("String") .stringValue(attributeValue) .build(); Map<String, MessageAttributeValue> attributes = new HashMap<>(); attributes.put(attributeName, msgAttValue); PublishRequest pubRequest = PublishRequest.builder() .topicArn(topicArn) .subject(subject) .message(payload) .messageGroupId(groupId) .messageDeduplicationId(dedupId) .messageAttributes(attributes) .build(); final PublishResponse response = snsClient.publish(pubRequest); System.out.println(response.messageId()); System.out.println(response.sequenceNumber()); System.out.println("Message was published to " + topicArn); } catch (SnsException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } }
@Test @Tag("IntegrationTest") void publishPriceUpdateTest() { String topicName = "MyTestTopic.fifo"; String wholesaleQueueName = "wholesaleQueue.fifo"; String retailQueueName = "retailQueue.fifo"; String analyticsQueueName = "analyticsQueue"; List<PriceUpdateExample.QueueData> queues = List.of( new PriceUpdateExample.QueueData(wholesaleQueueName, PriceUpdateExample.QueueType.FIFO), new PriceUpdateExample.QueueData(retailQueueName, PriceUpdateExample.QueueType.FIFO), new PriceUpdateExample.QueueData(analyticsQueueName, PriceUpdateExample.QueueType.Standard)); createQueues(queues); String topicARN = createFIFOTopic(topicName); subscribeQueues(queues, topicARN); addAccessPolicyToQueuesFINAL(queues, topicARN); publishPriceUpdate(topicARN, "{\"product\": 214, \"price\": 79.99}", "Consumables"); // Assert that each queue received the message published queues.forEach(queue -> { queue.testMessage = sqsClient.receiveMessage(builder -> builder.queueUrl(queue.queueURL).maxNumberOfMessages(1)).messages().get(0); Assertions.assertNotNull(queue.testMessage); }); deleteSubscriptions(queues); deleteQueues(queues); deleteTopic(topicARN); }
@Override public boolean add(final Long value) { return add(value.longValue()); }
@Test public void setsWithTheSameValuesAreEqual() { final LongHashSet other = new LongHashSet(100, -1); set.add(1); set.add(1001); other.add(1); other.add(1001); assertEquals(set, other); }
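A hedged companion sketch checking the hashCode side of the contract implied by the test above; it assumes LongHashSet overrides hashCode consistently with equals.

@Test public void equalSetsShareTheSameHashCode() { // General equals/hashCode contract: equal sets must report equal hash codes. final LongHashSet other = new LongHashSet(100, -1); set.add(1); set.add(1001); other.add(1001); other.add(1); assertEquals(set, other); assertEquals(set.hashCode(), other.hashCode()); }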
public static List<PKafkaOffsetProxyResult> getBatchOffsets(List<PKafkaOffsetProxyRequest> requests) throws UserException { return PROXY_API.getBatchOffsets(requests); }
@Test public void testGetInfoRpcException() throws UserException, RpcException { Backend backend = new Backend(1L, "127.0.0.1", 9050); backend.setBeRpcPort(8060); backend.setAlive(true); new Expectations() { { service.getBackendOrComputeNode(anyLong); result = backend; client.getInfo((TNetworkAddress) any, (PProxyRequest) any); result = new RpcException("rpc failed"); } }; KafkaUtil.ProxyAPI api = new KafkaUtil.ProxyAPI(); LoadException e = Assert.assertThrows(LoadException.class, () -> api.getBatchOffsets(null)); Assert.assertTrue(e.getMessage().contains("err: rpc failed")); }
public static Select select(String fieldName) { return new Select(fieldName); }
@Test void matches() { String q = Q.select("*") .from("sd1") .where("f1").matches("v1") .and("f2").matches("v2") .or("f3").matches("v3") .andnot("f4").matches("v4") .build(); assertEquals("yql=select * from sd1 where f1 matches \"v1\" and f2 matches \"v2\" or f3 matches \"v3\" and !(f4 matches \"v4\")", q); }
public byte[] encode(String val, String delimiters) { return codecs[0].encode(val); }
@Test public void testEncodeArabicPersonName() { assertArrayEquals(ARABIC_PERSON_NAME_BYTE, iso8859_6().encode(ARABIC_PERSON_NAME, PN_DELIMS)); }
public void setLocalTaskQueueCapacity(int localTaskQueueCapacity) { this.localTaskQueueCapacity = checkPositive(localTaskQueueCapacity, "localTaskQueueCapacity"); }
@Test public void test_setLocalTaskQueueCapacity_whenNegative() { ReactorBuilder builder = newBuilder(); assertThrows(IllegalArgumentException.class, () -> builder.setLocalTaskQueueCapacity(-1)); }
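A hedged companion sketch for the zero boundary, assuming checkPositive rejects zero as well as negative values.

@Test public void test_setLocalTaskQueueCapacity_whenZero() { // Assumption: checkPositive requires a strictly positive value. ReactorBuilder builder = newBuilder(); assertThrows(IllegalArgumentException.class, () -> builder.setLocalTaskQueueCapacity(0)); }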
public String toString(Object datum) { StringBuilder buffer = new StringBuilder(); toString(datum, buffer, new IdentityHashMap<>(128)); return buffer.toString(); }
@Test void mapWithNonStringKeyToStringIsJson() throws Exception { Schema intMapSchema = new Schema.Parser() .parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.lang.Integer\"}"); Field intMapField = new Field("intMap", Schema.createMap(intMapSchema), null, null); Schema decMapSchema = new Schema.Parser() .parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.math.BigDecimal\"}"); Field decMapField = new Field("decMap", Schema.createMap(decMapSchema), null, null); Schema boolMapSchema = new Schema.Parser() .parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.lang.Boolean\"}"); Field boolMapField = new Field("boolMap", Schema.createMap(boolMapSchema), null, null); Schema fileMapSchema = new Schema.Parser() .parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.io.File\"}"); Field fileMapField = new Field("fileMap", Schema.createMap(fileMapSchema), null, null); Schema schema = Schema.createRecord("my_record", "doc", "mytest", false); schema.setFields(Arrays.asList(intMapField, decMapField, boolMapField, fileMapField)); HashMap<Integer, String> intPair = new HashMap<>(); intPair.put(1, "one"); intPair.put(2, "two"); HashMap<java.math.BigDecimal, String> decPair = new HashMap<>(); decPair.put(java.math.BigDecimal.valueOf(1), "one"); decPair.put(java.math.BigDecimal.valueOf(2), "two"); HashMap<Boolean, String> boolPair = new HashMap<>(); boolPair.put(true, "isTrue"); boolPair.put(false, "isFalse"); boolPair.put(null, null); HashMap<java.io.File, String> filePair = new HashMap<>(); java.io.File f = new java.io.File(getClass().getResource("/SchemaBuilder.avsc").toURI()); filePair.put(f, "File"); GenericRecord r = new GenericData.Record(schema); r.put(intMapField.name(), intPair); r.put(decMapField.name(), decPair); r.put(boolMapField.name(), boolPair); r.put(fileMapField.name(), filePair); String json = r.toString(); JsonFactory factory = new JsonFactory(); JsonParser parser = factory.createParser(json); ObjectMapper mapper = new ObjectMapper(); // will throw exception if string is not parsable json mapper.readTree(parser); }
@Nonnull @Override public Collection<String> resourceTypes() { return Collections.singleton(OBJECT_TYPE_IMAP_JOURNAL); }
@Test public void should_list_resource_types() { // given DataConnectionConfig dataConnectionConfig = nonSharedDataConnectionConfig(clusterName); hazelcastDataConnection = new HazelcastDataConnection(dataConnectionConfig); // when Collection<String> resourceTypes = hazelcastDataConnection.resourceTypes(); // then assertThat(resourceTypes) .map(r -> r.toLowerCase(Locale.ROOT)) .containsExactlyInAnyOrder("imapjournal"); }
public static String toJsonStr(JSON json, int indentFactor) { if (null == json) { return null; } return json.toJSONString(indentFactor); }
@Test public void issue3540Test() { Long userId = 10101010L; final String jsonStr = JSONUtil.toJsonStr(userId); assertEquals("{}", jsonStr); }
@Override public void ignorableWhitespace(char[] ch, int start, int length) throws SAXException { filter(ch, start, length, ignorableWhitespaceOutput); }
@Test public void testInvalidSurrogates() throws SAXException { safe.ignorableWhitespace("\udb00\ubfff".toCharArray(), 0, 2); assertEquals("\ufffd\ubfff", output.toString()); }
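A hedged companion sketch for the complementary case: a well-formed surrogate pair (U+1F600) should pass through unchanged, assuming the filter only replaces ill-formed surrogates.

@Test public void testValidSurrogatePair() throws SAXException { // A valid high/low surrogate pair should survive the filter intact. safe.ignorableWhitespace("\ud83d\ude00".toCharArray(), 0, 2); assertEquals("\ud83d\ude00", output.toString()); }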
public static String calculateTypeName(CompilationUnit compilationUnit, FullyQualifiedJavaType fqjt) { if (fqjt.isArray()) { // if array, then calculate the name of the base (non-array) type // then add the array indicators back in String fqn = fqjt.getFullyQualifiedName(); String typeName = calculateTypeName(compilationUnit, new FullyQualifiedJavaType(fqn.substring(0, fqn.indexOf('[')))); return typeName + fqn.substring(fqn.indexOf('[')); } if (!fqjt.getTypeArguments().isEmpty()) { return calculateParameterizedTypeName(compilationUnit, fqjt); } if (compilationUnit == null || typeDoesNotRequireImport(fqjt) || typeIsInSamePackage(compilationUnit, fqjt) || typeIsAlreadyImported(compilationUnit, fqjt)) { return fqjt.getShortName(); } else { return fqjt.getFullyQualifiedName(); } }
@Test void testGenericTypeWithWildCardAllImported() { Interface interfaze = new Interface(new FullyQualifiedJavaType("com.foo.UserMapper")); interfaze.addImportedType(new FullyQualifiedJavaType("java.util.Map")); interfaze.addImportedType(new FullyQualifiedJavaType("java.util.List")); interfaze.addImportedType(new FullyQualifiedJavaType("com.beeant.dto.User")); interfaze.addImportedType(new FullyQualifiedJavaType("java.math.BigDecimal")); FullyQualifiedJavaType fqjt = new FullyQualifiedJavaType("java.util.Map<java.math.BigDecimal, java.util.List<? extends com.beeant.dto.User>>"); assertEquals("Map<BigDecimal, List<? extends User>>", JavaDomUtils.calculateTypeName(interfaze, fqjt)); }
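A hedged companion sketch for the fallback branch of the focal method: a type that is not imported, not in java.lang, and not in the same package should be rendered fully qualified.

@Test void testTypeNotImportedUsesFullyQualifiedName() { // No import is added for java.util.Map, so the focal method must fall // through to the fully qualified name. Interface interfaze = new Interface(new FullyQualifiedJavaType("com.foo.UserMapper")); FullyQualifiedJavaType fqjt = new FullyQualifiedJavaType("java.util.Map"); assertEquals("java.util.Map", JavaDomUtils.calculateTypeName(interfaze, fqjt)); }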
@Override void execute() { String[] loc = getCl().getUpddateLocationParams(); Path newPath = new Path(loc[0]); Path oldPath = new Path(loc[1]); URI oldURI = oldPath.toUri(); URI newURI = newPath.toUri(); /* * validate input - Both new and old URI should contain valid host names and valid schemes. * port is optional in both the URIs since HDFS HA NN URI doesn't have a port. */ if (oldURI.getHost() == null || newURI.getHost() == null) { throw new IllegalStateException("HiveMetaTool:A valid host is required in both old-loc and new-loc"); } else if (oldURI.getScheme() == null || newURI.getScheme() == null) { throw new IllegalStateException("HiveMetaTool:A valid scheme is required in both old-loc and new-loc"); } updateFSRootLocation(oldURI, newURI, getCl().getSerdePropKey(), getCl().getTablePropKey(), getCl().isDryRun()); }
@Test public void testNoHost() throws Exception { exception.expect(IllegalStateException.class); exception.expectMessage("HiveMetaTool:A valid host is required in both old-loc and new-loc"); MetaToolTaskUpdateLocation t = new MetaToolTaskUpdateLocation(); t.setCommandLine(new HiveMetaToolCommandLine(new String[] {"-updateLocation", "hdfs://", "hdfs://"})); t.execute(); }
public static GeyserResourcePack readPack(Path path) throws IllegalArgumentException { if (!path.getFileName().toString().endsWith(".mcpack") && !path.getFileName().toString().endsWith(".zip")) { throw new IllegalArgumentException("Resource pack " + path.getFileName() + " must be a .zip or .mcpack file!"); } AtomicReference<GeyserResourcePackManifest> manifestReference = new AtomicReference<>(); try (ZipFile zip = new ZipFile(path.toFile()); Stream<? extends ZipEntry> stream = zip.stream()) { stream.forEach(x -> { String name = x.getName(); if (SHOW_RESOURCE_PACK_LENGTH_WARNING && name.length() >= 80) { GeyserImpl.getInstance().getLogger().warning("The resource pack " + path.getFileName() + " has a file in it that meets or exceeds 80 characters in its path (" + name + ", " + name.length() + " characters long). This will cause problems on some Bedrock platforms." + " Please rename it to be shorter, or reduce the amount of folders needed to get to the file."); } if (name.contains("manifest.json")) { try { GeyserResourcePackManifest manifest = FileUtils.loadJson(zip.getInputStream(x), GeyserResourcePackManifest.class); if (manifest.header().uuid() != null) { manifestReference.set(manifest); } } catch (IOException e) { e.printStackTrace(); } } }); GeyserResourcePackManifest manifest = manifestReference.get(); if (manifest == null) { throw new IllegalArgumentException(path.getFileName() + " does not contain a valid pack_manifest.json or manifest.json"); } // Check if a file exists with the same name as the resource pack suffixed by .key, // and set this as content key. (e.g. test.zip, key file would be test.zip.key) Path keyFile = path.resolveSibling(path.getFileName().toString() + ".key"); String contentKey = Files.exists(keyFile) ? Files.readString(keyFile, StandardCharsets.UTF_8) : ""; return new GeyserResourcePack(new GeyserPathPackCodec(path), manifest, contentKey); } catch (Exception e) { throw new IllegalArgumentException(GeyserLocale.getLocaleStringLog("geyser.resource_pack.broken", path.getFileName()), e); } }
@Test public void testPack() throws Exception { // this mcpack only contains a folder, which the manifest is in Path path = getResource("empty_pack.mcpack"); ResourcePack pack = ResourcePackLoader.readPack(path); assertEquals("", pack.contentKey()); // should probably add some more tests here related to the manifest }
public static String generateWsRemoteAddress(HttpServletRequest request) { if (request == null) { throw new IllegalArgumentException("HttpServletRequest must not be null."); } StringBuilder remoteAddress = new StringBuilder(); String scheme = request.getScheme(); remoteAddress.append(scheme != null && scheme.equalsIgnoreCase("https") ? "wss://" : "ws://"); remoteAddress.append(request.getRemoteAddr()); remoteAddress.append(":"); remoteAddress.append(request.getRemotePort()); return remoteAddress.toString(); }
@Test public void testGenerateWsRemoteAddress() { HttpServletRequest request = mock(HttpServletRequest.class); when(request.getScheme()).thenReturn("http"); when(request.getRemoteAddr()).thenReturn("localhost"); when(request.getRemotePort()).thenReturn(8080); assertEquals("ws://localhost:8080", HttpTransportUtils.generateWsRemoteAddress(request)); }
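A companion sketch mirroring the test above for the https branch of the focal method, which should produce a wss:// prefix.

@Test public void testGenerateWsRemoteAddressSecure() { // The focal method maps an https scheme to the wss:// prefix. HttpServletRequest request = mock(HttpServletRequest.class); when(request.getScheme()).thenReturn("https"); when(request.getRemoteAddr()).thenReturn("localhost"); when(request.getRemotePort()).thenReturn(8443); assertEquals("wss://localhost:8443", HttpTransportUtils.generateWsRemoteAddress(request)); }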
@VisibleForTesting void validateParentMenu(Long parentId, Long childId) { if (parentId == null || ID_ROOT.equals(parentId)) { return; } // A menu cannot be set as its own parent if (parentId.equals(childId)) { throw exception(MENU_PARENT_ERROR); } MenuDO menu = menuMapper.selectById(parentId); // The parent menu does not exist if (menu == null) { throw exception(MENU_PARENT_NOT_EXISTS); } // The parent menu must be of directory or menu type if (!MenuTypeEnum.DIR.getType().equals(menu.getType()) && !MenuTypeEnum.MENU.getType().equals(menu.getType())) { throw exception(MENU_PARENT_NOT_DIR_OR_MENU); } }
@Test public void testValidateParentMenu_success() { // mock data MenuDO menuDO = buildMenuDO(MenuTypeEnum.MENU, "parent", 0L); menuMapper.insert(menuDO); // prepare parameters Long parentId = menuDO.getId(); // invoke; no assertion needed menuService.validateParentMenu(parentId, null); }
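A hedged companion sketch for the self-parent branch of the focal method; it assumes the project's assertServiceException test helper and reuses the MENU_PARENT_ERROR constant referenced in the focal code.

@Test public void testValidateParentMenu_parentIsSelf() { // Setting a menu as its own parent must fail with MENU_PARENT_ERROR. assertServiceException(() -> menuService.validateParentMenu(1L, 1L), MENU_PARENT_ERROR); }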
static boolean hasUnboundedOutput(Pipeline p) { PipelineTranslationModeOptimizer optimizer = new PipelineTranslationModeOptimizer(); optimizer.translate(p); return optimizer.hasUnboundedCollections; }
@Test public void testBoundedCollectionProducingTransform() { PipelineOptions options = PipelineOptionsFactory.create(); options.setRunner(FlinkRunner.class); Pipeline pipeline = Pipeline.create(options); pipeline.apply(GenerateSequence.from(0).to(10)); assertThat(PipelineTranslationModeOptimizer.hasUnboundedOutput(pipeline), is(false)); }
@Override public void writeShort(final int v) throws IOException { ensureAvailable(SHORT_SIZE_IN_BYTES); MEM.putShort(buffer, ARRAY_BYTE_BASE_OFFSET + pos, (short) v); pos += SHORT_SIZE_IN_BYTES; }
@Test public void testWriteShortV() throws Exception { short expected = 100; out.writeShort(expected); short actual = Bits.readShort(out.buffer, 0, ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN); assertEquals(expected, actual); }
@Override public boolean isReadOnly() { return false; }
@Test void assertIsReadOnly() { assertFalse(metaData.isReadOnly()); }
static void validate(KafkaConsumer<byte[], byte[]> consumer, byte[] message, ConsumerRecords<byte[], byte[]> records) { if (records.isEmpty()) { consumer.commitSync(); throw new RuntimeException("poll() timed out before finding a result (timeout:[" + POLL_TIMEOUT_MS + "])"); } //Check result matches the original record String sent = new String(message, StandardCharsets.UTF_8); String read = new String(records.iterator().next().value(), StandardCharsets.UTF_8); if (!read.equals(sent)) { consumer.commitSync(); throw new RuntimeException("The message read [" + read + "] did not match the message sent [" + sent + "]"); } //Check we only got the one message if (records.count() != 1) { int count = records.count(); consumer.commitSync(); throw new RuntimeException("Only one result was expected during this test. We found [" + count + "]"); } }
@Test @SuppressWarnings("unchecked") public void shouldFailWhenSentIsNotEqualToReceived() { Iterator<ConsumerRecord<byte[], byte[]>> iterator = mock(Iterator.class); ConsumerRecord<byte[], byte[]> record = mock(ConsumerRecord.class); when(records.isEmpty()).thenReturn(false); when(records.iterator()).thenReturn(iterator); when(iterator.next()).thenReturn(record); when(record.value()).thenReturn("kafkab".getBytes(StandardCharsets.UTF_8)); assertThrows(RuntimeException.class, () -> EndToEndLatency.validate(consumer, "kafkaa".getBytes(StandardCharsets.UTF_8), records)); }
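A companion sketch mirroring the test above for the records.count() != 1 branch of the focal method; it reuses the same mock fixture fields (records, consumer).

@Test @SuppressWarnings("unchecked") public void shouldFailWhenMoreThanOneMessageIsReceived() { // The payload matches, but a count of 2 must trigger the // "Only one result was expected" failure path. Iterator<ConsumerRecord<byte[], byte[]>> iterator = mock(Iterator.class); ConsumerRecord<byte[], byte[]> record = mock(ConsumerRecord.class); when(records.isEmpty()).thenReturn(false); when(records.iterator()).thenReturn(iterator); when(iterator.next()).thenReturn(record); when(record.value()).thenReturn("kafka".getBytes(StandardCharsets.UTF_8)); when(records.count()).thenReturn(2); assertThrows(RuntimeException.class, () -> EndToEndLatency.validate(consumer, "kafka".getBytes(StandardCharsets.UTF_8), records)); }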
public static BigDecimal getBigDecimalOrNull(Object value) { if ( value instanceof BigDecimal ) { return (BigDecimal) value; } if ( value instanceof BigInteger ) { return new BigDecimal((BigInteger) value, MathContext.DECIMAL128); } if ( value instanceof Double || value instanceof Float ) { String stringVal = value.toString(); if (stringVal.equals("NaN") || stringVal.equals("Infinity") || stringVal.equals("-Infinity")) { return null; } // doubleValue() sometimes produce rounding errors, so we need to use toString() instead // We also need to remove trailing zeros, if there are some so for 10d we get BigDecimal.valueOf(10) // instead of BigDecimal.valueOf(10.0). return new BigDecimal( removeTrailingZeros(value.toString()), MathContext.DECIMAL128 ); } if ( value instanceof Number ) { return new BigDecimal( ((Number) value).longValue(), MathContext.DECIMAL128 ); } if ( value instanceof String ) { try { // we need to remove leading zeros to prevent octal conversion return new BigDecimal(((String) value).replaceFirst("^0+(?!$)", ""), MathContext.DECIMAL128); } catch (NumberFormatException e) { return null; } } return null; }
@Test void getBigDecimalOrNull() { assertThat(NumberEvalHelper.getBigDecimalOrNull(10d)).isEqualTo(new BigDecimal("10")); assertThat(NumberEvalHelper.getBigDecimalOrNull(10.00000000D)).isEqualTo(new BigDecimal("10")); assertThat(NumberEvalHelper.getBigDecimalOrNull(10000000000.5D)).isEqualTo(new BigDecimal("10000000000.5")); }
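A hedged companion sketch for two edge branches visible in the focal method: non-finite doubles map to null, and leading zeros in strings are stripped before conversion.

@Test void getBigDecimalOrNullEdgeCases() { // "NaN" and "Infinity" string forms of non-finite doubles return null. assertThat(NumberEvalHelper.getBigDecimalOrNull(Double.NaN)).isNull(); assertThat(NumberEvalHelper.getBigDecimalOrNull(Double.POSITIVE_INFINITY)).isNull(); // Leading zeros are removed to avoid octal-style parsing surprises. assertThat(NumberEvalHelper.getBigDecimalOrNull("007")).isEqualTo(new BigDecimal("7")); }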
public ServiceConfiguration getConfiguration() { return this.config; }
@Test public void testAdvertisedAddress() throws Exception { cleanup(); useStaticPorts = true; setup(); assertEquals(pulsar.getAdvertisedAddress(), "localhost"); assertEquals(pulsar.getBrokerServiceUrlTls(), "pulsar+ssl://localhost:6651"); assertEquals(pulsar.getBrokerServiceUrl(), "pulsar://localhost:6660"); assertEquals(pulsar.getWebServiceAddress(), "http://localhost:8081"); assertEquals(pulsar.getWebServiceAddressTls(), "https://localhost:8082"); assertEquals(conf, pulsar.getConfiguration()); }
@Override public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException { try (InputStream input = provider.open(requireNonNull(path))) { final JsonNode node = mapper.readTree(createParser(input)); if (node == null) { throw ConfigurationParsingException .builder("Configuration at " + path + " must not be empty") .build(path); } return build(node, path); } catch (JsonParseException e) { throw ConfigurationParsingException .builder("Malformed " + formatName) .setCause(e) .setLocation(e.getLocation()) .setDetail(e.getMessage()) .build(path); } }
@Test void handlesArrayOverride() throws Exception { System.setProperty("dw.type", "coder,wizard,overridden"); final Example example = factory.build(configurationSourceProvider, validFile); assertThat(example.getType()) .hasSize(3) .element(2) .isEqualTo("overridden"); }
@Override public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) { table.refresh(); if (lastPosition != null) { return discoverIncrementalSplits(lastPosition); } else { return discoverInitialSplits(); } }
@Test public void testIncrementalFromSnapshotTimestamp() throws Exception { appendTwoSnapshots(); ScanContext scanContext = ScanContext.builder() .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_TIMESTAMP) .startSnapshotTimestamp(snapshot2.timestampMillis()) .build(); ContinuousSplitPlannerImpl splitPlanner = new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null); ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null); assertThat(initialResult.fromPosition()).isNull(); // For inclusive behavior, the initial result should point to snapshot1 (as snapshot2's parent). assertThat(initialResult.toPosition().snapshotId().longValue()) .isEqualTo(snapshot1.snapshotId()); assertThat(initialResult.toPosition().snapshotTimestampMs().longValue()) .isEqualTo(snapshot1.timestampMillis()); assertThat(initialResult.splits()).isEmpty(); ContinuousEnumerationResult secondResult = splitPlanner.planSplits(initialResult.toPosition()); assertThat(secondResult.fromPosition().snapshotId().longValue()) .isEqualTo(snapshot1.snapshotId()); assertThat(secondResult.fromPosition().snapshotTimestampMs().longValue()) .isEqualTo(snapshot1.timestampMillis()); assertThat(secondResult.toPosition().snapshotId().longValue()) .isEqualTo(snapshot2.snapshotId()); assertThat(secondResult.toPosition().snapshotTimestampMs().longValue()) .isEqualTo(snapshot2.timestampMillis()); IcebergSourceSplit split = Iterables.getOnlyElement(secondResult.splits()); assertThat(split.task().files()).hasSize(1); Set<String> discoveredFiles = split.task().files().stream() .map(fileScanTask -> fileScanTask.file().path().toString()) .collect(Collectors.toSet()); // should discover dataFile2 appended in snapshot2 Set<String> expectedFiles = ImmutableSet.of(dataFile2.path().toString()); assertThat(discoveredFiles).containsExactlyElementsOf(expectedFiles); IcebergEnumeratorPosition lastPosition = secondResult.toPosition(); for (int i = 0; i < 3; ++i) { lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition; } }
@Override @SuppressWarnings("DuplicatedCode") public Integer cleanErrorLog(Integer exceedDay, Integer deleteLimit) { int count = 0; LocalDateTime expireDate = LocalDateTime.now().minusDays(exceedDay); // Delete in a loop until no more rows match the condition for (int i = 0; i < Short.MAX_VALUE; i++) { int deleteCount = apiErrorLogMapper.deleteByCreateTimeLt(expireDate, deleteLimit); count += deleteCount; // Fewer rows deleted than the limit means we have reached the end if (deleteCount < deleteLimit) { break; } } return count; }
@Test public void testCleanErrorLog() { // mock data ApiErrorLogDO log01 = randomPojo(ApiErrorLogDO.class, o -> o.setCreateTime(addTime(Duration.ofDays(-3)))); apiErrorLogMapper.insert(log01); ApiErrorLogDO log02 = randomPojo(ApiErrorLogDO.class, o -> o.setCreateTime(addTime(Duration.ofDays(-1)))); apiErrorLogMapper.insert(log02); // prepare parameters Integer exceedDay = 2; Integer deleteLimit = 1; // invoke Integer count = apiErrorLogService.cleanErrorLog(exceedDay, deleteLimit); // assert assertEquals(1, count); List<ApiErrorLogDO> logs = apiErrorLogMapper.selectList(); assertEquals(1, logs.size()); assertEquals(log02, logs.get(0)); }
public static Boolean andOfWrap(Boolean... array) { if (ArrayUtil.isEmpty(array)) { throw new IllegalArgumentException("The Array must not be empty !"); } for (final Boolean b : array) { if(!isTrue(b)){ return false; } } return true; }
@Test public void issue3587Test() { Boolean boolean1 = true; Boolean boolean2 = null; Boolean result = BooleanUtil.andOfWrap(boolean1, boolean2); assertFalse(result); }
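A companion sketch for the guard clause in the focal method: an empty argument list must be rejected with IllegalArgumentException.

@Test public void andOfWrapEmptyArrayTest() { // The varargs call with no arguments passes an empty array to the guard. assertThrows(IllegalArgumentException.class, () -> BooleanUtil.andOfWrap()); }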
@Override public int sizeIdx2size(int sizeIdx) { return sizeClass.sizeIdx2size(sizeIdx); }
@Test public void testSizeIdx2size() { SizeClasses sc = new SizeClasses(PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 0); PoolArena<ByteBuffer> arena = new PoolArena.DirectArena(null, sc); for (int i = 0; i < arena.sizeClass.nSizes; i++) { assertEquals(arena.sizeClass.sizeIdx2sizeCompute(i), arena.sizeClass.sizeIdx2size(i)); } }
@ApiOperation(value = "Create Or update Tenant (saveTenant)", notes = "Create or update the Tenant. When creating tenant, platform generates Tenant Id as " + UUID_WIKI_LINK + "Default Rule Chain and Device profile are also generated for the new tenants automatically. " + "The newly created Tenant Id will be present in the response. " + "Specify existing Tenant Id to update the Tenant. " + "Referencing non-existing Tenant Id will cause 'Not Found' error." + "Remove 'id', 'tenantId' from the request body example (below) to create new Tenant entity." + SYSTEM_AUTHORITY_PARAGRAPH) @PreAuthorize("hasAuthority('SYS_ADMIN')") @RequestMapping(value = "/tenant", method = RequestMethod.POST) @ResponseBody public Tenant saveTenant(@Parameter(description = "A JSON value representing the tenant.") @RequestBody Tenant tenant) throws Exception { checkEntity(tenant.getId(), tenant, Resource.TENANT); return tbTenantService.save(tenant); }
@Test public void testFindTenants() throws Exception { loginSysAdmin(); List<Tenant> tenants = new ArrayList<>(); PageLink pageLink = new PageLink(17); PageData<Tenant> pageData = doGetTypedWithPageLink("/api/tenants?", PAGE_DATA_TENANT_TYPE_REF, pageLink); Assert.assertFalse(pageData.hasNext()); Assert.assertEquals(1, pageData.getData().size()); tenants.addAll(pageData.getData()); Mockito.reset(tbClusterService); int cntEntity = 56; List<ListenableFuture<Tenant>> createFutures = new ArrayList<>(56); for (int i = 0; i < cntEntity; i++) { Tenant tenant = new Tenant(); tenant.setTitle("Tenant" + i); createFutures.add(executor.submit(() -> saveTenant(tenant))); } tenants.addAll(Futures.allAsList(createFutures).get(TIMEOUT, TimeUnit.SECONDS)); testBroadcastEntityStateChangeEventTimeManyTimeTenant(new Tenant(), ComponentLifecycleEvent.CREATED, cntEntity); List<Tenant> loadedTenants = new ArrayList<>(); pageLink = new PageLink(17); do { pageData = doGetTypedWithPageLink("/api/tenants?", PAGE_DATA_TENANT_TYPE_REF, pageLink); loadedTenants.addAll(pageData.getData()); if (pageData.hasNext()) { pageLink = pageLink.nextPageLink(); } } while (pageData.hasNext()); assertThat(tenants).containsExactlyInAnyOrderElementsOf(loadedTenants); deleteEntitiesAsync("/api/tenant/", loadedTenants.stream() .filter((t) -> !TEST_TENANT_NAME.equals(t.getTitle())) .collect(Collectors.toList()), executor).get(TIMEOUT, TimeUnit.SECONDS); testBroadcastEntityStateChangeEventTimeManyTimeTenant(new Tenant(), ComponentLifecycleEvent.DELETED, cntEntity); pageLink = new PageLink(17); pageData = doGetTypedWithPageLink("/api/tenants?", PAGE_DATA_TENANT_TYPE_REF, pageLink); Assert.assertFalse(pageData.hasNext()); Assert.assertEquals(1, pageData.getData().size()); }
public static List<Path> getJarsInDirectory(String path) { return getJarsInDirectory(path, true); }
@Test public void testGetJarsInDirectory() throws Exception { List<Path> jars = FileUtil.getJarsInDirectory("/foo/bar/bogus/"); assertTrue("no jars should be returned for a bogus path", jars.isEmpty()); // create jar files to be returned File jar1 = new File(tmp, "wildcard1.jar"); File jar2 = new File(tmp, "wildcard2.JAR"); List<File> matches = Arrays.asList(jar1, jar2); for (File match: matches) { assertTrue("failure creating file: " + match, match.createNewFile()); } // create non-jar files, which we expect to not be included in the result Verify.createNewFile(new File(tmp, "text.txt")); Verify.createNewFile(new File(tmp, "executable.exe")); Verify.createNewFile(new File(tmp, "README")); // pass in the directory String directory = tmp.getCanonicalPath(); jars = FileUtil.getJarsInDirectory(directory); assertEquals("there should be 2 jars", 2, jars.size()); for (Path jar: jars) { URL url = jar.toUri().toURL(); assertTrue("the jar should match either of the jars", url.equals(jar1.getCanonicalFile().toURI().toURL()) || url.equals(jar2.getCanonicalFile().toURI().toURL())); } }
@Override public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context, Map<String, Long> recentlyUnloadedBundles, Map<String, Long> recentlyUnloadedBrokers) { final var conf = context.brokerConfiguration(); decisionCache.clear(); stats.clear(); Map<String, BrokerLookupData> availableBrokers; try { availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync() .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS); } catch (ExecutionException | InterruptedException | TimeoutException e) { counter.update(Failure, Unknown); log.warn("Failed to fetch available brokers. Stop unloading.", e); return decisionCache; } try { final var loadStore = context.brokerLoadDataStore(); stats.setLoadDataStore(loadStore); boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log); var skipReason = stats.update( context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf); if (skipReason.isPresent()) { if (debugMode) { log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. Reason:{}.", skipReason.get()); } counter.update(Skip, skipReason.get()); return decisionCache; } counter.updateLoadData(stats.avg, stats.std); if (debugMode) { log.info("brokers' load stats:{}", stats); } // skip metrics int numOfBrokersWithEmptyLoadData = 0; int numOfBrokersWithFewBundles = 0; final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd(); boolean transfer = conf.isLoadBalancerTransferEnabled(); if (stats.std() > targetStd || isUnderLoaded(context, stats.peekMinBroker(), stats) || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { unloadConditionHitCount++; } else { unloadConditionHitCount = 0; } if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Shedding condition hit count:{} is less than or equal to the threshold:{}.", unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold()); } counter.update(Skip, HitCount); return decisionCache; } while (true) { if (!stats.hasTransferableBrokers()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers."); } break; } UnloadDecision.Reason reason; if (stats.std() > targetStd) { reason = Overloaded; } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) { reason = Underloaded; if (debugMode) { log.info(String.format("broker:%s is underloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this underloaded broker.", stats.peekMinBroker(), context.brokerLoadDataStore().get(stats.peekMinBroker()).get(), stats.std(), targetStd)); } } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { reason = Overloaded; if (debugMode) { log.info(String.format("broker:%s is overloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this overloaded broker.", stats.peekMaxBroker(), context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(), stats.std(), targetStd)); } } else { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + "The overall cluster load meets the target, std:{} <= targetStd:{}." + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.", stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker()); } break; } String maxBroker = stats.pollMaxBroker(); String minBroker = stats.peekMinBroker(); Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker); Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker); if (maxBrokerLoadData.isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } if (minBrokerLoadData.isEmpty()) { log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker); numOfBrokersWithEmptyLoadData++; continue; } double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA(); double minLoad = minBrokerLoadData.get().getWeightedMaxEMA(); double offload = (maxLoad - minLoad) / 2; BrokerLoadData brokerLoadData = maxBrokerLoadData.get(); double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn() + brokerLoadData.getMsgThroughputOut(); double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn() + minBrokerLoadData.get().getMsgThroughputOut(); double offloadThroughput = maxBrokerThroughput * offload / maxLoad; if (debugMode) { log.info(String.format( "Attempting to shed load from broker:%s%s, which has the max resource " + "usage:%.2f%%, targetStd:%.2f," + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.", maxBroker, transfer ? " to broker:" + minBroker : "", maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB )); } double trafficMarkedToOffload = 0; double trafficMarkedToGain = 0; Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker); if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData(); if (maxBrokerTopBundlesLoadData.size() == 1) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Sole namespace bundle:%s is overloading the broker. ", maxBroker, maxBrokerTopBundlesLoadData.iterator().next())); continue; } Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker); var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null; if (maxBrokerTopBundlesLoadData.isEmpty()) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Broker overloaded despite having no bundles", maxBroker)); continue; } int remainingTopBundles = maxBrokerTopBundlesLoadData.size(); for (var e : maxBrokerTopBundlesLoadData) { String bundle = e.bundleName(); if (channel != null && !channel.isOwner(bundle, maxBroker)) { if (debugMode) { log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " MaxBroker:%s is not the owner.", bundle, maxBroker)); } continue; } if (recentlyUnloadedBundles.containsKey(bundle)) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " Bundle has been recently unloaded at ts:%d.", bundle, recentlyUnloadedBundles.get(bundle))); } continue; } if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet " + "affinity(isolation) or anti-affinity group policies.", bundle)); } continue; } if (remainingTopBundles <= 1) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is" + " less than or equal to 1.", bundle, maxBroker)); } break; } var bundleData = e.stats(); double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut; boolean swap = false; List<Unload> minToMaxUnloads = new ArrayList<>(); double minBrokerBundleSwapThroughput = 0.0; if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) { // see if we can swap bundles from min to max broker to balance better. if (transfer && minBrokerTopBundlesLoadDataIter != null) { var maxBrokerNewThroughput = maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain - maxBrokerBundleThroughput; var minBrokerNewThroughput = minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput; while (minBrokerTopBundlesLoadDataIter.hasNext()) { var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next(); if (!isTransferable(context, availableBrokers, minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) { continue; } var minBrokerBundleThroughput = minBrokerBundleData.stats().msgThroughputIn + minBrokerBundleData.stats().msgThroughputOut; var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput; var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput; if (maxBrokerNewThroughputTmp < maxBrokerThroughput && minBrokerNewThroughputTmp < maxBrokerThroughput) { minToMaxUnloads.add(new Unload(minBroker, minBrokerBundleData.bundleName(), Optional.of(maxBroker))); maxBrokerNewThroughput = maxBrokerNewThroughputTmp; minBrokerNewThroughput = minBrokerNewThroughputTmp; minBrokerBundleSwapThroughput += minBrokerBundleThroughput; if (minBrokerNewThroughput <= maxBrokerNewThroughput && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) { swap = true; break; } } } } if (!swap) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is " + "greater than the target :%.2f KByte/s.", bundle, (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB, offloadThroughput / KB)); } break; } } Unload unload; if (transfer) { if (swap) { minToMaxUnloads.forEach(minToMaxUnload -> { if (debugMode) { log.info("Decided to gain bundle:{} from min broker:{}", minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker()); } var decision = new UnloadDecision(); decision.setUnload(minToMaxUnload); decision.succeed(reason); decisionCache.add(decision); }); if (debugMode) { log.info(String.format( "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.", minBrokerBundleSwapThroughput / KB, minBroker, maxBroker)); } trafficMarkedToGain += minBrokerBundleSwapThroughput; } unload = new Unload(maxBroker, bundle, Optional.of(minBroker)); } else { unload = new Unload(maxBroker, bundle); } var decision = new UnloadDecision(); decision.setUnload(unload); decision.succeed(reason); decisionCache.add(decision); trafficMarkedToOffload += maxBrokerBundleThroughput; remainingTopBundles--; if (debugMode) { log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s." + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s." + " Target:%.2f KByte/s.", bundle, maxBrokerBundleThroughput / KB, trafficMarkedToOffload / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain) / KB, offloadThroughput / KB)); } } if (trafficMarkedToOffload > 0) { var adjustedOffload = (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput; stats.offload(maxLoad, minLoad, adjustedOffload); if (debugMode) { log.info( String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}", stats, maxLoad, minLoad, adjustedOffload)); } } else { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " There is no bundle that can be unloaded in top bundles load data. " + "Consider splitting bundles owned by the broker " + "to make each bundle serve less traffic " + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport" + " to report more bundles in the top bundles load data.", maxBroker)); } } // while end if (debugMode) { log.info("decisionCache:{}", decisionCache); } if (decisionCache.isEmpty()) { UnloadDecision.Reason reason; if (numOfBrokersWithEmptyLoadData > 0) { reason = NoLoadData; } else if (numOfBrokersWithFewBundles > 0) { reason = NoBundles; } else { reason = HitCount; } counter.update(Skip, reason); } else { unloadConditionHitCount = 0; } } catch (Throwable e) { log.error("Failed to process unloading. ", e); this.counter.update(Failure, Unknown); } return decisionCache; }
@Test
public void testOutDatedLoadData() throws IllegalAccessException {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = setupContext();
    var brokerLoadDataStore = ctx.brokerLoadDataStore();
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    assertEquals(res.size(), 2);

    FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker1:8080").get(), "updatedAt", 0, true);
    FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker2:8080").get(), "updatedAt", 0, true);
    FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker3:8080").get(), "updatedAt", 0, true);
    FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker4:8080").get(), "updatedAt", 0, true);
    FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker5:8080").get(), "updatedAt", 0, true);

    res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    assertTrue(res.isEmpty());
    assertEquals(counter.getBreakdownCounters().get(Skip).get(OutDatedData).get(), 1);
}
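For intuition on the unload math in the focal method above: the shedder halves the load gap between the hottest and coolest broker, then converts that load share into throughput to move. A minimal worked example with hypothetical numbers (none of these values come from the code or the test):

    // maxLoad = 0.80 (weighted max EMA of the max broker), minLoad = 0.40
    // offload = (0.80 - 0.40) / 2 = 0.20, i.e. shed 20 percentage points of load
    // maxBrokerThroughput = 100_000 KByte/s
    // offloadThroughput = 100_000 * 0.20 / 0.80 = 25_000 KByte/s marked for unloading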
@Override
public CRMaterial deserialize(JsonElement json, Type type, JsonDeserializationContext context)
        throws JsonParseException {
    return determineJsonElementForDistinguishingImplementers(json, context, TYPE, ARTIFACT_ORIGIN);
}
@Test
public void shouldDeserializeP4MaterialType() {
    JsonObject jsonObject = new JsonObject();
    jsonObject.addProperty("type", "p4");

    materialTypeAdapter.deserialize(jsonObject, type, jsonDeserializationContext);

    verify(jsonDeserializationContext).deserialize(jsonObject, CRP4Material.class);
}
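For context, a minimal sketch of how such a deserializer is typically registered with Gson; the adapter class name MaterialTypeAdapter is an assumption, since only CRMaterial and CRP4Material appear in the code above:

    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;

    // Hypothetical wiring; the registered adapter dispatches on the "type" property.
    Gson gson = new GsonBuilder()
            .registerTypeAdapter(CRMaterial.class, new MaterialTypeAdapter())
            .create();
    // "type":"p4" routes deserialization to CRP4Material, as the test verifies.
    CRMaterial material = gson.fromJson("{\"type\":\"p4\"}", CRMaterial.class);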
void scheduleAfterWrite() {
    int drainStatus = drainStatusOpaque();
    for (;;) {
        switch (drainStatus) {
            case IDLE:
                casDrainStatus(IDLE, REQUIRED);
                scheduleDrainBuffers();
                return;
            case REQUIRED:
                scheduleDrainBuffers();
                return;
            case PROCESSING_TO_IDLE:
                if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
                    return;
                }
                drainStatus = drainStatusAcquire();
                continue;
            case PROCESSING_TO_REQUIRED:
                return;
            default:
                throw new IllegalStateException("Invalid drain status: " + drainStatus);
        }
    }
}
@Test
public void scheduleAfterWrite() {
    var cache = new BoundedLocalCache<Object, Object>(
            Caffeine.newBuilder(), /* loader */ null, /* async */ false) {
        @Override void scheduleDrainBuffers() {}
    };
    var transitions = Map.of(
            IDLE, REQUIRED,
            REQUIRED, REQUIRED,
            PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED,
            PROCESSING_TO_REQUIRED, PROCESSING_TO_REQUIRED);
    transitions.forEach((start, end) -> {
        cache.drainStatus = start;
        cache.scheduleAfterWrite();
        assertThat(cache.drainStatus).isEqualTo(end);
    });
}
@Override
public String toString() {
    return "ResourceConfig{"
            + "url=" + url
            + ", id='" + id + '\''
            + ", resourceType=" + resourceType
            + '}';
}
@Test
public void when_attachNonexistentFileWithPathAndId_then_throwsException() {
    // Given
    String id = "exist";
    String path = Paths.get("/i/do/not/" + id).toString();

    // Then
    expectedException.expect(JetException.class);
    expectedException.expectMessage("Not an existing, readable file: " + path);

    // When
    config.attachFile(path, id);
}
public List<AttendeeGroup> findAttendeeGroupCombinationsOverSize(int minSize) {
    if (minSize < 1 || minSize > attendees.size()) {
        throw new MomoException(AttendeeErrorCode.INVALID_ATTENDEE_SIZE);
    }
    List<AttendeeGroup> combinations = new ArrayList<>();
    for (int i = attendees.size(); i >= minSize; i--) {
        combinations.addAll(generateCombinations(i));
    }
    return combinations;
}
@DisplayName("참석자 그룹의 모든 조합을 구한다.") @Test void findCombinationAttendeeGroups() { Meeting meeting = MeetingFixture.DINNER.create(); Attendee jazz = AttendeeFixture.HOST_JAZZ.create(meeting); Attendee pedro = AttendeeFixture.GUEST_PEDRO.create(meeting); Attendee baeky = AttendeeFixture.GUEST_BAKEY.create(meeting); AttendeeGroup attendeeGroup = new AttendeeGroup(List.of(jazz, pedro, baeky)); List<AttendeeGroup> attendeeGroups = attendeeGroup.findAttendeeGroupCombinationsOverSize(1); assertThat(attendeeGroups) .containsExactlyInAnyOrder( new AttendeeGroup(List.of(jazz, pedro, baeky)), new AttendeeGroup(List.of(jazz, pedro)), new AttendeeGroup(List.of(jazz, baeky)), new AttendeeGroup(List.of(pedro, baeky)), new AttendeeGroup(List.of(jazz)), new AttendeeGroup(List.of(pedro)), new AttendeeGroup(List.of(baeky)) ); }
@VisibleForTesting
public static String getNsFromDataNodeNetworkLocation(String location) {
    // Network location should be in the format of /ns/rack.
    Pattern pattern = Pattern.compile("^/([^/]*)/");
    Matcher matcher = pattern.matcher(location);
    if (matcher.find()) {
        return matcher.group(1);
    }
    return "";
}
@Test
public void testGetNsFromDataNodeNetworkLocation() {
    assertEquals("ns0", RouterWebHdfsMethods
        .getNsFromDataNodeNetworkLocation("/ns0/rack-info1"));
    assertEquals("ns0", RouterWebHdfsMethods
        .getNsFromDataNodeNetworkLocation("/ns0/row1/rack-info1"));
    assertEquals("", RouterWebHdfsMethods
        .getNsFromDataNodeNetworkLocation("/row0"));
    assertEquals("", RouterWebHdfsMethods
        .getNsFromDataNodeNetworkLocation("whatever-rack-info1"));
}
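Worth noting why "/row0" yields the empty string: the pattern "^/([^/]*)/" only matches when a second '/' follows the first path component, so single-component locations fall through to the default. A quick standalone check with the same java.util.regex pattern:

    Pattern p = Pattern.compile("^/([^/]*)/");
    p.matcher("/ns0/rack-info1").find(); // true, group(1) = "ns0"
    p.matcher("/row0").find();           // false, so the method returns ""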
public <V> V retryCallable(
        Callable<V> callable, Set<Class<? extends Exception>> exceptionsToIntercept) {
    return RetryHelper.runWithRetries(
            callable,
            getRetrySettings(),
            getExceptionHandlerForExceptions(exceptionsToIntercept),
            NanoClock.getDefaultClock());
}
@Test
public void testRetryCallable_RetriesExpectedNumberOfTimes() {
    AtomicInteger executeCounter = new AtomicInteger(0);
    Callable<Integer> incrementingFunction =
            () -> {
                executeCounter.incrementAndGet();
                if (executeCounter.get() < 2) {
                    throw new MyException();
                }
                return executeCounter.get();
            };
    retryCallableManager.retryCallable(incrementingFunction, ImmutableSet.of(MyException.class));
    assertEquals(
            String.format("Should run 2 times, instead ran %d times.", executeCounter.get()),
            2,
            executeCounter.get());
}
@Override
public JCConditional inline(Inliner inliner) throws CouldNotResolveImportException {
    return inliner
            .maker()
            .Conditional(
                    getCondition().inline(inliner),
                    getTrueExpression().inline(inliner),
                    getFalseExpression().inline(inliner));
}
@Test
public void inline() {
    assertInlines(
            "true ? -1 : 1",
            UConditional.create(ULiteral.booleanLit(true), ULiteral.intLit(-1), ULiteral.intLit(1)));
}
@Override
public Optional<Product> findProduct(int productId) {
    return this.productRepository.findById(productId);
}
@Test
void findProduct_ProductExists_ReturnsNotEmptyOptional() {
    // given
    var product = new Product(1, "Product #1", "Description of product #1");
    doReturn(Optional.of(product)).when(this.productRepository).findById(1);

    // when
    var result = this.service.findProduct(1);

    // then
    assertNotNull(result);
    assertTrue(result.isPresent());
    assertEquals(product, result.orElseThrow());

    verify(this.productRepository).findById(1);
    verifyNoMoreInteractions(this.productRepository);
}
@Override
public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) {
    final KafkaFutureImpl<Collection<Object>> all = new KafkaFutureImpl<>();
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    runnable.call(new Call("findAllBrokers", deadline, new LeastLoadedNodeProvider()) {
        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(Collections.emptyList())
                .setAllowAutoTopicCreation(true));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse metadataResponse = (MetadataResponse) abstractResponse;
            Collection<Node> nodes = metadataResponse.brokers();
            if (nodes.isEmpty())
                throw new StaleMetadataException("Metadata fetch failed due to missing broker list");

            HashSet<Node> allNodes = new HashSet<>(nodes);
            final ListConsumerGroupsResults results = new ListConsumerGroupsResults(allNodes, all);

            for (final Node node : allNodes) {
                final long nowList = time.milliseconds();
                runnable.call(new Call("listConsumerGroups", deadline, new ConstantNodeIdProvider(node.id())) {
                    @Override
                    ListGroupsRequest.Builder createRequest(int timeoutMs) {
                        List<String> states = options.states()
                            .stream()
                            .map(ConsumerGroupState::toString)
                            .collect(Collectors.toList());
                        List<String> groupTypes = options.types()
                            .stream()
                            .map(GroupType::toString)
                            .collect(Collectors.toList());
                        return new ListGroupsRequest.Builder(new ListGroupsRequestData()
                            .setStatesFilter(states)
                            .setTypesFilter(groupTypes));
                    }

                    private void maybeAddConsumerGroup(ListGroupsResponseData.ListedGroup group) {
                        String protocolType = group.protocolType();
                        if (protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) || protocolType.isEmpty()) {
                            final String groupId = group.groupId();
                            final Optional<ConsumerGroupState> state = group.groupState().isEmpty()
                                ? Optional.empty()
                                : Optional.of(ConsumerGroupState.parse(group.groupState()));
                            final Optional<GroupType> type = group.groupType().isEmpty()
                                ? Optional.empty()
                                : Optional.of(GroupType.parse(group.groupType()));
                            final ConsumerGroupListing groupListing = new ConsumerGroupListing(
                                groupId,
                                protocolType.isEmpty(),
                                state,
                                type);
                            results.addListing(groupListing);
                        }
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        final ListGroupsResponse response = (ListGroupsResponse) abstractResponse;
                        synchronized (results) {
                            Errors error = Errors.forCode(response.data().errorCode());
                            if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS
                                    || error == Errors.COORDINATOR_NOT_AVAILABLE) {
                                throw error.exception();
                            } else if (error != Errors.NONE) {
                                results.addError(error.exception(), node);
                            } else {
                                for (ListGroupsResponseData.ListedGroup group : response.data().groups()) {
                                    maybeAddConsumerGroup(group);
                                }
                            }
                            results.tryComplete(node);
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        synchronized (results) {
                            results.addError(throwable, node);
                            results.tryComplete(node);
                        }
                    }
                }, nowList);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            KafkaException exception = new KafkaException("Failed to find brokers to send ListGroups", throwable);
            all.complete(Collections.singletonList(exception));
        }
    }, nowMetadata);
    return new ListConsumerGroupsResult(all);
}
@Test
public void testListConsumerGroupsWithTypes() throws Exception {
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // Test with a state filter but no type filter in the list consumer group options.
        env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE));
        env.kafkaClient().prepareResponseFrom(
            expectListGroupsRequestWithFilters(singleton(ConsumerGroupState.STABLE.toString()), Collections.emptySet()),
            new ListGroupsResponse(new ListGroupsResponseData()
                .setErrorCode(Errors.NONE.code())
                .setGroups(singletonList(
                    new ListGroupsResponseData.ListedGroup()
                        .setGroupId("group-1")
                        .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
                        .setGroupState("Stable")
                        .setGroupType(GroupType.CLASSIC.toString())))),
            env.cluster().nodeById(0));

        final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(singleton(ConsumerGroupState.STABLE));
        final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options);
        Collection<ConsumerGroupListing> listings = result.valid().get();

        assertEquals(1, listings.size());
        List<ConsumerGroupListing> expected = new ArrayList<>();
        expected.add(new ConsumerGroupListing("group-1", false, Optional.of(ConsumerGroupState.STABLE), Optional.of(GroupType.CLASSIC)));
        assertEquals(expected, listings);
        assertEquals(0, result.errors().get().size());

        // Test with a type filter but no state filter in the list consumer group options.
        env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE));
        env.kafkaClient().prepareResponseFrom(
            expectListGroupsRequestWithFilters(Collections.emptySet(), singleton(GroupType.CONSUMER.toString())),
            new ListGroupsResponse(new ListGroupsResponseData()
                .setErrorCode(Errors.NONE.code())
                .setGroups(asList(
                    new ListGroupsResponseData.ListedGroup()
                        .setGroupId("group-1")
                        .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
                        .setGroupState("Stable")
                        .setGroupType(GroupType.CONSUMER.toString()),
                    new ListGroupsResponseData.ListedGroup()
                        .setGroupId("group-2")
                        .setGroupState("Empty")
                        .setGroupType(GroupType.CONSUMER.toString())))),
            env.cluster().nodeById(0));

        final ListConsumerGroupsOptions options2 = new ListConsumerGroupsOptions().withTypes(singleton(GroupType.CONSUMER));
        final ListConsumerGroupsResult result2 = env.adminClient().listConsumerGroups(options2);
        Collection<ConsumerGroupListing> listings2 = result2.valid().get();

        assertEquals(2, listings2.size());
        List<ConsumerGroupListing> expected2 = new ArrayList<>();
        expected2.add(new ConsumerGroupListing("group-2", true, Optional.of(ConsumerGroupState.EMPTY), Optional.of(GroupType.CONSUMER)));
        expected2.add(new ConsumerGroupListing("group-1", false, Optional.of(ConsumerGroupState.STABLE), Optional.of(GroupType.CONSUMER)));
        assertEquals(expected2, listings2);
        assertEquals(0, result2.errors().get().size());
    }
}
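For reference, a minimal caller-side sketch combining both filters in one request; admin is an assumed, already-configured Admin instance and exception handling is omitted:

    ListConsumerGroupsOptions options = new ListConsumerGroupsOptions()
            .inStates(Set.of(ConsumerGroupState.STABLE))
            .withTypes(Set.of(GroupType.CONSUMER));
    // valid() completes with the listings aggregated across all brokers.
    Collection<ConsumerGroupListing> stableConsumerGroups =
            admin.listConsumerGroups(options).valid().get();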