Columns: focal_method — string (lengths 13 to 60.9k) · test_case — string (lengths 25 to 109k)
static CodecFactory getCodecFactory(JobConf job) { CodecFactory factory = null; if (FileOutputFormat.getCompressOutput(job)) { int deflateLevel = job.getInt(DEFLATE_LEVEL_KEY, DEFAULT_DEFLATE_LEVEL); int xzLevel = job.getInt(XZ_LEVEL_KEY, DEFAULT_XZ_LEVEL); int zstdLevel = job.getInt(ZSTD_LEVEL_KEY, DEFAULT_ZSTANDARD_LEVEL); boolean zstdBufferPool = job.getBoolean(ZSTD_BUFFERPOOL_KEY, DEFAULT_ZSTANDARD_BUFFERPOOL); String codecName = job.get(AvroJob.OUTPUT_CODEC); if (codecName == null) { String codecClassName = job.get("mapred.output.compression.codec", null); String avroCodecName = HadoopCodecFactory.getAvroCodecName(codecClassName); if (codecClassName != null && avroCodecName != null) { factory = HadoopCodecFactory.fromHadoopString(codecClassName); job.set(AvroJob.OUTPUT_CODEC, avroCodecName); return factory; } else { return CodecFactory.deflateCodec(deflateLevel); } } else { if (codecName.equals(DEFLATE_CODEC)) { factory = CodecFactory.deflateCodec(deflateLevel); } else if (codecName.equals(XZ_CODEC)) { factory = CodecFactory.xzCodec(xzLevel); } else if (codecName.equals(ZSTANDARD_CODEC)) { factory = CodecFactory.zstandardCodec(zstdLevel, false, zstdBufferPool); } else { factory = CodecFactory.fromString(codecName); } } } return factory; }
@Test void deflateCodecUsingHadoopClass() { CodecFactory avroDeflateCodec = CodecFactory.fromString("deflate"); JobConf job = new JobConf(); job.set("mapred.output.compress", "true"); job.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.DeflateCodec"); CodecFactory factory = AvroOutputFormat.getCodecFactory(job); assertNotNull(factory); assertEquals(factory.getClass(), avroDeflateCodec.getClass()); }
@Override public String toString() { return "WebsocketConfig{" + "urls='" + urls + ", allowOrigin='" + allowOrigin + '}'; }
@Test public void testToString() { String toString = "WebsocketConfig{urls='%s, allowOrigin='%s}"; String expected = String.format(toString, URLS, ALLOW_ORIGIN); assertEquals(expected, websocketConfig.toString()); }
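Note: the focal toString above omits the closing single quote after each field value, and the paired test's format string asserts that exact output, so the pair is self-consistent as written. For reference only, a conventional implementation would close each quote (a sketch; adopting it would require updating the test's format string):

@Override
public String toString() {
    return "WebsocketConfig{" +
        "urls='" + urls + '\'' +
        ", allowOrigin='" + allowOrigin + '\'' +
        '}';
}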
@Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ProviderInfo that = (ProviderInfo) o; if (port != that.port) { return false; } if (rpcVersion != that.rpcVersion) { return false; } if (protocolType != null ? !protocolType.equals(that.protocolType) : that.protocolType != null) { return false; } if (host != null ? !host.equals(that.host) : that.host != null) { return false; } if (path != null ? !path.equals(that.path) : that.path != null) { return false; } if (serializationType != null ? !serializationType.equals(that.serializationType) : that.serializationType != null) { return false; } // return staticAttrs != null ? staticAttrs.equals(that.staticAttrs) : that.staticAttrs == null; return true; }
@Test public void testEquals() throws Exception { ProviderInfo p1 = new ProviderInfo(); ProviderInfo p2 = new ProviderInfo(); Assert.assertEquals(p1, p2); List<ProviderInfo> ps = new ArrayList<ProviderInfo>(); ps.add(p1); ps.remove(p2); Assert.assertEquals(ps.size(), 0); p1.setHost("127.0.0.1"); Assert.assertFalse(p1.equals(p2)); p2.setHost("127.0.0.2"); Assert.assertFalse(p1.equals(p2)); p2.setHost("127.0.0.1"); Assert.assertTrue(p1.equals(p2)); p1.setPort(12200); Assert.assertFalse(p1.equals(p2)); p2.setPort(12201); Assert.assertFalse(p1.equals(p2)); p2.setPort(12200); Assert.assertTrue(p1.equals(p2)); p1.setRpcVersion(4420); Assert.assertFalse(p1.equals(p2)); p2.setRpcVersion(4421); Assert.assertFalse(p1.equals(p2)); p2.setRpcVersion(4420); Assert.assertTrue(p1.equals(p2)); p1.setProtocolType("p1"); Assert.assertFalse(p1.equals(p2)); p2.setProtocolType("p2"); Assert.assertFalse(p1.equals(p2)); p2.setProtocolType("p1"); Assert.assertTrue(p1.equals(p2)); p1.setSerializationType("zzz"); Assert.assertFalse(p1.equals(p2)); p2.setSerializationType("yyy"); Assert.assertFalse(p1.equals(p2)); p2.setSerializationType("zzz"); Assert.assertTrue(p1.equals(p2)); // p1.setInterfaceId("com.xxx"); // Assert.assertFalse(p1.equals(p2)); // p2.setInterfaceId("com.yyy"); // Assert.assertFalse(p1.equals(p2)); // p2.setInterfaceId("com.xxx"); // Assert.assertTrue(p1.equals(p2)); // // p1.setUniqueId("u1"); // Assert.assertFalse(p1.equals(p2)); // p2.setUniqueId("u2"); // Assert.assertFalse(p1.equals(p2)); // p2.setUniqueId("u1"); // Assert.assertTrue(p1.equals(p2)); p1.setPath("/aaa"); Assert.assertFalse(p1.equals(p2)); p2.setPath("/bbb"); Assert.assertFalse(p1.equals(p2)); p2.setPath("/aaa"); Assert.assertTrue(p1.equals(p2)); p1.setWeight(200); Assert.assertTrue(p1.equals(p2)); p2.setWeight(300); Assert.assertTrue(p1.equals(p2)); p2.setWeight(200); Assert.assertTrue(p1.equals(p2)); p1.setDynamicAttr("x1", "y1"); Assert.assertTrue(p1.equals(p2)); p2.setDynamicAttr("x1", "y1"); Assert.assertTrue(p1.equals(p2)); p2.setDynamicAttr("x2", "y2"); Assert.assertTrue(p1.equals(p2)); p1.setStaticAttr("x1", "y1"); Assert.assertTrue(p1.equals(p2)); p2.setStaticAttr("x1", "y1"); Assert.assertTrue(p1.equals(p2)); p1.setStaticAttr("x2", "y2"); Assert.assertTrue(p1.equals(p2)); p2.setStaticAttr("x2", "y2"); Assert.assertTrue(p1.equals(p2)); ps.add(p1); ps.remove(p2); Assert.assertEquals(ps.size(), 0); }
public ContentInfo verify(ContentInfo signedMessage, Date date) { final SignedData signedData = SignedData.getInstance(signedMessage.getContent()); final X509Certificate cert = certificate(signedData); certificateVerifier.verify(cert, date); final X500Name name = X500Name.getInstance(cert.getIssuerX500Principal().getEncoded()); try { final CMSSignedData cms = new CMSSignedData(signedMessage); cms.verifySignatures(signerId -> { if (!name.equals(signerId.getIssuer())) { throw new VerificationException("Issuer does not match certificate"); } if (!cert.getSerialNumber().equals(signerId.getSerialNumber())) { throw new VerificationException("Serial number does not match certificate"); } return new JcaSignerInfoVerifierBuilder(digestProvider).setProvider(bcProvider).build(cert); }); } catch (CMSException e) { throw new VerificationException("Could not verify CMS", e); } return signedData.getEncapContentInfo(); }
@Test public void shouldThrowExceptionIfSerialNumberDoesNotMatch() throws Exception { final byte[] data = fixture(); data[2118]++; final ContentInfo signedMessage = ContentInfo.getInstance(data); thrown.expect(VerificationException.class); thrown.expectMessage("Serial number does not match certificate"); new CmsVerifier(new CertificateVerifier.None()).verify(signedMessage); }
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { if(directory.isRoot()) { final AttributedList<Path> list = new AttributedList<>(); for(RootFolder root : session.roots()) { switch(root.getRootFolderType()) { case 0: // My Files case 1: // Common list.add(new Path(directory, PathNormalizer.name(root.getName()), EnumSet.of(Path.Type.directory, Path.Type.volume), attributes.toAttributes(root))); break; } listener.chunk(directory, list); } return list; } else { try { final AttributedList<Path> children = new AttributedList<>(); int pageIndex = 0; int fileCount = 0; FileContents files; do { files = new FilesApi(this.session.getClient()).filesGetById(URIEncoder.encode(fileid.getFileId(directory)), pageIndex, chunksize, "Name asc", 0, // All true, false, false ); for(File f : files.getFiles()) { final PathAttributes attrs = attributes.toAttributes(f); final EnumSet<Path.Type> type = (f.getFlags() & 1) == 1 ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file); children.add(new Path(directory, f.getName(), type, attrs)); } pageIndex++; fileCount += files.getFiles().size(); listener.chunk(directory, children); } while(fileCount < files.getTotalRowCount()); return children; } catch(ApiException e) { throw new StoregateExceptionMappingService(fileid).map("Listing directory {0} failed", e, directory); } } }
@Test public void testList() throws Exception { final StoregateIdProvider nodeid = new StoregateIdProvider(session); final Path room = new Path("/My files", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path folder = new StoregateDirectoryFeature(session, nodeid).mkdir(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final TransferStatus status = new TransferStatus(); status.setHidden(true); final Path file = new StoregateTouchFeature(session, nodeid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), status); final AttributedList<Path> list = new StoregateListService(session, nodeid).list(folder, new DisabledListProgressListener()); assertNotSame(AttributedList.emptyList(), list); assertTrue(list.contains(file)); assertSame(folder, list.get(file).getParent()); assertTrue(list.get(file).attributes().isHidden()); assertSame(folder, list.get(file).getParent()); new StoregateDeleteFeature(session, nodeid).delete(Arrays.asList(file, folder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
private void setup() { setLayout(new BorderLayout()); var bot = new JPanel(); add(jt.getTableHeader(), BorderLayout.NORTH); bot.setLayout(new BorderLayout()); bot.add(del, BorderLayout.EAST); add(bot, BorderLayout.SOUTH); var jsp = new JScrollPane(jt); jsp.setPreferredSize(new Dimension(500, 250)); add(jsp, BorderLayout.CENTER); del.addActionListener(new TargetListener()); var rootPane = SwingUtilities.getRootPane(del); rootPane.setDefaultButton(del); setVisible(true); }
@Test void testSetup(){ final var target = new Target(); assertEquals(target.getSize().getWidth(), Double.valueOf(640)); assertEquals(target.getSize().getHeight(), Double.valueOf(480)); assertTrue(target.isVisible()); }
public static int parseInteger(String str) { return isNumber(str) ? Integer.parseInt(str) : 0; }
@Test void testParseInteger() throws Exception { assertThat(StringUtils.parseInteger(null), equalTo(0)); assertThat(StringUtils.parseInteger("123"), equalTo(123)); }
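The focal parseInteger falls back to 0 whenever isNumber rejects the input, so non-numeric strings are also worth asserting. A sketch of two extra cases (assuming isNumber returns false for empty and alphabetic strings, as its name implies):

assertThat(StringUtils.parseInteger(""), equalTo(0));
assertThat(StringUtils.parseInteger("abc"), equalTo(0));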
@Override public MapperResult findConfigInfoAggrByPageFetchRows(MapperContext context) { int startRow = context.getStartRow(); int pageSize = context.getPageSize(); String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID); String groupId = (String) context.getWhereParameter(FieldConstant.GROUP_ID); String tenantId = (String) context.getWhereParameter(FieldConstant.TENANT_ID); String sql = "SELECT data_id,group_id,tenant_id,datum_id,app_name,content FROM config_info_aggr WHERE data_id= ? AND " + "group_id= ? AND tenant_id= ? ORDER BY datum_id LIMIT " + startRow + "," + pageSize; List<Object> paramList = CollectionUtils.list(dataId, groupId, tenantId); return new MapperResult(sql, paramList); }
@Test void testFindConfigInfoAggrByPageFetchRows() { String dataId = "data-id"; String groupId = "group-id"; String tenantId = "tenant-id"; Integer startRow = 0; Integer pageSize = 5; MapperContext context = new MapperContext(); context.putWhereParameter(FieldConstant.DATA_ID, dataId); context.putWhereParameter(FieldConstant.GROUP_ID, groupId); context.putWhereParameter(FieldConstant.TENANT_ID, tenantId); context.setStartRow(startRow); context.setPageSize(pageSize); MapperResult mapperResult = configInfoAggrMapperByMySql.findConfigInfoAggrByPageFetchRows(context); String sql = mapperResult.getSql(); List<Object> paramList = mapperResult.getParamList(); assertEquals(sql, "SELECT data_id,group_id,tenant_id,datum_id,app_name,content FROM config_info_aggr WHERE " + "data_id= ? AND group_id= ? AND tenant_id= ? ORDER BY datum_id LIMIT 0,5"); assertEquals(paramList, Arrays.asList(dataId, groupId, tenantId)); }
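Note that the focal method concatenates startRow and pageSize into the LIMIT clause rather than binding them; both arrive as ints from MapperContext, so this is not directly injectable, but a bound-parameter variant is possible. A sketch only (assuming CollectionUtils.list accepts varargs, as its three-argument use above suggests):

String sql = "SELECT data_id,group_id,tenant_id,datum_id,app_name,content FROM config_info_aggr "
        + "WHERE data_id= ? AND group_id= ? AND tenant_id= ? ORDER BY datum_id LIMIT ?,?";
List<Object> paramList = CollectionUtils.list(dataId, groupId, tenantId, startRow, pageSize);
return new MapperResult(sql, paramList);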
public QueryBuilders.QueryBuilder convert(Expr conjunct) { return visit(conjunct); }
@Test public void testTranslateInPredicate() { SlotRef codeSlotRef = mockSlotRef("code", Type.INT); List<Expr> codeLiterals = new ArrayList<>(); IntLiteral codeLiteral1 = new IntLiteral(1); IntLiteral codeLiteral2 = new IntLiteral(2); IntLiteral codeLiteral3 = new IntLiteral(3); codeLiterals.add(codeLiteral1); codeLiterals.add(codeLiteral2); codeLiterals.add(codeLiteral3); InPredicate inPredicate = new InPredicate(codeSlotRef, codeLiterals, false); InPredicate notInPredicate = new InPredicate(codeSlotRef, codeLiterals, true); Assert.assertEquals("{\"terms\":{\"code\":[1,2,3]}}", queryConverter.convert(inPredicate).toString()); Assert.assertEquals("{\"bool\":{\"must_not\":{\"terms\":{\"code\":[1,2,3]}}}}", queryConverter.convert(notInPredicate).toString()); }
void portAddedHelper(DeviceEvent event) { log.debug("Instance port {} is detected from {}", event.port().annotations().value(PORT_NAME), event.subject().id()); // we check the existence of openstack port, in case VM creation // event comes before port creation if (osNetworkService.port(event.port()) != null) { processPortAdded(event.port()); } }
@Test public void testProcessPortAddedForUpdate() { org.onosproject.net.Port addedPort = new DefaultPort(DEV1, P1, true, ANNOTATIONS); DeviceEvent addedEvent = new DeviceEvent(DeviceEvent.Type.PORT_ADDED, DEV1, addedPort); target.portAddedHelper(addedEvent); //org.onosproject.net.Port updatedPort = new DefaultPort(DEV1, P2, true, ANNOTATIONS); //DeviceEvent updatedEvent = new DeviceEvent(DeviceEvent.Type.PORT_ADDED, DEV1, updatedPort); target.portAddedHelper(addedEvent); HostId hostId = HostId.hostId(HOST_MAC); HostDescription hostDesc = new DefaultHostDescription( HOST_MAC, VlanId.NONE, new HostLocation(CP11, System.currentTimeMillis()), ImmutableSet.of(HOST_IP11), ANNOTATIONS ); verifyHostResult(hostId, hostDesc); }
public String getSource() { return getFieldAs(String.class, FIELD_SOURCE); }
@Test public void testGetSource() throws Exception { assertEquals("bar", message.getSource()); }
public static boolean isEligibleForCarbonsDelivery(final Message stanza) { // To properly handle messages exchanged with a MUC (or similar service), the server must be able to identify MUC-related messages. // This can be accomplished by tracking the clients' presence in MUCs, or by checking for the <x xmlns="http://jabber.org/protocol/muc#user"> // element in messages. The following rules apply to MUC-related messages: if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user") != null) { // A <message/> containing a Direct MUC Invitations (XEP-0249) SHOULD be carbon-copied. if (containsChildElement(stanza, Set.of("x"), "jabber:x:conference")) { return true; } // A <message/> containing a Mediated Invitation SHOULD be carbon-copied. if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user") != null && stanza.getChildElement("x", "http://jabber.org/protocol/muc#user").element("invite") != null) { return true; } // A private <message/> from a local user to a MUC participant (sent to a full JID) SHOULD be carbon-copied // The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC, and MAY // inject the <x/> element into such carbon copies. Clients can not respond to carbon-copies of MUC-PMs // related to a MUC they are not joined to. Therefore, they SHOULD either ignore such carbon copies, or // provide a way for the user to join the MUC before answering. if (stanza.getTo() != null && stanza.getTo().getResource() != null && stanza.getFrom() != null && stanza.getFrom().getNode() != null && XMPPServer.getInstance().isLocal(stanza.getFrom())) { return true; // TODO The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC (OF-2780). } // A private <message/> from a MUC participant (received from a full JID) to a local user SHOULD NOT be // carbon-copied (these messages are already replicated by the MUC service to all joined client instances). if (stanza.getFrom() != null && stanza.getFrom().getResource() != null && stanza.getTo() != null && stanza.getTo().getNode() != null && XMPPServer.getInstance().isLocal(stanza.getTo())) { return false; } } // A <message/> of type "groupchat" SHOULD NOT be carbon-copied. if (stanza.getType() == Message.Type.groupchat) { return false; } // A <message/> is eligible for carbons delivery if it does not contain a <private/> child element... if (containsChildElement(stanza, Set.of("private", "received"), "urn:xmpp:carbons")) { return false; } // and if at least one of the following is true: // ... it is of type "chat". if (stanza.getType() == Message.Type.chat) { return true; } // ... it is of type "normal" and contains a <body> element. if ((stanza.getType() == null || stanza.getType() == Message.Type.normal) && stanza.getBody() != null) { return true; } // ... it contains payload elements typically used in IM if (containsChildElement(stanza, Set.of("request", "received"), "urn:xmpp:receipts") // Message Delivery Receipts (XEP-0184) || containsChildElement(stanza, Set.of("active", "inactive", "gone", "composing", "paused"), "http://jabber.org/protocol/chatstates") // Chat State Notifications (XEP-0085) || (containsChildElement(stanza, Set.of("markable", "received", "displayed", "acknowledged"), "urn:xmpp:chat-markers")) // Chat Markers (XEP-0333)). ) { return true; } // ... it is of type "error" and it was sent in response to a <message/> that was eligible for carbons delivery. // TODO implement me (OF-2779) return false; }
@Test public void testNormalWithBodyPrivate() throws Exception { // Setup test fixture. final Message input = new Message(); input.setType(Message.Type.normal); input.setBody("This message is part of unit test " + getClass()); input.getElement().addElement("private", "urn:xmpp:carbons:2"); // Execute system under test. final boolean result = Forwarded.isEligibleForCarbonsDelivery(input); // Verify results. assertFalse(result); }
public ZContext() { this(1); }
@Test(timeout = 5000) public void testZContext() { ZContext ctx = new ZContext(); ctx.createSocket(SocketType.PAIR); ctx.createSocket(SocketType.REQ); ctx.createSocket(SocketType.REP); ctx.createSocket(SocketType.PUB); ctx.createSocket(SocketType.SUB); ctx.close(); assertThat(ctx.getSockets().isEmpty(), is(true)); }
public String expand(final String remote) { return this.expand(remote, PREFIX); }
@Test public void testExpandPathWithDirectory() { final String expanded = new TildePathExpander(new Path("/home/jenkins", EnumSet.of(Path.Type.directory))) .expand("/~/f/s"); assertEquals("/home/jenkins/f/s", expanded); }
public static Future<Void> reconcileJmxSecret(Reconciliation reconciliation, SecretOperator secretOperator, SupportsJmx cluster) { return secretOperator.getAsync(reconciliation.namespace(), cluster.jmx().secretName()) .compose(currentJmxSecret -> { Secret desiredJmxSecret = cluster.jmx().jmxSecret(currentJmxSecret); if (desiredJmxSecret != null) { // Desired secret is not null => should be updated return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), desiredJmxSecret) .map((Void) null); } else if (currentJmxSecret != null) { // Desired secret is null but current is not => we should delete the secret return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), null) .map((Void) null); } else { // Both current and desired secret are null => nothing to do return Future.succeededFuture(); } }); }
@Test public void testEnabledJmxWithAuthWithMissingSecret(VertxTestContext context) { KafkaClusterSpec spec = new KafkaClusterSpecBuilder() .withNewJmxOptions() .withNewKafkaJmxAuthenticationPassword() .endKafkaJmxAuthenticationPassword() .endJmxOptions() .build(); JmxModel jmx = new JmxModel(NAMESPACE, NAME, LABELS, OWNER_REFERENCE, spec); SecretOperator mockSecretOps = mock(SecretOperator.class); when(mockSecretOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture()); ArgumentCaptor<Secret> secretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), any(), any(), secretCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(3)))); Checkpoint async = context.checkpoint(); ReconcilerUtils.reconcileJmxSecret(Reconciliation.DUMMY_RECONCILIATION, mockSecretOps, new MockJmxCluster(jmx)) .onComplete(context.succeeding(v -> context.verify(() -> { verify(mockSecretOps, times(1)).reconcile(eq(Reconciliation.DUMMY_RECONCILIATION), eq(NAMESPACE), eq(NAME), any()); Secret secret = secretCaptor.getValue(); assertThat(secret, is(notNullValue())); assertThat(secret.getMetadata().getName(), is(NAME)); assertThat(secret.getMetadata().getNamespace(), is(NAMESPACE)); assertThat(secret.getMetadata().getOwnerReferences(), is(List.of(OWNER_REFERENCE))); assertThat(secret.getMetadata().getLabels(), is(LABELS.toMap())); assertThat(secret.getMetadata().getAnnotations(), is(nullValue())); assertThat(secret.getData().size(), is(2)); assertThat(secret.getData().get("jmx-username"), is(notNullValue())); assertThat(secret.getData().get("jmx-password"), is(notNullValue())); async.flag(); }))); }
@Override public SinkRecord newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key, Schema valueSchema, Object value, Long timestamp, Iterable<Header> headers) { return new InternalSinkRecord(context, topic, kafkaPartition, keySchema, key, valueSchema, value, kafkaOffset(), timestamp, timestampType(), headers); }
@Test public void testNewRecordHeaders() { SinkRecord sinkRecord = new SinkRecord(TOPIC, 0, null, null, null, null, 10); ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>("test-topic", 0, 10, null, null); ProcessingContext<ConsumerRecord<byte[], byte[]>> context = new ProcessingContext<>(consumerRecord); InternalSinkRecord internalSinkRecord = new InternalSinkRecord(context, sinkRecord); assertTrue(internalSinkRecord.headers().isEmpty()); assertTrue(sinkRecord.headers().isEmpty()); SinkRecord newRecord = internalSinkRecord.newRecord(TOPIC, 0, null, null, null, null, null, Collections.singletonList(mock(Header.class))); assertEquals(1, newRecord.headers().size()); }
@Override public boolean createReservation(ReservationId reservationId, String user, Plan plan, ReservationDefinition contract) throws PlanningException { LOG.info("placing the following ReservationRequest: " + contract); try { boolean res = planner.createReservation(reservationId, user, plan, contract); if (res) { LOG.info("OUTCOME: SUCCESS, Reservation ID: " + reservationId.toString() + ", Contract: " + contract.toString()); } else { LOG.info("OUTCOME: FAILURE, Reservation ID: " + reservationId.toString() + ", Contract: " + contract.toString()); } return res; } catch (PlanningException e) { LOG.info("OUTCOME: FAILURE, Reservation ID: " + reservationId.toString() + ", Contract: " + contract.toString()); throw e; } }
@Test public void testAny() throws PlanningException { prepareBasicPlan(); // create an ANY request, with an impossible step (last in list, first // considered), // and two satisfiable ones. We expect the second one to be returned. ReservationDefinition rr = new ReservationDefinitionPBImpl(); rr.setArrival(100 * step); rr.setDeadline(120 * step); rr.setRecurrenceExpression(recurrenceExpression); ReservationRequests reqs = new ReservationRequestsPBImpl(); reqs.setInterpreter(ReservationRequestInterpreter.R_ANY); ReservationRequest r = ReservationRequest.newInstance( Resource.newInstance(1024, 1), 5, 5, 10 * step); ReservationRequest r2 = ReservationRequest.newInstance( Resource.newInstance(2048, 2), 10, 5, 10 * step); ReservationRequest r3 = ReservationRequest.newInstance( Resource.newInstance(1024, 1), 110, 110, 10 * step); List<ReservationRequest> list = new ArrayList<ReservationRequest>(); list.add(r); list.add(r2); list.add(r3); reqs.setReservationResources(list); rr.setReservationRequests(reqs); // submit to agent ReservationId reservationID = ReservationSystemTestUtil .getNewReservationId(); boolean res = agent.createReservation(reservationID, "u1", plan, rr); // validate results, we expect the second one to be accepted assertTrue("Agent-based allocation failed", res); assertTrue("Agent-based allocation failed", plan.getAllReservations() .size() == 3); ReservationAllocation cs = plan.getReservationById(reservationID); if (allocateLeft) { assertTrue(cs.toString(), check(cs, 100 * step, 110 * step, 5, 1024, 1)); } else { assertTrue(cs.toString(), check(cs, 110 * step, 120 * step, 20, 1024, 1)); } System.out.println("--------AFTER ANY ALLOCATION (queue: " + reservationID + ")----------"); System.out.println(plan.toString()); System.out.println(plan.toCumulativeString()); }
private JobMetrics getJobMetrics() throws IOException { if (cachedMetricResults != null) { // Metric results have been cached after the job ran. return cachedMetricResults; } JobMetrics result = dataflowClient.getJobMetrics(dataflowPipelineJob.getJobId()); if (dataflowPipelineJob.getState().isTerminal()) { // Add current query result to the cache. cachedMetricResults = result; } return result; }
@Test public void testMultipleCounterUpdates() throws IOException { AppliedPTransform<?, ?, ?> myStep2 = mock(AppliedPTransform.class); when(myStep2.getFullName()).thenReturn("myStepName"); BiMap<AppliedPTransform<?, ?, ?>, String> transformStepNames = HashBiMap.create(); transformStepNames.put(myStep2, "s2"); AppliedPTransform<?, ?, ?> myStep3 = mock(AppliedPTransform.class); when(myStep3.getFullName()).thenReturn("myStepName3"); transformStepNames.put(myStep3, "s3"); AppliedPTransform<?, ?, ?> myStep4 = mock(AppliedPTransform.class); when(myStep4.getFullName()).thenReturn("myStepName4"); transformStepNames.put(myStep4, "s4"); JobMetrics jobMetrics = new JobMetrics(); DataflowClient dataflowClient = mock(DataflowClient.class); when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics); DataflowPipelineJob job = mock(DataflowPipelineJob.class); DataflowPipelineOptions options = mock(DataflowPipelineOptions.class); when(options.isStreaming()).thenReturn(false); when(job.getDataflowOptions()).thenReturn(options); when(job.getState()).thenReturn(State.RUNNING); when(job.getJobId()).thenReturn(JOB_ID); when(job.getTransformStepNames()).thenReturn(transformStepNames); // The parser relies on the fact that one tentative and one committed metric update exist in // the job metrics results. jobMetrics.setMetrics( ImmutableList.of( makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1233L, false), makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1234L, true), makeCounterMetricUpdate("otherCounter", "otherNamespace", "s3", 12L, false), makeCounterMetricUpdate("otherCounter", "otherNamespace", "s3", 12L, true), makeCounterMetricUpdate("counterName", "otherNamespace", "s4", 1200L, false), makeCounterMetricUpdate("counterName", "otherNamespace", "s4", 1233L, true), // The following counter can not have its name translated thus it won't appear. makeCounterMetricUpdate("lostName", "otherNamespace", "s5", 1200L, false), makeCounterMetricUpdate("lostName", "otherNamespace", "s5", 1200L, true))); DataflowMetrics dataflowMetrics = new DataflowMetrics(job, dataflowClient); MetricQueryResults result = dataflowMetrics.allMetrics(); assertThat( result.getCounters(), containsInAnyOrder( attemptedMetricsResult("counterNamespace", "counterName", "myStepName", 1233L), attemptedMetricsResult("otherNamespace", "otherCounter", "myStepName3", 12L), attemptedMetricsResult("otherNamespace", "counterName", "myStepName4", 1200L))); assertThat( result.getCounters(), containsInAnyOrder( committedMetricsResult("counterNamespace", "counterName", "myStepName", 1233L), committedMetricsResult("otherNamespace", "otherCounter", "myStepName3", 12L), committedMetricsResult("otherNamespace", "counterName", "myStepName4", 1200L))); }
@Override public PartitionQuickStats buildQuickStats(ConnectorSession session, SemiTransactionalHiveMetastore metastore, SchemaTableName table, MetastoreContext metastoreContext, String partitionId, Iterator<HiveFileInfo> files) { requireNonNull(session); requireNonNull(metastore); requireNonNull(table); requireNonNull(metastoreContext); requireNonNull(partitionId); requireNonNull(files); if (!files.hasNext()) { return PartitionQuickStats.EMPTY; } // TODO: Consider refactoring storage and/or table format to the interface when we implement an ORC/Iceberg quick stats builder StorageFormat storageFormat; if (UNPARTITIONED_ID.getPartitionName().equals(partitionId)) { Table resolvedTable = metastore.getTable(metastoreContext, table.getSchemaName(), table.getTableName()).get(); storageFormat = resolvedTable.getStorage().getStorageFormat(); } else { Partition partition = metastore.getPartitionsByNames(metastoreContext, table.getSchemaName(), table.getTableName(), ImmutableList.of(new PartitionNameWithVersion(partitionId, Optional.empty()))).get(partitionId).get(); storageFormat = partition.getStorage().getStorageFormat(); } if (!PARQUET_SERDE_CLASS_NAMES.contains(storageFormat.getSerDe())) { // Not a parquet table/partition return PartitionQuickStats.EMPTY; } // We want to keep the number of files we use to build quick stats bounded, so that // 1. We can control total file IO overhead in a measurable way // 2. Planning time remains bounded // Future work here is to sample the file list, read their stats only and extrapolate the overall stats (TODO) List<CompletableFuture<ParquetMetadata>> footerFetchCompletableFutures = new ArrayList<>(); int filesCount = 0; while (files.hasNext()) { HiveFileInfo file = files.next(); filesCount++; Path path = file.getPath(); long fileSize = file.getLength(); HiveFileContext hiveFileContext = new HiveFileContext( true, NO_CACHE_CONSTRAINTS, Optional.empty(), OptionalLong.of(fileSize), OptionalLong.empty(), OptionalLong.empty(), file.getFileModifiedTime(), false); HdfsContext hdfsContext = new HdfsContext(session, table.getSchemaName(), table.getTableName()); Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path); footerFetchCompletableFutures.add(supplyAsync(() -> { Stopwatch footerFetchDuration = Stopwatch.createStarted(); try (FSDataInputStream inputStream = hdfsEnvironment.getFileSystem(hdfsContext, path).openFile(path, hiveFileContext); ParquetDataSource parquetDataSource = buildHdfsParquetDataSource(inputStream, path, stats)) { ParquetFileMetadata parquetFileMetadata = readFooter(parquetDataSource, fileSize, createDecryptor(configuration, path), getReadNullMaskedParquetEncryptedValue(session)); footerByteSizeDistribution.add(parquetFileMetadata.getMetadataSize()); return parquetFileMetadata.getParquetMetadata(); } catch (Exception e) { log.error(e); throw new RuntimeException(e); } finally { this.footerFetchDuration.add(footerFetchDuration.elapsed(MILLISECONDS), MILLISECONDS); } }, footerFetchExecutor)); } // Record a metric about how many files were seen session.getRuntimeStats().addMetricValue(String.format("ParquetQuickStatsBuilder/FileCount/%s/%s", table.getTableName(), partitionId), RuntimeUnit.NONE, filesCount); fileCountPerPartition.add(filesCount); HashMap<ColumnPath, ColumnQuickStats<?>> rolledUpColStats = new HashMap<>(); try { // Wait for footer reads to finish CompletableFuture<Void> overallCompletableFuture = CompletableFuture.allOf(footerFetchCompletableFutures.toArray(new CompletableFuture[0])); overallCompletableFuture.get(footerFetchTimeoutMillis, MILLISECONDS); for (CompletableFuture<ParquetMetadata> future : footerFetchCompletableFutures) { ParquetMetadata parquetMetadata = future.get(); processColumnMetadata(parquetMetadata, rolledUpColStats); } } catch (InterruptedException | ExecutionException | TimeoutException e) { log.error(e, "Failed to read/build stats from parquet footer"); throw new RuntimeException(e); } if (rolledUpColStats.isEmpty()) { return PartitionQuickStats.EMPTY; } return new PartitionQuickStats(partitionId, rolledUpColStats.values(), filesCount); }
@Test public void testStatsAreBuiltFromFooters() { String resourceDir = TestParquetQuickStatsBuilder.class.getClassLoader().getResource("quick_stats").toString(); // Table : TPCDS SF 0.01 store_sales ImmutableList<HiveFileInfo> hiveFileInfos = buildHiveFileInfos(resourceDir, "tpcds_store_sales_sf_point_01", 1); PartitionQuickStats partitionQuickStats = parquetQuickStatsBuilder.buildQuickStats(SESSION, metastore, new SchemaTableName(TEST_SCHEMA, TEST_TABLE), metastoreContext, UNPARTITIONED_ID.getPartitionName(), hiveFileInfos.iterator()); assertEquals(8, partitionQuickStats.getFileCount()); // We check a few of the columns Map<String, ? extends ColumnQuickStats<?>> columnQuickStatsMap = partitionQuickStats.getStats().stream().collect(toMap(ColumnQuickStats::getColumnName, v -> v)); assertEquals(columnQuickStatsMap.get("ss_promo_sk"), createLongStats("ss_promo_sk", 120527L, 5303L, 1L, 3L)); assertEquals(columnQuickStatsMap.get("ss_sold_date_sk"), createLongStats("ss_sold_date_sk", 120527L, 5335L, 2450816L, 2452642L)); assertEquals(columnQuickStatsMap.get("ss_quantity"), createIntegerStats("ss_quantity", 120527L, 5450L, 1, 100)); // DECIMAL columns are stored as binary arrays in parquet assertEquals(columnQuickStatsMap.get("ss_wholesale_cost"), createBinaryStats("ss_wholesale_cost", 120527L, 5369L)); // Table : TPCH orders table; 100 rows hiveFileInfos = buildHiveFileInfos(resourceDir, "tpch_orders_100_rows", 1); partitionQuickStats = parquetQuickStatsBuilder.buildQuickStats(SESSION, metastore, new SchemaTableName(TEST_SCHEMA, TEST_TABLE), metastoreContext, UNPARTITIONED_ID.getPartitionName(), hiveFileInfos.iterator()); assertEquals(1, partitionQuickStats.getFileCount()); columnQuickStatsMap = partitionQuickStats.getStats().stream().collect(toMap(ColumnQuickStats::getColumnName, v -> v)); // VARCHAR columns are stored as binary arrays in parquet assertEquals(columnQuickStatsMap.get("comment"), createBinaryStats("comment", 100L, 0L)); assertEquals(columnQuickStatsMap.get("orderdate"), createDateStats("orderdate", 100L, 0L, parse("1992-01-29"), parse("1998-07-24"))); assertEquals(columnQuickStatsMap.get("totalprice"), createDoubleStats("totalprice", 100L, 0L, 1373.4, 352797.28)); }
public String getLockMessage() throws KettleException { return repObj.getLockMessage(); }
@Test public void testGetLockMessage() throws Exception { when( mockEERepositoryObject.getLockMessage() ).thenReturn( LOCK_MESSAGE ); assertEquals( LOCK_MESSAGE, uiTransformation.getLockMessage() ); }
@Override public void run() { try { // make sure we call afterRun() even on crashes // and operate countdown latches, else we may hang the parallel runner if (steps == null) { beforeRun(); } if (skipped) { return; } int count = steps.size(); int index = 0; while ((index = nextStepIndex()) < count) { currentStep = steps.get(index); execute(currentStep); if (currentStepResult != null) { // can be null if debug step-back or hook skip result.addStepResult(currentStepResult); } } } catch (Exception e) { if (currentStepResult != null) { result.addStepResult(currentStepResult); } logError("scenario [run] failed\n" + StringUtils.throwableToString(e)); currentStepResult = result.addFakeStepResult("scenario [run] failed", e); } finally { if (!skipped) { afterRun(); if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) { featureRuntime.suite.abort(); } } if (caller.isNone()) { logAppender.close(); // reclaim memory } } }
@Test void testCallSingleThatReturnsJson() { run( "def res = karate.callSingle('called3.js')" ); matchVar("res", "{ varA: '2', varB: '3' }"); }
@PostMapping("/authorize") @Operation(summary = "申请授权", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【提交】调用") @Parameters({ @Parameter(name = "response_type", required = true, description = "响应类型", example = "code"), @Parameter(name = "client_id", required = true, description = "客户端编号", example = "tudou"), @Parameter(name = "scope", description = "授权范围", example = "userinfo.read"), // 使用 Map<String, Boolean> 格式,Spring MVC 暂时不支持这么接收参数 @Parameter(name = "redirect_uri", required = true, description = "重定向 URI", example = "https://www.iocoder.cn"), @Parameter(name = "auto_approve", required = true, description = "用户是否接受", example = "true"), @Parameter(name = "state", example = "1") }) public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType, @RequestParam("client_id") String clientId, @RequestParam(value = "scope", required = false) String scope, @RequestParam("redirect_uri") String redirectUri, @RequestParam(value = "auto_approve") Boolean autoApprove, @RequestParam(value = "state", required = false) String state) { @SuppressWarnings("unchecked") Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class); scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap()); // 0. 校验用户已经登录。通过 Spring Security 实现 // 1.1 校验 responseType 是否满足 code 或者 token 值 OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType); // 1.2 校验 redirectUri 重定向域名是否合法 + 校验 scope 是否在 Client 授权范围内 OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null, grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri); // 2.1 假设 approved 为 null,说明是场景一 if (Boolean.TRUE.equals(autoApprove)) { // 如果无法自动授权通过,则返回空 url,前端不进行跳转 if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) { return success(null); } } else { // 2.2 假设 approved 非 null,说明是场景二 // 如果计算后不通过,则跳转一个错误链接 if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) { return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state, "access_denied", "User denied access")); } } // 3.1 如果是 code 授权码模式,则发放 code 授权码,并重定向 List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue); if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) { return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state)); } // 3.2 如果是 token 则是 implicit 简化模式,则发送 accessToken 访问令牌,并重定向 return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state)); }
@Test // autoApprove = false, and approval is not granted public void testApproveOrDeny_ApproveNo() { // prepare parameters String responseType = "token"; String clientId = randomString(); String scope = "{\"read\": true, \"write\": false}"; String redirectUri = "https://www.iocoder.cn"; String state = "test"; // mock methods OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class); when(oauth2ClientService.validOAuthClientFromCache(eq(clientId), isNull(), eq("implicit"), eq(asSet("read", "write")), eq(redirectUri))).thenReturn(client); // invoke CommonResult<String> result = oauth2OpenController.approveOrDeny(responseType, clientId, scope, redirectUri, false, state); // assert assertEquals(0, result.getCode()); assertEquals("https://www.iocoder.cn#error=access_denied&error_description=User%20denied%20access&state=test", result.getData()); }
@Override public List<GrokPattern> saveAll(Collection<GrokPattern> patterns, ImportStrategy importStrategy) throws ValidationException { final Map<String, GrokPattern> newPatternsByName; try { newPatternsByName = patterns.stream().collect(Collectors.toMap(GrokPattern::name, Function.identity())); } catch (IllegalStateException e) { throw new ValidationException("The supplied Grok patterns contain conflicting names: " + e.getLocalizedMessage()); } final Map<String, GrokPattern> existingPatternsByName = loadAll().stream().collect(Collectors.toMap(GrokPattern::name, Function.identity())); if (importStrategy == ABORT_ON_CONFLICT) { final Sets.SetView<String> conflictingNames = Sets.intersection(newPatternsByName.keySet(), existingPatternsByName.keySet()); if (!conflictingNames.isEmpty()) { final Iterable<String> limited = Iterables.limit(conflictingNames, MAX_DISPLAYED_CONFLICTS); throw new ValidationException("The following Grok patterns already exist: " + StringUtils.join(limited, ", ") + (conflictingNames.size() > MAX_DISPLAYED_CONFLICTS ? " (+ " + (conflictingNames.size() - MAX_DISPLAYED_CONFLICTS) + " more)" : "") + "."); } } validateAllOrThrow(patterns, importStrategy); final List<GrokPattern> savedPatterns = patterns.stream() .map(newPattern -> { final GrokPattern existingPattern = existingPatternsByName.get(newPattern.name()); if (existingPattern != null) { return newPattern.toBuilder().id(existingPattern.id()).build(); } else { return newPattern; } }) .map(dbCollection::save) .map(WriteResult::getSavedObject).collect(Collectors.toList()); clusterBus.post(GrokPatternsUpdatedEvent.create(newPatternsByName.keySet())); return savedPatterns; }
@Test public void saveAllSucceedsWithValidGrokPatterns() throws ValidationException { final List<GrokPattern> grokPatterns = Arrays.asList( GrokPattern.create("NUMBER", "[0-9]+"), GrokPattern.create("INT", "[+-]?%{NUMBER}")); service.saveAll(grokPatterns, ABORT_ON_CONFLICT); verify(clusterEventBus, times(1)).post(any(GrokPatternsUpdatedEvent.class)); assertThat(collection.countDocuments()).isEqualTo(2L); }
@VisibleForTesting public SmsChannelDO validateSmsChannel(Long channelId) { SmsChannelDO channelDO = smsChannelService.getSmsChannel(channelId); if (channelDO == null) { throw exception(SMS_CHANNEL_NOT_EXISTS); } if (CommonStatusEnum.isDisable(channelDO.getStatus())) { throw exception(SMS_CHANNEL_DISABLE); } return channelDO; }
@Test public void testValidateSmsChannel_notExists() { // prepare parameters Long channelId = randomLongId(); // invoke and verify the exception assertServiceException(() -> smsTemplateService.validateSmsChannel(channelId), SMS_CHANNEL_NOT_EXISTS); }
public static Counter counter(MonitoringInfoMetricName metricName) { return new DelegatingCounter(metricName); }
@Test public void testOperationsUpdateCounterFromContainerWhenContainerIsPresent() { HashMap<String, String> labels = new HashMap<String, String>(); String urn = MonitoringInfoConstants.Urns.ELEMENT_COUNT; MonitoringInfoMetricName name = MonitoringInfoMetricName.named(urn, labels); MetricsContainer mockContainer = Mockito.mock(MetricsContainer.class); Counter mockCounter = Mockito.mock(Counter.class); when(mockContainer.getCounter(name)).thenReturn(mockCounter); Counter counter = LabeledMetrics.counter(name); MetricsEnvironment.setCurrentContainer(mockContainer); counter.inc(); verify(mockCounter).inc(1); counter.inc(47L); verify(mockCounter).inc(47); counter.dec(5L); verify(mockCounter).inc(-5); }
@Override public <K, V> ICache<K, V> getCache(String name) { checkNotNull(name, "Retrieving a cache instance with a null name is not allowed!"); return getCacheByFullName(HazelcastCacheManager.CACHE_MANAGER_PREFIX + name); }
@Test public void getCache_when_hazelcastExceptionIsThrown_then_isRethrown() { // when a HazelcastException occurs whose cause is not a ServiceNotFoundException HazelcastInstance hzInstance = mock(HazelcastInstance.class); when(hzInstance.getDistributedObject(anyString(), anyString())).thenThrow(new HazelcastException("mock exception")); ClientICacheManager clientCacheManager = new ClientICacheManager(hzInstance); // then the exception is rethrown assertThrows(HazelcastException.class, () -> clientCacheManager.getCache("any-cache")); }
public JetSqlRow project(Object key, Object value) { return project(key, null, value, null); }
@Test public void test_project() { KvRowProjector projector = new KvRowProjector( new QueryPath[]{QueryPath.KEY_PATH, QueryPath.VALUE_PATH}, new QueryDataType[]{INT, INT}, new IdentityTarget(), new IdentityTarget(), null, asList( MultiplyFunction.create(ColumnExpression.create(0, INT), ConstantExpression.create(2, INT), INT), DivideFunction.create(ColumnExpression.create(1, INT), ConstantExpression.create(2, INT), INT) ), mock(ExpressionEvalContext.class) ); JetSqlRow row = projector.project(1, 8); assertThat(row.getValues()).isEqualTo(new Object[]{2, 4}); }
public GeometricDistribution(double p) { if (p <= 0 || p > 1) { throw new IllegalArgumentException("Invalid p: " + p); } this.p = p; }
@Test public void testGeometricDistribution() { System.out.println("GeometricDistribution"); MathEx.setSeed(19650218); // to get repeatable results. GeometricDistribution instance = new GeometricDistribution(0.4); int[] data = instance.randi(1000); GeometricDistribution est = GeometricDistribution.fit(data); assertEquals(0.4, est.p, 1E-2); }
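The constructor rejects p outside (0, 1], a path the fit-based test above never exercises. A sketch of a guard-path test (assuming a JUnit version that provides Assert.assertThrows, i.e. 4.13+):

@Test
public void testInvalidP() {
    // p must lie in (0, 1]; p = 0 would make every trial fail and the distribution undefined
    assertThrows(IllegalArgumentException.class, () -> new GeometricDistribution(0.0));
    assertThrows(IllegalArgumentException.class, () -> new GeometricDistribution(1.5));
}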
public void decode(ByteBuf buffer) { boolean last; int statusCode; while (true) { switch(state) { case READ_COMMON_HEADER: if (buffer.readableBytes() < SPDY_HEADER_SIZE) { return; } int frameOffset = buffer.readerIndex(); int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET; int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET; buffer.skipBytes(SPDY_HEADER_SIZE); boolean control = (buffer.getByte(frameOffset) & 0x80) != 0; int version; int type; if (control) { // Decode control frame common header version = getUnsignedShort(buffer, frameOffset) & 0x7FFF; type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET); streamId = 0; // Default to session Stream-ID } else { // Decode data frame common header version = spdyVersion; // Default to expected version type = SPDY_DATA_FRAME; streamId = getUnsignedInt(buffer, frameOffset); } flags = buffer.getByte(flagsOffset); length = getUnsignedMedium(buffer, lengthOffset); // Check version first then validity if (version != spdyVersion) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SPDY Version"); } else if (!isValidFrameHeader(streamId, type, flags, length)) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid Frame Error"); } else { state = getNextState(type, length); } break; case READ_DATA_FRAME: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0)); break; } // Generate data frames that do not exceed maxChunkSize int dataLength = Math.min(maxChunkSize, length); // Wait until entire frame is readable if (buffer.readableBytes() < dataLength) { return; } ByteBuf data = buffer.alloc().buffer(dataLength); data.writeBytes(buffer, dataLength); length -= dataLength; if (length == 0) { state = State.READ_COMMON_HEADER; } last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN); delegate.readDataFrame(streamId, last, data); break; case READ_SYN_STREAM_FRAME: if (buffer.readableBytes() < 10) { return; } int offset = buffer.readerIndex(); streamId = getUnsignedInt(buffer, offset); int associatedToStreamId = getUnsignedInt(buffer, offset + 4); byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07); last = hasFlag(flags, SPDY_FLAG_FIN); boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL); buffer.skipBytes(10); length -= 10; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_STREAM Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); } break; case READ_SYN_REPLY_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_REPLY Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynReplyFrame(streamId, last); } break; case READ_RST_STREAM_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (streamId == 0 || statusCode == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid RST_STREAM Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readRstStreamFrame(streamId, statusCode); } break; case READ_SETTINGS_FRAME: if (buffer.readableBytes() < 4) { return; } boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR); numSettings = getUnsignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); length -= 4; // Validate frame length against number of entries. Each ID/Value entry is 8 bytes. if ((length & 0x07) != 0 || length >> 3 != numSettings) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SETTINGS Frame"); } else { state = State.READ_SETTING; delegate.readSettingsFrame(clear); } break; case READ_SETTING: if (numSettings == 0) { state = State.READ_COMMON_HEADER; delegate.readSettingsEnd(); break; } if (buffer.readableBytes() < 8) { return; } byte settingsFlags = buffer.getByte(buffer.readerIndex()); int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1); int value = getSignedInt(buffer, buffer.readerIndex() + 4); boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE); boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED); buffer.skipBytes(8); --numSettings; delegate.readSetting(id, value, persistValue, persisted); break; case READ_PING_FRAME: if (buffer.readableBytes() < 4) { return; } int pingId = getSignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); state = State.READ_COMMON_HEADER; delegate.readPingFrame(pingId); break; case READ_GOAWAY_FRAME: if (buffer.readableBytes() < 8) { return; } int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); state = State.READ_COMMON_HEADER; delegate.readGoAwayFrame(lastGoodStreamId, statusCode); break; case READ_HEADERS_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid HEADERS Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readHeadersFrame(streamId, last); } break; case READ_WINDOW_UPDATE_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (deltaWindowSize == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid WINDOW_UPDATE Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readWindowUpdateFrame(streamId, deltaWindowSize); } break; case READ_HEADER_BLOCK: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readHeaderBlockEnd(); break; } if (!buffer.isReadable()) { return; } int compressedBytes = Math.min(buffer.readableBytes(), length); ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes); headerBlock.writeBytes(buffer, compressedBytes); length -= compressedBytes; delegate.readHeaderBlock(headerBlock); break; case DISCARD_FRAME: int numBytes = Math.min(buffer.readableBytes(), length); buffer.skipBytes(numBytes); length -= numBytes; if (length == 0) { state = State.READ_COMMON_HEADER; break; } return; case FRAME_ERROR: buffer.skipBytes(buffer.readableBytes()); return; default: throw new Error("Shouldn't reach here."); } } }
@Test public void testIllegalSpdyRstStreamFrameStreamId() throws Exception { short type = 3; byte flags = 0; int length = 8; int streamId = 0; // invalid stream identifier int statusCode = RANDOM.nextInt() | 0x01; ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); encodeControlFrameHeader(buf, type, flags, length); buf.writeInt(streamId); buf.writeInt(statusCode); decoder.decode(buf); verify(delegate).readFrameError(anyString()); assertFalse(buf.isReadable()); buf.release(); }
public List<SchemaChangeEvent> applySchemaChange(SchemaChangeEvent schemaChangeEvent) { List<SchemaChangeEvent> events = new ArrayList<>(); TableId originalTable = schemaChangeEvent.tableId(); boolean noRouteMatched = true; for (Tuple3<Selectors, String, String> route : routes) { // Check routing table if (!route.f0.isMatch(originalTable)) { continue; } noRouteMatched = false; // Matched a routing rule TableId derivedTable = resolveReplacement(originalTable, route); Set<TableId> originalTables = derivationMapping.computeIfAbsent(derivedTable, t -> new HashSet<>()); originalTables.add(originalTable); if (originalTables.size() == 1) { // single source mapping, replace the table ID directly SchemaChangeEvent derivedSchemaChangeEvent = ChangeEventUtils.recreateSchemaChangeEvent(schemaChangeEvent, derivedTable); events.add(derivedSchemaChangeEvent); } else { // multiple source mapping (merging tables) Schema derivedTableSchema = schemaManager.getLatestEvolvedSchema(derivedTable).get(); events.addAll( Objects.requireNonNull( SchemaChangeEventVisitor.visit( schemaChangeEvent, addColumnEvent -> handleAddColumnEvent( addColumnEvent, derivedTableSchema, derivedTable), alterColumnTypeEvent -> handleAlterColumnTypeEvent( alterColumnTypeEvent, derivedTableSchema, derivedTable), createTableEvent -> handleCreateTableEvent( createTableEvent, derivedTableSchema, derivedTable), dropColumnEvent -> Collections.emptyList(), // Column drop shouldn't be // spread to route // destination. dropTableEvent -> Collections.emptyList(), // Table drop shouldn't be // spread to route // destination. renameColumnEvent -> handleRenameColumnEvent( renameColumnEvent, derivedTableSchema, derivedTable), truncateTableEvent -> Collections.emptyList() // // Table truncation // shouldn't be spread to route // destination. ))); } } if (noRouteMatched) { // No routes are matched, leave it as-is return Collections.singletonList(schemaChangeEvent); } else { return events; } }
@Test void testOneToOneMapping() { SchemaDerivation schemaDerivation = new SchemaDerivation(new SchemaManager(), ROUTES, new HashMap<>()); // Create table List<SchemaChangeEvent> derivedChangesAfterCreateTable = schemaDerivation.applySchemaChange(new CreateTableEvent(TABLE_1, SCHEMA)); assertThat(derivedChangesAfterCreateTable).hasSize(1); assertThat(derivedChangesAfterCreateTable.get(0)) .asCreateTableEvent() .hasTableId(MERGED_TABLE) .hasSchema(SCHEMA); // Add column AddColumnEvent.ColumnWithPosition newCol1 = new AddColumnEvent.ColumnWithPosition( new PhysicalColumn("new_col1", DataTypes.STRING(), null)); AddColumnEvent.ColumnWithPosition newCol2 = new AddColumnEvent.ColumnWithPosition( new PhysicalColumn("new_col2", DataTypes.STRING(), null)); List<AddColumnEvent.ColumnWithPosition> newColumns = Arrays.asList(newCol1, newCol2); List<SchemaChangeEvent> derivedChangesAfterAddColumn = schemaDerivation.applySchemaChange(new AddColumnEvent(TABLE_1, newColumns)); assertThat(derivedChangesAfterAddColumn).hasSize(1); assertThat(derivedChangesAfterAddColumn.get(0)) .asAddColumnEvent() .hasTableId(MERGED_TABLE) .containsAddedColumns(newCol1, newCol2); // Alter column type ImmutableMap<String, DataType> typeMapping = ImmutableMap.of("age", DataTypes.BIGINT()); List<SchemaChangeEvent> derivedChangesAfterAlterTableType = schemaDerivation.applySchemaChange(new AlterColumnTypeEvent(TABLE_1, typeMapping)); assertThat(derivedChangesAfterAlterTableType).hasSize(1); assertThat(derivedChangesAfterAlterTableType.get(0)) .asAlterColumnTypeEvent() .hasTableId(MERGED_TABLE) .containsTypeMapping(typeMapping); // Drop column List<String> droppedColumns = Arrays.asList("new_col1", "new_col2"); List<SchemaChangeEvent> derivedChangesAfterDropColumn = schemaDerivation.applySchemaChange(new DropColumnEvent(TABLE_1, droppedColumns)); assertThat(derivedChangesAfterDropColumn).hasSize(1); assertThat(derivedChangesAfterDropColumn.get(0)) .asDropColumnEvent() .hasTableId(MERGED_TABLE) .containsDroppedColumns("new_col1", "new_col2"); // Rename column Map<String, String> renamedColumns = ImmutableMap.of("name", "last_name"); List<SchemaChangeEvent> derivedChangesAfterRenameColumn = schemaDerivation.applySchemaChange(new RenameColumnEvent(TABLE_1, renamedColumns)); assertThat(derivedChangesAfterRenameColumn).hasSize(1); assertThat(derivedChangesAfterRenameColumn.get(0)) .asRenameColumnEvent() .hasTableId(MERGED_TABLE) .containsNameMapping(renamedColumns); }
@Override public void checkCanSetCatalogSessionProperty(ConnectorTransactionHandle transactionHandle, ConnectorIdentity identity, AccessControlContext context, String propertyName) { if (!canSetSessionProperty(identity, propertyName)) { denySetSessionProperty(propertyName); } }
@Test public void testSessionPropertyRules() throws IOException { ConnectorAccessControl accessControl = createAccessControl("session_property.json"); accessControl.checkCanSetCatalogSessionProperty(TRANSACTION_HANDLE, user("admin"), CONTEXT, "dangerous"); accessControl.checkCanSetCatalogSessionProperty(TRANSACTION_HANDLE, user("alice"), CONTEXT, "safe"); accessControl.checkCanSetCatalogSessionProperty(TRANSACTION_HANDLE, user("alice"), CONTEXT, "unsafe"); accessControl.checkCanSetCatalogSessionProperty(TRANSACTION_HANDLE, user("bob"), CONTEXT, "safe"); assertDenied(() -> accessControl.checkCanSetCatalogSessionProperty(TRANSACTION_HANDLE, user("bob"), CONTEXT, "unsafe")); assertDenied(() -> accessControl.checkCanSetCatalogSessionProperty(TRANSACTION_HANDLE, user("alice"), CONTEXT, "dangerous")); assertDenied(() -> accessControl.checkCanSetCatalogSessionProperty(TRANSACTION_HANDLE, user("charlie"), CONTEXT, "safe")); }
public static KTableHolder<GenericKey> build( final KGroupedStreamHolder groupedStream, final StreamAggregate aggregate, final RuntimeBuildContext buildContext, final MaterializedFactory materializedFactory) { return build( groupedStream, aggregate, buildContext, materializedFactory, new AggregateParamsFactory() ); }
@Test public void shouldBuildUnwindowedAggregateWithCorrectSchema() { // Given: givenUnwindowedAggregate(); // When: final KTableHolder<GenericKey> result = aggregate.build(planBuilder, planInfo); // Then: assertThat(result.getSchema(), is(OUTPUT_SCHEMA)); }
public int compare(boolean b1, boolean b2) { throw new UnsupportedOperationException( "compare(boolean, boolean) was called on a non-boolean comparator: " + toString()); }
@Test public void testBooleanComparator() { Boolean[] valuesInAscendingOrder = {null, false, true}; for (int i = 0; i < valuesInAscendingOrder.length; ++i) { for (int j = 0; j < valuesInAscendingOrder.length; ++j) { Boolean vi = valuesInAscendingOrder[i]; Boolean vj = valuesInAscendingOrder[j]; int exp = i - j; assertSignumEquals(vi, vj, exp, BOOLEAN_COMPARATOR.compare(vi, vj)); if (vi != null && vj != null) { assertSignumEquals(vi, vj, exp, BOOLEAN_COMPARATOR.compare(vi.booleanValue(), vj.booleanValue())); } } } checkThrowingUnsupportedException(BOOLEAN_COMPARATOR, Boolean.TYPE); }
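The ordering the test above encodes is null-first: {null, false, true}. For comparison, the same semantics fall out of plain JDK comparators; a minimal sketch, where the class name and demo values are illustrative and BOOLEAN_COMPARATOR itself remains the project's own implementation:

import java.util.Arrays;
import java.util.Comparator;

public class NullsFirstBooleanDemo {
    public static void main(String[] args) {
        // nullsFirst wraps the natural Boolean order (false < true) so that null
        // sorts lowest, matching valuesInAscendingOrder = {null, false, true}.
        Comparator<Boolean> cmp = Comparator.nullsFirst(Comparator.<Boolean>naturalOrder());
        Boolean[] values = {true, null, false};
        Arrays.sort(values, cmp);
        System.out.println(Arrays.toString(values)); // prints [null, false, true]
    }
}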
@Override public void identify(String distinctId) { }
@Test public void identify() { mSensorsAPI.identify("abcde"); Assert.assertNull(mSensorsAPI.getAnonymousId()); }
public void setRetryContext(int maxRetries, int retryInterval, long failuresValidityInterval) { ContainerRetryContext retryContext = ContainerRetryContext .newInstance(ContainerRetryPolicy.RETRY_ON_ALL_ERRORS, null, maxRetries, retryInterval, failuresValidityInterval); containerLaunchContext.setContainerRetryContext(retryContext); }
@Test public void testContainerRetries() throws Exception { DefaultProviderService providerService = new DefaultProviderService(); AbstractLauncher mockLauncher = mock(AbstractLauncher.class); ContainerLaunchService.ComponentLaunchContext componentLaunchContext = mock(ContainerLaunchService.ComponentLaunchContext.class); ComponentInstance componentInstance = mock(ComponentInstance.class); //Never Restart Policy Component component = mock(Component.class); when(componentInstance.getComponent()).thenReturn(component); when(component.getRestartPolicyHandler()).thenReturn(NeverRestartPolicy .getInstance()); providerService.buildContainerRetry(mockLauncher, getConfig(), componentLaunchContext, componentInstance); verifyZeroInteractions(mockLauncher); //OnFailure restart policy when(component.getRestartPolicyHandler()).thenReturn(OnFailureRestartPolicy .getInstance()); when(componentLaunchContext.getConfiguration()).thenReturn(new Configuration()); providerService.buildContainerRetry(mockLauncher, getConfig(), componentLaunchContext, componentInstance); verify(mockLauncher).setRetryContext(DEFAULT_CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_INTERVAL, DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL); reset(mockLauncher); //Always restart policy when(component.getRestartPolicyHandler()).thenReturn(AlwaysRestartPolicy .getInstance()); providerService.buildContainerRetry(mockLauncher, getConfig(), componentLaunchContext, componentInstance); verify(mockLauncher).setRetryContext(DEFAULT_CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_INTERVAL, DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL); }
public static <NodeT, EdgeT> List<List<NodeT>> allPathsFromRootsToLeaves( Network<NodeT, EdgeT> network) { ArrayDeque<List<NodeT>> paths = new ArrayDeque<>(); // Populate the list with all roots for (NodeT node : network.nodes()) { if (network.inDegree(node) == 0) { paths.add(ImmutableList.of(node)); } } List<List<NodeT>> distinctPathsFromRootsToLeaves = new ArrayList<>(); while (!paths.isEmpty()) { List<NodeT> path = paths.removeFirst(); NodeT lastNode = path.get(path.size() - 1); if (network.outDegree(lastNode) == 0) { distinctPathsFromRootsToLeaves.add(new ArrayList<>(path)); } else { for (EdgeT edge : network.outEdges(lastNode)) { paths.addFirst( ImmutableList.<NodeT>builder() .addAll(path) .add(network.incidentNodes(edge).target()) .build()); } } } return distinctPathsFromRootsToLeaves; }
@Test public void testAllPathsFromRootsToLeaves() { // Expected paths: // D // A, B, C, F // A, B, E, G // A, B, E, G (again) // A, B, E, H // I, J, E, G // I, J, E, G (again) // I, J, E, H // I, E, G // I, E, G (again) // I, E, H // I, K, L // M, N, L // M, N, L (again) // O List<List<String>> expectedPaths = ImmutableList.of( ImmutableList.of("D"), ImmutableList.of("A", "B", "C", "F"), ImmutableList.of("A", "B", "E", "G"), ImmutableList.of("A", "B", "E", "G"), ImmutableList.of("A", "B", "E", "H"), ImmutableList.of("I", "J", "E", "G"), ImmutableList.of("I", "J", "E", "G"), ImmutableList.of("I", "J", "E", "H"), ImmutableList.of("I", "E", "G"), ImmutableList.of("I", "E", "G"), ImmutableList.of("I", "E", "H"), ImmutableList.of("I", "K", "L"), ImmutableList.of("M", "N", "L"), ImmutableList.of("M", "N", "L"), ImmutableList.of("O")); MutableNetwork<String, String> network = createNetwork(); List<List<String>> actualPaths = Networks.allPathsFromRootsToLeaves(network); assertThat(actualPaths, containsInAnyOrder(expectedPaths.toArray())); assertEquals(actualPaths.size(), expectedPaths.size()); }
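Why the expected list above repeats identical paths: the traversal expands every out-edge, so parallel edges each contribute a distinct root-to-leaf path even when the node sequence is the same. A minimal sketch, assuming Guava's common.graph is on the classpath and that Networks is the utility class holding allPathsFromRootsToLeaves above; the node and edge labels are made up for the demo:

import com.google.common.graph.MutableNetwork;
import com.google.common.graph.NetworkBuilder;
import java.util.List;

public class ParallelEdgePathsDemo {
    public static void main(String[] args) {
        // Two parallel edges A->B mean the path [A, B, C] is reported twice,
        // mirroring the "(again)" entries in the test's expected paths.
        MutableNetwork<String, String> network =
                NetworkBuilder.directed().allowsParallelEdges(true).build();
        network.addEdge("A", "B", "A->B#1");
        network.addEdge("A", "B", "A->B#2");
        network.addEdge("B", "C", "B->C");
        // Networks is assumed to be the class containing the focal method above.
        List<List<String>> paths = Networks.allPathsFromRootsToLeaves(network);
        System.out.println(paths); // [[A, B, C], [A, B, C]]
    }
}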
public double getX01FromLongitude(double longitude, boolean wrapEnabled) { longitude = wrapEnabled ? Clip(longitude, getMinLongitude(), getMaxLongitude()) : longitude; final double result = getX01FromLongitude(longitude); return wrapEnabled ? Clip(result, 0, 1) : result; }
@Test public void testGetX01FromLongitude() { final int iterations = 10; for (int i = 0; i <= iterations; i++) { final double longitude = tileSystem.getMinLongitude() + i * (tileSystem.getMaxLongitude() - tileSystem.getMinLongitude()) / iterations; checkXY01(((double) i) / iterations, tileSystem.getX01FromLongitude(longitude, true)); } }
public void schedule(ExecutableMethod<?, ?> method) { if (hasParametersOutsideOfJobContext(method.getTargetMethod())) { throw new IllegalStateException("Methods annotated with " + Recurring.class.getName() + " can only have zero parameters or a single parameter of type JobContext."); } String id = getId(method); String cron = getCron(method); String interval = getInterval(method); if (StringUtils.isNullOrEmpty(cron) && StringUtils.isNullOrEmpty(interval)) throw new IllegalArgumentException("Either cron or interval attribute is required."); if (isNotNullOrEmpty(cron) && isNotNullOrEmpty(interval)) throw new IllegalArgumentException("Both cron and interval attribute provided. Only one is allowed."); if (Recurring.RECURRING_JOB_DISABLED.equals(cron) || Recurring.RECURRING_JOB_DISABLED.equals(interval)) { if (id == null) { LOGGER.warn("You are trying to disable a recurring job using placeholders but did not define an id."); } else { jobScheduler.deleteRecurringJob(id); } } else { JobDetails jobDetails = getJobDetails(method); ZoneId zoneId = getZoneId(method); if (isNotNullOrEmpty(cron)) { jobScheduler.scheduleRecurrently(id, jobDetails, CronExpression.create(cron), zoneId); } else { jobScheduler.scheduleRecurrently(id, jobDetails, new Interval(interval), zoneId); } } }
@Test void beansWithMethodsAnnotatedWithRecurringCronAnnotationUsingJobContextWillAutomaticallyBeRegistered() { final ExecutableMethod executableMethod = mock(ExecutableMethod.class); final Method method = getRequiredMethod(MyServiceWithRecurringCronJobUsingJobContext.class, "myRecurringMethod", JobContext.class); when(executableMethod.getTargetMethod()).thenReturn(method); when(executableMethod.stringValue(Recurring.class, "id")).thenReturn(Optional.of("my-recurring-job")); when(executableMethod.stringValue(Recurring.class, "cron")).thenReturn(Optional.of("*/15 * * * *")); when(executableMethod.stringValue(Recurring.class, "interval")).thenReturn(Optional.empty()); when(executableMethod.stringValue(Recurring.class, "zoneId")).thenReturn(Optional.empty()); jobRunrRecurringJobScheduler.schedule(executableMethod); verify(jobScheduler).scheduleRecurrently(eq("my-recurring-job"), jobDetailsArgumentCaptor.capture(), eq(CronExpression.create("*/15 * * * *")), eq(ZoneId.systemDefault())); final JobDetails actualJobDetails = jobDetailsArgumentCaptor.getValue(); assertThat(actualJobDetails) .isCacheable() .hasClassName(MyServiceWithRecurringCronJobUsingJobContext.class.getName()) .hasMethodName("myRecurringMethod") .hasJobContextArg(); }
public List<ContainerLogMeta> collect( LogAggregationFileController fileController) throws IOException { List<ContainerLogMeta> containersLogMeta = new ArrayList<>(); RemoteIterator<FileStatus> appDirs = fileController. getApplicationDirectoriesOfUser(logsRequest.getUser()); while (appDirs.hasNext()) { FileStatus currentAppDir = appDirs.next(); if (logsRequest.getAppId() == null || logsRequest.getAppId().equals(currentAppDir.getPath().getName())) { ApplicationId appId = ApplicationId.fromString( currentAppDir.getPath().getName()); RemoteIterator<FileStatus> nodeFiles = fileController .getNodeFilesOfApplicationDirectory(currentAppDir); while (nodeFiles.hasNext()) { FileStatus currentNodeFile = nodeFiles.next(); if (!logsRequest.getNodeId().match(currentNodeFile.getPath() .getName())) { continue; } if (currentNodeFile.getPath().getName().equals( logsRequest.getAppId() + ".har")) { Path p = new Path("har:///" + currentNodeFile.getPath().toUri().getRawPath()); nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p); continue; } try { Map<String, List<ContainerLogFileInfo>> metaFiles = fileController .getLogMetaFilesOfNode(logsRequest, currentNodeFile, appId); if (metaFiles == null) { continue; } metaFiles.entrySet().removeIf(entry -> !(logsRequest.getContainerId() == null || logsRequest.getContainerId().equals(entry.getKey()))); containersLogMeta.addAll(createContainerLogMetas( currentNodeFile.getPath().getName(), metaFiles)); } catch (IOException ioe) { LOG.warn("Can not get log meta from the log file:" + currentNodeFile.getPath() + "\n" + ioe.getMessage()); } } } } return containersLogMeta; }
@Test void testContainerIdExactMatch() throws IOException { ExtendedLogMetaRequest.ExtendedLogMetaRequestBuilder request = new ExtendedLogMetaRequest.ExtendedLogMetaRequestBuilder(); request.setAppId(null); request.setContainerId(attemptContainer.toString()); request.setFileName(null); request.setFileSize(null); request.setModificationTime(null); request.setNodeId(null); request.setUser(null); LogAggregationMetaCollector collector = new LogAggregationMetaCollector( request.build(), new YarnConfiguration()); List<ContainerLogMeta> res = collector.collect(fileController); List<ContainerLogFileInfo> allFile = res.stream() .flatMap(m -> m.getContainerLogMeta().stream()) .collect(Collectors.toList()); assertEquals(2, allFile.size()); assertTrue(allFile.stream().allMatch( f -> f.getFileName().contains(attemptContainer.toString()))); }
public static <U, V> Pair<U, V> pair(U a, V b) { return new Pair<>(a, b); }
@Test public void equalities(){ assertEquals(pair("a", "b"), pair("a", "b")); assertNotEquals(pair("a", "b"), pair("a", "c")); assertEquals(pair("a", pair("b", pair("c", null))), pair("a", pair("b", pair("c", null)))); }
public Tuple2<Long, Long> cancel() throws Exception { List<Tuple2<Future<? extends StateObject>, String>> pairs = new ArrayList<>(); pairs.add(new Tuple2<>(getKeyedStateManagedFuture(), "managed keyed")); pairs.add(new Tuple2<>(getKeyedStateRawFuture(), "raw keyed")); pairs.add(new Tuple2<>(getOperatorStateManagedFuture(), "managed operator")); pairs.add(new Tuple2<>(getOperatorStateRawFuture(), "raw operator")); pairs.add(new Tuple2<>(getInputChannelStateFuture(), "input channel")); pairs.add(new Tuple2<>(getResultSubpartitionStateFuture(), "result subpartition")); final long[] sizeTuple = new long[2]; try (Closer closer = Closer.create()) { for (Tuple2<Future<? extends StateObject>, String> pair : pairs) { closer.register( () -> { try { Tuple2<Long, Long> tuple = discardStateFuture(pair.f0); sizeTuple[0] += tuple.f0; sizeTuple[1] += tuple.f1; } catch (Exception e) { throw new RuntimeException( String.format( "Could not properly cancel %s state future", pair.f1), e); } }); } } return Tuple2.of(sizeTuple[0], sizeTuple[1]); }
@Test void testCancelReturnsStateSize() throws Exception { KeyGroupsStateHandle s1 = new KeyGroupsStateHandle( new KeyGroupRangeOffsets(0, 0), new ByteStreamStateHandle("", new byte[123])); KeyGroupsStateHandle s2 = new KeyGroupsStateHandle( new KeyGroupRangeOffsets(0, 0), new ByteStreamStateHandle("", new byte[456])); OperatorSnapshotFutures futures = new OperatorSnapshotFutures( DoneFuture.of(SnapshotResult.of(s1)), DoneFuture.of(SnapshotResult.of(s2)), DoneFuture.of(SnapshotResult.empty()), ExceptionallyDoneFuture.of(new RuntimeException()), ExceptionallyDoneFuture.of(new RuntimeException()), ExceptionallyDoneFuture.of(new RuntimeException())); long stateSize = s1.getStateSize() + s2.getStateSize(); assertThat(futures.cancel()).isEqualTo(Tuple2.of(stateSize, stateSize)); }
public static Client connect(String url, ChannelHandler... handler) throws RemotingException { return connect(URL.valueOf(url), handler); }
@Test void testConnect() throws RemotingException { Assertions.assertThrows(RuntimeException.class, () -> Transporters.connect((String) null)); Assertions.assertThrows(RuntimeException.class, () -> Transporters.connect((URL) null)); Assertions.assertNotNull(Transporters.connect(url)); Assertions.assertNotNull(Transporters.connect(url, channel)); Assertions.assertNotNull(Transporters.connect(url, channel, channel)); }
public static String hashpw(String password, String salt) throws IllegalArgumentException { BCrypt B; String real_salt; byte passwordb[], saltb[], hashed[]; char minor = (char) 0; int rounds, off = 0; StringBuilder rs = new StringBuilder(); if (salt == null) { throw new IllegalArgumentException("salt cannot be null"); } int saltLength = salt.length(); if (saltLength < 28) { throw new IllegalArgumentException("Invalid salt"); } if (salt.charAt(0) != '$' || salt.charAt(1) != '2') { throw new IllegalArgumentException("Invalid salt version"); } if (salt.charAt(2) == '$') { off = 3; } else { minor = salt.charAt(2); if (minor != 'a' || salt.charAt(3) != '$') { throw new IllegalArgumentException("Invalid salt revision"); } off = 4; } if (saltLength - off < 25) { throw new IllegalArgumentException("Invalid salt"); } // Extract number of rounds if (salt.charAt(off + 2) > '$') { throw new IllegalArgumentException("Missing salt rounds"); } rounds = Integer.parseInt(salt.substring(off, off + 2)); real_salt = salt.substring(off + 3, off + 25); try { passwordb = (password + (minor >= 'a' ? "\000" : "")).getBytes("UTF-8"); } catch (UnsupportedEncodingException uee) { throw new AssertionError("UTF-8 is not supported"); } saltb = decode_base64(real_salt, BCRYPT_SALT_LEN); B = new BCrypt(); hashed = B.crypt_raw(passwordb, saltb, rounds); rs.append("$2"); if (minor >= 'a') { rs.append(minor); } rs.append("$"); if (rounds < 10) { rs.append("0"); } rs.append(rounds); rs.append("$"); encode_base64(saltb, saltb.length, rs); encode_base64(hashed, bf_crypt_ciphertext.length * 4 - 1, rs); return rs.toString(); }
@Test public void testHashpw() { Assert.assertEquals( "$2a$10$......................0li5vIK0lccG/IXHAOP2wBncDW/oa2u", BCrypt.hashpw("foo", "$2a$10$......................")); Assert.assertEquals( "$2$09$......................GlnmyWmDnFB.MnSSUnFsiPvHsC2KPBm", BCrypt.hashpw("foo", "$2$09$......................")); }
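For context on the salt layout hashpw enforces above: a "$2a$" version tag, a two-digit cost (read by Integer.parseInt(salt.substring(off, off + 2))), then 22 base64 characters of raw salt. A minimal usage sketch, assuming the jBCrypt-style companion methods gensalt and checkpw exist alongside hashpw, as they do in the upstream library:

public class BCryptUsageDemo {
    public static void main(String[] args) {
        // gensalt(10) yields "$2a$10$" plus 22 base64 salt characters, i.e. exactly
        // the shape the hashpw parser above validates before hashing.
        String salt = BCrypt.gensalt(10);
        String hash = BCrypt.hashpw("foo", salt);
        System.out.println(BCrypt.checkpw("foo", hash)); // true
        System.out.println(BCrypt.checkpw("bar", hash)); // false
    }
}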
@Override protected String toHtmlDisplay(Element element, String query) { String label = element.getLabel(); int index = label.toLowerCase().indexOf(query.toLowerCase()); String before = label.substring(0, index); String match = label.substring(index, index + query.length()); String after = label.substring(index + query.length()); return NbBundle.getMessage(FuzzyElementLabelSearchProvider.class, "FuzzyElementLabelSearchProvider.result", before, match, after); }
@Test public void testFirstOnly() { Mockito.when(node.getLabel()).thenReturn("foobarfoo"); Assert.assertTrue( new FuzzyElementLabelSearchProvider().toHtmlDisplay(node, "oo").contains("f<b>oo</b>barfoo")); }
public static void main(String[] args) { Map<Integer, Instance> instanceMap = new HashMap<>(); var messageManager = new BullyMessageManager(instanceMap); var instance1 = new BullyInstance(messageManager, 1, 1); var instance2 = new BullyInstance(messageManager, 2, 1); var instance3 = new BullyInstance(messageManager, 3, 1); var instance4 = new BullyInstance(messageManager, 4, 1); var instance5 = new BullyInstance(messageManager, 5, 1); instanceMap.put(1, instance1); instanceMap.put(2, instance2); instanceMap.put(3, instance3); instanceMap.put(4, instance4); instanceMap.put(5, instance5); instance4.onMessage(new Message(MessageType.HEARTBEAT_INVOKE, "")); final var thread1 = new Thread(instance1); final var thread2 = new Thread(instance2); final var thread3 = new Thread(instance3); final var thread4 = new Thread(instance4); final var thread5 = new Thread(instance5); thread1.start(); thread2.start(); thread3.start(); thread4.start(); thread5.start(); instance1.setAlive(false); }
@Test void shouldExecuteApplicationWithoutException() { assertDoesNotThrow(() -> BullyApp.main(new String[]{})); }
@Override public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) { String propertyTypeName = getTypeName(node); JType type; if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) { type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema); } else if (node.has("existingJavaType")) { String typeName = node.path("existingJavaType").asText(); if (isPrimitive(typeName, jClassContainer.owner())) { type = primitiveType(typeName, jClassContainer.owner()); } else { type = resolveType(jClassContainer, typeName); } } else if (propertyTypeName.equals("string")) { type = jClassContainer.owner().ref(String.class); } else if (propertyTypeName.equals("number")) { type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("integer")) { type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("boolean")) { type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("array")) { type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema); } else { type = jClassContainer.owner().ref(Object.class); } if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) { type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema); } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) { type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema); } return type; }
@Test public void applyGeneratesDate() { JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName()); ObjectNode objectNode = new ObjectMapper().createObjectNode(); objectNode.put("type", "string"); TextNode formatNode = TextNode.valueOf("date-time"); objectNode.set("format", formatNode); JType mockDateType = mock(JType.class); FormatRule mockFormatRule = mock(FormatRule.class); when(mockFormatRule.apply(eq("fooBar"), eq(formatNode), any(), Mockito.isA(JType.class), isNull())).thenReturn(mockDateType); when(ruleFactory.getFormatRule()).thenReturn(mockFormatRule); JType result = rule.apply("fooBar", objectNode, null, jpackage, null); assertThat(result, equalTo(mockDateType)); }
public String documentationOf(String key) { ConfigDef.ConfigKey configKey = definition.configKeys().get(key); if (configKey == null) return null; return configKey.documentation; }
@Test public void testDocumentationOfExpectNull() { Properties props = new Properties(); TestIndirectConfigResolution config = new TestIndirectConfigResolution(props); assertNull(config.documentationOf("xyz")); }
@Override public boolean isAutoUpdate() { return packageDefinition.isAutoUpdate(); }
@Test public void shouldDelegateToPackageDefinitionForAutoUpdate() throws Exception { PackageDefinition packageDefinition = mock(PackageDefinition.class); when(packageDefinition.isAutoUpdate()).thenReturn(false); PackageMaterialConfig materialConfig = new PackageMaterialConfig(new CaseInsensitiveString("name"), "package-id", packageDefinition); assertThat(materialConfig.isAutoUpdate(), is(false)); verify(packageDefinition).isAutoUpdate(); }
@Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setName(NAME_PREFIX + count.getAndIncrement()); t.setDaemon(true); return t; }
@Test public void testName() { GitThreadFactory factory = new GitThreadFactory(); assertThat(factory.newThread(() -> { }).getName()).isEqualTo("git-scm-0"); assertThat(factory.newThread(() -> { }).getName()).isEqualTo("git-scm-1"); }
@Override public void run() { // top-level command, do nothing }
@Test public void test_submit_server_cli_version_minor_mismatch() { String serverVersion = "5.0.0"; System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION, serverVersion); Config cfg = smallInstanceConfig(); cfg.getJetConfig().setResourceUploadEnabled(true); String clusterName = randomName(); cfg.setClusterName(clusterName); hz = createHazelcastInstance(cfg); System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION, "5.1.0"); ClientConfig clientConfig = new ClientConfig(); clientConfig.setClusterName(clusterName); client = createHazelcastClient(clientConfig); BuildInfo buildInfo = BuildInfoProvider.getBuildInfo(); String clientVersion = buildInfo.getVersion(); assertThatThrownBy(() -> run("submit", testJobJarFile.toString())) .hasStackTraceContaining("Server and client must have matching minor version. Server version " + serverVersion + ", hz-cli version " + clientVersion); assertTrueEventually(() -> assertThat(hz.getJet().getJobs()).isEmpty()); assertTrueEventually(() -> assertContains(captureErr(), "ERROR: Server and client must have matching minor version. Server version " + serverVersion + ", hz-cli version " + clientVersion) ); }
Map<String, String> unprocessedNodes(ConfigNode node) { List<ConfigNode> nodes = new ArrayList<>(); findAllUnreadNodes(nodes, node); return nodes.stream().map(this::process).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); }
@Test public void shouldDetectUnappliedClientConfigEntries() { Map<String, String> entries = new HashMap<>(); entries.put("HZCLIENT_FOO", "foo"); entries.put("HZCLIENT_NETWORK_SOCKETINTERCEPTOR_ENABLE", "true"); entries.put("HZCLIENT_NETWORK_SMARTROUTING", "true"); ConfigNode configNode = PropertiesToNodeConverter.propsToNode(EnvVariablesConfigParser.client().parse(entries)); new YamlClientDomConfigProcessor(true, new ClientConfig(), false) .buildConfig(new ConfigOverrideElementAdapter(configNode)); Map<String, String> unprocessed = new ConfigNodeStateTracker().unprocessedNodes(configNode); assertTrue(unprocessed.containsKey("hazelcast-client.foo")); assertTrue(unprocessed.containsKey("hazelcast-client.network.socketinterceptor.enable")); assertFalse(unprocessed.containsKey("hazelcast-client.network.smartrouting")); }
public static int checkPattern( String text, String regexChar ) { return checkPattern( text, regexChar, "" ); }
@Test public void testCheckPattern() { // For more information, see: // https://docs.oracle.com/javase/tutorial/essential/regex/literals.html String metacharacters = "<([{\\^-=$!|]})?*+.>"; for( int i = 0; i < metacharacters.length(); i++ ) { int matches = TextFileInputUtils.checkPattern( metacharacters, String.valueOf( metacharacters.charAt( i ) ), null ); Assert.assertEquals( 1, matches ); } }
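The three-argument checkPattern overload is not shown here, so purely as a hedged illustration: one plausible way such a helper counts literal occurrences of a regex metacharacter is to neutralize it with Pattern.quote before compiling. A self-contained sketch under that assumption, not the actual Pentaho implementation:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MetacharCountDemo {
    public static void main(String[] args) {
        String metacharacters = "<([{\\^-=$!|]})?*+.>";
        // Pattern.quote turns "$" into a literal token instead of an end anchor,
        // so compile cannot throw and the count is exact.
        Matcher m = Pattern.compile(Pattern.quote("$")).matcher(metacharacters);
        int matches = 0;
        while (m.find()) {
            matches++;
        }
        System.out.println(matches); // 1, as the test asserts per character
    }
}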
@Override public Member next() { Member memberToReturn = nextMember; nextMember = null; if (memberToReturn != null) { return memberToReturn; } if (!advance()) { throw new NoSuchElementException("no more elements"); } memberToReturn = nextMember; nextMember = null; return memberToReturn; }
@Test(expected = NoSuchElementException.class) public void give() { int maxRetries = 0; addClusterMember(); RestartingMemberIterator iterator = new RestartingMemberIterator(mockClusterService, maxRetries); iterator.next(); //this should throw NoSuchElementException iterator.next(); }
public boolean hasAvailableDiskSpace() { return NameNodeResourcePolicy.areResourcesAvailable(volumes.values(), minimumRedundantVolumes); }
@Test public void testCheckAvailability() throws IOException { conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0); NameNodeResourceChecker nb = new NameNodeResourceChecker(conf); assertTrue( "isResourceAvailable must return true if " + "disk usage is lower than threshold", nb.hasAvailableDiskSpace()); }
public static boolean isEmail(CharSequence value) { return isMatchRegex(EMAIL, value); }
@Test public void isEmailTest() { final boolean email = Validator.isEmail("abc_cde@163.com"); assertTrue(email); final boolean email1 = Validator.isEmail("abc_%cde@163.com"); assertTrue(email1); final boolean email2 = Validator.isEmail("abc_%cde@aaa.c"); assertTrue(email2); final boolean email3 = Validator.isEmail("xiaolei.lu@aaa.b"); assertTrue(email3); final boolean email4 = Validator.isEmail("xiaolei.Lu@aaa.b"); assertTrue(email4); final boolean email5 = Validator.isEmail("luxiaolei_小磊@小磊.com", true); assertTrue(email5); }
@Override public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) { String taskType = RealtimeToOfflineSegmentsTask.TASK_TYPE; List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>(); for (TableConfig tableConfig : tableConfigs) { String realtimeTableName = tableConfig.getTableName(); if (tableConfig.getTableType() != TableType.REALTIME) { LOGGER.warn("Skip generating task: {} for non-REALTIME table: {}", taskType, realtimeTableName); continue; } LOGGER.info("Start generating task configs for table: {} for task: {}", realtimeTableName, taskType); // Only schedule 1 task of this type, per table Map<String, TaskState> incompleteTasks = TaskGeneratorUtils.getIncompleteTasks(taskType, realtimeTableName, _clusterInfoAccessor); if (!incompleteTasks.isEmpty()) { LOGGER.warn("Found incomplete tasks: {} for same table: {} and task type: {}. Skipping task generation.", incompleteTasks.keySet(), realtimeTableName, taskType); continue; } // Get all segment metadata for completed segments (DONE/UPLOADED status). List<SegmentZKMetadata> completedSegmentsZKMetadata = new ArrayList<>(); Map<Integer, String> partitionToLatestLLCSegmentName = new HashMap<>(); Set<Integer> allPartitions = new HashSet<>(); getCompletedSegmentsInfo(realtimeTableName, completedSegmentsZKMetadata, partitionToLatestLLCSegmentName, allPartitions); if (completedSegmentsZKMetadata.isEmpty()) { LOGGER.info("No realtime-completed segments found for table: {}, skipping task generation: {}", realtimeTableName, taskType); continue; } allPartitions.removeAll(partitionToLatestLLCSegmentName.keySet()); if (!allPartitions.isEmpty()) { LOGGER.info( "Partitions: {} have no completed segments. Table: {} is not ready for {}. Skipping task generation.", allPartitions, realtimeTableName, taskType); continue; } TableTaskConfig tableTaskConfig = tableConfig.getTaskConfig(); Preconditions.checkState(tableTaskConfig != null); Map<String, String> taskConfigs = tableTaskConfig.getConfigsForTaskType(taskType); Preconditions.checkState(taskConfigs != null, "Task config shouldn't be null for table: %s", realtimeTableName); // Get the bucket size and buffer String bucketTimePeriod = taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUCKET_TIME_PERIOD_KEY, DEFAULT_BUCKET_PERIOD); String bufferTimePeriod = taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUFFER_TIME_PERIOD_KEY, DEFAULT_BUFFER_PERIOD); long bucketMs = TimeUtils.convertPeriodToMillis(bucketTimePeriod); long bufferMs = TimeUtils.convertPeriodToMillis(bufferTimePeriod); // Get watermark from RealtimeToOfflineSegmentsTaskMetadata ZNode. WindowStart = watermark. WindowEnd = // windowStart + bucket. long windowStartMs = getWatermarkMs(realtimeTableName, completedSegmentsZKMetadata, bucketMs); long windowEndMs = windowStartMs + bucketMs; // Find all COMPLETED segments with data overlapping execution window: windowStart (inclusive) to windowEnd // (exclusive) List<String> segmentNames = new ArrayList<>(); List<String> downloadURLs = new ArrayList<>(); Set<String> lastLLCSegmentPerPartition = new HashSet<>(partitionToLatestLLCSegmentName.values()); boolean skipGenerate = false; while (true) { // Check that execution window is older than bufferTime if (windowEndMs > System.currentTimeMillis() - bufferMs) { LOGGER.info( "Window with start: {} and end: {} is not older than buffer time: {} configured as {} ago. Skipping task " + "generation: {}", windowStartMs, windowEndMs, bufferMs, bufferTimePeriod, taskType); skipGenerate = true; break; } for (SegmentZKMetadata segmentZKMetadata : completedSegmentsZKMetadata) { String segmentName = segmentZKMetadata.getSegmentName(); long segmentStartTimeMs = segmentZKMetadata.getStartTimeMs(); long segmentEndTimeMs = segmentZKMetadata.getEndTimeMs(); // Check overlap with window if (windowStartMs <= segmentEndTimeMs && segmentStartTimeMs < windowEndMs) { // If last completed segment is being used, make sure that segment crosses over end of window. // In the absence of this check, CONSUMING segments could contain some portion of the window. That data // would be skipped forever. if (lastLLCSegmentPerPartition.contains(segmentName) && segmentEndTimeMs < windowEndMs) { LOGGER.info("Window data overflows into CONSUMING segments for partition of segment: {}. Skipping task " + "generation: {}", segmentName, taskType); skipGenerate = true; break; } segmentNames.add(segmentName); downloadURLs.add(segmentZKMetadata.getDownloadUrl()); } } if (skipGenerate || !segmentNames.isEmpty()) { break; } LOGGER.info("Found no eligible segments for task: {} with window [{} - {}), moving to the next time bucket", taskType, windowStartMs, windowEndMs); windowStartMs = windowEndMs; windowEndMs += bucketMs; } if (skipGenerate) { continue; } Map<String, String> configs = MinionTaskUtils.getPushTaskConfig(realtimeTableName, taskConfigs, _clusterInfoAccessor); configs.putAll(getBaseTaskConfigs(tableConfig, segmentNames)); configs.put(MinionConstants.DOWNLOAD_URL_KEY, StringUtils.join(downloadURLs, MinionConstants.URL_SEPARATOR)); configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments"); // Segment processor configs configs.put(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, String.valueOf(windowStartMs)); configs.put(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, String.valueOf(windowEndMs)); String roundBucketTimePeriod = taskConfigs.get(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY); if (roundBucketTimePeriod != null) { configs.put(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY, roundBucketTimePeriod); } // NOTE: Check and put both keys for backward-compatibility String mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY); if (mergeType == null) { mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY); } if (mergeType != null) { configs.put(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, mergeType); configs.put(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY, mergeType); } for (Map.Entry<String, String> entry : taskConfigs.entrySet()) { if (entry.getKey().endsWith(RealtimeToOfflineSegmentsTask.AGGREGATION_TYPE_KEY_SUFFIX)) { configs.put(entry.getKey(), entry.getValue()); } } String maxNumRecordsPerSegment = taskConfigs.get(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY); if (maxNumRecordsPerSegment != null) { configs.put(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY, maxNumRecordsPerSegment); } pinotTaskConfigs.add(new PinotTaskConfig(taskType, configs)); LOGGER.info("Finished generating task configs for table: {} for task: {}", realtimeTableName, taskType); } return pinotTaskConfigs; }
@Test public void testTimeGap() { Map<String, Map<String, String>> taskConfigsMap = new HashMap<>(); taskConfigsMap.put(RealtimeToOfflineSegmentsTask.TASK_TYPE, new HashMap<>()); TableConfig realtimeTableConfig = getRealtimeTableConfig(taskConfigsMap); ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class); when(mockClusterInfoProvide.getTaskStates(RealtimeToOfflineSegmentsTask.TASK_TYPE)).thenReturn(new HashMap<>()); when(mockClusterInfoProvide.getMinionTaskMetadataZNRecord(RealtimeToOfflineSegmentsTask.TASK_TYPE, REALTIME_TABLE_NAME)).thenReturn( new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, 1590019200000L).toZNRecord()); // 21 May 2020 UTC SegmentZKMetadata segmentZKMetadata = getSegmentZKMetadata("testTable__0__1__12345", Status.DONE, 1590220800000L, 1590307200000L, TimeUnit.MILLISECONDS, "download2"); // 05-23-2020T08:00:00 UTC to 05-24-2020T08:00:00 UTC when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn( Collections.singletonList(segmentZKMetadata)); when(mockClusterInfoProvide.getIdealState(REALTIME_TABLE_NAME)).thenReturn(getIdealState(REALTIME_TABLE_NAME, Lists.newArrayList(segmentZKMetadata.getSegmentName()))); RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator(); generator.init(mockClusterInfoProvide); // Generated task should skip 2 days and have time window of [23 May 2020 UTC, 24 May 2020 UTC) List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig)); assertEquals(pinotTaskConfigs.size(), 1); Map<String, String> configs = pinotTaskConfigs.get(0).getConfigs(); assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY), "1590192000000"); assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY), "1590278400000"); }
public static KeyFormat sanitizeKeyFormat( final KeyFormat keyFormat, final List<SqlType> newKeyColumnSqlTypes, final boolean allowKeyFormatChangeToSupportNewKeySchema ) { return sanitizeKeyFormatWrapping( !allowKeyFormatChangeToSupportNewKeySchema ? keyFormat : sanitizeKeyFormatForTypeCompatibility( sanitizeKeyFormatForMultipleColumns( keyFormat, newKeyColumnSqlTypes.size()), newKeyColumnSqlTypes ), newKeyColumnSqlTypes.size() == 1 ); }
@Test public void shouldConvertKafkaFormatForSingleKeyWithUnsupportedPrimitiveType() { // Given: final KeyFormat format = KeyFormat.nonWindowed( FormatInfo.of(KafkaFormat.NAME), SerdeFeatures.of()); // When: final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, ImmutableList.of(SqlPrimitiveType.of(SqlBaseType.BOOLEAN)), true); // Then: assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(JsonFormat.NAME))); assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES))); }
public void finish(StreamTaskActionExecutor actionExecutor, StopMode stopMode) throws Exception { if (!isHead && stopMode == StopMode.DRAIN) { // NOTE: This is only done when the operator is a one-input operator. At present, // any non-head operator on the operator chain is a one-input operator. actionExecutor.runThrowing(() -> endOperatorInput(1)); } quiesceTimeServiceAndFinishOperator(actionExecutor, stopMode); // propagate the close operation to the next wrapper if (next != null) { next.finish(actionExecutor, stopMode); } }
@Test void testFinishingOperatorWithException() { AbstractStreamOperator<Void> streamOperator = new AbstractStreamOperator<Void>() { @Override public void finish() throws Exception { throw new Exception("test exception at finishing"); } }; StreamOperatorWrapper<?, ?> operatorWrapper = new StreamOperatorWrapper<>( streamOperator, Optional.ofNullable(streamOperator.getProcessingTimeService()), containingTask .getMailboxExecutorFactory() .createExecutor(Integer.MAX_VALUE - 1), true); assertThatThrownBy( () -> operatorWrapper.finish( containingTask.getActionExecutor(), StopMode.DRAIN)) .hasMessageContaining("test exception at finishing"); }
@Override protected void doStart() throws Exception { super.doStart(); reconnect(); }
@Test public void doStartTest() { producer.start(); verify(connection).addIRCEventListener(listener); verify(endpoint).joinChannels(); }
@NotNull public Map<OutputFile, String> getPathInfo() { return pathInfo; }
@Test void pathInfoTest() { ConfigBuilder configBuilder; Map<OutputFile, String> pathInfo; configBuilder = new ConfigBuilder(GeneratorBuilder.packageConfig(), DATA_SOURCE_CONFIG, GeneratorBuilder.strategyConfig(), null, null, null); pathInfo = configBuilder.getPathInfo(); Assertions.assertFalse(pathInfo.isEmpty()); Assertions.assertEquals(7, pathInfo.size()); Assertions.assertTrue(pathInfo.containsKey(OutputFile.entity)); Assertions.assertTrue(pathInfo.containsKey(OutputFile.controller)); Assertions.assertTrue(pathInfo.containsKey(OutputFile.service)); Assertions.assertTrue(pathInfo.containsKey(OutputFile.serviceImpl)); Assertions.assertTrue(pathInfo.containsKey(OutputFile.xml)); Assertions.assertTrue(pathInfo.containsKey(OutputFile.mapper)); Assertions.assertTrue(pathInfo.containsKey(OutputFile.parent)); configBuilder = new ConfigBuilder( GeneratorBuilder.packageConfigBuilder().pathInfo(Collections.singletonMap(OutputFile.entity, "/tmp/code/entity")).build(), DATA_SOURCE_CONFIG, GeneratorBuilder.strategyConfig(), null, null, null); pathInfo = configBuilder.getPathInfo(); Assertions.assertFalse(pathInfo.isEmpty()); Assertions.assertEquals(7, pathInfo.size()); Assertions.assertTrue(pathInfo.containsKey(OutputFile.entity)); }
@Nullable public static Map<String, Set<FieldConfig.IndexType>> getSkipIndexes(Map<String, String> queryOptions) { // Example config: skipIndexes='col1=inverted,range&col2=inverted' String skipIndexesStr = queryOptions.get(QueryOptionKey.SKIP_INDEXES); if (skipIndexesStr == null) { return null; } String[] perColumnIndexSkip = StringUtils.split(skipIndexesStr, '&'); Map<String, Set<FieldConfig.IndexType>> skipIndexes = new HashMap<>(); for (String columnConf : perColumnIndexSkip) { String[] conf = StringUtils.split(columnConf, '='); if (conf.length != 2) { throw new RuntimeException("Invalid format for " + QueryOptionKey.SKIP_INDEXES + ". Example of valid format: SET skipIndexes='col1=inverted,range&col2=inverted'"); } String columnName = conf[0]; String[] indexTypes = StringUtils.split(conf[1], ','); for (String indexType : indexTypes) { skipIndexes.computeIfAbsent(columnName, k -> new HashSet<>()) .add(FieldConfig.IndexType.valueOf(indexType.toUpperCase())); } } return skipIndexes; }
@Test public void testSkipIndexesParsing() { String skipIndexesStr = "col1=inverted,range&col2=sorted"; Map<String, String> queryOptions = Map.of(CommonConstants.Broker.Request.QueryOptionKey.SKIP_INDEXES, skipIndexesStr); Map<String, Set<FieldConfig.IndexType>> skipIndexes = QueryOptionsUtils.getSkipIndexes(queryOptions); Assert.assertEquals(skipIndexes.get("col1"), Set.of(FieldConfig.IndexType.RANGE, FieldConfig.IndexType.INVERTED)); Assert.assertEquals(skipIndexes.get("col2"), Set.of(FieldConfig.IndexType.SORTED)); }
@Override @Transactional(rollbackFor = Exception.class) public Long createCombinationActivity(CombinationActivityCreateReqVO createReqVO) { // Validate that the product SPU exists and is not already taking part in another activity validateProductConflict(createReqVO.getSpuId(), null); // Validate that the products exist validateProductExists(createReqVO.getSpuId(), createReqVO.getProducts()); // Insert the group-buying activity CombinationActivityDO activity = CombinationActivityConvert.INSTANCE.convert(createReqVO) .setStatus(CommonStatusEnum.ENABLE.getStatus()); combinationActivityMapper.insert(activity); // Insert the products List<CombinationProductDO> products = CombinationActivityConvert.INSTANCE.convertList(createReqVO.getProducts(), activity); combinationProductMapper.insertBatch(products); return activity.getId(); }
@Test public void testCreateCombinationActivity_success() { // Prepare parameters CombinationActivityCreateReqVO reqVO = randomPojo(CombinationActivityCreateReqVO.class); // Invoke Long combinationActivityId = combinationActivityService.createCombinationActivity(reqVO); // Assert assertNotNull(combinationActivityId); // Verify that the persisted record's attributes are correct CombinationActivityDO combinationActivity = combinationActivityMapper.selectById(combinationActivityId); assertPojoEquals(reqVO, combinationActivity); }
public static void validatePermission(@Nullable String tableName, AccessType accessType, @Nullable HttpHeaders httpHeaders, String endpointUrl, AccessControl accessControl) { String userMessage = getUserMessage(tableName, accessType, endpointUrl); String rawTableName = TableNameBuilder.extractRawTableName(tableName); try { if (rawTableName == null) { if (accessControl.hasAccess(accessType, httpHeaders, endpointUrl)) { return; } } else { if (accessControl.hasAccess(rawTableName, accessType, httpHeaders, endpointUrl)) { return; } } } catch (WebApplicationException exception) { // throwing the exception if it's WebApplicationException throw exception; } catch (Throwable t) { // catch and log Throwable for NoSuchMethodError which can happen when there are classpath conflicts // otherwise, grizzly will return a 500 without any logs or indication of what failed throw new ControllerApplicationException(LOGGER, "Caught exception while validating permission for " + userMessage, Response.Status.INTERNAL_SERVER_ERROR, t); } throw new ControllerApplicationException(LOGGER, "Permission is denied for " + userMessage, Response.Status.FORBIDDEN); }
@Test public void testValidatePermissionWithNoSuchMethodError() { AccessControl ac = Mockito.mock(AccessControl.class); HttpHeaders mockHttpHeaders = Mockito.mock(HttpHeaders.class); Mockito.when(ac.hasAccess(_table, AccessType.READ, mockHttpHeaders, _endpoint)) .thenThrow(new NoSuchMethodError("Method not found")); try { AccessControlUtils.validatePermission(_table, AccessType.READ, mockHttpHeaders, _endpoint, ac); } catch (ControllerApplicationException e) { Assert.assertTrue(e.getMessage().contains("Caught exception while validating permission")); Assert.assertEquals(e.getResponse().getStatus(), Response.Status.INTERNAL_SERVER_ERROR.getStatusCode()); } }
@Override public boolean test(final Path test) { return this.equals(new DefaultPathPredicate(test)); }
@Test public void testHashcodeCollision() { assertNotEquals( new DefaultPathPredicate( new Path("19", EnumSet.of(Path.Type.file)) ), new DefaultPathPredicate( new Path("0X", EnumSet.of(Path.Type.file)) ) ); assertFalse(new DefaultPathPredicate( new Path("19", EnumSet.of(Path.Type.file)) ).test( new Path("0X", EnumSet.of(Path.Type.file)) )); }
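The fixture strings "19" and "0X" are deliberate: they collide under String.hashCode, which is the whole point of a test named testHashcodeCollision, since a predicate that compared only hash codes would wrongly equate the two paths. A quick JDK-only check:

public class StringHashCollisionDemo {
    public static void main(String[] args) {
        // For two-char strings, hashCode is s[0]*31 + s[1]:
        // '1'*31 + '9' = 49*31 + 57 = 1576 and '0'*31 + 'X' = 48*31 + 88 = 1576.
        System.out.println("19".hashCode()); // 1576
        System.out.println("0X".hashCode()); // 1576
        System.out.println("19".equals("0X")); // false: same hash, different value
    }
}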
public Template getIndexTemplate(IndexSet indexSet) { final IndexSetMappingTemplate indexSetMappingTemplate = getTemplateIndexSetConfig(indexSet, indexSet.getConfig(), profileService); return indexMappingFactory.createIndexMapping(indexSet.getConfig()) .toTemplate(indexSetMappingTemplate); }
@Test void testUsesCustomMappingsWhileGettingTemplateWhenProfileIsNull() { final CustomFieldMappings individualCustomFieldMappings = new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "string"), new CustomFieldMapping("f2", "long") )); final TestIndexSet testIndexSet = indexSetConfig("test", "test-template-profiles", "custom", "000000000000000000000013", individualCustomFieldMappings); doReturn(Optional.of(new IndexFieldTypeProfile( "000000000000000000000013", "empty_test_profile", "Empty test profile", new CustomFieldMappings(List.of())) )).when(profileService).get("000000000000000000000013"); IndexMappingTemplate indexMappingTemplateMock = mock(IndexMappingTemplate.class); doReturn(indexMappingTemplateMock).when(indexMappingFactory).createIndexMapping(testIndexSet.getConfig()); underTest.getIndexTemplate(testIndexSet); verify(indexMappingTemplateMock).toTemplate( new IndexSetMappingTemplate("standard", "test_*", individualCustomFieldMappings) ); }
public boolean skipFrame() throws IOException { if (currentHeader != null) { long toSkip = currentHeader.getLength() - HEADER_SIZE; long skipped = IOUtils.skip(in, toSkip); currentHeader = null; if (skipped < toSkip) { return false; } return true; } return false; }
@Test public void testSkipNoCurrentHeader() throws IOException { ByteArrayOutputStream bos = new ByteArrayOutputStream(); bos.write("This is a test".getBytes(UTF_8)); ByteArrayInputStream in = new ByteArrayInputStream(bos.toByteArray()); stream = new MpegStream(in); assertFalse(stream.skipFrame(), "Wrong result"); }
@Override protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) { final String param = exchange.getAttribute(Constants.PARAM_TRANSFORM); ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT); assert shenyuContext != null; MetaData metaData = exchange.getAttribute(Constants.META_DATA); if (!checkMetaData(metaData)) { LOG.error(" path is :{}, meta data have error.... {}", shenyuContext.getPath(), metaData); exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR); Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.META_DATA_ERROR); return WebFluxResultUtils.result(exchange, error); } assert metaData != null; if (StringUtils.isNoneBlank(metaData.getParameterTypes()) && StringUtils.isBlank(param)) { exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR); Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.GRPC_HAVE_BODY_PARAM); return WebFluxResultUtils.result(exchange, error); } final ShenyuGrpcClient client = GrpcClientCache.getGrpcClient(selector.getId()); if (Objects.isNull(client)) { exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR); Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.GRPC_CLIENT_NULL); return WebFluxResultUtils.result(exchange, error); } // load balance context Context.current().withValue(GrpcConstants.GRPC_SELECTOR_ID, selector.getId()).attach(); Context.current().withValue(GrpcConstants.GRPC_RULE_ID, rule.getId()).attach(); Context.current().withValue(GrpcConstants.GRPC_REMOTE_ADDRESS, Objects.requireNonNull(exchange.getRequest().getRemoteAddress()).getAddress().getHostAddress()).attach(); GrpcExtInfo extInfo = GsonUtils.getGson().fromJson(metaData.getRpcExt(), GrpcExtInfo.class); CallOptions callOptions = CallOptions.DEFAULT.withDeadlineAfter(extInfo.timeout, TimeUnit.MILLISECONDS); Map<String, Map<String, String>> rpcContext = exchange.getAttribute(Constants.GENERAL_CONTEXT); Optional.ofNullable(rpcContext).map(context -> context.get(PluginEnum.GRPC.getName())).ifPresent( context -> Context.current().withValue(RPC_CONTEXT_KEY, context).attach()); CompletableFuture<ShenyuGrpcResponse> result = client.call(metaData, callOptions, param, extInfo.methodType); Context.current().detach(Context.ROOT); return Mono.fromFuture(result.thenApply(ret -> { exchange.getAttributes().put(Constants.RPC_RESULT, ret.getResults()); exchange.getAttributes().put(Constants.CLIENT_RESPONSE_RESULT_TYPE, ResultEnum.SUCCESS.getName()); return ret; })).onErrorMap(ShenyuException::new).then(chain.execute(exchange)); }
@Test public void testDoExecuteParaIsBlankError() { ServerWebExchange exchange = getServerWebExchange(); exchange.getAttributes().put(Constants.META_DATA, new MetaData()); RuleData data = mock(RuleData.class); StepVerifier.create(grpcPlugin.doExecute(exchange, chain, selector, data)).expectSubscription().verifyComplete(); }
@Override public AppResponse process(Flow flow, ActivateWithCodeRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException { Map<String, Object> result = digidClient.activateAccountWithCode(appSession.getAccountId(), request.getActivationCode()); if (result.get(lowerUnderscore(STATUS)).equals("OK")) { appAuthenticator.setIssuerType((String) result.get(lowerUnderscore(ISSUER_TYPE))); return new OkResponse(); } if (result.get(lowerUnderscore(STATUS)).equals("NOK") && result.get(ERROR) != null ) { final var error = result.get(ERROR); if (ERROR_CODE_NOT_CORRECT.equals(error)) { // Logcode 88 is already logged in x, can be changed when switching to account microservice : return new EnterActivationResponse(ERROR_CODE_NOT_CORRECT, Map.of(REMAINING_ATTEMPTS, result.get(lowerUnderscore(REMAINING_ATTEMPTS)))); } else if (ERROR_CODE_BLOCKED.equals(error)) { digidClient.remoteLog("87", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId())); return new NokResponse((String) result.get(ERROR)); } else if (ERROR_CODE_INVALID.equals(error)) { digidClient.remoteLog("90", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId())); return new EnterActivationResponse(ERROR_CODE_INVALID, Map.of(DAYS_VALID, result.get(lowerUnderscore(DAYS_VALID)))); } } return new NokResponse(); }
@Test public void responseBlockedTest() throws FlowNotDefinedException, IOException, NoSuchAlgorithmException { //given when(digidClientMock.activateAccountWithCode(anyLong(), any())).thenReturn(Map.of( lowerUnderscore(STATUS), "NOK", lowerUnderscore(ERROR), "activation_code_blocked", lowerUnderscore(ERROR_CODE_BLOCKED), "activation_code_blocked" )); //when AppResponse result = activationCodeChecked.process(mockedFlow, activateWithCodeRequest); //then verify(digidClientMock, times(1)).remoteLog("87", ImmutableMap.of(lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId())); assertTrue(result instanceof NokResponse); assertEquals("activation_code_blocked", ((NokResponse) result).getError()); }
static Collection<Field> getAllFields(Class<?> owner, Predicate<? super Field> predicate) { return getAll(owner, Class::getDeclaredFields).filter(predicate).collect(toList()); }
@Test public void getAllFields_of_interface() { assertThat(ReflectionUtils.getAllFields(Subinterface.class, alwaysTrue())) .containsOnly( field(SomeInterface.class, "SOME_CONSTANT"), field(OtherInterface.class, "OTHER_CONSTANT")); }
Map<String, Object> parseForValidate(Map<String, String> props, Map<String, ConfigValue> configValues) { Map<String, Object> parsed = new HashMap<>(); Set<String> configsWithNoParent = getConfigsWithNoParent(); for (String name: configsWithNoParent) { parseForValidate(name, props, parsed, configValues); } return parsed; }
@Test public void testParseForValidate() { Map<String, Object> expectedParsed = new HashMap<>(); expectedParsed.put("a", 1); expectedParsed.put("b", null); expectedParsed.put("c", null); expectedParsed.put("d", 10); Map<String, ConfigValue> expected = new HashMap<>(); String errorMessageB = "Missing required configuration \"b\" which has no default value."; String errorMessageC = "Missing required configuration \"c\" which has no default value."; ConfigValue configA = new ConfigValue("a", 1, Collections.emptyList(), Collections.emptyList()); ConfigValue configB = new ConfigValue("b", null, Collections.emptyList(), Arrays.asList(errorMessageB, errorMessageB)); ConfigValue configC = new ConfigValue("c", null, Collections.emptyList(), singletonList(errorMessageC)); ConfigValue configD = new ConfigValue("d", 10, Collections.emptyList(), Collections.emptyList()); expected.put("a", configA); expected.put("b", configB); expected.put("c", configC); expected.put("d", configD); ConfigDef def = new ConfigDef() .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false)) .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true)) .define("c", Type.INT, Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true)) .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", singletonList("b"), new IntegerRecommender(false)); Map<String, String> props = new HashMap<>(); props.put("a", "1"); props.put("d", "10"); Map<String, ConfigValue> configValues = new HashMap<>(); for (String name : def.configKeys().keySet()) { configValues.put(name, new ConfigValue(name)); } Map<String, Object> parsed = def.parseForValidate(props, configValues); assertEquals(expectedParsed, parsed); assertEquals(expected, configValues); }
@Override public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException { TbMsgMetaData metaDataCopy = msg.getMetaData().copy(); String data = msg.getData(); boolean msgChanged = false; switch (renameIn) { case METADATA: Map<String, String> metaDataMap = metaDataCopy.getData(); for (Map.Entry<String, String> entry : renameKeysMapping.entrySet()) { String currentKeyName = entry.getKey(); String newKeyName = entry.getValue(); if (metaDataMap.containsKey(currentKeyName)) { msgChanged = true; String value = metaDataMap.get(currentKeyName); metaDataMap.put(newKeyName, value); metaDataMap.remove(currentKeyName); } } metaDataCopy = new TbMsgMetaData(metaDataMap); break; case DATA: JsonNode dataNode = JacksonUtil.toJsonNode(data); if (dataNode.isObject()) { ObjectNode msgData = (ObjectNode) dataNode; for (Map.Entry<String, String> entry : renameKeysMapping.entrySet()) { String currentKeyName = entry.getKey(); String newKeyName = entry.getValue(); if (msgData.has(currentKeyName)) { msgChanged = true; JsonNode value = msgData.get(currentKeyName); msgData.set(newKeyName, value); msgData.remove(currentKeyName); } } data = JacksonUtil.toString(msgData); } break; default: log.debug("Unexpected RenameIn value: {}. Allowed values: {}", renameIn, TbMsgSource.values()); } ctx.tellSuccess(msgChanged ? TbMsg.transformMsg(msg, metaDataCopy, data) : msg); }
@Test void givenMsg_whenOnMsg_thenVerifyOutput() throws Exception { String data = "{\"Temperature_1\":22.5,\"TestKey_2\":10.3}"; node.onMsg(ctx, getTbMsg(deviceId, data)); ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class); verify(ctx, times(1)).tellSuccess(newMsgCaptor.capture()); verify(ctx, never()).tellFailure(any(), any()); TbMsg newMsg = newMsgCaptor.getValue(); assertThat(newMsg).isNotNull(); JsonNode dataNode = JacksonUtil.toJsonNode(newMsg.getData()); assertThat(dataNode.has("Attribute_2")).isEqualTo(true); assertThat(dataNode.has("Temperature_1")).isEqualTo(true); }
@Nullable @Override public Message decode(@Nonnull RawMessage rawMessage) { final byte[] payload = rawMessage.getPayload(); final Map<String, Object> event; try { event = objectMapper.readValue(payload, TypeReferences.MAP_STRING_OBJECT); } catch (IOException e) { LOG.error("Couldn't decode raw message {}", rawMessage); return null; } return parseEvent(event); }
@Test public void decodeMessagesHandlesTopbeatMessages() throws Exception { final Message message = codec.decode(messageFromJson("topbeat-system.json")); assertThat(message).isNotNull(); assertThat(message.getSource()).isEqualTo("example.local"); assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC)); assertThat(message.getField("facility")).isEqualTo("topbeat"); assertThat(message.getField("type")).isEqualTo("system"); }
public Optional<DoFn.ProcessContinuation> run( PartitionRecord partitionRecord, ChangeStreamRecord record, RestrictionTracker<StreamProgress, StreamProgress> tracker, DoFn.OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator, BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator) { if (record instanceof Heartbeat) { Heartbeat heartbeat = (Heartbeat) record; final Instant watermark = toJodaTime(heartbeat.getEstimatedLowWatermark()); // These will be filtered so the key doesn't really matter but the most logical thing to // key a heartbeat by is the partition it corresponds to. ByteString heartbeatKey = Range.ByteStringRange.serializeToByteString(partitionRecord.getPartition()); KV<ByteString, ChangeStreamRecord> outputRecord = KV.of(heartbeatKey, heartbeat); throughputEstimator.update(Instant.now(), outputRecord); StreamProgress streamProgress = new StreamProgress( heartbeat.getChangeStreamContinuationToken(), watermark, throughputEstimator.get(), Instant.now(), true); watermarkEstimator.setWatermark(watermark); // If the tracker fails to claim the streamProgress, it most likely means the runner initiated // a checkpoint. See {@link // org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker} // for more information regarding runner initiated checkpoints. if (!tracker.tryClaim(streamProgress)) { return Optional.of(DoFn.ProcessContinuation.stop()); } metrics.incHeartbeatCount(); // We output heartbeats so that they are factored into throughput and can be used to // autoscale. These will be filtered in a downstream step and never returned to users. This is // to prevent autoscaler from scaling down when we have large tables with no throughput but // we need enough workers to keep up with heartbeats. // We are outputting elements with timestamp of 0 to prevent reliance on event time. This // limits the ability to window on commit time of any data changes. It is still possible to // window on processing time. receiver.outputWithTimestamp(outputRecord, Instant.EPOCH); } else if (record instanceof CloseStream) { CloseStream closeStream = (CloseStream) record; StreamProgress streamProgress = new StreamProgress(closeStream); // If the tracker fails to claim the streamProgress, it most likely means the runner initiated // a checkpoint. See {@link // org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker} // for more information regarding runner initiated checkpoints. if (!tracker.tryClaim(streamProgress)) { return Optional.of(DoFn.ProcessContinuation.stop()); } metrics.incClosestreamCount(); return Optional.of(DoFn.ProcessContinuation.resume()); } else if (record instanceof ChangeStreamMutation) { ChangeStreamMutation changeStreamMutation = (ChangeStreamMutation) record; final Instant watermark = toJodaTime(changeStreamMutation.getEstimatedLowWatermark()); watermarkEstimator.setWatermark(watermark); // Build a new StreamProgress with the continuation token to be claimed. ChangeStreamContinuationToken changeStreamContinuationToken = ChangeStreamContinuationToken.create( Range.ByteStringRange.create( partitionRecord.getPartition().getStart(), partitionRecord.getPartition().getEnd()), changeStreamMutation.getToken()); KV<ByteString, ChangeStreamRecord> outputRecord = KV.of(changeStreamMutation.getRowKey(), changeStreamMutation); throughputEstimator.update(Instant.now(), outputRecord); StreamProgress streamProgress = new StreamProgress( changeStreamContinuationToken, watermark, throughputEstimator.get(), Instant.now(), false); // If the tracker fails to claim the streamProgress, it most likely means the runner initiated // a checkpoint. See ReadChangeStreamPartitionProgressTracker for more information regarding // runner initiated checkpoints. if (!tracker.tryClaim(streamProgress)) { return Optional.of(DoFn.ProcessContinuation.stop()); } if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.GARBAGE_COLLECTION) { metrics.incChangeStreamMutationGcCounter(); } else if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.USER) { metrics.incChangeStreamMutationUserCounter(); } Instant delay = toJodaTime(changeStreamMutation.getCommitTimestamp()); metrics.updateProcessingDelayFromCommitTimestamp( Instant.now().getMillis() - delay.getMillis()); // We are outputting elements with timestamp of 0 to prevent reliance on event time. This // limits the ability to window on commit time of any data changes. It is still possible to // window on processing time. receiver.outputWithTimestamp(outputRecord, Instant.EPOCH); } else { LOG.warn( "RCSP {}: Invalid response type", formatByteStringRange(partitionRecord.getPartition())); } return Optional.empty(); }
@Test public void testHeartBeat() { final Instant lowWatermark = Instant.ofEpochSecond(1000); ChangeStreamContinuationToken changeStreamContinuationToken = ChangeStreamContinuationToken.create(ByteStringRange.create("a", "b"), "1234"); Heartbeat mockHeartBeat = Mockito.mock(Heartbeat.class); Mockito.when(mockHeartBeat.getEstimatedLowWatermark()) .thenReturn(toThreetenInstant(lowWatermark)); Mockito.when(mockHeartBeat.getChangeStreamContinuationToken()) .thenReturn(changeStreamContinuationToken); final Optional<DoFn.ProcessContinuation> result = action.run( partitionRecord, mockHeartBeat, tracker, receiver, watermarkEstimator, throughputEstimator); assertFalse(result.isPresent()); verify(metrics).incHeartbeatCount(); verify(watermarkEstimator).setWatermark(eq(lowWatermark)); StreamProgress streamProgress = new StreamProgress( changeStreamContinuationToken, lowWatermark, BigDecimal.valueOf(1000), Instant.now(), true); verify(tracker).tryClaim(streamProgressArgumentCaptor.capture()); assertEquals( streamProgress.getCurrentToken(), streamProgressArgumentCaptor.getValue().getCurrentToken()); assertEquals( streamProgress.getThroughputEstimate(), streamProgressArgumentCaptor.getValue().getThroughputEstimate()); assertEquals( streamProgress.getEstimatedLowWatermark(), streamProgressArgumentCaptor.getValue().getEstimatedLowWatermark()); assertEquals( streamProgress.isHeartbeat(), streamProgressArgumentCaptor.getValue().isHeartbeat()); KV<ByteString, ChangeStreamRecord> record = KV.of(ByteStringRange.serializeToByteString(partitionRecord.getPartition()), mockHeartBeat); verify(receiver).outputWithTimestamp(eq(record), eq(Instant.EPOCH)); verify(throughputEstimator).update(any(), eq(record)); }
public static ServerId of(@Nullable String databaseId, String datasetId) { if (databaseId != null) { int databaseIdLength = databaseId.length(); checkArgument(databaseIdLength == DATABASE_ID_LENGTH, "Illegal databaseId length (%s)", databaseIdLength); } int datasetIdLength = datasetId.length(); checkArgument(datasetIdLength == DEPRECATED_SERVER_ID_LENGTH || datasetIdLength == NOT_UUID_DATASET_ID_LENGTH || datasetIdLength == UUID_DATASET_ID_LENGTH, "Illegal datasetId length (%s)", datasetIdLength); return new ServerId(databaseId, datasetId); }
@Test @UseDataProvider("illegalDatabaseIdLengths") public void of_throws_IAE_if_databaseId_length_is_not_8(int illegalDatabaseIdLengths) { String databaseId = randomAlphabetic(illegalDatabaseIdLengths); String datasetId = randomAlphabetic(UUID_DATASET_ID_LENGTH); assertThatThrownBy(() -> ServerId.of(databaseId, datasetId)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Illegal databaseId length (" + illegalDatabaseIdLengths + ")"); }
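// Hedged companion sketch (not from the original suite): in the focal method above, a null
// databaseId skips the length check entirely, so only the datasetId is validated. Assumes the
// same UUID_DATASET_ID_LENGTH constant and AssertJ/commons-lang helpers the existing test uses.
@Test
public void of_accepts_null_databaseId() {
  String datasetId = randomAlphabetic(UUID_DATASET_ID_LENGTH);
  assertThat(ServerId.of(null, datasetId)).isNotNull();
}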
static int toInteger(final JsonNode object) { if (object instanceof NumericNode) { return object.intValue(); } if (object instanceof TextNode) { try { return Integer.parseInt(object.textValue()); } catch (final NumberFormatException e) { throw failedStringCoercionException(SqlBaseType.INTEGER); } } throw invalidConversionException(object, SqlBaseType.INTEGER); }
@Test public void shouldConvertStringToIntCorrectly() { final Integer i = JsonSerdeUtils.toInteger(JsonNodeFactory.instance.textNode("1")); assertThat(i, equalTo(1)); }
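// Hedged companion sketch: the NumericNode branch of toInteger() returns intValue() directly,
// so a plain number node passes through without string parsing. Same Hamcrest style as above.
@Test
public void shouldPassThroughNumericNode() {
  final Integer i = JsonSerdeUtils.toInteger(JsonNodeFactory.instance.numberNode(42));
  assertThat(i, equalTo(42));
}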
public static void upgradeConfigurationAndVersion(RuleNode node, RuleNodeClassInfo nodeInfo) { JsonNode oldConfiguration = node.getConfiguration(); int configurationVersion = node.getConfigurationVersion(); int currentVersion = nodeInfo.getCurrentVersion(); var configClass = nodeInfo.getAnnotation().configClazz(); if (oldConfiguration == null || !oldConfiguration.isObject()) { log.warn("Failed to upgrade rule node with id: {} type: {} fromVersion: {} toVersion: {}. " + "Current configuration is null or not a json object. " + "Going to set default configuration ... ", node.getId(), node.getType(), configurationVersion, currentVersion); node.setConfiguration(getDefaultConfig(configClass)); } else { var tbVersionedNode = getTbVersionedNode(nodeInfo); try { JsonNode queueName = oldConfiguration.get(QUEUE_NAME); TbPair<Boolean, JsonNode> upgradeResult = tbVersionedNode.upgrade(configurationVersion, oldConfiguration); if (upgradeResult.getFirst()) { node.setConfiguration(upgradeResult.getSecond()); if (nodeInfo.getAnnotation().hasQueueName() && queueName != null && queueName.isTextual()) { node.setQueueName(queueName.asText()); } } } catch (Exception e) { try { JacksonUtil.treeToValue(oldConfiguration, configClass); } catch (Exception ex) { log.warn("Failed to upgrade rule node with id: {} type: {} fromVersion: {} toVersion: {}. " + "Going to set default configuration ... ", node.getId(), node.getType(), configurationVersion, currentVersion, e); node.setConfiguration(getDefaultConfig(configClass)); } } } node.setConfigurationVersion(currentVersion); }
@Test public void testUpgradeRuleNodeConfigurationWithNullConfig() throws Exception { // GIVEN var node = new RuleNode(); var nodeInfo = mock(RuleNodeClassInfo.class); var nodeConfigClazz = TbGetAttributesNodeConfiguration.class; var annotation = mock(org.thingsboard.rule.engine.api.RuleNode.class); var defaultConfig = JacksonUtil.valueToTree(nodeConfigClazz.getDeclaredConstructor().newInstance().defaultConfiguration()); when(nodeInfo.getClazz()).thenReturn((Class) TbGetAttributesNode.class); when(nodeInfo.getCurrentVersion()).thenReturn(1); when(nodeInfo.getAnnotation()).thenReturn(annotation); when(annotation.configClazz()).thenReturn((Class) nodeConfigClazz); // WHEN TbNodeUpgradeUtils.upgradeConfigurationAndVersion(node, nodeInfo); // THEN Assertions.assertThat(node.getConfiguration()).isEqualTo(defaultConfig); Assertions.assertThat(node.getConfigurationVersion()).isEqualTo(1); }
public static UserOperatorConfig buildFromMap(Map<String, String> map) { Map<String, String> envMap = new HashMap<>(map); envMap.keySet().retainAll(UserOperatorConfig.keyNames()); Map<String, Object> generatedMap = ConfigParameter.define(envMap, CONFIG_VALUES); return new UserOperatorConfig(generatedMap); }
@Test public void testFromMapNamespaceEnvVarMissingThrows() { Map<String, String> envVars = new HashMap<>(UserOperatorConfigTest.ENV_VARS); envVars.remove(UserOperatorConfig.NAMESPACE.key()); assertThrows(InvalidConfigurationException.class, () -> UserOperatorConfig.buildFromMap(envVars)); }
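// Hedged companion sketch: buildFromMap() first retains only known config keys, so unrelated
// environment variables should be silently ignored rather than rejected. The extra key name
// here is hypothetical; assumes the suite's ENV_VARS fixture is otherwise complete.
@Test
public void testFromMapIgnoresUnknownEnvVars() {
  Map<String, String> envVars = new HashMap<>(UserOperatorConfigTest.ENV_VARS);
  envVars.put("SOME_UNRELATED_ENV_VAR", "value");
  assertDoesNotThrow(() -> UserOperatorConfig.buildFromMap(envVars));
}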
public Result execute() { long start = clock().getTick(); Result result; try { result = check(); } catch (Exception e) { result = Result.unhealthy(e); } result.setDuration(TimeUnit.MILLISECONDS.convert(clock().getTick() - start, TimeUnit.NANOSECONDS)); return result; }
@Test public void wrapsExceptionsWhenExecuted() { final RuntimeException e = mock(RuntimeException.class); when(e.getMessage()).thenReturn("oh noes"); when(underlying.execute()).thenThrow(e); HealthCheck.Result actual = healthCheck.execute(); assertThat(actual.isHealthy()) .isFalse(); assertThat(actual.getMessage()) .isEqualTo("oh noes"); assertThat(actual.getError()) .isEqualTo(e); assertThat(actual.getDetails()) .isNull(); assertThat(actual.getDuration()) .isGreaterThanOrEqualTo(0); }
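// Illustrative sketch, assuming the standard Dropwizard Metrics HealthCheck API: a trivial
// concrete check showing that execute() also stamps a non-negative duration on success, not
// just on the exception path exercised above.
@Test
public void recordsDurationForHealthyChecks() {
  HealthCheck alwaysHealthy = new HealthCheck() {
    @Override
    protected Result check() {
      return Result.healthy("ok");
    }
  };
  HealthCheck.Result result = alwaysHealthy.execute();
  assertThat(result.isHealthy()).isTrue();
  assertThat(result.getDuration()).isGreaterThanOrEqualTo(0);
}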
public String buildSql(List<HiveColumnHandle> columns, TupleDomain<HiveColumnHandle> tupleDomain) { // SELECT clause StringBuilder sql = new StringBuilder("SELECT "); if (columns.isEmpty()) { sql.append("' '"); } else { String columnNames = columns.stream() .map(this::getFullyQualifiedColumnName) .collect(joining(", ")); sql.append(columnNames); } // FROM clause sql.append(" FROM "); sql.append(DATA_SOURCE); // WHERE clause List<String> clauses = toConjuncts(columns, tupleDomain); if (!clauses.isEmpty()) { sql.append(" WHERE ") .append(Joiner.on(" AND ").join(clauses)); } return sql.toString(); }
@Test public void testNotPushDoublePredicates() { List<HiveColumnHandle> columns = ImmutableList.of( new HiveColumnHandle("quantity", HIVE_INT, parseTypeSignature(INTEGER), 0, REGULAR, Optional.empty(), Optional.empty()), new HiveColumnHandle("extendedprice", HIVE_DOUBLE, parseTypeSignature(StandardTypes.DOUBLE), 1, REGULAR, Optional.empty(), Optional.empty()), new HiveColumnHandle("discount", HIVE_DOUBLE, parseTypeSignature(StandardTypes.DOUBLE), 2, REGULAR, Optional.empty(), Optional.empty())); TupleDomain<HiveColumnHandle> tupleDomain = withColumnDomains( ImmutableMap.of( columns.get(0), Domain.create(ofRanges(Range.lessThan(BIGINT, 50L)), false), columns.get(1), Domain.create(ofRanges(Range.equal(DOUBLE, 0.05)), false), columns.get(2), Domain.create(ofRanges(Range.range(DOUBLE, 0.0, true, 0.02, true)), false))); // CSV IonSqlQueryBuilder queryBuilder = new IonSqlQueryBuilder(createTestFunctionAndTypeManager(), CSV); assertEquals(queryBuilder.buildSql(columns, tupleDomain), "SELECT s._1, s._2, s._3 FROM S3Object s WHERE ((case s._1 when '' then null else CAST(s._1 AS INT) end < 50))"); // JSON queryBuilder = new IonSqlQueryBuilder(createTestFunctionAndTypeManager(), JSON); assertEquals(queryBuilder.buildSql(columns, tupleDomain), "SELECT s.quantity, s.extendedprice, s.discount FROM S3Object s WHERE ((case s.quantity when '' then null else CAST(s.quantity AS INT) end < 50))"); }
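// Hedged companion sketch: with no columns projected the focal method emits the literal "' '"
// placeholder, and an all() domain produces no WHERE clause. Assumes DATA_SOURCE renders as
// "S3Object s", as the expected strings in the test above suggest.
@Test
public void testEmptyColumnsSelectPlaceholder() {
  IonSqlQueryBuilder queryBuilder = new IonSqlQueryBuilder(createTestFunctionAndTypeManager(), CSV);
  assertEquals(queryBuilder.buildSql(ImmutableList.of(), TupleDomain.all()), "SELECT ' ' FROM S3Object s");
}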
@Override protected String buildHandle(final List<URIRegisterDTO> uriList, final SelectorDO selectorDO) { List<GrpcUpstream> addList = buildGrpcUpstreamList(uriList); List<GrpcUpstream> canAddList = new CopyOnWriteArrayList<>(); boolean isEventDeleted = uriList.size() == 1 && EventType.DELETED.equals(uriList.get(0).getEventType()); if (isEventDeleted) { addList.get(0).setStatus(false); } List<GrpcUpstream> existList = GsonUtils.getInstance().fromCurrentList(selectorDO.getHandle(), GrpcUpstream.class); if (CollectionUtils.isEmpty(existList)) { canAddList = addList; } else { List<GrpcUpstream> diffList = addList.stream().filter(upstream -> !existList.contains(upstream)).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(diffList)) { canAddList.addAll(diffList); existList.addAll(diffList); } List<GrpcUpstream> diffStatusList = addList.stream().filter(upstream -> !upstream.isStatus() || existList.stream().anyMatch(e -> e.equals(upstream) && e.isStatus() != upstream.isStatus())).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(diffStatusList)) { canAddList.addAll(diffStatusList); } } if (doSubmit(selectorDO.getId(), canAddList)) { return null; } return GsonUtils.getInstance().toJson(CollectionUtils.isEmpty(existList) ? canAddList : existList); }
@Test public void testBuildHandle() { shenyuClientRegisterGrpcService = spy(shenyuClientRegisterGrpcService); final String returnStr = "[{upstreamUrl='localhost:8090',weight=1,status=true,timestamp=1637826588267}," + "{upstreamUrl='localhost:8091',weight=2,status=true,timestamp=1637826588267}]"; final String expected = "[{\"weight\":1,\"upstreamUrl\":\"localhost:8090\",\"status\":true,\"timestamp\":1637826588267}," + "{\"weight\":2,\"upstreamUrl\":\"localhost:8091\",\"status\":true,\"timestamp\":1637826588267}]"; List<URIRegisterDTO> list = new ArrayList<>(); list.add(URIRegisterDTO.builder().appName("test1").rpcType(RpcTypeEnum.GRPC.getName()).host("localhost").port(8090).build()); SelectorDO selectorDO = mock(SelectorDO.class); when(selectorDO.getHandle()).thenReturn(returnStr); doReturn(false).when(shenyuClientRegisterGrpcService).doSubmit(any(), any()); String actual = shenyuClientRegisterGrpcService.buildHandle(list, selectorDO); assertEquals(actual, expected); List<GrpcUpstream> resultList = GsonUtils.getInstance().fromCurrentList(actual, GrpcUpstream.class); assertEquals(resultList.size(), 2); list.clear(); list.add(URIRegisterDTO.builder().appName("test1").rpcType(RpcTypeEnum.GRPC.getName()).host("localhost").port(8092).build()); selectorDO = mock(SelectorDO.class); when(selectorDO.getHandle()).thenReturn(returnStr); doReturn(false).when(shenyuClientRegisterGrpcService).doSubmit(any(), any()); actual = shenyuClientRegisterGrpcService.buildHandle(list, selectorDO); resultList = GsonUtils.getInstance().fromCurrentList(actual, GrpcUpstream.class); assertEquals(resultList.size(), 3); list.clear(); list.add(URIRegisterDTO.builder().appName("test1").rpcType(RpcTypeEnum.GRPC.getName()).host("localhost").port(8090).build()); doReturn(false).when(shenyuClientRegisterGrpcService).doSubmit(any(), any()); selectorDO = mock(SelectorDO.class); actual = shenyuClientRegisterGrpcService.buildHandle(list, selectorDO); resultList = GsonUtils.getInstance().fromCurrentList(actual, GrpcUpstream.class); assertEquals(resultList.size(), 1); }
@Override public FindCoordinatorRequest.Builder buildRequest(Set<CoordinatorKey> keys) { unrepresentableKeys = keys.stream().filter(k -> k == null || !isRepresentableKey(k.idValue)).collect(Collectors.toSet()); Set<CoordinatorKey> representableKeys = keys.stream().filter(k -> k != null && isRepresentableKey(k.idValue)).collect(Collectors.toSet()); if (batch) { ensureSameType(representableKeys); FindCoordinatorRequestData data = new FindCoordinatorRequestData() .setKeyType(type.id()) .setCoordinatorKeys(representableKeys.stream().map(k -> k.idValue).collect(Collectors.toList())); return new FindCoordinatorRequest.Builder(data); } else { CoordinatorKey key = requireSingletonAndType(representableKeys); return new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKey(key.idValue) .setKeyType(key.type.id()) ); } }
@Test public void testBuildLookupRequest() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList( CoordinatorKey.byGroupId("foo"), CoordinatorKey.byGroupId("bar")))); assertEquals("", request.data().key()); assertEquals(2, request.data().coordinatorKeys().size()); assertEquals(CoordinatorType.GROUP, CoordinatorType.forId(request.data().keyType())); }
public static String format(Object x) { if (x != null) { return format(x.toString()); } else { return StrUtil.EMPTY; } }
@Test public void testFormatLargeNumber() { // Verify formatting when a very large number is passed in String result = NumberWordFormatter.format(1234567890123L); assertEquals("ONE TRILLION TWO HUNDRED AND THIRTY FOUR BILLION FIVE HUNDRED AND SIXTY SEVEN MILLION EIGHT HUNDRED AND NINETY THOUSAND ONE HUNDRED AND TWENTY THREE ONLY", result); }
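// Hedged companion sketch: per the focal overload above, a null argument short-circuits to
// StrUtil.EMPTY before any parsing. The cast disambiguates from a possible String overload.
@Test
public void testFormatNullReturnsEmpty() {
  assertEquals("", NumberWordFormatter.format((Object) null));
}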
public static String createGPX(InstructionList instructions, String trackName, long startTimeMillis, boolean includeElevation, boolean withRoute, boolean withTrack, boolean withWayPoints, String version, Translation tr) { DateFormat formatter = Helper.createFormatter(); DecimalFormat decimalFormat = new DecimalFormat("#", DecimalFormatSymbols.getInstance(Locale.ROOT)); decimalFormat.setMinimumFractionDigits(1); decimalFormat.setMaximumFractionDigits(6); decimalFormat.setMinimumIntegerDigits(1); String header = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>" + "<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" + " creator=\"Graphhopper version " + version + "\" version=\"1.1\"" // This xmlns:gh acts only as ID, no valid URL necessary. // Use a separate namespace for custom extensions to make basecamp happy. + " xmlns:gh=\"https://graphhopper.com/public/schema/gpx/1.1\">" + "\n<metadata>" + "<copyright author=\"OpenStreetMap contributors\"/>" + "<link href=\"http://graphhopper.com\">" + "<text>GraphHopper GPX</text>" + "</link>" + "<time>" + formatter.format(startTimeMillis) + "</time>" + "</metadata>"; StringBuilder gpxOutput = new StringBuilder(header); if (!instructions.isEmpty()) { if (withWayPoints) { createWayPointBlock(gpxOutput, instructions.get(0), decimalFormat, tr); // Start for (Instruction currInstr : instructions) { if ((currInstr.getSign() == Instruction.REACHED_VIA) // Via || (currInstr.getSign() == Instruction.FINISH)) // End { createWayPointBlock(gpxOutput, currInstr, decimalFormat, tr); } } } if (withRoute) { gpxOutput.append("\n<rte>"); Instruction nextInstr = null; for (Instruction currInstr : instructions) { if (null != nextInstr) createRteptBlock(gpxOutput, nextInstr, currInstr, decimalFormat, tr); nextInstr = currInstr; } createRteptBlock(gpxOutput, nextInstr, null, decimalFormat, tr); gpxOutput.append("\n</rte>"); } } if (withTrack) { gpxOutput.append("\n<trk><name>").append(trackName).append("</name>"); gpxOutput.append("<trkseg>"); for (GPXEntry entry : createGPXList(instructions)) { gpxOutput.append("\n<trkpt lat=\"").append(decimalFormat.format(entry.getPoint().getLat())); gpxOutput.append("\" lon=\"").append(decimalFormat.format(entry.getPoint().getLon())).append("\">"); if (includeElevation) gpxOutput.append("<ele>").append(Helper.round2(((GHPoint3D) entry.getPoint()).getEle())).append("</ele>"); if (entry.getTime() != null) gpxOutput.append("<time>").append(formatter.format(startTimeMillis + entry.getTime())).append("</time>"); gpxOutput.append("</trkpt>"); } gpxOutput.append("\n</trkseg>"); gpxOutput.append("\n</trk>"); } // we could now use 'wpt' for via points gpxOutput.append("\n</gpx>"); return gpxOutput.toString(); }
@Test public void testCreateGPX() { InstructionList instructions = new InstructionList(trMap.getWithFallBack(Locale.US)); PointList pl = new PointList(); pl.add(49.942576, 11.580384); pl.add(49.941858, 11.582422); instructions.add(new Instruction(Instruction.CONTINUE_ON_STREET, "temp", pl).setDistance(240).setTime(15000)); pl = new PointList(); pl.add(49.941575, 11.583501); instructions.add(new Instruction(Instruction.TURN_LEFT, "temp2", pl).setDistance(25).setTime(4000)); pl = new PointList(); pl.add(49.941389, 11.584311); instructions.add(new Instruction(Instruction.TURN_LEFT, "temp2", pl).setDistance(25).setTime(3000)); instructions.add(new FinishInstruction(49.941029, 11.584514, 0)); List<GpxConversions.GPXEntry> result = GpxConversions.createGPXList(instructions); assertEquals(5, result.size()); assertEquals(0, result.get(0).getTime().longValue()); assertNull(result.get(1).getTime()); assertEquals(15000, result.get(2).getTime().longValue()); assertEquals(19000, result.get(3).getTime().longValue()); assertEquals(22000, result.get(4).getTime().longValue()); verifyGPX(GpxConversions.createGPX(instructions, "GraphHopper", new Date().getTime(), false, true, true, true, Constants.VERSION, trMap.getWithFallBack(Locale.US))); }
@Override public final void run() { long valueCount = collector.getMergingValueCount(); if (valueCount == 0) { return; } runInternal(); assert operationCount > 0 : "No merge operations have been invoked in AbstractContainerMerger"; try { long timeoutMillis = Math.max(valueCount * TIMEOUT_FACTOR, MINIMAL_TIMEOUT_MILLIS); if (!semaphore.tryAcquire(operationCount, timeoutMillis, TimeUnit.MILLISECONDS)) { logger.warning("Split-brain healing for " + getLabel() + " didn't finish within the timeout..."); } } catch (InterruptedException e) { logger.finest("Interrupted while waiting for split-brain healing of " + getLabel() + "..."); Thread.currentThread().interrupt(); } finally { collector.destroy(); } }
@Test @RequireAssertEnabled public void testMergerRun_whenEmptyCollector_thenMergerDoesNotRun() { TestMergeOperation operation = new TestMergeOperation(); TestContainerMerger merger = new TestContainerMerger(emptyCollector, nodeEngine, operation); merger.run(); assertFalse("Expected the merge operation not to be invoked", operation.hasBeenInvoked); assertFalse("Expected collected containers not to be destroyed", collector.onDestroyHasBeenCalled); }
@Override public ObjectNode encode(Alarm alarm, CodecContext context) { checkNotNull(alarm, "Alarm cannot be null"); return context.mapper().createObjectNode() .put("id", alarm.id().toString()) .put("deviceId", alarm.deviceId().toString()) .put("description", alarm.description()) .put("source", alarm.source() == null ? null : alarm.source().toString()) .put("timeRaised", alarm.timeRaised()) .put("timeUpdated", alarm.timeUpdated()) .put("timeCleared", alarm.timeCleared()) .put("severity", alarm.severity().toString()) .put("serviceAffecting", alarm.serviceAffecting()) .put("acknowledged", alarm.acknowledged()) .put("manuallyClearable", alarm.manuallyClearable()) .put("assignedUser", alarm.assignedUser()); }
@Test public void alarmCodecTestWithOptionalFieldMissing() { JsonCodec<Alarm> codec = context.codec(Alarm.class); assertThat(codec, is(notNullValue())); ObjectNode alarmJson = codec.encode(alarmMinimumFields, context); assertThat(alarmJson, notNullValue()); assertThat(alarmJson, matchesAlarm(alarmMinimumFields)); }
public static <@NonNull E> CompletableSource resolveScopeFromLifecycle( final LifecycleScopeProvider<E> provider) throws OutsideScopeException { return resolveScopeFromLifecycle(provider, true); }
@Test public void lifecycleDefault_shouldFailIfNotStarted() { TestLifecycleScopeProvider lifecycle = TestLifecycleScopeProvider.create(); try { testSource(resolveScopeFromLifecycle(lifecycle)); throw new AssertionError( "Lifecycle resolution should have failed due to missing start " + "event"); } catch (LifecycleNotStartedException ignored) { } }
@Override public List<Intent> compile(LinkCollectionIntent intent, List<Intent> installable) { SetMultimap<DeviceId, PortNumber> inputPorts = HashMultimap.create(); SetMultimap<DeviceId, PortNumber> outputPorts = HashMultimap.create(); Map<ConnectPoint, Identifier<?>> labels = ImmutableMap.of(); Optional<EncapsulationConstraint> encapConstraint = this.getIntentEncapConstraint(intent); computePorts(intent, inputPorts, outputPorts); if (encapConstraint.isPresent()) { labels = labelAllocator.assignLabelToPorts(intent.links(), intent.key(), encapConstraint.get().encapType(), encapConstraint.get().suggestedIdentifier()); } ImmutableList.Builder<Intent> intentList = ImmutableList.builder(); if (this.isDomainProcessingEnabled(intent)) { intentList.addAll(this.getDomainIntents(intent, domainService)); } List<Objective> objectives = new ArrayList<>(); List<DeviceId> devices = new ArrayList<>(); for (DeviceId deviceId : outputPorts.keySet()) { // add only objectives that are not inside of a domain if (LOCAL.equals(domainService.getDomain(deviceId))) { List<Objective> deviceObjectives = createRules(intent, deviceId, inputPorts.get(deviceId), outputPorts.get(deviceId), labels); deviceObjectives.forEach(objective -> { objectives.add(objective); devices.add(deviceId); }); } } // if any objectives have been created if (!objectives.isEmpty()) { intentList.add(new FlowObjectiveIntent(appId, intent.key(), devices, objectives, intent.resources(), intent.resourceGroup())); } return intentList.build(); }
@Test public void singleHopTestForMp() { Set<Link> testLinks = ImmutableSet.of(); Set<FilteredConnectPoint> ingress = ImmutableSet.of( new FilteredConnectPoint(of1p1, vlan100Selector), new FilteredConnectPoint(of1p2, vlan100Selector) ); Set<FilteredConnectPoint> egress = ImmutableSet.of( new FilteredConnectPoint(of1p3, vlan100Selector) ); LinkCollectionIntent intent = LinkCollectionIntent.builder() .appId(appId) .selector(ethDstSelector) .treatment(treatment) .links(testLinks) .filteredIngressPoints(ingress) .filteredEgressPoints(egress) .build(); List<Intent> result = compiler.compile(intent, Collections.emptyList()); assertThat(result, hasSize(1)); assertThat(result.get(0), instanceOf(FlowObjectiveIntent.class)); FlowObjectiveIntent foIntent = (FlowObjectiveIntent) result.get(0); List<Objective> objectives = foIntent.objectives(); assertThat(objectives, hasSize(6)); TrafficSelector expectSelector = DefaultTrafficSelector .builder(ethDstSelector) .matchInPort(PortNumber.portNumber(1)) .matchVlanId(VLAN_100) .build(); TrafficTreatment expectTreatment = DefaultTrafficTreatment.builder() .setOutput(PortNumber.portNumber(3)) .build(); /* * First set of objectives */ filteringObjective = (FilteringObjective) objectives.get(0); forwardingObjective = (ForwardingObjective) objectives.get(1); nextObjective = (NextObjective) objectives.get(2); PortCriterion inPortCriterion = (PortCriterion) expectSelector.getCriterion(Criterion.Type.IN_PORT); // test case for first filtering objective checkFiltering(filteringObjective, inPortCriterion, intent.priority(), null, appId, true, vlan100Selector.criteria()); // test case for first next objective checkNext(nextObjective, SIMPLE, expectTreatment, expectSelector, ADD); // test case for first forwarding objective checkForward(forwardingObjective, ADD, expectSelector, nextObjective.id(), SPECIFIC); /* * Second set of objectives */ filteringObjective = (FilteringObjective) objectives.get(3); forwardingObjective = (ForwardingObjective) objectives.get(4); nextObjective = (NextObjective) objectives.get(5); expectSelector = DefaultTrafficSelector.builder(ethDstSelector) .matchInPort(PortNumber.portNumber(2)) .matchVlanId(VLAN_100) .build(); inPortCriterion = (PortCriterion) expectSelector.getCriterion(Criterion.Type.IN_PORT); // test case for second filtering objective checkFiltering(filteringObjective, inPortCriterion, intent.priority(), null, appId, true, vlan100Selector.criteria()); // test case for second next objective checkNext(nextObjective, SIMPLE, expectTreatment, expectSelector, ADD); // test case for second forwarding objective checkForward(forwardingObjective, ADD, expectSelector, nextObjective.id(), SPECIFIC); }
@Override public IcebergEnumeratorState snapshotState(long checkpointId) { return new IcebergEnumeratorState( enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot()); }
@Test public void testDiscoverWhenReaderRegistered() throws Exception { TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext = new TestingSplitEnumeratorContext<>(4); ScanContext scanContext = ScanContext.builder() .streaming(true) .startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL) .build(); ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0); ContinuousIcebergEnumerator enumerator = createEnumerator(enumeratorContext, scanContext, splitPlanner); // register one reader, and let it request a split enumeratorContext.registerReader(2, "localhost"); enumerator.addReader(2); enumerator.handleSourceEvent(2, new SplitRequestEvent()); // make one split available and trigger the periodic discovery List<IcebergSourceSplit> splits = SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1); splitPlanner.addSplits(splits); enumeratorContext.triggerAllActions(); assertThat(enumerator.snapshotState(1).pendingSplits()).isEmpty(); assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits()) .contains(splits.get(0)); }
public void putString(String key, String str) { checkNotNull(key); checkNotNull(str); put(key, str); }
@Test void testArrayInvalidSingleValue() { DescriptorProperties properties = new DescriptorProperties(); properties.putString(ARRAY_KEY, "INVALID"); assertThatThrownBy(() -> testArrayValidation(properties, 1, Integer.MAX_VALUE)) .isInstanceOf(ValidationException.class); }
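// Hedged companion sketch: putString() guards both arguments with checkNotNull, so a null key
// (or value) should fail fast with a NullPointerException instead of being stored.
@Test
void testPutStringRejectsNullKey() {
  DescriptorProperties properties = new DescriptorProperties();
  assertThatThrownBy(() -> properties.putString(null, "value"))
      .isInstanceOf(NullPointerException.class);
}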
@VisibleForTesting static <T> Udaf<T, Struct, T> earliestT( final boolean ignoreNulls ) { return new Udaf<T, Struct, T>() { Schema structSchema; SqlType aggregateType; SqlType returnType; @Override public void initializeTypeArguments(final List<SqlArgument> argTypeList) { returnType = argTypeList.get(0).getSqlTypeOrThrow(); final Schema connectType = SchemaConverters.sqlToConnectConverter().toConnectSchema(returnType); structSchema = KudafByOffsetUtils.buildSchema(connectType); aggregateType = SchemaConverters.connectToSqlConverter().toSqlType(structSchema); } @Override public Optional<SqlType> getAggregateSqlType() { return Optional.of(aggregateType); } @Override public Optional<SqlType> getReturnSqlType() { return Optional.of(returnType); } @Override public Struct initialize() { return null; } @Override public Struct aggregate(final T current, final Struct aggregate) { if (aggregate != null) { return aggregate; } if (current == null && ignoreNulls) { return null; } return createStruct(structSchema, current); } @Override public Struct merge(final Struct aggOne, final Struct aggTwo) { if (aggOne == null) { return aggTwo; } if (aggTwo == null) { return aggOne; } // When merging we need some way of evaluating the "earliest" one. // We do this by keeping track of the sequence of when it was originally processed if (INTERMEDIATE_STRUCT_COMPARATOR.compare(aggOne, aggTwo) < 0) { return aggOne; } else { return aggTwo; } } @Override @SuppressWarnings("unchecked") public T map(final Struct agg) { if (agg == null) { return null; } return (T) agg.get(VAL_FIELD); } }; }
@Test public void shouldInitialize() { // Given: final Udaf<Integer, Struct, Integer> udaf = EarliestByOffset .earliestT(true); // When: final Struct init = udaf.initialize(); // Then: assertThat(init, is(nullValue())); }
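// Hedged companion sketch: with ignoreNulls=true, aggregating a null current value over the
// null initial aggregate returns null before the struct schema is touched, so no
// initializeTypeArguments() call is needed on this path.
@Test
public void shouldIgnoreNullCurrentValueWhenConfigured() {
  final Udaf<Integer, Struct, Integer> udaf = EarliestByOffset.earliestT(true);
  assertThat(udaf.aggregate(null, udaf.initialize()), is(nullValue()));
}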
public void remove(long item1, long item2) { lock.writeLock().lock(); try { RoaringBitmap bitSet = map.get(item1); if (bitSet != null) { bitSet.remove(item2, item2 + 1); if (bitSet.isEmpty()) { map.remove(item1, bitSet); } } } finally { lock.writeLock().unlock(); } }
@Test public void testRemove() { ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet(); int items = 10; for (int i = 0; i < items; i++) { set.add(1, i); } for (int i = 0; i < items / 2; i++) { set.remove(1, i); } assertEquals(set.size(), items / 2); for (int i = 0; i < items / 2; i++) { set.remove(2, i); } assertEquals(set.size(), items / 2); for (int i = 0; i < items / 2; i++) { set.remove(1, i + 10000); } assertEquals(set.size(), items / 2); for (int i = 0; i < items / 2; i++) { set.remove(1, i + items / 2); } assertEquals(set.size(), 0); assertTrue(set.isEmpty()); }
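// Hedged companion sketch: per the focal method, removing from an item1 with no backing bitmap
// is a safe no-op, and removing the last pair for an item1 drops its map entry entirely.
@Test
public void testRemoveOnMissingKeysIsNoOp() {
  ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet();
  set.remove(1, 1);
  assertTrue(set.isEmpty());
  set.add(1, 1);
  set.remove(1, 1);
  assertTrue(set.isEmpty());
}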
@Override public Map<K, V> getCachedMap() { return localCacheView.getCachedMap(); }
@Test public void testAddAndGet() { RLocalCachedMap<Integer, Integer> map = redisson.getLocalCachedMap(LocalCachedMapOptions.<Integer, Integer>name("test") .codec(new CompositeCodec(redisson.getConfig().getCodec(), IntegerCodec.INSTANCE))); Map<Integer, Integer> cache = map.getCachedMap(); map.put(1, 100); Integer res = map.addAndGet(1, 12); assertThat(cache.size()).isEqualTo(1); assertThat(res).isEqualTo(112); res = map.get(1); assertThat(res).isEqualTo(112); RMap<Integer, Double> map2 = redisson.getLocalCachedMap(LocalCachedMapOptions.<Integer, Double>name("test2") .codec(new CompositeCodec(redisson.getConfig().getCodec(), DoubleCodec.INSTANCE))); map2.put(1, 100.2); Double res2 = map2.addAndGet(1, 12.1); assertThat(res2).isEqualTo(112.3); res2 = map2.get(1); assertThat(res2).isEqualTo(112.3); RMap<String, Integer> mapStr = redisson.getLocalCachedMap(LocalCachedMapOptions.<String, Integer>name("test3").codec(new CompositeCodec(redisson.getConfig().getCodec(), IntegerCodec.INSTANCE))); assertThat(mapStr.put("1", 100)).isNull(); assertThat(mapStr.addAndGet("1", 12)).isEqualTo(112); assertThat(mapStr.get("1")).isEqualTo(112); assertThat(cache.size()).isEqualTo(1); }
@VisibleForTesting static void initAddrUseFqdn(List<InetAddress> addrs) { useFqdn = true; analyzePriorityCidrs(); String fqdn = null; if (PRIORITY_CIDRS.isEmpty()) { // Get FQDN from local host by default. try { InetAddress localHost = InetAddress.getLocalHost(); fqdn = localHost.getCanonicalHostName(); String ip = localHost.getHostAddress(); LOG.info("Get FQDN from local host by default, FQDN: {}, ip: {}, v6: {}", fqdn, ip, localHost instanceof Inet6Address); } catch (UnknownHostException e) { LOG.error("failed to get FQDN from local host, will exit", e); System.exit(-1); } if (fqdn == null) { LOG.error("priority_networks is not set and we cannot get FQDN from local host"); System.exit(-1); } // Try to resolve addr from FQDN InetAddress uncheckedInetAddress = null; try { uncheckedInetAddress = InetAddress.getByName(fqdn); } catch (UnknownHostException e) { LOG.error("failed to parse FQDN: {}, message: {}", fqdn, e.getMessage(), e); System.exit(-1); } if (null == uncheckedInetAddress) { LOG.error("failed to parse FQDN: {}", fqdn); System.exit(-1); } // Check whether the InetAddress obtained via FQDN is bound to some network interface boolean hasInetAddr = false; for (InetAddress addr : addrs) { LOG.info("Try to match addr in fqdn mode, ip: {}, FQDN: {}", addr.getHostAddress(), addr.getCanonicalHostName()); if (addr.getCanonicalHostName() .equals(uncheckedInetAddress.getCanonicalHostName())) { hasInetAddr = true; break; } } if (hasInetAddr) { localAddr = uncheckedInetAddress; LOG.info("Using FQDN from local host by default, FQDN: {}, ip: {}, v6: {}", localAddr.getCanonicalHostName(), localAddr.getHostAddress(), localAddr instanceof Inet6Address); } else { LOG.error("Cannot find a network interface matching FQDN: {}", fqdn); System.exit(-1); } } else { LOG.info("using priority_networks in fqdn mode to decide whether ipv6 or ipv4 is preferred"); for (InetAddress addr : addrs) { String hostAddr = addr.getHostAddress(); String canonicalHostName = addr.getCanonicalHostName(); LOG.info("Try to match addr in fqdn mode, ip: {}, FQDN: {}", hostAddr, canonicalHostName); if (isInPriorNetwork(hostAddr)) { localAddr = addr; fqdn = canonicalHostName; LOG.info("Using FQDN from matched addr, FQDN: {}, ip: {}, v6: {}", fqdn, hostAddr, addr instanceof Inet6Address); break; } LOG.info("skip addr {} not belonged to priority networks in FQDN mode", addr); } if (fqdn == null) { LOG.error("priority_networks has been set and we cannot find matched addr, will exit"); System.exit(-1); } } // double-check the reverse resolve String canonicalHostName = localAddr.getCanonicalHostName(); if (!canonicalHostName.equals(fqdn)) { LOG.error("The FQDN of the parsed address [{}] is not the same as " + "the FQDN obtained from the host [{}]", canonicalHostName, fqdn); System.exit(-1); } }
@Test(expected = IllegalAccessException.class) public void testGetStartWithFQDNGetNullCanonicalHostName() { testInitAddrUseFqdnCommonMock(); List<InetAddress> hosts = NetUtils.getHosts(); new MockUp<InetAddress>() { @Mock public InetAddress getLocalHost() throws UnknownHostException { return addr; } @Mock public String getHostAddress() { return "127.0.0.10"; } @Mock public String getCanonicalHostName() { return null; } }; FrontendOptions.initAddrUseFqdn(hosts); }
protected static String getPartition(String relative) { return getParent(relative); }
@Test public void testGetPartition() { assertEquals("year=2017/month=10", getPartition("year=2017/month=10/part-0000.avro")); }
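// Hedged companion sketch: getPartition() simply delegates to getParent(), so a single-level
// layout should yield a single partition segment (POSIX-style '/' separators assumed).
@Test
public void testGetPartitionSingleLevel() {
  assertEquals("year=2017", getPartition("year=2017/part-0000.avro"));
}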
public void incBrokerPutNumsWithoutSystemTopic(final String topic, final int incValue) { if (TopicValidator.isSystemTopic(topic)) { return; } this.statsTable.get(BROKER_PUT_NUMS_WITHOUT_SYSTEM_TOPIC).getAndCreateStatsItem(this.clusterName).getValue().add(incValue); }
@Test public void testIncBrokerPutNumsWithoutSystemTopic() { brokerStatsManager.incBrokerPutNumsWithoutSystemTopic(TOPIC, 1); assertThat(brokerStatsManager.getStatsItem(BrokerStatsManager.BROKER_PUT_NUMS_WITHOUT_SYSTEM_TOPIC, CLUSTER_NAME) .getValue().doubleValue()).isEqualTo(1L); assertThat(brokerStatsManager.getBrokerPutNumsWithoutSystemTopic()).isEqualTo(1L); brokerStatsManager.incBrokerPutNumsWithoutSystemTopic(TopicValidator.RMQ_SYS_TRACE_TOPIC, 1); assertThat(brokerStatsManager.getStatsItem(BrokerStatsManager.BROKER_PUT_NUMS_WITHOUT_SYSTEM_TOPIC, CLUSTER_NAME) .getValue().doubleValue()).isEqualTo(1L); assertThat(brokerStatsManager.getBrokerPutNumsWithoutSystemTopic()).isEqualTo(1L); }
@Override public String toString() { return "DataflowRunner#" + options.getJobName(); }
@Test public void testToString() { DataflowPipelineOptions options = PipelineOptionsFactory.as(DataflowPipelineOptions.class); options.setJobName("TestJobName"); options.setProject("test-project"); options.setRegion(REGION_ID); options.setTempLocation("gs://test/temp/location"); options.setGcpCredential(new TestCredential()); options.setPathValidatorClass(NoopPathValidator.class); options.setRunner(DataflowRunner.class); assertEquals("DataflowRunner#testjobname", DataflowRunner.fromOptions(options).toString()); }
@NonNull public List<FilePath> list() throws IOException, InterruptedException { return list((FileFilter) null); }
@Test public void listWithExcludes() throws Exception { File baseDir = temp.getRoot(); final Set<FilePath> expected = new HashSet<>(); expected.add(createFilePath(baseDir, "top", "sub", "app.log")); createFilePath(baseDir, "top", "sub", "trace.log"); expected.add(createFilePath(baseDir, "top", "db", "db.log")); createFilePath(baseDir, "top", "db", "trace.log"); final FilePath[] result = new FilePath(baseDir).list("**", "**/trace.log"); assertEquals(expected, new HashSet<>(Arrays.asList(result))); }
@Override public boolean match(Message msg, StreamRule rule) { final boolean inverted = rule.getInverted(); final Object field = msg.getField(rule.getField()); if (field != null) { final String value = field.toString(); return inverted ^ value.contains(rule.getValue()); } else { return inverted; } }
@Test public void testSuccessfulMatchInArray() { msg.addField("something", Collections.singleton("foobar")); StreamRuleMatcher matcher = getMatcher(rule); assertTrue(matcher.match(msg, rule)); }
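// Hedged companion sketch (assumes Mockito is available and that getMatcher() only needs the
// rule's field, value, and inverted flag): when the message lacks the rule's field, the focal
// matcher falls through to "return inverted", so an inverted rule on an absent, hypothetical
// field should match.
@Test
public void testMissingFieldMatchesInvertedRule() {
  StreamRule invertedRule = mock(StreamRule.class);
  when(invertedRule.getInverted()).thenReturn(true);
  when(invertedRule.getField()).thenReturn("nonexistent");
  assertTrue(getMatcher(invertedRule).match(msg, invertedRule));
}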
@SuppressWarnings( "unchecked" ) public List<? extends ConnectionDetails> getConnectionDetailsByScheme( String scheme ) { initialize(); ConnectionProvider provider = connectionProviders.get( scheme ); if ( provider != null ) { List<String> names = namesByConnectionProvider.get( provider.getName() ); if ( names != null && !names.isEmpty() ) { List<ConnectionDetails> details = new ArrayList<>(); for ( String name : names ) { details.add( detailsByName.get( name ) ); } return details; } } return Collections.emptyList(); }
@Test public void testGetConnectionDetailsBySchemeEmpty() { addOne(); assertEquals( 0, connectionManager.getConnectionDetailsByScheme( DOES_NOT_EXIST ).size() ); }
public static String getFullUrl(HttpServletRequest request) { if (request.getQueryString() == null) { return request.getRequestURI(); } return request.getRequestURI() + "?" + request.getQueryString(); }
@Test void formatsFullURIs() throws Exception { assertThat(Servlets.getFullUrl(fullRequest)) .isEqualTo("/one/two?one=two&three=four"); }
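// Hedged companion sketch: when getQueryString() returns null, the focal method returns the
// bare request URI. The Mockito setup mirrors what the suite's "fullRequest" fixture
// presumably does for the query-string case.
@Test
void formatsUrisWithoutQueryStrings() {
  HttpServletRequest request = mock(HttpServletRequest.class);
  when(request.getRequestURI()).thenReturn("/one/two");
  when(request.getQueryString()).thenReturn(null);
  assertThat(Servlets.getFullUrl(request)).isEqualTo("/one/two");
}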