focal_method (string, lengths 13-60.9k) | test_case (string, lengths 25-109k) |
---|---|
void handleStatement(final QueuedCommand queuedCommand) {
throwIfNotConfigured();
handleStatementWithTerminatedQueries(
queuedCommand.getAndDeserializeCommand(commandDeserializer),
queuedCommand.getAndDeserializeCommandId(),
queuedCommand.getStatus(),
Mode.EXECUTE,
queuedCommand.getOffset(),
false
);
} | @Test
public void shouldThrowOnUnexpectedException() {
// Given:
final String statementText = "mama said knock you out";
final RuntimeException exception = new RuntimeException("i'm gonna knock you out");
when(mockParser.parseSingleStatement(statementText)).thenThrow(exception);
final Command command = new Command(
statementText,
emptyMap(),
emptyMap(),
Optional.empty()
);
final CommandId commandId = new CommandId(
CommandId.Type.STREAM, "_CSASGen", CommandId.Action.CREATE);
// When:
try {
handleStatement(statementExecutorWithMocks, command, commandId, Optional.empty(), 0);
Assert.fail("handleStatement should throw");
} catch (final RuntimeException caughtException) {
// Then:
assertThat(caughtException, is(exception));
}
} |
public Meter meter(String name) {
return getOrAdd(name, MetricBuilder.METERS);
} | @Test
public void accessingAMeterRegistersAndReusesIt() {
final Meter meter1 = registry.meter("thing");
final Meter meter2 = registry.meter("thing");
assertThat(meter1)
.isSameAs(meter2);
verify(listener).onMeterAdded("thing", meter1);
} |
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(DB2_BOOLEAN);
builder.dataType(DB2_BOOLEAN);
break;
case TINYINT:
case SMALLINT:
builder.columnType(DB2_SMALLINT);
builder.dataType(DB2_SMALLINT);
break;
case INT:
builder.columnType(DB2_INT);
builder.dataType(DB2_INT);
break;
case BIGINT:
builder.columnType(DB2_BIGINT);
builder.dataType(DB2_BIGINT);
break;
case FLOAT:
builder.columnType(DB2_REAL);
builder.dataType(DB2_REAL);
break;
case DOUBLE:
builder.columnType(DB2_DOUBLE);
builder.dataType(DB2_DOUBLE);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", DB2_DECIMAL, precision, scale));
builder.dataType(DB2_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, MAX_VARBINARY_LENGTH));
builder.dataType(DB2_VARBINARY);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_BINARY, column.getColumnLength()));
builder.dataType(DB2_BINARY);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARBINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, column.getColumnLength()));
builder.dataType(DB2_VARBINARY);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_BLOB_LENGTH) {
length = MAX_BLOB_LENGTH;
log.warn(
"The length of blob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_BLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_BLOB, length));
builder.dataType(DB2_BLOB);
builder.length(length);
}
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(String.format("%s(%s)", DB2_VARCHAR, MAX_VARCHAR_LENGTH));
builder.dataType(DB2_VARCHAR);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_CHAR_LENGTH) {
builder.columnType(String.format("%s(%s)", DB2_CHAR, column.getColumnLength()));
builder.dataType(DB2_CHAR);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARCHAR, column.getColumnLength()));
builder.dataType(DB2_VARCHAR);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_CLOB_LENGTH) {
length = MAX_CLOB_LENGTH;
log.warn(
"The length of clob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_CLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_CLOB, length));
builder.dataType(DB2_CLOB);
builder.length(length);
}
break;
case DATE:
builder.columnType(DB2_DATE);
builder.dataType(DB2_DATE);
break;
case TIME:
builder.columnType(DB2_TIME);
builder.dataType(DB2_TIME);
break;
case TIMESTAMP:
if (column.getScale() != null && column.getScale() > 0) {
int timestampScale = column.getScale();
if (column.getScale() > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(String.format("%s(%s)", DB2_TIMESTAMP, timestampScale));
builder.scale(timestampScale);
} else {
builder.columnType(DB2_TIMESTAMP);
}
builder.dataType(DB2_TIMESTAMP);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.DB_2,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
} | @Test
public void testReconvertInt() {
Column column = PhysicalColumn.builder().name("test").dataType(BasicType.INT_TYPE).build();
BasicTypeDefine typeDefine = DB2TypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(DB2TypeConverter.DB2_INT, typeDefine.getColumnType());
Assertions.assertEquals(DB2TypeConverter.DB2_INT, typeDefine.getDataType());
} |
@Override
public Stream<FileSlice> getLatestFileSlicesBeforeOrOn(String partitionPath, String maxCommitTime,
boolean includeFileSlicesInPendingCompaction) {
return execute(partitionPath, maxCommitTime, includeFileSlicesInPendingCompaction,
preferredView::getLatestFileSlicesBeforeOrOn, (path, commitTime, includeSlices) -> getSecondaryView().getLatestFileSlicesBeforeOrOn(path, commitTime, includeSlices));
} | @Test
public void testGetLatestFileSlicesBeforeOrOn() {
Stream<FileSlice> actual;
Stream<FileSlice> expected = testFileSliceStream;
String partitionPath = "/table2";
String maxCommitTime = "2020-01-01";
when(primary.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false))
.thenReturn(testFileSliceStream);
actual = fsView.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false);
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false))
.thenThrow(new RuntimeException());
when(secondary.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false))
.thenReturn(testFileSliceStream);
actual = fsView.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false);
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false))
.thenReturn(testFileSliceStream);
actual = fsView.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false);
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false))
.thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime, false);
});
} |
public static <K> KTableHolder<K> build(
final KTableHolder<K> left,
final KTableHolder<K> right,
final TableTableJoin<K> join
) {
final LogicalSchema leftSchema;
final LogicalSchema rightSchema;
if (join.getJoinType().equals(RIGHT)) {
leftSchema = right.getSchema();
rightSchema = left.getSchema();
} else {
leftSchema = left.getSchema();
rightSchema = right.getSchema();
}
final JoinParams joinParams = JoinParamsFactory
.create(join.getKeyColName(), leftSchema, rightSchema);
final KTable<K, GenericRow> result;
switch (join.getJoinType()) {
case INNER:
result = left.getTable().join(right.getTable(), joinParams.getJoiner());
break;
case LEFT:
result = left.getTable().leftJoin(right.getTable(), joinParams.getJoiner());
break;
case RIGHT:
result = right.getTable().leftJoin(left.getTable(), joinParams.getJoiner());
break;
case OUTER:
result = left.getTable().outerJoin(right.getTable(), joinParams.getJoiner());
break;
default:
throw new IllegalStateException("invalid join type: " + join.getJoinType());
}
return KTableHolder.unmaterialized(
result,
joinParams.getSchema(),
left.getExecutionKeyFactory());
} | @Test
public void shouldDoInnerJoinWithSytheticKey() {
// Given:
givenInnerJoin(SYNTH_KEY);
// When:
join.build(planBuilder, planInfo);
// Then:
verify(leftKTable).join(
same(rightKTable),
eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 1))
);
} |
@Override
public boolean equals(@Nullable Object object) {
if (object instanceof MetricsContainerImpl) {
MetricsContainerImpl metricsContainerImpl = (MetricsContainerImpl) object;
return Objects.equals(stepName, metricsContainerImpl.stepName)
&& Objects.equals(counters, metricsContainerImpl.counters)
&& Objects.equals(distributions, metricsContainerImpl.distributions)
&& Objects.equals(gauges, metricsContainerImpl.gauges)
&& Objects.equals(stringSets, metricsContainerImpl.stringSets);
}
return false;
} | @Test
public void testEquals() {
MetricsContainerImpl metricsContainerImpl = new MetricsContainerImpl("stepName");
MetricsContainerImpl equal = new MetricsContainerImpl("stepName");
Assert.assertEquals(metricsContainerImpl, equal);
Assert.assertEquals(metricsContainerImpl.hashCode(), equal.hashCode());
} |
public String getXML() {
StringBuilder retval = new StringBuilder( 300 );
retval
.append( " " ).append(
XMLHandler.addTagValue( "connection", databaseMeta == null ? "" : databaseMeta.getName() ) );
retval.append( " " ).append( XMLHandler.addTagValue( "commit", commitSize ) );
retval.append( " " ).append( XMLHandler.addTagValue( "bind_size", bindSize ) );
retval.append( " " ).append( XMLHandler.addTagValue( "read_size", readSize ) );
retval.append( " " ).append( XMLHandler.addTagValue( "errors", maxErrors ) );
retval.append( " " ).append( XMLHandler.addTagValue( "schema", schemaName ) );
retval.append( " " ).append( XMLHandler.addTagValue( "table", tableName ) );
retval.append( " " ).append( XMLHandler.addTagValue( "load_method", loadMethod ) );
retval.append( " " ).append( XMLHandler.addTagValue( "load_action", loadAction ) );
retval.append( " " ).append( XMLHandler.addTagValue( "sqlldr", sqlldr ) );
retval.append( " " ).append( XMLHandler.addTagValue( "control_file", controlFile ) );
retval.append( " " ).append( XMLHandler.addTagValue( "data_file", dataFile ) );
retval.append( " " ).append( XMLHandler.addTagValue( "log_file", logFile ) );
retval.append( " " ).append( XMLHandler.addTagValue( "bad_file", badFile ) );
retval.append( " " ).append( XMLHandler.addTagValue( "discard_file", discardFile ) );
retval.append( " " ).append( XMLHandler.addTagValue( "direct_path", directPath ) );
retval.append( " " ).append( XMLHandler.addTagValue( "erase_files", eraseFiles ) );
retval.append( " " ).append( XMLHandler.addTagValue( "encoding", encoding ) );
retval.append( " " ).append( XMLHandler.addTagValue( "dbname_override", dbNameOverride ) );
retval.append( " " ).append( XMLHandler.addTagValue( "character_set", characterSetName ) );
retval.append( " " ).append( XMLHandler.addTagValue( "fail_on_warning", failOnWarning ) );
retval.append( " " ).append( XMLHandler.addTagValue( "fail_on_error", failOnError ) );
retval.append( " " ).append( XMLHandler.addTagValue( "parallel", parallel ) );
retval.append( " " ).append( XMLHandler.addTagValue( "alt_rec_term", altRecordTerm ) );
for ( int i = 0; i < fieldTable.length; i++ ) {
retval.append( " <mapping>" ).append( Const.CR );
retval.append( " " ).append( XMLHandler.addTagValue( "stream_name", fieldTable[i] ) );
retval.append( " " ).append( XMLHandler.addTagValue( "field_name", fieldStream[i] ) );
retval.append( " " ).append( XMLHandler.addTagValue( "date_mask", dateMask[i] ) );
retval.append( " </mapping>" ).append( Const.CR );
}
return retval.toString();
} | @Test
public void testGetXML() {
OraBulkLoaderMeta oraBulkLoaderMeta = new OraBulkLoaderMeta();
oraBulkLoaderMeta.setFieldTable( new String[] { "fieldTable1", "fieldTable2" } );
oraBulkLoaderMeta.setFieldStream( new String[] { "fieldStreamValue1" } );
oraBulkLoaderMeta.setDateMask( new String[] {} );
oraBulkLoaderMeta.afterInjectionSynchronization();
//run without exception
oraBulkLoaderMeta.getXML();
Assert.assertEquals( oraBulkLoaderMeta.getFieldStream().length, oraBulkLoaderMeta.getDateMask().length );
} |
@Override
public Response onRequest(ReadRequest request) {
throw new UnsupportedOperationException("Temporarily not supported");
} | @Test
void testOnRequest() {
assertThrows(UnsupportedOperationException.class, () -> {
persistentClientOperationServiceImpl.onRequest(ReadRequest.newBuilder().build());
});
} |
@Override
public void start() throws Exception {
LOG.info("Starting split enumerator for source {}.", operatorName);
// we mark this as started first, so that we can later distinguish the cases where
// 'start()' wasn't called and where 'start()' failed.
started = true;
// there are two ways the SplitEnumerator can get created:
// (1) Source.restoreEnumerator(), in which case the 'resetToCheckpoint()' method creates
// it
// (2) Source.createEnumerator, in which case it has not been created, yet, and we create
// it here
if (enumerator == null) {
final ClassLoader userCodeClassLoader =
context.getCoordinatorContext().getUserCodeClassloader();
try (TemporaryClassLoaderContext ignored =
TemporaryClassLoaderContext.of(userCodeClassLoader)) {
enumerator = source.createEnumerator(context);
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
LOG.error("Failed to create Source Enumerator for source {}", operatorName, t);
context.failJob(t);
return;
}
}
// The start sequence is the first task in the coordinator executor.
// We rely on the single-threaded coordinator executor to guarantee
// the other methods are invoked after the enumerator has started.
runInEventLoop(() -> enumerator.start(), "starting the SplitEnumerator.");
if (coordinatorListeningID != null) {
coordinatorStore.compute(
coordinatorListeningID,
(key, oldValue) -> {
// The value for a listener ID can be a source coordinator listening to an
// event, or an event waiting to be retrieved
if (oldValue == null || oldValue instanceof OperatorCoordinator) {
// The coordinator has not registered or needs to be recreated after
// global failover.
return this;
} else {
checkState(
oldValue instanceof OperatorEvent,
"The existing value for "
+ coordinatorStore
+ "is expected to be an operator event, but it is in fact "
+ oldValue);
LOG.info(
"Handling event {} received before the source coordinator with ID {} is registered",
oldValue,
coordinatorListeningID);
handleEventFromOperator(0, 0, (OperatorEvent) oldValue);
// Since for non-global failover the coordinator will not be recreated
// and for global failover both the sender and receiver need to restart,
// the coordinator will receive the event only once.
// As the event has been processed, it can be removed safely and there's
// no need to register the coordinator for further events as well.
return null;
}
});
}
if (watermarkAlignmentParams.isEnabled()) {
LOG.info("Starting schedule the period announceCombinedWatermark task");
coordinatorStore.putIfAbsent(
watermarkAlignmentParams.getWatermarkGroup(), new WatermarkAggregator<>());
context.schedulePeriodTask(
this::announceCombinedWatermark,
watermarkAlignmentParams.getUpdateInterval(),
watermarkAlignmentParams.getUpdateInterval(),
TimeUnit.MILLISECONDS);
}
} | @Test
public void testListeningEventsFromOtherCoordinators() throws Exception {
final String listeningID = "testListeningID";
CoordinatorStore store = new CoordinatorStoreImpl();
final SourceCoordinator<?, ?> coordinator =
new SourceCoordinator<>(
OPERATOR_NAME,
createMockSource(),
context,
store,
WatermarkAlignmentParams.WATERMARK_ALIGNMENT_DISABLED,
listeningID);
coordinator.start();
assertThat(store.get(listeningID)).isNotNull().isSameAs(coordinator);
} |
@Override
public byte convertToByte(CharSequence value) {
if (value instanceof AsciiString && value.length() == 1) {
return ((AsciiString) value).byteAt(0);
}
return Byte.parseByte(value.toString());
} | @Test
public void testByteFromAsciiString() {
assertEquals(127, converter.convertToByte(AsciiString.of("127")));
} |
@Override
public boolean isIndexed(QueryContext queryContext) {
Index index = queryContext.matchIndex(attributeName, QueryContext.IndexMatchHint.PREFER_ORDERED);
return index != null && index.isOrdered() && expressionCanBeUsedAsIndexPrefix();
} | @Test
public void likePredicateIsIndexed_whenPercentWildcardIsUsed_andIndexIsSorted() {
QueryContext queryContext = mock(QueryContext.class);
when(queryContext.matchIndex("this", QueryContext.IndexMatchHint.PREFER_ORDERED)).thenReturn(createIndex(IndexType.SORTED));
assertTrue(new LikePredicate("this", "sub%").isIndexed(queryContext));
assertTrue(new LikePredicate("this", "sub\\\\%").isIndexed(queryContext));
assertTrue(new LikePredicate("this", "sub\\%string%").isIndexed(queryContext));
assertTrue(new LikePredicate("this", "sub\\_string%").isIndexed(queryContext));
} |
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
Iterable<RedisClusterNode> res = clusterGetNodes();
for (RedisClusterNode redisClusterNode : res) {
if (redisClusterNode.isMaster() && redisClusterNode.getSlotRange().contains(slot)) {
return redisClusterNode;
}
}
return null;
} | @Test
public void testClusterGetNodeForSlot() {
testInCluster(connection -> {
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
assertThat(node1.getId()).isNotEqualTo(node2.getId());
});
} |
public Result combine(Result other) {
return new Result(this.isPass() && other.isPass(), this.isDescend()
&& other.isDescend());
} | @Test
public void equalsFail() {
Result one = Result.FAIL;
Result two = Result.FAIL.combine(Result.FAIL);
assertEquals(one, two);
} |
@Override
public void run() {
updateElasticSearchHealthStatus();
updateFileSystemMetrics();
} | @Test
public void when_no_es_status_null_status_is_updated_to_red() {
ClusterHealthResponse clusterHealthResponse = Mockito.mock(ClusterHealthResponse.class);
when(clusterHealthResponse.getStatus()).thenReturn(null);
when(esClient.clusterHealth(any())).thenReturn(clusterHealthResponse);
underTest.run();
verify(serverMonitoringMetrics, times(1)).setElasticSearchStatusToRed();
verifyNoMoreInteractions(serverMonitoringMetrics);
} |
@Deprecated
@Override
public void init(final org.apache.kafka.streams.processor.ProcessorContext context, final StateStore root) {
store.init(context, root);
} | @Deprecated
@Test
public void shouldDeprecatedInitTimestampedStore() {
givenWrapperWithTimestampedStore();
final org.apache.kafka.streams.processor.ProcessorContext mockContext
= mock(org.apache.kafka.streams.processor.ProcessorContext.class);
wrapper.init(mockContext, wrapper);
verify(timestampedStore).init(mockContext, wrapper);
} |
static String lookupKafkaClusterId(WorkerConfig config) {
log.info("Creating Kafka admin client");
try (Admin adminClient = Admin.create(config.originals())) {
return lookupKafkaClusterId(adminClient);
}
} | @Test
public void testLookupNullKafkaClusterId() {
final Node broker1 = new Node(0, "dummyHost-1", 1234);
final Node broker2 = new Node(1, "dummyHost-2", 1234);
List<Node> cluster = Arrays.asList(broker1, broker2);
MockAdminClient adminClient = new MockAdminClient.Builder().
brokers(cluster).clusterId(null).build();
assertNull(WorkerConfig.lookupKafkaClusterId(adminClient));
} |
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldReturnEmptyIfRangeNotPresent() {
// Given:
when(kafkaStreams.query(any())).thenReturn(getEmptyIteratorResult());
// When:
final Iterator<Row> rowIterator = table.get(PARTITION, A_KEY, null).rowIterator;
// Then:
assertThat(rowIterator.hasNext(), is(false));
} |
public static Schema create(Type type) {
switch (type) {
case STRING:
return new StringSchema();
case BYTES:
return new BytesSchema();
case INT:
return new IntSchema();
case LONG:
return new LongSchema();
case FLOAT:
return new FloatSchema();
case DOUBLE:
return new DoubleSchema();
case BOOLEAN:
return new BooleanSchema();
case NULL:
return new NullSchema();
default:
throw new AvroRuntimeException("Can't create a: " + type);
}
} | @Test
void intDefaultValue() {
Schema.Field field = new Schema.Field("myField", Schema.create(Schema.Type.INT), "doc", 1);
assertTrue(field.hasDefaultValue());
assertEquals(1, field.defaultVal());
assertEquals(1, GenericData.get().getDefaultValue(field));
field = new Schema.Field("myField", Schema.create(Schema.Type.INT), "doc", Integer.MIN_VALUE);
assertTrue(field.hasDefaultValue());
assertEquals(Integer.MIN_VALUE, field.defaultVal());
assertEquals(Integer.MIN_VALUE, GenericData.get().getDefaultValue(field));
field = new Schema.Field("myField", Schema.create(Schema.Type.INT), "doc", Integer.MAX_VALUE);
assertTrue(field.hasDefaultValue());
assertEquals(Integer.MAX_VALUE, field.defaultVal());
assertEquals(Integer.MAX_VALUE, GenericData.get().getDefaultValue(field));
} |
@Override
protected double maintain() {
if ( ! nodeRepository().nodes().isWorking()) return 0.0;
if ( ! nodeRepository().zone().cloud().allowHostSharing()) return 1.0; // Re-balancing not necessary
if (nodeRepository().zone().environment().isTest()) return 1.0; // Short-lived deployments; no need to rebalance
if (nodeRepository().zone().system().isCd()) return 1.0; // CD tests assert on # of nodes, avoid re-balancing as it make tests unstable
// Work with an unlocked snapshot as this can take a long time and full consistency is not needed
NodeList allNodes = nodeRepository().nodes().list();
updateSkewMetric(allNodes);
if ( ! zoneIsStable(allNodes)) return 1.0;
findBestMove(allNodes).execute(true, Agent.Rebalancer, deployer, metric, nodeRepository());
return 1.0;
} | @Test
public void testNoRebalancingIfRecentlyDeployed() {
RebalancerTester tester = new RebalancerTester();
// --- Deploying a cpu heavy application - causing 1 of these nodes to be skewed
tester.deployApp(cpuApp);
Node cpuSkewedNode = tester.getNode(cpuApp);
tester.maintain();
assertFalse("No better place to move the skewed node, so no action is taken",
tester.getNode(cpuSkewedNode.hostname()).get().status().wantToRetire());
// --- Making a more suitable node configuration available causes rebalancing
Node newCpuHost = tester.makeReadyNode("cpu");
tester.activateTenantHosts();
tester.deployApp(cpuApp, false /* skip advancing clock after deployment */);
tester.maintain();
assertFalse("No action, since app was recently deployed", tester.isNodeRetired(cpuSkewedNode));
tester.clock().advance(waitTimeAfterPreviousDeployment);
tester.maintain();
assertTrue("Rebalancer retired the node we wanted to move away from", tester.isNodeRetired(cpuSkewedNode));
assertTrue("... and added a node on the new host instead",
tester.getNodes(cpuApp, Node.State.active).stream().anyMatch(node -> node.hasParent(newCpuHost.hostname())));
} |
@Override
public RemoteData.Builder serialize() {
final RemoteData.Builder remoteBuilder = RemoteData.newBuilder();
remoteBuilder.addDataLongs(getTotal());
remoteBuilder.addDataLongs(getTimeBucket());
remoteBuilder.addDataStrings(getEntityId());
remoteBuilder.addDataStrings(getServiceId());
return remoteBuilder;
} | @Test
public void testSerialize() {
long time = 1597113447737L;
function.accept(MeterEntity.newService("sum_sync_time", Layer.GENERAL), time);
SumPerMinFunction function2 = Mockito.spy(SumPerMinFunction.class);
function2.deserialize(function.serialize().build());
assertThat(function2.getEntityId()).isEqualTo(function.getEntityId());
assertThat(function2.getTimeBucket()).isEqualTo(function.getTimeBucket());
} |
public String toJson() {
JsonObject details = new JsonObject();
details.addProperty(FIELD_LEVEL, level.toString());
JsonArray conditionResults = new JsonArray();
for (EvaluatedCondition condition : this.conditions) {
conditionResults.add(toJson(condition));
}
details.add("conditions", conditionResults);
details.addProperty(FIELD_IGNORED_CONDITIONS, ignoredConditions);
return details.toString();
} | @Test
public void verify_json_for_small_leak() {
String actualJson = new QualityGateDetailsData(Measure.Level.OK, Collections.emptyList(), false).toJson();
JsonAssert.assertJson(actualJson).isSimilarTo("{\"ignoredConditions\": false}");
String actualJson2 = new QualityGateDetailsData(Measure.Level.OK, Collections.emptyList(), true).toJson();
JsonAssert.assertJson(actualJson2).isSimilarTo("{\"ignoredConditions\": true}");
} |
public Map<String, Gauge<Long>> gauges() {
Map<String, Gauge<Long>> gauges = new HashMap<>();
final TrafficCounter tc = trafficCounter();
gauges.put(READ_BYTES_1_SEC, new Gauge<Long>() {
@Override
public Long getValue() {
return tc.lastReadBytes();
}
});
gauges.put(WRITTEN_BYTES_1_SEC, new Gauge<Long>() {
@Override
public Long getValue() {
return tc.lastWrittenBytes();
}
});
gauges.put(READ_BYTES_TOTAL, new Gauge<Long>() {
@Override
public Long getValue() {
return tc.cumulativeReadBytes();
}
});
gauges.put(WRITTEN_BYTES_TOTAL, new Gauge<Long>() {
@Override
public Long getValue() {
return tc.cumulativeWrittenBytes();
}
});
return gauges;
} | @Test
@Ignore("Flaky test")
public void counterReturnsWrittenBytes() throws InterruptedException {
final ByteBuf byteBuf = Unpooled.copiedBuffer("Test", StandardCharsets.US_ASCII);
channel.writeOutbound(byteBuf);
Thread.sleep(1000L);
channel.writeOutbound(byteBuf);
channel.finish();
final Map<String, Gauge<Long>> gauges = throughputCounter.gauges();
assertThat(gauges.get(ThroughputCounter.READ_BYTES_1_SEC).getValue()).isEqualTo(0L);
assertThat(gauges.get(ThroughputCounter.WRITTEN_BYTES_1_SEC).getValue()).isEqualTo(4L);
assertThat(gauges.get(ThroughputCounter.READ_BYTES_TOTAL).getValue()).isEqualTo(0L);
assertThat(gauges.get(ThroughputCounter.WRITTEN_BYTES_TOTAL).getValue()).isEqualTo(8L);
} |
public static CredentialService getInstance() {
return getInstance(null);
} | @Test
void testGetInstance2() {
CredentialService credentialService1 = CredentialService.getInstance(APP_NAME);
CredentialService credentialService2 = CredentialService.getInstance(APP_NAME);
assertEquals(credentialService1, credentialService2);
} |
public static String parsePath(String uri, Map<String, String> patterns) {
if (uri == null) {
return null;
} else if (StringUtils.isBlank(uri)) {
return String.valueOf(SLASH);
}
CharacterIterator ci = new StringCharacterIterator(uri);
StringBuilder pathBuffer = new StringBuilder();
char c = ci.first();
if (c == CharacterIterator.DONE) {
return String.valueOf(SLASH);
}
do {
if (c == OPEN) {
String regexBuffer = cutParameter(ci, patterns);
if (regexBuffer == null) {
LOGGER.warn("Operation path \"{}\" contains syntax error.", uri);
return null;
}
pathBuffer.append(regexBuffer);
} else {
int length = pathBuffer.length();
if (!(c == SLASH && (length != 0 && pathBuffer.charAt(length - 1) == SLASH))) {
pathBuffer.append(c);
}
}
} while ((c = ci.next()) != CharacterIterator.DONE);
return pathBuffer.toString();
} | @Test(description = "parse path like /swagger.{json|yaml}")
public void test() {
final Map<String, String> regexMap = new HashMap<String, String>();
final String path = PathUtils.parsePath("/swagger.{json|yaml}", regexMap);
assertEquals(path, "/swagger.{json|yaml}");
assertEquals(regexMap.size(), 0);
} |
public static byte[] decryptAES(byte[] data, byte[] key) {
return desTemplate(data, key, AES_Algorithm, AES_Transformation, false);
} | @Test
public void decryptAES() throws Exception {
TestCase.assertTrue(
Arrays.equals(
bytesDataAES,
EncryptKit.decryptAES(bytesResAES, bytesKeyAES)
)
);
TestCase.assertTrue(
Arrays.equals(
bytesDataAES,
EncryptKit.decryptHexStringAES(resAES, bytesKeyAES)
)
);
TestCase.assertTrue(
Arrays.equals(
bytesDataAES,
EncryptKit.decryptBase64AES(Base64.getEncoder().encode(bytesResAES), bytesKeyAES)
)
);
} |
public CoercedExpressionResult coerce() {
final Class<?> leftClass = left.getRawClass();
final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
final Class<?> rightClass = right.getRawClass();
final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);
boolean sameClass = leftClass == rightClass;
boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;
if (sameClass || isUnificationExpression) {
return new CoercedExpressionResult(left, right);
}
if (!canCoerce()) {
throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
}
if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
return new CoercedExpressionResult(
new TypedExpression(castExpression, double.class, left.getType()),
right,
false);
}
final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass );
final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);
boolean rightAsStaticField = false;
final Expression rightExpression = right.getExpression();
final TypedExpression coercedRight;
if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
coercedRight.setType( leftClass );
} else if (shouldCoerceBToString(left, right)) {
coercedRight = coerceToString(right);
} else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
coercedRight = castToClass(leftClass);
} else if (leftClass == long.class && rightClass == int.class) {
coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
} else if (leftClass == Date.class && rightClass == String.class) {
coercedRight = coerceToDate(right);
rightAsStaticField = true;
} else if (leftClass == LocalDate.class && rightClass == String.class) {
coercedRight = coerceToLocalDate(right);
rightAsStaticField = true;
} else if (leftClass == LocalDateTime.class && rightClass == String.class) {
coercedRight = coerceToLocalDateTime(right);
rightAsStaticField = true;
} else if (shouldCoerceBToMap()) {
coercedRight = castToClass(toNonPrimitiveType(leftClass));
} else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
coercedRight = coerceBoolean(right);
} else {
coercedRight = right;
}
final TypedExpression coercedLeft;
if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
coercedLeft = coerceToString(left);
} else {
coercedLeft = left;
}
return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
} | @Test
public void doNotCastNameExprLiterals() {
final TypedExpression left = expr(THIS_PLACEHOLDER + ".getAgeAsShort()", java.lang.Short.class);
final TypedExpression right = expr("$age", int.class);
final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce();
assertThat(coerce.getCoercedRight()).isEqualTo(expr("$age", int.class));
} |
@VisibleForTesting
DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block,
List<DatanodeDescriptor> containingNodes,
List<DatanodeStorageInfo> nodesContainingLiveReplicas,
NumberReplicas numReplicas, List<Byte> liveBlockIndices,
List<Byte> liveBusyBlockIndices, List<Byte> excludeReconstructed, int priority) {
containingNodes.clear();
nodesContainingLiveReplicas.clear();
List<DatanodeDescriptor> srcNodes = new ArrayList<>();
liveBlockIndices.clear();
final boolean isStriped = block.isStriped();
DatanodeDescriptor decommissionedSrc = null;
BitSet liveBitSet = null;
BitSet decommissioningBitSet = null;
if (isStriped) {
int blockNum = ((BlockInfoStriped) block).getTotalBlockNum();
liveBitSet = new BitSet(blockNum);
decommissioningBitSet = new BitSet(blockNum);
}
for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
final DatanodeDescriptor node = getDatanodeDescriptorFromStorage(storage);
final StoredReplicaState state = checkReplicaOnStorage(numReplicas, block,
storage, corruptReplicas.getNodes(block), false);
if (state == StoredReplicaState.LIVE) {
if (storage.getStorageType() == StorageType.PROVIDED) {
storage = new DatanodeStorageInfo(node, storage.getStorageID(),
storage.getStorageType(), storage.getState());
}
nodesContainingLiveReplicas.add(storage);
}
containingNodes.add(node);
// do not select the replica if it is corrupt or excess
if (state == StoredReplicaState.CORRUPT ||
state == StoredReplicaState.EXCESS) {
continue;
}
// Never use maintenance node not suitable for read
// or unknown state replicas.
if (state == null
|| state == StoredReplicaState.MAINTENANCE_NOT_FOR_READ) {
continue;
}
// Save the live decommissioned replica in case we need it. Such replicas
// are normally not used for replication, but if nothing else is
// available, one can be selected as a source.
if (state == StoredReplicaState.DECOMMISSIONED) {
if (decommissionedSrc == null ||
ThreadLocalRandom.current().nextBoolean()) {
decommissionedSrc = node;
}
continue;
}
// for EC here need to make sure the numReplicas replicates state correct
// because in the scheduleReconstruction it need the numReplicas to check
// whether need to reconstruct the ec internal block
byte blockIndex = -1;
if (isStriped) {
blockIndex = ((BlockInfoStriped) block)
.getStorageBlockIndex(storage);
countLiveAndDecommissioningReplicas(numReplicas, state,
liveBitSet, decommissioningBitSet, blockIndex);
}
if (priority != LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY
&& (!node.isDecommissionInProgress() && !node.isEnteringMaintenance())
&& node.getNumberOfBlocksToBeReplicated() +
node.getNumberOfBlocksToBeErasureCoded() >= maxReplicationStreams) {
if (isStriped && (state == StoredReplicaState.LIVE
|| state == StoredReplicaState.DECOMMISSIONING)) {
liveBusyBlockIndices.add(blockIndex);
//HDFS-16566 ExcludeReconstructed won't be reconstructed.
excludeReconstructed.add(blockIndex);
}
continue; // already reached replication limit
}
if (node.getNumberOfBlocksToBeReplicated() +
node.getNumberOfBlocksToBeErasureCoded() >= replicationStreamsHardLimit) {
if (isStriped && (state == StoredReplicaState.LIVE
|| state == StoredReplicaState.DECOMMISSIONING)) {
liveBusyBlockIndices.add(blockIndex);
//HDFS-16566 ExcludeReconstructed won't be reconstructed.
excludeReconstructed.add(blockIndex);
}
continue;
}
if(isStriped || srcNodes.isEmpty()) {
srcNodes.add(node);
if (isStriped) {
liveBlockIndices.add(blockIndex);
}
continue;
}
// for replicated block, switch to a different node randomly
// this to prevent from deterministically selecting the same node even
// if the node failed to replicate the block on previous iterations
if (ThreadLocalRandom.current().nextBoolean()) {
srcNodes.set(0, node);
}
}
// Pick a live decommissioned replica, if nothing else is available.
if (!isStriped && nodesContainingLiveReplicas.isEmpty() &&
srcNodes.isEmpty() && decommissionedSrc != null) {
srcNodes.add(decommissionedSrc);
}
return srcNodes.toArray(new DatanodeDescriptor[srcNodes.size()]);
} | @Test
public void testChooseSrcDNWithDupECInDecommissioningNode() throws Exception {
long blockId = -9223372036854775776L; // real ec block id
Block aBlock = new Block(blockId, 0, 0);
// RS-3-2 EC policy
ErasureCodingPolicy ecPolicy =
SystemErasureCodingPolicies.getPolicies().get(1);
// striped blockInfo
BlockInfoStriped aBlockInfoStriped = new BlockInfoStriped(aBlock, ecPolicy);
// ec storageInfo
DatanodeStorageInfo ds1 = DFSTestUtil.createDatanodeStorageInfo(
"storage1", "1.1.1.1", "rack1", "host1");
DatanodeStorageInfo ds2 = DFSTestUtil.createDatanodeStorageInfo(
"storage2", "2.2.2.2", "rack2", "host2");
DatanodeStorageInfo ds3 = DFSTestUtil.createDatanodeStorageInfo(
"storage3", "3.3.3.3", "rack3", "host3");
DatanodeStorageInfo ds4 = DFSTestUtil.createDatanodeStorageInfo(
"storage4", "4.4.4.4", "rack4", "host4");
DatanodeStorageInfo ds5 = DFSTestUtil.createDatanodeStorageInfo(
"storage5", "5.5.5.5", "rack5", "host5");
DatanodeStorageInfo ds6 = DFSTestUtil.createDatanodeStorageInfo(
"storage6", "6.6.6.6", "rack6", "host6");
// link block with storage
aBlockInfoStriped.addStorage(ds1, aBlock);
aBlockInfoStriped.addStorage(ds2, new Block(blockId + 1, 0, 0));
aBlockInfoStriped.addStorage(ds3, new Block(blockId + 2, 0, 0));
aBlockInfoStriped.addStorage(ds4, new Block(blockId + 3, 0, 0));
aBlockInfoStriped.addStorage(ds5, new Block(blockId + 4, 0, 0));
// NOTE: duplicate block 0; this DN will replace the decommissioning ds1 DN
aBlockInfoStriped.addStorage(ds6, aBlock);
addEcBlockToBM(blockId, ecPolicy);
// decommission datanode where store block 0
ds1.getDatanodeDescriptor().startDecommission();
List<DatanodeDescriptor> containingNodes =
new LinkedList<DatanodeDescriptor>();
List<DatanodeStorageInfo> nodesContainingLiveReplicas =
new LinkedList<DatanodeStorageInfo>();
NumberReplicas numReplicas = new NumberReplicas();
List<Byte> liveBlockIndices = new ArrayList<>();
List<Byte> liveBusyBlockIndices = new ArrayList<>();
List<Byte> excludeReconstructedIndices = new ArrayList<>();
bm.chooseSourceDatanodes(
aBlockInfoStriped,
containingNodes,
nodesContainingLiveReplicas,
numReplicas, liveBlockIndices,
liveBusyBlockIndices,
excludeReconstructedIndices,
LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY);
assertEquals("There are 5 live replicas in " +
"[ds2, ds3, ds4, ds5, ds6] datanodes ",
5, numReplicas.liveReplicas());
assertEquals("The ds1 datanode is in decommissioning, " +
"so there is no redundant replica",
0, numReplicas.redundantInternalBlocks());
} |
@Override
public boolean isTrusted(Address address) {
if (address == null) {
return false;
}
if (trustedInterfaces.isEmpty()) {
return true;
}
String host = address.getHost();
if (matchAnyInterface(host, trustedInterfaces)) {
return true;
} else {
if (logger.isFineEnabled()) {
logger.fine(
"Address %s doesn't match any trusted interface", host);
}
return false;
}
} | @Test
public void givenInterfaceIsConfigured_whenMessageWithMatchingHost_thenTrust() throws UnknownHostException {
AddressCheckerImpl joinMessageTrustChecker = new AddressCheckerImpl(singleton("127.0.0.1"), logger);
Address address = createAddress("127.0.0.1");
assertTrue(joinMessageTrustChecker.isTrusted(address));
} |
public static Map<String, String> transStringMap(final Map<String, Object> map) {
return Optional.ofNullable(map)
.map(m -> m.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> Objects.toString(e.getValue(), null))))
.orElse(null);
} | @Test
public void testTransStringMap() {
Map<String, Object> jsonParams = new HashMap<>();
jsonParams.put("a", 1);
jsonParams.put("b", 2);
Map<String, String> stringStringMap = MapUtils.transStringMap(jsonParams);
assertEquals(stringStringMap.get("a").getClass(), String.class);
assertEquals(stringStringMap.get("a"), "1");
} |
public static String getClassName(Object obj, boolean isSimple) {
if (null == obj) {
return null;
}
final Class<?> clazz = obj.getClass();
return getClassName(clazz, isSimple);
} | @Test
public void getClassNameTest() {
String className = ClassUtil.getClassName(ClassUtil.class, false);
assertEquals("cn.hutool.core.util.ClassUtil", className);
String simpleClassName = ClassUtil.getClassName(ClassUtil.class, true);
assertEquals("ClassUtil", simpleClassName);
} |
@Override
public void addPermission(String role, String resource, String action) {
String sql = "INSERT INTO permissions (role, resource, action) VALUES (?, ?, ?)";
try {
jt.update(sql, role, resource, action);
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e.toString(), e);
throw e;
}
} | @Test
void testAddPermission() {
String sql = "INSERT INTO permissions (role, resource, action) VALUES (?, ?, ?)";
externalPermissionPersistService.addPermission("role", "resource", "action");
Mockito.verify(jdbcTemplate).update(sql, "role", "resource", "action");
} |
public RouteResult<T> route(HttpMethod method, String path) {
return route(method, path, Collections.emptyMap());
} | @Test
void testParams() {
RouteResult<String> routed = router.route(GET, "/articles/123");
assertThat(routed.target()).isEqualTo("show");
assertThat(routed.pathParams()).hasSize(1);
assertThat(routed.pathParams().get("id")).isEqualTo("123");
} |
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
} | @Test
void testFunctionDependingOnUnknownInput() {
IdentityMapper3<Boolean, String> function = new IdentityMapper3<Boolean, String>();
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function, BasicTypeInfo.BOOLEAN_TYPE_INFO, "name", true);
assertThat(ti).isInstanceOf(MissingTypeInfo.class);
assertThatThrownBy(
() ->
TypeExtractor.getMapReturnTypes(
function, BasicTypeInfo.BOOLEAN_TYPE_INFO))
.isInstanceOf(InvalidTypesException.class);
} |
public static Double interpolateCourse(Double c1, Double c2, double fraction) {
if (c1 == null || c2 == null) {
return null;
}
checkArgument(VALID_COURSE_RANGE.contains(c1), "The 1st course: " + c1 + " is not in range");
checkArgument(VALID_COURSE_RANGE.contains(c2), "The 2nd course: " + c2 + " is not in range");
checkArgument(VALID_FRACTION_RANGE.contains(fraction), "The fraction: " + fraction + " is not in range");
double angleDelta = Spherical.angleDifference(c2, c1);
Double course = c1 + interpolate(0.0, angleDelta, fraction);
return Spherical.mod(course, 360.0d);
} | @Test
public void testInterpolateCourseOnNullInput() {
assertNull(interpolateCourse(null, 10.0, .5));
assertNull(interpolateCourse(10.0, null, .5));
} |
public static Criterion matchIPProtocol(short proto) {
return new IPProtocolCriterion(proto);
} | @Test
public void testMatchIpProtocolMethod() {
Criterion matchIPProtocol = Criteria.matchIPProtocol(protocol1);
IPProtocolCriterion ipProtocolCriterion =
checkAndConvert(matchIPProtocol,
Criterion.Type.IP_PROTO,
IPProtocolCriterion.class);
assertThat(ipProtocolCriterion.protocol(), is(equalTo(protocol1)));
} |
public static Collection<MdbValidityStatus> assertEjbClassValidity(final ClassInfo mdbClass) {
Collection<MdbValidityStatus> mdbComplianceIssueList = new ArrayList<>(MdbValidityStatus.values().length);
final String className = mdbClass.name().toString();
verifyModifiers(className, mdbClass.flags(), mdbComplianceIssueList);
for (MethodInfo method : mdbClass.methods()) {
if ("onMessage".equals(method.name())) {
verifyOnMessageMethod(className, method.flags(), mdbComplianceIssueList);
}
if ("finalize".equals(method.name())) {
EjbLogger.DEPLOYMENT_LOGGER.mdbCantHaveFinalizeMethod(className);
mdbComplianceIssueList.add(MdbValidityStatus.MDB_SHOULD_NOT_HAVE_FINALIZE_METHOD);
}
}
return mdbComplianceIssueList;
} | @Test
public void mdbWithStaticOnMessageMethod() {
assertTrue(assertEjbClassValidity(buildClassInfoForClass(InvalidMdbOnMessageCantBeStatic.class.getName())).contains(
MdbValidityStatus.MDB_ON_MESSAGE_METHOD_CANT_BE_STATIC));
} |
@Override
public void removeAttribute(String key) {
channel.removeAttribute(key);
} | @Test
void removeAttributeTest() {
header.setAttribute("test", "test");
Assertions.assertEquals(header.getAttribute("test"), "test");
header.removeAttribute("test");
Assertions.assertFalse(header.hasAttribute("test"));
} |
public Where getColumns() {
return new Where("Columns");
} | @Test
void queryFirstColumn() throws Exception {
queryableIndex.getColumns()
.where(Column.index()
.any())
.select()
.first(firstColumnQueryCallback);
verify(firstColumnQueryCallback).callback(firstColumnArgumentCaptor.capture());
assertThat(firstColumnArgumentCaptor.getValue()).isEqualTo(firstColumn);
} |
@Config("database-url")
public MySqlConnectionConfig setDatabaseUrl(String databaseUrl)
{
this.databaseUrl = databaseUrl;
return this;
} | @Test
public void testExplicitPropertyMappings()
{
Map<String, String> properties = new ImmutableMap.Builder<String, String>()
.put("database-url", "localhost:1080")
.build();
MySqlConnectionConfig expected = new MySqlConnectionConfig()
.setDatabaseUrl("localhost:1080");
assertFullMapping(properties, expected);
} |
static <T> @Nullable JdbcReadWithPartitionsHelper<T> getPartitionsHelper(TypeDescriptor<T> type) {
// This cast is unchecked, thus this is a small type-checking risk. We just need
// to make sure that all preset helpers in `JdbcUtil.PRESET_HELPERS` are matched
// in type from their Key and their Value.
return (JdbcReadWithPartitionsHelper<T>) PRESET_HELPERS.get(type.getRawType());
} | @Test
public void testDatetimePartitioningWithSingleKey() {
JdbcReadWithPartitionsHelper<DateTime> helper =
JdbcUtil.getPartitionsHelper(TypeDescriptor.of(DateTime.class));
DateTime onlyPoint = DateTime.now();
List<KV<DateTime, DateTime>> expectedRanges =
Lists.newArrayList(KV.of(onlyPoint, onlyPoint.plusMillis(1)));
List<KV<DateTime, DateTime>> ranges =
Lists.newArrayList(helper.calculateRanges(onlyPoint, onlyPoint, 10L));
// It is not possible to generate any more than one range, because the lower and upper range are
// exactly the same.
// The range goes from the current DateTime to ONE MILISECOND AFTER.
// Because the query's filter statement is : WHERE column >= lowerBound AND column < upperBound.
assertEquals(1, ranges.size());
assertArrayEquals(expectedRanges.toArray(), ranges.toArray());
} |
public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
Predicate<TopicMessageDTO> predicate;
try {
predicate = MessageFilters.createMsgFilter(
execData.getFilterCode(),
MessageFilterTypeDTO.GROOVY_SCRIPT
);
} catch (Exception e) {
log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
return new SmartFilterTestExecutionResultDTO()
.error("Compilation error : " + e.getMessage());
}
try {
var result = predicate.test(
new TopicMessageDTO()
.key(execData.getKey())
.content(execData.getValue())
.headers(execData.getHeaders())
.offset(execData.getOffset())
.partition(execData.getPartition())
.timestamp(
Optional.ofNullable(execData.getTimestampMs())
.map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
.orElse(null))
);
return new SmartFilterTestExecutionResultDTO()
.result(result);
} catch (Exception e) {
log.info("Smart filter {} execution error", execData, e);
return new SmartFilterTestExecutionResultDTO()
.error("Execution error : " + e.getMessage());
}
} | @Test
void execSmartFilterTestReturnsExecutionResult() {
var params = new SmartFilterTestExecutionDTO()
.filterCode("key != null && value != null && headers != null && timestampMs != null && offset != null")
.key("1234")
.value("{ \"some\" : \"value\" } ")
.headers(Map.of("h1", "hv1"))
.offset(12345L)
.timestampMs(System.currentTimeMillis())
.partition(1);
assertThat(execSmartFilterTest(params).getResult()).isTrue();
params.setFilterCode("return false");
assertThat(execSmartFilterTest(params).getResult()).isFalse();
} |
@Override
public void checkAuthorization(
final KsqlSecurityContext securityContext,
final MetaStore metaStore,
final Statement statement
) {
if (statement instanceof Query) {
validateQuery(securityContext, metaStore, (Query)statement);
} else if (statement instanceof InsertInto) {
validateInsertInto(securityContext, metaStore, (InsertInto)statement);
} else if (statement instanceof CreateAsSelect) {
validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
} else if (statement instanceof PrintTopic) {
validatePrintTopic(securityContext, (PrintTopic)statement);
} else if (statement instanceof CreateSource) {
validateCreateSource(securityContext, (CreateSource)statement);
}
} | @Test
public void shouldThrowWhenCreateAsSelectOnNewTopicWithoutValueSchemaWritePermissions() {
// Given:
givenSubjectAccessDenied("topic-value", AclOperation.WRITE);
final Statement statement = givenStatement(String.format(
"CREATE STREAM newStream WITH (kafka_topic='topic', value_format='AVRO') "
+ "AS SELECT * FROM %s;", KAFKA_STREAM_TOPIC)
);
// When:
final Exception e = assertThrows(
KsqlSchemaAuthorizationException.class,
() -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement)
);
// Then:
assertThat(e.getMessage(), containsString(String.format(
"Authorization denied to Write on Schema Registry subject: [topic-value]"
)));
} |
public static UriTemplate create(String template, Charset charset) {
return new UriTemplate(template, true, charset);
} | @Test
void substituteNullMap() {
assertThrows(IllegalArgumentException.class,
() -> UriTemplate.create("stuff", Util.UTF_8).expand(null));
} |
@Transactional
public void payInstallment(String identificationNumber, int creditId, int installmentId) {
Credit credit = creditRepository.findByIdAndIdentificationNumber(creditId, identificationNumber)
.orElseThrow(() -> createCreditNotFoundException(creditId));
Installment installment = installmentRepository.findByCredit(credit)
.stream()
.filter(c -> c.getStatus() == InstallmentStatus.UNPAID)
.filter(c -> c.getId() == installmentId)
.findFirst()
.orElseThrow(() -> createInstallmentNotFoundException(installmentId));
installment.setStatus(InstallmentStatus.PAID);
Installment savedInstallment = installmentRepository.save(installment);
eventPublisherService.publishInstallment(getInstallmentDocumentFromInstallment(savedInstallment, credit));
if (installmentRepository.findByCredit(credit).stream().noneMatch(c -> c.getStatus() == InstallmentStatus.UNPAID)) {
credit.setStatus(CreditStatus.FINISHED);
Credit finishedCredit = creditRepository.save(credit);
eventPublisherService.publishPaidCreditInstallment(finishedCredit);
}
eventPublisherService.publishCredit(getCreditDocumentFromCredit(credit));
} | @Test
void payInstallment_alreadyPaid() {
// Arrange
String identificationNumber = "1234567890";
int creditId = 1;
int installmentId = 1;
Credit credit = Credit.builder()
.id(creditId)
.amount(BigDecimal.valueOf(1000))
.status(CreditStatus.STARTED)
.identificationNumber(identificationNumber)
.build();
Installment installment = Installment.builder()
.id(installmentId)
.amount(BigDecimal.valueOf(200))
.status(InstallmentStatus.PAID)
.dueDate(LocalDateTime.now())
.credit(credit)
.build();
when(creditRepository.findByIdAndIdentificationNumber(creditId, identificationNumber)).thenReturn(Optional.of(credit));
when(installmentRepository.findByCredit(credit)).thenReturn(List.of(installment));
// Act & Assert
assertThrows(GenericException.class, () -> creditService.payInstallment(identificationNumber, creditId, installmentId));
verify(installmentRepository, never()).save(any(Installment.class));
verify(eventPublisherService, never()).publishInstallment(any(InstallmentDocument.class));
verify(eventPublisherService, never()).publishCredit(any(CreditDocument.class));
} |
@Override
public boolean retryRequest(
HttpRequest request, IOException exception, int execCount, HttpContext context) {
if (execCount > maxRetries) {
// Do not retry if over max retries
return false;
}
if (nonRetriableExceptions.contains(exception.getClass())) {
return false;
} else {
for (Class<? extends IOException> rejectException : nonRetriableExceptions) {
if (rejectException.isInstance(exception)) {
return false;
}
}
}
if (request instanceof CancellableDependency
&& ((CancellableDependency) request).isCancelled()) {
return false;
}
// Retry if the request is considered idempotent
return Method.isIdempotent(request.getMethod());
} | @Test
public void noRetryForNoRouteToHostException() {
HttpGet request = new HttpGet("/");
assertThat(retryStrategy.retryRequest(request, new NoRouteToHostException(), 1, null))
.isFalse();
} |
@Override
public ObjectNode encode(Alarm alarm, CodecContext context) {
checkNotNull(alarm, "Alarm cannot be null");
return context.mapper().createObjectNode()
.put("id", alarm.id().toString())
.put("deviceId", alarm.deviceId().toString())
.put("description", alarm.description())
.put("source",
alarm.source() == null ? null
: alarm.source().toString())
.put("timeRaised", alarm.timeRaised())
.put("timeUpdated", alarm.timeUpdated())
.put("timeCleared", alarm.timeCleared())
.put("severity", alarm.severity().toString())
.put("serviceAffecting", alarm.serviceAffecting())
.put("acknowledged", alarm.acknowledged())
.put("manuallyClearable", alarm.manuallyClearable())
.put("assignedUser", alarm.assignedUser());
} | @Test
public void alarmCodecTestWithOptionalField() {
JsonCodec<Alarm> codec = context.codec(Alarm.class);
assertThat(codec, is(notNullValue()));
ObjectNode alarmJson = codec.encode(alarmWithSource, context);
assertThat(alarmJson, notNullValue());
assertThat(alarmJson, matchesAlarm(alarmWithSource));
} |
@Override
public boolean putRow( RowMetaInterface rowMeta, Object[] rowData ) {
this.rowMeta = rowMeta;
buffer.add( rowData );
return true;
} | @Test
public void testPutRow() throws Exception {
rowSet.putRow( new RowMeta(), row );
assertSame( row, rowSet.getRow() );
} |
public String getPassword() {
return password;
} | @Test
public void testPassword() {
assertEquals("password", jt400Configuration.getPassword());
} |
@Udf
public <T extends Comparable<? super T>> T arrayMax(@UdfParameter(
description = "Array of values from which to find the maximum") final List<T> input) {
if (input == null) {
return null;
}
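// Track the largest non-null element seen so far; the result stays null if the list contains only nulls.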
T candidate = null;
for (T thisVal : input) {
if (thisVal != null) {
if (candidate == null) {
candidate = thisVal;
} else if (thisVal.compareTo(candidate) > 0) {
candidate = thisVal;
}
}
}
return candidate;
} | @Test
public void shouldFindDecimalMax() {
final List<BigDecimal> input =
Arrays.asList(BigDecimal.valueOf(1.2), BigDecimal.valueOf(1.3), BigDecimal.valueOf(-1.2));
assertThat(udf.arrayMax(input), is(BigDecimal.valueOf(1.3)));
} |
public static String rpcTypeAdapter(final String rpcType) {
RpcTypeEnum rpcTypeEnum = RpcTypeEnum.acquireByName(rpcType);
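// Map each RPC type to the name of the plugin that handles it; HTTP (and the default case) falls back to the divide plugin.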
switch (rpcTypeEnum) {
case GRPC:
return PluginEnum.GRPC.getName();
case SPRING_CLOUD:
return PluginEnum.SPRING_CLOUD.getName();
case DUBBO:
return PluginEnum.DUBBO.getName();
case TARS:
return PluginEnum.TARS.getName();
case SOFA:
return PluginEnum.SOFA.getName();
case WEB_SOCKET:
return PluginEnum.WEB_SOCKET.getName();
case MOTAN:
return PluginEnum.MOTAN.getName();
case HTTP:
default:
return PluginEnum.DIVIDE.getName();
}
} | @Test
public void testRpcTypeAdapter() {
Arrays.stream(RpcTypeEnum.values())
.filter(rpcTypeEnum -> !RpcTypeEnum.HTTP.getName().equals(rpcTypeEnum.getName()))
.forEach(rpcTypeEnum -> assertEquals(PluginNameAdapter.rpcTypeAdapter(rpcTypeEnum.getName()),
PluginEnum.getPluginEnumByName(rpcTypeEnum.getName()).getName()));
assertEquals(PluginNameAdapter.rpcTypeAdapter(RpcTypeEnum.HTTP.getName()), PluginEnum.DIVIDE.getName());
} |
public HollowHashIndexResult findMatches(Object... query) {
if (hashStateVolatile == null) {
throw new IllegalStateException(this + " wasn't initialized");
}
int hashCode = 0;
for(int i=0;i<query.length;i++) {
if(query[i] == null)
throw new IllegalArgumentException("querying by null unsupported; i=" + i);
hashCode ^= HashCodes.hashInt(keyHashCode(query[i], i));
}
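// Linear-probe the match hash table from the hashed bucket; retry the whole lookup if the index state changes underneath us.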
HollowHashIndexResult result;
HollowHashIndexState hashState;
do {
result = null;
hashState = hashStateVolatile;
long bucket = hashCode & hashState.getMatchHashMask();
long hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
boolean bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
while (!bucketIsEmpty) {
if (matchIsEqual(hashState.getMatchHashTable(), hashBucketBit, query)) {
int selectSize = (int) hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey(), hashState.getBitsPerSelectTableSize());
long selectBucketPointer = hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey() + hashState.getBitsPerSelectTableSize(), hashState.getBitsPerSelectTablePointer());
result = new HollowHashIndexResult(hashState, selectBucketPointer, selectSize);
break;
}
bucket = (bucket + 1) & hashState.getMatchHashMask();
hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
}
} while (hashState != hashStateVolatile);
return result;
} | @Test
public void testIndexingBooleanTypeFieldWithNullValues() throws Exception {
mapper.add(new TypeBoolean(null));
mapper.add(new TypeBoolean(true));
mapper.add(new TypeBoolean(false));
roundTripSnapshot();
HollowHashIndex index = new HollowHashIndex(readStateEngine, "TypeBoolean", "", "data.value");
assertIteratorContainsAll(index.findMatches(Boolean.FALSE).iterator(), 2);
assertIteratorContainsAll(index.findMatches(Boolean.TRUE).iterator(), 1);
} |
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
table.refresh();
if (lastPosition != null) {
return discoverIncrementalSplits(lastPosition);
} else {
return discoverInitialSplits();
}
} | @Test
public void testTableScanNoStats() throws Exception {
appendTwoSnapshots();
ScanContext scanContext =
ScanContext.builder()
.includeColumnStats(false)
.startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
.build();
ContinuousSplitPlannerImpl splitPlanner =
new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
assertThat(initialResult.splits()).hasSize(1);
IcebergSourceSplit split = Iterables.getOnlyElement(initialResult.splits());
assertThat(split.task().files()).hasSize(2);
verifyStatCount(split, 0);
IcebergEnumeratorPosition lastPosition = initialResult.toPosition();
for (int i = 0; i < 3; ++i) {
CycleResult result = verifyOneCycle(splitPlanner, lastPosition);
verifyStatCount(result.split, 0);
lastPosition = result.lastPosition;
}
} |
@VisibleForTesting
public int getAppsFailedSubmitted() {
return numAppsFailedSubmitted.value();
} | @Test
public void testAppsFailedSubmitted() {
long totalBadbefore = metrics.getAppsFailedSubmitted();
badSubCluster.submitApplication();
Assert.assertEquals(totalBadbefore + 1, metrics.getAppsFailedSubmitted());
} |
public TermsAggregationBuilder buildTermsAggregation(String name,
TopAggregationDefinition<?> topAggregation, @Nullable Integer numberOfTerms) {
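// Aggregate on the top-aggregation's filter field and drop buckets with fewer than TERM_AGGREGATION_MIN_DOC_COUNT documents.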
TermsAggregationBuilder termsAggregation = AggregationBuilders.terms(name)
.field(topAggregation.getFilterScope().getFieldName())
.order(order)
.minDocCount(TERM_AGGREGATION_MIN_DOC_COUNT);
if (numberOfTerms != null) {
termsAggregation.size(numberOfTerms);
}
if (subAggregation != null) {
termsAggregation = termsAggregation.subAggregation(subAggregation);
}
return termsAggregation;
} | @Test
public void buildTermsAggregation_adds_custom_size_if_TermTopAggregation_specifies_one() {
String aggName = randomAlphabetic(10);
int customSize = 1 + new Random().nextInt(400);
SimpleFieldTopAggregationDefinition topAggregation = new SimpleFieldTopAggregationDefinition("bar", false);
Stream.of(
underTest,
underTestWithCustomSubAgg,
underTestWithCustomsSubAggAndOrder)
.forEach(t -> {
TermsAggregationBuilder agg = t.buildTermsAggregation(aggName, topAggregation, customSize);
assertThat(agg.getName()).isEqualTo(aggName);
assertThat(agg.field()).isEqualTo(topAggregation.getFilterScope().getFieldName());
assertThat(agg.size()).isEqualTo(customSize);
});
} |
@Override
@MethodNotAvailable
public void close() {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testClose() {
adapter.close();
} |
@VisibleForTesting
protected static long[] splitIp(String baseIp) throws UnknownHostException {
InetAddress inetAddress;
try {
inetAddress = InetAddress.getByName(baseIp);
} catch (UnknownHostException e) {
LOG.error("Base IP address is invalid");
throw e;
}
if (inetAddress instanceof Inet6Address) {
throw new IllegalArgumentException(
"IPv6 is not yet supported for " + "reverse zones");
}
byte[] octets = inetAddress.getAddress();
if (octets.length != 4) {
throw new IllegalArgumentException("Base IP address is invalid");
}
long[] results = new long[4];
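// Mask each octet with 0xff so the signed bytes are read as unsigned values.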
for (int i = 0; i < octets.length; i++) {
results[i] = octets[i] & 0xff;
}
return results;
} | @Test
public void testSplitIp() throws Exception {
long[] splitIp = ReverseZoneUtils.splitIp(NET);
assertEquals(172, splitIp[0]);
assertEquals(17, splitIp[1]);
assertEquals(4, splitIp[2]);
assertEquals(0, splitIp[3]);
} |
public static <K, V> Write<K, V> write() {
return new AutoValue_CdapIO_Write.Builder<K, V>().build();
} | @Test
public void testWriteObjectCreationFailsIfCdapPluginClassIsNull() {
assertThrows(
IllegalArgumentException.class,
() -> CdapIO.<String, String>write().withCdapPluginClass(null));
} |
protected String matchFilters(final Exchange exchange) {
return filterService.getMatchingEndpointsForExchangeByChannel(
exchange, channel, MODE_FIRST_MATCH.equals(recipientMode), warnDroppedMessage);
} | @Test
void testMatchFilters() {
when(filterService.getMatchingEndpointsForExchangeByChannel(exchange, TEST_CHANNEL, true, false))
.thenReturn(MOCK_ENDPOINT);
String result = processor.matchFilters(exchange);
assertEquals(MOCK_ENDPOINT, result);
} |
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
String jwt;
RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());
// A JWT is a JWS and/or a JWE with JSON claims as the payload.
// Here the claims are signed as a plain JWS (no JWE wrapping),
// so we first create a JsonWebSignature object.
JsonWebSignature jws = new JsonWebSignature();
// The payload of the JWS is JSON content of the JWT Claims
jws.setPayload(claims.toJson());
// The JWT is signed using the sender's private key
jws.setKey(privateKey);
// Get the provider id from the security config file; it should be two digits.
// The provider id is set as the prefix of the keyid in the token header, for example: 05100.
// If there is no provider id, we use "00" for the default value.
String provider_id = "";
if (jwtConfig.getProviderId() != null) {
provider_id = jwtConfig.getProviderId();
if (provider_id.length() == 1) {
provider_id = "0" + provider_id;
} else if (provider_id.length() > 2) {
logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
provider_id = provider_id.substring(0, 2);
}
}
jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());
// Set the signature algorithm on the JWT/JWS that will integrity protect the claims
jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
// Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
// representation, which is a string consisting of three dot ('.') separated
// base64url-encoded parts in the form Header.Payload.Signature
jwt = jws.getCompactSerialization();
return jwt;
} | @Test
public void longlivedCcPetstoreScope() throws Exception {
JwtClaims claims = ClaimsUtil.getTestCcClaimsScope("f7d42348-c647-4efb-a52d-4c5787421e73", "write:pets read:pets");
claims.setExpirationTimeMinutesInTheFuture(5256000);
String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
System.out.println("***Long lived token for portal lightapi***: " + jwt);
} |
@Override
public synchronized List<DeviceEvent> updatePorts(ProviderId providerId,
DeviceId deviceId,
List<PortDescription> portDescriptions) {
NodeId localNode = clusterService.getLocalNode().id();
// TODO: It might be negligible, but this will have a negative impact on topology discovery performance,
// since it triggers a distributed store read.
// Also, it would probably be better if side-way communication happened on ConfigurationProvider, etc.,
// outside the Device subsystem, so that we don't have to modify both the Device and Link stores.
// If we don't care much about topology performance, then it might be OK.
NodeId deviceNode = mastershipService.getMasterFor(deviceId);
// Process port update only if we're the master of the device,
// otherwise signal the actual master.
List<DeviceEvent> deviceEvents = null;
if (localNode.equals(deviceNode)) {
final Timestamp newTimestamp;
try {
newTimestamp = deviceClockService.getTimestamp(deviceId);
} catch (IllegalStateException e) {
log.info("Timestamp was not available for device {}", deviceId);
log.debug(" discarding {}", portDescriptions);
// Failed to generate timestamp.
// Possible situation:
// Device connected and became master for a short period of time,
// but lost mastership before this instance had the chance to
// retrieve term information.
// Information dropped here is expected to be recoverable by
// device probing after mastership change
return Collections.emptyList();
}
log.debug("timestamp for {} {}", deviceId, newTimestamp);
final Timestamped<List<PortDescription>> timestampedInput
= new Timestamped<>(portDescriptions, newTimestamp);
final Timestamped<List<PortDescription>> merged;
final Map<ProviderId, DeviceDescriptions> device = getOrCreateDeviceDescriptionsMap(deviceId);
synchronized (device) {
deviceEvents = updatePortsInternal(providerId, deviceId, timestampedInput);
final DeviceDescriptions descs = device.get(providerId);
List<PortDescription> mergedList =
FluentIterable.from(portDescriptions)
.transform(input ->
// lookup merged port description
descs.getPortDesc(input.portNumber()).value()
).toList();
merged = new Timestamped<>(mergedList, newTimestamp);
}
if (!deviceEvents.isEmpty()) {
log.debug("Notifying peers of a ports update topology event for providerId: {} and deviceId: {}",
providerId, deviceId);
notifyPeers(new InternalPortEvent(providerId, deviceId, merged));
}
} else {
return Collections.emptyList();
}
return deviceEvents;
} | @Test
public final void testUpdatePorts() {
putDevice(DID1, SW1);
List<PortDescription> pds = Arrays.asList(
DefaultPortDescription.builder().withPortNumber(P1).isEnabled(true).build(),
DefaultPortDescription.builder().withPortNumber(P2).isEnabled(true).build()
);
Capture<InternalDeviceEvent> message = Capture.newInstance();
Capture<MessageSubject> subject = Capture.newInstance();
Capture<Function<InternalDeviceEvent, byte[]>> encoder = Capture.newInstance();
resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
List<DeviceEvent> events = deviceStore.updatePorts(PID, DID1, pds);
verify(clusterCommunicator);
// TODO: verify broadcast message
assertTrue(message.hasCaptured());
Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
for (DeviceEvent event : events) {
assertEquals(PORT_ADDED, event.type());
assertDevice(DID1, SW1, event.subject());
assertTrue("PortNumber is one of expected",
expectedPorts.remove(event.port().number()));
assertTrue("Port is enabled", event.port().isEnabled());
}
assertTrue("Event for all expectedport appeared", expectedPorts.isEmpty());
List<PortDescription> pds2 = Arrays.asList(
DefaultPortDescription.builder().withPortNumber(P1).isEnabled(false).build(),
DefaultPortDescription.builder().withPortNumber(P2).isEnabled(true).build(),
DefaultPortDescription.builder().withPortNumber(P3).isEnabled(true).build()
);
resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
events = deviceStore.updatePorts(PID, DID1, pds2);
verify(clusterCommunicator);
// TODO: verify broadcast message
assertTrue(message.hasCaptured());
assertFalse("event should be triggered", events.isEmpty());
for (DeviceEvent event : events) {
PortNumber num = event.port().number();
if (P1.equals(num)) {
assertEquals(PORT_UPDATED, event.type());
assertDevice(DID1, SW1, event.subject());
assertFalse("Port is disabled", event.port().isEnabled());
} else if (P2.equals(num)) {
fail("P2 event not expected.");
} else if (P3.equals(num)) {
assertEquals(PORT_ADDED, event.type());
assertDevice(DID1, SW1, event.subject());
assertTrue("Port is enabled", event.port().isEnabled());
} else {
fail("Unknown port number encountered: " + num);
}
}
List<PortDescription> pds3 = Arrays.asList(
DefaultPortDescription.builder().withPortNumber(P1).isEnabled(false).build(),
DefaultPortDescription.builder().withPortNumber(P2).isEnabled(true).build()
);
resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
events = deviceStore.updatePorts(PID, DID1, pds3);
verify(clusterCommunicator);
// TODO: verify broadcast message
assertTrue(message.hasCaptured());
assertFalse("event should be triggered", events.isEmpty());
for (DeviceEvent event : events) {
PortNumber num = event.port().number();
if (P1.equals(num)) {
fail("P1 event not expected.");
} else if (P2.equals(num)) {
fail("P2 event not expected.");
} else if (P3.equals(num)) {
assertEquals(PORT_REMOVED, event.type());
assertDevice(DID1, SW1, event.subject());
assertTrue("Port was enabled", event.port().isEnabled());
} else {
fail("Unknown port number encountered: " + num);
}
}
} |
@Override
@SuppressWarnings("MissingDefault")
public boolean offer(final E e) {
if (e == null) {
throw new NullPointerException();
}
long mask;
E[] buffer;
long pIndex;
while (true) {
long producerLimit = lvProducerLimit();
pIndex = lvProducerIndex(this);
// lower bit is indicative of resize, if we see it we spin until it's cleared
if ((pIndex & 1) == 1) {
continue;
}
// pIndex is even (lower bit is 0) -> actual index is (pIndex >> 1)
// mask/buffer may get changed by resizing -> only use for array access after successful CAS.
mask = this.producerMask;
buffer = this.producerBuffer;
// a successful CAS ties the ordering, lv(pIndex)-[mask/buffer]->cas(pIndex)
// assumption behind this optimization is that queue is almost always empty or near empty
if (producerLimit <= pIndex) {
int result = offerSlowPath(mask, pIndex, producerLimit);
switch (result) {
case 0:
break;
case 1:
continue;
case 2:
return false;
case 3:
resize(mask, buffer, pIndex, e);
return true;
}
}
if (casProducerIndex(this, pIndex, pIndex + 2)) {
break;
}
}
// INDEX visible before ELEMENT, consistent with consumer expectation
final long offset = modifiedCalcElementOffset(pIndex, mask);
soElement(buffer, offset, e);
return true;
} | @Test(dataProvider = "empty")
public void offer_whenEmpty(MpscGrowableArrayQueue<Integer> queue) {
assertThat(queue.offer(1)).isTrue();
assertThat(queue).hasSize(1);
} |
public static boolean hasSchemePattern( String path ) {
return hasSchemePattern( path, PROVIDER_PATTERN_SCHEME );
} | @Test
public void testHasScheme() {
String vfsFilename = "hdfs://hsbcmaster:8020/tmp/acltest/";
boolean testVfsFilename = KettleVFS.hasSchemePattern( vfsFilename, PROVIDER_PATTERN_SCHEME );
assertTrue( testVfsFilename );
} |
private JobMetrics getJobMetrics() throws IOException {
if (cachedMetricResults != null) {
// Metric results have been cached after the job ran.
return cachedMetricResults;
}
JobMetrics result = dataflowClient.getJobMetrics(dataflowPipelineJob.getJobId());
if (dataflowPipelineJob.getState().isTerminal()) {
// Add current query result to the cache.
cachedMetricResults = result;
}
return result;
} | @Test
public void testSingleCounterUpdates() throws IOException {
AppliedPTransform<?, ?, ?> myStep = mock(AppliedPTransform.class);
when(myStep.getFullName()).thenReturn("myStepName");
BiMap<AppliedPTransform<?, ?, ?>, String> transformStepNames = HashBiMap.create();
transformStepNames.put(myStep, "s2");
JobMetrics jobMetrics = new JobMetrics();
DataflowPipelineJob job = mock(DataflowPipelineJob.class);
DataflowPipelineOptions options = mock(DataflowPipelineOptions.class);
when(options.isStreaming()).thenReturn(false);
when(job.getDataflowOptions()).thenReturn(options);
when(job.getState()).thenReturn(State.RUNNING);
when(job.getJobId()).thenReturn(JOB_ID);
when(job.getTransformStepNames()).thenReturn(transformStepNames);
MetricUpdate update = new MetricUpdate();
long stepValue = 1234L;
update.setScalar(new BigDecimal(stepValue));
// The parser relies on the fact that one tentative and one committed metric update exist in
// the job metrics results.
MetricUpdate mu1 =
makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1234L, false);
MetricUpdate mu1Tentative =
makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1233L, true);
jobMetrics.setMetrics(ImmutableList.of(mu1, mu1Tentative));
DataflowClient dataflowClient = mock(DataflowClient.class);
when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics);
DataflowMetrics dataflowMetrics = new DataflowMetrics(job, dataflowClient);
MetricQueryResults result = dataflowMetrics.allMetrics();
assertThat(
result.getCounters(),
containsInAnyOrder(
attemptedMetricsResult("counterNamespace", "counterName", "myStepName", 1234L)));
assertThat(
result.getCounters(),
containsInAnyOrder(
committedMetricsResult("counterNamespace", "counterName", "myStepName", 1234L)));
} |
public static String joinAndCamelize(Iterable<?> iterable) {
StringBuilder sb = new StringBuilder();
boolean isFirst = true;
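// Capitalize every element after the first so the joined result reads as camelCase.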
for ( Object object : iterable ) {
if ( !isFirst ) {
sb.append( capitalize( object.toString() ) );
}
else {
sb.append( object );
isFirst = false;
}
}
return sb.toString();
} | @Test
public void testJoinAndCamelize() {
assertThat( Strings.joinAndCamelize( new ArrayList<String>() ) ).isEqualTo( "" );
assertThat( Strings.joinAndCamelize( Arrays.asList( "Hello", "World" ) ) ).isEqualTo( "HelloWorld" );
assertThat( Strings.joinAndCamelize( Arrays.asList( "Hello", "world" ) ) ).isEqualTo( "HelloWorld" );
assertThat( Strings.joinAndCamelize( Arrays.asList( "hello", "world" ) ) ).isEqualTo( "helloWorld" );
} |
static void parseServerIpAndPort(Connection connection, Span span) {
try {
URI url = URI.create(connection.getMetaData().getURL().substring(5)); // strip "jdbc:"
String remoteServiceName = connection.getProperties().getProperty("zipkinServiceName");
if (remoteServiceName == null || "".equals(remoteServiceName)) {
String databaseName = connection.getCatalog();
if (databaseName != null && !databaseName.isEmpty()) {
remoteServiceName = "mysql-" + databaseName;
} else {
remoteServiceName = "mysql";
}
}
span.remoteServiceName(remoteServiceName);
String host = connection.getHost();
if (host != null) {
span.remoteIpAndPort(host, url.getPort() == -1 ? 3306 : url.getPort());
}
} catch (Exception e) {
// remote address is optional
}
} | @Test void parseServerIpAndPort_doesntCrash() throws SQLException {
when(connection.getMetaData()).thenThrow(new SQLException());
TracingStatementInterceptor.parseServerIpAndPort(connection, span);
verifyNoMoreInteractions(span);
} |
@Override
void handle(Connection connection, DatabaseCharsetChecker.State state) throws SQLException {
// PostgreSQL does not have a concept of case-sensitive collation. Only the charset ("encoding" in PostgreSQL terminology)
// must be verified.
expectUtf8AsDefault(connection);
if (state == DatabaseCharsetChecker.State.UPGRADE || state == DatabaseCharsetChecker.State.STARTUP) {
// no need to check columns on fresh installs... as they are not supposed to exist!
expectUtf8Columns(connection);
}
} | @Test
public void upgrade_fails_if_default_charset_is_not_utf8() throws Exception {
answerDefaultCharset("latin");
answerColumns(
List.<String[]>of(new String[] {TABLE_ISSUES, COLUMN_KEE, "utf8"}));
assertThatThrownBy(() -> underTest.handle(connection, DatabaseCharsetChecker.State.UPGRADE))
.isInstanceOf(MessageException.class)
.hasMessage("Database charset is latin. It must support UTF8.");
} |
public static <NodeT, EdgeT> Set<NodeT> reachableNodes(
Network<NodeT, EdgeT> network, Set<NodeT> startNodes, Set<NodeT> endNodes) {
Set<NodeT> visitedNodes = new HashSet<>();
Queue<NodeT> queuedNodes = new ArrayDeque<>();
queuedNodes.addAll(startNodes);
// Perform a breadth-first traversal rooted at the input node.
while (!queuedNodes.isEmpty()) {
NodeT currentNode = queuedNodes.remove();
// If we have already visited this node or it is a terminal node, then do not add any
// successors.
if (!visitedNodes.add(currentNode) || endNodes.contains(currentNode)) {
continue;
}
queuedNodes.addAll(network.successors(currentNode));
}
return visitedNodes;
} | @Test
public void testReachableNodesFromAllRoots() {
assertEquals(
createNetwork().nodes(),
Networks.reachableNodes(
createNetwork(), ImmutableSet.of("A", "D", "I", "M", "O"), Collections.emptySet()));
} |
@Override
public long getPosition() throws IOException
{
checkClosed();
return currentPosition;
} | @Test
void testPositionPeek() throws IOException
{
byte[] values = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20 };
try (RandomAccessReadBuffer randomAccessSource =
new RandomAccessReadBuffer(new ByteArrayInputStream(values));
RandomAccessReadView randomAccessReadView =
new RandomAccessReadView(randomAccessSource, 10, 20))
{
assertEquals(0, randomAccessReadView.getPosition());
randomAccessReadView.skip(6);
assertEquals(6, randomAccessReadView.getPosition());
assertEquals(16, randomAccessReadView.peek());
assertEquals(6, randomAccessReadView.getPosition());
}
} |
public void build(@Nullable SegmentVersion segmentVersion, ServerMetrics serverMetrics)
throws Exception {
SegmentGeneratorConfig genConfig = new SegmentGeneratorConfig(_tableConfig, _dataSchema);
// The segment generation code in SegmentColumnarIndexCreator will throw
// exception if start and end time in time column are not in acceptable
// range. We don't want the realtime consumption to stop (if an exception
// is thrown) and thus the time validity check is explicitly disabled for
// realtime segment generation
genConfig.setSegmentTimeValueCheck(false);
if (_columnIndicesForRealtimeTable.getInvertedIndexColumns() != null) {
genConfig.setIndexOn(StandardIndexes.inverted(), IndexConfig.ENABLED,
_columnIndicesForRealtimeTable.getInvertedIndexColumns());
}
if (_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns() != null) {
genConfig.setVarLengthDictionaryColumns(_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns());
}
if (segmentVersion != null) {
genConfig.setSegmentVersion(segmentVersion);
}
genConfig.setTableName(_tableName);
genConfig.setOutDir(_outputPath);
genConfig.setSegmentName(_segmentName);
addIndexOrDefault(genConfig, StandardIndexes.text(), _columnIndicesForRealtimeTable.getTextIndexColumns(),
new TextIndexConfigBuilder(genConfig.getFSTIndexType()).build());
addIndexOrDefault(genConfig, StandardIndexes.fst(), _columnIndicesForRealtimeTable.getFstIndexColumns(),
new FstIndexConfig(genConfig.getFSTIndexType()));
SegmentPartitionConfig segmentPartitionConfig = _realtimeSegmentImpl.getSegmentPartitionConfig();
genConfig.setSegmentPartitionConfig(segmentPartitionConfig);
genConfig.setNullHandlingEnabled(_nullHandlingEnabled);
genConfig.setSegmentZKPropsConfig(_segmentZKPropsConfig);
// flush any artifacts to disk to improve mutable to immutable segment conversion
_realtimeSegmentImpl.commit();
SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
try (PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader()) {
int[] sortedDocIds = _columnIndicesForRealtimeTable.getSortedColumn() != null
? _realtimeSegmentImpl.getSortedDocIdIterationOrderWithSortedColumn(
_columnIndicesForRealtimeTable.getSortedColumn()) : null;
recordReader.init(_realtimeSegmentImpl, sortedDocIds);
RealtimeSegmentSegmentCreationDataSource dataSource =
new RealtimeSegmentSegmentCreationDataSource(_realtimeSegmentImpl, recordReader);
driver.init(genConfig, dataSource, RecordEnricherPipeline.getPassThroughPipeline(),
TransformPipeline.getPassThroughPipeline());
if (!_enableColumnMajor) {
driver.build();
} else {
driver.buildByColumn(_realtimeSegmentImpl);
}
}
if (segmentPartitionConfig != null) {
Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
for (String columnName : columnPartitionMap.keySet()) {
int numPartitions = driver.getSegmentStats().getColumnProfileFor(columnName).getPartitions().size();
serverMetrics.addValueToTableGauge(_tableName, ServerGauge.REALTIME_SEGMENT_NUM_PARTITIONS, numPartitions);
}
}
} | @Test
public void test10RecordsIndexedColumnMajorSegmentBuilder()
throws Exception {
File tmpDir = new File(TMP_DIR, "tmp_" + System.currentTimeMillis());
TableConfig tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("testTable")
.setTimeColumnName(DATE_TIME_COLUMN)
.setInvertedIndexColumns(Lists.newArrayList(STRING_COLUMN1, LONG_COLUMN1))
.setSortedColumn(LONG_COLUMN1)
.setRangeIndexColumns(Lists.newArrayList(STRING_COLUMN2))
.setNoDictionaryColumns(Lists.newArrayList(LONG_COLUMN2))
.setVarLengthDictionaryColumns(Lists.newArrayList(STRING_COLUMN3))
.setOnHeapDictionaryColumns(Lists.newArrayList(LONG_COLUMN3))
.setColumnMajorSegmentBuilderEnabled(true)
.build();
Schema schema = new Schema.SchemaBuilder()
.addSingleValueDimension(STRING_COLUMN1, FieldSpec.DataType.STRING)
.addSingleValueDimension(STRING_COLUMN2, FieldSpec.DataType.STRING)
.addSingleValueDimension(STRING_COLUMN3, FieldSpec.DataType.STRING)
.addSingleValueDimension(STRING_COLUMN4, FieldSpec.DataType.STRING)
.addSingleValueDimension(LONG_COLUMN1, FieldSpec.DataType.LONG)
.addSingleValueDimension(LONG_COLUMN2, FieldSpec.DataType.LONG)
.addSingleValueDimension(LONG_COLUMN3, FieldSpec.DataType.LONG)
.addMultiValueDimension(MV_INT_COLUMN, FieldSpec.DataType.INT)
.addMetric(LONG_COLUMN4, FieldSpec.DataType.LONG)
.addDateTime(DATE_TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS")
.build();
String tableNameWithType = tableConfig.getTableName();
String segmentName = "testTable__0__0__123456";
IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
DictionaryIndexConfig varLengthDictConf = new DictionaryIndexConfig(false, true);
RealtimeSegmentConfig.Builder realtimeSegmentConfigBuilder =
new RealtimeSegmentConfig.Builder().setTableNameWithType(tableNameWithType).setSegmentName(segmentName)
.setStreamName(tableNameWithType).setSchema(schema).setTimeColumnName(DATE_TIME_COLUMN).setCapacity(1000)
.setAvgNumMultiValues(3)
.setIndex(Sets.newHashSet(LONG_COLUMN2), StandardIndexes.dictionary(), DictionaryIndexConfig.DISABLED)
.setIndex(Sets.newHashSet(Sets.newHashSet(STRING_COLUMN3)), StandardIndexes.dictionary(), varLengthDictConf)
.setIndex(Sets.newHashSet(STRING_COLUMN1, LONG_COLUMN1), StandardIndexes.inverted(), IndexConfig.ENABLED)
.setSegmentZKMetadata(getSegmentZKMetadata(segmentName)).setOffHeap(true)
.setMemoryManager(new DirectMemoryManager(segmentName))
.setStatsHistory(RealtimeSegmentStatsHistory.deserialzeFrom(new File(tmpDir, "stats")))
.setConsumerDir(new File(tmpDir, "consumerDir").getAbsolutePath());
// create mutable segment impl
MutableSegmentImpl mutableSegmentImpl = new MutableSegmentImpl(realtimeSegmentConfigBuilder.build(), null);
List<GenericRow> rows = generateTestData();
for (GenericRow row : rows) {
mutableSegmentImpl.index(row, null);
}
File outputDir = new File(tmpDir, "outputDir");
SegmentZKPropsConfig segmentZKPropsConfig = new SegmentZKPropsConfig();
segmentZKPropsConfig.setStartOffset("1");
segmentZKPropsConfig.setEndOffset("100");
ColumnIndicesForRealtimeTable cdc = new ColumnIndicesForRealtimeTable(indexingConfig.getSortedColumn().get(0),
indexingConfig.getInvertedIndexColumns(), null, null, indexingConfig.getNoDictionaryColumns(),
indexingConfig.getVarLengthDictionaryColumns());
RealtimeSegmentConverter converter =
new RealtimeSegmentConverter(mutableSegmentImpl, segmentZKPropsConfig, outputDir.getAbsolutePath(), schema,
tableNameWithType, tableConfig, segmentName, cdc, false);
converter.build(SegmentVersion.v3, null);
File indexDir = new File(outputDir, segmentName);
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir);
assertEquals(segmentMetadata.getVersion(), SegmentVersion.v3);
assertEquals(segmentMetadata.getTotalDocs(), rows.size());
assertEquals(segmentMetadata.getTimeColumn(), DATE_TIME_COLUMN);
assertEquals(segmentMetadata.getTimeUnit(), TimeUnit.MILLISECONDS);
long expectedStartTime = (long) rows.get(0).getValue(DATE_TIME_COLUMN);
assertEquals(segmentMetadata.getStartTime(), expectedStartTime);
long expectedEndTime = (long) rows.get(rows.size() - 1).getValue(DATE_TIME_COLUMN);
assertEquals(segmentMetadata.getEndTime(), expectedEndTime);
assertTrue(segmentMetadata.getAllColumns().containsAll(schema.getColumnNames()));
assertEquals(segmentMetadata.getStartOffset(), "1");
assertEquals(segmentMetadata.getEndOffset(), "100");
testSegment(rows, indexDir, tableConfig, segmentMetadata);
} |
public static RandomForest fit(Formula formula, DataFrame data) {
return fit(formula, data, new Properties());
} | @Test
public void testPenDigits() {
System.out.println("Pen Digits");
MathEx.setSeed(19650218); // to get repeatable results for cross validation.
ClassificationValidations<RandomForest> result = CrossValidation.classification(10, PenDigits.formula, PenDigits.data,
(f, x) -> RandomForest.fit(f, x, 100, 4, SplitRule.GINI, 20, 100, 5, 1.0, null, Arrays.stream(seeds)));
System.out.println(result);
assertEquals(0.9706, result.avg.accuracy, 1E-4);
} |
public void addOrder(Order... orders) {
this.orders = ArrayUtil.append(this.orders, orders);
} | @Test
public void addOrderTest() {
Page page = new Page();
page.addOrder(new Order("aaa"));
assertEquals(page.getOrders().length, 1);
page.addOrder(new Order("aaa"));
assertEquals(page.getOrders().length, 2);
} |
@Override
protected void handleLookup(CommandLookupTopic lookup) {
checkArgument(state == State.Connected);
final long requestId = lookup.getRequestId();
final boolean authoritative = lookup.isAuthoritative();
// use the connection-specific listener name by default.
final String advertisedListenerName =
lookup.hasAdvertisedListenerName() && StringUtils.isNotBlank(lookup.getAdvertisedListenerName())
? lookup.getAdvertisedListenerName() : this.listenerName;
if (log.isDebugEnabled()) {
log.debug("[{}] Received Lookup from {} for {} requesting listener {}", lookup.getTopic(), remoteAddress,
requestId, StringUtils.isNotBlank(advertisedListenerName) ? advertisedListenerName : "(none)");
}
TopicName topicName = validateTopicName(lookup.getTopic(), requestId, lookup);
if (topicName == null) {
return;
}
if (!this.service.getPulsar().isRunning()) {
if (log.isDebugEnabled()) {
log.debug("[{}] Failed lookup topic {} due to pulsar service is not ready: {} state", remoteAddress,
topicName, this.service.getPulsar().getState().toString());
}
writeAndFlush(newLookupErrorResponse(ServerError.ServiceNotReady,
"Failed due to pulsar service is not ready", requestId));
return;
}
final Semaphore lookupSemaphore = service.getLookupRequestSemaphore();
if (lookupSemaphore.tryAcquire()) {
isTopicOperationAllowed(topicName, TopicOperation.LOOKUP, authenticationData, originalAuthData).thenApply(
isAuthorized -> {
if (isAuthorized) {
lookupTopicAsync(getBrokerService().pulsar(), topicName, authoritative,
getPrincipal(), getAuthenticationData(),
requestId, advertisedListenerName).handle((lookupResponse, ex) -> {
if (ex == null) {
writeAndFlush(lookupResponse);
} else {
// it should never happen
log.warn("[{}] lookup failed with error {}, {}", remoteAddress, topicName,
ex.getMessage(), ex);
writeAndFlush(newLookupErrorResponse(ServerError.ServiceNotReady,
ex.getMessage(), requestId));
}
lookupSemaphore.release();
return null;
});
} else {
final String msg = "Client is not authorized to Lookup";
log.warn("[{}] {} with role {} on topic {}", remoteAddress, msg, getPrincipal(), topicName);
writeAndFlush(newLookupErrorResponse(ServerError.AuthorizationError, msg, requestId));
lookupSemaphore.release();
}
return null;
}).exceptionally(ex -> {
logAuthException(remoteAddress, "lookup", getPrincipal(), Optional.of(topicName), ex);
final String msg = "Exception occurred while trying to authorize lookup";
writeAndFlush(newLookupErrorResponse(ServerError.AuthorizationError, msg, requestId));
lookupSemaphore.release();
return null;
});
} else {
if (log.isDebugEnabled()) {
log.debug("[{}] Failed lookup due to too many lookup-requests {}", remoteAddress, topicName);
}
writeAndFlush(newLookupErrorResponse(ServerError.TooManyRequests,
"Failed due to too many pending lookup requests", requestId));
}
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void shouldFailHandleLookup() throws Exception {
ServerCnx serverCnx = mock(ServerCnx.class, CALLS_REAL_METHODS);
Field stateUpdater = ServerCnx.class.getDeclaredField("state");
stateUpdater.setAccessible(true);
stateUpdater.set(serverCnx, ServerCnx.State.Failed);
serverCnx.handleLookup(any());
} |
public static String toJson(UpdateRequirement updateRequirement) {
return toJson(updateRequirement, false);
} | @Test
public void testAssertUUIDToJson() {
String uuid = "2cc52516-5e73-41f2-b139-545d41a4e151";
String expected = String.format("{\"type\":\"assert-table-uuid\",\"uuid\":\"%s\"}", uuid);
UpdateRequirement actual = new UpdateRequirement.AssertTableUUID(uuid);
assertThat(UpdateRequirementParser.toJson(actual))
.as("AssertTableUUID should convert to the correct JSON value")
.isEqualTo(expected);
} |
@SuppressWarnings("deprecation")
public Object getSocketOpt(int option)
{
switch (option) {
case ZMQ.ZMQ_SNDHWM:
return sendHwm;
case ZMQ.ZMQ_RCVHWM:
return recvHwm;
case ZMQ.ZMQ_AFFINITY:
return affinity;
case ZMQ.ZMQ_IDENTITY:
return identity;
case ZMQ.ZMQ_RATE:
return rate;
case ZMQ.ZMQ_RECOVERY_IVL:
return recoveryIvl;
case ZMQ.ZMQ_SNDBUF:
return sndbuf;
case ZMQ.ZMQ_RCVBUF:
return rcvbuf;
case ZMQ.ZMQ_TOS:
return tos;
case ZMQ.ZMQ_TYPE:
return type;
case ZMQ.ZMQ_LINGER:
return linger;
case ZMQ.ZMQ_RECONNECT_IVL:
return reconnectIvl;
case ZMQ.ZMQ_RECONNECT_IVL_MAX:
return reconnectIvlMax;
case ZMQ.ZMQ_BACKLOG:
return backlog;
case ZMQ.ZMQ_MAXMSGSIZE:
return maxMsgSize;
case ZMQ.ZMQ_MULTICAST_HOPS:
return multicastHops;
case ZMQ.ZMQ_RCVTIMEO:
return recvTimeout;
case ZMQ.ZMQ_SNDTIMEO:
return sendTimeout;
case ZMQ.ZMQ_IPV4ONLY:
return !ipv6;
case ZMQ.ZMQ_IPV6:
return ipv6;
case ZMQ.ZMQ_TCP_KEEPALIVE:
return tcpKeepAlive;
case ZMQ.ZMQ_IMMEDIATE:
return immediate;
case ZMQ.ZMQ_DELAY_ATTACH_ON_CONNECT:
return !immediate;
case ZMQ.ZMQ_SOCKS_PROXY:
return socksProxyAddress;
case ZMQ.ZMQ_TCP_KEEPALIVE_CNT:
return tcpKeepAliveCnt;
case ZMQ.ZMQ_TCP_KEEPALIVE_IDLE:
return tcpKeepAliveIdle;
case ZMQ.ZMQ_TCP_KEEPALIVE_INTVL:
return tcpKeepAliveIntvl;
case ZMQ.ZMQ_MECHANISM:
return mechanism;
case ZMQ.ZMQ_PLAIN_SERVER:
return asServer && mechanism == Mechanisms.PLAIN;
case ZMQ.ZMQ_PLAIN_USERNAME:
return plainUsername;
case ZMQ.ZMQ_PLAIN_PASSWORD:
return plainPassword;
case ZMQ.ZMQ_ZAP_DOMAIN:
return zapDomain;
case ZMQ.ZMQ_LAST_ENDPOINT:
return lastEndpoint;
case ZMQ.ZMQ_CURVE_SERVER:
return asServer && mechanism == Mechanisms.CURVE;
case ZMQ.ZMQ_CURVE_PUBLICKEY:
return curvePublicKey;
case ZMQ.ZMQ_CURVE_SERVERKEY:
return curveServerKey;
case ZMQ.ZMQ_CURVE_SECRETKEY:
return curveSecretKey;
case ZMQ.ZMQ_CONFLATE:
return conflate;
case ZMQ.ZMQ_GSSAPI_SERVER:
return asServer && mechanism == Mechanisms.GSSAPI;
case ZMQ.ZMQ_GSSAPI_PRINCIPAL:
return gssPrincipal;
case ZMQ.ZMQ_GSSAPI_SERVICE_PRINCIPAL:
return gssServicePrincipal;
case ZMQ.ZMQ_GSSAPI_PLAINTEXT:
return gssPlaintext;
case ZMQ.ZMQ_HANDSHAKE_IVL:
return handshakeIvl;
case ZMQ.ZMQ_HEARTBEAT_IVL:
return heartbeatInterval;
case ZMQ.ZMQ_HEARTBEAT_TIMEOUT:
return heartbeatTimeout;
case ZMQ.ZMQ_HEARTBEAT_TTL:
// Convert the internal deciseconds value to milliseconds
return heartbeatTtl * 100;
case ZMQ.ZMQ_HEARTBEAT_CONTEXT:
return heartbeatContext;
case ZMQ.ZMQ_MSG_ALLOCATOR:
return allocator;
case ZMQ.ZMQ_MSG_ALLOCATION_HEAP_THRESHOLD:
if (allocator instanceof MsgAllocatorThreshold) {
MsgAllocatorThreshold all = (MsgAllocatorThreshold) allocator;
return all.threshold;
}
return -1;
case ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER:
return selectorChooser;
case ZMQ.ZMQ_AS_TYPE:
return asType;
case ZMQ.ZMQ_SELFADDR_PROPERTY_NAME:
return selfAddressPropertyName;
default:
throw new IllegalArgumentException("option=" + option);
}
} | @Test
public void testDefaultValue()
{
assertThat(options.getSocketOpt(ZMQ.ZMQ_GSSAPI_PRINCIPAL), is(options.gssPrincipal));
assertThat(options.getSocketOpt(ZMQ.ZMQ_GSSAPI_SERVICE_PRINCIPAL), is(options.gssServicePrincipal));
assertThat(options.getSocketOpt(ZMQ.ZMQ_HANDSHAKE_IVL), is(options.handshakeIvl));
assertThat(options.getSocketOpt(ZMQ.ZMQ_IDENTITY), is(options.identity));
assertThat(options.getSocketOpt(ZMQ.ZMQ_IMMEDIATE), is(options.immediate));
// assertThat(options.getSocketOpt(ZMQ.ZMQ_TCP_ACCEPT_FILTER), is((Object)options.ipcAcceptFilters));
assertThat(options.getSocketOpt(ZMQ.ZMQ_IPV6), is(options.ipv6));
assertThat(options.getSocketOpt(ZMQ.ZMQ_LAST_ENDPOINT), is(options.lastEndpoint));
assertThat(options.getSocketOpt(ZMQ.ZMQ_LINGER), is(options.linger));
assertThat(options.getSocketOpt(ZMQ.ZMQ_MAXMSGSIZE), is(options.maxMsgSize));
assertThat(options.getSocketOpt(ZMQ.ZMQ_MECHANISM), is(options.mechanism));
assertThat(options.getSocketOpt(ZMQ.ZMQ_MULTICAST_HOPS), is(options.multicastHops));
assertThat(options.getSocketOpt(ZMQ.ZMQ_PLAIN_PASSWORD), is(options.plainPassword));
assertThat(options.getSocketOpt(ZMQ.ZMQ_PLAIN_USERNAME), is(options.plainUsername));
assertThat(options.getSocketOpt(ZMQ.ZMQ_RATE), is(options.rate));
assertThat(options.getSocketOpt(ZMQ.ZMQ_RCVBUF), is(options.rcvbuf));
assertThat(options.getSocketOpt(ZMQ.ZMQ_RECONNECT_IVL), is(options.reconnectIvl));
assertThat(options.getSocketOpt(ZMQ.ZMQ_RECONNECT_IVL_MAX), is(options.reconnectIvlMax));
assertThat(options.getSocketOpt(ZMQ.ZMQ_RECOVERY_IVL), is(options.recoveryIvl));
assertThat(options.getSocketOpt(ZMQ.ZMQ_RCVHWM), is(options.recvHwm));
assertThat(options.getSocketOpt(ZMQ.ZMQ_RCVTIMEO), is(options.recvTimeout));
assertThat(options.getSocketOpt(ZMQ.ZMQ_SNDHWM), is(options.sendHwm));
assertThat(options.getSocketOpt(ZMQ.ZMQ_SNDTIMEO), is(options.sendTimeout));
assertThat(options.getSocketOpt(ZMQ.ZMQ_SNDBUF), is(options.sndbuf));
assertThat(options.getSocketOpt(ZMQ.ZMQ_SOCKS_PROXY), is(options.socksProxyAddress));
// assertThat(options.getSocketOpt(ZMQ.ZMQ_TCP_ACCEPT_FILTER), is((Object)options.tcpAcceptFilters));
assertThat(options.getSocketOpt(ZMQ.ZMQ_TCP_KEEPALIVE), is(options.tcpKeepAlive));
assertThat(options.getSocketOpt(ZMQ.ZMQ_TOS), is(options.tos));
assertThat(options.getSocketOpt(ZMQ.ZMQ_TYPE), is(options.type));
assertThat(options.getSocketOpt(ZMQ.ZMQ_ZAP_DOMAIN), is(options.zapDomain));
assertThat(options.getSocketOpt(ZMQ.ZMQ_HANDSHAKE_IVL), is(options.handshakeIvl));
assertThat(options.getSocketOpt(ZMQ.ZMQ_HEARTBEAT_IVL), is(options.heartbeatInterval));
assertThat(options.getSocketOpt(ZMQ.ZMQ_HEARTBEAT_TIMEOUT), is(options.heartbeatTimeout));
assertThat(options.getSocketOpt(ZMQ.ZMQ_HEARTBEAT_TTL), is(options.heartbeatTtl));
assertThat(options.getSocketOpt(ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER), nullValue());
assertThat(options.getSocketOpt(ZMQ.ZMQ_IDENTITY), is(new byte[0]));
assertThat(options.getSocketOpt(ZMQ.ZMQ_SELFADDR_PROPERTY_NAME), nullValue());
} |
@Override
public void run() {
// top-level command, do nothing
} | @Test
public void test_submit_server_cli_version_same_minor_patch_mismatch() {
String serverVersion = "5.0.0";
System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION, serverVersion);
Config cfg = smallInstanceConfig();
cfg.getJetConfig().setResourceUploadEnabled(true);
String clusterName = randomName();
cfg.setClusterName(clusterName);
hz = createHazelcastInstance(cfg);
System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION, "5.0.1");
ClientConfig clientConfig = new ClientConfig();
clientConfig.setClusterName(clusterName);
client = createHazelcastClient(clientConfig);
run("submit", testJobJarFile.toString());
Job job = hz.getJet().getJobs().get(0);
assertThat(job).eventuallyHasStatus(JobStatus.RUNNING);
} |
public boolean isMatch(Map<String, Pattern> patterns) {
if (!patterns.isEmpty()) {
return matchPatterns(patterns);
}
// An empty pattern is still considered a match.
return true;
} | @Test
public void testIsMatchMultiplePatternValid() throws UnknownHostException {
Uuid uuid = Uuid.randomUuid();
ClientMetricsInstanceMetadata instanceMetadata = new ClientMetricsInstanceMetadata(uuid,
ClientMetricsTestUtils.requestContext());
Map<String, Pattern> patternMap = new HashMap<>();
patternMap.put(ClientMetricsConfigs.CLIENT_ID, Pattern.compile("producer-1"));
patternMap.put(ClientMetricsConfigs.CLIENT_INSTANCE_ID, Pattern.compile(uuid.toString()));
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_NAME, Pattern.compile("apache-kafka-.*"));
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_VERSION, Pattern.compile("3.5.2"));
patternMap.put(ClientMetricsConfigs.CLIENT_SOURCE_ADDRESS, Pattern.compile(InetAddress.getLocalHost().getHostAddress()));
patternMap.put(ClientMetricsConfigs.CLIENT_SOURCE_PORT, Pattern.compile(String.valueOf(ClientMetricsTestUtils.CLIENT_PORT)));
assertTrue(instanceMetadata.isMatch(patternMap));
} |
public static Mode parse(String value) {
if (StringUtils.isBlank(value)) {
throw new IllegalArgumentException(ExceptionMessage.INVALID_MODE.getMessage(value));
}
try {
return parseNumeric(value);
} catch (NumberFormatException e) {
// Treat as symbolic
return parseSymbolic(value);
}
} | @Test
public void symbolicsBadTargets() {
mThrown.expect(IllegalArgumentException.class);
mThrown.expectMessage(ExceptionMessage.INVALID_MODE_SEGMENT.getMessage("f=r", "f=r", "f"));
ModeParser.parse("f=r");
} |
@Override
public void destroyChannel(String serverAddress, Channel channel) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("will destroy channel:{},address:{}", channel, serverAddress);
}
channel.disconnect();
channel.close();
} | @Test
public void testDestroyChannel() {
Channel channel = Mockito.mock(Channel.class);
nettyRemotingServer.destroyChannel("127.0.0.1:8091", channel);
Mockito.verify(channel).close();
} |
@Override
public long countKeys() {
return get(countKeysAsync());
} | @Test
public void testCountKeys() {
RJsonBucket<TestType> al = redisson.getJsonBucket("test", new JacksonCodec<>(TestType.class));
TestType t = new TestType();
t.setName("name1");
al.set(t);
assertThat(al.countKeys()).isEqualTo(1);
NestedType nt = new NestedType();
nt.setValues(Arrays.asList("t1", "t2", "t4", "t5", "t6"));
al.set("type", nt);
assertThat(al.countKeys()).isEqualTo(2);
List<Long> l = al.countKeysMulti("$.type");
assertThat(l.get(0)).isEqualTo(1L);
} |
public SerializableFunction<Row, T> getFromRowFunction() {
return fromRowFunction;
} | @Test
public void testPrimitiveRowToProto() {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(Primitive.getDescriptor());
SerializableFunction<Row, DynamicMessage> fromRow = schemaProvider.getFromRowFunction();
assertEquals(PRIMITIVE_PROTO.toString(), fromRow.apply(PRIMITIVE_ROW).toString());
} |
@Override
public ConfigRepoPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
PluggableInstanceSettings pluggableInstanceSettings = getPluginSettingsAndView(descriptor, extension);
return new ConfigRepoPluginInfo(descriptor, image(descriptor.id()), pluggableInstanceSettings, capabilities(descriptor.id()));
} | @Test
public void shouldContinueWithBuildingPluginInfoIfPluginSettingsIsNotProvidedByPlugin() throws Exception {
GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
doThrow(new RuntimeException("foo")).when(extension).getPluginSettingsConfiguration("plugin1");
ConfigRepoPluginInfo pluginInfo = new ConfigRepoPluginInfoBuilder(extension).pluginInfoFor(descriptor);
assertThat(pluginInfo.getDescriptor(), is(descriptor));
assertThat(pluginInfo.getExtensionName(), is("configrepo"));
assertNull(pluginInfo.getPluginSettings());
} |
@Override
public void getManagedLedgerInfo(String ledgerName, boolean createIfMissing, Map<String, String> properties,
MetaStoreCallback<ManagedLedgerInfo> callback) {
// Try to get the content or create an empty node
String path = PREFIX + ledgerName;
store.get(path)
.thenAcceptAsync(optResult -> {
if (optResult.isPresent()) {
ManagedLedgerInfo info;
try {
info = parseManagedLedgerInfo(optResult.get().getValue());
info = updateMLInfoTimestamp(info);
callback.operationComplete(info, optResult.get().getStat());
} catch (InvalidProtocolBufferException e) {
callback.operationFailed(getException(e));
}
} else {
// Z-node doesn't exist
if (createIfMissing) {
log.info("Creating '{}'", path);
store.put(path, new byte[0], Optional.of(-1L))
.thenAccept(stat -> {
ManagedLedgerInfo.Builder ledgerBuilder = ManagedLedgerInfo.newBuilder();
if (properties != null) {
properties.forEach((k, v) -> {
ledgerBuilder.addProperties(
MLDataFormats.KeyValue.newBuilder()
.setKey(k)
.setValue(v)
.build());
});
}
callback.operationComplete(ledgerBuilder.build(), stat);
}).exceptionally(ex -> {
callback.operationFailed(getException(ex));
return null;
});
} else {
// Tried to open a managed ledger but it doesn't exist and we shouldn't create it at this
// point
callback.operationFailed(new MetadataNotFoundException("Managed ledger not found"));
}
}
}, executor.chooseThread(ledgerName))
.exceptionally(ex -> {
try {
executor.executeOrdered(ledgerName,
() -> callback.operationFailed(getException(ex)));
} catch (RejectedExecutionException e) {
// The executor may be shut down; use the common pool to run the callback.
CompletableFuture.runAsync(() -> callback.operationFailed(getException(ex)));
}
return null;
});
} | @Test(timeOut = 20000)
void readMalformedML() throws Exception {
MetaStore store = new MetaStoreImpl(metadataStore, executor);
metadataStore.put("/managed-ledgers/my_test", "non-valid".getBytes(), Optional.empty()).join();
final CountDownLatch latch = new CountDownLatch(1);
store.getManagedLedgerInfo("my_test", false, new MetaStoreCallback<MLDataFormats.ManagedLedgerInfo>() {
public void operationFailed(MetaStoreException e) {
// Ok
latch.countDown();
}
public void operationComplete(ManagedLedgerInfo result, Stat version) {
fail("Operation should have failed");
}
});
latch.await();
} |
public void forEach(BiConsumer<? super K, ? super V> action) {
Objects.requireNonNull(action);
K[] keyTable = this.keyTable;
V[] valueTable = this.valueTable;
int i = keyTable.length;
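// Walk the backing key/value arrays directly, skipping empty slots.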
while (i-- > 0) {
K key = keyTable[i];
if (key == null) {
continue;
}
V value = valueTable[i];
action.accept(key, value);
}
} | @Test
public void testForEach() {
FuryObjectMap<String, String> map = new ObjectMap<>(4, 0.2f);
Map<String, String> hashMap = new HashMap<>();
for (int i = 0; i < 100; i++) {
map.put("k" + i, "v" + i);
hashMap.put("k" + i, "v" + i);
}
Map<String, String> hashMap2 = new HashMap<>();
map.forEach(hashMap2::put);
Assert.assertEquals(hashMap2, hashMap);
} |
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
return partition(topic, key, keyBytes, value, valueBytes, cluster, cluster.partitionsForTopic(topic).size());
} | @Test
public void testKeyPartitionIsStable() {
@SuppressWarnings("deprecation")
final Partitioner partitioner = new DefaultPartitioner();
final Cluster cluster = new Cluster("clusterId", asList(NODES), PARTITIONS,
Collections.emptySet(), Collections.emptySet());
int partition = partitioner.partition(TOPIC, null, KEY_BYTES, null, null, cluster);
assertEquals(partition, partitioner.partition(TOPIC, null, KEY_BYTES, null, null, cluster), "Same key should yield same partition");
} |
public Column getColumn(String value) {
Matcher m = PATTERN.matcher(value);
if (!m.matches()) {
throw new IllegalArgumentException("value " + value + " is not a valid column definition");
}
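// Group 1 holds the column name, group 6 the optional cell type (defaults to String); groups 4 and 7 flag array syntax.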
String name = m.group(1);
String type = m.group(6);
type = type == null ? "String" : type;
boolean array = (m.group(4) != null) || (m.group(7) != null);
if (array) {
return new ArrayColumn(name,
createColumn(name,
type));
}
return createColumn(name,
type);
} | @Test
public void testGetDollarArrayTypedColumn() {
ColumnFactory f = new ColumnFactory();
Column column = f.getColumn("$column: Long[]");
assertThat(column instanceof ArrayColumn).isTrue();
assertThat(column.getName()).isEqualTo("$column");
assertThat(column.getCellType()).isEqualTo("LongCell");
} |
public Tracking<DefaultIssue, DefaultIssue> track(Component component, Input<DefaultIssue> rawInput) {
Input<DefaultIssue> openBaseIssuesInput = baseInputFactory.create(component);
NonClosedTracking<DefaultIssue, DefaultIssue> openIssueTracking = tracker.trackNonClosed(rawInput, openBaseIssuesInput);
if (openIssueTracking.isComplete() || analysisMetadataHolder.isFirstAnalysis()) {
return openIssueTracking;
}
Input<DefaultIssue> closedIssuesBaseInput = closedIssuesInputFactory.create(component);
Tracking<DefaultIssue, DefaultIssue> closedIssuesTracking = tracker.trackClosed(openIssueTracking, closedIssuesBaseInput);
// changes of closed issues need to be loaded in order to:
// - compute right transition from workflow
// - recover fields values from before they were closed
Set<DefaultIssue> matchesClosedIssues = closedIssuesTracking.getMatchedRaws().values().stream()
.filter(t -> Issue.STATUS_CLOSED.equals(t.getStatus()))
.collect(Collectors.toSet());
componentIssuesLoader.loadLatestDiffChangesForReopeningOfClosedIssues(matchesClosedIssues);
return closedIssuesTracking;
} | @Test
public void track_loadChanges_on_matched_closed_issues() {
ReportComponent component = ReportComponent.builder(Component.Type.FILE, 1).build();
when(baseInputFactory.create(component)).thenReturn(openIssuesInput);
when(closedIssuesInputFactory.create(component)).thenReturn(closedIssuesInput);
when(nonClosedTracking.isComplete()).thenReturn(false);
when(analysisMetadataHolder.isFirstAnalysis()).thenReturn(false);
when(tracker.trackNonClosed(rawInput, openIssuesInput)).thenReturn(nonClosedTracking);
when(tracker.trackClosed(nonClosedTracking, closedIssuesInput)).thenReturn(closedTracking);
Set<DefaultIssue> mappedClosedIssues = IntStream.range(1, 2 + new Random().nextInt(2))
.mapToObj(i -> new DefaultIssue().setKey("closed" + i).setStatus(Issue.STATUS_CLOSED))
.collect(toSet());
ArrayList<DefaultIssue> mappedBaseIssues = new ArrayList<>(mappedClosedIssues);
Issue.STATUSES.stream().filter(t -> !Issue.STATUS_CLOSED.equals(t)).forEach(s -> mappedBaseIssues.add(new DefaultIssue().setKey(s).setStatus(s)));
Collections.shuffle(mappedBaseIssues);
when(closedTracking.getMatchedRaws()).thenReturn(mappedBaseIssues.stream().collect(Collectors.toMap(i -> new DefaultIssue().setKey("raw_for_" + i.key()), Function.identity())));
Tracking<DefaultIssue, DefaultIssue> tracking = underTest.track(component, rawInput);
assertThat(tracking).isSameAs(closedTracking);
verify(tracker).trackNonClosed(rawInput, openIssuesInput);
verify(tracker).trackClosed(nonClosedTracking, closedIssuesInput);
verify(componentIssuesLoader).loadLatestDiffChangesForReopeningOfClosedIssues(mappedClosedIssues);
verifyNoMoreInteractions(tracker);
} |
@Asn1Property(order=0)
public Body getBody() {
return body;
} | @Test
public void shouldVerifyNikCvca() throws IOException {
byte[] data = Resources.toByteArray(Resources.getResource("nik/tv/cvca.cvcert"));
final CvCertificate cert = mapper.read(data, CvCertificate.class);
assertDoesNotThrow(() -> new SignatureService().verify(cert, cert.getBody().getPublicKey(), cert.getBody().getPublicKey().getParams()));
} |
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
throws IOException
{
if (isGrayImage(image))
{
return createFromGrayImage(image, document);
}
// We try to encode the image with predictor
if (USE_PREDICTOR_ENCODER)
{
PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode();
if (pdImageXObject != null)
{
if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE &&
pdImageXObject.getBitsPerComponent() < 16 &&
image.getWidth() * image.getHeight() <= 50 * 50)
{
// also create classic compressed image, compare sizes
PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document);
if (pdImageXObjectClassic.getCOSObject().getLength() <
pdImageXObject.getCOSObject().getLength())
{
pdImageXObject.getCOSObject().close();
return pdImageXObjectClassic;
}
else
{
pdImageXObjectClassic.getCOSObject().close();
}
}
return pdImageXObject;
}
}
// Fallback: We export the image as 8-bit sRGB and might lose color information
return createFromRGBImage(image, document);
} | @Test
void testCreateLosslessFromImage4BYTE_ABGR() throws IOException
{
PDDocument document = new PDDocument();
BufferedImage image = ImageIO.read(this.getClass().getResourceAsStream("png.png"));
// create an ARGB image
int w = image.getWidth();
int h = image.getHeight();
BufferedImage argbImage = new BufferedImage(w, h, BufferedImage.TYPE_4BYTE_ABGR);
Graphics ag = argbImage.getGraphics();
ag.drawImage(image, 0, 0, null);
ag.dispose();
for (int x = 0; x < argbImage.getWidth(); ++x)
{
for (int y = 0; y < argbImage.getHeight(); ++y)
{
argbImage.setRGB(x, y, (argbImage.getRGB(x, y) & 0xFFFFFF) | ((y / 10 * 10) << 24));
}
}
// extra for PDFBOX-3181: check for exception due to different sizes of
// alphaRaster.getSampleModel().getWidth() and
// alphaRaster.getWidth()
// happens with image returned by BufferedImage.getSubimage()
argbImage = argbImage.getSubimage(1, 1, argbImage.getWidth() - 2, argbImage.getHeight() - 2);
w -= 2;
h -= 2;
PDImageXObject ximage = LosslessFactory.createFromImage(document, argbImage);
validate(ximage, 8, w, h, "png", PDDeviceRGB.INSTANCE.getName());
checkIdent(argbImage, ximage.getImage());
checkIdentRGB(argbImage, ximage.getOpaqueImage(null, 1));
assertNotNull(ximage.getSoftMask());
validate(ximage.getSoftMask(), 8, w, h, "png", PDDeviceGray.INSTANCE.getName());
assertTrue(colorCount(ximage.getSoftMask().getImage()) > image.getHeight() / 10);
doWritePDF(document, ximage, TESTRESULTSDIR, "4babgr.pdf");
} |
protected List<ResourceRequest> packageRequests(
List<ContainerSimulator> csList, int priority) {
// create requests
Map<Long, Map<String, ResourceRequest>> rackLocalRequests =
new HashMap<>();
Map<Long, Map<String, ResourceRequest>> nodeLocalRequests =
new HashMap<>();
Map<Long, ResourceRequest> anyRequests = new HashMap<>();
for (ContainerSimulator cs : csList) {
long allocationId = cs.getAllocationId();
ResourceRequest anyRequest = anyRequests.get(allocationId);
if (cs.getHostname() != null) {
Map<String, ResourceRequest> rackLocalRequestMap;
if (rackLocalRequests.containsKey(allocationId)) {
rackLocalRequestMap = rackLocalRequests.get(allocationId);
} else {
rackLocalRequestMap = new HashMap<>();
rackLocalRequests.put(allocationId, rackLocalRequestMap);
}
String[] rackHostNames = SLSUtils.getRackHostName(cs.getHostname());
// check rack local
String rackname = "/" + rackHostNames[0];
if (rackLocalRequestMap.containsKey(rackname)) {
rackLocalRequestMap.get(rackname).setNumContainers(
rackLocalRequestMap.get(rackname).getNumContainers() + 1);
} else {
ResourceRequest request = createResourceRequest(cs.getResource(),
cs.getExecutionType(), rackname, priority,
cs.getAllocationId(), 1);
rackLocalRequestMap.put(rackname, request);
}
// check node local
Map<String, ResourceRequest> nodeLocalRequestMap;
if (nodeLocalRequests.containsKey(allocationId)) {
nodeLocalRequestMap = nodeLocalRequests.get(allocationId);
} else {
nodeLocalRequestMap = new HashMap<>();
nodeLocalRequests.put(allocationId, nodeLocalRequestMap);
}
String hostname = rackHostNames[1];
if (nodeLocalRequestMap.containsKey(hostname)) {
nodeLocalRequestMap.get(hostname).setNumContainers(
nodeLocalRequestMap.get(hostname).getNumContainers() + 1);
} else {
ResourceRequest request = createResourceRequest(cs.getResource(),
cs.getExecutionType(), hostname, priority,
cs.getAllocationId(), 1);
nodeLocalRequestMap.put(hostname, request);
}
}
// any
if (anyRequest == null) {
anyRequest = createResourceRequest(cs.getResource(),
cs.getExecutionType(), ResourceRequest.ANY, priority,
cs.getAllocationId(), 1);
anyRequests.put(allocationId, anyRequest);
} else {
anyRequest.setNumContainers(anyRequest.getNumContainers() + 1);
}
}
List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
for (Map<String, ResourceRequest> nodeLocalRequestMap :
nodeLocalRequests.values()) {
ask.addAll(nodeLocalRequestMap.values());
}
for (Map<String, ResourceRequest> rackLocalRequestMap :
rackLocalRequests.values()) {
ask.addAll(rackLocalRequestMap.values());
}
ask.addAll(anyRequests.values());
return ask;
} | @Test
public void testPackageRequests() throws YarnException {
MockAMSimulator app = new MockAMSimulator();
List<ContainerSimulator> containerSimulators = new ArrayList<>();
Resource resource = Resources.createResource(1024);
int priority = 1;
ExecutionType execType = ExecutionType.GUARANTEED;
String type = "map";
TaskContainerDefinition.Builder builder =
TaskContainerDefinition.Builder.create()
.withResource(resource)
.withDuration(100)
.withPriority(1)
.withType(type)
.withExecutionType(execType)
.withAllocationId(-1)
.withRequestDelay(0);
ContainerSimulator s1 = ContainerSimulator
.createFromTaskContainerDefinition(
builder.withHostname("/default-rack/h1").build());
ContainerSimulator s2 = ContainerSimulator
.createFromTaskContainerDefinition(
builder.withHostname("/default-rack/h1").build());
ContainerSimulator s3 = ContainerSimulator
.createFromTaskContainerDefinition(
builder.withHostname("/default-rack/h2").build());
containerSimulators.add(s1);
containerSimulators.add(s2);
containerSimulators.add(s3);
List<ResourceRequest> res = app.packageRequests(containerSimulators,
priority);
// total 4 resource requests: any -> 1, rack -> 1, node -> 2
// All resource requests for any would be packaged into 1.
// All resource requests for racks would be packaged into 1 as all of them
// are for same rack.
// All resource requests for nodes would be packaged into 2 as there are
// two different nodes.
Assert.assertEquals(4, res.size());
int anyRequestCount = 0;
int rackRequestCount = 0;
int nodeRequestCount = 0;
for (ResourceRequest request : res) {
String resourceName = request.getResourceName();
if (resourceName.equals("*")) {
anyRequestCount++;
} else if (resourceName.equals("/default-rack")) {
rackRequestCount++;
} else {
nodeRequestCount++;
}
}
Assert.assertEquals(1, anyRequestCount);
Assert.assertEquals(1, rackRequestCount);
Assert.assertEquals(2, nodeRequestCount);
containerSimulators.clear();
s1 = ContainerSimulator.createFromTaskContainerDefinition(
createDefaultTaskContainerDefMock(resource, priority, execType, type,
"/default-rack/h1", 1));
s2 = ContainerSimulator.createFromTaskContainerDefinition(
createDefaultTaskContainerDefMock(resource, priority, execType, type,
"/default-rack/h1", 2));
s3 = ContainerSimulator.createFromTaskContainerDefinition(
createDefaultTaskContainerDefMock(resource, priority, execType, type,
"/default-rack/h2", 1));
containerSimulators.add(s1);
containerSimulators.add(s2);
containerSimulators.add(s3);
res = app.packageRequests(containerSimulators, priority);
// total 7 resource requests: any -> 2, rack -> 2, node -> 3
// All resource requests for any would be packaged into 2 as there are
// two different allocation id.
// All resource requests for racks would be packaged into 2 as all of them
// are for same rack but for two different allocation id.
// All resource requests for nodes would be packaged into 3 as either node
// or allocation id is different for each request.
Assert.assertEquals(7, res.size());
anyRequestCount = 0;
rackRequestCount = 0;
nodeRequestCount = 0;
for (ResourceRequest request : res) {
String resourceName = request.getResourceName();
long allocationId = request.getAllocationRequestId();
// allocation id should be either 1 or 2
Assert.assertTrue(allocationId == 1 || allocationId == 2);
if (resourceName.equals("*")) {
anyRequestCount++;
} else if (resourceName.equals("/default-rack")) {
rackRequestCount++;
} else {
nodeRequestCount++;
}
}
Assert.assertEquals(2, anyRequestCount);
Assert.assertEquals(2, rackRequestCount);
Assert.assertEquals(3, nodeRequestCount);
} |
public int size() {
return blocks.size();
} | @Test
public void shouldReturnSize() {
BlocksGroup group = newBlocksGroup(newBlock("a", 1), newBlock("b", 2));
assertThat(group.size(), is(2));
} |
@Override
@Deprecated
public <KR, VR> KStream<KR, VR> transform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, KeyValue<KR, VR>> transformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
final String name = builder.newProcessorName(TRANSFORM_NAME);
return flatTransform(new TransformerSupplierAdapter<>(transformerSupplier), Named.as(name), stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullStoreNameOnTransformWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.transform(transformerSupplier, Named.as("transform"), (String) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name"));
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Env env = (Env) o;
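    // The same environment name is expected to map to a single Env instance,
    // so equal names on different instances indicate an inconsistent state.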
if (getName().equals(env.getName())) {
throw new RuntimeException(getName() + " is same environment name, but their Env not same");
} else {
return false;
}
} | @Test
public void testEquals() {
assertEquals(Env.DEV, Env.valueOf("dEv"));
String name = "someEEEE";
Env.addEnvironment(name);
assertNotEquals(Env.valueOf(name), Env.DEV);
} |
public Schema addToSchema(Schema schema) {
validate(schema);
schema.addProp(LOGICAL_TYPE_PROP, name);
schema.setLogicalType(this);
return schema;
} | @Test
void fixedDecimalToFromJson() {
Schema schema = Schema.createFixed("aDecimal", null, null, 4);
LogicalTypes.decimal(9, 2).addToSchema(schema);
Schema parsed = new Schema.Parser().parse(schema.toString(true));
assertEquals(schema, parsed, "Constructed and parsed schemas should match");
} |
@Override
public void validTenant(Long id) {
TenantDO tenant = getTenant(id);
if (tenant == null) {
throw exception(TENANT_NOT_EXISTS);
}
if (tenant.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
throw exception(TENANT_DISABLE, tenant.getName());
}
if (DateUtils.isExpired(tenant.getExpireTime())) {
throw exception(TENANT_EXPIRE, tenant.getName());
}
} | @Test
public void testValidTenant_disable() {
        // mock data
TenantDO tenant = randomPojo(TenantDO.class, o -> o.setId(1L).setStatus(CommonStatusEnum.DISABLE.getStatus()));
tenantMapper.insert(tenant);
        // call the method and assert the business exception
assertServiceException(() -> tenantService.validTenant(1L), TENANT_DISABLE, tenant.getName());
} |
public static int compareObVersion(String version1, String version2) {
if (version1 == null || version2 == null) {
throw new RuntimeException("can not compare null version");
}
ObVersion v1 = new ObVersion(version1);
ObVersion v2 = new ObVersion(version2);
return v1.compareTo(v2);
} | @Test
public void compareObVersionTest() {
assert ObReaderUtils.compareObVersion("2.2.70", "3.2.2") == -1;
assert ObReaderUtils.compareObVersion("2.2.70", "2.2.50") == 1;
assert ObReaderUtils.compareObVersion("2.2.70", "3.1.2") == -1;
assert ObReaderUtils.compareObVersion("3.1.2", "3.1.2") == 0;
assert ObReaderUtils.compareObVersion("3.2.3.0", "3.2.3.0") == 0;
assert ObReaderUtils.compareObVersion("3.2.3.0-CE", "3.2.3.0") == 0;
} |
@Override
public Schema getSourceSchema() {
return sourceSchema;
} | @Test
public void renameBadlyFormattedSchemaWithAltCharMaskConfiguredTest() throws IOException {
TypedProperties props = Helpers.setupSchemaOnDFS("streamer-config", "file_schema_provider_invalid.avsc");
props.put(SANITIZE_SCHEMA_FIELD_NAMES.key(), "true");
props.put(SCHEMA_FIELD_NAME_INVALID_CHAR_MASK.key(), "_");
this.schemaProvider = new FilebasedSchemaProvider(props, jsc);
assertEquals(this.schemaProvider.getSourceSchema(), generateRenamedSchemaWithConfiguredReplacement());
} |
public static KiePMMLMiningField getKiePMMLMiningField(final MiningField toConvert, final Field<?> field) {
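        // Null-safe conversion of each MiningField attribute; allowed values and intervals
        // only apply when the backing field is a DataField.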
        String name = toConvert.getName() != null ? toConvert.getName() : "" + toConvert.hashCode();
final FIELD_USAGE_TYPE fieldUsageType = toConvert.getUsageType() != null ?
FIELD_USAGE_TYPE.byName(toConvert.getUsageType().value()) : null;
final OP_TYPE opType = toConvert.getOpType() != null ? OP_TYPE.byName(toConvert.getOpType().value()) : null;
final DATA_TYPE dataType = field.getDataType() != null ?
DATA_TYPE.byName(field.getDataType().value()) : null;
final MISSING_VALUE_TREATMENT_METHOD missingValueTreatmentMethod =
toConvert.getMissingValueTreatment() != null ?
MISSING_VALUE_TREATMENT_METHOD.byName(toConvert.getMissingValueTreatment().value()) : null;
final INVALID_VALUE_TREATMENT_METHOD invalidValueTreatmentMethod =
toConvert.getInvalidValueTreatment() != null ?
INVALID_VALUE_TREATMENT_METHOD.byName(toConvert.getInvalidValueTreatment().value()) : null;
final String missingValueReplacement = toConvert.getMissingValueReplacement() != null ?
toConvert.getMissingValueReplacement().toString() : null;
final String invalidValueReplacement = toConvert.getInvalidValueReplacement() != null ?
toConvert.getInvalidValueReplacement().toString() : null;
final List<String> allowedValues = field instanceof DataField ?
convertDataFieldValues(((DataField) field).getValues()) : Collections.emptyList();
final List<KiePMMLInterval> intervals = field instanceof DataField ?
getKiePMMLIntervals(((DataField) field).getIntervals()) :
Collections.emptyList();
final KiePMMLMiningField.Builder builder = KiePMMLMiningField.builder(name, Collections.emptyList())
.withFieldUsageType(fieldUsageType)
.withOpType(opType)
.withDataType(dataType)
.withMissingValueTreatmentMethod(missingValueTreatmentMethod)
.withInvalidValueTreatmentMethod(invalidValueTreatmentMethod)
.withMissingValueReplacement(missingValueReplacement)
.withInvalidValueReplacement(invalidValueReplacement)
.withAllowedValues(allowedValues)
.withIntervals(intervals);
return builder.build();
} | @Test
void getKiePMMLMiningField() {
DataField dataField = getRandomDataField();
MiningField toConvert = getRandomMiningField(dataField);
KiePMMLMiningField toVerify = KiePMMLMiningFieldInstanceFactory.getKiePMMLMiningField(toConvert, dataField);
commonVerifyKiePMMLMiningField(toVerify, toConvert, dataField);
} |
@ApiOperation(value = "Delete a comment on a historic process instance", tags = { "History Process" }, code = 204)
@ApiResponses(value = {
@ApiResponse(code = 204, message = "Indicates the historic process instance and comment were found and the comment is deleted. Response body is left empty intentionally."),
@ApiResponse(code = 404, message = "Indicates the requested historic process instance was not found or the historic process instance does not have a comment with the given ID.") })
@DeleteMapping(value = "/history/historic-process-instances/{processInstanceId}/comments/{commentId}")
@ResponseStatus(HttpStatus.NO_CONTENT)
public void deleteComment(@ApiParam(name = "processInstanceId") @PathVariable("processInstanceId") String processInstanceId, @ApiParam(name = "commentId") @PathVariable("commentId") String commentId) {
HistoricProcessInstance instance = getHistoricProcessInstanceFromRequest(processInstanceId);
Comment comment = taskService.getComment(commentId);
if (comment == null || comment.getProcessInstanceId() == null || !comment.getProcessInstanceId().equals(instance.getId())) {
throw new FlowableObjectNotFoundException("Process instance '" + instance.getId() + "' does not have a comment with id '" + commentId + "'.", Comment.class);
}
taskService.deleteComment(commentId);
} | @Test
@Deployment(resources = { "org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml" })
public void testDeleteComment() throws Exception {
ProcessInstance pi = null;
try {
pi = runtimeService.startProcessInstanceByKey("oneTaskProcess");
// Add a comment as "kermit"
identityService.setAuthenticatedUserId("kermit");
Comment comment = taskService.addComment(null, pi.getId(), "This is a comment...");
identityService.setAuthenticatedUserId(null);
closeResponse(executeRequest(new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_HISTORIC_PROCESS_INSTANCE_COMMENT, pi.getId(), comment.getId())),
HttpStatus.SC_NO_CONTENT));
            // Test with non-existing instance
closeResponse(executeRequest(new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_HISTORIC_PROCESS_INSTANCE_COMMENT, "unexistinginstance", "123")),
HttpStatus.SC_NOT_FOUND));
            // Test with non-existing comment
closeResponse(executeRequest(new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_HISTORIC_PROCESS_INSTANCE_COMMENT, pi.getId(), "unexistingcomment")),
HttpStatus.SC_NOT_FOUND));
} finally {
if (pi != null) {
List<Comment> comments = taskService.getProcessInstanceComments(pi.getId());
for (Comment c : comments) {
taskService.deleteComment(c.getId());
}
}
}
} |
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) {
String surfaceTag = readerWay.getTag("surface");
Surface surface = Surface.find(surfaceTag);
if (surface == MISSING)
return;
surfaceEnc.setEnum(false, edgeId, edgeIntAccess, surface);
} | @Test
public void testSynonyms() {
IntsRef relFlags = new IntsRef(2);
ReaderWay readerWay = new ReaderWay(1);
EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
int edgeId = 0;
readerWay.setTag("highway", "primary");
readerWay.setTag("surface", "metal");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Surface.PAVED, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
readerWay.setTag("surface", "sett");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Surface.COBBLESTONE, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
readerWay.setTag("surface", "unhewn_cobblestone");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Surface.COBBLESTONE, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
readerWay.setTag("surface", "earth");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Surface.DIRT, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
readerWay.setTag("surface", "pebblestone");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Surface.GRAVEL, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
readerWay.setTag("surface", "grass_paver");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Surface.GRASS, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
} |
public static <T> Fields<T> create() {
return fieldAccess(FieldAccessDescriptor.create());
} | @Test
@Category(NeedsRunner.class)
public void testSimpleSelectRename() {
PCollection<Schema1SelectedRenamed> rows =
pipeline
.apply(Create.of(Schema1.create()))
.apply(
Select.<Schema1>create()
.withFieldNameAs("field1", "fieldOne")
.withFieldNameAs("field3", "fieldThree"))
.apply(Convert.to(Schema1SelectedRenamed.class));
PAssert.that(rows).containsInAnyOrder(Schema1SelectedRenamed.create());
pipeline.run();
} |