focal_method | test_case |
---|---|
@NonNull public static String getSysInfo(@NonNull Context context) {
StringBuilder sb = new StringBuilder();
sb.append("BRAND:").append(Build.BRAND).append(NEW_LINE);
sb.append("DEVICE:").append(Build.DEVICE).append(NEW_LINE);
sb.append("Build ID:").append(Build.DISPLAY).append(NEW_LINE);
sb.append("changelist number:").append(Build.ID).append(NEW_LINE);
sb.append("MODEL:").append(Build.MODEL).append(NEW_LINE);
sb.append("PRODUCT:").append(Build.PRODUCT).append(NEW_LINE);
sb.append("TAGS:").append(Build.TAGS).append(NEW_LINE);
sb.append("VERSION.INCREMENTAL:").append(Build.VERSION.INCREMENTAL).append(NEW_LINE);
sb.append("VERSION.RELEASE:").append(Build.VERSION.RELEASE).append(NEW_LINE);
sb.append("VERSION.SDK_INT:").append(Build.VERSION.SDK_INT).append(NEW_LINE);
Configuration configuration = context.getResources().getConfiguration();
sb.append("Locale:").append(configuration.locale).append(NEW_LINE);
sb.append("configuration:").append(configuration).append(NEW_LINE);
sb.append("That's all I know.");
return sb.toString();
} | @Test
public void testGetSysInfo() {
String info = ChewbaccaUtils.getSysInfo(ApplicationProvider.getApplicationContext());
Assert.assertTrue(info.contains("BRAND:" + Build.BRAND));
Assert.assertTrue(info.contains("VERSION.SDK_INT:" + Build.VERSION.SDK_INT));
Assert.assertTrue(
info.contains(
"Locale:"
+ ApplicationProvider.getApplicationContext()
.getResources()
.getConfiguration()
.locale));
} |
@Override
public AppResponse process(Flow flow, SessionDataRequest request) throws SharedServiceClientException {
return validateAmountOfApps(flow, appSession.getAccountId(), request)
.orElseGet(() -> validateSms(flow, appSession.getAccountId(), request.getSmscode())
.orElseGet(() -> confirmSession(flow, request)));
} | @Test
void processAccountRequestSessionDataTest() throws SharedServiceClientException {
AppAuthenticator appAuthenticatorMock = new AppAuthenticator();
appAuthenticatorMock.setAccountId(ACCOUNT_ID);
appAuthenticatorMock.setDeviceName(mockedSessionDataRequest.getDeviceName());
appAuthenticatorMock.setInstanceId(mockedSessionDataRequest.getInstanceId());
appAuthenticatorMock.setIssuerType(ACTIVATION_METHOD_LETTER);
appAuthenticatorMock.setUserAppId(APP_AUTHENTICATOR_USER_APP_ID);
when(appAuthenticatorService.countByAccountIdAndInstanceIdNot(ACCOUNT_ID, SESSION_DATA_REQUEST_INSTANCE_ID)).thenReturn(0);
when(appAuthenticatorService.createAuthenticator(ACCOUNT_ID, mockedSessionDataRequest.getDeviceName(), mockedSessionDataRequest.getInstanceId(), ACTIVATION_METHOD_LETTER)).thenReturn(appAuthenticatorMock);
AppResponse appResponse = sessionConfirmed.process(mock(RequestAccountAndAppFlow.class), mockedSessionDataRequest);
AppAuthenticator createdAppAuthenticator = sessionConfirmed.getAppAuthenticator();
assertEquals(ACCOUNT_ID, createdAppAuthenticator.getAccountId());
assertEquals(DEVICE_NAME, createdAppAuthenticator.getDeviceName());
assertEquals(SESSION_DATA_REQUEST_INSTANCE_ID, createdAppAuthenticator.getInstanceId());
assertEquals(ACTIVATION_METHOD_LETTER, createdAppAuthenticator.getIssuerType());
assertEquals(createdAppAuthenticator.getUserAppId(), sessionConfirmed.getAppSession().getUserAppId());
assertEquals(createdAppAuthenticator.getInstanceId(), sessionConfirmed.getAppSession().getInstanceId());
assertEquals(VALID_RESPONSE_CODE, ((SessionDataResponse)appResponse).getStatus());
assertEquals(APP_AUTHENTICATOR_USER_APP_ID, ((SessionDataResponse)appResponse).getUserAppId());
} |
public T fromBytes(byte[] bytes) throws IOException {
return fromJson(new String(bytes, 0, bytes.length, UTF_8));
} | @Test
public void testBadBytesRoundTrip() throws Throwable {
LambdaTestUtils.intercept(JsonParseException.class,
"token",
() -> serDeser.fromBytes(new byte[]{'a'}));
} |
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
ConfigServer[] configServers = getConfigServers();
int[] zookeeperIds = getConfigServerZookeeperIds();
if (configServers.length != zookeeperIds.length) {
throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " +
"same as number of provided config server zookeeper ids (%d)",
configServers.length, zookeeperIds.length));
}
String myhostname = HostName.getLocalhost();
// TODO: Server index should be in interval [1, 254] according to doc,
// however, we cannot change this id for an existing server
for (int i = 0; i < configServers.length; i++) {
if (zookeeperIds[i] < 0) {
throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s",
zookeeperIds[i], configServers[i].hostName));
}
if (configServers[i].hostName.equals(myhostname)) {
builder.myid(zookeeperIds[i]);
}
builder.server(getZkServer(configServers[i], zookeeperIds[i]));
}
if (options.zookeeperClientPort().isPresent()) {
builder.clientPort(options.zookeeperClientPort().get());
}
if (options.hostedVespa().orElse(false)) {
builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json"));
}
boolean isHostedVespa = options.hostedVespa().orElse(false);
builder.dynamicReconfiguration(isHostedVespa);
builder.reconfigureEnsemble(!isHostedVespa);
builder.snapshotMethod(options.zooKeeperSnapshotMethod());
builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer());
} | @Test
void zookeeperConfig_self_hosted() {
final boolean hostedVespa = false;
TestOptions testOptions = createTestOptions(List.of("cfg1", "localhost", "cfg3"), List.of(4, 2, 3), hostedVespa);
ZookeeperServerConfig config = getConfig(ZookeeperServerConfig.class, testOptions);
assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::hostname, "cfg1", "localhost", "cfg3");
assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::id, 4, 2, 3);
assertEquals(2, config.myid());
assertEquals("gz", config.snapshotMethod());
assertEquals("", config.vespaTlsConfigFile());
} |
public static String prettyFormatXml(CharSequence xml) {
String xmlString = xml.toString();
StreamSource source = new StreamSource(new StringReader(xmlString));
StringWriter stringWriter = new StringWriter();
StreamResult result = new StreamResult(stringWriter);
try {
Transformer transformer = transformerFactory.newTransformer();
transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
transformer.setOutputProperty(OutputKeys.INDENT, "yes");
transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
// Transform the requested string into a nice formatted XML string
transformer.transform(source, result);
}
catch (TransformerException | IllegalArgumentException e) {
LOGGER.log(Level.SEVERE, "Transformer error", e);
return xmlString;
}
return stringWriter.toString();
} | @Test
public void prettyFormatXmlTest() {
final String uglyXml = "<foo attr1='value1' attr2='value2'><inner-element attr1='value1'>Test</inner-element></foo>";
final String prettyXml = XmlUtil.prettyFormatXml(uglyXml);
assertEquals("<foo attr1=\"value1\" attr2=\"value2\">\n <inner-element attr1=\"value1\">Test</inner-element>\n</foo>\n",
prettyXml);
} |
@Override
public int readerIndex() {
return readerIndex;
} | @Test
void initialState() {
assertEquals(CAPACITY, buffer.capacity());
assertEquals(0, buffer.readerIndex());
} |
@Override
public NativeEntity<DataAdapterDto> createNativeEntity(Entity entity,
Map<String, ValueReference> parameters,
Map<EntityDescriptor, Object> nativeEntities,
String username) {
if (entity instanceof EntityV1) {
return decode((EntityV1) entity, parameters);
} else {
throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
}
} | @Test
public void createNativeEntity() {
final Entity entity = EntityV1.builder()
.id(ModelId.of("1"))
.type(ModelTypes.LOOKUP_ADAPTER_V1)
.data(objectMapper.convertValue(LookupDataAdapterEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("http-dsv"),
ValueReference.of("HTTP DSV"),
ValueReference.of("HTTP DSV"),
ReferenceMapUtils.toReferenceMap(Collections.emptyMap())
), JsonNode.class))
.build();
assertThat(dataAdapterService.findAll()).isEmpty();
final NativeEntity<DataAdapterDto> nativeEntity = facade.createNativeEntity(entity, Collections.emptyMap(), Collections.emptyMap(), "username");
assertThat(nativeEntity.descriptor().id()).isNotNull();
assertThat(nativeEntity.descriptor().type()).isEqualTo(ModelTypes.LOOKUP_ADAPTER_V1);
assertThat(nativeEntity.entity().name()).isEqualTo("http-dsv");
assertThat(nativeEntity.entity().title()).isEqualTo("HTTP DSV");
assertThat(nativeEntity.entity().description()).isEqualTo("HTTP DSV");
assertThat(dataAdapterService.findAll()).hasSize(1);
} |
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
protobuf.setName("System");
setAttribute(protobuf, "Version", server.getVersion());
setAttribute(protobuf, "Official Distribution", officialDistribution.check());
setAttribute(protobuf, "Home Dir", config.get(PATH_HOME.getKey()).orElse(null));
setAttribute(protobuf, "Data Dir", config.get(PATH_DATA.getKey()).orElse(null));
setAttribute(protobuf, "Temp Dir", config.get(PATH_TEMP.getKey()).orElse(null));
setAttribute(protobuf, "Processors", Runtime.getRuntime().availableProcessors());
return protobuf.build();
} | @Test
public void return_official_distribution_flag() {
when(officialDistrib.check()).thenReturn(true);
ProtobufSystemInfo.Section section = underTest.toProtobuf();
assertThatAttributeIs(section, "Official Distribution", true);
} |
private Map<String, Object> augmentAndFilterConnectorConfig(String connectorConfigs) throws IOException {
return augmentAndFilterConnectorConfig(connectorConfigs, instanceConfig, secretsProvider,
componentClassLoader, componentType);
} | @Test(dataProvider = "configIgnoreUnknownFields")
public void testSinkConfigIgnoreUnknownFields(boolean ignoreUnknownConfigFields,
FunctionDetails.ComponentType type) throws Exception {
NarClassLoader narClassLoader = mock(NarClassLoader.class);
final ConnectorDefinition connectorDefinition = new ConnectorDefinition();
if (type == FunctionDetails.ComponentType.SINK) {
connectorDefinition.setSinkConfigClass(ConnectorTestConfig1.class.getName());
} else {
connectorDefinition.setSourceConfigClass(ConnectorTestConfig1.class.getName());
}
when(narClassLoader.getServiceDefinition(any())).thenReturn(ObjectMapperFactory
.getMapper().writer().writeValueAsString(connectorDefinition));
final InstanceConfig instanceConfig = new InstanceConfig();
instanceConfig.setIgnoreUnknownConfigFields(ignoreUnknownConfigFields);
final Map<String, Object> parsedConfig = JavaInstanceRunnable.augmentAndFilterConnectorConfig(
"{\"field1\": \"value\", \"field2\": \"value2\"}",
instanceConfig,
new EnvironmentBasedSecretsProvider(),
narClassLoader,
type
);
if (ignoreUnknownConfigFields) {
Assert.assertEquals(parsedConfig.size(), 1);
Assert.assertEquals(parsedConfig.get("field1"), "value");
} else {
Assert.assertEquals(parsedConfig.size(), 2);
Assert.assertEquals(parsedConfig.get("field1"), "value");
Assert.assertEquals(parsedConfig.get("field2"), "value2");
}
} |
public static String colorTag(Color color)
{
return OPENING_COLOR_TAG_START + colorToHexCode(color) + OPENING_COLOR_TAG_END;
} | @Test
public void colorTag()
{
COLOR_HEXSTRING_MAP.forEach((color, hex) ->
{
assertEquals("<col=" + hex + ">", ColorUtil.colorTag(color));
});
} |
public static String format(String json) {
final StringBuilder result = new StringBuilder();
Character wrapChar = null;
boolean isEscapeMode = false;
int length = json.length();
int number = 0;
char key;
for (int i = 0; i < length; i++) {
key = json.charAt(i);
if (CharUtil.DOUBLE_QUOTES == key || CharUtil.SINGLE_QUOTE == key) {
if (null == wrapChar) {
// entering string mode
wrapChar = key;
} else if (wrapChar.equals(key)) {
if (isEscapeMode) {
// in string mode, meeting the closing quote also ends escape mode
isEscapeMode = false;
}
// string mode ends
wrapChar = null;
}
if ((i > 1) && (json.charAt(i - 1) == CharUtil.COLON)) {
result.append(CharUtil.SPACE);
}
result.append(key);
continue;
}
if (CharUtil.BACKSLASH == key) {
if (null != wrapChar) {
// escaping only takes effect in string mode
isEscapeMode = !isEscapeMode;
result.append(key);
continue;
} else {
result.append(key);
}
}
if (null != wrapChar) {
// string mode: append the character verbatim
result.append(key);
continue;
}
// If the current character is an opening bracket or opening brace, handle it as follows:
if ((key == CharUtil.BRACKET_START) || (key == CharUtil.DELIM_START)) {
// if there is a preceding character and it is ':', print a newline and the indentation string
if ((i > 1) && (json.charAt(i - 1) == CharUtil.COLON)) {
result.append(NEW_LINE);
result.append(indent(number));
}
result.append(key);
// an opening bracket/brace must be followed by a newline; print it
result.append(NEW_LINE);
// each opening bracket/brace increases the indent count by one; print the new indentation
number++;
result.append(indent(number));
continue;
}
// 3. If the current character is a closing bracket or closing brace, handle it as follows:
if ((key == CharUtil.BRACKET_END) || (key == CharUtil.DELIM_END)) {
// (1) a closing bracket/brace must be preceded by a newline; print it
result.append(NEW_LINE);
// (2) each closing bracket/brace decreases the indent count by one; print the indentation
number--;
result.append(indent(number));
// (3) print the current character
result.append(key);
// (4) if more characters follow and the next one is not ',', print a newline
// if (((i + 1) < length) && (json.charAt(i + 1) != ',')) {
// result.append(NEW_LINE);
// }
// (5) continue with the next iteration
continue;
}
// 4. If the current character is a comma: print it, then a newline and indentation, without changing the indent count.
if ((key == ',')) {
result.append(key);
result.append(NEW_LINE);
result.append(indent(number));
continue;
}
if ((i > 1) && (json.charAt(i - 1) == CharUtil.COLON)) {
result.append(CharUtil.SPACE);
}
// 5. Print the current character.
result.append(key);
}
return result.toString();
} | @Test
public void formatTest3() {
String json = "{\"id\":13,\"title\":\"《标题》\",\"subtitle\":\"副标题z'c'z'xv'c'xv\",\"user_id\":6,\"type\":0}";
String result = JSONStrFormatter.format(json);
assertNotNull(result);
} |
public static <T> Match<T> ifNotValue(T value) {
return new Match<>(value, true);
} | @Test
public void testIfNotValue() {
Match<String> m1 = Match.ifNotValue(null);
Match<String> m2 = Match.ifNotValue("foo");
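// per the asserts below, an ifNotValue matcher never matches the value it was built from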
assertFalse(m1.matches(null));
assertFalse(m2.matches("foo"));
} |
static JavaInput reorderModifiers(String text) throws FormatterException {
return reorderModifiers(
new JavaInput(text), ImmutableList.of(Range.closedOpen(0, text.length())));
} | @Test
public void everythingIncludingDefault() throws FormatterException {
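// the expected output below follows the JLS-recommended modifier order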
assertThat(
ModifierOrderer.reorderModifiers(
"strictfp native synchronized volatile transient final static default abstract"
+ " private protected public")
.getText())
.isEqualTo(
"public protected private abstract default static final transient volatile synchronized"
+ " native strictfp");
} |
public void clean(final Date now) {
List<String> files = this.findFiles();
List<String> expiredFiles = this.filterFiles(files, this.createExpiredFileFilter(now));
for (String f : expiredFiles) {
this.delete(new File(f));
}
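// apply the total size cap only when it is explicitly bounded and positive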
if (this.totalSizeCap != CoreConstants.UNBOUNDED_TOTAL_SIZE_CAP && this.totalSizeCap > 0) {
this.capTotalSize(files);
}
List<String> emptyDirs = this.findEmptyDirs();
for (String dir : emptyDirs) {
this.delete(new File(dir));
}
} | @Test
public void keepsParentDirWhenNonEmpty() {
// Setting an expiration date of 0 would cause no files to be deleted
remover.clean(new Date(0));
verify(fileProvider, never()).deleteFile(any(File.class));
} |
@Override
public void abortCheckpointOnBarrier(long checkpointId, CheckpointException cause)
throws IOException {
if (isCurrentSyncSavepoint(checkpointId)) {
throw new FlinkRuntimeException("Stop-with-savepoint failed.");
}
subtaskCheckpointCoordinator.abortCheckpointOnBarrier(checkpointId, cause, operatorChain);
} | @Test
void testSavepointTerminateAborted() {
assertThatThrownBy(
() ->
testSyncSavepointWithEndInput(
(task, id) ->
task.abortCheckpointOnBarrier(
id,
new CheckpointException(
UNKNOWN_TASK_CHECKPOINT_NOTIFICATION_FAILURE)),
SavepointType.terminate(SavepointFormatType.CANONICAL),
true))
.isInstanceOf(FlinkRuntimeException.class)
.hasMessage("Stop-with-savepoint failed.");
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldThrowOnClusterAuthorizationException() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
allAndPseudoColumnNames(SCHEMA),
ImmutableList.of(
new LongLiteral(1L),
new StringLiteral("str"),
new StringLiteral("str"),
new LongLiteral(2L))
);
doThrow(new ClusterAuthorizationException("Cluster authorization failed"))
.when(producer).send(any());
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
);
// Then:
assertThat(e.getCause(), (hasMessage(
containsString("Authorization denied to Write on topic(s): [" + TOPIC_NAME + "]. "
+ "Caused by: The producer is not authorized to do idempotent sends. "
+ "Check that you have write permissions to the specified topic, "
+ "and disable idempotent sends by setting 'enable.idempotent=false' "
+ " if necessary."))));
} |
public Optional<RouteContext> loadRouteContext(final OriginSQLRouter originSQLRouter, final QueryContext queryContext, final RuleMetaData globalRuleMetaData,
final ShardingSphereDatabase database, final ShardingCache shardingCache, final ConfigurationProperties props,
final ConnectionContext connectionContext) {
if (queryContext.getSql().length() > shardingCache.getConfiguration().getAllowedMaxSqlLength()) {
return Optional.empty();
}
ShardingRouteCacheableCheckResult cacheableCheckResult = shardingCache.getRouteCacheableChecker().check(database, queryContext);
if (!cacheableCheckResult.isProbablyCacheable()) {
return Optional.empty();
}
List<Object> shardingConditionParams = new ArrayList<>(cacheableCheckResult.getShardingConditionParameterMarkerIndexes().size());
for (int each : cacheableCheckResult.getShardingConditionParameterMarkerIndexes()) {
if (each >= queryContext.getParameters().size()) {
return Optional.empty();
}
shardingConditionParams.add(queryContext.getParameters().get(each));
}
Optional<RouteContext> cachedResult = shardingCache.getRouteCache().get(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams))
.flatMap(ShardingRouteCacheValue::getCachedRouteContext);
RouteContext result = cachedResult.orElseGet(
() -> originSQLRouter.createRouteContext(queryContext, globalRuleMetaData, database, shardingCache.getShardingRule(), props, connectionContext));
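// cache only freshly computed routes, and only when they hit a single shard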
if (!cachedResult.isPresent() && hitOneShardOnly(result)) {
shardingCache.getRouteCache().put(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams), new ShardingRouteCacheValue(result));
}
return Optional.of(result);
} | @Test
void assertCreateRouteContextWithCacheableQueryButCacheMissed() {
QueryContext queryContext =
new QueryContext(sqlStatementContext, "insert into t values (?, ?)", Arrays.asList(0, 1), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
when(shardingCache.getConfiguration()).thenReturn(new ShardingCacheConfiguration(100, null));
when(shardingCache.getRouteCacheableChecker()).thenReturn(mock(ShardingRouteCacheableChecker.class));
when(shardingCache.getRouteCacheableChecker().check(null, queryContext)).thenReturn(new ShardingRouteCacheableCheckResult(true, Collections.singletonList(1)));
when(shardingCache.getRouteCache()).thenReturn(mock(ShardingRouteCache.class));
RouteContext expected = new RouteContext();
expected.getRouteUnits().add(new RouteUnit(new RouteMapper("ds_0", "ds_0"), Collections.singletonList(new RouteMapper("t", "t"))));
expected.getOriginalDataNodes().add(Collections.singletonList(new DataNode("ds_0", "t")));
when(shardingCache.getRouteCache().get(any(ShardingRouteCacheKey.class))).thenReturn(Optional.empty());
OriginSQLRouter router = (unused, globalRuleMetaData, database, rule, props, connectionContext) -> expected;
Optional<RouteContext> actual = new CachedShardingSQLRouter().loadRouteContext(router, queryContext, mock(RuleMetaData.class), null, shardingCache, null, null);
assertTrue(actual.isPresent());
assertThat(actual.get(), is(expected));
verify(shardingCache.getRouteCache()).put(any(ShardingRouteCacheKey.class), any(ShardingRouteCacheValue.class));
} |
@Override
public int predict(double[] x) {
if (x.length != p) {
throw new IllegalArgumentException(String.format("Invalid input vector size: %d, expected: %d", x.length, p));
}
double[] wx = project(x);
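// nearest-centroid classification in the projected discriminant space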
int y = 0;
double nearest = Double.POSITIVE_INFINITY;
for (int i = 0; i < k; i++) {
double d = MathEx.distance(wx, mu[i]);
if (d < nearest) {
nearest = d;
y = i;
}
}
return classes.valueOf(y);
} | @Test
public void testUSPS() throws Exception {
System.out.println("USPS");
ClassificationValidation<FLD> result = ClassificationValidation.of(USPS.x, USPS.y, USPS.testx, USPS.testy, FLD::fit);
System.out.println(result);
assertEquals(262, result.metrics.error);
java.nio.file.Path temp = Write.object(result.model);
FLD model = (FLD) Read.object(temp);
int error = Error.of(USPS.testy, model.predict(USPS.testx));
assertEquals(262, error);
} |
@Override
public <T> T convert(DataTable dataTable, Type type) {
return convert(dataTable, type, false);
} | @Test
void convert_to_object() {
registry.setDefaultDataTableEntryTransformer(TABLE_ENTRY_BY_TYPE_CONVERTER_SHOULD_NOT_BE_USED);
registry.setDefaultDataTableCellTransformer(TABLE_CELL_BY_TYPE_CONVERTER_SHOULD_NOT_BE_USED);
DataTable table = parse("",
" | | 1 | 2 | 3 |",
" | A | ♘ | | ♝ |",
" | B | | | |",
" | C | | ♝ | |");
registry.defineDataTableType(new DataTableType(ChessBoard.class, CHESS_BOARD_TABLE_TRANSFORMER));
ChessBoard expected = new ChessBoard(asList("♘", "♝", "♝"));
assertEquals(expected, converter.convert(table, ChessBoard.class));
} |
public static <T> TreeSet<Point<T>> subset(TimeWindow subsetWindow, NavigableSet<Point<T>> points) {
checkNotNull(subsetWindow);
checkNotNull(points);
//if the input collection is empty the output collection will be empty to
if (points.isEmpty()) {
return newTreeSet();
}
Point<T> midPoint = Point.<T>builder()
.time(subsetWindow.instantWithin(.5))
.latLong(0.0, 0.0)
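// placeholder coordinates: the floor/ceiling lookups below presumably order points by time, so lat/long is irrelevant here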
.build();
/*
* Find exactly one point in the actual Track, ideally this point will be in the middle of
* the time window
*/
Point<T> aPointInTrack = points.floor(midPoint);
if (aPointInTrack == null) {
aPointInTrack = points.ceiling(midPoint);
}
TreeSet<Point<T>> outputSubset = newTreeSet();
//given a starting point....go up until you hit startTime.
NavigableSet<Point<T>> headset = points.headSet(aPointInTrack, true);
Iterator<Point<T>> iter = headset.descendingIterator();
while (iter.hasNext()) {
Point<T> pt = iter.next();
if (subsetWindow.contains(pt.time())) {
outputSubset.add(pt);
}
if (pt.time().isBefore(subsetWindow.start())) {
break;
}
}
//given a starting point....go down until you hit endTime.
NavigableSet<Point<T>> tailSet = points.tailSet(aPointInTrack, true);
iter = tailSet.iterator();
while (iter.hasNext()) {
Point<T> pt = iter.next();
if (subsetWindow.contains(pt.time())) {
outputSubset.add(pt);
}
if (pt.time().isAfter(subsetWindow.end())) {
break;
}
}
return outputSubset;
} | @Test
public void subset_reflectsStartTime() {
Track<NopHit> t1 = createTrackFromFile(getResourceFile("Track1.txt"));
//this is the time of 21st point in the track
Instant startTime = parseNopTime("07/08/2017", "14:10:45.534");
TimeWindow extractionWindow = TimeWindow.of(startTime, startTime.plus(365 * 20, DAYS));
NavigableSet<Point<NopHit>> subset = subset(extractionWindow, t1.points());
assertThat(subset, hasSize(t1.size() - 21 + 1)); //"+1" because the fence post Point is in both the original track and the subset
//the first point in the subset has the correct time
assertThat(subset.first().time(), is(startTime));
} |
public static RowCoder of(Schema schema) {
return new RowCoder(schema);
} | @Test
public void testEqualsMapWithBytesKeyFieldWorksOnReferenceEquality() throws Exception {
FieldType fieldType = FieldType.map(FieldType.BYTES, FieldType.INT32);
Schema schema = Schema.of(Schema.Field.of("f1", fieldType));
RowCoder coder = RowCoder.of(schema);
Map<byte[], Integer> map = Collections.singletonMap(new byte[] {1, 2, 3, 4}, 1);
Row row1 = Row.withSchema(schema).addValue(map).build();
Row row2 = Row.withSchema(schema).addValue(map).build();
Assume.assumeTrue(coder.consistentWithEquals());
CoderProperties.coderConsistentWithEquals(coder, row1, row2);
} |
@SuppressWarnings("unchecked")
public static <S, F> S visit(final SqlType type, final SqlTypeWalker.Visitor<S, F> visitor) {
final BiFunction<SqlTypeWalker.Visitor<?, ?>, SqlType, Object> handler = HANDLER
.get(type.baseType());
if (handler == null) {
throw new UnsupportedOperationException("Unsupported schema type: " + type.baseType());
}
return (S) handler.apply(visitor, type);
} | @Test
public void shouldVisitDecimal() {
// Given:
final SqlDecimal type = SqlTypes.decimal(10, 2);
when(visitor.visitDecimal(any())).thenReturn("Expected");
// When:
final String result = SqlTypeWalker.visit(type, visitor);
// Then:
verify(visitor).visitDecimal(same(type));
assertThat(result, is("Expected"));
} |
public static Float toFloat(Object value, Float defaultValue) {
return convertQuietly(Float.class, value, defaultValue);
} | @Test
public void doubleToFloatTest() {
final double a = 0.45f;
final float b = Convert.toFloat(a);
assertEquals(a, b, 0);
} |
@Override
public void actionPerformed(ActionEvent e) {
if (this.ok.equals(e.getSource())) {
this.fileName = this.input.getText();
presenter.fileNameChanged();
presenter.confirmed();
} else if (this.cancel.equals(e.getSource())) {
presenter.cancelled();
}
} | @Test
void testActionEvent() {
assertDoesNotThrow(() ->{
FileSelectorJframe jFrame = new FileSelectorJframe();
ActionEvent action = new ActionEvent("dummy", 1, "dummy");
jFrame.actionPerformed(action);
});
} |
public String getLogChannelId() {
return log.getLogChannelId();
} | @Test
public void testTwoJobsGetSameLogChannelId() {
Repository repository = mock( Repository.class );
JobMeta meta = mock( JobMeta.class );
Job job1 = new Job( repository, meta );
Job job2 = new Job( repository, meta );
assertEquals( job1.getLogChannelId(), job2.getLogChannelId() );
} |
@Override
public void run() {
// top-level command, do nothing
} | @Test
public void test_cluster() {
// When
run("cluster");
// Then
String actual = captureOut();
assertContains(actual, hz.getCluster().getLocalMember().getUuid().toString());
assertContains(actual, "ACTIVE");
} |
public static String getProcessId(Path path) throws IOException {
if (path == null) {
throw new IOException("Trying to access process id from a null path");
}
LOG.debug("Accessing pid from pid file {}", path);
String processId = null;
BufferedReader bufReader = null;
try {
File file = new File(path.toString());
if (file.exists()) {
FileInputStream fis = new FileInputStream(file);
bufReader = new BufferedReader(new InputStreamReader(fis, StandardCharsets.UTF_8));
while (true) {
String line = bufReader.readLine();
if (line == null) {
break;
}
String temp = line.trim();
if (!temp.isEmpty()) {
if (Shell.WINDOWS) {
// On Windows, pid is expected to be a container ID, so find first
// line that parses successfully as a container ID.
try {
ContainerId.fromString(temp);
processId = temp;
break;
} catch (Exception e) {
// do nothing
}
}
else {
// Otherwise, find first line containing a numeric pid.
try {
long pid = Long.parseLong(temp);
if (pid > 0) {
processId = temp;
break;
}
} catch (Exception e) {
// do nothing
}
}
}
}
}
} finally {
if (bufReader != null) {
bufReader.close();
}
}
LOG.debug("Got pid {} from path {}",
(processId != null ? processId : "null"), path);
return processId;
} | @Test (timeout = 30000)
public void testComplexGet() throws IOException {
String rootDir = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File testFile = null;
String processIdInFile = Shell.WINDOWS ?
" container_1353742680940_0002_01_000001 " :
" 23 ";
String expectedProcessId = processIdInFile.trim();
try {
testFile = new File(rootDir, "temp.txt");
PrintWriter fileWriter = new PrintWriter(testFile);
fileWriter.println(" ");
fileWriter.println("");
fileWriter.println("abc");
fileWriter.println("-123");
fileWriter.println("-123 ");
fileWriter.println(processIdInFile);
fileWriter.println("6236");
fileWriter.close();
String processId = null;
processId = ProcessIdFileReader.getProcessId(
new Path(rootDir + Path.SEPARATOR + "temp.txt"));
Assert.assertEquals(expectedProcessId, processId);
} finally {
if (testFile != null
&& testFile.exists()) {
testFile.delete();
}
}
} |
List<?> apply(
final GenericRow row,
final ProcessingLogger processingLogger
) {
final Object[] args = new Object[parameterExtractors.size()];
for (int i = 0; i < parameterExtractors.size(); i++) {
args[i] = evalParam(row, processingLogger, i);
}
try {
final List<?> result = tableFunction.apply(args);
if (result == null) {
processingLogger.error(RecordProcessingError.recordProcessingError(nullMsg, row));
return ImmutableList.of();
}
return result;
} catch (final Exception e) {
processingLogger.error(RecordProcessingError.recordProcessingError(exceptionMsg, e, row));
return ImmutableList.of();
}
} | @SuppressWarnings("unchecked")
@Test
public void shouldCallEvaluatorWithCorrectParams() {
// When:
applier.apply(VALUE, processingLogger);
// Then:
final ArgumentCaptor<Supplier<String>> errorMsgCaptor = ArgumentCaptor.forClass(Supplier.class);
verify(paramExtractor)
.evaluate(eq(VALUE), isNull(), eq(processingLogger), errorMsgCaptor.capture());
assertThat(errorMsgCaptor.getValue().get(),
is("Failed to evaluate table function parameter 0"));
} |
@Override
public TbPair<Boolean, JsonNode> upgrade(int fromVersion, JsonNode oldConfiguration) throws TbNodeException {
return fromVersion == 0 ?
upgradeRuleNodesWithOldPropertyToUseFetchTo(
oldConfiguration,
"addToMetadata",
TbMsgSource.METADATA.name(),
TbMsgSource.DATA.name()) :
new TbPair<>(false, oldConfiguration);
} | @Test
public void givenOldConfig_whenUpgrade_thenShouldReturnTrueResultWithNewConfig() throws Exception {
var defaultConfig = new TbGetTenantDetailsNodeConfiguration().defaultConfiguration();
var node = new TbGetTenantDetailsNode();
String oldConfig = "{\"detailsList\":[],\"addToMetadata\":false}";
JsonNode configJson = JacksonUtil.toJsonNode(oldConfig);
TbPair<Boolean, JsonNode> upgrade = node.upgrade(0, configJson);
Assertions.assertTrue(upgrade.getFirst());
Assertions.assertEquals(defaultConfig, JacksonUtil.treeToValue(upgrade.getSecond(), defaultConfig.getClass()));
} |
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
try {
if (statement.getStatement() instanceof CreateAsSelect) {
registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
} else if (statement.getStatement() instanceof CreateSource) {
registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
// Remove schema id from SessionConfig
return stripSchemaIdConfig(statement);
} | @Test
public void shouldRegisterValueOverrideSchemaProtobuf()
throws IOException, RestClientException {
// Given:
when(schemaRegistryClient.register(anyString(), any(ParsedSchema.class))).thenReturn(1);
final SchemaAndId schemaAndId = SchemaAndId.schemaAndId(SCHEMA.value(), PROTOBUF_SCHEMA, 1);
givenStatement("CREATE STREAM source (id int key, f1 varchar) "
+ "WITH ("
+ "kafka_topic='expectedName', "
+ "key_format='JSON', "
+ "value_format='PROTOBUF', "
+ "value_schema_id=1, "
+ "partitions=1"
+ ");", Pair.of(null, schemaAndId));
// When:
injector.inject(statement);
// Then:
verify(schemaRegistryClient).register("expectedName-value", PROTOBUF_SCHEMA);
} |
static boolean applyTags(RuleDto rule, Set<String> tags) {
for (String tag : tags) {
RuleTagFormat.validate(tag);
}
Set<String> initialTags = rule.getTags();
final Set<String> systemTags = rule.getSystemTags();
Set<String> withoutSystemTags = Sets.filter(tags, input -> input != null && !systemTags.contains(input));
rule.setTags(withoutSystemTags);
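// a change occurred when the filtered set differs from the initial tags in size or membership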
return withoutSystemTags.size() != initialTags.size() || !withoutSystemTags.containsAll(initialTags);
} | @Test
public void applyTags_remove_all_existing_tags() {
RuleDto rule = new RuleDto().setTags(Sets.newHashSet("performance"));
boolean changed = RuleTagHelper.applyTags(rule, Collections.emptySet());
assertThat(rule.getTags()).isEmpty();
assertThat(changed).isTrue();
} |
@Override
public ConsumerBuilder<T> maxTotalReceiverQueueSizeAcrossPartitions(int maxTotalReceiverQueueSizeAcrossPartitions) {
checkArgument(maxTotalReceiverQueueSizeAcrossPartitions >= 0,
"maxTotalReceiverQueueSizeAcrossPartitions needs to be >= 0");
conf.setMaxTotalReceiverQueueSizeAcrossPartitions(maxTotalReceiverQueueSizeAcrossPartitions);
return this;
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenMaxTotalReceiverQueueSizeAcrossPartitionsPropertyIsNegative() {
consumerBuilderImpl.maxTotalReceiverQueueSizeAcrossPartitions(-1);
} |
public static void requireNotEmpty(final String str, final String name) {
requireNotNull(str, name);
if (str.isEmpty()) {
throw new IllegalArgumentException(name + " is an empty string");
}
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = ".*foo.*")
public void testNotEmptyWithEmptyString() {
ArgumentUtil.requireNotEmpty("", "foo");
} |
public Duration cacheNegativeTimeToLive() {
return cacheNegativeTimeToLive;
} | @Test
void cacheNegativeTimeToLive() {
assertThat(builder.build().cacheNegativeTimeToLive()).isEqualTo(DEFAULT_CACHE_NEGATIVE_TIME_TO_LIVE);
Duration cacheNegativeTimeToLive = Duration.ofSeconds(5);
builder.cacheNegativeTimeToLive(cacheNegativeTimeToLive);
assertThat(builder.build().cacheNegativeTimeToLive()).isEqualTo(cacheNegativeTimeToLive);
} |
@Override
public String[] split(String text) {
boundary.setText(text);
List<String> words = new ArrayList<>();
int start = boundary.first();
int end = boundary.next();
while (end != BreakIterator.DONE) {
String word = text.substring(start, end).trim();
if (!word.isEmpty()) {
words.add(word);
}
start = end;
end = boundary.next();
}
return words.toArray(new String[0]);
} | @Test
public void testSplit() {
System.out.println("tokenize");
String text = "Good muffins cost $3.88\nin New York. Please buy "
+ "me\ntwo of them.\n\nYou cannot eat them. I gonna eat them. "
+ "Thanks. Of course, I won't. ";
String[] expResult = {"Good", "muffins", "cost", "$3.88", "in",
"New", "York", ".", "Please", "buy", "me", "two", "of", "them", ".",
"You", "cannot", "eat", "them", ".", "I", "gonna", "eat", "them", ".",
"Thanks", ".", "Of", "course", ",", "I", "won't", "."};
BreakIteratorTokenizer instance = new BreakIteratorTokenizer();
String[] result = instance.split(text);
assertEquals(expResult.length, result.length);
for (int i = 0; i < result.length; i++) {
assertEquals(expResult[i], result[i]);
}
} |
public static String prependColorTag(final String str, final Color color)
{
return colorTag(color) + str;
} | @Test
public void prependColorTag()
{
COLOR_HEXSTRING_MAP.forEach((color, hex) ->
{
assertEquals("<col=" + hex + ">test", ColorUtil.prependColorTag("test", color));
assertEquals("<col=" + hex + ">", ColorUtil.prependColorTag("", color));
});
assertEquals("<col=ff0000>94<col=ffffff>/99", ColorUtil.prependColorTag("94" + ColorUtil.prependColorTag("/99", Color.WHITE), Color.RED));
} |
public static CompilationUnit getKiePMMLModelCompilationUnit(final String className,
final String packageName,
final String javaTemplate,
final String modelClassName) {
logger.trace("getKiePMMLModelCompilationUnit {} {}", className, packageName);
CompilationUnit templateCU = getFromFileName(javaTemplate);
CompilationUnit toReturn = templateCU.clone();
if (packageName != null && !packageName.isEmpty()) {
toReturn.setPackageDeclaration(packageName);
}
ClassOrInterfaceDeclaration modelTemplate = toReturn.getClassByName(modelClassName)
.orElseThrow(() -> new KiePMMLException(MAIN_CLASS_NOT_FOUND + ": " + modelClassName));
modelTemplate.setName(className);
return toReturn;
} | @Test
void getKiePMMLModelCompilationUnitWithoutPackage() {
String className = "ClassName";
CompilationUnit retrieved = JavaParserUtils.getKiePMMLModelCompilationUnit(className, null, TEMPLATE_FILE, TEMPLATE_CLASS);
assertThat(retrieved).isNotNull();
assertThat(retrieved.getPackageDeclaration()).isNotPresent();
assertThat(retrieved.getClassByName(TEMPLATE_CLASS)).isNotPresent();
assertThat(retrieved.getClassByName(className)).isPresent();
} |
public static String convertToString(String[] value) {
if (value == null || value.length == 0) {
return null;
}
StringBuffer result = new StringBuffer(String.valueOf(value[0]));
for (int i = 1; i < value.length; i++) {
result.append(",").append(value[i]);
}
return result.toString();
} | @Test
public void testConvertToString() throws Exception {
assertEquals(null, StringArrayConverter.convertToString(null));
assertEquals(null, StringArrayConverter.convertToString(new String[]{}));
assertEquals("", StringArrayConverter.convertToString(new String[]{""}));
assertEquals("foo", StringArrayConverter.convertToString(new String[]{"foo"}));
assertEquals("foo,bar", StringArrayConverter.convertToString(new String[]{"foo", "bar"}));
assertEquals("foo,bar,baz", StringArrayConverter.convertToString(new String[]{"foo", "bar", "baz"}));
} |
public Map<String, List<String>> parameters() {
if (params == null) {
params = decodeParams(uri, pathEndIdx(), charset, maxParams, semicolonIsNormalChar);
}
return params;
} | @Test
public void testBasicUris() throws URISyntaxException {
QueryStringDecoder d = new QueryStringDecoder(new URI("http://localhost/path"));
assertEquals(0, d.parameters().size());
} |
@Override
public <KR, VR> KStream<KR, VR> flatMap(final KeyValueMapper<? super K, ? super V, ? extends Iterable<? extends KeyValue<? extends KR, ? extends VR>>> mapper) {
return flatMap(mapper, NamedInternal.empty());
} | @Test
public void shouldNotAllowNullMapperOnFlatMap() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.flatMap(null));
assertThat(exception.getMessage(), equalTo("mapper can't be null"));
} |
public static KHyperLogLog merge(KHyperLogLog khll1, KHyperLogLog khll2)
{
// Return the one with smallest K so resolution is not lost. This loss would happen in the case
// one merged a smaller KHLL into a bigger one because the former's minhash struct won't
// cover all of the latter's minhash space.
if (khll1.maxSize <= khll2.maxSize) {
return khll1.mergeWith(khll2);
}
return khll2.mergeWith(khll1);
} | @Test
public void testMerge()
throws Exception
{
// small vs small
verifyMerge(LongStream.rangeClosed(0, 100), LongStream.rangeClosed(50, 150));
// small vs big
verifyMerge(LongStream.rangeClosed(0, 100), LongStream.rangeClosed(50, 5000));
// big vs small
verifyMerge(LongStream.rangeClosed(50, 5000), LongStream.rangeClosed(0, 100));
// big vs big
verifyMerge(LongStream.rangeClosed(0, 5000), LongStream.rangeClosed(3000, 8000));
} |
@Override
public Result responseMessageForCheckConnectionToRepository(String responseBody) {
return jsonResultMessageHandler.toResult(responseBody);
} | @Test
public void shouldHandleNullMessagesForCheckRepositoryConnectionResponse() throws Exception {
assertSuccessResult(messageHandler.responseMessageForCheckConnectionToRepository("{\"status\":\"success\"}"), new ArrayList<>());
assertFailureResult(messageHandler.responseMessageForCheckConnectionToRepository("{\"status\":\"failure\"}"), new ArrayList<>());
} |
@Override
public Address getCaller() {
throw new UnsupportedOperationException();
} | @Test(expected = UnsupportedOperationException.class)
public void testGetCaller() {
queryCacheEventData.getCaller();
} |
@Bean
public PluginDataHandler paramMappingPluginDataHandler() {
return new ParamMappingPluginDataHandler();
} | @Test
public void testParamMappingPluginDataHandler() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(ParamMappingPluginConfiguration.class, DefaultServerCodecConfigurer.class))
.withBean(ParamMappingPluginConfigurationTest.class)
.withPropertyValues("debug=true")
.run(context -> {
PluginDataHandler handler = context.getBean("paramMappingPluginDataHandler", PluginDataHandler.class);
assertNotNull(handler);
});
} |
@Override
public ScalarOperator visitCastOperator(CastOperator operator, Void context) {
return shuttleIfUpdate(operator);
} | @Test
void visitCastOperator() {
CastOperator operator = new CastOperator(INT, new ColumnRefOperator(1, INT, "id", true));
{
ScalarOperator newOperator = shuttle.visitCastOperator(operator, null);
assertEquals(operator, newOperator);
}
{
ScalarOperator newOperator = shuttle2.visitCastOperator(operator, null);
assertEquals(operator, newOperator);
}
} |
public static HttpResponseStatus valueOf(int code) {
HttpResponseStatus status = valueOf0(code);
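// fall back to an ad-hoc status instance for codes without a predefined constant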
return status != null ? status : new HttpResponseStatus(code);
} | @Test
public void testHttpStatusClassValueOf() {
// status scope: [100, 600).
for (int code = 100; code < 600; code ++) {
HttpStatusClass httpStatusClass = HttpStatusClass.valueOf(code);
assertNotSame(HttpStatusClass.UNKNOWN, httpStatusClass);
if (HttpStatusClass.INFORMATIONAL.contains(code)) {
assertEquals(HttpStatusClass.INFORMATIONAL, httpStatusClass);
} else if (HttpStatusClass.SUCCESS.contains(code)) {
assertEquals(HttpStatusClass.SUCCESS, httpStatusClass);
} else if (HttpStatusClass.REDIRECTION.contains(code)) {
assertEquals(HttpStatusClass.REDIRECTION, httpStatusClass);
} else if (HttpStatusClass.CLIENT_ERROR.contains(code)) {
assertEquals(HttpStatusClass.CLIENT_ERROR, httpStatusClass);
} else if (HttpStatusClass.SERVER_ERROR.contains(code)) {
assertEquals(HttpStatusClass.SERVER_ERROR, httpStatusClass);
} else {
fail("At least one of the if-branches above must be true");
}
}
// status scope: [Integer.MIN_VALUE, 100).
for (int code = Integer.MIN_VALUE; code < 100; code ++) {
HttpStatusClass httpStatusClass = HttpStatusClass.valueOf(code);
assertEquals(HttpStatusClass.UNKNOWN, httpStatusClass);
}
// status scope: [600, Integer.MAX_VALUE].
for (int code = 600; code > 0; code ++) {
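// the counter wraps to a negative value on integer overflow, ending the loop after covering [600, Integer.MAX_VALUE]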
HttpStatusClass httpStatusClass = HttpStatusClass.valueOf(code);
assertEquals(HttpStatusClass.UNKNOWN, httpStatusClass);
}
} |
public static SQLException toSQLException(final Exception cause, final DatabaseType databaseType) {
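// normalize the internal exception hierarchy into SQLException, preferring dialect-specific mappings when available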
if (cause instanceof SQLException) {
return (SQLException) cause;
}
if (cause instanceof ShardingSphereSQLException) {
return ((ShardingSphereSQLException) cause).toSQLException();
}
if (cause instanceof SQLDialectException) {
if (cause instanceof DatabaseProtocolException) {
return new DatabaseProtocolSQLException(cause.getMessage()).toSQLException();
}
Optional<SQLDialectExceptionMapper> dialectExceptionMapper = DatabaseTypedSPILoader.findService(SQLDialectExceptionMapper.class, databaseType);
if (dialectExceptionMapper.isPresent()) {
return dialectExceptionMapper.get().convert((SQLDialectException) cause);
}
}
if (cause instanceof ShardingSphereServerException) {
return new ServerSQLException(cause).toSQLException();
}
return new UnknownSQLException(cause).toSQLException();
} | @Test
void assertToSQLExceptionWithSQLDialectException() {
assertThat(SQLExceptionTransformEngine.toSQLException(mock(SQLDialectException.class), databaseType).getMessage(), is("Dialect exception"));
} |
public CompletableFuture<Void> waitAsync(CompletableFuture<Void> eventPubFuture,
String bundle,
UnloadDecision decision,
long timeout,
TimeUnit timeoutUnit) {
return eventPubFuture.thenCompose(__ -> inFlightUnloadRequest.computeIfAbsent(bundle, ignore -> {
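// computeIfAbsent deduplicates concurrent requests: at most one in-flight unload future per bundle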
if (log.isDebugEnabled()) {
log.debug("Handle unload bundle: {}, timeout: {} {}", bundle, timeout, timeoutUnit);
}
CompletableFuture<Void> future = new CompletableFuture<>();
future.orTimeout(timeout, timeoutUnit).whenComplete((v, ex) -> {
if (ex != null) {
inFlightUnloadRequest.remove(bundle);
log.warn("Failed to wait unload for serviceUnit: {}", bundle, ex);
}
});
return future;
})).whenComplete((__, ex) -> {
if (ex != null) {
counter.update(Failure, Unknown);
log.warn("Failed to unload bundle: {}", bundle, ex);
return;
}
log.info("Complete unload bundle: {}", bundle);
counter.update(decision);
});
} | @Test
public void testTimeout() throws IllegalAccessException {
UnloadCounter counter = new UnloadCounter();
UnloadManager manager = new UnloadManager(counter, "mockBrokerId");
var unloadDecision =
new UnloadDecision(new Unload("broker-1", "bundle-1"), Success, Admin);
CompletableFuture<Void> future =
manager.waitAsync(CompletableFuture.completedFuture(null),
"bundle-1", unloadDecision, 3, TimeUnit.SECONDS);
Map<String, CompletableFuture<Void>> inFlightUnloadRequestMap = getInFlightUnloadRequestMap(manager);
assertEquals(inFlightUnloadRequestMap.size(), 1);
try {
future.get();
fail();
} catch (Exception ex) {
assertTrue(ex.getCause() instanceof TimeoutException);
}
assertEquals(inFlightUnloadRequestMap.size(), 0);
assertEquals(counter.getBreakdownCounters().get(Failure).get(Unknown).get(), 1);
} |
public String buildSql(List<HiveColumnHandle> columns, TupleDomain<HiveColumnHandle> tupleDomain)
{
// SELECT clause
StringBuilder sql = new StringBuilder("SELECT ");
if (columns.isEmpty()) {
sql.append("' '");
}
else {
String columnNames = columns.stream()
.map(this::getFullyQualifiedColumnName)
.collect(joining(", "));
sql.append(columnNames);
}
// FROM clause
sql.append(" FROM ");
sql.append(DATA_SOURCE);
// WHERE clause
List<String> clauses = toConjuncts(columns, tupleDomain);
if (!clauses.isEmpty()) {
sql.append(" WHERE ")
.append(Joiner.on(" AND ").join(clauses));
}
return sql.toString();
} | @Test
public void testDateColumn()
{
List<HiveColumnHandle> columns = ImmutableList.of(
new HiveColumnHandle("t1", HIVE_TIMESTAMP, parseTypeSignature(TIMESTAMP), 0, REGULAR, Optional.empty(), Optional.empty()),
new HiveColumnHandle("t2", HIVE_DATE, parseTypeSignature(StandardTypes.DATE), 1, REGULAR, Optional.empty(), Optional.empty()));
TupleDomain<HiveColumnHandle> tupleDomain = withColumnDomains(ImmutableMap.of(
columns.get(1), Domain.create(SortedRangeSet.copyOf(DATE, ImmutableList.of(Range.equal(DATE, (long) DateTimeUtils.parseDate("2001-08-22")))), false)));
// CSV
IonSqlQueryBuilder queryBuilder = new IonSqlQueryBuilder(createTestFunctionAndTypeManager(), CSV);
assertEquals("SELECT s._1, s._2 FROM S3Object s WHERE (case s._2 when '' then null else CAST(s._2 AS TIMESTAMP) end = `2001-08-22`)", queryBuilder.buildSql(columns, tupleDomain));
// JSON
queryBuilder = new IonSqlQueryBuilder(createTestFunctionAndTypeManager(), JSON);
assertEquals(queryBuilder.buildSql(columns, tupleDomain), "SELECT s.t1, s.t2 FROM S3Object s WHERE (case s.t2 when '' then null else CAST(s.t2 AS TIMESTAMP) end = `2001-08-22`)");
} |
@Override
public ObjectNode encode(LispListAddress address, CodecContext context) {
checkNotNull(address, "LispListAddress cannot be null");
final ObjectNode result = context.mapper().createObjectNode();
final JsonCodec<MappingAddress> addressCodec =
context.codec(MappingAddress.class);
if (address.getIpv4() != null) {
ObjectNode ipv4Node = addressCodec.encode(address.getIpv4(), context);
result.set(IPV4, ipv4Node);
}
if (address.getIpv6() != null) {
ObjectNode ipv6Node = addressCodec.encode(address.getIpv6(), context);
result.set(IPV6, ipv6Node);
}
if (address.getIpv4() == null && address.getIpv6() == null) {
log.error("Either IPv4 or IPv6 address should be specified.");
}
return result;
} | @Test
public void testLispListAddressEncode() {
LispListAddress address = new LispListAddress.Builder()
.withIpv4(MappingAddresses.ipv4MappingAddress(IPV4_PREFIX))
.withIpv6(MappingAddresses.ipv6MappingAddress(IPV6_PREFIX))
.build();
ObjectNode addressJson = listAddressCodec.encode(address, context);
assertThat("errors in encoding List address JSON",
addressJson, LispListAddressJsonMatcher.matchesListAddress(address));
} |
public static Builder builder() {
return new Builder();
} | @Test
public void testCanUseNullAsPropertyValue() throws JsonProcessingException {
String jsonNullValueInDefaults =
"{\"defaults\":{\"warehouse\":null},\"overrides\":{\"clients\":\"5\"}}";
assertRoundTripSerializesEquallyFrom(
jsonNullValueInDefaults,
ConfigResponse.builder()
.withDefaults(DEFAULTS_WITH_NULL_VALUE)
.withOverrides(OVERRIDES)
.build());
assertRoundTripSerializesEquallyFrom(
jsonNullValueInDefaults,
ConfigResponse.builder().withDefault("warehouse", null).withOverrides(OVERRIDES).build());
String jsonNullValueInOverrides =
"{\"defaults\":{\"warehouse\":\"s3://bucket/warehouse\"},\"overrides\":{\"clients\":null}}";
assertRoundTripSerializesEquallyFrom(
jsonNullValueInOverrides,
ConfigResponse.builder()
.withDefaults(DEFAULTS)
.withOverrides(OVERRIDES_WITH_NULL_VALUE)
.build());
assertRoundTripSerializesEquallyFrom(
jsonNullValueInOverrides,
ConfigResponse.builder().withDefaults(DEFAULTS).withOverride("clients", null).build());
} |
@Override
public List<Path> run(final Session<?> session) throws BackgroundException {
final Delete delete;
if(trash) {
if(null == session.getFeature(Trash.class)) {
log.warn(String.format("No trash feature available for %s", session));
delete = session.getFeature(Delete.class);
}
else {
delete = session.getFeature(Trash.class);
}
}
else {
delete = session.getFeature(Delete.class);
}
final ListService list = session.getFeature(ListService.class);
final Map<Path, TransferStatus> recursive = new LinkedHashMap<>();
for(Path file : files) {
if(this.isCanceled()) {
throw new ConnectionCanceledException();
}
recursive.putAll(this.compile(delete, list, new WorkerListProgressListener(this, listener), file));
}
// Iterate again to delete any files that can be omitted when recursive operation is supported
if(delete.isRecursive()) {
recursive.keySet().removeIf(f -> recursive.keySet().stream().anyMatch(f::isChild));
}
final HostPreferences preferences = new HostPreferences(session.getHost());
if(preferences.getBoolean("versioning.enable") && preferences.getBoolean("versioning.delete.enable")) {
switch(session.getHost().getProtocol().getVersioningMode()) {
case custom:
final Versioning versioning = session.getFeature(Versioning.class);
if(versioning != null) {
for(Iterator<Path> iter = recursive.keySet().iterator(); iter.hasNext(); ) {
final Path f = iter.next();
if(versioning.getConfiguration(f).isEnabled()) {
if(versioning.save(f)) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip deleting %s", f));
}
iter.remove();
}
}
}
}
}
}
if(!recursive.isEmpty()) {
delete.delete(recursive, prompt, new Delete.Callback() {
@Override
public void delete(final Path file) {
listener.message(MessageFormat.format(LocaleFactory.localizedString("Deleting {0}", "Status"), file.getName()));
callback.delete(file);
if(file.isDirectory()) {
if(delete.isRecursive()) {
files.stream().filter(f -> f.isChild(file)).forEach(callback::delete);
}
}
}
});
}
return new ArrayList<>(recursive.keySet());
} | @Test
public void testCompileDefault() throws Exception {
final Session session = new NullSession(new Host(new TestProtocol())) {
@Override
@SuppressWarnings("unchecked")
public <T> T _getFeature(final Class<T> type) {
if(type == Delete.class) {
return (T) new Delete() {
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) {
assertEquals(new Path("/t/a", EnumSet.of(Path.Type.file)), new ArrayList<>(files.keySet()).get(0));
assertEquals(new Path("/t/d/b", EnumSet.of(Path.Type.file)), new ArrayList<>(files.keySet()).get(1));
assertEquals(new Path("/t/d", EnumSet.of(Path.Type.directory)), new ArrayList<>(files.keySet()).get(2));
assertEquals(new Path("/t", EnumSet.of(Path.Type.directory)), new ArrayList<>(files.keySet()).get(3));
}
};
}
return super._getFeature(type);
}
@Override
public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
if(file.equals(new Path("/t", EnumSet.of(Path.Type.directory)))) {
return new AttributedList<>(Arrays.asList(
new Path("/t/a", EnumSet.of(Path.Type.file)),
new Path("/t/d", EnumSet.of(Path.Type.directory))
));
}
if(file.equals(new Path("/t/d", EnumSet.of(Path.Type.directory)))) {
return new AttributedList<>(Collections.singletonList(
new Path("/t/d/b", EnumSet.of(Path.Type.file))
));
}
fail();
return null;
}
};
final DeleteWorker worker = new DeleteWorker(new DisabledLoginCallback(),
Collections.singletonList(new Path("/t", EnumSet.of(Path.Type.directory))),
new DisabledProgressListener());
int hashCode = worker.hashCode();
assertEquals(4, worker.run(session).size());
assertEquals(hashCode, worker.hashCode());
} |
@Override
public String format(Object value) {
return value == null ? EMPTY : nonNullFormat(value);
} | @Test
public void nullInput() {
assertEquals("wrong result", "", frm.format(null));
} |
@Override
public void close() throws Exception {
handlesToClose.forEach(IOUtils::closeQuietly);
handlesToClose.clear();
if (sharedResources != null) {
sharedResources.close();
}
cleanRelocatedDbLogs();
} | @Test
public void testFreeSharedResourcesAfterClose() throws Exception {
LRUCache cache = new LRUCache(1024L);
WriteBufferManager wbm = new WriteBufferManager(1024L, cache);
ForStSharedResources sharedResources = new ForStSharedResources(cache, wbm, 1024L, false);
final ThrowingRunnable<Exception> disposer = sharedResources::close;
OpaqueMemoryResource<ForStSharedResources> opaqueResource =
new OpaqueMemoryResource<>(sharedResources, 1024L, disposer);
ForStResourceContainer container = new ForStResourceContainer(null, opaqueResource);
container.close();
assertThat(cache.isOwningHandle(), is(false));
assertThat(wbm.isOwningHandle(), is(false));
} |
@Override
public TableDataConsistencyCheckResult swapToObject(final YamlTableDataConsistencyCheckResult yamlConfig) {
if (null == yamlConfig) {
return null;
}
if (!Strings.isNullOrEmpty(yamlConfig.getIgnoredType())) {
return new TableDataConsistencyCheckResult(TableDataConsistencyCheckIgnoredType.valueOf(yamlConfig.getIgnoredType()));
}
return new TableDataConsistencyCheckResult(yamlConfig.isMatched());
} | @Test
void assertSwapToObjectWithYamlTableDataConsistencyCheckResultMatched() {
YamlTableDataConsistencyCheckResult yamlConfig = new YamlTableDataConsistencyCheckResult(true);
TableDataConsistencyCheckResult result = yamlTableDataConsistencyCheckResultSwapper.swapToObject(yamlConfig);
assertNull(result.getIgnoredType());
assertTrue(result.isMatched());
} |
public static Map<TopicPartition, Long> parseSinkConnectorOffsets(Map<Map<String, ?>, Map<String, ?>> partitionOffsets) {
Map<TopicPartition, Long> parsedOffsetMap = new HashMap<>();
for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : partitionOffsets.entrySet()) {
Map<String, ?> partitionMap = partitionOffset.getKey();
if (partitionMap == null) {
throw new BadRequestException("The partition for a sink connector offset cannot be null or missing");
}
if (!partitionMap.containsKey(KAFKA_TOPIC_KEY) || !partitionMap.containsKey(KAFKA_PARTITION_KEY)) {
throw new BadRequestException(String.format("The partition for a sink connector offset must contain the keys '%s' and '%s'",
KAFKA_TOPIC_KEY, KAFKA_PARTITION_KEY));
}
if (partitionMap.get(KAFKA_TOPIC_KEY) == null) {
throw new BadRequestException("Kafka topic names must be valid strings and may not be null");
}
if (partitionMap.get(KAFKA_PARTITION_KEY) == null) {
throw new BadRequestException("Kafka partitions must be valid numbers and may not be null");
}
String topic = String.valueOf(partitionMap.get(KAFKA_TOPIC_KEY));
int partition;
try {
// We parse it this way because both "10" and 10 should be accepted as valid partition values in the REST API's
// JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
partition = Integer.parseInt(String.valueOf(partitionMap.get(KAFKA_PARTITION_KEY)));
} catch (Exception e) {
throw new BadRequestException("Failed to parse the following Kafka partition value in the provided offsets: '" +
partitionMap.get(KAFKA_PARTITION_KEY) + "'. Partition values for sink connectors need " +
"to be integers.", e);
}
TopicPartition tp = new TopicPartition(topic, partition);
Map<String, ?> offsetMap = partitionOffset.getValue();
if (offsetMap == null) {
// represents an offset reset
parsedOffsetMap.put(tp, null);
} else {
if (!offsetMap.containsKey(KAFKA_OFFSET_KEY)) {
throw new BadRequestException(String.format("The offset for a sink connector should either be null or contain " +
"the key '%s'", KAFKA_OFFSET_KEY));
}
long offset;
try {
// We parse it this way because both "1000" and 1000 should be accepted as valid offset values in the REST API's
// JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
offset = Long.parseLong(String.valueOf(offsetMap.get(KAFKA_OFFSET_KEY)));
} catch (Exception e) {
throw new BadRequestException("Failed to parse the following Kafka offset value in the provided offsets: '" +
offsetMap.get(KAFKA_OFFSET_KEY) + "'. Offset values for sink connectors need " +
"to be integers.", e);
}
parsedOffsetMap.put(tp, offset);
}
}
return parsedOffsetMap;
} | @Test
public void testValidateAndParseInvalidOffset() {
Map<String, Object> partition = new HashMap<>();
partition.put(SinkUtils.KAFKA_TOPIC_KEY, "topic");
partition.put(SinkUtils.KAFKA_PARTITION_KEY, 10);
Map<String, Object> offset = new HashMap<>();
Map<Map<String, ?>, Map<String, ?>> partitionOffsets = new HashMap<>();
partitionOffsets.put(partition, offset);
// missing offset key
ConnectException e = assertThrows(ConnectException.class, () -> SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
assertThat(e.getMessage(), containsString("The offset for a sink connector should either be null or contain the key 'kafka_offset'"));
// bad offset key
offset.put(SinkUtils.KAFKA_OFFSET_KEY, "not a number");
e = assertThrows(ConnectException.class, () -> SinkUtils.parseSinkConnectorOffsets(partitionOffsets));
assertThat(e.getMessage(), containsString("Failed to parse the following Kafka offset value in the provided offsets: 'not a number'"));
} |
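// A minimal usage sketch of the parser above, assuming the SinkUtils constants
// resolve to "kafka_topic", "kafka_partition" and "kafka_offset" as the test
// messages suggest. Both 10 and "10" are accepted for numeric fields, and a
// null offset map records an offset reset (a null value in the result).
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class ParseOffsetsSketch {
    public static void main(String[] args) {
        Map<String, Object> partition = new HashMap<>();
        partition.put(SinkUtils.KAFKA_TOPIC_KEY, "orders");
        partition.put(SinkUtils.KAFKA_PARTITION_KEY, "3"); // string form parses too
        Map<String, Object> offset =
                Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 1000);
        Map<TopicPartition, Long> parsed = SinkUtils.parseSinkConnectorOffsets(
                Collections.singletonMap(partition, offset));
        System.out.println(parsed); // {orders-3=1000}
    }
}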
public static KTableHolder<GenericKey> build(
final KGroupedStreamHolder groupedStream,
final StreamAggregate aggregate,
final RuntimeBuildContext buildContext,
final MaterializedFactory materializedFactory) {
return build(
groupedStream,
aggregate,
buildContext,
materializedFactory,
new AggregateParamsFactory()
);
} | @Test
public void shouldReturnCorrectSerdeForWindowedAggregate() {
for (final Runnable given : given()) {
// Given:
clearInvocations(groupedStream, timeWindowedStream, sessionWindowedStream, aggregated, buildContext);
given.run();
// When:
final KTableHolder<Windowed<GenericKey>> tableHolder = windowedAggregate.build(planBuilder, planInfo);
// Then:
final ExecutionKeyFactory<Windowed<GenericKey>> serdeFactory = tableHolder.getExecutionKeyFactory();
final FormatInfo mockFormat = mock(FormatInfo.class);
final PhysicalSchema mockSchema = mock(PhysicalSchema.class);
final QueryContext mockCtx = mock(QueryContext.class);
serdeFactory.buildKeySerde(mockFormat, mockSchema, mockCtx);
verify(buildContext).buildKeySerde(
same(mockFormat),
eq(windowedAggregate.getWindowExpression().getWindowInfo()),
same(mockSchema),
same(mockCtx)
);
}
} |
@Override
public RecoverableFsDataOutputStream open(Path path) throws IOException {
LOGGER.trace("Opening output stream for path {}", path);
Preconditions.checkNotNull(path);
GSBlobIdentifier finalBlobIdentifier = BlobUtils.parseUri(path.toUri());
return new GSRecoverableFsDataOutputStream(storage, options, finalBlobIdentifier);
} | @Test(expected = IllegalArgumentException.class)
public void testOpenWithEmptyObjectName() throws IOException {
Path path = new Path("gs://foo/");
writer.open(path);
} |
public static IpAddress valueOf(int value) {
byte[] bytes =
ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array();
return new IpAddress(Version.INET, bytes);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfShortArrayIPv4() {
IpAddress ipAddress;
byte[] value;
value = new byte[] {1, 2, 3};
ipAddress = IpAddress.valueOf(IpAddress.Version.INET, value);
} |
Optional<Integer> lastEpochSentOnCommit() {
return lastEpochSentOnCommit;
} | @Test
public void testLastEpochSentOnCommit() {
// Enable auto-commit but with very long interval to avoid triggering auto-commits on the
// interval and just test the auto-commits triggered before revocation
CommitRequestManager commitRequestManager = create(true, Integer.MAX_VALUE);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode));
TopicPartition tp = new TopicPartition("topic", 1);
subscriptionState.assignFromUser(singleton(tp));
subscriptionState.seek(tp, 100);
// Send auto commit to revoke partitions, expected to be retried on STALE_MEMBER_EPOCH
// with the latest epochs received (using long deadline to avoid expiring the request
// while retrying with the new epochs)
commitRequestManager.maybeAutoCommitSyncBeforeRevocation(Long.MAX_VALUE);
int initialEpoch = 1;
String memberId = "member1";
commitRequestManager.onMemberEpochUpdated(Optional.of(initialEpoch), Optional.of(memberId));
// Send request with epoch 1
completeOffsetCommitRequestWithError(commitRequestManager, Errors.STALE_MEMBER_EPOCH);
assertEquals(initialEpoch, commitRequestManager.lastEpochSentOnCommit().orElse(null));
// Receive new epoch. Last epoch sent should change only when sending out the next request
commitRequestManager.onMemberEpochUpdated(Optional.of(initialEpoch + 1), Optional.of(memberId));
assertEquals(initialEpoch, commitRequestManager.lastEpochSentOnCommit().get());
time.sleep(retryBackoffMs);
completeOffsetCommitRequestWithError(commitRequestManager, Errors.STALE_MEMBER_EPOCH);
assertEquals(initialEpoch + 1, commitRequestManager.lastEpochSentOnCommit().orElse(null));
// Receive empty epochs
commitRequestManager.onMemberEpochUpdated(Optional.empty(), Optional.empty());
time.sleep(retryBackoffMs * 2);
completeOffsetCommitRequestWithError(commitRequestManager, Errors.STALE_MEMBER_EPOCH);
assertFalse(commitRequestManager.lastEpochSentOnCommit().isPresent());
} |
public void removeBufferLinesBefore( long minTimeBoundary ) {
buffer.values().stream().filter( v -> v.getEvent().timeStamp < minTimeBoundary ).forEach( v -> buffer.remove( v.getNr() ) );
} | @Test
public void testRemoveBufferLinesBefore() {
LoggingBuffer loggingBuffer = new LoggingBuffer( 100 );
for ( int i = 0; i < 40; i++ ) {
KettleLoggingEvent event = new KettleLoggingEvent();
event.setMessage( new LogMessage( "test", LogLevel.BASIC ) );
event.setTimeStamp( i );
loggingBuffer.addLogggingEvent( event );
}
loggingBuffer.removeBufferLinesBefore( 20 );
Assert.assertEquals( 20, loggingBuffer.size() );
} |
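// A JDK-only sketch of the same pruning idea (a TreeMap stands in for Kettle's
// buffer; names are illustrative): entries keyed by sequence number are dropped
// in one pass when their timestamp falls below the boundary. With timestamps
// 0..39 and a boundary of 20, exactly 20 entries survive, as in the test above.
import java.util.TreeMap;

public class PruneSketch {
    public static void main(String[] args) {
        TreeMap<Integer, Long> eventsByNr = new TreeMap<>(); // nr -> timestamp
        for (int i = 0; i < 40; i++) {
            eventsByNr.put(i, (long) i);
        }
        eventsByNr.values().removeIf(timestamp -> timestamp < 20L);
        System.out.println(eventsByNr.size()); // 20
    }
}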
List<String> decorateTextWithHtml(String text, DecorationDataHolder decorationDataHolder) {
return decorateTextWithHtml(text, decorationDataHolder, null, null);
} | @Test
public void returned_code_end_to_given_param() {
String javadocWithHtml =
"/**\n" +
" * Provides a basic framework to sequentially read any kind of character stream in order to feed a generic OUTPUT.\n" +
" * \n" +
" * This framework can used for instance in order to :\n" +
" * <ul>\n" +
" * <li>Create a lexer in charge to generate a list of tokens from a character stream</li>\n" +
" * <li>Create a source code syntax highligther in charge to decorate a source code with HTML tags</li>\n" +
" * <li>Create a javadoc generator</li>\n" +
" * <li>...</li>\n" +
" * </ul>\n" +
" */\n";
DecorationDataHolder decorationData = new DecorationDataHolder();
decorationData.loadSyntaxHighlightingData("0,453,cppd;");
HtmlTextDecorator htmlTextDecorator = new HtmlTextDecorator();
List<String> htmlOutput = htmlTextDecorator.decorateTextWithHtml(javadocWithHtml, decorationData, null, 4);
assertThat(htmlOutput)
.hasSize(4)
// End at line 4
.containsExactly(
"<span class=\"cppd\">/**</span>",
"<span class=\"cppd\"> * Provides a basic framework to sequentially read any kind of character stream in order to feed a generic OUTPUT.</span>",
"<span class=\"cppd\"> * </span>",
"<span class=\"cppd\"> * This framework can used for instance in order to :</span>");
} |
@PublicAPI(usage = ACCESS)
public JavaClasses importUrl(URL url) {
return importUrls(singletonList(url));
} | @Test
public void imports_simple_local_class() throws Exception {
JavaClasses classes = new ClassFileImporter().importUrl(getClass().getResource("testexamples/innerclassimport"));
JavaClass localClass = classes.get(ClassWithInnerClass.class.getName() + "$1LocalCaller");
assertThat(localClass)
.matches(Class.forName(localClass.getName()))
.isTopLevelClass(false)
.isNestedClass(true)
.isMemberClass(false)
.isInnerClass(true)
.isLocalClass(true)
.isAnonymousClass(false)
.isRecord(false);
} |
public ByteBuffer[] getPages() {
int numPages = _pages.size();
boolean lastPageIsEmpty = _written == (numPages - 1) * (long) _pageSize;
if (lastPageIsEmpty) {
numPages--;
}
if (numPages == 0) {
return new ByteBuffer[0];
}
ByteBuffer[] result = new ByteBuffer[numPages];
for (int i = 0; i < numPages; i++) {
ByteBuffer byteBuffer = _pages.get(i);
ByteBuffer page = byteBuffer.asReadOnlyBuffer();
page.clear();
result[i] = page;
}
if (!lastPageIsEmpty) {
long startOffset = getCurrentOffset();
seek(_written);
result[numPages - 1].limit(_offsetInPage);
seek(startOffset);
}
return result;
} | @Test
void testGetPagesEmpty() {
ByteBuffer[] pages = _pagedPinotOutputStream.getPages();
assertEquals(pages.length, 0);
} |
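// A JDK-only sketch of the read-only page trick used above: asReadOnlyBuffer()
// shares the backing storage, clear() resets position and limit without
// touching the data, and limit(...) trims the partially written last page.
import java.nio.ByteBuffer;

public class PageSnapshotSketch {
    public static void main(String[] args) {
        ByteBuffer page = ByteBuffer.allocate(16);
        page.put(new byte[10]);                   // 10 of 16 bytes written
        ByteBuffer snapshot = page.asReadOnlyBuffer();
        snapshot.clear();                         // position = 0, limit = 16
        snapshot.limit(10);                       // expose only the written bytes
        System.out.println(snapshot.remaining()); // 10
    }
}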
@Override
public void setConfig(RedisClusterNode node, String param, String value) {
RedisClient entry = getEntry(node);
RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value);
syncFuture(f);
} | @Test
public void testSetConfig() {
testInCluster(connection -> {
RedisClusterNode master = getFirstMaster(connection);
connection.setConfig(master, "timeout", "10");
});
} |
public String toString() {
StringBuilder sb = new StringBuilder();
Type prevType = null;
for (InterpreterResultMessage m : msg) {
if (prevType != null) {
sb.append("\n");
if (prevType == Type.TABLE) {
sb.append("\n");
}
}
sb.append(m.toString());
prevType = m.getType();
}
return sb.toString();
} | @Test
void testToString() {
assertEquals("%html hello", new InterpreterResult(InterpreterResult.Code.SUCCESS,
"%html hello").toString());
} |
@Override
public void open(Configuration parameters) throws Exception {
this.rateLimiterTriggeredCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
this.concurrentRunThrottledCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
this.nothingToTriggerCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
this.triggerCounters =
taskNames.stream()
.map(
name ->
getRuntimeContext()
.getMetricGroup()
.addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
.counter(TableMaintenanceMetrics.TRIGGERED))
.collect(Collectors.toList());
this.nextEvaluationTimeState =
getRuntimeContext()
.getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
this.accumulatedChangesState =
getRuntimeContext()
.getListState(
new ListStateDescriptor<>(
"triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
this.lastTriggerTimesState =
getRuntimeContext()
.getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
tableLoader.open();
} | @Test
void testDataFileSizeInBytes() throws Exception {
TriggerManager manager =
manager(
sql.tableLoader(TABLE_NAME),
new TriggerEvaluator.Builder().dataFileSizeInBytes(3).build());
try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
harness(manager)) {
testHarness.open();
addEventAndCheckResult(testHarness, TableChange.builder().dataFileSizeInBytes(1L).build(), 0);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileSizeInBytes(2L).build(), 1);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileSizeInBytes(5L).build(), 2);
// No trigger in this case
addEventAndCheckResult(testHarness, TableChange.builder().dataFileSizeInBytes(1L).build(), 2);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileSizeInBytes(2L).build(), 3);
}
} |
public static byte[] encode(byte[] arr, boolean lineSep) {
if (arr == null) {
return null;
}
return lineSep ?
java.util.Base64.getMimeEncoder().encode(arr) :
java.util.Base64.getEncoder().encode(arr);
} | @Test
public void issuesI5QR4WTest(){
String a = java.util.Base64.getEncoder().encodeToString("111".getBytes()); //java.util.Base64
String b = Base64.encode("111"); //cn.hutool.core.codec.Base64
assertEquals(a, b);
} |
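// A JDK-only sketch of the lineSep flag above: the MIME encoder wraps its
// output in 76-character lines separated by CRLF, while the basic encoder
// emits one unbroken line; the alphabet is the same either way.
import java.nio.charset.StandardCharsets;

public class Base64LineSepSketch {
    public static void main(String[] args) {
        byte[] input = new byte[100]; // encodes to more than 76 characters
        String plain = new String(java.util.Base64.getEncoder().encode(input),
                StandardCharsets.US_ASCII);
        String mime = new String(java.util.Base64.getMimeEncoder().encode(input),
                StandardCharsets.US_ASCII);
        System.out.println(plain.contains("\r\n")); // false
        System.out.println(mime.contains("\r\n"));  // true
    }
}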
@Override
public double calcEdgeWeight(EdgeIteratorState edgeState, boolean reverse) {
double priority = edgeToPriorityMapping.get(edgeState, reverse);
if (priority == 0) return Double.POSITIVE_INFINITY;
final double distance = edgeState.getDistance();
double seconds = calcSeconds(distance, edgeState, reverse);
if (Double.isInfinite(seconds)) return Double.POSITIVE_INFINITY;
// add penalty at start/stop/via points
if (edgeState.get(EdgeIteratorState.UNFAVORED_EDGE)) seconds += headingPenaltySeconds;
double distanceCosts = distance * distanceInfluence;
if (Double.isInfinite(distanceCosts)) return Double.POSITIVE_INFINITY;
return seconds / priority + distanceCosts;
} | @Test
public void testRoadClass() {
EdgeIteratorState primary = graph.edge(0, 1).setDistance(10).
set(roadClassEnc, PRIMARY).set(avSpeedEnc, 80);
EdgeIteratorState secondary = graph.edge(1, 2).setDistance(10).
set(roadClassEnc, SECONDARY).set(avSpeedEnc, 80);
CustomModel customModel = createSpeedCustomModel(avSpeedEnc).setDistanceInfluence(70d).
addToPriority(If("road_class == PRIMARY", MULTIPLY, "0.5"));
Weighting weighting = createWeighting(customModel);
assertEquals(1.6, weighting.calcEdgeWeight(primary, false), 0.01);
assertEquals(1.15, weighting.calcEdgeWeight(secondary, false), 0.01);
} |
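// Worked check of the two assertions above, assuming calcSeconds is
// distance / (speed_km_h / 3.6) and the distance influence of 70 is applied
// as 0.07 per metre:
//   primary:   (10 / (80 / 3.6)) / 0.5 + 10 * 0.07 = 0.45 / 0.5 + 0.7 = 1.60
//   secondary: (10 / (80 / 3.6)) / 1.0 + 10 * 0.07 = 0.45       + 0.7 = 1.15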
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema == null && value == null) {
return null;
}
JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
try {
return serializer.serialize(topic, jsonValue);
} catch (SerializationException e) {
throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
}
} | @Test
public void decimalToJsonWithoutSchema() {
assertThrows(
DataException.class,
() -> converter.fromConnectData(TOPIC, null, new BigDecimal(new BigInteger("156"), 2)),
"expected data exception when serializing BigDecimal without schema");
} |
public Optional<String> identityFromSignature(final String password) {
// for some generators, identity in the clear is just not a part of the password
if (!prependUsername || shouldDeriveUsername() || StringUtils.isBlank(password)) {
return Optional.empty();
}
// guard against unexpected formats: extract an identity only when the password contains exactly two delimiters
if (StringUtils.countMatches(password, DELIMITER) == 2) {
if (usernameIsTimestamp()) {
final int indexOfSecondDelimiter = password.indexOf(DELIMITER, password.indexOf(DELIMITER) + 1);
return Optional.of(password.substring(0, indexOfSecondDelimiter));
} else {
return Optional.of(password.substring(0, password.indexOf(DELIMITER)));
}
}
return Optional.empty();
} | @Test
public void testGetIdentityFromSignatureIsTimestamp() {
final String identity = usernameIsTimestampGenerator.identityFromSignature(usernameIsTimestampCredentials.password()).orElseThrow();
assertEquals(USERNAME_TIMESTAMP, identity);
} |
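// A JDK-only sketch of the two-delimiter extraction above, assuming DELIMITER
// is ":" (the password layout here is illustrative): with exactly two
// delimiters present, a timestamp-style username keeps everything up to the
// second delimiter, while other generators keep only the first segment.
public class IdentitySplitSketch {
    public static void main(String[] args) {
        String password = "1700000000:alice:signature";
        int first = password.indexOf(':');
        int second = password.indexOf(':', first + 1);
        System.out.println(password.substring(0, second)); // 1700000000:alice
        System.out.println(password.substring(0, first));  // 1700000000
    }
}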
@Override
public void onBufferFinished() {
Optional<Decision> decision =
spillStrategy.onBufferFinished(numUnSpillBuffers.incrementAndGet(), getPoolSize());
handleDecision(decision);
} | @Test
void testHandleEmptyDecision() throws Exception {
CompletableFuture<Void> globalDecisionFuture = new CompletableFuture<>();
HsSpillingStrategy spillingStrategy =
TestingSpillingStrategy.builder()
.setOnBufferFinishedFunction(
(finishedBuffer, poolSize) -> {
// return empty optional to trigger global decision.
return Optional.empty();
})
.setDecideActionWithGlobalInfoFunction(
(provider) -> {
globalDecisionFuture.complete(null);
return Decision.NO_ACTION;
})
.build();
HsMemoryDataManager memoryDataManager = createMemoryDataManager(spillingStrategy);
// trigger an empty decision.
memoryDataManager.onBufferFinished();
assertThat(globalDecisionFuture).isCompleted();
} |
public static Fiat parseFiat(final String currencyCode, final String str) {
try {
long val = new BigDecimal(str).movePointRight(SMALLEST_UNIT_EXPONENT).longValueExact();
return Fiat.valueOf(currencyCode, val);
} catch (ArithmeticException e) {
throw new IllegalArgumentException(e);
}
} | @Test(expected = IllegalArgumentException.class)
public void testParseFiatOverprecise() {
Fiat.parseFiat("EUR", "0.00011");
} |
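// A JDK-only sketch of why "0.00011" is rejected above, assuming
// SMALLEST_UNIT_EXPONENT is 4 as in bitcoinj's Fiat: shifting the point right
// four places leaves a fractional part, and longValueExact() then throws the
// ArithmeticException that parseFiat rewraps as IllegalArgumentException.
import java.math.BigDecimal;

public class FiatPrecisionSketch {
    public static void main(String[] args) {
        System.out.println(new BigDecimal("0.0001")
                .movePointRight(4).longValueExact()); // 1
        new BigDecimal("0.00011")
                .movePointRight(4).longValueExact();  // throws ArithmeticException
    }
}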
public LogicalSlot allocateLogicalSlot() {
LOG.debug("Allocating logical slot from shared slot ({})", physicalSlotRequestId);
Preconditions.checkState(
state == State.ALLOCATED, "The shared slot has already been released.");
final LogicalSlot slot =
new SingleLogicalSlot(
new SlotRequestId(),
physicalSlot,
Locality.UNKNOWN,
this,
slotWillBeOccupiedIndefinitely);
allocatedLogicalSlots.put(slot.getSlotRequestId(), slot);
return slot;
} | @Test
void testAllocateLogicalSlot() {
final TestingPhysicalSlot physicalSlot = TestingPhysicalSlot.builder().build();
final SharedSlot sharedSlot =
new SharedSlot(new SlotRequestId(), physicalSlot, false, () -> {});
final LogicalSlot logicalSlot = sharedSlot.allocateLogicalSlot();
assertThat(logicalSlot.getAllocationId()).isEqualTo(physicalSlot.getAllocationId());
assertThat(logicalSlot.getLocality()).isEqualTo(Locality.UNKNOWN);
assertThat(logicalSlot.getPayload()).isNull();
assertThat(logicalSlot.getTaskManagerLocation())
.isEqualTo(physicalSlot.getTaskManagerLocation());
assertThat(logicalSlot.getTaskManagerGateway())
.isEqualTo(physicalSlot.getTaskManagerGateway());
} |
public static Method getMostSpecificMethod(Method method, Class<?> targetClass) {
if (targetClass != null && targetClass != method.getDeclaringClass() && isOverridable(method, targetClass)) {
try {
if (Modifier.isPublic(method.getModifiers())) {
try {
return targetClass.getMethod(method.getName(), method.getParameterTypes());
} catch (NoSuchMethodException ex) {
return method;
}
} else {
return method;
}
} catch (SecurityException ex) {
// Security settings are disallowing reflective access; fall back to 'method' below.
}
}
return method;
} | @Test
public void testGetMostSpecificPrivateMethod() throws NoSuchMethodException {
Method method = AbstractList.class.getDeclaredMethod("rangeCheckForAdd", int.class);
Method specificMethod = ClassUtils.getMostSpecificMethod(method, ArrayList.class);
assertNotEquals(ArrayList.class.getDeclaredMethod("rangeCheckForAdd", int.class), specificMethod);
} |
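// A JDK-reflection illustration of the public-method path above: resolving
// Object.toString() against String yields String's override, because
// targetClass.getMethod(name, parameterTypes) returns the most specific
// public implementation.
import java.lang.reflect.Method;

public class MostSpecificSketch {
    public static void main(String[] args) throws NoSuchMethodException {
        Method base = Object.class.getMethod("toString");
        Method specific = String.class.getMethod(base.getName(),
                base.getParameterTypes());
        System.out.println(specific.getDeclaringClass()); // class java.lang.String
    }
}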
public Set<String> validate(String expr, Set<String> whitelistVars) throws Exception {
checkExprLength(expr);
selParser.ReInit(new ByteArrayInputStream(expr.getBytes()));
ASTExecute n = selParser.Execute();
Map<String, Boolean> vars = new HashMap<>();
n.jjtAccept(validator, vars);
Set<String> res = new HashSet<>();
for (Map.Entry<String, Boolean> entry : vars.entrySet()) {
if (entry.getValue() && !whitelistVars.contains(entry.getKey())) {
res.add(entry.getKey());
}
}
return res;
} | @Test
public void testValidate() throws Exception {
Set<String> res = t1.validate("x.length;", new HashSet<>());
assertEquals("[x]", res.toString());
} |
public ListenableFuture<ListPluginsResponse> listPluginsWithDeadline(
ListPluginsRequest request, Deadline deadline) {
return pluginService.withDeadline(deadline).listPlugins(request);
} | @Test
public void listPlugins_returnsMultiplePlugins() throws Exception {
ListPluginsRequest request = ListPluginsRequest.getDefaultInstance();
List<PluginDefinition> plugins = Lists.newArrayList();
for (int i = 0; i < 5; i++) {
plugins.add(createSinglePluginDefinitionWithName(String.format(PLUGIN_NAME + "%d", i)));
}
PluginServiceImplBase listPluginsImpl =
new PluginServiceImplBase() {
@Override
public void listPlugins(
ListPluginsRequest request, StreamObserver<ListPluginsResponse> responseObserver) {
responseObserver.onNext(
ListPluginsResponse.newBuilder().addAllPlugins(plugins).build());
responseObserver.onCompleted();
}
};
serviceRegistry.addService(listPluginsImpl);
ListenableFuture<ListPluginsResponse> listPlugins =
pluginService.listPluginsWithDeadline(request, DEADLINE_DEFAULT);
assertThat(listPlugins.isDone()).isTrue();
assertThat(listPlugins.get().getPluginsList()).containsExactlyElementsIn(plugins);
} |
public Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> newChannelInitializers() {
Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> channelInitializers = new HashMap<>();
Set<InetSocketAddress> addresses = new HashSet<>();
for (Map.Entry<String, ProtocolHandlerWithClassLoader> handler : handlers.entrySet()) {
Map<InetSocketAddress, ChannelInitializer<SocketChannel>> initializers =
handler.getValue().newChannelInitializers();
initializers.forEach((address, initializer) -> {
if (!addresses.add(address)) {
log.error("Protocol handler for `{}` attempts to use {} for its listening port."
+ " But it is already occupied by other messaging protocols.",
handler.getKey(), address);
throw new RuntimeException("Protocol handler for `" + handler.getKey()
+ "` attempts to use " + address + " for its listening port. But it is"
+ " already occupied by other messaging protocols");
}
channelInitializers.put(handler.getKey(), initializers);
endpoints.put(address, handler.getKey());
});
}
return channelInitializers;
} | @Test
public void testNewChannelInitializersSuccess() {
ChannelInitializer<SocketChannel> i1 = mock(ChannelInitializer.class);
ChannelInitializer<SocketChannel> i2 = mock(ChannelInitializer.class);
Map<InetSocketAddress, ChannelInitializer<SocketChannel>> p1Initializers = new HashMap<>();
p1Initializers.put(new InetSocketAddress("127.0.0.1", 6650), i1);
p1Initializers.put(new InetSocketAddress("127.0.0.2", 6651), i2);
ChannelInitializer<SocketChannel> i3 = mock(ChannelInitializer.class);
ChannelInitializer<SocketChannel> i4 = mock(ChannelInitializer.class);
Map<InetSocketAddress, ChannelInitializer<SocketChannel>> p2Initializers = new HashMap<>();
p2Initializers.put(new InetSocketAddress("127.0.0.3", 6650), i3);
p2Initializers.put(new InetSocketAddress("127.0.0.4", 6651), i4);
when(handler1.newChannelInitializers()).thenReturn(p1Initializers);
when(handler2.newChannelInitializers()).thenReturn(p2Initializers);
Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> initializers =
handlers.newChannelInitializers();
assertEquals(2, initializers.size());
assertSame(p1Initializers, initializers.get(protocol1));
assertSame(p2Initializers, initializers.get(protocol2));
} |
public static TypeBuilder<Schema> builder() {
return new TypeBuilder<>(new SchemaCompletion(), new NameContext());
} | @Test
void bytes() {
Schema.Type type = Schema.Type.BYTES;
Schema simple = SchemaBuilder.builder().bytesType();
Schema expected = primitive(type, simple);
Schema built1 = SchemaBuilder.builder().bytesBuilder().prop("p", "v").endBytes();
assertEquals(expected, built1);
} |
public T divide(BigDecimal by) {
return create(value.divide(by, MAX_VALUE_SCALE, RoundingMode.DOWN));
} | @Test
void testValueScaleLimited() {
final Resource v1 = new TestResource(0.100000001);
assertTestResourceValueEquals(0.1, v1);
final Resource v2 = new TestResource(1.0).divide(3);
assertTestResourceValueEquals(0.33333333, v2);
} |
public static int jsonEscapedSizeInBytes(CharSequence v) {
boolean ascii = true;
int escapingOverhead = 0;
for (int i = 0, length = v.length(); i < length; i++) {
char c = v.charAt(i);
if (c == '\u2028' || c == '\u2029') {
escapingOverhead += 5;
} else if (c >= 0x80) {
ascii = false;
} else {
String maybeReplacement = REPLACEMENT_CHARS[c];
if (maybeReplacement != null) escapingOverhead += maybeReplacement.length() - 1;
}
}
if (ascii) return v.length() + escapingOverhead;
return WriteBuffer.utf8SizeInBytes(v) + escapingOverhead;
} | @Test void testJsonEscapedSizeInBytes() {
assertThat(jsonEscapedSizeInBytes(new String(new char[] {0, 'a', 1})))
.isEqualTo(13);
assertThat(jsonEscapedSizeInBytes(new String(new char[] {'"', '\\', '\t', '\b'})))
.isEqualTo(8);
assertThat(jsonEscapedSizeInBytes(new String(new char[] {'\n', '\r', '\f'})))
.isEqualTo(6);
assertThat(jsonEscapedSizeInBytes("\u2028 and \u2029"))
.isEqualTo(17);
assertThat(jsonEscapedSizeInBytes("\"foo"))
.isEqualTo(5);
} |
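// Worked check of three assertions above, assuming REPLACEMENT_CHARS maps
// control characters without a short escape to six-character "\u0000"-style
// sequences (+5 overhead each) and '"', '\\', '\t', '\b', '\n', '\r', '\f'
// to two-character escapes (+1 each):
//   {0, 'a', 1}:             3 chars + 5 + 5         = 13
//   {'"', '\\', '\t', '\b'}: 4 chars + 1 + 1 + 1 + 1 = 8
//   "\u2028 and \u2029":     7 chars + 5 + 5         = 17 (escaped, so still ASCII)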
public static double[] toDoubleArray(DoubleArrayList doubleArrayList) {
double[] doubleArrayListElements = doubleArrayList.elements();
return doubleArrayListElements.length == doubleArrayList.size() ? doubleArrayListElements
: doubleArrayList.toDoubleArray();
} | @Test
public void testToDoubleArray() {
// Test empty list
DoubleArrayList doubleArrayList = new DoubleArrayList();
double[] doubleArray = ArrayListUtils.toDoubleArray(doubleArrayList);
assertEquals(doubleArray.length, 0);
// Test list with one element
doubleArrayList.add(1.0);
doubleArray = ArrayListUtils.toDoubleArray(doubleArrayList);
assertEquals(doubleArray.length, 1);
assertEquals(doubleArray[0], 1.0);
// Test list with multiple elements
doubleArrayList.add(2.0);
doubleArrayList.add(3.0);
doubleArray = ArrayListUtils.toDoubleArray(doubleArrayList);
assertEquals(doubleArray.length, 3);
assertEquals(doubleArray[0], 1.0);
assertEquals(doubleArray[1], 2.0);
assertEquals(doubleArray[2], 3.0);
} |
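// A sketch of the zero-copy fast path above, assuming fastutil's
// DoubleArrayList: when the backing array is exactly full, elements() is
// returned as-is and the caller aliases the list's internal storage;
// otherwise a trimmed copy is made.
import it.unimi.dsi.fastutil.doubles.DoubleArrayList;

public class ZeroCopySketch {
    public static void main(String[] args) {
        double[] backing = {1.0, 2.0, 3.0};
        DoubleArrayList exact = DoubleArrayList.wrap(backing); // size == capacity
        System.out.println(ArrayListUtils.toDoubleArray(exact) == backing); // true
        DoubleArrayList growing = new DoubleArrayList(); // default capacity > size
        growing.add(1.0);
        System.out.println(
                ArrayListUtils.toDoubleArray(growing) == growing.elements()); // false
    }
}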
public final boolean checkIfExecuted(String input) {
return this.validator.isExecuted(Optional.of(ByteString.copyFromUtf8(input)));
} | @Test
public void checkIfExecuted_withByteString_executesValidator() {
TestValidatorIsCalledValidator testValidator = new TestValidatorIsCalledValidator();
Payload payload = new Payload("my-payload", testValidator, PAYLOAD_ATTRIBUTES, CONFIG);
payload.checkIfExecuted(ByteString.copyFromUtf8("my-input"));
assertTrue(testValidator.wasCalled);
} |
@VisibleForTesting
static List<String> collectColNamesFromSchema(Schema schema) {
List<String> result = new ArrayList<>();
Deque<String> visited = new LinkedList<>();
collectColNamesFromAvroSchema(schema, visited, result);
return result;
} | @Test
public void testCollectColumnNames() {
Schema simpleSchema = getSimpleSchema();
List<String> fieldNames = AvroInternalSchemaConverter.collectColNamesFromSchema(simpleSchema);
List<String> expectedOutput = getSimpleSchemaExpectedColumnNames();
assertEquals(expectedOutput.size(), fieldNames.size());
assertTrue(fieldNames.containsAll(expectedOutput));
Schema simpleSchemaWithNullable = getSimpleSchemaWithNullable();
fieldNames = AvroInternalSchemaConverter.collectColNamesFromSchema(simpleSchemaWithNullable);
expectedOutput = getSimpleSchemaExpectedColumnNames();
assertEquals(expectedOutput.size(), fieldNames.size());
assertTrue(fieldNames.containsAll(expectedOutput));
Schema complexSchemaSingleLevel = getComplexSchemaSingleLevel();
fieldNames = AvroInternalSchemaConverter.collectColNamesFromSchema(complexSchemaSingleLevel);
expectedOutput = getComplexSchemaSingleLevelExpectedColumnNames();
assertEquals(expectedOutput.size(), fieldNames.size());
assertTrue(fieldNames.containsAll(expectedOutput));
Schema deeplyNestedFieldSchema = getDeeplyNestedFieldSchema();
fieldNames = AvroInternalSchemaConverter.collectColNamesFromSchema(deeplyNestedFieldSchema);
expectedOutput = getDeeplyNestedFieldSchemaExpectedColumnNames();
assertEquals(expectedOutput.size(), fieldNames.size());
assertTrue(fieldNames.containsAll(expectedOutput));
} |
@Override
public Graph<Entity> resolveForInstallation(Entity entity, Map<String, ValueReference> parameters, Map<EntityDescriptor, Entity> entities) {
if (entity instanceof EntityV1) {
return resolveForInstallationV1((EntityV1) entity, parameters, entities);
} else {
throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
}
} | @Test
@MongoDBFixtures("EventDefinitionFacadeTest.json")
public void resolveForInstallation() {
EntityV1 eventEntityV1 = createTestEntity();
final NotificationEntity notificationEntity = NotificationEntity.builder()
.title(ValueReference.of("title"))
.description(ValueReference.of("description"))
.config(HttpEventNotificationConfigEntity.builder()
.url(ValueReference.of("http://url")).build())
.build();
final JsonNode data = objectMapper.convertValue(notificationEntity, JsonNode.class);
final EntityV1 notificationV1 = EntityV1.builder()
.data(data)
.id(ModelId.of("123123"))
.type(ModelTypes.EVENT_DEFINITION_V1)
.build();
final EntityDescriptor entityDescriptor = EntityDescriptor.create("123123", ModelTypes.NOTIFICATION_V1);
Map<String, ValueReference> parameters = ImmutableMap.of();
Map<EntityDescriptor, Entity> entities = ImmutableMap.of(entityDescriptor, notificationV1);
Graph<Entity> graph = facade.resolveForInstallation(eventEntityV1, parameters, entities);
assertThat(graph).isNotNull();
Set<Entity> expectedNodes = ImmutableSet.of(eventEntityV1, notificationV1);
assertThat(graph.nodes()).isEqualTo(expectedNodes);
} |
@Override
protected synchronized void modifyConnectorOffsets(String connName, Map<Map<String, ?>, Map<String, ?>> offsets, Callback<Message> cb) {
if (!modifyConnectorOffsetsChecks(connName, cb)) {
return;
}
worker.modifyConnectorOffsets(connName, configState.connectorConfig(connName), offsets, cb);
} | @Test
public void testAlterConnectorOffsets() throws Exception {
initialize(false);
ArgumentCaptor<Callback<Message>> workerCallbackCapture = ArgumentCaptor.forClass(Callback.class);
Message msg = new Message("The offsets for this connector have been altered successfully");
doAnswer(invocation -> {
workerCallbackCapture.getValue().onCompletion(null, msg);
return null;
}).when(worker).modifyConnectorOffsets(eq(CONNECTOR_NAME), eq(connectorConfig(SourceSink.SOURCE)), any(Map.class), workerCallbackCapture.capture());
Map<String, String> connectorConfig = connectorConfig(SourceSink.SOURCE);
herder.configState = new ClusterConfigState(
10,
null,
Collections.singletonMap(CONNECTOR_NAME, 0),
Collections.singletonMap(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)),
Collections.singletonMap(CONNECTOR_NAME, TargetState.STOPPED),
Collections.emptyMap(),
Collections.emptyMap(),
Collections.emptyMap(),
Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)),
Collections.emptySet(),
Collections.emptySet()
);
FutureCallback<Message> alterOffsetsCallback = new FutureCallback<>();
herder.alterConnectorOffsets(CONNECTOR_NAME,
Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")),
alterOffsetsCallback);
assertEquals(msg, alterOffsetsCallback.get(1000, TimeUnit.MILLISECONDS));
} |
public long getBacklogBytes(String streamName, Instant countSince)
throws TransientKinesisException {
return getBacklogBytes(streamName, countSince, new Instant());
} | @Test
public void shouldNotCallCloudWatchWhenSpecifiedPeriodTooShort() throws Exception {
Instant countSince = new Instant("2017-04-06T10:00:00.000Z");
Instant countTo = new Instant("2017-04-06T10:00:02.000Z");
long backlogBytes = underTest.getBacklogBytes(STREAM, countSince, countTo);
assertThat(backlogBytes).isEqualTo(0L);
verifyZeroInteractions(cloudWatch);
} |
@Override
public synchronized void write(int b) throws IOException {
mUfsOutStream.write(b);
mBytesWritten++;
} | @Test
public void writeOffset() throws IOException, AlluxioException {
int bytesToWrite = CHUNK_SIZE * 5 + CHUNK_SIZE / 2;
int offset = CHUNK_SIZE / 3;
AlluxioURI ufsPath = getUfsPath();
try (FileOutStream outStream = mFileSystem.createFile(ufsPath)) {
byte[] array = BufferUtils.getIncreasingByteArray(bytesToWrite + offset);
outStream.write(array, offset, bytesToWrite);
}
verifyIncreasingBytesWritten(ufsPath, offset, bytesToWrite);
} |
public String getName() {
return name;
} | @Test
public void testGetName() throws Exception {
assertNull( info.getName() );
info.setName( "name" );
assertEquals( "name", info.getName() );
} |
public static void boundsCheck(int capacity, int index, int length) {
if (capacity < 0 || index < 0 || length < 0 || (index > (capacity - length))) {
throw new IndexOutOfBoundsException(String.format("index=%d, length=%d, capacity=%d", index, length, capacity));
}
} | @Test(expected = IndexOutOfBoundsException.class)
public void boundsCheck_whenCapacitySmallerThanZero() {
ArrayUtils.boundsCheck(-1, 0, 0);
} |
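// A boundary sketch of the check above: an access of `length` items starting
// at `index` must fit inside `capacity`, so index == capacity - length is the
// last legal starting point.
public class BoundsCheckSketch {
    public static void main(String[] args) {
        ArrayUtils.boundsCheck(10, 7, 3); // ok: touches items 7, 8, 9
        try {
            ArrayUtils.boundsCheck(10, 8, 3); // item 10 would be out of range
        } catch (IndexOutOfBoundsException expected) {
            System.out.println(expected.getMessage()); // index=8, length=3, capacity=10
        }
    }
}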
@SuppressWarnings("unchecked")
public V replace(final int key, final V value)
{
final V val = (V)mapNullValue(value);
requireNonNull(val, "value cannot be null");
final int[] keys = this.keys;
final Object[] values = this.values;
@DoNotSub final int mask = values.length - 1;
@DoNotSub int index = Hashing.hash(key, mask);
Object oldValue;
while (null != (oldValue = values[index]))
{
if (key == keys[index])
{
values[index] = val;
break;
}
index = ++index & mask;
}
return unmapNullValue(oldValue);
} | @Test
void replaceThrowsNullPointerExceptionIfNewValueIsNull()
{
final NullPointerException exception =
assertThrowsExactly(NullPointerException.class, () -> intToObjectMap.replace(42, "abc", null));
assertEquals("value cannot be null", exception.getMessage());
} |
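// A minimal open-addressing sketch of the probe loop above (illustrative, not
// Agrona's code): starting from the hashed slot, step forward with wrap-around
// until the key or an empty slot is found; the power-of-two mask stands in
// for a modulo.
public class LinearProbeSketch {
    static final int[] keys = new int[8];
    static final Object[] values = new Object[8];

    static Object replace(int key, Object value) {
        int mask = values.length - 1;
        int index = (key * 0x9E3779B9) & mask; // any int mixing function works
        Object old;
        while ((old = values[index]) != null) {
            if (keys[index] == key) {
                values[index] = value; // found: swap in the new value
                return old;
            }
            index = (index + 1) & mask; // collision: probe the next slot
        }
        return null; // key absent: nothing replaced
    }
}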
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if(!(other instanceof HollowMapSchema))
return false;
HollowMapSchema otherSchema = (HollowMapSchema)other;
if(!getName().equals(otherSchema.getName()))
return false;
if(!getKeyType().equals(otherSchema.getKeyType()))
return false;
if(!getValueType().equals(otherSchema.getValueType()))
return false;
return isNullableObjectEquals(hashKey, otherSchema.getHashKey());
} | @Test
public void testEquals() {
{
HollowMapSchema s1 = new HollowMapSchema("Test", "TypeA", "TypeB");
HollowMapSchema s2 = new HollowMapSchema("Test", "TypeA", "TypeB");
Assert.assertEquals(s1, s2);
}
{
HollowMapSchema s1 = new HollowMapSchema("Test", "TypeA", "TypeB");
HollowMapSchema s2 = new HollowMapSchema("Test2", "TypeA", "TypeB");
Assert.assertNotEquals(s1, s2);
}
{
HollowMapSchema s1 = new HollowMapSchema("Test", "TypeA", "TypeB");
HollowMapSchema s2 = new HollowMapSchema("Test", "TypeB", "TypeB");
Assert.assertNotEquals(s1, s2);
}
{
HollowMapSchema s1 = new HollowMapSchema("Test", "TypeA", "TypeB");
HollowMapSchema s2 = new HollowMapSchema("Test", "TypeA", "TypeC");
Assert.assertNotEquals(s1, s2);
}
} |
public static StringBuilder appendWithBlankCheck(String str, String defaultValue, StringBuilder appender) {
if (isNotBlank(str)) {
appender.append(str);
} else {
appender.append(defaultValue);
}
return appender;
} | @Test
public void testAppendWithBlankCheck() {
Assert.assertEquals("bar", EagleEyeCoreUtils.appendWithBlankCheck(
null, "bar", new StringBuilder()).toString());
Assert.assertEquals("foo", EagleEyeCoreUtils.appendWithBlankCheck(
"foo", "bar", new StringBuilder()).toString());
} |
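// A sketch of the blank semantics above, assuming isNotBlank treats
// whitespace-only strings as blank (the usual commons-lang meaning): spaces
// fall back to the default just like null or "", unlike an isEmpty-style check.
public class BlankCheckSketch {
    public static void main(String[] args) {
        System.out.println(EagleEyeCoreUtils.appendWithBlankCheck(
                "   ", "bar", new StringBuilder())); // bar
        System.out.println(EagleEyeCoreUtils.appendWithBlankCheck(
                "", "bar", new StringBuilder()));    // bar
    }
}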
public static void validate(final String cronEntry) throws MessageFormatException {
List<String> list = tokenize(cronEntry);
List<CronEntry> entries = buildCronEntries(list);
for (CronEntry e : entries) {
validate(e);
}
} | @Test
public void testValidate() {
try {
CronParser.validate("30 08 10 06 ? ");
CronParser.validate("30 08 ? 06 5 ");
CronParser.validate("30 08 ? 06 * ");
CronParser.validate("* * * * * ");
CronParser.validate("* * * * 1-6 ");
CronParser.validate("* * * * 1,2,5 ");
CronParser.validate("*/10 0-4,8-12 * * 1-2,3-6/2 ");
} catch (Exception e) {
fail("Should be valid ");
}
try {
CronParser.validate("61 08 10 06 * ");
fail("Should not be valid ");
} catch (Exception e) {
}
try {
CronParser.validate("61 08 06 * ");
fail("Should not be valid ");
} catch (Exception e) {
}
} |
public static <
K extends @Nullable Object,
InputT extends @Nullable Object,
AccumT extends @Nullable Object,
OutputT extends @Nullable Object>
MultiStepCombine<K, InputT, AccumT, OutputT> of(
CombineFn<InputT, AccumT, OutputT> combineFn, Coder<KV<K, OutputT>> outputCoder) {
return new MultiStepCombine<>(combineFn, outputCoder);
} | @Test
public void testMultiStepCombineTimestampCombiner() {
TimestampCombiner combiner = TimestampCombiner.LATEST;
PCollection<KV<String, Long>> combined =
pipeline
.apply(
Create.timestamped(
TimestampedValue.of(KV.of("foo", 4L), new Instant(1L)),
TimestampedValue.of(KV.of("foo", 1L), new Instant(4L)),
TimestampedValue.of(KV.of("bazzle", 4L), new Instant(4L)),
TimestampedValue.of(KV.of("foo", 12L), new Instant(12L))))
.apply(
Window.<KV<String, Long>>into(FixedWindows.of(Duration.millis(5L)))
.withTimestampCombiner(combiner))
.apply(Combine.perKey(new MultiStepCombineFn()));
PCollection<KV<String, TimestampedValue<Long>>> reified =
combined.apply(
ParDo.of(
new DoFn<KV<String, Long>, KV<String, TimestampedValue<Long>>>() {
@ProcessElement
public void reifyTimestamp(ProcessContext context) {
context.output(
KV.of(
context.element().getKey(),
TimestampedValue.of(
context.element().getValue(), context.timestamp())));
}
}));
PAssert.that(reified)
.containsInAnyOrder(
KV.of("foo", TimestampedValue.of(5L, new Instant(4L))),
KV.of("bazzle", TimestampedValue.of(4L, new Instant(4L))),
KV.of("foo", TimestampedValue.of(12L, new Instant(12L))));
pipeline.run();
} |
public PointBuilder<T> latLong(LatLong latitudeAndLongitude) {
this.latitude = latitudeAndLongitude.latitude();
this.longitude = latitudeAndLongitude.longitude();
return this;
} | @Test
public void testLatLong_Double_Double_nullLong() {
assertThrows(
NullPointerException.class,
() -> Point.builder().latLong(1.23, null)
);
} |
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
} | @Test
public void testAppNameIsSetWhenUsingAs() {
TestPipelineOptions options = PipelineOptionsFactory.as(TestPipelineOptions.class);
assertEquals(
PipelineOptionsFactoryTest.class.getSimpleName(),
options.as(ApplicationNameOptions.class).getAppName());
} |
@Override
public Mono<UserDetails> updatePassword(UserDetails user, String newPassword) {
return userService.updatePassword(user.getUsername(), newPassword)
.map(u -> withNewPassword(user, newPassword));
} | @Test
void shouldReturnErrorWhenFailedToUpdatePassword() {
var fakeUser = createFakeUserDetails();
var exception = new RuntimeException("failed to update password");
when(userService.updatePassword("faker", "new-fake-password")).thenReturn(
Mono.error(exception)
);
var userDetailsMono = userDetailService.updatePassword(fakeUser, "new-fake-password");
StepVerifier.create(userDetailsMono)
.expectSubscription()
.expectErrorMatches(throwable -> throwable == exception)
.verify();
verify(userService, times(1)).updatePassword(eq("faker"), eq("new-fake-password"));
} |
@Override
public boolean isInfinite() {
return this.equals(INFINITY)
|| (this.cpu == Double.POSITIVE_INFINITY)
|| (this.cpuRate == Double.POSITIVE_INFINITY);
} | @Test
public void testDefaultConstructorIsInfinite() {
BeamCostModel cost = BeamCostModel.FACTORY.makeCost(1, 1, 1);
Assert.assertTrue(cost.isInfinite());
} |
public void print(PrintStream out, String prefix)
{
print(out, prefix, data, data.length);
} | @Test
public void testPrint()
{
byte[] buf = new byte[10];
Arrays.fill(buf, (byte) 0xAA);
ZData data = new ZData(buf);
data.print(System.out, "ZData: ");
} |