focal_method | test_case |
---|---|
@Override
public Object proceed() throws Throwable {
try {
return method.invoke(delegate, args);
} catch (Throwable t) {
if (t instanceof InvocationTargetException) {
t = t.getCause();
}
throw t;
}
} | @Test
void proceed() throws Throwable {
//given
MyMockMethodInvocation myMockMethodInvocation = new MyMockMethodInvocation();
Method method = MyMockMethodInvocation.class.getDeclaredMethod("proceed", int.class);
//when
DefaultInvocationWrapper invocationWrapper = new DefaultInvocationWrapper(myMockMethodInvocation, myMockMethodInvocation, method, new Object[]{1});
//then
Assertions.assertEquals(1, invocationWrapper.proceed());
//when
DefaultInvocationWrapper invocationWrapperThrowException = new DefaultInvocationWrapper(myMockMethodInvocation, myMockMethodInvocation, method, new Object[]{0});
//then should throw the unwrapped (raw) exception, not InvocationTargetException
Assertions.assertThrows(ArithmeticException.class, () -> invocationWrapperThrowException.proceed());
} |
static int crc(byte[] buf, int offset, int len)
{
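// Per the PNG spec, the final CRC is the one's complement of the running CRC register.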
return ~updateCrc(buf, offset, len);
} | @Test
void testCRCImpl()
{
byte[] b1 = "Hello World!".getBytes();
assertEquals(472456355, PNGConverter.crc(b1, 0, b1.length));
assertEquals(-632335482, PNGConverter.crc(b1, 2, b1.length - 4));
} |
public void setAcceptHeaders(List<String> headers) {
kp.put("acceptHeaders",headers);
} | @Test
public void testAcceptHeaders() throws Exception {
List<String> headers = Arrays.asList("header1: value1", "header2: value2");
fetcher().setAcceptHeaders(headers);
CrawlURI curi = makeCrawlURI("http://localhost:7777/");
fetcher().process(curi);
runDefaultChecks(curi, "acceptHeaders");
// special checks for this test
String requestString = httpRequestString(curi);
assertFalse(requestString.contains("Accept:"));
for (String h: headers) {
assertTrue(requestString.contains(h));
}
} |
public abstract OmemoKeyUtil<T_IdKeyPair, T_IdKey, T_PreKey, T_SigPreKey, T_Sess, T_ECPub, T_Bundle> keyUtil(); | @Test
public void keyUtilNotNull() {
assertNotNull(store.keyUtil());
} |
@VisibleForTesting
void saveApprove(Long userId, Integer userType, String clientId,
String scope, Boolean approved, LocalDateTime expireTime) {
// Try the update first
OAuth2ApproveDO approveDO = new OAuth2ApproveDO().setUserId(userId).setUserType(userType)
.setClientId(clientId).setScope(scope).setApproved(approved).setExpiresTime(expireTime);
if (oauth2ApproveMapper.update(approveDO) == 1) {
return;
}
// Update failed, meaning the row does not exist yet, so insert it
oauth2ApproveMapper.insert(approveDO);
} | @Test
public void testSaveApprove_update() {
// mock data
OAuth2ApproveDO approve = randomPojo(OAuth2ApproveDO.class);
oauth2ApproveMapper.insert(approve);
// prepare parameters
Long userId = approve.getUserId();
Integer userType = approve.getUserType();
String clientId = approve.getClientId();
String scope = approve.getScope();
Boolean approved = randomBoolean();
LocalDateTime expireTime = LocalDateTime.ofInstant(randomDay(1, 30).toInstant(), ZoneId.systemDefault());
// mock behaviors
// invoke the method under test
oauth2ApproveService.saveApprove(userId, userType, clientId,
scope, approved, expireTime);
// assert
List<OAuth2ApproveDO> result = oauth2ApproveMapper.selectList();
assertEquals(1, result.size());
assertEquals(approve.getId(), result.get(0).getId());
assertEquals(userId, result.get(0).getUserId());
assertEquals(userType, result.get(0).getUserType());
assertEquals(clientId, result.get(0).getClientId());
assertEquals(scope, result.get(0).getScope());
assertEquals(approved, result.get(0).getApproved());
assertEquals(expireTime, result.get(0).getExpiresTime());
} |
@Override
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
} | @Test
public void noCommas() {
// Maybe we should include commas, but we don't yet, so make sure we don't under GWT, either.
expectFailureWhenTestingThat(array(10000.0)).isEqualTo(array(20000.0));
assertFailureValue("expected", "[20000.0]");
assertFailureValue("but was", "[10000.0]");
} |
@Nonnull
public <K, V> KafkaProducer<K, V> getProducer(@Nullable String transactionalId) {
if (getConfig().isShared()) {
if (transactionalId != null) {
throw new IllegalArgumentException("Cannot use transactions with shared "
+ "KafkaProducer for DataConnection" + getConfig().getName());
}
retain();
//noinspection unchecked
return (KafkaProducer<K, V>) producerSupplier.get();
} else {
if (transactionalId != null) {
@SuppressWarnings({"rawtypes", "unchecked"})
Map<String, Object> castProperties = (Map) getConfig().getProperties();
Map<String, Object> copy = new HashMap<>(castProperties);
copy.put("transactional.id", transactionalId);
return new KafkaProducer<>(copy);
} else {
return new KafkaProducer<>(getConfig().getProperties());
}
}
} | @Test
public void releasing_data_connection_does_not_close_shared_producer() {
kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport);
Producer<Object, Object> producer = kafkaDataConnection.getProducer(null);
kafkaDataConnection.release();
try {
producer.partitionsFor("my-topic");
} catch (Exception e) {
fail("Should not throw exception", e);
}
} |
public <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
TypeReference<T> responseFormat) {
return httpRequest(url, method, headers, requestBodyData, responseFormat, null, null);
} | @Test
public void testNullUrl() {
RestClient client = spy(new RestClient(null));
assertThrows(NullPointerException.class, () -> {
client.httpRequest(
null,
TEST_METHOD,
null,
TEST_DTO,
TEST_TYPE,
MOCK_SECRET_KEY,
TEST_SIGNATURE_ALGORITHM
);
});
} |
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
return fromXmlPartial(toInputStream(partial, UTF_8), o);
} | @Test
void shouldLoadShallowFlagFromGitPartial() throws Exception {
String gitPartial = "<git url='file:///tmp/testGitRepo/project1' shallowClone=\"true\" />";
GitMaterialConfig gitMaterial = xmlLoader.fromXmlPartial(gitPartial, GitMaterialConfig.class);
assertThat(gitMaterial.isShallowClone()).isTrue();
} |
@Override
public String getDataSource() {
return DataSourceConstant.MYSQL;
} | @Test
void testGetDataSource() {
String dataSource = groupCapacityMapperByMysql.getDataSource();
assertEquals(DataSourceConstant.MYSQL, dataSource);
} |
@Override
public V get(final K key) {
Objects.requireNonNull(key);
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
for (final ReadOnlyKeyValueStore<K, V> store : stores) {
try {
final V result = store.get(key);
if (result != null) {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
return null;
} | @Test
public void shouldFindValueForKeyWhenMultiStores() {
final KeyValueStore<String, String> cache = newStoreInstance();
stubProviderTwo.addStore(storeName, cache);
cache.put("key-two", "key-two-value");
stubOneUnderlying.put("key-one", "key-one-value");
assertEquals("key-two-value", theStore.get("key-two"));
assertEquals("key-one-value", theStore.get("key-one"));
} |
public T multiply(BigDecimal multiplier) {
return create(value.multiply(multiplier));
} | @Test
void testMultiplyInteger() {
final Resource resource = new TestResource(0.3);
final int by = 2;
assertTestResourceValueEquals(0.6, resource.multiply(by));
} |
public RestResponse() {
this.timestamp = DateKit.nowUnix();
} | @Test
public void testRestResponse() {
RestResponse<String> restResponse = new RestResponse<>();
Assert.assertTrue(restResponse.getTimestamp() > 0);
RestResponse restResponse2 = RestResponse.ok();
Assert.assertTrue(restResponse2.isSuccess());
RestResponse restResponse3 = RestResponse.ok("biezhi");
Assert.assertTrue(restResponse3.isSuccess());
Assert.assertEquals("biezhi", restResponse3.getPayload());
} |
@Override
public Collection<SchemaMetaData> load(final MetaDataLoaderMaterial material) throws SQLException {
Collection<TableMetaData> tableMetaDataList = new LinkedList<>();
Map<String, Collection<ColumnMetaData>> columnMetaDataMap = loadColumnMetaDataMap(material.getDataSource(), material.getActualTableNames());
Collection<String> viewNames = columnMetaDataMap.isEmpty() ? Collections.emptySet() : loadViewNames(material.getDataSource(), columnMetaDataMap.keySet());
Map<String, Collection<IndexMetaData>> indexMetaDataMap = columnMetaDataMap.isEmpty() ? Collections.emptyMap() : loadIndexMetaData(material.getDataSource(), columnMetaDataMap.keySet());
Map<String, Collection<ConstraintMetaData>> constraintMetaDataMap =
columnMetaDataMap.isEmpty() ? Collections.emptyMap() : loadConstraintMetaDataMap(material.getDataSource(), columnMetaDataMap.keySet());
for (Entry<String, Collection<ColumnMetaData>> entry : columnMetaDataMap.entrySet()) {
Collection<IndexMetaData> indexMetaDataList = indexMetaDataMap.getOrDefault(entry.getKey(), Collections.emptyList());
Collection<ConstraintMetaData> constraintMetaDataList = constraintMetaDataMap.getOrDefault(entry.getKey(), Collections.emptyList());
tableMetaDataList.add(
new TableMetaData(entry.getKey(), entry.getValue(), indexMetaDataList, constraintMetaDataList, viewNames.contains(entry.getKey()) ? TableType.VIEW : TableType.TABLE));
}
return Collections.singletonList(new SchemaMetaData(material.getDefaultSchemaName(), tableMetaDataList));
} | @Test
void assertLoadWithTables() throws SQLException {
DataSource dataSource = mockDataSource();
ResultSet resultSet = mockTableMetaDataResultSet();
when(dataSource.getConnection().prepareStatement("SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE, COLUMN_KEY, EXTRA, COLLATION_NAME, ORDINAL_POSITION, COLUMN_TYPE, IS_NULLABLE "
+ "FROM information_schema.columns WHERE TABLE_SCHEMA=? AND TABLE_NAME IN ('tbl') ORDER BY ORDINAL_POSITION")
.executeQuery()).thenReturn(resultSet);
ResultSet indexResultSet = mockIndexMetaDataResultSet();
when(dataSource.getConnection().prepareStatement("SELECT TABLE_NAME, INDEX_NAME, NON_UNIQUE, COLUMN_NAME FROM information_schema.statistics WHERE TABLE_SCHEMA=? and TABLE_NAME IN ('tbl') "
+ "ORDER BY NON_UNIQUE, INDEX_NAME, SEQ_IN_INDEX")
.executeQuery()).thenReturn(indexResultSet);
assertTableMetaDataMap(dialectMetaDataLoader.load(new MetaDataLoaderMaterial(Collections.singletonList("tbl"), dataSource, new MySQLDatabaseType(), "sharding_db")));
} |
public LinkedHashMap<String, String> getKeyPropertyList(ObjectName mbeanName) {
LinkedHashMap<String, String> keyProperties = keyPropertiesPerBean.get(mbeanName);
if (keyProperties == null) {
keyProperties = new LinkedHashMap<>();
String properties = mbeanName.getKeyPropertyListString();
Matcher match = PROPERTY_PATTERN.matcher(properties);
while (match.lookingAt()) {
keyProperties.put(match.group(1), match.group(2));
properties = properties.substring(match.end());
if (properties.startsWith(",")) {
properties = properties.substring(1);
}
match.reset(properties);
}
keyPropertiesPerBean.put(mbeanName, keyProperties);
}
return keyProperties;
} | @Test
public void testSimpleObjectName() throws Throwable {
JmxMBeanPropertyCache testCache = new JmxMBeanPropertyCache();
LinkedHashMap<String, String> parameterList =
testCache.getKeyPropertyList(
new ObjectName("com.organisation:name=value,name2=value2"));
assertSameElementsAndOrder(parameterList, "name", "value", "name2", "value2");
} |
@Override
public void open(Configuration parameters) throws Exception {
this.rateLimiterTriggeredCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
this.concurrentRunThrottledCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
this.nothingToTriggerCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
this.triggerCounters =
taskNames.stream()
.map(
name ->
getRuntimeContext()
.getMetricGroup()
.addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
.counter(TableMaintenanceMetrics.TRIGGERED))
.collect(Collectors.toList());
this.nextEvaluationTimeState =
getRuntimeContext()
.getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
this.accumulatedChangesState =
getRuntimeContext()
.getListState(
new ListStateDescriptor<>(
"triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
this.lastTriggerTimesState =
getRuntimeContext()
.getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
tableLoader.open();
} | @Test
void testPosDeleteRecordCount() throws Exception {
TriggerManager manager =
manager(
sql.tableLoader(TABLE_NAME),
new TriggerEvaluator.Builder().posDeleteRecordCount(3).build());
try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
harness(manager)) {
testHarness.open();
addEventAndCheckResult(
testHarness, TableChange.builder().posDeleteRecordCount(1L).build(), 0);
addEventAndCheckResult(
testHarness, TableChange.builder().posDeleteRecordCount(2L).build(), 1);
addEventAndCheckResult(
testHarness, TableChange.builder().posDeleteRecordCount(5L).build(), 2);
// No trigger in this case
addEventAndCheckResult(
testHarness, TableChange.builder().posDeleteRecordCount(1L).build(), 2);
addEventAndCheckResult(
testHarness, TableChange.builder().posDeleteRecordCount(2L).build(), 3);
}
} |
public static Type convertType(TypeInfo typeInfo) {
switch (typeInfo.getOdpsType()) {
case BIGINT:
return Type.BIGINT;
case INT:
return Type.INT;
case SMALLINT:
return Type.SMALLINT;
case TINYINT:
return Type.TINYINT;
case FLOAT:
return Type.FLOAT;
case DECIMAL:
DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
case DOUBLE:
return Type.DOUBLE;
case CHAR:
CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo;
return ScalarType.createCharType(charTypeInfo.getLength());
case VARCHAR:
VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo;
return ScalarType.createVarcharType(varcharTypeInfo.getLength());
case STRING:
case JSON:
return ScalarType.createDefaultCatalogString();
case BINARY:
return Type.VARBINARY;
case BOOLEAN:
return Type.BOOLEAN;
case DATE:
return Type.DATE;
case TIMESTAMP:
case DATETIME:
return Type.DATETIME;
case MAP:
MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()),
convertType(mapTypeInfo.getValueTypeInfo()));
case ARRAY:
ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo;
return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo()));
case STRUCT:
StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
List<Type> fieldTypeList =
structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType)
.collect(Collectors.toList());
return new StructType(fieldTypeList);
default:
return Type.VARCHAR;
}
} | @Test
public void testConvertTypeCaseBoolean() {
TypeInfo typeInfo = TypeInfoFactory.BOOLEAN;
Type result = EntityConvertUtils.convertType(typeInfo);
assertEquals(Type.BOOLEAN, result);
} |
public static Http2Headers toHttp2Headers(HttpMessage in, boolean validateHeaders) {
HttpHeaders inHeaders = in.headers();
final Http2Headers out = new DefaultHttp2Headers(validateHeaders, inHeaders.size());
if (in instanceof HttpRequest) {
HttpRequest request = (HttpRequest) in;
String host = inHeaders.getAsString(HttpHeaderNames.HOST);
if (isOriginForm(request.uri()) || isAsteriskForm(request.uri())) {
out.path(new AsciiString(request.uri()));
setHttp2Scheme(inHeaders, out);
} else {
URI requestTargetUri = URI.create(request.uri());
out.path(toHttp2Path(requestTargetUri));
// Take from the request-line if HOST header was empty
host = isNullOrEmpty(host) ? requestTargetUri.getAuthority() : host;
setHttp2Scheme(inHeaders, requestTargetUri, out);
}
setHttp2Authority(host, out);
out.method(request.method().asciiName());
} else if (in instanceof HttpResponse) {
HttpResponse response = (HttpResponse) in;
out.status(response.status().codeAsText());
}
// Add the HTTP headers which have not been consumed above
toHttp2Headers(inHeaders, out);
return out;
} | @Test
public void handlesRequest() throws Exception {
boolean validateHeaders = true;
HttpRequest msg = new DefaultHttpRequest(
HttpVersion.HTTP_1_1, HttpMethod.GET, "http://example.com/path/to/something", validateHeaders);
HttpHeaders inHeaders = msg.headers();
inHeaders.add(CONNECTION, "foo, bar");
inHeaders.add("hello", "world");
Http2Headers out = HttpConversionUtil.toHttp2Headers(msg, validateHeaders);
assertEquals(new AsciiString("/path/to/something"), out.path());
assertEquals(new AsciiString("http"), out.scheme());
assertEquals(new AsciiString("example.com"), out.authority());
assertEquals(HttpMethod.GET.asciiName(), out.method());
assertEquals("world", out.get("hello"));
} |
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
@Nullable Object firstExpected,
@Nullable Object secondExpected,
@Nullable Object @Nullable ... restOfExpected) {
return containsAtLeastElementsIn(accumulate(firstExpected, secondExpected, restOfExpected));
} | @Test
public void iterableContainsAtLeastFailsWithSameToStringAndHomogeneousList() {
expectFailureWhenTestingThat(asList(1L, 2L)).containsAtLeast(1, 2);
assertFailureValue("missing (2)", "1, 2 (java.lang.Integer)");
assertFailureValue("though it did contain (2)", "1, 2 (java.lang.Long)");
} |
public ChannelFuture removeAndWriteAll() {
assert executor.inEventLoop();
if (isEmpty()) {
return null;
}
ChannelPromise p = invoker.newPromise();
PromiseCombiner combiner = new PromiseCombiner(executor);
try {
// It is possible for some of the written promises to trigger more writes. The new writes
// will "revive" the queue, so we need to write them up until the queue is empty.
for (PendingWrite write = head; write != null; write = head) {
head = tail = null;
size = 0;
bytes = 0;
while (write != null) {
PendingWrite next = write.next;
Object msg = write.msg;
ChannelPromise promise = write.promise;
recycle(write, false);
if (!(promise instanceof VoidChannelPromise)) {
combiner.add(promise);
}
invoker.write(msg, promise);
write = next;
}
}
combiner.finish(p);
} catch (Throwable cause) {
p.setFailure(cause);
}
assertEmpty();
return p;
} | @Test
public void testRemoveAndWriteAll() {
assertWrite(new TestHandler() {
@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
assertFalse(ctx.channel().isWritable(), "Should not be writable anymore");
ChannelFuture future = queue.removeAndWriteAll();
future.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) {
assertQueueEmpty(queue);
}
});
super.flush(ctx);
}
}, 3);
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void emptyList() {
String inputExpression = "[]";
BaseNode list = parse( inputExpression );
assertThat( list).isInstanceOf(ListNode.class);
assertThat( list.getResultType()).isEqualTo(BuiltInType.LIST);
assertThat( list.getText()).isEqualTo(inputExpression);
ListNode ln = (ListNode) list;
assertThat( ln.getElements()).isEmpty();
} |
public static FromEndOfWindow pastEndOfWindow() {
return new FromEndOfWindow();
} | @Test
public void testEarlyAndLateOnMergeRewinds() throws Exception {
tester =
TriggerStateMachineTester.forTrigger(
AfterWatermarkStateMachine.pastEndOfWindow()
.withEarlyFirings(AfterPaneStateMachine.elementCountAtLeast(100))
.withLateFirings(AfterPaneStateMachine.elementCountAtLeast(1)),
Sessions.withGapDuration(Duration.millis(10)));
tester.injectElements(1);
tester.injectElements(5);
IntervalWindow firstWindow = new IntervalWindow(new Instant(1), new Instant(11));
IntervalWindow secondWindow = new IntervalWindow(new Instant(5), new Instant(15));
IntervalWindow mergedWindow = new IntervalWindow(new Instant(1), new Instant(15));
// Finish the AfterWatermark.pastEndOfWindow() bit of the trigger in only the first window
tester.advanceInputWatermark(new Instant(11));
assertTrue(tester.shouldFire(firstWindow));
assertFalse(tester.shouldFire(secondWindow));
tester.fireIfShouldFire(firstWindow);
// Confirm that we are on the late trigger by probing
assertFalse(tester.shouldFire(firstWindow));
tester.injectElements(1);
assertTrue(tester.shouldFire(firstWindow));
tester.fireIfShouldFire(firstWindow);
// Merging should re-activate the early trigger in the merged window
tester.mergeWindows();
// Confirm that we are not on the second trigger by probing
assertFalse(tester.shouldFire(mergedWindow));
tester.injectElements(1);
assertFalse(tester.shouldFire(mergedWindow));
// And confirm that advancing the watermark fires again
tester.advanceInputWatermark(new Instant(15));
assertTrue(tester.shouldFire(mergedWindow));
} |
public static Sensor hitRatioSensor(final StreamsMetricsImpl streamsMetrics,
final String threadId,
final String taskName,
final String storeName) {
final Sensor hitRatioSensor;
final String hitRatioName;
hitRatioName = HIT_RATIO;
hitRatioSensor = streamsMetrics.cacheLevelSensor(
threadId,
taskName,
storeName,
hitRatioName,
Sensor.RecordingLevel.DEBUG
);
addAvgAndMinAndMaxToSensor(
hitRatioSensor,
CACHE_LEVEL_GROUP,
streamsMetrics.cacheLevelTagMap(threadId, taskName, storeName),
hitRatioName,
HIT_RATIO_AVG_DESCRIPTION,
HIT_RATIO_MIN_DESCRIPTION,
HIT_RATIO_MAX_DESCRIPTION
);
return hitRatioSensor;
} | @Test
public void shouldGetHitRatioSensorWithBuiltInMetricsVersionCurrent() {
final String hitRatio = "hit-ratio";
when(streamsMetrics.cacheLevelSensor(THREAD_ID, TASK_ID, STORE_NAME, hitRatio, RecordingLevel.DEBUG)).thenReturn(expectedSensor);
when(streamsMetrics.cacheLevelTagMap(THREAD_ID, TASK_ID, STORE_NAME)).thenReturn(tagMap);
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor = NamedCacheMetrics.hitRatioSensor(streamsMetrics, THREAD_ID, TASK_ID, STORE_NAME);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addAvgAndMinAndMaxToSensor(
expectedSensor,
StreamsMetricsImpl.CACHE_LEVEL_GROUP,
tagMap,
hitRatio,
HIT_RATIO_AVG_DESCRIPTION,
HIT_RATIO_MIN_DESCRIPTION,
HIT_RATIO_MAX_DESCRIPTION
)
);
assertThat(sensor, is(expectedSensor));
}
} |
public static short getLocalFileMode(String filePath) throws IOException {
Set<PosixFilePermission> permission =
Files.readAttributes(Paths.get(filePath), PosixFileAttributes.class).permissions();
return translatePosixPermissionToMode(permission);
} | @Test
public void getLocalFileMode() throws IOException {
File tmpDir = mTestFolder.newFolder("dir");
File tmpFile777 = mTestFolder.newFile("dir/0777");
tmpFile777.setReadable(true, false /* owner only */);
tmpFile777.setWritable(true, false /* owner only */);
tmpFile777.setExecutable(true, false /* owner only */);
File tmpFile755 = mTestFolder.newFile("dir/0755");
tmpFile755.setReadable(true, false /* owner only */);
tmpFile755.setWritable(false, false /* owner only */);
tmpFile755.setExecutable(true, false /* owner only */);
tmpFile755.setWritable(true, true /* owner only */);
File tmpFile444 = mTestFolder.newFile("dir/0444");
tmpFile444.setReadOnly();
assertEquals((short) 0777, FileUtils.getLocalFileMode(tmpFile777.getPath()));
assertEquals((short) 0755, FileUtils.getLocalFileMode(tmpFile755.getPath()));
assertEquals((short) 0444, FileUtils.getLocalFileMode(tmpFile444.getPath()));
// Delete all of these.
FileUtils.deletePathRecursively(tmpDir.getAbsolutePath());
} |
public <T> void resetSerializer(Class<T> cls, Serializer<T> serializer) {
if (serializer == null) {
clearSerializer(cls);
} else {
setSerializer(cls, serializer);
}
} | @Test
public void testResetSerializer() {
Fury fury =
Fury.builder()
.withLanguage(Language.JAVA)
.withRefTracking(true)
.requireClassRegistration(false)
.build();
ClassResolver classResolver = fury.getClassResolver();
Assert.assertThrows(() -> Serializers.newSerializer(fury, Foo.class, ErrorSerializer.class));
Assert.assertNull(classResolver.getSerializer(Foo.class, false));
Assert.assertThrows(
() -> classResolver.createSerializerSafe(Foo.class, () -> new ErrorSerializer(fury)));
Assert.assertNull(classResolver.getSerializer(Foo.class, false));
} |
void fetchAndRunCommands() {
lastPollTime.set(clock.instant());
final List<QueuedCommand> commands = commandStore.getNewCommands(NEW_CMDS_TIMEOUT);
if (commands.isEmpty()) {
if (!commandTopicExists.get()) {
commandTopicDeleted = true;
}
return;
}
final List<QueuedCommand> compatibleCommands = checkForIncompatibleCommands(commands);
final Optional<QueuedCommand> terminateCmd =
findTerminateCommand(compatibleCommands, commandDeserializer);
if (terminateCmd.isPresent()) {
terminateCluster(terminateCmd.get().getAndDeserializeCommand(commandDeserializer));
return;
}
LOG.debug("Found {} new writes to command topic", compatibleCommands.size());
for (final QueuedCommand command : compatibleCommands) {
if (closed) {
return;
}
executeStatement(command);
}
} | @Test
public void shouldEarlyOutOnShutdown() {
// Given:
givenQueuedCommands(queuedCommand1, queuedCommand2);
doAnswer(closeRunner()).when(statementExecutor).handleStatement(queuedCommand1);
// When:
commandRunner.fetchAndRunCommands();
// Then:
verify(statementExecutor, never()).handleRestore(queuedCommand2);
} |
public String getManifestSourceUri(boolean useAbsolutePath) {
return new Path(getManifestFolder(useAbsolutePath).toString(), "*").toUri().toString();
} | @Test
public void getManifestSourceUri() {
ManifestFileWriter manifestFileWriter = ManifestFileWriter.builder().setMetaClient(metaClient).build();
String sourceUri = manifestFileWriter.getManifestSourceUri(false);
assertEquals(new Path(basePath, ".hoodie/manifest/*").toUri().toString(), sourceUri);
sourceUri = manifestFileWriter.getManifestSourceUri(true);
assertEquals(new Path(basePath, ".hoodie/absolute-path-manifest/*").toUri().toString(), sourceUri);
} |
public void recordAbortTxn(long duration) {
abortTxnSensor.record(duration);
} | @Test
public void shouldRecordTxAbortTime() {
// When:
producerMetrics.recordAbortTxn(METRIC_VALUE);
// Then:
assertMetricValue(TXN_ABORT_TIME_TOTAL);
} |
public Notification setFieldValue(String field, @Nullable String value) {
fields.put(field, value);
return this;
} | @Test
void equals_whenFieldsDontMatch_shouldReturnFalse() {
Notification notification1 = new Notification("type");
Notification notification2 = new Notification("type");
notification1.setFieldValue("key", "value1");
notification2.setFieldValue("key", "value2");
assertThat(notification1).isNotEqualTo(notification2);
} |
@Udf(description = "Returns the hex-encoded md5 hash of the input string")
public String md5(
@UdfParameter(description = "The input string") final String s
) {
if (s == null) {
return null;
}
return DigestUtils.md5Hex(s);
} | @Test
public void shouldReturnNullForNull() {
assertThat(udf.md5(null), is(nullValue()));
} |
public static Stream<Path> iterPaths(Path path) {
Deque<Path> parents = new ArrayDeque<>(path.getNameCount());
// Push parents to the front of the stack, so the "root" is at the front
Path next = path;
while (next != null) {
parents.addFirst(next);
next = next.getParent();
}
// now just iterate straight over them
return ImmutableList.copyOf(parents).stream();
} | @Test
void testEmpty() {
assertEquals(
paths(""),
MorePaths.iterPaths(Paths.get("")).collect(toList())
);
} |
@VisibleForTesting
AzureADToken getTokenUsingJWTAssertion(String clientAssertion) throws IOException {
return AzureADAuthenticator
.getTokenUsingJWTAssertion(authEndpoint, clientId, clientAssertion);
} | @Test
public void testTokenFetchWithTokenFileNotFound() throws Exception {
AzureADToken azureAdToken = new AzureADToken();
WorkloadIdentityTokenProvider tokenProvider = Mockito.spy(
new WorkloadIdentityTokenProvider(AUTHORITY, TENANT_ID, CLIENT_ID, TOKEN_FILE));
Mockito.doReturn(azureAdToken)
.when(tokenProvider).getTokenUsingJWTAssertion(TOKEN);
IOException ex = intercept(IOException.class, () -> {
tokenProvider.getToken();
});
Assertions.assertThat(ex.getMessage())
.describedAs("Exception should be thrown when the token file not found")
.contains("Error reading token file");
} |
@Override
public DescribeConsumerGroupsResult describeConsumerGroups(final Collection<String> groupIds,
final DescribeConsumerGroupsOptions options) {
SimpleAdminApiFuture<CoordinatorKey, ConsumerGroupDescription> future =
DescribeConsumerGroupsHandler.newFuture(groupIds);
DescribeConsumerGroupsHandler handler = new DescribeConsumerGroupsHandler(options.includeAuthorizedOperations(), logContext);
invokeDriver(handler, future, options.timeoutMs);
return new DescribeConsumerGroupsResult(future.all().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue)));
} | @Test
public void testDescribeConsumerGroupsWithAuthorizedOperationsOmitted() throws Exception {
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
// The first request sent will be a ConsumerGroupDescribe request. Let's
// fail it in order to fail back to using the classic version.
env.kafkaClient().prepareUnsupportedVersionResponse(
request -> request instanceof ConsumerGroupDescribeRequest);
DescribeGroupsResponseData data = new DescribeGroupsResponseData();
data.groups().add(DescribeGroupsResponse.groupMetadata(
GROUP_ID,
Errors.NONE,
"",
ConsumerProtocol.PROTOCOL_TYPE,
"",
Collections.emptyList(),
MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED));
env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID));
final ConsumerGroupDescription groupDescription = result.describedGroups().get(GROUP_ID).get();
assertNull(groupDescription.authorizedOperations());
}
} |
@Override
public boolean retryRequest(IOException exception, int executionCount, HttpContext ctx) {
log.fine(() -> String.format("retryRequest(exception='%s', executionCount='%d', ctx='%s'",
exception.getClass().getName(), executionCount, ctx));
HttpClientContext clientCtx = HttpClientContext.adapt(ctx);
if (!predicate.test(exception, clientCtx)) {
log.fine(() -> String.format("Not retrying for '%s'", ctx));
return false;
}
if (executionCount > maxRetries) {
log.fine(() -> String.format("Max retries exceeded for '%s'", ctx));
retryFailedConsumer.onRetryFailed(exception, executionCount, clientCtx);
return false;
}
Duration delay = delaySupplier.getDelay(executionCount);
log.fine(() -> String.format("Retrying after %s for '%s'", delay, ctx));
retryConsumer.onRetry(exception, delay, executionCount, clientCtx);
sleeper.sleep(delay);
return true;
} | @Test
void retry_with_fixed_delay_sleeps_for_expected_duration() {
Sleeper sleeper = mock(Sleeper.class);
Duration delay = Duration.ofSeconds(2);
int maxRetries = 2;
DelayedConnectionLevelRetryHandler handler = DelayedConnectionLevelRetryHandler.Builder
.withFixedDelay(delay, maxRetries)
.withSleeper(sleeper)
.build();
IOException exception = new IOException();
HttpClientContext ctx = new HttpClientContext();
int lastExecutionCount = maxRetries + 1;
for (int i = 1; i <= lastExecutionCount; i++) {
handler.retryRequest(exception, i, ctx);
}
verify(sleeper, times(2)).sleep(delay);
} |
@Deprecated
public static ByteBuf unmodifiableBuffer(ByteBuf buffer) {
ByteOrder endianness = buffer.order();
if (endianness == BIG_ENDIAN) {
return new ReadOnlyByteBuf(buffer);
}
return new ReadOnlyByteBuf(buffer.order(BIG_ENDIAN)).order(LITTLE_ENDIAN);
} | @Test
public void testUnmodifiableBuffer() throws Exception {
ByteBuf buf = unmodifiableBuffer(buffer(16));
try {
buf.discardReadBytes();
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setByte(0, (byte) 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setBytes(0, EMPTY_BUFFER, 0, 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setBytes(0, EMPTY_BYTES, 0, 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setBytes(0, ByteBuffer.allocate(0));
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setShort(0, (short) 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setMedium(0, 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setInt(0, 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
try {
buf.setLong(0, 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
InputStream inputStream = Mockito.mock(InputStream.class);
try {
buf.setBytes(0, inputStream, 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
Mockito.verifyZeroInteractions(inputStream);
ScatteringByteChannel scatteringByteChannel = Mockito.mock(ScatteringByteChannel.class);
try {
buf.setBytes(0, scatteringByteChannel, 0);
fail();
} catch (UnsupportedOperationException e) {
// Expected
}
Mockito.verifyZeroInteractions(scatteringByteChannel);
buf.release();
} |
public static String buildErrorMessage(final Throwable throwable) {
if (throwable == null) {
return "";
}
final List<String> messages = dedup(getErrorMessages(throwable));
final String msg = messages.remove(0);
final String causeMsg = messages.stream()
.filter(s -> !s.isEmpty())
.map(cause -> WordUtils.wrap(PREFIX + cause, 80, "\n\t", true))
.collect(Collectors.joining(System.lineSeparator()));
return causeMsg.isEmpty() ? msg : msg + System.lineSeparator() + causeMsg;
} | @Test
public void shouldRemoveSubMessages() {
final Throwable cause = new TestException("Sub-message2");
final Throwable subLevel1 = new TestException("This is Sub-message1", cause);
final Throwable e = new TestException("The Main Message that Contains Sub-message1 and Sub-message2", subLevel1);
assertThat(
buildErrorMessage(e),
is("The Main Message that Contains Sub-message1 and Sub-message2" + System.lineSeparator()
+ "Caused by: This is Sub-message1")
);
} |
@Override
public Optional<SimpleAddress> selectAddress(Optional<String> addressSelectionContext)
{
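// An explicit "host:port" selection context wins; otherwise pick from the
// catalog servers that expose a Thrift port.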
if (addressSelectionContext.isPresent()) {
return addressSelectionContext
.map(HostAndPort::fromString)
.map(SimpleAddress::new);
}
List<HostAndPort> catalogServers = internalNodeManager.getCatalogServers().stream()
.filter(node -> node.getThriftPort().isPresent())
.map(catalogServerNode -> {
HostAddress hostAndPort = catalogServerNode.getHostAndPort();
return HostAndPort.fromParts(hostAndPort.getHostText(), catalogServerNode.getThriftPort().getAsInt());
})
.collect(toImmutableList());
return hostSelector.apply(catalogServers).map(SimpleAddress::new);
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void testAddressSelectionContextPresentWithInvalidAddress()
{
InMemoryNodeManager internalNodeManager = new InMemoryNodeManager();
RandomCatalogServerAddressSelector selector = new RandomCatalogServerAddressSelector(internalNodeManager);
selector.selectAddress(Optional.of("host:123.456"));
} |
public static void load(String originalName, ClassLoader loader) {
String mangledPackagePrefix = calculateMangledPackagePrefix();
String name = mangledPackagePrefix + originalName;
List<Throwable> suppressed = new ArrayList<Throwable>();
try {
// first try to load from java.library.path
loadLibrary(loader, name, false);
return;
} catch (Throwable ex) {
suppressed.add(ex);
}
String libname = System.mapLibraryName(name);
String path = NATIVE_RESOURCE_HOME + libname;
InputStream in = null;
OutputStream out = null;
File tmpFile = null;
URL url = getResource(path, loader);
try {
if (url == null) {
if (PlatformDependent.isOsx()) {
String fileName = path.endsWith(".jnilib") ? NATIVE_RESOURCE_HOME + "lib" + name + ".dynlib" :
NATIVE_RESOURCE_HOME + "lib" + name + ".jnilib";
url = getResource(fileName, loader);
if (url == null) {
FileNotFoundException fnf = new FileNotFoundException(fileName);
ThrowableUtil.addSuppressedAndClear(fnf, suppressed);
throw fnf;
}
} else {
FileNotFoundException fnf = new FileNotFoundException(path);
ThrowableUtil.addSuppressedAndClear(fnf, suppressed);
throw fnf;
}
}
int index = libname.lastIndexOf('.');
String prefix = libname.substring(0, index);
String suffix = libname.substring(index);
tmpFile = PlatformDependent.createTempFile(prefix, suffix, WORKDIR);
in = url.openStream();
out = new FileOutputStream(tmpFile);
byte[] buffer = new byte[8192];
int length;
while ((length = in.read(buffer)) > 0) {
out.write(buffer, 0, length);
}
out.flush();
if (shouldShadedLibraryIdBePatched(mangledPackagePrefix)) {
// Let's try to patch the id and re-sign it. This is a best-effort and might fail if a
// SecurityManager is setup or the right executables are not installed :/
tryPatchShadedLibraryIdAndSign(tmpFile, originalName);
}
// Close the output stream before loading the unpacked library,
// because otherwise Windows will refuse to load it when it's in use by other process.
closeQuietly(out);
out = null;
loadLibrary(loader, tmpFile.getPath(), true);
} catch (UnsatisfiedLinkError e) {
try {
if (tmpFile != null && tmpFile.isFile() && tmpFile.canRead() &&
!NoexecVolumeDetector.canExecuteExecutable(tmpFile)) {
// Pass "io.netty.native.workdir" as an argument to allow shading tools to see
// the string. Since this is printed out to users to tell them what to do next,
// we want the value to be correct even when shading.
logger.info("{} exists but cannot be executed even when execute permissions set; " +
"check volume for \"noexec\" flag; use -D{}=[path] " +
"to set native working directory separately.",
tmpFile.getPath(), "io.netty.native.workdir");
}
} catch (Throwable t) {
suppressed.add(t);
logger.debug("Error checking if {} is on a file store mounted with noexec", tmpFile, t);
}
// Re-throw to fail the load
ThrowableUtil.addSuppressedAndClear(e, suppressed);
throw e;
} catch (Exception e) {
UnsatisfiedLinkError ule = new UnsatisfiedLinkError("could not load a native library: " + name);
ule.initCause(e);
ThrowableUtil.addSuppressedAndClear(ule, suppressed);
throw ule;
} finally {
closeQuietly(in);
closeQuietly(out);
// After we load the library it is safe to delete the file.
// We delete the file immediately to free up resources as soon as possible,
// and if this fails fallback to deleting on JVM exit.
if (tmpFile != null && (!DELETE_NATIVE_LIB_AFTER_LOADING || !tmpFile.delete())) {
tmpFile.deleteOnExit();
}
}
} | @Test
@EnabledOnOs(LINUX)
@EnabledIf("is_x86_64")
void testMultipleResourcesInTheClassLoader() throws MalformedURLException {
URL url1 = new File("src/test/data/NativeLibraryLoader/1").toURI().toURL();
URL url2 = new File("src/test/data/NativeLibraryLoader/2").toURI().toURL();
final URLClassLoader loader = new URLClassLoader(new URL[] {url1, url2});
final String resourceName = "test1";
Exception ise = assertThrows(IllegalStateException.class, new Executable() {
@Override
public void execute() {
NativeLibraryLoader.load(resourceName, loader);
}
});
assertTrue(ise.getMessage()
.contains("Multiple resources found for 'META-INF/native/lib" + resourceName + ".so'"));
} |
@Override
public WebSocketClientExtension handshakeExtension(WebSocketExtensionData extensionData) {
if (!X_WEBKIT_DEFLATE_FRAME_EXTENSION.equals(extensionData.name()) &&
!DEFLATE_FRAME_EXTENSION.equals(extensionData.name())) {
return null;
}
if (extensionData.parameters().isEmpty()) {
return new DeflateFrameClientExtension(compressionLevel, extensionFilterProvider);
} else {
return null;
}
} | @Test
public void testNormalHandshake() {
DeflateFrameClientExtensionHandshaker handshaker =
new DeflateFrameClientExtensionHandshaker(false);
WebSocketClientExtension extension = handshaker.handshakeExtension(
new WebSocketExtensionData(DEFLATE_FRAME_EXTENSION, Collections.<String, String>emptyMap()));
assertNotNull(extension);
assertEquals(WebSocketClientExtension.RSV1, extension.rsv());
assertTrue(extension.newExtensionDecoder() instanceof PerFrameDeflateDecoder);
assertTrue(extension.newExtensionEncoder() instanceof PerFrameDeflateEncoder);
} |
public static <InputT, OutputT> Growth<InputT, OutputT, OutputT> growthOf(
Growth.PollFn<InputT, OutputT> pollFn, Requirements requirements) {
return new AutoValue_Watch_Growth.Builder<InputT, OutputT, OutputT>()
.setTerminationPerInput(Growth.never())
.setPollFn(Contextful.of(pollFn, requirements))
// use null as a signal that this is the identity function and output coder can be
// reused as key coder
.setOutputKeyFn(null)
.build();
} | @Test
@Category({NeedsRunner.class, UsesUnboundedSplittableParDo.class})
public void testMultiplePollsStopAfterTimeSinceNewOutput() {
List<Integer> all = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
PCollection<Integer> res =
p.apply(Create.of("a"))
.apply(
Watch.growthOf(
new TimedPollFn<String, Integer>(
all,
standardSeconds(1) /* timeToOutputEverything */,
// Never declare output final
standardSeconds(1000) /* timeToDeclareOutputFinal */,
standardSeconds(30) /* timeToFail */))
// Should terminate after 4 seconds - earlier than timeToFail
.withTerminationPerInput(afterTimeSinceNewOutput(standardSeconds(3)))
.withPollInterval(Duration.millis(300))
.withOutputCoder(VarIntCoder.of()))
.apply("Drop input", Values.create());
PAssert.that(res).containsInAnyOrder(all);
p.run();
} |
@Override
public boolean removeLastOccurrence(Object o) {
return remove(o, -1);
} | @Test
public void testRemoveLastOccurrence() {
RDeque<Integer> queue1 = redisson.getDeque("deque1");
queue1.addFirst(3);
queue1.addFirst(1);
queue1.addFirst(2);
queue1.addFirst(3);
queue1.removeLastOccurrence(3);
assertThat(queue1).containsExactly(3, 2, 1);
} |
static @Nullable <T extends PluginConfig> T getPluginConfig(
Map<String, Object> params, Class<T> configClass) {
// Validate configClass
if (configClass == null) {
throw new IllegalArgumentException("Config class must be not null!");
}
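// Walk up the class hierarchy collecting non-static fields so inherited
// config properties are populated too.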
List<Field> allFields = new ArrayList<>();
Class<?> currClass = configClass;
while (currClass != null && !currClass.equals(Object.class)) {
allFields.addAll(
Arrays.stream(currClass.getDeclaredFields())
.filter(f -> !Modifier.isStatic(f.getModifiers()))
.collect(Collectors.toList()));
currClass = currClass.getSuperclass();
}
InstantiatorFactory instantiatorFactory = new InstantiatorFactory(false);
@Initialized T config = instantiatorFactory.get(TypeToken.of(configClass)).create();
if (config != null) {
for (Field field : allFields) {
field.setAccessible(true);
Class<?> fieldType = field.getType();
Name declaredAnnotation = field.getDeclaredAnnotation(Name.class);
Object fieldValue =
declaredAnnotation != null ? params.get(declaredAnnotation.value()) : null;
if (fieldValue != null && fieldType.equals(fieldValue.getClass())) {
try {
field.set(config, fieldValue);
} catch (IllegalAccessException e) {
LOG.error("Can not set a field with value {}", fieldValue);
}
} else if (field.getName().equals(MACRO_FIELDS_FIELD_NAME)) {
try {
field.set(config, Collections.emptySet());
} catch (IllegalAccessException e) {
LOG.error("Can not set macro fields");
}
}
}
}
return config;
} | @Test
public void testBuildingPluginConfigFromEmptyParamsMap() {
try {
ServiceNowSourceConfig config =
PluginConfigInstantiationUtils.getPluginConfig(
new HashMap<>(), ServiceNowSourceConfig.class);
assertNotNull(config);
} catch (Exception e) {
LOG.error("Error occurred while building the config object", e);
fail();
}
} |
@Bean
public ShiroFilterFactoryBean shiroFilterFactoryBean(
@Qualifier("shiroSecurityManager") final DefaultWebSecurityManager securityManager,
@Qualifier("shiroProperties") final ShiroProperties shiroProperties) {
ShiroFilterFactoryBean factoryBean = new ShiroFilterFactoryBean();
factoryBean.setSecurityManager(securityManager);
Map<String, Filter> filterMap = new LinkedHashMap<>();
filterMap.put("statelessAuth", new StatelessAuthFilter());
factoryBean.setFilters(filterMap);
Map<String, String> filterChainDefinitionMap = new LinkedHashMap<>();
for (String s : shiroProperties.getWhiteList()) {
filterChainDefinitionMap.put(s, "anon");
}
filterChainDefinitionMap.put("/**", "statelessAuth");
factoryBean.setFilterChainDefinitionMap(filterChainDefinitionMap);
return factoryBean;
} | @Test
public void testShiroFilterFactoryBean() {
ShiroProperties shiroProperties = mock(ShiroProperties.class);
List<String> whiteList = Arrays.asList("test1", "test2");
when(shiroProperties.getWhiteList()).thenReturn(whiteList);
ShiroFilterFactoryBean shiroFilterFactoryBean = shiroConfiguration.shiroFilterFactoryBean(securityManager, shiroProperties);
assertEquals(securityManager, shiroFilterFactoryBean.getSecurityManager());
assertNotNull(shiroFilterFactoryBean.getFilters());
assertNotNull(shiroFilterFactoryBean.getFilters().get("statelessAuth"));
Map<String, String> map = shiroFilterFactoryBean.getFilterChainDefinitionMap();
assertNotNull(map);
whiteList.stream().forEach(s -> assertEquals("anon", map.get(s)));
assertEquals("statelessAuth", map.get("/**"));
} |
static void checkForDuplicates(ClassLoader classLoader, ILogger logger, String resourceName) {
try {
List<URL> resources = Collections.list(classLoader.getResources(resourceName));
if (resources.size() > 1) {
String formattedResourceUrls = resources.stream().map(URL::toString).collect(Collectors.joining(", "));
logger.warning("WARNING: Classpath misconfiguration: found multiple " + resourceName
+ " resources: " + formattedResourceUrls);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
} | @Test
public void should_NOT_log_warning_when_no_occurrence() {
DuplicatedResourcesScanner.checkForDuplicates(getClass().getClassLoader(), logger, "META-INF/some-non-existing-file");
verifyNoInteractions(logger);
} |
public boolean cleanUnusedTopicByAddr(final String addr,
long timeoutMillis) throws MQClientException, RemotingConnectException,
RemotingSendRequestException, RemotingTimeoutException, InterruptedException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CLEAN_UNUSED_TOPIC, null);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return true;
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
} | @Test
public void assertCleanUnusedTopicByAddr() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
assertTrue(mqClientAPI.cleanUnusedTopicByAddr(defaultBrokerAddr, defaultTimeout));
} |
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide if to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
// results[0].target is null if this extractor cannot produce multiple fields; use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
} | @Test
public void testWithMultipleValueOnlyResultsAndOneValueIsNull() throws Exception {
final TestExtractor extractor = new TestExtractor.Builder()
.callback(new Callable<Result[]>() {
@Override
public Result[] call() throws Exception {
return new Result[]{
new Result("1", -1, -1),
new Result(null, -1, -1),
new Result("3", -1, -1)
};
}
})
.build();
final Message msg = createMessage("the hello");
extractor.runExtractor(msg);
assertThat(msg.hasField("target")).isFalse();
} |
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ShareGroupMember that = (ShareGroupMember) o;
return memberEpoch == that.memberEpoch
&& previousMemberEpoch == that.previousMemberEpoch
&& state == that.state
&& Objects.equals(memberId, that.memberId)
&& Objects.equals(rackId, that.rackId)
&& Objects.equals(clientId, that.clientId)
&& Objects.equals(clientHost, that.clientHost)
&& Objects.equals(subscribedTopicNames, that.subscribedTopicNames)
&& Objects.equals(assignedPartitions, that.assignedPartitions);
} | @Test
public void testEquals() {
Uuid topicId1 = Uuid.randomUuid();
ShareGroupMember member1 = new ShareGroupMember.Builder("member-id")
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setRackId("rack-id")
.setClientId("client-id")
.setClientHost("hostname")
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(topicId1, 1, 2, 3)))
.build();
ShareGroupMember member2 = new ShareGroupMember.Builder("member-id")
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setRackId("rack-id")
.setClientId("client-id")
.setClientHost("hostname")
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(topicId1, 1, 2, 3)))
.build();
assertEquals(member1, member2);
} |
@Override
public ByteBuf duplicate() {
return newLeakAwareByteBuf(super.duplicate());
} | @Test
public void testWrapDuplicate() {
assertWrapped(newBuffer(8).duplicate());
} |
@Override
public Output run(RunContext runContext) throws Exception {
URI from = new URI(runContext.render(this.from));
final PebbleFieldExtractor keyExtractor = getKeyExtractor(runContext);
final Map<String, Long> index = new HashMap<>(); // can be replaced by small-footprint Map implementation
// 1st iteration: build a map of key->offset
try (final BufferedReader reader = newBufferedReader(runContext, from)) {
long offset = 0L;
String item;
while ((item = reader.readLine()) != null) {
String key = keyExtractor.apply(item);
index.put(key, offset);
offset++;
}
}
// metrics
long processedItemsTotal = 0L;
long droppedItemsTotal = 0L;
long numKeys = index.size();
final Path path = runContext.workingDir().createTempFile(".ion");
// 2nd iteration: write deduplicate
try (final BufferedWriter writer = Files.newBufferedWriter(path);
final BufferedReader reader = newBufferedReader(runContext, from)) {
long offset = 0L;
String item;
while ((item = reader.readLine()) != null) {
String key = keyExtractor.apply(item);
Long lastOffset = index.get(key);
if (lastOffset != null && lastOffset == offset) {
writer.write(item);
writer.newLine();
} else {
droppedItemsTotal++;
}
offset++;
processedItemsTotal++;
}
}
URI uri = runContext.storage().putFile(path.toFile());
index.clear();
return Output
.builder()
.uri(uri)
.numKeys(numKeys)
.processedItemsTotal(processedItemsTotal)
.droppedItemsTotal(droppedItemsTotal)
.build();
} | @Test
void shouldDeduplicateFileGivenKeyExpression() throws Exception {
// Given
RunContext runContext = runContextFactory.of();
List<KeyValue1> values = List.of(
new KeyValue1("k1", "v1"),
new KeyValue1("k2", "v1"),
new KeyValue1("k3", "v1"),
new KeyValue1("k1", "v2"),
new KeyValue1("k2", "v2"),
new KeyValue1("k2", null),
new KeyValue1("k3", "v2"),
new KeyValue1("k1", "v3")
);
DeduplicateItems task = DeduplicateItems
.builder()
.from(generateKeyValueFile(values, runContext).toString())
.expr(" {{ key }} ")
.build();
// When
DeduplicateItems.Output output = task.run(runContext);
// Then
Assertions.assertNotNull(output);
Assertions.assertNotNull(output.getUri());
Assertions.assertEquals(3, output.getNumKeys());
Assertions.assertEquals(5, output.getDroppedItemsTotal());
Assertions.assertEquals(8, output.getProcessedItemsTotal());
List<KeyValue1> expected = List.of(
new KeyValue1("k2", null),
new KeyValue1("k3", "v2"),
new KeyValue1("k1", "v3")
);
assertSimpleCompactedFile(runContext, output, expected, KeyValue1.class);
} |
public static boolean shouldDelete(Feed feed) {
if (feed.getState() != Feed.STATE_NOT_SUBSCRIBED) {
return false;
} else if (feed.getItems() == null) {
return false;
}
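// Keep the feed if any item is favorited, queued, or downloaded.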
for (FeedItem item : feed.getItems()) {
if (item.isTagged(FeedItem.TAG_FAVORITE)
|| item.isTagged(FeedItem.TAG_QUEUE)
|| item.isDownloaded()) {
return false;
}
}
return feed.getLastRefreshAttempt() < System.currentTimeMillis() - TIME_TO_KEEP;
} | @Test
public void testQueuedItem() {
Feed feed = createFeed();
feed.setState(Feed.STATE_NOT_SUBSCRIBED);
feed.setLastRefreshAttempt(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(200, TimeUnit.DAYS));
feed.getItems().add(createItem(feed));
assertTrue(NonSubscribedFeedsCleaner.shouldDelete(feed));
FeedItem queuedItem = createItem(feed);
queuedItem.addTag(FeedItem.TAG_QUEUE);
feed.getItems().add(queuedItem);
assertFalse(NonSubscribedFeedsCleaner.shouldDelete(feed));
} |
@Override
public void download(String path, File outFile) {
final String fileName = FileUtil.getName(path);
final String dir = StrUtil.removeSuffix(path, fileName);
download(dir, fileName, outFile);
} | @Test
@Disabled
public void downloadTest() {
String downloadPath = "d:/test/download/";
try (final Ftp ftp = new Ftp("localhost")) {
final List<FTPFile> ftpFiles = ftp.lsFiles("temp/", null);
for (final FTPFile ftpFile : ftpFiles) {
String name = ftpFile.getName();
if (ftpFile.isDirectory()) {
File dp = new File(downloadPath + name);
if (!dp.exists()) {
dp.mkdir();
}
} else {
ftp.download("", name, FileUtil.file(downloadPath + name));
}
}
} catch (final IOException e) {
throw new IORuntimeException(e);
}
} |
@VisibleForTesting
static double convert(double value, TimeUnit from, TimeUnit target) {
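// Converting to a coarser unit divides by the number of source units per
// target unit; converting to a finer unit multiplies.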
if (target.compareTo(from) > 0) {
return value / from.convert(1, target);
}
return value * target.convert(1, from);
} | @Test
public void convertTest() {
for (TimeUnit from : TimeUnit.values()) {
for (TimeUnit to : TimeUnit.values()) {
assertEquals(1.0, convert(convert(1.0, from, to), to, from), 0.0001,
from + " to " + to + " and back");
}
}
} |
public abstract @NonNull VirtualFile[] list() throws IOException; | @Test
@Issue("SECURITY-1452")
public void list_NoFollowLinks_AbstractBase() throws Exception {
// This test checks the method's behavior in the abstract base class,
// which has limited behavior.
prepareFileStructureForIsDescendant(tmp.getRoot());
File root = tmp.getRoot();
VirtualFile virtualRoot = new VirtualFileMinimalImplementation(root);
List<VirtualFile> children = Arrays.asList(virtualRoot.list(LinkOption.NOFOLLOW_LINKS));
assertThat(children, hasSize(2));
assertThat(children, containsInAnyOrder(
VFMatcher.hasName("a"),
VFMatcher.hasName("b")
));
} |
@Override
public void updateHost(K8sHost host) {
checkNotNull(host, ERR_NULL_HOST);
hostStore.updateHost(host);
log.info(String.format(MSG_HOST, host.hostIp().toString(), MSG_UPDATED));
} | @Test(expected = IllegalArgumentException.class)
public void testUpdateNotExistingHost() {
target.updateHost(HOST_1);
} |
void resolveSelectors(EngineDiscoveryRequest request, CucumberEngineDescriptor engineDescriptor) {
Predicate<String> packageFilter = buildPackageFilter(request);
resolve(request, engineDescriptor, packageFilter);
filter(engineDescriptor, packageFilter);
pruneTree(engineDescriptor);
} | @Test
void resolveRequestWithFileSelector() {
DiscoverySelector resource = selectFile("src/test/resources/io/cucumber/junit/platform/engine/single.feature");
EngineDiscoveryRequest discoveryRequest = new SelectorRequest(resource);
resolver.resolveSelectors(discoveryRequest, testDescriptor);
assertEquals(1, testDescriptor.getChildren().size());
} |
public static void main(String[] args) {
var states = new Stack<StarMemento>();
var star = new Star(StarType.SUN, 10000000, 500000);
LOGGER.info(star.toString());
states.add(star.getMemento());
star.timePasses();
LOGGER.info(star.toString());
states.add(star.getMemento());
star.timePasses();
LOGGER.info(star.toString());
states.add(star.getMemento());
star.timePasses();
LOGGER.info(star.toString());
states.add(star.getMemento());
star.timePasses();
LOGGER.info(star.toString());
while (!states.isEmpty()) {
star.setMemento(states.pop());
LOGGER.info(star.toString());
}
} | @Test
void shouldExecuteWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
@Override
public Predicate<FileInfo> get() {
long currentTimeMS = System.currentTimeMillis();
Interval interval = Interval.between(currentTimeMS, currentTimeMS + 1);
return FileInfo -> {
try {
return interval.intersect(mInterval.add(mGetter.apply(FileInfo))).isValid();
} catch (RuntimeException e) {
LOG.debug("Failed to filter: ", e);
return false;
}
};
} | @Test
public void testCreateEmptyPredicate() {
FileFilter filter = FileFilter.newBuilder().setName("").setValue("").build();
long timestamp = System.currentTimeMillis();
FileInfo info = new FileInfo();
info.setLastModificationTimeMs(timestamp);
mThrown.expect(UnsupportedOperationException.class);
mThrown.expectMessage("Invalid filter name: ");
FilePredicate.create(filter).get().test(info);
} |
@Override
@SuppressWarnings("UseOfSystemOutOrSystemErr")
public void run(Namespace namespace, Liquibase liquibase) throws Exception {
final Set<Class<? extends DatabaseObject>> compareTypes = new HashSet<>();
if (isTrue(namespace.getBoolean("columns"))) {
compareTypes.add(Column.class);
}
if (isTrue(namespace.getBoolean("data"))) {
compareTypes.add(Data.class);
}
if (isTrue(namespace.getBoolean("foreign-keys"))) {
compareTypes.add(ForeignKey.class);
}
if (isTrue(namespace.getBoolean("indexes"))) {
compareTypes.add(Index.class);
}
if (isTrue(namespace.getBoolean("primary-keys"))) {
compareTypes.add(PrimaryKey.class);
}
if (isTrue(namespace.getBoolean("sequences"))) {
compareTypes.add(Sequence.class);
}
if (isTrue(namespace.getBoolean("tables"))) {
compareTypes.add(Table.class);
}
if (isTrue(namespace.getBoolean("unique-constraints"))) {
compareTypes.add(UniqueConstraint.class);
}
if (isTrue(namespace.getBoolean("views"))) {
compareTypes.add(View.class);
}
final DiffToChangeLog diffToChangeLog = new DiffToChangeLog(new DiffOutputControl());
final Database database = liquibase.getDatabase();
final String filename = namespace.getString("output");
if (filename != null) {
try (PrintStream file = new PrintStream(filename, StandardCharsets.UTF_8.name())) {
generateChangeLog(database, database.getDefaultSchema(), diffToChangeLog, file, compareTypes);
}
} else {
generateChangeLog(database, database.getDefaultSchema(), diffToChangeLog, outputStream, compareTypes);
}
} | @Test
void testDumpSchemaAndData() throws Exception {
dumpCommand.run(null, new Namespace(Stream.concat(ATTRIBUTE_NAMES.stream(), Stream.of("data"))
.collect(Collectors.toMap(a -> a, b -> true))), existedDbConf);
final NodeList changeSets = toXmlDocument(baos).getDocumentElement().getElementsByTagName("changeSet");
assertCreateTable((Element) changeSets.item(0));
assertInsertData((Element) changeSets.item(1));
} |
@Override
public Response toResponse(Throwable exception) {
debugLog(exception);
if (exception instanceof WebApplicationException w) {
var res = w.getResponse();
if (res.getStatus() >= 500) {
log(w);
}
return res;
}
if (exception instanceof AuthenticationException) {
return Response.status(Status.UNAUTHORIZED).build();
}
if (exception instanceof ValidationException ve) {
if (ve.seeOther() != null) {
return Response.seeOther(ve.seeOther()).build();
}
return buildContentNegotiatedErrorResponse(ve.localizedMessage(), Status.BAD_REQUEST);
}
// the remaining exceptions are unexpected, let's log them
log(exception);
if (exception instanceof FederationException fe) {
var errorMessage = new Message(FEDERATION_ERROR_MESSAGE, fe.reason().name());
return buildContentNegotiatedErrorResponse(errorMessage, Status.INTERNAL_SERVER_ERROR);
}
var status = Status.INTERNAL_SERVER_ERROR;
var errorMessage = new Message(SERVER_ERROR_MESSAGE, (String) null);
return buildContentNegotiatedErrorResponse(errorMessage, status);
} | @Test
void toResponse_withBody_Unauthorized() {
// when
var res = mapper.toResponse(new AuthenticationException("Unauthorized"));
// then
assertEquals(401, res.getStatus());
} |
@SuppressWarnings("unchecked")
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
return register(MetricName.build(name), metric);
} | @Test
public void registeringAHistogramTriggersANotification() throws Exception {
assertThat(registry.register(THING, histogram))
.isEqualTo(histogram);
verify(listener).onHistogramAdded(THING, histogram);
} |
public void lockClusterState(ClusterStateChange stateChange, Address initiator, UUID txnId, long leaseTime,
int memberListVersion, long partitionStateStamp) {
Preconditions.checkNotNull(stateChange);
clusterServiceLock.lock();
try {
if (!node.getNodeExtension().isStartCompleted()) {
throw new IllegalStateException("Can not lock cluster state! Startup is not completed yet!");
}
if (node.getClusterService().getClusterJoinManager().isMastershipClaimInProgress()) {
throw new IllegalStateException("Can not lock cluster state! Mastership claim is in progress!");
}
if (stateChange.isOfType(Version.class)) {
validateNodeCompatibleWith((Version) stateChange.getNewState());
validateClusterVersionChange((Version) stateChange.getNewState());
}
checkMemberListVersion(memberListVersion);
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
lockOrExtendClusterState(initiator, txnId, leaseTime);
try {
// check migration status and partition-state version again
// if partition state is changed then release the lock and fail.
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
} catch (IllegalStateException e) {
stateLockRef.set(LockGuard.NOT_LOCKED);
throw e;
}
} finally {
clusterServiceLock.unlock();
}
} | @Test(expected = TransactionException.class)
public void test_lockClusterState_fail() throws Exception {
Address initiator = newAddress();
ClusterStateChange newState = ClusterStateChange.from(FROZEN);
clusterStateManager.lockClusterState(newState, initiator, TXN, 1000, MEMBERLIST_VERSION, PARTITION_STAMP);
clusterStateManager.lockClusterState(newState, initiator, ANOTHER_TXN, 1000, MEMBERLIST_VERSION, PARTITION_STAMP);
} |
@Override
public SelDouble assignOps(SelOp op, SelType rhs) {
SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
double another = ((SelDouble) rhs).val;
switch (op) {
case ASSIGN:
this.val = another;
return this;
case ADD_ASSIGN:
this.val += another;
return this;
case SUB_ASSIGN:
this.val -= another;
return this;
case MUL_ASSIGN:
this.val *= another;
return this;
case DIV_ASSIGN:
this.val /= another;
return this;
case MOD_ASSIGN:
this.val %= another;
return this;
default:
throw new UnsupportedOperationException(
"float/Float/double/Doubles DO NOT support assignment operation " + op);
}
} | @Test(expected = UnsupportedOperationException.class)
public void testInvalidAssignOpType() {
one.assignOps(SelOp.EQUAL, one);
} |
@NonNull
@Override
public String getId() {
return ID;
} | @Test
public void getOrganizationsWithoutCredentialId() throws IOException, UnirestException {
createCredential(BitbucketCloudScm.ID);
List orgs = new RequestBuilder(baseUrl)
.crumb(crumb)
.status(200)
.jwtToken(getJwtToken(j.jenkins, authenticatedUser.getId(), authenticatedUser.getId()))
.post("/organizations/jenkins/scm/"+BitbucketCloudScm.ID+"/organizations/"+getApiUrlParam())
.build(List.class);
assertEquals(2, orgs.size());
assertEquals(BbCloudWireMock.USER_UUID, ((Map)orgs.get(0)).get("key"));
assertEquals("Vivek Pandey", ((Map)orgs.get(0)).get("name"));
assertEquals(BbCloudWireMock.TEAM_UUID, ((Map)orgs.get(1)).get("key"));
assertEquals("Vivek's Team", ((Map)orgs.get(1)).get("name"));
} |
@Override
public void refreshJobRetentionSettings() throws IOException {
UserGroupInformation user = checkAcls("refreshJobRetentionSettings");
try {
loginUGI.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
jobHistoryService.refreshJobRetentionSettings();
return null;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
HSAuditLogger.logSuccess(user.getShortUserName(),
"refreshJobRetentionSettings", HISTORY_ADMIN_SERVER);
} | @Test
public void testRefreshJobRetentionSettings() throws Exception {
String[] args = new String[1];
args[0] = "-refreshJobRetentionSettings";
hsAdminClient.run(args);
verify(jobHistoryService).refreshJobRetentionSettings();
} |
@Override
protected InputStream openObject(String key, OpenOptions options, RetryPolicy retryPolicy) {
return new GCSInputStream(mBucketName, key, mClient, options.getOffset(), retryPolicy);
} | @Test
public void testOpenObject() throws IOException, ServiceException {
// test successful open object
Mockito.when(mClient.getObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString()))
.thenReturn(new GSObject());
OpenOptions options = OpenOptions.defaults();
RetryPolicy retryPolicy = new CountingRetry(1);
InputStream result = mGCSUnderFileSystem.openObject(KEY, options, retryPolicy);
Assert.assertTrue(result instanceof GCSInputStream);
} |
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
return doFlatTransformValues(
toValueTransformerWithKeySupplier(valueTransformerSupplier),
NamedInternal.empty(),
stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueSupplierAndStores() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.flatTransformValues(
flatValueTransformerSupplier,
(Named) null,
"storeName"));
assertThat(exception.getMessage(), equalTo("named can't be null"));
} |
@Override
public Producer createProducer() throws Exception {
return new FopProducer(this, fopFactory, outputType.getFormatExtended());
} | @Test
public void overridePdfOutputFormatToPlainText() throws Exception {
if (!canTest()) {
// cannot run on CI
return;
}
String defaultOutputFormat = "pdf";
Endpoint endpoint = context().getEndpoint("fop:" + defaultOutputFormat);
Producer producer = endpoint.createProducer();
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setHeader(FopConstants.CAMEL_FOP_OUTPUT_FORMAT, "txt");
exchange.getIn().setBody(FopHelper.decorateTextWithXSLFO("Test Content"));
producer.process(exchange);
String plainText = exchange.getMessage().getBody(String.class).trim();
assertEquals("Test Content", plainText);
} |
public static String unescapeQuotedString(String string) {
StringBuilder sb = new StringBuilder(string);
for (int i = 0; i < sb.length(); i++) {
if (sb.charAt(i) == '\\') {
sb.deleteCharAt(i);
if (i == sb.length()) {
throw new IllegalArgumentException("Parse error" + string);
}
switch (sb.charAt(i)) {
case 'n' -> sb.setCharAt(i, '\n');
case 'r' -> sb.setCharAt(i, '\r');
case 't' -> sb.setCharAt(i, '\t');
case 'f' -> sb.setCharAt(i, '\f');
case 'x' -> {
if (i + 2 >= sb.length()) {
throw new IllegalArgumentException("Could not parse hex value " + string);
}
sb.setCharAt(i, (char) Integer.parseInt(sb.substring(i + 1, i + 3), 16));
sb.delete(i + 1, i + 3);
}
case '\\' -> sb.setCharAt(i, '\\');
}
}
}
if (sb.length() > 0 && (sb.charAt(0) == '"') && sb.charAt(sb.length() - 1) == '"') {
sb.deleteCharAt(sb.length() - 1);//remove last quote
if (sb.length() > 0) {
sb.deleteCharAt(0); //remove first quote
}
}
return sb.toString();
} | @Test
void testUnescapeQuotedString() {
String a = "\"Hei\"";
assertEquals("Hei", StringNode.unescapeQuotedString(a));
assertEquals("foo\"bar\"", StringNode.unescapeQuotedString("foo\"bar\""));
assertEquals("foo\"bar\"", StringNode.unescapeQuotedString("foo\\\"bar\\\""));
assertEquals("a\rb\tc\fd", StringNode.unescapeQuotedString("a\\rb\\tc\\fd"));
assertEquals("U", StringNode.unescapeQuotedString("\\x55"));
} |
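For reference, two more inputs fed through the same StringNode call the test uses; this is a hypothetical snippet, not part of the original test:

// "\x41\x42" decodes the hex escapes and strips the surrounding quotes
assertEquals("AB", StringNode.unescapeQuotedString("\"\\x41\\x42\""));
// an escaped backslash survives as a single backslash, \n becomes a newline
assertEquals("a\\b\nc", StringNode.unescapeQuotedString("\"a\\\\b\\nc\""));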
Long nextUniqueId() {
try {
Thread.sleep(ThreadLocalRandom.current().nextInt(RANDOM_JITTER_DELAY));
return executor
.submit(stepInstanceDao::getNextUniqueId)
.get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new MaestroInternalError(e, "nextUniqueId throws an exception");
}
} | @Test
public void testNextUniqueId() {
Long expected = 750762533885116445L;
when(stepInstanceDao.getNextUniqueId()).thenReturn(expected);
assertEquals(expected, paramExtension.nextUniqueId());
when(stepInstanceDao.getNextUniqueId())
.thenThrow(new MaestroNotFoundException("test exception"));
AssertHelper.assertThrows(
"cannot get next unique id",
MaestroInternalError.class,
"nextUniqueId throws an exception",
() -> paramExtension.nextUniqueId());
} |
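The method above combines two generic ingredients: a small random sleep that spreads concurrent callers apart, and a bounded get so a stuck DAO call fails fast instead of blocking forever. A self-contained sketch of that pattern, with the DAO replaced by a stub; both constants are assumptions:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

public class BoundedLookupSketch {
    private static final int RANDOM_JITTER_DELAY = 50;    // hypothetical jitter bound, ms
    private static final long TIMEOUT_IN_MILLIS = 500;    // hypothetical wait bound, ms

    static Long nextUniqueId(ExecutorService executor) {
        try {
            // Random jitter spreads concurrent callers apart before they hit the shared resource.
            Thread.sleep(ThreadLocalRandom.current().nextInt(RANDOM_JITTER_DELAY));
            // Bounded get: a stuck lookup fails fast instead of blocking the caller forever.
            return executor.submit(() -> 42L).get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IllegalStateException("nextUniqueId throws an exception", e);
        }
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        System.out.println(nextUniqueId(executor));
        executor.shutdown();
    }
}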
public static Statement sanitize(
final Statement node,
final MetaStore metaStore) {
return sanitize(node, metaStore, true);
} | @Test
public void shouldThrowOnUnknownRightJoinSource() {
// Given:
final Statement stmt =
givenQuery("SELECT * FROM TEST1 T1 JOIN UNKNOWN WITHIN 1 SECOND ON T1.ID = UNKNOWN.ID;");
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> AstSanitizer.sanitize(stmt, META_STORE)
);
// Then:
assertThat(e.getMessage(), containsString(
"UNKNOWN does not exist"));
} |
public String getSourceText() {
return sourceBuilder.toString();
} | @Test
public void getSourceText() {
assertThat(sourceFile.getSourceText()).isEqualTo(SOURCE_TEXT);
} |
int capacity() { return data.length; } | @Test
public void testMinimalInitialCapacity() {
DecodeIndex index = new DecodeIndex(2, 1);
assertEquals(16, index.capacity());
} |
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
// we do the validation / topic-creation in a loop until we have confirmed that all topics
// exist with the expected number of partitions, or some topic creation returns a fatal error.
log.debug("Starting to validate internal topics {} in partition assignor.", topics);
long currentWallClockMs = time.milliseconds();
final long deadlineMs = currentWallClockMs + retryTimeoutMs;
Set<String> topicsNotReady = new HashSet<>(topics.keySet());
final Set<String> newlyCreatedTopics = new HashSet<>();
while (!topicsNotReady.isEmpty()) {
final Set<String> tempUnknownTopics = new HashSet<>();
topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
newlyCreatedTopics.addAll(topicsNotReady);
if (!topicsNotReady.isEmpty()) {
final Set<NewTopic> newTopics = new HashSet<>();
for (final String topicName : topicsNotReady) {
if (tempUnknownTopics.contains(topicName)) {
// for the tempUnknownTopics, don't create topic for them
// we'll check again later if remaining retries > 0
continue;
}
final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
final Map<String, String> topicConfig = internalTopicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
log.debug("Going to create topic {} with {} partitions and config {}.",
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
topicConfig);
newTopics.add(
new NewTopic(
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
Optional.of(replicationFactor))
.configs(topicConfig));
}
// it's possible that some topics are not ready yet because they are temporarily
// unavailable, not because they do not exist; in that case the set of new topics
// to create may be empty and we can skip the create call here
if (!newTopics.isEmpty()) {
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
final String topicName = createTopicResult.getKey();
try {
createTopicResult.getValue().get();
topicsNotReady.remove(topicName);
} catch (final InterruptedException fatalException) {
// this should not happen; if it ever happens it indicates a bug
Thread.currentThread().interrupt();
log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
} catch (final ExecutionException executionException) {
final Throwable cause = executionException.getCause();
if (cause instanceof TopicExistsException) {
// This topic didn't exist earlier or its leader was not known before; just retain it for the next round of validation.
log.info(
"Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n"
+
"Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n"
+
"Error message was: {}", topicName, retryBackOffMs,
cause.toString());
} else {
log.error("Unexpected error during topic creation for {}.\n" +
"Error message was: {}", topicName, cause.toString());
if (cause instanceof UnsupportedVersionException) {
final String errorMessage = cause.getMessage();
if (errorMessage != null &&
errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
throw new StreamsException(String.format(
"Could not create topic %s, because brokers don't support configuration replication.factor=-1."
+ " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
topicName)
);
}
} else if (cause instanceof TimeoutException) {
log.error("Creating topic {} timed out.\n" +
"Error message was: {}", topicName, cause.toString());
} else {
throw new StreamsException(
String.format("Could not create topic %s.", topicName),
cause
);
}
}
}
}
}
}
if (!topicsNotReady.isEmpty()) {
currentWallClockMs = time.milliseconds();
if (currentWallClockMs >= deadlineMs) {
final String timeoutError = String.format("Could not create topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
log.error(timeoutError);
throw new TimeoutException(timeoutError);
}
log.info(
"Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
topicsNotReady,
retryBackOffMs,
deadlineMs - currentWallClockMs
);
Utils.sleep(retryBackOffMs);
}
}
log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);
return newlyCreatedTopics;
} | @Test
public void shouldNotThrowExceptionForEmptyTopicMap() {
internalTopicManager.makeReady(Collections.emptyMap());
} |
@Override
public boolean add(R e) {
throw new UnsupportedOperationException("LazySet is not modifiable");
} | @Test(expected = UnsupportedOperationException.class)
public void testAdd_throwsException() {
set.add(null);
} |
protected Boolean convertBigNumberToBoolean( BigDecimal number ) {
if ( number == null ) {
return null;
}
return Boolean.valueOf( number.signum() != 0 );
} | @Test
public void testConvertBigNumberToBoolean() {
ValueMetaBase vmb = new ValueMetaBase();
assertTrue( vmb.convertBigNumberToBoolean( new BigDecimal( "-234" ) ) );
assertTrue( vmb.convertBigNumberToBoolean( new BigDecimal( "234" ) ) );
assertFalse( vmb.convertBigNumberToBoolean( new BigDecimal( "0" ) ) );
assertTrue( vmb.convertBigNumberToBoolean( new BigDecimal( "1.7976E308" ) ) );
} |
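Since BigDecimal.signum() returns -1, 0 or 1, any non-zero value, negatives included, converts to true and only zero converts to false. Two more hypothetical calls (same-package access to the protected method assumed, as in the test):

vmb.convertBigNumberToBoolean(new BigDecimal("-0.5")); // Boolean.TRUE
vmb.convertBigNumberToBoolean(BigDecimal.ZERO);        // Boolean.FALSE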
static String trimFieldsAndRemoveEmptyFields(String str) {
char[] chars = str.toCharArray();
char[] res = new char[chars.length];
/*
* set when reading the first non-trimmable char after a separator char (or at the beginning of the string)
* unset when reading a separator
*/
boolean inField = false;
boolean inQuotes = false;
int i = 0;
int resI = 0;
for (; i < chars.length; i++) {
boolean isSeparator = chars[i] == ',';
if (!inQuotes && isSeparator) {
// exiting field (may already be unset)
inField = false;
if (resI > 0) {
resI = retroTrim(res, resI);
}
} else {
boolean isTrimmed = !inQuotes && istrimmable(chars[i]);
if (isTrimmed && !inField) {
// we haven't met any non-trimmable char since the last separator yet
continue;
}
boolean isEscape = isEscapeChar(chars[i]);
if (isEscape) {
inQuotes = !inQuotes;
}
// add separator as we already had one field
if (!inField && resI > 0) {
res[resI] = ',';
resI++;
}
// register in field (may already be set)
inField = true;
// copy current char
res[resI] = chars[i];
resI++;
}
}
// inQuotes can only be true at this point if quotes are unbalanced
if (!inQuotes) {
// trim end of str
resI = retroTrim(res, resI);
}
return new String(res, 0, resI);
} | @Test
public void trimFieldsAndRemoveEmptyFields_supports_escaped_quote_in_quotes() {
assertThat(trimFieldsAndRemoveEmptyFields("\"f\"\"oo\"")).isEqualTo("\"f\"\"oo\"");
assertThat(trimFieldsAndRemoveEmptyFields("\"f\"\"oo\",\"bar\"\"\"")).isEqualTo("\"f\"\"oo\",\"bar\"\"\"");
} |
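Two more hypothetical assertions against the method above, in the same AssertJ style as the test: whitespace around unquoted fields is trimmed and empty fields are dropped, while whitespace inside quotes is preserved.

assertThat(trimFieldsAndRemoveEmptyFields("  a , , b ")).isEqualTo("a,b");
assertThat(trimFieldsAndRemoveEmptyFields("\" a \",b")).isEqualTo("\" a \",b");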
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
PathFilter pathFilter) throws IOException {
path = fc.makeQualified(path);
List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
try {
RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
while (fileStatusIter.hasNext()) {
FileStatus fileStatus = fileStatusIter.next();
Path filePath = fileStatus.getPath();
if (fileStatus.isFile() && pathFilter.accept(filePath)) {
jhStatusList.add(fileStatus);
}
}
} catch (FileNotFoundException fe) {
LOG.error("Error while scanning directory " + path, fe);
}
return jhStatusList;
} | @Test
public void testScanDirectory() throws Exception {
Path p = new Path("any");
FileContext fc = mock(FileContext.class);
when(fc.makeQualified(p)).thenReturn(p);
when(fc.listStatus(p)).thenThrow(new FileNotFoundException());
List<FileStatus> lfs = HistoryFileManager.scanDirectory(p, fc, null);
//primarily, success is that an exception was not thrown. Also nice to
//check this
Assert.assertNotNull(lfs);
} |
private void sendResponse(Response response) {
try {
((GrpcConnection) this.currentConnection).sendResponse(response);
} catch (Exception e) {
LOGGER.error("[{}]Error to send ack response, ackId->{}", this.currentConnection.getConnectionId(),
response.getRequestId());
}
} | @Test
void testSendResponseWithException()
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, NoSuchFieldException {
GrpcConnection connection = mock(GrpcConnection.class);
setCurrentConnection(connection, grpcClient);
doThrow(new RuntimeException("test")).when(connection).sendResponse(any(Response.class));
Method sendResponseMethod = GrpcClient.class.getDeclaredMethod("sendResponse", Response.class);
sendResponseMethod.setAccessible(true);
sendResponseMethod.invoke(grpcClient, new ConnectResetResponse());
// don't throw any exception.
} |
public List<String> supportedAnalyticsDashboardMetrics() {
return this.supportedAnalytics.stream().filter(s -> DASHBOARD_TYPE.equalsIgnoreCase(s.getType())).map(SupportedAnalytics::getTitle).collect(Collectors.toList());
} | @Test
public void shouldListSupportedDashBoardAnalytics() {
Capabilities capabilities = new Capabilities(List.of(new SupportedAnalytics("dashboard", "id1", "title1" ),
new SupportedAnalytics("DashBoard", "id2", "title2" )));
assertThat(capabilities.supportedAnalyticsDashboardMetrics(), is(List.of("title1", "title2")));
assertTrue(new Capabilities(Collections.emptyList()).supportedAnalyticsDashboardMetrics().isEmpty());
} |
public static List<String> splitPathToElements(Path path) {
checkArgument(path.isAbsolute(), "path is relative");
String uriPath = path.toUri().getPath();
checkArgument(!uriPath.isEmpty(), "empty path");
if ("/".equals(uriPath)) {
// special case: empty list
return new ArrayList<>(0);
}
List<String> elements = new ArrayList<>();
int len = uriPath.length();
int firstElementChar = 1;
int endOfElement = uriPath.indexOf('/', firstElementChar);
while (endOfElement > 0) {
elements.add(uriPath.substring(firstElementChar, endOfElement));
firstElementChar = endOfElement + 1;
endOfElement = firstElementChar == len ? -1
: uriPath.indexOf('/', firstElementChar);
}
// expect a possible child element here
if (firstElementChar != len) {
elements.add(uriPath.substring(firstElementChar));
}
return elements;
} | @Test
public void testSplitPathEmpty() throws Throwable {
intercept(IllegalArgumentException.class,
() -> splitPathToElements(new Path("")));
} |
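Two more hypothetical cases, using the same org.apache.hadoop.fs.Path type as the test: the root path is special-cased to an empty list, and every "/"-delimited segment of a deeper path becomes one element.

assertTrue(splitPathToElements(new Path("/")).isEmpty());
assertEquals(Arrays.asList("a", "b", "c"), splitPathToElements(new Path("/a/b/c")));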
public final void tag(I input, ScopedSpan span) {
if (input == null) throw new NullPointerException("input == null");
if (span == null) throw new NullPointerException("span == null");
if (span.isNoop()) return;
tag(span, input, span.context());
} | @Test void tag_customizer() {
when(parseValue.apply(input, null)).thenReturn("value");
tag.tag(input, customizer);
verify(parseValue).apply(input, null);
verifyNoMoreInteractions(parseValue); // doesn't parse twice
verify(customizer).tag("key", "value");
verifyNoMoreInteractions(customizer); // doesn't tag twice
} |
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
return getSqlRecordIteratorBatch(value, descending, null);
} | @Test
public void getSqlRecordIteratorBatchLeftIncludedRightIncludedDescending() {
var expectedKeyOrder = List.of(7, 4, 1, 6, 3, 0);
var result = store.getSqlRecordIteratorBatch(0, true, 1, true, true);
assertResult(expectedKeyOrder, result);
} |
@Override
public V pollFirstFromAny(long timeout, TimeUnit unit, String... queueNames) {
return get(pollFirstFromAnyAsync(timeout, unit, queueNames));
} | @Test
public void testPollFirstFromAny() {
final RScoredSortedSet<Integer> queue1 = redisson.getScoredSortedSet("queue:pollany");
Executors.newSingleThreadScheduledExecutor().schedule(() -> {
RScoredSortedSet<Integer> queue2 = redisson.getScoredSortedSet("queue:pollany1");
RScoredSortedSet<Integer> queue3 = redisson.getScoredSortedSet("queue:pollany2");
queue3.add(0.1, 2);
queue1.add(0.1, 1);
queue2.add(0.1, 3);
}, 3, TimeUnit.SECONDS);
long s = System.currentTimeMillis();
int l = queue1.pollFirstFromAny(4, TimeUnit.SECONDS, "queue:pollany1", "queue:pollany2");
Assertions.assertEquals(2, l);
Assertions.assertTrue(System.currentTimeMillis() - s > 2000);
} |
@Override
public Map<String, String> getMetadata(final Path file) throws BackgroundException {
try {
final String fileid = this.fileid.getFileId(file);
final Map<String, String> properties = session.getClient().files().get(fileid).setFields("properties")
.setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute().getProperties();
if(null == properties) {
return Collections.emptyMap();
}
return properties;
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Failure to read attributes of {0}", e, file);
}
} | @Test
public void testChangedFileId() throws Exception {
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path room = new DriveDirectoryFeature(session, fileid).mkdir(
new Path(MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path test = new DriveTouchFeature(session, fileid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final String latestfileid = test.attributes().getFileId();
assertNotNull(latestfileid);
// Assume previously seen but changed on server
fileid.cache(test, String.valueOf(RandomUtils.nextLong()));
final DriveMetadataFeature f = new DriveMetadataFeature(session, fileid);
try {
f.getMetadata(test);
}
catch(NotfoundException e) {
assertNull(test.attributes().getFileId());
}
new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Deprecated(since="4.0.0", forRemoval=true)
public static long populateBuffer(InputStream in, byte[] buffer) throws IOException
{
return in.readNBytes(buffer, 0, buffer.length);
} | @Test
void testPopulateBuffer() throws IOException
{
byte[] data = "Hello World!".getBytes();
byte[] buffer = new byte[data.length];
long count = IOUtils.populateBuffer(new ByteArrayInputStream(data), buffer);
assertEquals(12, count);
buffer = new byte[data.length - 2]; //Buffer too small
InputStream in = new ByteArrayInputStream(data);
count = IOUtils.populateBuffer(in, buffer);
assertEquals(10, count);
byte[] leftOver = in.readAllBytes();
assertEquals(2, leftOver.length);
buffer = new byte[data.length + 2]; //Buffer too big
in = new ByteArrayInputStream(data);
count = IOUtils.populateBuffer(in, buffer);
assertEquals(12, count);
assertEquals(-1, in.read()); //EOD reached
} |
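Since the deprecated helper simply delegates to InputStream.readNBytes (Java 9+), a buffer larger than the stream is left zero-filled past the returned count. A hypothetical snippet:

byte[] buffer = new byte[8];
long count = IOUtils.populateBuffer(new ByteArrayInputStream("abcdef".getBytes()), buffer);
// count == 6; buffer[6] and buffer[7] remain 0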
public boolean isNeedMerge() {
boolean selectContainsSubquery = sqlStatementContext instanceof SelectStatementContext && ((SelectStatementContext) sqlStatementContext).isContainsSubquery();
boolean insertSelectContainsSubquery = sqlStatementContext instanceof InsertStatementContext && null != ((InsertStatementContext) sqlStatementContext).getInsertSelectContext()
&& ((InsertStatementContext) sqlStatementContext).getInsertSelectContext().getSelectStatementContext().isContainsSubquery();
return (selectContainsSubquery || insertSelectContainsSubquery) && !rule.getShardingLogicTableNames(((TableAvailable) sqlStatementContext).getTablesContext().getTableNames()).isEmpty();
} | @Test
void assertIsNeedMerge() {
assertFalse(createSingleShardingConditions().isNeedMerge());
} |
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, RateLimiter rateLimiter,
String methodName) throws Throwable {
RateLimiterOperator<?> rateLimiterOperator = RateLimiterOperator.of(rateLimiter);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava3Aspect(rateLimiterOperator, returnValue);
} | @Test
public void testRxTypes() throws Throwable {
RateLimiter rateLimiter = RateLimiter.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(
rxJava3RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(
rxJava3RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Completable.complete());
assertThat(
rxJava3RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Maybe.just("Test"));
assertThat(
rxJava3RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Observable.just("Test"));
assertThat(
rxJava3RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
} |
ObjectFactory loadObjectFactory() {
Class<? extends ObjectFactory> objectFactoryClass = options.getObjectFactoryClass();
ClassLoader classLoader = classLoaderSupplier.get();
ServiceLoader<ObjectFactory> loader = ServiceLoader.load(ObjectFactory.class, classLoader);
if (objectFactoryClass == null) {
return loadSingleObjectFactoryOrDefault(loader);
}
return loadSelectedObjectFactory(loader, objectFactoryClass);
} | @Test
void shouldLoadDefaultObjectFactoryService() {
Options options = () -> null;
ObjectFactoryServiceLoader loader = new ObjectFactoryServiceLoader(
ObjectFactoryServiceLoaderTest.class::getClassLoader,
options);
assertThat(loader.loadObjectFactory(), instanceOf(DefaultObjectFactory.class));
} |
@Override
public CompletableFuture<Boolean> isCompatible(String schemaId, SchemaData schema,
SchemaCompatibilityStrategy strategy) {
try {
SchemaDataValidator.validateSchemaData(schema);
} catch (InvalidSchemaDataException e) {
return FutureUtil.failedFuture(e);
}
return service.isCompatible(schemaId, schema, strategy);
} | @Test
public void testIsCompatibleWithBadSchemaData() {
String schemaId = "test-schema-id";
SchemaCompatibilityStrategy strategy = SchemaCompatibilityStrategy.FULL;
CompletableFuture<Boolean> future = new CompletableFuture<>();
when(underlyingService.isCompatible(eq(schemaId), any(SchemaData.class), eq(strategy)))
.thenReturn(future);
SchemaData schemaData = SchemaData.builder()
.type(SchemaType.BOOLEAN)
.data(new byte[10])
.build();
try {
service.isCompatible(schemaId, schemaData, strategy).get();
fail("Should fail isCompatible check");
} catch (Exception e) {
assertTrue(e.getCause() instanceof InvalidSchemaDataException);
}
verify(underlyingService, times(0))
.isCompatible(eq(schemaId), same(schemaData), eq(strategy));
} |
public boolean transitionToCanceled()
{
return state.setIf(CANCELED, currentState -> !currentState.isDone());
} | @Test
public void testCanceled()
{
StageExecutionStateMachine stateMachine = createStageStateMachine();
assertTrue(stateMachine.transitionToCanceled());
assertFinalState(stateMachine, StageExecutionState.CANCELED);
} |
public static void validate(
FederationPolicyInitializationContext policyContext, String myType)
throws FederationPolicyInitializationException {
if (myType == null) {
throw new FederationPolicyInitializationException(
"The myType parameter" + " should not be null.");
}
if (policyContext == null) {
throw new FederationPolicyInitializationException(
"The FederationPolicyInitializationContext provided is null. Cannot"
+ " reinitialize " + "successfully.");
}
if (policyContext.getFederationStateStoreFacade() == null) {
throw new FederationPolicyInitializationException(
"The FederationStateStoreFacade provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getFederationSubclusterResolver() == null) {
throw new FederationPolicyInitializationException(
"The FederationSubclusterResolver provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getSubClusterPolicyConfiguration() == null) {
throw new FederationPolicyInitializationException(
"The SubClusterPolicyConfiguration provided is null. Cannot "
+ "reinitialize successfully.");
}
String intendedType =
policyContext.getSubClusterPolicyConfiguration().getType();
if (!myType.equals(intendedType)) {
throw new FederationPolicyInitializationException(
"The FederationPolicyConfiguration carries a type (" + intendedType
+ ") different then mine (" + myType
+ "). Cannot reinitialize successfully.");
}
} | @Test(expected = FederationPolicyInitializationException.class)
public void nullType() throws Exception {
FederationPolicyInitializationContextValidator.validate(context, null);
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final List<Path> deleted = new ArrayList<Path>();
for(Map.Entry<Path, TransferStatus> entry : files.entrySet()) {
boolean skip = false;
final Path file = entry.getKey();
for(Path d : deleted) {
if(file.isChild(d)) {
skip = true;
break;
}
}
if(skip) {
continue;
}
deleted.add(file);
callback.delete(file);
try {
final TransferStatus status = entry.getValue();
session.getClient().execute(this.toRequest(file, status), new VoidResponseHandler());
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Cannot delete {0}", e, file);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map(e, file);
}
}
} | @Test
public void testDeleteFile() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new DAVTouchFeature(session).touch(test, new TransferStatus());
assertTrue(new DAVFindFeature(session).find(test));
new DAVDeleteFeature(session).delete(Collections.singletonMap(test, new TransferStatus()), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new DAVFindFeature(session).find(test));
} |
protected boolean dataValidationAndGoOn(ConnectionProxy conn) throws SQLException {
TableRecords beforeRecords = sqlUndoLog.getBeforeImage();
TableRecords afterRecords = sqlUndoLog.getAfterImage();
// Compare current data with before data
// No need to undo if the before data snapshot is equivalent to the after data snapshot.
Result<Boolean> beforeEqualsAfterResult = DataCompareUtils.isRecordsEquals(beforeRecords, afterRecords);
if (beforeEqualsAfterResult.getResult()) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("Stop rollback because there is no data change " +
"between the before data snapshot and the after data snapshot.");
}
// no need to continue the undo.
return false;
}
// Validate if data is dirty.
TableRecords currentRecords = queryCurrentRecords(conn);
// compare with current data and after image.
Result<Boolean> afterEqualsCurrentResult = DataCompareUtils.isRecordsEquals(afterRecords, currentRecords);
if (!afterEqualsCurrentResult.getResult()) {
// If the current data is not equivalent to the after data, then also compare the current
// data with the before data. No need to continue the undo if the current data is equivalent to the before data snapshot.
Result<Boolean> beforeEqualsCurrentResult = DataCompareUtils.isRecordsEquals(beforeRecords, currentRecords);
if (beforeEqualsCurrentResult.getResult()) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("Stop rollback because there is no data change " +
"between the before data snapshot and the current data snapshot.");
}
// no need to continue the undo.
return false;
} else {
if (LOGGER.isInfoEnabled()) {
if (StringUtils.isNotBlank(afterEqualsCurrentResult.getErrMsg())) {
LOGGER.info(afterEqualsCurrentResult.getErrMsg(), afterEqualsCurrentResult.getErrMsgParams());
}
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("check dirty data failed, old and new data are not equal, " +
"tableName:[" + sqlUndoLog.getTableName() + "]," +
"oldRows:[" + JSON.toJSONString(afterRecords.getRows()) + "]," +
"newRows:[" + JSON.toJSONString(currentRecords.getRows()) + "].");
}
throw new SQLUndoDirtyException("Has dirty records when undo.");
}
}
return true;
} | @Test
public void dataValidationUpdate() throws SQLException {
execSQL("INSERT INTO table_name(id, name) VALUES (12345,'aaa');");
execSQL("INSERT INTO table_name(id, name) VALUES (12346,'aaa');");
TableRecords beforeImage = execQuery(tableMeta, "SELECT * FROM table_name WHERE id IN (12345, 12346);");
execSQL("update table_name set name = 'xxx' where id in (12345, 12346);");
TableRecords afterImage = execQuery(tableMeta, "SELECT * FROM table_name WHERE id IN (12345, 12346);");
SQLUndoLog sqlUndoLog = new SQLUndoLog();
sqlUndoLog.setSqlType(SQLType.UPDATE);
sqlUndoLog.setTableMeta(tableMeta);
sqlUndoLog.setTableName("table_name");
sqlUndoLog.setBeforeImage(beforeImage);
sqlUndoLog.setAfterImage(afterImage);
TestUndoExecutor spy = new TestUndoExecutor(sqlUndoLog, false);
// case 1: normal case before:aaa -> after:xxx -> current:xxx
Assertions.assertTrue(spy.dataValidationAndGoOn(connection));
// case 2: dirty data before:aaa -> after:xxx -> current:yyy
execSQL("update table_name set name = 'yyy' where id in (12345, 12346);");
try {
spy.dataValidationAndGoOn(connection);
Assertions.fail();
} catch (Exception e) {
Assertions.assertTrue(e instanceof SQLException);
}
// case 3: before == current before:aaa -> after:xxx -> current:aaa
execSQL("update table_name set name = 'aaa' where id in (12345, 12346);");
Assertions.assertFalse(spy.dataValidationAndGoOn(connection));
// case 4: before == after before:aaa -> after:aaa
afterImage = execQuery(tableMeta, "SELECT * FROM table_name WHERE id IN (12345, 12346);");
sqlUndoLog.setAfterImage(afterImage);
Assertions.assertFalse(spy.dataValidationAndGoOn(connection));
} |
@Override
public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options) {
NodeProvider provider = new LeastLoadedBrokerOrActiveKController();
final KafkaFutureImpl<QuorumInfo> future = new KafkaFutureImpl<>();
final long now = time.milliseconds();
final Call call = new Call(
"describeMetadataQuorum", calcDeadlineMs(now, options.timeoutMs()), provider) {
private QuorumInfo.ReplicaState translateReplicaState(DescribeQuorumResponseData.ReplicaState replica) {
return new QuorumInfo.ReplicaState(
replica.replicaId(),
replica.replicaDirectoryId() == null ? Uuid.ZERO_UUID : replica.replicaDirectoryId(),
replica.logEndOffset(),
replica.lastFetchTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastFetchTimestamp()),
replica.lastCaughtUpTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastCaughtUpTimestamp()));
}
private QuorumInfo createQuorumResult(final DescribeQuorumResponseData.PartitionData partition, DescribeQuorumResponseData.NodeCollection nodeCollection) {
List<QuorumInfo.ReplicaState> voters = partition.currentVoters().stream()
.map(this::translateReplicaState)
.collect(Collectors.toList());
List<QuorumInfo.ReplicaState> observers = partition.observers().stream()
.map(this::translateReplicaState)
.collect(Collectors.toList());
Map<Integer, QuorumInfo.Node> nodes = nodeCollection.stream().map(n -> {
List<RaftVoterEndpoint> endpoints = n.listeners().stream()
.map(l -> new RaftVoterEndpoint(l.name(), l.host(), l.port()))
.collect(Collectors.toList());
return new QuorumInfo.Node(n.nodeId(), endpoints);
}).collect(Collectors.toMap(QuorumInfo.Node::nodeId, Function.identity()));
return new QuorumInfo(
partition.leaderId(),
partition.leaderEpoch(),
partition.highWatermark(),
voters,
observers,
nodes
);
}
@Override
DescribeQuorumRequest.Builder createRequest(int timeoutMs) {
return new Builder(DescribeQuorumRequest.singletonRequest(
new TopicPartition(CLUSTER_METADATA_TOPIC_NAME, CLUSTER_METADATA_TOPIC_PARTITION.partition())));
}
@Override
void handleResponse(AbstractResponse response) {
final DescribeQuorumResponse quorumResponse = (DescribeQuorumResponse) response;
if (quorumResponse.data().errorCode() != Errors.NONE.code()) {
throw Errors.forCode(quorumResponse.data().errorCode()).exception(quorumResponse.data().errorMessage());
}
if (quorumResponse.data().topics().size() != 1) {
String msg = String.format("DescribeMetadataQuorum received %d topics when 1 was expected",
quorumResponse.data().topics().size());
log.debug(msg);
throw new UnknownServerException(msg);
}
DescribeQuorumResponseData.TopicData topic = quorumResponse.data().topics().get(0);
if (!topic.topicName().equals(CLUSTER_METADATA_TOPIC_NAME)) {
String msg = String.format("DescribeMetadataQuorum received a topic with name %s when %s was expected",
topic.topicName(), CLUSTER_METADATA_TOPIC_NAME);
log.debug(msg);
throw new UnknownServerException(msg);
}
if (topic.partitions().size() != 1) {
String msg = String.format("DescribeMetadataQuorum received a topic %s with %d partitions when 1 was expected",
topic.topicName(), topic.partitions().size());
log.debug(msg);
throw new UnknownServerException(msg);
}
DescribeQuorumResponseData.PartitionData partition = topic.partitions().get(0);
if (partition.partitionIndex() != CLUSTER_METADATA_TOPIC_PARTITION.partition()) {
String msg = String.format("DescribeMetadataQuorum received a single partition with index %d when %d was expected",
partition.partitionIndex(), CLUSTER_METADATA_TOPIC_PARTITION.partition());
log.debug(msg);
throw new UnknownServerException(msg);
}
if (partition.errorCode() != Errors.NONE.code()) {
throw Errors.forCode(partition.errorCode()).exception(partition.errorMessage());
}
future.complete(createQuorumResult(partition, quorumResponse.data().nodes()));
}
@Override
void handleFailure(Throwable throwable) {
future.completeExceptionally(throwable);
}
};
runnable.call(call, now);
return new DescribeMetadataQuorumResult(future);
} | @Test
public void testDescribeMetadataQuorumRetriableError() throws Exception {
try (final AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.DESCRIBE_QUORUM.id,
ApiKeys.DESCRIBE_QUORUM.oldestVersion(),
ApiKeys.DESCRIBE_QUORUM.latestVersion()));
// First request fails with a NOT_LEADER_OR_FOLLOWER error (which is retriable)
env.kafkaClient().prepareResponse(
body -> body instanceof DescribeQuorumRequest,
prepareDescribeQuorumResponse(Errors.NONE, Errors.NOT_LEADER_OR_FOLLOWER, false, false, false, false, false));
// The second request succeeds
env.kafkaClient().prepareResponse(
body -> body instanceof DescribeQuorumRequest,
prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, false, false, false, false, false));
KafkaFuture<QuorumInfo> future = env.adminClient().describeMetadataQuorum().quorumInfo();
QuorumInfo quorumInfo = future.get();
assertEquals(defaultQuorumInfo(false), quorumInfo);
}
} |
public static <T> boolean isNotEmpty(T[] array) {
return !isEmpty(array);
} | @Test
public void assertIsNotEmpty() {
String[] array = new String[0];
Assert.isTrue(!ArrayUtil.isNotEmpty(array));
} |
public void writeToStream(OutputStream os) throws IOException
{
if (glyphIds.isEmpty() && uniToGID.isEmpty())
{
LOG.info("font subset is empty");
}
addCompoundReferences();
try (DataOutputStream out = new DataOutputStream(os))
{
long[] newLoca = new long[glyphIds.size() + 1];
// generate tables in dependency order
byte[] head = buildHeadTable();
byte[] hhea = buildHheaTable();
byte[] maxp = buildMaxpTable();
byte[] name = buildNameTable();
byte[] os2 = buildOS2Table();
byte[] glyf = buildGlyfTable(newLoca);
byte[] loca = buildLocaTable(newLoca);
byte[] cmap = buildCmapTable();
byte[] hmtx = buildHmtxTable();
byte[] post = buildPostTable();
// save to TTF in optimized order
Map<String, byte[]> tables = new TreeMap<>();
if (os2 != null)
{
tables.put(OS2WindowsMetricsTable.TAG, os2);
}
if (cmap != null)
{
tables.put(CmapTable.TAG, cmap);
}
tables.put(GlyphTable.TAG, glyf);
tables.put(HeaderTable.TAG, head);
tables.put(HorizontalHeaderTable.TAG, hhea);
tables.put(HorizontalMetricsTable.TAG, hmtx);
tables.put(IndexToLocationTable.TAG, loca);
tables.put(MaximumProfileTable.TAG, maxp);
if (name != null)
{
tables.put(NamingTable.TAG, name);
}
if (post != null)
{
tables.put(PostScriptTable.TAG, post);
}
// copy all other tables
for (Map.Entry<String, TTFTable> entry : ttf.getTableMap().entrySet())
{
String tag = entry.getKey();
TTFTable table = entry.getValue();
if (!tables.containsKey(tag) && (keepTables == null || keepTables.contains(tag)))
{
tables.put(tag, ttf.getTableBytes(table));
}
}
// calculate checksum
long checksum = writeFileHeader(out, tables.size());
long offset = 12L + 16L * tables.size();
for (Map.Entry<String, byte[]> entry : tables.entrySet())
{
checksum += writeTableHeader(out, entry.getKey(), offset, entry.getValue());
offset += (entry.getValue().length + 3L) / 4 * 4;
}
checksum = 0xB1B0AFBAL - (checksum & 0xffffffffL);
// update checksumAdjustment in 'head' table
head[8] = (byte)(checksum >>> 24);
head[9] = (byte)(checksum >>> 16);
head[10] = (byte)(checksum >>> 8);
head[11] = (byte)checksum;
for (byte[] bytes : tables.values())
{
writeTableBody(out, bytes);
}
}
} | @Test
void testEmptySubset() throws IOException
{
TrueTypeFont x = new TTFParser().parse(new RandomAccessReadBufferedFile(
"src/test/resources/ttf/LiberationSans-Regular.ttf"));
TTFSubsetter ttfSubsetter = new TTFSubsetter(x);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ttfSubsetter.writeToStream(baos);
try (TrueTypeFont subset = new TTFParser(true)
.parse(new RandomAccessReadBuffer(baos.toByteArray())))
{
assertEquals(1, subset.getNumberOfGlyphs());
assertEquals(0, subset.nameToGID(".notdef"));
assertNotNull(subset.getGlyph().getGlyph(0));
}
} |
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
final Map<TaskId, Set<TopicPartition>> standbyTasks) {
log.info("Handle new assignment with:\n" +
"\tNew active tasks: {}\n" +
"\tNew standby tasks: {}\n" +
"\tExisting active tasks: {}\n" +
"\tExisting standby tasks: {}",
activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());
topologyMetadata.addSubscribedTopicsFromAssignment(
activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
logPrefix
);
final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
final Set<TaskId> tasksToLock =
tasks.allTaskIds().stream()
.filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
.collect(Collectors.toSet());
maybeLockTasks(tasksToLock);
// first put aside those unrecognized tasks because of unknown named-topologies
tasks.clearPendingTasksToCreate();
tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));
// first rectify all existing tasks:
// 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
// 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
// 3. otherwise, close them since they are no longer owned
final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
if (stateUpdater == null) {
handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
} else {
handleTasksWithStateUpdater(
activeTasksToCreate,
standbyTasksToCreate,
tasksToRecycle,
tasksToCloseClean,
failedTasks
);
failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
}
final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);
maybeUnlockTasks(tasksToLock);
failedTasks.putAll(taskCloseExceptions);
maybeThrowTaskExceptions(failedTasks);
createNewTasks(activeTasksToCreate, standbyTasksToCreate);
} | @Test
public void shouldLockActiveOnHandleAssignmentWithProcessingThreads() {
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true, true);
when(tasks.allTaskIds()).thenReturn(mkSet(taskId00, taskId01));
final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
taskManager.handleAssignment(
mkMap(mkEntry(taskId00, taskId00Partitions)),
mkMap(mkEntry(taskId01, taskId01Partitions))
);
verify(schedulingTaskManager).lockTasks(mkSet(taskId00, taskId01));
verify(schedulingTaskManager).unlockTasks(mkSet(taskId00, taskId01));
} |
@Override
public RedisClusterNode clusterGetNodeForKey(byte[] key) {
int slot = executorService.getConnectionManager().calcSlot(key);
return clusterGetNodeForSlot(slot);
} | @Test
public void testClusterGetNodeForKey() {
RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes());
assertThat(node).isNotNull();
} |
public static SourceOperationExecutor create(
PipelineOptions options,
SourceOperationRequest request,
CounterSet counters,
DataflowExecutionContext<?> executionContext,
String stageName)
throws Exception {
Preconditions.checkNotNull(request, "SourceOperationRequest must be non-null");
Preconditions.checkNotNull(executionContext, "executionContext must be non-null");
DataflowOperationContext operationContext =
executionContext.createOperationContext(
NameContext.create(
stageName, request.getOriginalName(), request.getSystemName(), request.getName()));
return new WorkerCustomSourceOperationExecutor(
options, request, counters, executionContext, operationContext);
} | @Test
public void testCreateDefault() throws Exception {
SourceOperationRequest request =
new SourceOperationRequest()
.setName("name")
.setOriginalName("original")
.setSystemName("system")
.setStageName("stage")
.setSplit(new SourceSplitRequest());
DataflowOperationContext mockOperation = Mockito.mock(DataflowOperationContext.class);
Mockito.when(executionContext.createOperationContext(Mockito.isA(NameContext.class)))
.thenReturn(mockOperation);
SourceOperationExecutor sourceOperationExecutor =
SourceOperationExecutorFactory.create(
PipelineOptionsFactory.create(), request, null, executionContext, "STAGE");
assertThat(sourceOperationExecutor, instanceOf(WorkerCustomSourceOperationExecutor.class));
} |
@Override
public long size() {
return size;
} | @Test
public void testSize() {
System.out.println("size");
assertEquals(58064, corpus.size());
} |