focal_method | test_case
---|---
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain) throws IOException, ServletException {
String path = ((HttpServletRequest) req).getRequestURI().replaceFirst(((HttpServletRequest) req).getContextPath(), "");
MAX_AGE_BY_PATH.entrySet().stream()
.filter(m -> path.startsWith(m.getKey()))
.map(Map.Entry::getValue)
.findFirst()
.ifPresent(maxAge -> ((HttpServletResponse) resp).addHeader(CACHE_CONTROL_HEADER, format(MAX_AGE_TEMPLATE, maxAge)));
chain.doFilter(req, resp);
} | @Test
public void max_age_is_set_to_five_minutes_on_css_of_static() throws Exception {
HttpServletRequest request = newRequest("/static/css/custom.css");
underTest.doFilter(request, response, chain);
verify(response).addHeader("Cache-Control", format("max-age=%s", 300));
} |
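A minimal sketch of the constants this filter and its test assume; the names come from the snippet, while the values are inferred from the asserted header and are assumptions, not the project's actual table:
// Hypothetical values consistent with the test: "/static" paths get a
// five-minute max-age rendered through the "max-age=%s" template.
private static final String CACHE_CONTROL_HEADER = "Cache-Control";
private static final String MAX_AGE_TEMPLATE = "max-age=%s";
private static final Map<String, Integer> MAX_AGE_BY_PATH = Map.of("/static", 300);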
public static DefaultCryptoKeyReaderBuilder builder() {
return new DefaultCryptoKeyReaderBuilder();
} | @Test
public void testBuild() throws Exception {
Map<String, String> publicKeys = new HashMap<>();
publicKeys.put("key1", "file:///path/to/public1.key");
publicKeys.put("key2", "file:///path/to/public2.key");
Map<String, String> privateKeys = new HashMap<>();
privateKeys.put("key3", "file:///path/to/private3.key");
DefaultCryptoKeyReader keyReader = DefaultCryptoKeyReader.builder()
.defaultPublicKey("file:///path/to/default-public.key")
.defaultPrivateKey("file:///path/to/default-private.key")
.publicKey("key4", "file:///path/to/public4.key").publicKeys(publicKeys)
.publicKey("key5", "file:///path/to/public5.key").privateKey("key6", "file:///path/to/private6.key")
.privateKeys(privateKeys).privateKey("key7", "file:///path/to/private7.key").build();
Field defaultPublicKeyField = keyReader.getClass().getDeclaredField("defaultPublicKey");
defaultPublicKeyField.setAccessible(true);
Field defaultPrivateKeyField = keyReader.getClass().getDeclaredField("defaultPrivateKey");
defaultPrivateKeyField.setAccessible(true);
Field publicKeysField = keyReader.getClass().getDeclaredField("publicKeys");
publicKeysField.setAccessible(true);
Field privateKeysField = keyReader.getClass().getDeclaredField("privateKeys");
privateKeysField.setAccessible(true);
Map<String, String> expectedPublicKeys = new HashMap<>();
expectedPublicKeys.put("key1", "file:///path/to/public1.key");
expectedPublicKeys.put("key2", "file:///path/to/public2.key");
expectedPublicKeys.put("key4", "file:///path/to/public4.key");
expectedPublicKeys.put("key5", "file:///path/to/public5.key");
Map<String, String> expectedPrivateKeys = new HashMap<>();
expectedPrivateKeys.put("key3", "file:///path/to/private3.key");
expectedPrivateKeys.put("key6", "file:///path/to/private6.key");
expectedPrivateKeys.put("key7", "file:///path/to/private7.key");
assertEquals((String) defaultPublicKeyField.get(keyReader), "file:///path/to/default-public.key");
assertEquals((String) defaultPrivateKeyField.get(keyReader), "file:///path/to/default-private.key");
assertEquals((Map<String, String>) publicKeysField.get(keyReader), expectedPublicKeys);
assertEquals((Map<String, String>) privateKeysField.get(keyReader), expectedPrivateKeys);
} |
public static String dataToAvroSchemaJson(DataSchema dataSchema)
{
return dataToAvroSchemaJson(dataSchema, new DataToAvroSchemaTranslationOptions());
} | @Test(dataProvider = "schemaWithNamespaceOverride")
public void testSchemaWithNamespaceOverride(String schemaText, String expected) throws IOException
{
DataToAvroSchemaTranslationOptions options = new DataToAvroSchemaTranslationOptions(JsonBuilder.Pretty.SPACES).setOverrideNamespace(true);
String avroSchemaJson = SchemaTranslator.dataToAvroSchemaJson(TestUtil.dataSchemaFromString(schemaText), options);
assertEquals(avroSchemaJson, expected);
} |
public static RangeQueryBuilder rangeQuery(String name) {
return new RangeQueryBuilder(name);
} | @Test
public void testRangeQuery() throws Exception {
assertEquals("{\"range\":{\"k\":{\"lt\":123}}}",
toJson(QueryBuilders.rangeQuery("k").lt(123)));
assertEquals("{\"range\":{\"k\":{\"gt\":123}}}",
toJson(QueryBuilders.rangeQuery("k").gt(123)));
assertEquals("{\"range\":{\"k\":{\"gte\":12345678}}}",
toJson(QueryBuilders.rangeQuery("k").gte(12345678)));
assertEquals("{\"range\":{\"k\":{\"lte\":12345678}}}",
toJson(QueryBuilders.rangeQuery("k").lte(12345678)));
assertEquals("{\"range\":{\"k\":{\"gt\":123,\"lt\":345}}}",
toJson(QueryBuilders.rangeQuery("k").gt(123).lt(345)));
assertEquals("{\"range\":{\"k\":{\"gt\":-456.6,\"lt\":12.3}}}",
toJson(QueryBuilders.rangeQuery("k").lt(12.3f).gt(-456.6f)));
assertEquals("{\"range\":{\"k\":{\"gt\":6789.33,\"lte\":9999.99}}}",
toJson(QueryBuilders.rangeQuery("k").gt(6789.33f).lte(9999.99f)));
assertEquals("{\"range\":{\"k\":{\"gte\":1,\"lte\":\"zzz\"}}}",
toJson(QueryBuilders.rangeQuery("k").gte(1).lte("zzz")));
assertEquals("{\"range\":{\"k\":{\"gte\":\"zzz\"}}}",
toJson(QueryBuilders.rangeQuery("k").gte("zzz")));
assertEquals("{\"range\":{\"k\":{\"gt\":\"aaa\",\"lt\":\"zzz\"}}}",
toJson(QueryBuilders.rangeQuery("k").gt("aaa").lt("zzz")));
} |
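One behavior the suite pins down in passing: the mixed-order assertion (.lt(12.3f).gt(-456.6f) rendering "gt" first) suggests the builder keeps each bound in a fixed slot, so call order presumably never affects JSON field order. A hedged illustration; this reversed call is not part of the original test:
toJson(QueryBuilders.rangeQuery("k").gt(-456.6f).lt(12.3f)); // presumably the same {"gt":-456.6,"lt":12.3} JSON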
public static String getTemporaryObjectName(
GSBlobIdentifier finalBlobIdentifier, UUID temporaryObjectId) {
return getTemporaryObjectPartialName(finalBlobIdentifier) + temporaryObjectId.toString();
} | @Test
public void shouldProperlyConstructTemporaryObjectName() {
GSBlobIdentifier identifier = new GSBlobIdentifier("foo", "bar");
UUID temporaryObjectId = UUID.fromString("f09c43e5-ea49-4537-a406-0586f8f09d47");
String partialName = BlobUtils.getTemporaryObjectName(identifier, temporaryObjectId);
assertEquals(".inprogress/foo/bar/f09c43e5-ea49-4537-a406-0586f8f09d47", partialName);
} |
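A hedged reconstruction of the partial-name format implied by the assertion: ".inprogress/" + bucket + "/" + object + "/", to which the UUID is appended. The helper below is hypothetical and only mirrors what getTemporaryObjectPartialName appears to produce:
static String temporaryObjectPartialName(String bucket, String object) {
    return String.format(".inprogress/%s/%s/", bucket, object); // ".inprogress/foo/bar/"
}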
int maxCongestionWindow()
{
return maxCwnd;
} | @Test
void shouldSetWindowLengthFromContext()
{
final CubicCongestionControl cubicCongestionControl = new CubicCongestionControl(
0, channelWithoutWindow, 0, 0, bigTermLength, MTU_LENGTH, null, null, nanoClock, context, countersManager);
assertEquals(CONTEXT_RECEIVER_WINDOW_LENGTH / MTU_LENGTH, cubicCongestionControl.maxCongestionWindow());
} |
public static double getSquaredDistanceToLine(
final double pFromX, final double pFromY,
final double pAX, final double pAY, final double pBX, final double pBY
) {
return getSquaredDistanceToProjection(pFromX, pFromY, pAX, pAY, pBX, pBY,
getProjectionFactorToLine(pFromX, pFromY, pAX, pAY, pBX, pBY));
} | @Test
public void test_getSquareDistanceToLine() {
final int xA = 100;
final int yA = 200;
final int deltaX = 10;
final int deltaY = 20;
Assert.assertEquals(0,
Distance.getSquaredDistanceToLine(xA, yA, xA, yA, xA, yA), mDelta);
Assert.assertEquals(deltaX * deltaX,
Distance.getSquaredDistanceToLine(xA, yA, xA + deltaX, yA, xA + deltaX, yA), mDelta);
Assert.assertEquals(deltaY * deltaY,
Distance.getSquaredDistanceToLine(xA, yA, xA, yA + deltaY, xA, yA + deltaY), mDelta);
Assert.assertEquals(20 * 20,
Distance.getSquaredDistanceToLine(xA, yA + 20, xA, yA, xA + 100, yA), mDelta);
Assert.assertEquals(30 * 30,
Distance.getSquaredDistanceToLine(xA - 10, yA - 30, xA, yA, xA + 100, yA), mDelta);
Assert.assertEquals(70 * 70,
Distance.getSquaredDistanceToLine(xA + 200, yA - 70, xA, yA, xA + 100, yA), mDelta);
Assert.assertEquals(7000 * 7000,
Distance.getSquaredDistanceToLine(xA + 200, yA - 7000, xA, yA, xA + 200, yA), mDelta);
Assert.assertEquals(7000 * 7000,
Distance.getSquaredDistanceToLine(xA + 200, yA - 7000, xA, yA, xA + 1000, yA), mDelta);
} |
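A worked check of one case above, assuming the projection semantics the test implies: (xA, yA + 20) projects onto the line through (xA, yA) and (xA + 100, yA) at (xA, yA), leaving a purely vertical offset of 20 and hence a squared distance of 400:
double d2 = Distance.getSquaredDistanceToLine(100, 220, 100, 200, 200, 200); // 400.0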
public static Long jsToInteger( Object value, Class<?> clazz ) {
if ( Number.class.isAssignableFrom( clazz ) ) {
return ( (Number) value ).longValue();
} else {
String classType = clazz.getName();
if ( classType.equalsIgnoreCase( "java.lang.String" ) ) {
return Long.valueOf( (String) value );
} else if ( classType.equalsIgnoreCase( JS_UNDEFINED ) ) {
return null;
} else if ( classType.equalsIgnoreCase( JS_NATIVE_NUM ) ) {
Number nb = Context.toNumber( value );
return nb.longValue();
} else if ( classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ ) ) {
// Is it a Value?
//
try {
Value v = (Value) Context.jsToJava( value, Value.class );
return v.getInteger();
} catch ( Exception e2 ) {
String string = Context.toString( value );
return Long.parseLong( Const.trim( string ) );
}
} else {
return Long.parseLong( value.toString() );
}
}
} | @Test( expected = NumberFormatException.class )
public void jsToInteger_String_Unparseable() throws Exception {
JavaScriptUtils.jsToInteger( "q", String.class );
} |
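A usage sketch of the String branch, consistent with the exception the test expects:
Long ok = JavaScriptUtils.jsToInteger( "42", String.class ); // 42L
JavaScriptUtils.jsToInteger( "q", String.class );            // throws NumberFormatException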
public long getFailedSyncCount() {
final AtomicLong result = new AtomicLong();
distroRecords.forEach((s, distroRecord) -> result.addAndGet(distroRecord.getFailedSyncCount()));
return result.get();
} | @Test
void testGetFailedSyncCount() {
DistroRecordsHolder.getInstance().getRecord("testGetFailedSyncCount");
Optional<DistroRecord> actual = DistroRecordsHolder.getInstance().getRecordIfExist("testGetFailedSyncCount");
assertTrue(actual.isPresent());
assertEquals(0, DistroRecordsHolder.getInstance().getFailedSyncCount());
actual.get().syncFail();
assertEquals(1, DistroRecordsHolder.getInstance().getFailedSyncCount());
} |
@Override
public int compareTo(Resource other) {
checkArgument(other != null && getClass() == other.getClass() && name.equals(other.name));
return value.compareTo(other.value);
} | @Test
void testCompareToFailDifferentType() {
// initialized as different anonymous classes
final Resource resource1 = new TestResource(0.0) {};
final Resource resource2 = new TestResource(0.0) {};
assertThatThrownBy(() -> resource1.compareTo(resource2))
.isInstanceOf(IllegalArgumentException.class);
} |
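Why the test above throws, as a sketch: each anonymous subclass gets its own Class object, so the getClass() comparison inside checkArgument fails even though both resources wrap the same value:
boolean sameClass = new TestResource(0.0) {}.getClass()
        == new TestResource(0.0) {}.getClass(); // false -> IllegalArgumentException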
@Override
public Processor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>> get() {
return new ContextualProcessor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>>() {
private KTableValueGetter<KO, VO> foreignValues;
@Override
public void init(final ProcessorContext<K, SubscriptionResponseWrapper<VO>> context) {
super.init(context);
foreignValues = foreignValueGetterSupplier.get();
foreignValues.init(context);
}
@Override
public void process(final Record<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> record) {
Objects.requireNonNull(record.key(), "This processor should never see a null key.");
Objects.requireNonNull(record.value(), "This processor should never see a null value.");
final ValueAndTimestamp<SubscriptionWrapper<K>> valueAndTimestamp = record.value().newValue;
Objects.requireNonNull(valueAndTimestamp, "This processor should never see a null newValue.");
final SubscriptionWrapper<K> value = valueAndTimestamp.value();
if (value.getVersion() > SubscriptionWrapper.CURRENT_VERSION) {
//Guard against modifications to SubscriptionWrapper. Need to ensure that there is compatibility
//with previous versions to enable rolling upgrades. Must develop a strategy for upgrading
//from older SubscriptionWrapper versions to newer versions.
throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
}
final ValueAndTimestamp<VO> foreignValueAndTime =
record.key().getForeignKey() == null ?
null :
foreignValues.get(record.key().getForeignKey());
final long resultTimestamp =
foreignValueAndTime == null ?
valueAndTimestamp.timestamp() :
Math.max(valueAndTimestamp.timestamp(), foreignValueAndTime.timestamp());
switch (value.getInstruction()) {
case DELETE_KEY_AND_PROPAGATE:
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<VO>(
value.getHash(),
null,
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
break;
case PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE:
//This one needs to go through regardless of LEFT or INNER join, since the extracted FK was
//changed and there is no match for it. We must propagate the (key, null) to ensure that the
//downstream consumers are alerted to this fact.
final VO valueToSend = foreignValueAndTime == null ? null : foreignValueAndTime.value();
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<>(
value.getHash(),
valueToSend,
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
break;
case PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE:
if (foreignValueAndTime != null) {
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<>(
value.getHash(),
foreignValueAndTime.value(),
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
}
break;
case DELETE_KEY_NO_PROPAGATE:
break;
default:
throw new IllegalStateException("Unhandled instruction: " + value.getInstruction());
}
}
};
} | @Test
public void shouldPropagateNullIfNoFKAvailableV1() {
final MockProcessorContext<String, SubscriptionResponseWrapper<String>> context = new MockProcessorContext<>();
processor.init(context);
final SubscriptionWrapper<String> newValue = new SubscriptionWrapper<>(
new long[]{1L},
Instruction.PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE,
"pk1",
SubscriptionWrapper.VERSION_1,
12);
Record<CombinedKey<String, String>, Change<ValueAndTimestamp<SubscriptionWrapper<String>>>> record =
new Record<>(
new CombinedKey<>("fk1", "pk1"),
new Change<>(ValueAndTimestamp.make(newValue, 1L), null),
1L
);
processor.process(record);
List<CapturedForward<? extends String, ? extends SubscriptionResponseWrapper<String>>> forwarded = context.forwarded();
assertEquals(1, forwarded.size());
assertEquals(
new Record<>(
"pk1",
new SubscriptionResponseWrapper<>(
newValue.getHash(),
"foo",
12
),
1L
),
forwarded.get(0).record());
record = new Record<>(
new CombinedKey<>("fk9000", "pk1"),
new Change<>(ValueAndTimestamp.make(newValue, 1L), null),
1L
);
processor.process(record);
// propagate null if there is no match
forwarded = context.forwarded();
assertEquals(2, forwarded.size());
assertEquals(
new Record<>(
"pk1",
new SubscriptionResponseWrapper<>(
newValue.getHash(),
null,
12
),
1L
),
forwarded.get(1).record());
} |
public ReliableTopicConfig setStatisticsEnabled(boolean statisticsEnabled) {
this.statisticsEnabled = statisticsEnabled;
return this;
} | @Test
public void setStatisticsEnabled() {
ReliableTopicConfig config = new ReliableTopicConfig("foo");
boolean newValue = !DEFAULT_STATISTICS_ENABLED;
config.setStatisticsEnabled(newValue);
assertEquals(newValue, config.isStatisticsEnabled());
} |
public static PulsarLogCollectClient getPulsarLogCollectClient() {
return PULSAR_LOG_COLLECT_CLIENT;
} | @Test
public void testGetPulsarLogCollectClient() {
Assertions.assertEquals(LoggingPulsarPluginDataHandler.getPulsarLogCollectClient().getClass(), PulsarLogCollectClient.class);
} |
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
GetApplicationAttemptsRequest request) throws YarnException, IOException {
ApplicationId appId = request.getApplicationId();
UserGroupInformation callerUGI = getCallerUgi(appId,
AuditConstants.GET_APP_ATTEMPTS);
RMApp application = verifyUserAccessForRMApp(appId, callerUGI,
AuditConstants.GET_APP_ATTEMPTS, ApplicationAccessType.VIEW_APP,
false);
boolean allowAccess = checkAccess(callerUGI, application.getUser(),
ApplicationAccessType.VIEW_APP, application);
GetApplicationAttemptsResponse response = null;
if (allowAccess) {
Map<ApplicationAttemptId, RMAppAttempt> attempts = application
.getAppAttempts();
List<ApplicationAttemptReport> listAttempts =
new ArrayList<ApplicationAttemptReport>();
Iterator<Map.Entry<ApplicationAttemptId, RMAppAttempt>> iter = attempts
.entrySet().iterator();
while (iter.hasNext()) {
listAttempts.add(iter.next().getValue()
.createApplicationAttemptReport());
}
response = GetApplicationAttemptsResponse.newInstance(listAttempts);
} else {
throw new YarnException("User " + callerUGI.getShortUserName()
+ " does not have privilege to see this application " + appId);
}
return response;
} | @Test
public void testGetApplicationAttempts() throws YarnException, IOException {
ClientRMService rmService = createRMService();
GetApplicationAttemptsRequest request = recordFactory
.newRecordInstance(GetApplicationAttemptsRequest.class);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(123456, 1), 1);
request.setApplicationId(ApplicationId.newInstance(123456, 1));
try {
GetApplicationAttemptsResponse response = rmService
.getApplicationAttempts(request);
Assert.assertEquals(1, response.getApplicationAttemptList().size());
Assert.assertEquals(attemptId, response.getApplicationAttemptList()
.get(0).getApplicationAttemptId());
} catch (ApplicationNotFoundException ex) {
Assert.fail(ex.getMessage());
}
} |
public static <K, E> Collector<E, ImmutableSetMultimap.Builder<K, E>, ImmutableSetMultimap<K, E>> unorderedIndex(Function<? super E, K> keyFunction) {
return unorderedIndex(keyFunction, Function.identity());
} | @Test
public void unorderedIndex_fails_if_key_function_is_null() {
assertThatThrownBy(() -> unorderedIndex(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("Key function can't be null");
} |
@Override
public ClusterInfo getClusterInfo() {
try {
long startTime = Time.now();
Collection<SubClusterInfo> subClustersActive = federationFacade.getActiveSubClusters();
Class[] argsClasses = new Class[]{};
Object[] args = new Object[]{};
ClientMethod remoteMethod = new ClientMethod("getClusterInfo", argsClasses, args);
Map<SubClusterInfo, ClusterInfo> subClusterInfoMap =
invokeConcurrent(subClustersActive, remoteMethod, ClusterInfo.class);
FederationClusterInfo federationClusterInfo = new FederationClusterInfo();
subClusterInfoMap.forEach((subClusterInfo, clusterInfo) -> {
SubClusterId subClusterId = subClusterInfo.getSubClusterId();
clusterInfo.setSubClusterId(subClusterId.getId());
federationClusterInfo.getList().add(clusterInfo);
});
long stopTime = Time.now();
RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_CLUSTERINFO,
TARGET_WEB_SERVICE);
routerMetrics.succeededGetClusterInfoRetrieved(stopTime - startTime);
return federationClusterInfo;
} catch (NotFoundException e) {
routerMetrics.incrGetClusterInfoFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CLUSTERINFO, UNKNOWN,
TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException("Get all active sub cluster(s) error.", e);
} catch (YarnException | IOException e) {
routerMetrics.incrGetClusterInfoFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CLUSTERINFO, UNKNOWN,
TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException("getClusterInfo error.", e);
}
routerMetrics.incrGetClusterInfoFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CLUSTERINFO, UNKNOWN,
TARGET_WEB_SERVICE, "getClusterInfo error.");
throw new RuntimeException("getClusterInfo error.");
} | @Test
public void testGetClusterInfo() {
ClusterInfo clusterInfos = interceptor.getClusterInfo();
Assert.assertNotNull(clusterInfos);
Assert.assertTrue(clusterInfos instanceof FederationClusterInfo);
FederationClusterInfo federationClusterInfos =
(FederationClusterInfo) (clusterInfos);
List<ClusterInfo> fedClusterInfosList = federationClusterInfos.getList();
Assert.assertNotNull(fedClusterInfosList);
Assert.assertEquals(4, fedClusterInfosList.size());
List<String> subClusterIds = subClusters.stream().map(
subClusterId -> subClusterId.getId()).collect(Collectors.toList());
MockRM mockRM = interceptor.getMockRM();
String yarnVersion = YarnVersionInfo.getVersion();
for (ClusterInfo clusterInfo : fedClusterInfosList) {
String subClusterId = clusterInfo.getSubClusterId();
// Check subClusterId
Assert.assertTrue(subClusterIds.contains(subClusterId));
// Check state
String clusterState = mockRM.getServiceState().toString();
Assert.assertEquals(clusterState, clusterInfo.getState());
// Check rmStateStoreName
String rmStateStoreName =
mockRM.getRMContext().getStateStore().getClass().getName();
Assert.assertEquals(rmStateStoreName, clusterInfo.getRMStateStore());
// Check RM Version
Assert.assertEquals(yarnVersion, clusterInfo.getRMVersion());
// Check haZooKeeperConnectionState
String rmHAZookeeperConnectionState = mockRM.getRMContext().getHAZookeeperConnectionState();
Assert.assertEquals(rmHAZookeeperConnectionState,
clusterInfo.getHAZookeeperConnectionState());
}
} |
public static void addThreadBlockedTimeMetric(final String threadId,
final StreamThreadTotalBlockedTime blockedTime,
final StreamsMetricsImpl streamsMetrics) {
streamsMetrics.addThreadLevelMutableMetric(
BLOCKED_TIME,
BLOCKED_TIME_DESCRIPTION,
threadId,
(config, now) -> blockedTime.compute()
);
} | @Test
public void shouldAddTotalBlockedTimeMetric() {
// Given:
final double startTime = 123.45;
final StreamThreadTotalBlockedTime blockedTime = mock(StreamThreadTotalBlockedTime.class);
when(blockedTime.compute()).thenReturn(startTime);
// When:
ThreadMetrics.addThreadBlockedTimeMetric(
"burger",
blockedTime,
streamsMetrics
);
// Then:
final ArgumentCaptor<Gauge<Double>> captor = gaugeCaptor();
verify(streamsMetrics).addThreadLevelMutableMetric(
eq("blocked-time-ns-total"),
eq("The total time the thread spent blocked on kafka in nanoseconds"),
eq("burger"),
captor.capture()
);
assertThat(captor.getValue().value(null, 678L), is(startTime));
} |
public static void updateTmpDirectoriesInConfiguration(
Configuration configuration, @Nullable String defaultDirs) {
if (configuration.contains(CoreOptions.TMP_DIRS)) {
LOG.info(
"Overriding Flink's temporary file directories with those "
+ "specified in the Flink config: {}",
configuration.getValue(CoreOptions.TMP_DIRS));
} else if (defaultDirs != null) {
LOG.info("Setting directories for temporary files to: {}", defaultDirs);
configuration.set(CoreOptions.TMP_DIRS, defaultDirs);
configuration.set(USE_LOCAL_DEFAULT_TMP_DIRS, true);
}
} | @Test
void testShouldNotUpdateTmpDirectoriesInConfigurationIfNoValueConfigured() {
Configuration config = new Configuration();
BootstrapTools.updateTmpDirectoriesInConfiguration(config, null);
assertThat(CoreOptions.TMP_DIRS.defaultValue()).isEqualTo(config.get(CoreOptions.TMP_DIRS));
} |
public MetaString(
String string, Encoding encoding, char specialChar1, char specialChar2, byte[] bytes) {
this.string = string;
this.encoding = encoding;
this.specialChar1 = specialChar1;
this.specialChar2 = specialChar2;
this.bytes = bytes;
if (encoding != Encoding.UTF_8) {
Preconditions.checkArgument(bytes.length > 0);
this.stripLastChar = (bytes[0] & 0x80) != 0;
} else {
this.stripLastChar = false;
}
} | @Test(dataProvider = "specialChars")
public void testMetaString(char specialChar1, char specialChar2) {
MetaStringEncoder encoder = new MetaStringEncoder(specialChar1, specialChar2);
for (int i = 1; i < 128; i++) {
try {
String str = createString(i, specialChar1, specialChar2);
MetaString metaString = encoder.encode(str);
assertNotSame(metaString.getEncoding(), MetaString.Encoding.UTF_8);
assertEquals(metaString.getString(), str);
assertEquals(metaString.getSpecialChar1(), specialChar1);
assertEquals(metaString.getSpecialChar2(), specialChar2);
MetaStringDecoder decoder = new MetaStringDecoder(specialChar1, specialChar2);
String newStr = decoder.decode(metaString.getBytes(), metaString.getEncoding());
assertEquals(newStr, str);
} catch (Throwable e) {
throw new RuntimeException("Failed at " + i, e);
}
}
} |
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
String taskType = RealtimeToOfflineSegmentsTask.TASK_TYPE;
List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
for (TableConfig tableConfig : tableConfigs) {
String realtimeTableName = tableConfig.getTableName();
if (tableConfig.getTableType() != TableType.REALTIME) {
LOGGER.warn("Skip generating task: {} for non-REALTIME table: {}", taskType, realtimeTableName);
continue;
}
LOGGER.info("Start generating task configs for table: {} for task: {}", realtimeTableName, taskType);
// Only schedule 1 task of this type, per table
Map<String, TaskState> incompleteTasks =
TaskGeneratorUtils.getIncompleteTasks(taskType, realtimeTableName, _clusterInfoAccessor);
if (!incompleteTasks.isEmpty()) {
LOGGER.warn("Found incomplete tasks: {} for same table: {} and task type: {}. Skipping task generation.",
incompleteTasks.keySet(), realtimeTableName, taskType);
continue;
}
// Get all segment metadata for completed segments (DONE/UPLOADED status).
List<SegmentZKMetadata> completedSegmentsZKMetadata = new ArrayList<>();
Map<Integer, String> partitionToLatestLLCSegmentName = new HashMap<>();
Set<Integer> allPartitions = new HashSet<>();
getCompletedSegmentsInfo(realtimeTableName, completedSegmentsZKMetadata, partitionToLatestLLCSegmentName,
allPartitions);
if (completedSegmentsZKMetadata.isEmpty()) {
LOGGER.info("No realtime-completed segments found for table: {}, skipping task generation: {}",
realtimeTableName, taskType);
continue;
}
allPartitions.removeAll(partitionToLatestLLCSegmentName.keySet());
if (!allPartitions.isEmpty()) {
LOGGER.info(
"Partitions: {} have no completed segments. Table: {} is not ready for {}. Skipping task generation.",
allPartitions, realtimeTableName, taskType);
continue;
}
TableTaskConfig tableTaskConfig = tableConfig.getTaskConfig();
Preconditions.checkState(tableTaskConfig != null);
Map<String, String> taskConfigs = tableTaskConfig.getConfigsForTaskType(taskType);
Preconditions.checkState(taskConfigs != null, "Task config shouldn't be null for table: %s", realtimeTableName);
// Get the bucket size and buffer
String bucketTimePeriod =
taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUCKET_TIME_PERIOD_KEY, DEFAULT_BUCKET_PERIOD);
String bufferTimePeriod =
taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUFFER_TIME_PERIOD_KEY, DEFAULT_BUFFER_PERIOD);
long bucketMs = TimeUtils.convertPeriodToMillis(bucketTimePeriod);
long bufferMs = TimeUtils.convertPeriodToMillis(bufferTimePeriod);
// Get watermark from RealtimeToOfflineSegmentsTaskMetadata ZNode. WindowStart = watermark. WindowEnd =
// windowStart + bucket.
long windowStartMs = getWatermarkMs(realtimeTableName, completedSegmentsZKMetadata, bucketMs);
long windowEndMs = windowStartMs + bucketMs;
// Find all COMPLETED segments with data overlapping execution window: windowStart (inclusive) to windowEnd
// (exclusive)
List<String> segmentNames = new ArrayList<>();
List<String> downloadURLs = new ArrayList<>();
Set<String> lastLLCSegmentPerPartition = new HashSet<>(partitionToLatestLLCSegmentName.values());
boolean skipGenerate = false;
while (true) {
// Check that execution window is older than bufferTime
if (windowEndMs > System.currentTimeMillis() - bufferMs) {
LOGGER.info(
"Window with start: {} and end: {} is not older than buffer time: {} configured as {} ago. Skipping task "
+ "generation: {}", windowStartMs, windowEndMs, bufferMs, bufferTimePeriod, taskType);
skipGenerate = true;
break;
}
for (SegmentZKMetadata segmentZKMetadata : completedSegmentsZKMetadata) {
String segmentName = segmentZKMetadata.getSegmentName();
long segmentStartTimeMs = segmentZKMetadata.getStartTimeMs();
long segmentEndTimeMs = segmentZKMetadata.getEndTimeMs();
// Check overlap with window
if (windowStartMs <= segmentEndTimeMs && segmentStartTimeMs < windowEndMs) {
// If last completed segment is being used, make sure that segment crosses over end of window.
// In the absence of this check, CONSUMING segments could contain some portion of the window. That data
// would be skipped forever.
if (lastLLCSegmentPerPartition.contains(segmentName) && segmentEndTimeMs < windowEndMs) {
LOGGER.info("Window data overflows into CONSUMING segments for partition of segment: {}. Skipping task "
+ "generation: {}", segmentName, taskType);
skipGenerate = true;
break;
}
segmentNames.add(segmentName);
downloadURLs.add(segmentZKMetadata.getDownloadUrl());
}
}
if (skipGenerate || !segmentNames.isEmpty()) {
break;
}
LOGGER.info("Found no eligible segments for task: {} with window [{} - {}), moving to the next time bucket",
taskType, windowStartMs, windowEndMs);
windowStartMs = windowEndMs;
windowEndMs += bucketMs;
}
if (skipGenerate) {
continue;
}
Map<String, String> configs = MinionTaskUtils.getPushTaskConfig(realtimeTableName, taskConfigs,
_clusterInfoAccessor);
configs.putAll(getBaseTaskConfigs(tableConfig, segmentNames));
configs.put(MinionConstants.DOWNLOAD_URL_KEY, StringUtils.join(downloadURLs, MinionConstants.URL_SEPARATOR));
configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments");
// Segment processor configs
configs.put(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, String.valueOf(windowStartMs));
configs.put(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, String.valueOf(windowEndMs));
String roundBucketTimePeriod = taskConfigs.get(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY);
if (roundBucketTimePeriod != null) {
configs.put(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY, roundBucketTimePeriod);
}
// NOTE: Check and put both keys for backward-compatibility
String mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY);
if (mergeType == null) {
mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY);
}
if (mergeType != null) {
configs.put(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, mergeType);
configs.put(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY, mergeType);
}
for (Map.Entry<String, String> entry : taskConfigs.entrySet()) {
if (entry.getKey().endsWith(RealtimeToOfflineSegmentsTask.AGGREGATION_TYPE_KEY_SUFFIX)) {
configs.put(entry.getKey(), entry.getValue());
}
}
String maxNumRecordsPerSegment = taskConfigs.get(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY);
if (maxNumRecordsPerSegment != null) {
configs.put(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY, maxNumRecordsPerSegment);
}
pinotTaskConfigs.add(new PinotTaskConfig(taskType, configs));
LOGGER.info("Finished generating task configs for table: {} for task: {}", realtimeTableName, taskType);
}
return pinotTaskConfigs;
} | @Test
public void testGenerateTasksCheckConfigs() {
ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
when(mockClusterInfoProvide.getTaskStates(RealtimeToOfflineSegmentsTask.TASK_TYPE)).thenReturn(new HashMap<>());
SegmentZKMetadata segmentZKMetadata =
getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 5000, 50_000, TimeUnit.MILLISECONDS, null);
when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
.thenReturn(Lists.newArrayList(segmentZKMetadata));
when(mockClusterInfoProvide.getIdealState(REALTIME_TABLE_NAME))
.thenReturn(getIdealState(REALTIME_TABLE_NAME, Lists.newArrayList(segmentZKMetadata.getSegmentName())));
RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator();
generator.init(mockClusterInfoProvide);
// Skip task generation, if offline table
TableConfig offlineTableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).build();
List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertTrue(pinotTaskConfigs.isEmpty());
// No tableTaskConfig, error
TableConfig realtimeTableConfig = getRealtimeTableConfig(new HashMap<>());
realtimeTableConfig.setTaskConfig(null);
try {
generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
Assert.fail("Should have failed for null tableTaskConfig");
} catch (IllegalStateException e) {
// expected
}
// No taskConfig for task, error
realtimeTableConfig = getRealtimeTableConfig(new HashMap<>());
try {
generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
Assert.fail("Should have failed for null taskConfig");
} catch (IllegalStateException e) {
// expected
}
} |
@Udf(description = "Returns a new string encoded using the outputEncoding ")
public String encode(
@UdfParameter(
description = "The source string. If null, then function returns null.") final String str,
@UdfParameter(
description = "The input encoding."
+ " If null, then function returns null.") final String inputEncoding,
@UdfParameter(
description = "The output encoding."
+ " If null, then function returns null.") final String outputEncoding) {
if (str == null || inputEncoding == null || outputEncoding == null) {
return null;
}
final String encodedString = inputEncoding.toLowerCase() + outputEncoding.toLowerCase();
final Encode.Encoder encoder = ENCODER_MAP.get(encodedString);
if (encoder == null) {
throw new KsqlFunctionException("Supported input and output encodings are: "
+ "hex, utf8, ascii and base64");
}
return encoder.apply(str);
} | @Test
public void shouldEncodeHexToBase64() {
assertThat(udf.encode("4578616d706c6521", "hex", "base64"), is("RXhhbXBsZSE="));
assertThat(udf.encode("506c616e74207472656573", "hex", "base64"), is("UGxhbnQgdHJlZXM="));
assertThat(udf.encode("31202b2031203d2031", "hex", "base64"), is("MSArIDEgPSAx"));
assertThat(udf.encode("ce95cebbcebbceacceb4ceb1", "hex", "base64"), is("zpXOu867zqzOtM6x"));
assertThat(udf.encode("c39c6265726d656e736368", "hex", "base64"), is("w5xiZXJtZW5zY2g="));
assertThat(udf.encode("0x4578616d706c6521", "hex", "base64"), is("RXhhbXBsZSE="));
assertThat(udf.encode("X'7e8a016abfff'", "hex", "base64"), is("fooBar//"));
assertThat(udf.encode("x'328ba7b5a8a75627b0'", "hex", "base64"), is("MountainView"));
assertThat(udf.encode("0x", "hex", "base64"), is(""));
assertThat(udf.encode("X''", "hex", "base64"), is(""));
assertThat(udf.encode("x''", "hex", "base64"), is(""));
} |
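The last six assertions imply the hex decoder tolerates SQL-style literal markers. A hedged normalization sketch, not ksqlDB's actual implementation:
static String stripHexMarkers(String s) {
    if (s.startsWith("0x") || s.startsWith("0X")) {
        return s.substring(2);                 // "0x4578..." -> "4578..."
    }
    if ((s.startsWith("X'") || s.startsWith("x'")) && s.endsWith("'")) {
        return s.substring(2, s.length() - 1); // "X'7e8a...'" -> "7e8a..."
    }
    return s;
}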
public static byte[] compress(byte[] bytes) {
if (bytes == null) {
throw new NullPointerException("bytes is null");
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
try (ZipOutputStream zip = new ZipOutputStream(out)) {
ZipEntry entry = new ZipEntry("zip");
entry.setSize(bytes.length);
zip.putNextEntry(entry);
zip.write(bytes);
zip.closeEntry();
return out.toByteArray();
} catch (IOException e) {
throw new RuntimeException("Zip compress error", e);
}
} | @Test
public void test_compress() {
Assertions.assertThrows(NullPointerException.class, () -> {
ZipUtil.compress(null);
});
} |
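A round-trip sketch for the single-entry archive produced above (Java 9+ for readAllBytes; assumed to run inside a method that throws IOException):
byte[] zipped = ZipUtil.compress("hello".getBytes(StandardCharsets.UTF_8));
try (ZipInputStream in = new ZipInputStream(new ByteArrayInputStream(zipped))) {
    ZipEntry entry = in.getNextEntry();  // the one entry, named "zip"
    byte[] original = in.readAllBytes(); // back to "hello"
}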
public static List<Column> generateReferencedColumns(
String projectionExpression, @Nullable String filterExpression, List<Column> columns) {
if (isNullOrWhitespaceOnly(projectionExpression)) {
return new ArrayList<>();
}
Set<String> referencedColumnNames = new HashSet<>();
SqlSelect sqlProject = parseProjectionExpression(projectionExpression);
if (!sqlProject.getSelectList().isEmpty()) {
for (SqlNode sqlNode : sqlProject.getSelectList()) {
if (sqlNode instanceof SqlBasicCall) {
SqlBasicCall sqlBasicCall = (SqlBasicCall) sqlNode;
if (SqlKind.AS.equals(sqlBasicCall.getOperator().kind)) {
referencedColumnNames.addAll(
parseColumnNameList(sqlBasicCall.getOperandList().get(0)));
} else {
throw new ParseException(
"Unrecognized projection expression: "
+ sqlBasicCall
+ ". Should be <EXPR> AS <IDENTIFIER>");
}
} else if (sqlNode instanceof SqlIdentifier) {
SqlIdentifier sqlIdentifier = (SqlIdentifier) sqlNode;
if (sqlIdentifier.isStar()) {
// wildcard star character matches all columns
return columns;
}
referencedColumnNames.add(
sqlIdentifier.names.get(sqlIdentifier.names.size() - 1));
}
}
}
        if (!isNullOrWhitespaceOnly(filterExpression)) {
SqlSelect sqlFilter = parseFilterExpression(filterExpression);
referencedColumnNames.addAll(parseColumnNameList(sqlFilter.getWhere()));
}
return columns.stream()
.filter(e -> referencedColumnNames.contains(e.getName()))
.collect(Collectors.toList());
} | @Test
public void testGenerateReferencedColumns() {
List<Column> testColumns =
Arrays.asList(
Column.physicalColumn("id", DataTypes.INT(), "id"),
Column.physicalColumn("name", DataTypes.STRING(), "string"),
Column.physicalColumn("age", DataTypes.INT(), "age"),
Column.physicalColumn("address", DataTypes.STRING(), "address"),
Column.physicalColumn("weight", DataTypes.DOUBLE(), "weight"),
Column.physicalColumn("height", DataTypes.DOUBLE(), "height"),
Column.physicalColumn("birthday", DataTypes.DATE(), "birthday"));
List<Column> result =
TransformParser.generateReferencedColumns(
"id, upper(name) as name, age + 1 as newage, weight / (height * height) as bmi",
"bmi > 17 and char_length(address) > 10",
testColumns);
List<String> expected =
Arrays.asList(
"`id` INT 'id'",
"`name` STRING 'string'",
"`age` INT 'age'",
"`address` STRING 'address'",
"`weight` DOUBLE 'weight'",
"`height` DOUBLE 'height'");
Assertions.assertThat(result.toString()).isEqualTo("[" + String.join(", ", expected) + "]");
// calculated columns must use AS to provide an alias name
Assertions.assertThatThrownBy(
() ->
TransformParser.generateReferencedColumns(
"id, 1 + 1", null, testColumns))
.isExactlyInstanceOf(ParseException.class);
} |
public Column getColumn(String value) {
Matcher m = PATTERN.matcher(value);
if (!m.matches()) {
throw new IllegalArgumentException("value " + value + " is not a valid column definition");
}
String name = m.group(1);
String type = m.group(6);
type = type == null ? "String" : type;
boolean array = (m.group(4) != null) || (m.group(7) != null);
if (array) {
return new ArrayColumn(name,
createColumn(name,
type));
}
return createColumn(name,
type);
} | @Test
public void testGetLongArrayColumn() {
ColumnFactory f = new ColumnFactory();
Column column = f.getColumn("column: Long[]");
assertThat(column instanceof ArrayColumn).isTrue();
assertThat(column.getName()).isEqualTo("column");
assertThat(column.getCellType()).isEqualTo("LongCell");
} |
public IndexerDirectoryInformation parse(Path path) {
if (!Files.exists(path)) {
throw new IndexerInformationParserException("Path " + path + " does not exist.");
}
if (!Files.isDirectory(path)) {
throw new IndexerInformationParserException("Path " + path + " is not a directory");
}
if (!Files.isReadable(path)) {
throw new IndexerInformationParserException("Path " + path + " is not readable");
}
final Path nodesPath = path.resolve("nodes");
if (!Files.exists(nodesPath)) {
return IndexerDirectoryInformation.empty(path);
}
try (final Stream<Path> nodes = Files.list(nodesPath)) {
final List<NodeInformation> nodeInformation = nodes.filter(Files::isDirectory)
.filter(p -> p.getFileName().toString().matches("\\d+"))
.map(this::parseNode)
.filter(node -> !node.isEmpty())
.toList();
return new IndexerDirectoryInformation(path, nodeInformation);
} catch (IOException e) {
throw new IndexerInformationParserException("Failed to list nodes", e);
}
} | @Test
void testElasticsearch7() throws URISyntaxException {
final URI uri = getClass().getResource("/indices/elasticsearch7").toURI();
final IndexerDirectoryInformation result = parser.parse(Path.of(uri));
Assertions.assertThat(result.nodes())
.hasSize(1)
.allSatisfy(node -> {
Assertions.assertThat(node.nodeVersion()).isEqualTo("7.10.0");
Assertions.assertThat(node.indices())
.hasSize(1)
.extracting(IndexInformation::indexName)
.contains("graylog_0");
final IndexInformation graylog_0 = node.indices().stream().filter(i -> i.indexName().equals("graylog_0")).findFirst().orElseThrow(() -> new RuntimeException("Failed to detect graylog_0 index"));
Assertions.assertThat(graylog_0.indexVersionCreated()).isEqualTo("7.10.0");
Assertions.assertThat(graylog_0.shards())
.hasSize(1)
.allSatisfy(shard -> {
Assertions.assertThat(shard.documentsCount()).isEqualTo(1);
Assertions.assertThat(shard.name()).isEqualTo("S0");
Assertions.assertThat(shard.primary()).isEqualTo(true);
Assertions.assertThat(shard.minLuceneVersion()).isEqualTo("8.7.0");
});
});
} |
@VisibleForTesting
public void validateNoticeExists(Long id) {
if (id == null) {
return;
}
NoticeDO notice = noticeMapper.selectById(id);
if (notice == null) {
throw exception(NOTICE_NOT_FOUND);
}
} | @Test
public void testValidateNoticeExists_success() {
// Insert the precondition data
NoticeDO dbNotice = randomPojo(NoticeDO.class);
noticeMapper.insert(dbNotice);
// The call succeeds
noticeService.validateNoticeExists(dbNotice.getId());
} |
@Override
public String toString() {
return new ReflectionToStringBuilder(this, ToStringStyle.SIMPLE_STYLE).toString();
} | @Test
void testToString() {
PurgeableAnalysisDto dto = new PurgeableAnalysisDto().setAnalysisUuid("u3");
assertThat(dto.toString()).isNotEmpty();
} |
@Override
public void bind() {
server.bind();
} | @Test
public void bind() throws IOException {
ServerSupport support = new ServerSupport(RandomPort::getSafeRandomPort);
support.bind();
while (!support.isActive()) {
LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100L));
}
Assert.assertTrue(support.isActive());
support.close();
} |
public <E extends Enum> E getEnum(HazelcastProperty property, Class<E> enumClazz) {
String value = getString(property);
for (E enumConstant : enumClazz.getEnumConstants()) {
if (equalsIgnoreCase(enumConstant.name(), value)) {
return enumConstant;
}
}
throw new IllegalArgumentException(format("value '%s' for property '%s' is not a valid %s value",
value, property.getName(), enumClazz.getName()));
} | @Test
public void getEnum() {
config.setProperty(ClusterProperty.HEALTH_MONITORING_LEVEL.getName(), "NOISY");
HazelcastProperties properties = new HazelcastProperties(config.getProperties());
HealthMonitorLevel healthMonitorLevel = properties
.getEnum(ClusterProperty.HEALTH_MONITORING_LEVEL, HealthMonitorLevel.class);
assertEquals(HealthMonitorLevel.NOISY, healthMonitorLevel);
} |
@Override
public ManagementMetadata get(EurekaInstanceConfigBean instance, int serverPort, String serverContextPath,
String managementContextPath, Integer managementPort) {
if (isRandom(managementPort)) {
return null;
}
if (managementPort == null && isRandom(serverPort)) {
return null;
}
String healthCheckUrl = getHealthCheckUrl(instance, serverPort, serverContextPath, managementContextPath,
managementPort, false);
String statusPageUrl = getStatusPageUrl(instance, serverPort, serverContextPath, managementContextPath,
managementPort);
ManagementMetadata metadata = new ManagementMetadata(healthCheckUrl, statusPageUrl,
managementPort == null ? serverPort : managementPort);
if (instance.isSecurePortEnabled()) {
metadata.setSecureHealthCheckUrl(getHealthCheckUrl(instance, serverPort, serverContextPath,
managementContextPath, managementPort, true));
}
return metadata;
} | @Test
void serverPortIsRandomAndManagementPortIsNull() {
int serverPort = 0;
String serverContextPath = "/";
String managementContextPath = null;
Integer managementPort = null;
ManagementMetadata actual = provider.get(INSTANCE, serverPort, serverContextPath, managementContextPath,
managementPort);
assertThat(actual).isNull();
} |
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
try {
final ResourceCreationRepresentationArrayInner resourceCreationRepresentation = new ResourceCreationRepresentationArrayInner();
final String path = StringUtils.removeStart(folder.getAbsolute(), String.valueOf(Path.DELIMITER));
resourceCreationRepresentation.setPath(path);
resourceCreationRepresentation.setResourceType(ResourceCreationRepresentationArrayInner.ResourceTypeEnum.CONTAINER);
final EueApiClient client = new EueApiClient(session);
final ResourceCreationResponseEntries resourceCreationResponseEntries = new PostChildrenForAliasApi(client).resourceAliasAliasChildrenPost(
EueResourceIdProvider.ROOT, Collections.singletonList(resourceCreationRepresentation), null, null, null, null, null);
if(!resourceCreationResponseEntries.containsKey(path)) {
throw new NotfoundException(folder.getAbsolute());
}
final ResourceCreationResponseEntry resourceCreationResponseEntry = resourceCreationResponseEntries.get(path);
switch(resourceCreationResponseEntry.getStatusCode()) {
case HttpStatus.SC_OK:
// Already exists
throw new ConflictException(folder.getAbsolute());
case HttpStatus.SC_CREATED:
final String resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(resourceCreationResponseEntry.getHeaders().getLocation());
fileid.cache(folder, resourceId);
return folder;
default:
log.warn(String.format("Failure %s creating folder %s", resourceCreationResponseEntry, folder));
final ResourceCreationResponseEntryEntity entity = resourceCreationResponseEntry.getEntity();
if(null == entity) {
throw new EueExceptionMappingService().map(new ApiException(resourceCreationResponseEntry.getReason(),
null, resourceCreationResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
throw new EueExceptionMappingService().map(new ApiException(resourceCreationResponseEntry.getEntity().getError(),
null, resourceCreationResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
}
catch(ApiException e) {
throw new EueExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
} | @Test
public void testAttributes() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final TransferStatus status = new TransferStatus();
final Path directory = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), status);
assertThrows(ConflictException.class, () -> new EueDirectoryFeature(session, fileid).mkdir(directory, new TransferStatus()));
assertEquals(new EueAttributesFinderFeature(session, fileid).find(directory).getFileId(), directory.attributes().getFileId());
new EueDeleteFeature(session, fileid).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
List<AttributeKvEntry> filterChangedAttr(List<AttributeKvEntry> currentAttributes, List<AttributeKvEntry> newAttributes) {
if (currentAttributes == null || currentAttributes.isEmpty()) {
return newAttributes;
}
Map<String, AttributeKvEntry> currentAttrMap = currentAttributes.stream()
.collect(Collectors.toMap(AttributeKvEntry::getKey, Function.identity(), (existing, replacement) -> existing));
return newAttributes.stream()
.filter(item -> {
AttributeKvEntry cacheAttr = currentAttrMap.get(item.getKey());
return cacheAttr == null
|| !Objects.equals(item.getValue(), cacheAttr.getValue()) // JSON and String can be equal by value but differ by type
|| !Objects.equals(item.getDataType(), cacheAttr.getDataType());
})
.collect(Collectors.toList());
} | @Test
void testFilterChangedAttr_whenCurrentAttributesEmpty_thenReturnNewAttributes() {
List<AttributeKvEntry> newAttributes = new ArrayList<>();
List<AttributeKvEntry> filtered = node.filterChangedAttr(Collections.emptyList(), newAttributes);
assertThat(filtered).isSameAs(newAttributes);
} |
@JsonIgnore
public LongParamDefinition getCompletedByTsParam() {
if (completedByTs != null) {
return ParamDefinition.buildParamDefinition(PARAM_NAME, completedByTs);
}
if (completedByHour != null) {
String timeZone = tz == null ? "WORKFLOW_CRON_TIMEZONE" : String.format("'%s'", tz);
return LongParamDefinition.builder()
.name(PARAM_NAME)
.expression(String.format(COMPLETED_HOUR_TCT_TS, timeZone, completedByHour))
.build();
}
if (durationMinutes != null) {
return LongParamDefinition.builder()
.name(PARAM_NAME)
.expression(String.format(DURATION_MINUTES_TCT_TS, durationMinutes))
.build();
}
throw new MaestroInternalError(
"Invalid TCT definition, neither of time fields is set: %s", this);
} | @Test
public void testGetCompletedByTsParam() {
Tct tct = new Tct();
tct.setCompletedByTs(123L);
tct.setTz("UTC");
LongParamDefinition expected =
LongParamDefinition.builder().name("completed_by_ts").value(123L).build();
LongParamDefinition actual = tct.getCompletedByTsParam();
assertEquals(expected, actual);
} |
@Override
public CreateAclsRequestData data() {
return data;
} | @Test
public void shouldThrowOnV0IfNotLiteral() {
assertThrows(UnsupportedVersionException.class, () -> new CreateAclsRequest(data(PREFIXED_ACL1), V0));
} |
public String getFilePath() {
return this.filePath;
} | @Test
public void testGetFilePath() {
Dependency instance = new Dependency();
String expResult = "file.tar";
instance.setFilePath(expResult);
String result = instance.getFilePath();
assertEquals(expResult, result);
} |
public Collection<ServerPluginInfo> loadPlugins() {
Map<String, ServerPluginInfo> bundledPluginsByKey = new LinkedHashMap<>();
for (ServerPluginInfo bundled : getBundledPluginsMetadata()) {
failIfContains(bundledPluginsByKey, bundled,
plugin -> MessageException.of(format("Found two versions of the plugin %s [%s] in the directory %s. Please remove one of %s or %s.",
bundled.getName(), bundled.getKey(), getRelativeDir(fs.getInstalledBundledPluginsDir()), bundled.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
bundledPluginsByKey.put(bundled.getKey(), bundled);
}
Map<String, ServerPluginInfo> externalPluginsByKey = new LinkedHashMap<>();
for (ServerPluginInfo external : getExternalPluginsMetadata()) {
failIfContains(bundledPluginsByKey, external,
plugin -> MessageException.of(format("Found a plugin '%s' in the directory '%s' with the same key [%s] as a built-in feature '%s'. Please remove '%s'.",
external.getName(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getKey(), plugin.getName(),
new File(getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName()))));
failIfContains(externalPluginsByKey, external,
plugin -> MessageException.of(format("Found two versions of the plugin '%s' [%s] in the directory '%s'. Please remove %s or %s.", external.getName(), external.getKey(),
getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
externalPluginsByKey.put(external.getKey(), external);
}
for (PluginInfo downloaded : getDownloadedPluginsMetadata()) {
failIfContains(bundledPluginsByKey, downloaded,
plugin -> MessageException.of(format("Fail to update plugin: %s. Built-in feature with same key already exists: %s. Move or delete plugin from %s directory",
plugin.getName(), plugin.getKey(), getRelativeDir(fs.getDownloadedPluginsDir()))));
ServerPluginInfo installedPlugin;
if (externalPluginsByKey.containsKey(downloaded.getKey())) {
deleteQuietly(externalPluginsByKey.get(downloaded.getKey()).getNonNullJarFile());
installedPlugin = moveDownloadedPluginToExtensions(downloaded);
LOG.info("Plugin {} [{}] updated to version {}", installedPlugin.getName(), installedPlugin.getKey(), installedPlugin.getVersion());
} else {
installedPlugin = moveDownloadedPluginToExtensions(downloaded);
LOG.info("Plugin {} [{}] installed", installedPlugin.getName(), installedPlugin.getKey());
}
externalPluginsByKey.put(downloaded.getKey(), installedPlugin);
}
Map<String, ServerPluginInfo> plugins = new HashMap<>(externalPluginsByKey.size() + bundledPluginsByKey.size());
plugins.putAll(externalPluginsByKey);
plugins.putAll(bundledPluginsByKey);
PluginRequirementsValidator.unloadIncompatiblePlugins(plugins);
return plugins.values();
} | @Test
public void fail_if_plugin_does_not_support_plugin_api_version() throws Exception {
when(sonarRuntime.getApiVersion()).thenReturn(org.sonar.api.utils.Version.parse("1.0"));
copyTestPluginTo("test-base-plugin", fs.getInstalledExternalPluginsDir());
assertThatThrownBy(() -> underTest.loadPlugins())
.hasMessage("Plugin Base Plugin [testbase] requires at least Sonar Plugin API version 4.5.4 (current: 1.0)");
} |
public static TbMathArgumentValue fromMessageMetadata(TbMathArgument arg, String argKey, TbMsgMetaData metaData) {
Double defaultValue = arg.getDefaultValue();
if (metaData == null) {
return defaultOrThrow(defaultValue, "Message metadata is empty!");
}
var value = metaData.getValue(argKey);
if (StringUtils.isEmpty(value)) {
return defaultOrThrow(defaultValue, "Message metadata has no '" + argKey + "'!");
}
return fromString(value);
} | @Test
public void test_fromMessageMetadata_then_valueEmpty() {
TbMathArgument tbMathArgument = new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "TestKey");
Throwable thrown = assertThrows(RuntimeException.class, () -> TbMathArgumentValue.fromMessageMetadata(tbMathArgument, tbMathArgument.getKey(), null));
Assertions.assertNotNull(thrown.getMessage());
} |
public static boolean isSvgEnabled() {
return true;
} | @Test
public void testIsSvgEnabled() throws Exception {
assertTrue( SvgSupport.isSvgEnabled() );
} |
public static MulticastMappingInstruction multicastPriority(int priority) {
return new MulticastMappingInstruction.PriorityMappingInstruction(
MulticastType.PRIORITY, priority);
} | @Test
public void testMulticastPriorityMethod() {
final MappingInstruction instruction = MappingInstructions.multicastPriority(2);
final MulticastMappingInstruction.PriorityMappingInstruction priorityMappingInstruction =
checkAndConvert(instruction,
MulticastMappingInstruction.Type.MULTICAST,
MulticastMappingInstruction.PriorityMappingInstruction.class);
assertThat(priorityMappingInstruction.priority(), is(equalTo(2)));
} |
public static String formatHostnameForHttp(InetSocketAddress addr) {
String hostString = NetUtil.getHostname(addr);
if (NetUtil.isValidIpV6Address(hostString)) {
if (!addr.isUnresolved()) {
hostString = NetUtil.toAddressString(addr.getAddress());
} else if (hostString.charAt(0) == '[' && hostString.charAt(hostString.length() - 1) == ']') {
// If IPv6 address already contains brackets, let's return as is.
return hostString;
}
return '[' + hostString + ']';
}
return hostString;
} | @Test
public void testIpv6() throws Exception {
InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getByName("::1"), 8080);
assertEquals("[::1]", HttpUtil.formatHostnameForHttp(socketAddress));
} |
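A hedged walk through the three branches; only the first is exercised by the test, the other two results are inferred from the code above (getByName may throw UnknownHostException):
HttpUtil.formatHostnameForHttp(new InetSocketAddress(InetAddress.getByName("::1"), 8080)); // "[::1]"
HttpUtil.formatHostnameForHttp(InetSocketAddress.createUnresolved("[::1]", 8080));         // "[::1]" kept as-is
HttpUtil.formatHostnameForHttp(InetSocketAddress.createUnresolved("example.com", 80));     // "example.com"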
public static <K, V> AsMultimap<K, V> asMultimap() {
return new AsMultimap<>(false);
} | @Test
public void testViewUnboundedAsMultimapDirect() {
testViewUnbounded(pipeline, View.asMultimap());
} |
@Override
public synchronized void write(int b) throws IOException {
checkNotClosed();
file.writeLock().lock();
try {
if (append) {
pos = file.sizeWithoutLocking();
}
file.write(pos++, (byte) b);
file.setLastModifiedTime(fileSystemState.now());
} finally {
file.writeLock().unlock();
}
} | @Test
public void testWrite_partialArray() throws IOException {
JimfsOutputStream out = newOutputStream(false);
out.write(new byte[] {1, 2, 3, 4, 5, 6}, 1, 3);
assertStoreContains(out, 2, 3, 4);
} |
@SuppressWarnings("unused") // Required for automatic type inference
public static <K> Builder0<K> forClass(final Class<K> type) {
return new Builder0<>();
} | @Test
public void shouldNotThrowOnDuplicateHandler1() {
HandlerMaps.forClass(BaseType.class).withArgType(String.class)
.put(LeafTypeA.class, handler1_1)
.put(LeafTypeB.class, handler1_1);
} |
public SSLParametersConfiguration getParameters() {
if (parameters == null) {
parameters = new SSLParametersConfiguration();
}
return parameters;
} | @Test
public void testParameters() throws Exception {
assertNotNull(configuration.getParameters());
} |
static void cleanStackTrace(Throwable throwable) {
new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
} | @Test
public void allFramesAboveSubjectCleaned() {
Throwable throwable =
createThrowableWithStackTrace(
"com.google.random.Package",
"com.google.common.base.collection.ImmutableMap",
"com.google.common.truth.StringSubject",
"com.google.example.SomeClass");
StackTraceCleaner.cleanStackTrace(throwable);
assertThat(throwable.getStackTrace())
.isEqualTo(
new StackTraceElement[] {
createStackTraceElement("com.google.example.SomeClass"),
});
} |
protected static SimpleDateFormat getLog4j2Appender() {
Optional<Appender> log4j2xmlAppender =
configuration.getAppenders().values().stream()
.filter( a -> a.getName().equalsIgnoreCase( log4J2Appender ) ).findFirst();
if ( log4j2xmlAppender.isPresent() ) {
ArrayList<String> matchesArray = new ArrayList<>();
String dateFormatFromLog4j2xml = log4j2xmlAppender.get().getLayout().getContentFormat().get( "format" );
Pattern pattern = Pattern.compile( "(\\{(.*?)})" );
Matcher matcher = pattern.matcher( dateFormatFromLog4j2xml );
while ( matcher.find() ) {
matchesArray.add( matcher.group( 2 ) );
}
if ( !matchesArray.isEmpty() ) {
return processMatches( matchesArray );
}
}
return new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss" );
} | @Test
public void testGetLog4j2UsingAppender10() {
// Testing adding TimeZone GMT+0
KettleLogLayout.log4J2Appender = "pdi-execution-appender-test-10";
Assert.assertEquals( "MMM dd,yyyy HH:mm:ss",
KettleLogLayout.getLog4j2Appender().toPattern() );
} |
public void startCluster() throws ClusterEntrypointException {
LOG.info("Starting {}.", getClass().getSimpleName());
try {
FlinkSecurityManager.setFromConfiguration(configuration);
PluginManager pluginManager =
PluginUtils.createPluginManagerFromRootFolder(configuration);
configureFileSystems(configuration, pluginManager);
SecurityContext securityContext = installSecurityContext(configuration);
ClusterEntrypointUtils.configureUncaughtExceptionHandler(configuration);
securityContext.runSecured(
(Callable<Void>)
() -> {
runCluster(configuration, pluginManager);
return null;
});
} catch (Throwable t) {
final Throwable strippedThrowable =
ExceptionUtils.stripException(t, UndeclaredThrowableException.class);
try {
// clean up any partial state
shutDownAsync(
ApplicationStatus.FAILED,
ShutdownBehaviour.GRACEFUL_SHUTDOWN,
ExceptionUtils.stringifyException(strippedThrowable),
false)
.get(
INITIALIZATION_SHUTDOWN_TIMEOUT.toMilliseconds(),
TimeUnit.MILLISECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
strippedThrowable.addSuppressed(e);
}
throw new ClusterEntrypointException(
String.format(
"Failed to initialize the cluster entrypoint %s.",
getClass().getSimpleName()),
strippedThrowable);
}
} | @Test
public void testWorkingDirectoryIsSetupWhenStartingTheClusterEntrypoint() throws Exception {
final File workingDirBase = TEMPORARY_FOLDER.newFolder();
final ResourceID resourceId = new ResourceID("foobar");
configureWorkingDirectory(flinkConfig, workingDirBase, resourceId);
final File workingDir =
ClusterEntrypointUtils.generateJobManagerWorkingDirectoryFile(
flinkConfig, resourceId);
try (final TestingEntryPoint testingEntryPoint =
new TestingEntryPoint.Builder().setConfiguration(flinkConfig).build()) {
testingEntryPoint.startCluster();
assertTrue(workingDir.exists());
}
} |
@Override
public void start(
final KsqlModuleType moduleType,
final Properties ksqlProperties) {
final BaseSupportConfig ksqlVersionCheckerConfig =
new PhoneHomeConfig(ksqlProperties, "ksql");
if (!ksqlVersionCheckerConfig.isProactiveSupportEnabled()) {
log.warn(legalDisclaimerProactiveSupportDisabled());
return;
}
try {
final KsqlVersionChecker ksqlVersionChecker = versionCheckerFactory.create(
ksqlVersionCheckerConfig,
moduleType,
enableSettlingTime,
this::isActive
);
ksqlVersionChecker.init();
ksqlVersionChecker.setUncaughtExceptionHandler((t, e)
-> log.error("Uncaught exception in thread '{}':", t.getName(), e));
ksqlVersionChecker.start();
final long reportIntervalMs = ksqlVersionCheckerConfig.getReportIntervalMs();
final long reportIntervalHours = reportIntervalMs / (60 * 60 * 1000);
// We log at WARN level to increase the visibility of this information.
log.warn(legalDisclaimerProactiveSupportEnabled(reportIntervalHours));
} catch (final Exception e) {
// We catch any exceptions to prevent collateral damage to the more important broker
// threads that are running in the same JVM.
log.error("Failed to start KsqlVersionCheckerAgent: {}", e.getMessage());
}
} | @Test
@SuppressWarnings("unchecked")
public void shouldCreateKsqlVersionCheckerWithCorrectActivenessStatusSupplier() {
// When:
ksqlVersionCheckerAgent.start(KsqlModuleType.SERVER, properties);
// Then:
verify(versionCheckerFactory).create(any(), any(), anyBoolean(), activenessCaptor.capture());
assertThat(activenessCaptor.getValue().get(), equalTo(true));
} |
@Override
public SelType binaryOps(SelOp op, SelType rhs) {
if (rhs.type() == SelTypes.NULL && (op == SelOp.EQUAL || op == SelOp.NOT_EQUAL)) {
return rhs.binaryOps(op, this);
}
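    // Promote mixed-type operands: against a DOUBLE the comparison is evaluated as
    // double, against a STRING this long is converted to its string form first.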
if (rhs.type() == SelTypes.DOUBLE) {
SelDouble lhs = SelDouble.of(this.val);
return lhs.binaryOps(op, rhs);
}
if (rhs.type() == SelTypes.STRING) {
return SelString.of(String.valueOf(this.val)).binaryOps(op, rhs);
}
SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
long another = ((SelLong) rhs).val;
switch (op) {
case EQUAL:
return SelBoolean.of(this.val == another);
case NOT_EQUAL:
return SelBoolean.of(this.val != another);
case LT:
return SelBoolean.of(this.val < another);
case GT:
return SelBoolean.of(this.val > another);
case LTE:
return SelBoolean.of(this.val <= another);
case GTE:
return SelBoolean.of(this.val >= another);
case ADD:
return new SelLong(this.val + another);
case SUB:
return new SelLong(this.val - another);
case MUL:
return new SelLong(this.val * another);
case DIV:
return new SelLong(this.val / another);
case MOD:
return new SelLong(this.val % another);
case PLUS:
return new SelLong(this.val);
case MINUS:
return new SelLong(-this.val);
default:
throw new UnsupportedOperationException(
"int/Integer/long/Long DO NOT support expression operation " + op);
}
} | @Test
public void testBinaryOps() {
SelType obj = SelLong.of(2);
SelType res = orig.binaryOps(SelOp.EQUAL, obj);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = orig.binaryOps(SelOp.NOT_EQUAL, obj);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = orig.binaryOps(SelOp.LT, obj);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = orig.binaryOps(SelOp.GT, obj);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = orig.binaryOps(SelOp.LTE, obj);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = orig.binaryOps(SelOp.GTE, obj);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = orig.binaryOps(SelOp.ADD, obj);
assertEquals("LONG: 3", res.type() + ": " + res);
res = orig.binaryOps(SelOp.SUB, obj);
assertEquals("LONG: -1", res.type() + ": " + res);
res = orig.binaryOps(SelOp.MUL, obj);
assertEquals("LONG: 2", res.type() + ": " + res);
res = orig.binaryOps(SelOp.DIV, obj);
assertEquals("LONG: 0", res.type() + ": " + res);
res = orig.binaryOps(SelOp.MOD, obj);
assertEquals("LONG: 1", res.type() + ": " + res);
res = orig.binaryOps(SelOp.PLUS, obj);
assertEquals("LONG: 1", res.type() + ": " + res);
res = orig.binaryOps(SelOp.MINUS, obj);
assertEquals("LONG: -1", res.type() + ": " + res);
res = orig.binaryOps(SelOp.EQUAL, SelType.NULL);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = orig.binaryOps(SelOp.NOT_EQUAL, SelType.NULL);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = orig.binaryOps(SelOp.ADD, SelDouble.of(2.0));
assertEquals("DOUBLE: 3.0", res.type() + ": " + res);
res = orig.binaryOps(SelOp.ADD, SelString.of("2"));
assertEquals("STRING: 12", res.type() + ": " + res);
} |
public boolean hasNoLeaderInformation() {
return leaderInformationPerComponentId.isEmpty();
} | @Test
void hasNoLeaderInformation() {
LeaderInformationRegister register = LeaderInformationRegister.empty();
assertThat(register.hasNoLeaderInformation()).isTrue();
register = LeaderInformationRegister.of("component-id", LeaderInformation.empty());
assertThat(register.hasNoLeaderInformation()).isTrue();
register =
LeaderInformationRegister.merge(
register,
"other-component-id",
LeaderInformation.known(UUID.randomUUID(), "address"));
assertThat(register.hasNoLeaderInformation()).isFalse();
} |
public static OffsetBasedPagination forStartRowNumber(int startRowNumber, int pageSize) {
checkArgument(startRowNumber >= 1, "startRowNumber must be >= 1");
checkArgument(pageSize >= 1, "page size must be >= 1");
return new OffsetBasedPagination(startRowNumber - 1, pageSize);
} | @Test
void forStartRowNumber_whenStartRowNumberLowerThanOne_shouldfailsWithIAE() {
assertThatThrownBy(() -> OffsetBasedPagination.forStartRowNumber(0, 10))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("startRowNumber must be >= 1");
} |
public static EncryptionPluginManager instance() {
return INSTANCE;
} | @Test
void testInstance() {
EncryptionPluginManager instance = EncryptionPluginManager.instance();
assertNotNull(instance);
} |
@Override
public V replace(K key, V newValue) {
begin();
V oldValue = transactionalMap.replace(key, newValue);
commit();
return oldValue;
} | @Test
public void testReplace() {
map.put(42, "oldValue");
String oldValue = adapter.replace(42, "newValue");
assertEquals("oldValue", oldValue);
assertEquals("newValue", map.get(42));
} |
@Inject
public FileMergeCacheManager(
CacheConfig cacheConfig,
FileMergeCacheConfig fileMergeCacheConfig,
CacheStats stats,
ExecutorService cacheFlushExecutor,
ExecutorService cacheRemovalExecutor,
ScheduledExecutorService cacheSizeCalculateExecutor)
{
        requireNonNull(cacheConfig, "cacheConfig is null");
this.cacheFlushExecutor = cacheFlushExecutor;
this.cacheRemovalExecutor = cacheRemovalExecutor;
this.cacheSizeCalculateExecutor = cacheSizeCalculateExecutor;
this.cache = CacheBuilder.newBuilder()
.maximumSize(fileMergeCacheConfig.getMaxCachedEntries())
.expireAfterAccess(fileMergeCacheConfig.getCacheTtl().toMillis(), MILLISECONDS)
.removalListener(new CacheRemovalListener())
.recordStats()
.build();
this.stats = requireNonNull(stats, "stats is null");
this.baseDirectory = new Path(cacheConfig.getBaseDirectory());
checkArgument(fileMergeCacheConfig.getMaxInMemoryCacheSize().toBytes() >= 0, "maxInflightBytes is negative");
this.maxInflightBytes = fileMergeCacheConfig.getMaxInMemoryCacheSize().toBytes();
File target = new File(baseDirectory.toUri());
if (!target.exists()) {
try {
Files.createDirectories(target.toPath());
}
catch (IOException e) {
throw new PrestoException(GENERIC_INTERNAL_ERROR, "cannot create cache directory " + target, e);
}
}
else {
File[] files = target.listFiles();
if (files == null) {
return;
}
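            // Asynchronously purge cache files left over from a previous run so that
            // construction is not blocked on disk I/O.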
this.cacheRemovalExecutor.submit(() -> Arrays.stream(files).forEach(file -> {
try {
Files.delete(file.toPath());
}
catch (IOException e) {
// ignore
}
}));
}
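        // Periodically recompute per-scope cache sizes and drop bookkeeping entries
        // for scopes whose files no longer exist.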
this.cacheSizeCalculateExecutor.scheduleAtFixedRate(
() -> {
try {
cacheScopeFiles.keySet().forEach(cacheIdentifier -> cacheScopeSizeInBytes.put(cacheIdentifier, getCacheScopeSizeInBytes(cacheIdentifier)));
cacheScopeSizeInBytes.keySet().removeIf(key -> !cacheScopeFiles.containsKey(key));
}
catch (Throwable t) {
log.error(t, "Error calculating cache size");
}
},
0,
15,
TimeUnit.SECONDS);
} | @Test(timeOut = 30_000)
public void testQuota()
throws InterruptedException, ExecutionException, IOException
{
TestingCacheStats stats = new TestingCacheStats();
CacheManager cacheManager = fileMergeCacheManager(stats);
byte[] buffer = new byte[10240];
CacheQuota cacheQuota = new CacheQuota("test.table", Optional.of(DataSize.succinctDataSize(1, KILOBYTE)));
// read within the cache quota
assertFalse(readFully(cacheManager, cacheQuota, 42, buffer, 0, 100));
assertEquals(stats.getCacheMiss(), 1);
assertEquals(stats.getCacheHit(), 0);
assertEquals(stats.getQuotaExceed(), 0);
stats.trigger();
assertEquals(stats.getInMemoryRetainedBytes(), 0);
validateBuffer(data, 42, buffer, 0, 100);
// read beyond cache quota
assertFalse(readFully(cacheManager, cacheQuota, 47, buffer, 0, 9000));
assertEquals(stats.getCacheMiss(), 1);
assertEquals(stats.getCacheHit(), 0);
assertEquals(stats.getQuotaExceed(), 1);
assertEquals(stats.getInMemoryRetainedBytes(), 0);
validateBuffer(data, 47, buffer, 0, 90);
        // previously cached data is not evicted when a later read exceeds the quota
assertTrue(readFully(cacheManager, cacheQuota, 47, buffer, 0, 90));
assertEquals(stats.getCacheMiss(), 1);
assertEquals(stats.getCacheHit(), 1);
assertEquals(stats.getQuotaExceed(), 1);
assertEquals(stats.getInMemoryRetainedBytes(), 0);
validateBuffer(data, 47, buffer, 0, 90);
} |
@Operation(summary = "Get SAML metadata")
@GetMapping(value = "/idp/metadata", produces = MediaType.APPLICATION_XML_VALUE)
@ResponseBody
public String metadata() throws MetadataException {
logger.debug("Receive SAML metadata request!");
return idpMetadataService.getMetadata();
} | @Test
public void idpMetadataTest() throws MetadataException {
String idpMetadata = "idpMetadata";
when(idpMetadataServiceMock.getMetadata()).thenReturn(idpMetadata);
String result = metadataControllerMock.metadata();
assertNotNull(result);
assertEquals(idpMetadata, result);
verify(idpMetadataServiceMock, times(1)).getMetadata();
} |
public static String getParent(String filePath, int level) {
final File parent = getParent(file(filePath), level);
try {
return null == parent ? null : parent.getCanonicalPath();
} catch (IOException e) {
throw new IORuntimeException(e);
}
} | @Test
public void getParentTest() {
		// Only run this test on Windows
if (FileUtil.isWindows()) {
File parent = FileUtil.getParent(FileUtil.file("d:/aaa/bbb/cc/ddd"), 0);
assertEquals(FileUtil.file("d:\\aaa\\bbb\\cc\\ddd"), parent);
parent = FileUtil.getParent(FileUtil.file("d:/aaa/bbb/cc/ddd"), 1);
assertEquals(FileUtil.file("d:\\aaa\\bbb\\cc"), parent);
parent = FileUtil.getParent(FileUtil.file("d:/aaa/bbb/cc/ddd"), 2);
assertEquals(FileUtil.file("d:\\aaa\\bbb"), parent);
parent = FileUtil.getParent(FileUtil.file("d:/aaa/bbb/cc/ddd"), 4);
assertEquals(FileUtil.file("d:\\"), parent);
parent = FileUtil.getParent(FileUtil.file("d:/aaa/bbb/cc/ddd"), 5);
assertNull(parent);
parent = FileUtil.getParent(FileUtil.file("d:/aaa/bbb/cc/ddd"), 10);
assertNull(parent);
}
} |
private String toStringBoolean() {
// Code was removed from this method as ValueBoolean
// did not store length, so some parts could never be
// called.
String retval;
if ( value == null ) {
return null;
}
if ( isNull() ) {
retval = Const.NULL_BOOLEAN;
} else {
retval = value.getBoolean() ? "true" : "false";
}
return retval;
} | @Test
public void testToStringBoolean() {
String result = null;
Value vs = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs.setValue( true );
result = vs.toString( true );
assertEquals( "true", result );
Value vs1 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs1.setValue( false );
result = vs1.toString( true );
assertEquals( "false", result );
// set to "null"
Value vs2 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs2.setValue( true );
vs2.setNull();
result = vs2.toString( true );
assertEquals( "", result );
// set to "null"
Value vs3 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs3.setValue( false );
vs3.setNull();
result = vs3.toString( true );
assertEquals( "", result );
    // length = 1 is ignored (the Y/N conversion was removed) => still get true/false
Value vs4 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs4.setValue( true );
vs4.setLength( 1 );
result = vs4.toString( true );
assertEquals( "true", result );
    // length = 1 is ignored (the Y/N conversion was removed) => still get true/false
Value vs5 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs5.setValue( false );
vs5.setLength( 1 );
result = vs5.toString( true );
assertEquals( "false", result );
// set to length > 1 => get true/false
Value vs6 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs6.setValue( true );
vs6.setLength( 3 );
result = vs6.toString( true );
assertEquals( "true", result );
    // set to length > 1 => get true/false (no truncation is applied)
Value vs7 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
vs7.setValue( false );
vs7.setLength( 3 );
result = vs7.toString( true );
assertEquals( "false", result );
} |
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
CruiseConfig configForEdit;
CruiseConfig config;
LOGGER.debug("[Config Save] Loading config holder");
configForEdit = deserializeConfig(content);
if (callback != null) callback.call(configForEdit);
config = preprocessAndValidate(configForEdit);
return new GoConfigHolder(config, configForEdit);
} | @Test
void shouldThrowXsdValidationException_When2RepositoriesInSameConfigElement() {
assertThatThrownBy(() -> xmlLoader.loadConfigHolder(configWithConfigRepos(
"""
<config-repos>
<config-repo pluginId="myplugin">
<git url="https://github.com/tomzo/gocd-indep-config-part.git" />
<git url="https://github.com/tomzo/gocd-refmain-config-part.git" />
</config-repo >
</config-repos>
"""
))).isInstanceOf(XsdValidationException.class);
} |
Future<Boolean> canRollController(int nodeId) {
LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
return describeMetadataQuorum().map(info -> {
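        // The controller may only be rolled if the metadata quorum would remain
        // healthy without this node.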
boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
if (!canRoll) {
LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
}
return canRoll;
}).recover(error -> {
LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
return Future.failedFuture(error);
});
} | @Test
public void cannotRollController2NodeQuorumFollowerBehind(VertxTestContext context) {
Map<Integer, OptionalLong> controllers = new HashMap<>();
controllers.put(1, OptionalLong.of(10000L));
controllers.put(2, OptionalLong.of(7000L));
Admin admin = setUpMocks(1, controllers);
KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, CONTROLLER_QUORUM_FETCH_TIMEOUT_MS);
quorumCheck.canRollController(1).onComplete(context.succeeding(result -> {
context.verify(() -> assertFalse(result));
context.completeNow();
}));
} |
public static boolean matches(String wildcard, String ipAddress) {
if (false == ReUtil.isMatch(PatternPool.IPV4, ipAddress)) {
return false;
}
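    // Compare the wildcard pattern and the address segment by segment;
    // "*" matches any value at that position.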
final String[] wildcardSegments = wildcard.split("\\.");
final String[] ipSegments = ipAddress.split("\\.");
if (wildcardSegments.length != ipSegments.length) {
return false;
}
for (int i = 0; i < wildcardSegments.length; i++) {
if (false == "*".equals(wildcardSegments[i])
&& false == wildcardSegments[i].equals(ipSegments[i])) {
return false;
}
}
return true;
} | @Test
public void matchesTest() {
final boolean matches1 = Ipv4Util.matches("127.*.*.1", "127.0.0.1");
assertTrue(matches1);
final boolean matches2 = Ipv4Util.matches("192.168.*.1", "127.0.0.1");
assertFalse(matches2);
} |
@Override public SlotAssignmentResult ensure(long key1, long key2) {
return super.ensure0(key1, key2);
} | @Test
public void testPut() {
final long key1 = randomKey();
final long key2 = randomKey();
SlotAssignmentResult slot = insert(key1, key2);
assertTrue(slot.isNew());
final long valueAddress = slot.address();
slot = hsa.ensure(key1, key2);
assertFalse(slot.isNew());
assertEquals(valueAddress, slot.address());
} |
public static GenericRecord rewriteRecord(GenericRecord oldRecord, Schema newSchema) {
GenericRecord newRecord = new GenericData.Record(newSchema);
boolean isSpecificRecord = oldRecord instanceof SpecificRecordBase;
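    // For specific records, metadata fields are skipped; every other field copies
    // the old value or falls back to the new schema's default.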
for (Schema.Field f : newSchema.getFields()) {
if (!(isSpecificRecord && isMetadataField(f.name()))) {
copyOldValueOrSetDefault(oldRecord, newRecord, f);
}
}
return newRecord;
} | @Test
public void testNonNullableFieldWithDefault() {
GenericRecord rec = new GenericData.Record(new Schema.Parser().parse(EXAMPLE_SCHEMA));
rec.put("_row_key", "key1");
rec.put("non_pii_col", "val1");
rec.put("pii_col", "val2");
rec.put("timestamp", 3.5);
GenericRecord rec1 = HoodieAvroUtils.rewriteRecord(rec, new Schema.Parser().parse(SCHEMA_WITH_NON_NULLABLE_FIELD_WITH_DEFAULT));
assertEquals("dummy", rec1.get("non_nullable_field_with_default"));
} |
@Override
@Nonnull
public <T extends DataConnection> T getAndRetainDataConnection(String name, Class<T> clazz) {
DataConnectionEntry dataConnection = dataConnections.computeIfPresent(name, (k, v) -> {
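        // The type check and retain() run inside computeIfPresent, so they are
        // atomic with respect to concurrent removal of the entry.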
if (!clazz.isInstance(v.instance)) {
throw new HazelcastException("Data connection '" + name + "' must be an instance of " + clazz);
}
v.instance.retain();
return v;
});
if (dataConnection == null) {
throw new HazelcastException("Data connection '" + name + "' not found");
}
//noinspection unchecked
return (T) dataConnection.instance;
} | @Test
public void should_return_data_connection_from_config() {
DataConnection dataConnection = dataConnectionService.getAndRetainDataConnection(TEST_CONFIG, DummyDataConnection.class);
assertThat(dataConnection).isInstanceOf(DummyDataConnection.class);
assertThat(dataConnection.getName()).isEqualTo(TEST_CONFIG);
assertThat(dataConnection.getConfig().getProperties())
.containsEntry("customProperty", "value");
} |
List<MappingField> resolveFields(
@Nonnull String[] externalName,
@Nullable String dataConnectionName,
@Nonnull Map<String, String> options,
@Nonnull List<MappingField> userFields,
boolean stream
) {
Predicate<MappingField> pkColumnName = Options.getPkColumnChecker(options, stream);
Map<String, DocumentField> dbFields = readFields(externalName, dataConnectionName, options, stream);
List<MappingField> resolvedFields = new ArrayList<>();
if (userFields.isEmpty()) {
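            // No user-declared fields: derive one mapping field per sampled document field.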
for (DocumentField documentField : dbFields.values()) {
MappingField mappingField = new MappingField(
documentField.columnName,
resolveType(documentField.columnType),
documentField.columnName,
documentField.columnType.name()
);
mappingField.setPrimaryKey(pkColumnName.test(mappingField));
resolvedFields.add(mappingField);
}
} else {
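            // User declared fields explicitly: resolve each against the sampled
            // fields and validate the declared type.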
for (MappingField f : userFields) {
String prefixIfStream = stream ? "fullDocument." : "";
String nameInMongo = f.externalName() == null ? prefixIfStream + f.name() : f.externalName();
DocumentField documentField = getField(dbFields, f, stream);
if (documentField == null) {
throw new IllegalArgumentException("Could not resolve field with name " + nameInMongo);
}
MappingField mappingField = new MappingField(f.name(), f.type(), documentField.columnName,
documentField.columnType.name());
mappingField.setPrimaryKey(pkColumnName.test(mappingField));
validateType(f, documentField);
resolvedFields.add(mappingField);
}
}
return resolvedFields;
} | @Test
public void testResolvesMappingFieldsViaSample_wrongUserType() {
try (MongoClient client = MongoClients.create(mongoContainer.getConnectionString())) {
String databaseName = "testDatabase";
String collectionName = "people_3";
MongoDatabase testDatabase = client.getDatabase(databaseName);
MongoCollection<Document> collection = testDatabase.getCollection(collectionName);
collection.insertOne(new Document("firstName", "Tomasz")
.append("lastName", "Gawęda")
.append("birthYear", 1992));
FieldResolver resolver = new FieldResolver(null);
Map<String, String> readOpts = new HashMap<>();
readOpts.put("connectionString", mongoContainer.getConnectionString());
try {
resolver.resolveFields(new String[]{databaseName, collectionName}, null, readOpts,
singletonList(new MappingField("id", QueryDataType.MAP).setExternalName("_id")), false);
} catch (IllegalStateException e) {
assertThat(e.getMessage()).isEqualTo("Type MAP of field id does not match db type OBJECT");
}
}
} |
public static int compare(byte[] left, byte[] right) {
return compare(left, 0, left.length, right, 0, right.length);
} | @Test
public void testCompare() {
byte[] foo = "foo".getBytes(StandardCharsets.UTF_8);
assertEquals(ByteArray.compare(foo, foo), 0);
assertEquals(ByteArray.compare(foo, Arrays.copyOf(foo, foo.length)), 0);
assertTrue(ByteArray.compare(foo, Arrays.copyOf(foo, foo.length - 1)) > 0);
assertTrue(ByteArray.compare(foo, Arrays.copyOf(foo, foo.length + 1)) < 0);
byte[] bar = "bar".getBytes(StandardCharsets.UTF_8);
assertTrue(ByteArray.compare(foo, bar) > 0);
assertTrue(ByteArray.compare(bar, foo) < 0);
assertTrue(ByteArray.compare(Arrays.copyOf(bar, bar.length - 1), foo) < 0);
assertTrue(ByteArray.compare(Arrays.copyOf(bar, bar.length + 1), foo) < 0);
} |
@Override
public void deleteRole(String role) {
String sql = "DELETE FROM roles WHERE role=?";
try {
EmbeddedStorageContextHolder.addSqlContext(sql, role);
databaseOperate.update(EmbeddedStorageContextHolder.getCurrentSqlContext());
} finally {
EmbeddedStorageContextHolder.cleanAllContext();
}
} | @Test
void testDeleteRole() {
embeddedRolePersistService.deleteRole("role");
embeddedRolePersistService.deleteRole("role", "userName");
List<ModifyRequest> currentSqlContext = EmbeddedStorageContextHolder.getCurrentSqlContext();
assertEquals(0, currentSqlContext.size());
} |
@Override
public KTable<K, Long> count() {
return doCount(NamedInternal.empty(), Materialized.with(keySerde, Serdes.Long()));
} | @Test
public void shouldCountAndMaterializeResults() {
groupedStream.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count").withKeySerde(Serdes.String()));
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
processData(driver);
{
final KeyValueStore<String, Long> count = driver.getKeyValueStore("count");
assertThat(count.get("1"), equalTo(3L));
assertThat(count.get("2"), equalTo(1L));
assertThat(count.get("3"), equalTo(2L));
}
{
final KeyValueStore<String, ValueAndTimestamp<Long>> count = driver.getTimestampedKeyValueStore("count");
assertThat(count.get("1"), equalTo(ValueAndTimestamp.make(3L, 10L)));
assertThat(count.get("2"), equalTo(ValueAndTimestamp.make(1L, 1L)));
assertThat(count.get("3"), equalTo(ValueAndTimestamp.make(2L, 9L)));
}
}
} |
public static boolean httpRequestWasMade() {
return getFakeHttpLayer().hasRequestInfos();
} | @Test
public void httpRequestWasMade_returnsTrueIfRequestMatchingGivenRuleWasMade()
throws IOException, HttpException {
makeRequest("http://example.com");
assertTrue(FakeHttp.httpRequestWasMade("http://example.com"));
} |
@Override
@Nonnull
public <T extends DataConnection> T getAndRetainDataConnection(String name, Class<T> clazz) {
DataConnectionEntry dataConnection = dataConnections.computeIfPresent(name, (k, v) -> {
if (!clazz.isInstance(v.instance)) {
throw new HazelcastException("Data connection '" + name + "' must be an instance of " + clazz);
}
v.instance.retain();
return v;
});
if (dataConnection == null) {
throw new HazelcastException("Data connection '" + name + "' not found");
}
//noinspection unchecked
return (T) dataConnection.instance;
} | @Test
public void should_fail_when_non_existing_data_connection() {
assertThatThrownBy(() -> dataConnectionService.getAndRetainDataConnection("non-existing-data-connection", DummyDataConnection.class))
.isInstanceOf(HazelcastException.class)
.hasMessage("Data connection 'non-existing-data-connection' not found");
} |
@Override
public Integer addScoreAndGetRevRank(V object, Number value) {
return get(addScoreAndGetRevRankAsync(object, value));
} | @Test
public void testAddScoreAndGetRevRank() {
RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple");
Integer res1 = set.addScoreAndGetRevRank("12", 12);
assertThat(res1).isEqualTo(0);
Integer res2 = set.addScoreAndGetRevRank("15", 10);
assertThat(res2).isEqualTo(1);
assertThat(set.revRank("12")).isEqualTo(0);
assertThat(set.revRank("15")).isEqualTo(1);
Integer res3 = set.addScoreAndGetRevRank("12", 2);
assertThat(res3).isEqualTo(0);
Integer res4 = set.addScoreAndGetRevRank("15", -1);
assertThat(res4).isEqualTo(1);
Double score = set.getScore("12");
assertThat(score).isEqualTo(14);
} |
public static void initSSL(Properties consumerProps) {
// Check if one-way SSL is enabled. In this scenario, the client validates the server certificate.
String trustStoreLocation = consumerProps.getProperty(SSL_TRUSTSTORE_LOCATION);
String trustStorePassword = consumerProps.getProperty(SSL_TRUSTSTORE_PASSWORD);
String serverCertificate = consumerProps.getProperty(STREAM_KAFKA_SSL_SERVER_CERTIFICATE);
if (StringUtils.isAnyEmpty(trustStoreLocation, trustStorePassword, serverCertificate)) {
LOGGER.info("Skipping auto SSL server validation since it's not configured.");
return;
}
if (shouldRenewTrustStore(consumerProps)) {
initTrustStore(consumerProps);
}
// Set the security protocol
String securityProtocol = consumerProps.getProperty(SECURITY_PROTOCOL, DEFAULT_SECURITY_PROTOCOL);
consumerProps.setProperty(SECURITY_PROTOCOL, securityProtocol);
// Check if two-way SSL is enabled. In this scenario, the client validates the server's certificate and the server
// validates the client's certificate.
String keyStoreLocation = consumerProps.getProperty(SSL_KEYSTORE_LOCATION);
String keyStorePassword = consumerProps.getProperty(SSL_KEYSTORE_PASSWORD);
String keyPassword = consumerProps.getProperty(SSL_KEY_PASSWORD);
String clientCertificate = consumerProps.getProperty(STREAM_KAFKA_SSL_CLIENT_CERTIFICATE);
if (StringUtils.isAnyEmpty(keyStoreLocation, keyStorePassword, keyPassword, clientCertificate)) {
LOGGER.info("Skipping auto SSL client validation since it's not configured.");
return;
}
if (shouldRenewKeyStore(consumerProps)) {
initKeyStore(consumerProps);
}
} | @Test
public void testInitSSLTrustStoreOnly()
throws CertificateException, NoSuchAlgorithmException, OperatorCreationException, NoSuchProviderException,
IOException, KeyStoreException {
Properties consumerProps = new Properties();
setTrustStoreProps(consumerProps);
// should not throw any exceptions
KafkaSSLUtils.initSSL(consumerProps);
// validate
validateTrustStoreCertificateCount(1);
} |
public static String valueOf(String regionId) {
if (ObjectHelper.isEmpty(regionId)) {
throw new IllegalArgumentException("Unexpected empty parameter: regionId.");
} else {
String endpoint = REGIONS.get(regionId.toLowerCase());
if (ObjectHelper.isNotEmpty(endpoint)) {
return endpoint;
} else {
throw new IllegalArgumentException("Unexpected regionId: " + regionId);
}
}
} | @Test
public void testRegions() {
assertEquals("obs.af-south-1.myhuaweicloud.com", OBSRegion.valueOf("af-south-1"));
assertEquals("obs.ap-southeast-2.myhuaweicloud.com", OBSRegion.valueOf("ap-southeast-2"));
assertEquals("obs.ap-southeast-3.myhuaweicloud.com", OBSRegion.valueOf("ap-southeast-3"));
assertEquals("obs.cn-east-3.myhuaweicloud.com", OBSRegion.valueOf("cn-east-3"));
assertEquals("obs.cn-east-2.myhuaweicloud.com", OBSRegion.valueOf("cn-east-2"));
assertEquals("obs.cn-north-1.myhuaweicloud.com", OBSRegion.valueOf("cn-north-1"));
assertEquals("obs.cn-south-1.myhuaweicloud.com", OBSRegion.valueOf("cn-south-1"));
assertEquals("obs.ap-southeast-1.myhuaweicloud.com", OBSRegion.valueOf("ap-southeast-1"));
assertEquals("obs.sa-argentina-1.myhuaweicloud.com", OBSRegion.valueOf("sa-argentina-1"));
assertEquals("obs.sa-peru-1.myhuaweicloud.com", OBSRegion.valueOf("sa-peru-1"));
assertEquals("obs.na-mexico-1.myhuaweicloud.com", OBSRegion.valueOf("na-mexico-1"));
assertEquals("obs.la-south-2.myhuaweicloud.com", OBSRegion.valueOf("la-south-2"));
assertEquals("obs.sa-chile-1.myhuaweicloud.com", OBSRegion.valueOf("sa-chile-1"));
assertEquals("obs.sa-brazil-1.myhuaweicloud.com", OBSRegion.valueOf("sa-brazil-1"));
} |
public static <T> Partition<T> of(
int numPartitions,
PartitionWithSideInputsFn<? super T> partitionFn,
Requirements requirements) {
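    // Wrap the partition function in a Contextful so its side-input requirements
    // travel with it into the DoFn.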
Contextful ctfFn =
Contextful.fn(
(T element, Contextful.Fn.Context c) ->
partitionFn.partitionFor(element, numPartitions, c),
requirements);
return new Partition<>(new PartitionDoFn<T>(numPartitions, ctfFn, partitionFn));
} | @Test
public void testDisplayData() {
Partition<?> partition = Partition.of(123, new IdentityFn());
DisplayData displayData = DisplayData.from(partition);
assertThat(displayData, hasDisplayItem("numPartitions", 123));
assertThat(displayData, hasDisplayItem("partitionFn", IdentityFn.class));
} |
@Override
public ApiResult<TopicPartition, PartitionProducerState> handleResponse(
Node broker,
Set<TopicPartition> keys,
AbstractResponse abstractResponse
) {
DescribeProducersResponse response = (DescribeProducersResponse) abstractResponse;
Map<TopicPartition, PartitionProducerState> completed = new HashMap<>();
Map<TopicPartition, Throwable> failed = new HashMap<>();
List<TopicPartition> unmapped = new ArrayList<>();
for (DescribeProducersResponseData.TopicResponse topicResponse : response.data().topics()) {
for (DescribeProducersResponseData.PartitionResponse partitionResponse : topicResponse.partitions()) {
TopicPartition topicPartition = new TopicPartition(
topicResponse.name(), partitionResponse.partitionIndex());
Errors error = Errors.forCode(partitionResponse.errorCode());
if (error != Errors.NONE) {
ApiError apiError = new ApiError(error, partitionResponse.errorMessage());
handlePartitionError(topicPartition, apiError, failed, unmapped);
continue;
}
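                // The wire protocol uses negative sentinels for "absent"; map them to
                // empty optionals when building each ProducerState below.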
List<ProducerState> activeProducers = partitionResponse.activeProducers().stream()
.map(activeProducer -> {
OptionalLong currentTransactionFirstOffset =
activeProducer.currentTxnStartOffset() < 0 ?
OptionalLong.empty() :
OptionalLong.of(activeProducer.currentTxnStartOffset());
OptionalInt coordinatorEpoch =
activeProducer.coordinatorEpoch() < 0 ?
OptionalInt.empty() :
OptionalInt.of(activeProducer.coordinatorEpoch());
return new ProducerState(
activeProducer.producerId(),
activeProducer.producerEpoch(),
activeProducer.lastSequence(),
activeProducer.lastTimestamp(),
coordinatorEpoch,
currentTransactionFirstOffset
);
}).collect(Collectors.toList());
completed.put(topicPartition, new PartitionProducerState(activeProducers));
}
}
return new ApiResult<>(completed, failed, unmapped);
} | @Test
public void testCompletedResult() {
TopicPartition topicPartition = new TopicPartition("foo", 5);
DescribeProducersOptions options = new DescribeProducersOptions().brokerId(1);
DescribeProducersHandler handler = newHandler(options);
PartitionResponse partitionResponse = sampleProducerState(topicPartition);
DescribeProducersResponse response = describeProducersResponse(
singletonMap(topicPartition, partitionResponse)
);
Node node = new Node(3, "host", 1);
ApiResult<TopicPartition, PartitionProducerState> result =
handler.handleResponse(node, mkSet(topicPartition), response);
assertEquals(mkSet(topicPartition), result.completedKeys.keySet());
assertEquals(emptyMap(), result.failedKeys);
assertEquals(emptyList(), result.unmappedKeys);
PartitionProducerState producerState = result.completedKeys.get(topicPartition);
assertMatchingProducers(partitionResponse, producerState);
} |
public static HttpUrl buildHttpUrl(final String url) {
return buildHttpUrl(url, null);
} | @Test
public void buildHttpUrlTest() {
HttpUrl httpUrl = HttpUtils.buildHttpUrl(TEST_URL, formMap);
Assert.assertNotNull(httpUrl);
} |
public static <EventT> Write<EventT> write() {
return new AutoValue_JmsIO_Write.Builder<EventT>().build();
} | @Test
public void testPublisherWithRetryConfiguration() {
RetryConfiguration retryPolicy =
RetryConfiguration.create(5, Duration.standardSeconds(15), null);
JmsIO.Write<String> publisher =
JmsIO.<String>write()
.withConnectionFactory(connectionFactory)
.withRetryConfiguration(retryPolicy)
.withQueue(QUEUE)
.withUsername(USERNAME)
.withPassword(PASSWORD);
assertEquals(
publisher.getRetryConfiguration(),
RetryConfiguration.create(5, Duration.standardSeconds(15), null));
} |
public void setIncludedProtocols(String protocols) {
this.includedProtocols = protocols;
} | @Test
public void testSetIncludedProtocols() throws Exception {
configurable.setSupportedProtocols(new String[] { "A", "B", "C", "D" });
configuration.setIncludedProtocols("A,B ,C, D");
configuration.configure(configurable);
assertTrue(Arrays.equals(new String[] { "A", "B", "C", "D" },
configurable.getEnabledProtocols()));
} |
NodeIndices(ClusterSpec.Id cluster, NodeList allNodes) {
this(allNodes.cluster(cluster).mapToList(node -> node.allocation().get().membership().index()));
} | @Test
public void testNodeIndices() {
NodeIndices indices = new NodeIndices(List.of(1, 3, 4));
assertEquals(0, indices.probeNext());
assertEquals(2, indices.probeNext());
assertEquals(5, indices.probeNext());
assertEquals(6, indices.probeNext());
indices.resetProbe();
assertEquals(0, indices.probeNext());
assertEquals(2, indices.probeNext());
indices.commitProbe();
assertEquals(5, indices.probeNext());
assertEquals(6, indices.probeNext());
indices.resetProbe();
assertEquals(5, indices.next());
assertEquals(6, indices.next());
assertEquals(7, indices.probeNext());
try {
indices.next();
}
catch (IllegalStateException e) {
assertEquals("Must commit ongoing probe before calling 'next'", e.getMessage());
}
} |
@Override
public Object getObject(final int columnIndex) throws SQLException {
return mergeResultSet.getValue(columnIndex, Object.class);
} | @Test
void assertGetObjectWithLong() throws SQLException {
long result = 0L;
when(mergeResultSet.getValue(1, long.class)).thenReturn(result);
assertThat(shardingSphereResultSet.getObject(1, long.class), is(result));
when(mergeResultSet.getValue(1, Long.class)).thenReturn(result);
assertThat(shardingSphereResultSet.getObject(1, Long.class), is(result));
} |
public void loadProperties(Properties properties) {
Set<Entry<Object, Object>> entries = properties.entrySet();
for (Entry entry : entries) {
String key = (String) entry.getKey();
Object value = entry.getValue();
String[] keySplit = key.split("[.]");
Map<String, Object> target = this;
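            // Walk the dotted key, creating one nested map per segment; a scalar that
            // collides with a sub-map is preserved under the reserved "_" key.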
for (int i = 0; i < keySplit.length - 1; i++) {
if (!target.containsKey(keySplit[i])) {
HashMap subEntry = new HashMap();
target.put(keySplit[i], subEntry);
target = subEntry;
} else {
Object subEntry = target.get(keySplit[i]);
if (!(subEntry instanceof Map)) {
HashMap replace = new HashMap();
replace.put("_", subEntry);
target.put(keySplit[i], replace);
}
target = (Map<String, Object>) target.get(keySplit[i]);
}
}
if (target.get(keySplit[keySplit.length - 1]) instanceof Map) {
((Map) target.get(keySplit[keySplit.length - 1])).put("_", value);
} else {
target.put(keySplit[keySplit.length - 1], value);
}
}
} | @Test
void testLoadProperties() {
// given
K8sSpecTemplate template = new K8sSpecTemplate();
Properties p = new Properties();
p.put("k8s.intp.key1", "v1");
p.put("k8s.intp.key2", "v2");
p.put("k8s.key3", "v3");
p.put("key4", "v4");
// when
template.loadProperties(p);
// then
assertEquals("v4", template.get("key4"));
assertEquals("v3", ((Map) template.get("k8s")).get("key3"));
assertEquals("v2", ((Map) ((Map) template.get("k8s")).get("intp")).get("key2"));
assertEquals("v1", ((Map) ((Map) template.get("k8s")).get("intp")).get("key1"));
} |
public Instant getEndOfNextNthPeriod(Instant instant, int periods) {
return innerGetEndOfNextNthPeriod(this, this.periodicityType, instant, periods);
} | @Test
public void testVaryingNumberOfHourlyPeriods() {
RollingCalendar rc = new RollingCalendar("yyyy-MM-dd_HH");
long MILLIS_IN_HOUR = 3600 * 1000;
for (int p = 100; p > -100; p--) {
long now = 1223325293589L; // Mon Oct 06 22:34:53 CEST 2008
Instant result = rc.getEndOfNextNthPeriod(Instant.ofEpochMilli(now), p);
long expected = now - (now % (MILLIS_IN_HOUR)) + p * MILLIS_IN_HOUR;
assertEquals(expected, result.toEpochMilli());
}
} |
@Override
public TaskId id() {
return task.id();
} | @Test
public void shouldDelegateId() {
final ReadOnlyTask readOnlyTask = new ReadOnlyTask(task);
readOnlyTask.id();
verify(task).id();
} |
public static void free(final DirectBuffer buffer)
{
if (null != buffer)
{
free(buffer.byteBuffer());
}
} | @Test
void freeShouldReleaseDirectBufferResources()
{
final UnsafeBuffer buffer = new UnsafeBuffer(ByteBuffer.allocateDirect(4));
buffer.setMemory(0, 4, (byte)111);
BufferUtil.free(buffer);
} |
public String getDefMd5() { return defMd5; } | @Test
public void require_correct_defmd5() {
final String defMd5ForEmptyDefContent = "d41d8cd98f00b204e9800998ecf8427e";
RawConfig config = new RawConfig(key, null, payload, payloadChecksums, generation, false, defContent, Optional.empty());
assertThat(config.getDefMd5(), is(defMd5));
config = new RawConfig(key, "", payload, payloadChecksums, generation, false, defContent, Optional.empty());
assertThat(config.getDefMd5(), is(defMd5));
config = new RawConfig(key, defMd5, payload, payloadChecksums, generation, false, defContent, Optional.empty());
assertThat(config.getDefMd5(), is(defMd5));
config = new RawConfig(key, null, payload, payloadChecksums, generation, false, null, Optional.empty());
assertNull(config.getDefMd5());
config = new RawConfig(key, null, payload, payloadChecksums, generation, false, List.of(""), Optional.empty());
assertThat(config.getDefMd5(), is(defMd5ForEmptyDefContent));
config = new RawConfig(key, "", payload, payloadChecksums, generation, false, null, Optional.empty());
assertThat(config.getDefMd5(), is(""));
config = new RawConfig(key, "", payload, payloadChecksums, generation, false, List.of(""), Optional.empty());
assertThat(config.getDefMd5(), is(defMd5ForEmptyDefContent));
} |
@Override
public void clear() {
if (isEmpty()) {
return;
}
if (begin < end) {
Arrays.fill(elements, begin, end, null);
} else {
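            // The live region wraps past the end of the backing array: null out both
            // the tail segment (0..end) and the head segment (begin..length).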
Arrays.fill(elements, 0, end, null);
Arrays.fill(elements, begin, elements.length, null);
}
begin = -1;
end = 0;
} | @Test
public void testClear() {
CircularArrayList<String> list = new CircularArrayList<>();
list.clear();
assertEmpty(list);
for (int i = 0; i < 20; i++) {
list.add("str" + i);
}
list.clear();
assertEmpty(list);
for (int i = 10; i < 20; i++) {
list.add("str" + i);
}
for (int i = 9; i >= 0; i--) {
list.addFirst("str" + i);
}
list.clear();
assertEmpty(list);
} |
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
} | @TestTemplate
public void testUnpartitionedIsNotNull() throws Exception {
createUnpartitionedTable(spark, tableName);
SparkScanBuilder builder = scanBuilder();
TruncateFunction.TruncateString function = new TruncateFunction.TruncateString();
UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(4), fieldRef("data")));
Predicate predicate = new Predicate("IS_NOT_NULL", expressions(udf));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
// NOT IsNotNull
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
} |
static Properties getSystemProperties() {
String[] inputConfigSources = System.getProperty("proxyconfigsources",
DEFAULT_PROXY_CONFIG_SOURCES).split(",");
return new Properties(inputConfigSources);
} | @Test
void testReadingSystemProperties() {
ProxyServer.Properties properties = ProxyServer.getSystemProperties();
assertEquals(1, properties.configSources.length);
assertEquals(ProxyServer.DEFAULT_PROXY_CONFIG_SOURCES, properties.configSources[0]);
} |
@VisibleForTesting
int getSleepDuration() {
return sleepDuration;
} | @Test
public void testNoMetricUpdatesThenNoWaiting() {
ClientThrottlingAnalyzer analyzer = new ClientThrottlingAnalyzer(
"test",
ANALYSIS_PERIOD);
validate(0, analyzer.getSleepDuration());
sleep(ANALYSIS_PERIOD_PLUS_10_PERCENT);
validate(0, analyzer.getSleepDuration());
} |
public static <T> Encoder<T> encoderFor(Coder<T> coder) {
Encoder<T> enc = getOrCreateDefaultEncoder(coder.getEncodedTypeDescriptor().getRawType());
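    // Fall back to a coder-backed binary encoder when no default Spark encoder is
    // registered for the type.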
return enc != null ? enc : binaryEncoder(coder, true);
} | @Test
public void testBeamBinaryEncoder() {
List<List<String>> data = asList(asList("a1", "a2", "a3"), asList("b1", "b2"), asList("c1"));
Encoder<List<String>> encoder = encoderFor(ListCoder.of(StringUtf8Coder.of()));
serializeAndDeserialize(data.get(0), encoder);
Dataset<List<String>> dataset = createDataset(data, encoder);
assertThat(dataset.collect(), equalTo(data.toArray()));
} |
public static <C> AsyncBuilder<C> builder() {
return new AsyncBuilder<>();
} | @Test
void mapAndDecodeExecutesMapFunction() throws Throwable {
server.enqueue(new MockResponse().setBody("response!"));
TestInterfaceAsync api =
AsyncFeign.builder().mapAndDecode(upperCaseResponseMapper(), new StringDecoder())
.target(TestInterfaceAsync.class, "http://localhost:" + server.getPort());
assertThat(unwrap(api.post())).isEqualTo("RESPONSE!");
} |
public static <T> List<List<T>> splitBySize(List<T> list, int expectedSize)
throws NullPointerException, IllegalArgumentException {
Preconditions.checkNotNull(list, "list must not be null");
Preconditions.checkArgument(expectedSize > 0, "expectedSize must larger than 0");
if (1 == expectedSize) {
return Collections.singletonList(list);
}
int splitSize = Math.min(expectedSize, list.size());
List<List<T>> result = new ArrayList<List<T>>(splitSize);
for (int i = 0; i < splitSize; i++) {
result.add(new ArrayList<>());
}
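        // Deal elements round-robin so the resulting sublists differ in size by at most one.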
int index = 0;
for (T t : list) {
result.get(index).add(t);
index = (index + 1) % splitSize;
}
return result;
} | @Test
public void testSplitBySizeNormal2() {
List<Integer> lists = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7);
int expectSize = 1;
List<List<Integer>> splitLists = ListUtil.splitBySize(lists, expectSize);
Assert.assertEquals(splitLists.size(), 1);
Assert.assertEquals(lists, splitLists.get(0));
} |
public static <T> CompletionStage<T> recover(CompletionStage<T> completionStage, Function<Throwable, T> exceptionHandler){
return completionStage.exceptionally(exceptionHandler);
} | @Test
public void shouldReturnResult2() throws Exception {
CompletableFuture<String> future = CompletableFuture.completedFuture("result");
String result = recover(future, TimeoutException.class, (e) -> "fallback").toCompletableFuture()
.get(1, TimeUnit.SECONDS);
assertThat(result).isEqualTo("result");
} |
@Override
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterSlaveMap() {
Iterable<RedisClusterNode> res = clusterGetNodes();
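        // First pass: collect the master nodes; the second pass links each replica
        // to its master via masterId.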
Set<RedisClusterNode> masters = new HashSet<RedisClusterNode>();
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
RedisClusterNode redisClusterNode = iterator.next();
if (redisClusterNode.isMaster()) {
masters.add(redisClusterNode);
}
}
Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<RedisClusterNode, Collection<RedisClusterNode>>();
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
RedisClusterNode redisClusterNode = iterator.next();
for (RedisClusterNode masterNode : masters) {
if (redisClusterNode.getMasterId() != null
&& redisClusterNode.getMasterId().equals(masterNode.getId())) {
Collection<RedisClusterNode> list = result.get(masterNode);
if (list == null) {
list = new ArrayList<RedisClusterNode>();
result.put(masterNode, list);
}
list.add(redisClusterNode);
}
}
}
return result;
} | @Test
public void testClusterGetMasterSlaveMap() {
Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterSlaveMap();
assertThat(map).hasSize(3);
for (Collection<RedisClusterNode> slaves : map.values()) {
assertThat(slaves).hasSize(1);
}
} |
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
int[] colWidths = colWidths();
if (_headerColumnNames != null) {
append(buf, colWidths, _headerColumnNames);
int totalWidth = 0;
for (int width : colWidths) {
totalWidth += width;
}
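            // Underline the header with an '=' rule spanning the combined column widths.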
buf.append(rightPad("", totalWidth, '='));
buf.append('\n');
}
for (String[] row : _rows) {
append(buf, colWidths, row);
}
return buf.toString();
} | @Test(priority = 1)
public void testToStringForEmptyTextTable() {
// Run the test
final String result = _textTableUnderTest.toString();
// Verify the results
assertEquals("", result);
} |
public ProtocolBuilder optimizer(String optimizer) {
this.optimizer = optimizer;
return getThis();
} | @Test
void optimizer() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.optimizer("optimizer");
Assertions.assertEquals("optimizer", builder.build().getOptimizer());
} |
@Override
public short getTypeCode() {
return MessageType.TYPE_BATCH_RESULT_MSG;
} | @Test
void getTypeCode() {
BatchResultMessage batchResultMessage = new BatchResultMessage();
Assertions.assertEquals(MessageType.TYPE_BATCH_RESULT_MSG, batchResultMessage.getTypeCode());
} |
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
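      // When the caller supplies a position, bound the query so the read reflects
      // at least that point in the changelog.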
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldReturnValuesFullTableScan() {
// Given:
when(kafkaStreams.query(any())).thenReturn(getIteratorResult());
// When:
final KsMaterializedQueryResult<Row> result = table.get(PARTITION);
// Then:
Iterator<Row> rowIterator = result.getRowIterator();
assertThat(rowIterator.hasNext(), is(true));
assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY, ROW1, TIME1)));
assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY2, ROW2, TIME2)));
assertThat(rowIterator.hasNext(), is(false));
assertThat(result.getPosition(), not(Optional.empty()));
assertThat(result.getPosition().get(), is(POSITION));
} |
@PublicAPI(usage = ACCESS)
public Set<Dependency> getDirectDependenciesFromSelf() {
return javaClassDependencies.getDirectDependenciesFromClass();
} | @Test
public void direct_dependencies_from_self_by_references() {
JavaClass javaClass = importClasses(AReferencingB.class, BReferencedByA.class).get(AReferencingB.class);
assertReferencesFromAToB(javaClass.getDirectDependenciesFromSelf());
} |
public static LinearModel fit(Formula formula, DataFrame data) {
return fit(formula, data, new Properties());
} | @Test
public void testProstate() {
System.out.println("Prostate");
LinearModel model = OLS.fit(Prostate.formula, Prostate.train);
System.out.println(model);
double[] prediction = model.predict(Prostate.test);
double rmse = RMSE.of(Prostate.testy, prediction);
System.out.println("RMSE on test data = " + rmse);
assertEquals(0.721993, rmse, 1E-4);
} |
@Override
public void setRampDownPercent(long rampDownPercent) {
Validate.isTrue((rampDownPercent >= 0) && (rampDownPercent < 100), "rampDownPercent must be a value between 0 and 99");
this.rampDownPercent = rampDownPercent;
} | @Test(expected = IllegalArgumentException.class)
public void testSetRampDownPercent_exceeds99() {
sampler.setRampDownPercent(100);
} |