focal_method | test_case |
---|---|
@Override
public AsyncHttpResponse handle(HttpRequest request) {
Instant from = Optional.ofNullable(request.getProperty("from"))
.map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MIN);
Instant to = Optional.ofNullable(request.getProperty("to"))
.map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MAX);
long maxLines = Optional.ofNullable(request.getProperty("maxLines"))
.map(Long::valueOf).orElse(100_000L);
Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname"));
return new AsyncHttpResponse(200) {
@Override
public long maxPendingBytes() { return MB; }
@Override
public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) {
try (output) {
logReader.writeLogs(output, from, to, maxLines, hostname);
}
catch (Throwable t) {
log.log(Level.WARNING, "Failed reading logs from " + from + " to " + to, t);
}
finally {
networkChannel.close(handler);
}
}
};
} | @Test
void handleCorrectlyParsesQueryParameters() throws IOException {
MockLogReader mockLogReader = new MockLogReader();
LogHandler logHandler = new LogHandler(mock(Executor.class), mockLogReader);
{
String uri = "http://myhost.com:1111/logs?from=1000&to=2000";
AsyncHttpResponse response = logHandler.handle(HttpRequest.createTestRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.GET));
ReadableContentChannel out = new ReadableContentChannel();
new Thread(() -> Exceptions.uncheck(() -> response.render(new ContentChannelOutputStream(out), out, null))).start();
String expectedResponse = "newer log";
assertEquals(expectedResponse, new String(out.toStream().readAllBytes(), UTF_8));
}
{
String uri = "http://myhost.com:1111/logs?from=0&to=1000";
AsyncHttpResponse response = logHandler.handle(HttpRequest.createTestRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.GET));
ReadableContentChannel out = new ReadableContentChannel();
new Thread(() -> Exceptions.uncheck(() -> response.render(new ContentChannelOutputStream(out), out, null))).start();
String expectedResponse = "older log";
assertEquals(expectedResponse, new String(out.toStream().readAllBytes(), UTF_8));
}
} |
public boolean isValid() {
return !Double.isNaN(lat) && !Double.isNaN(lon);
} | @Test
public void testIsValid() {
GHPoint instance = new GHPoint();
assertFalse(instance.isValid());
instance.lat = 1;
assertFalse(instance.isValid());
instance.lon = 1;
assertTrue(instance.isValid());
} |
@Override
public boolean isShutdown() {
return shutdown.get();
} | @Test
public void isShutdown() {
ManagedExecutorService executorService = newManagedExecutorService();
executorService.shutdown();
assertTrue(executorService.isShutdown());
} |
Duration lap() {
Instant now = clock.instant();
Duration duration = Duration.between(lapStartTime, now);
lapStartTime = now;
return duration;
} | @Test
public void testLap() {
Mockito.when(mockClock.instant()).thenReturn(Instant.EPOCH);
Timer parentTimer = new Timer(mockClock, null);
Mockito.when(mockClock.instant()).thenReturn(Instant.EPOCH.plusMillis(5));
Duration parentDuration1 = parentTimer.lap();
Mockito.when(mockClock.instant()).thenReturn(Instant.EPOCH.plusMillis(15));
Duration parentDuration2 = parentTimer.lap();
Mockito.when(mockClock.instant()).thenReturn(Instant.EPOCH.plusMillis(16));
Timer childTimer = new Timer(mockClock, parentTimer);
Mockito.when(mockClock.instant()).thenReturn(Instant.EPOCH.plusMillis(16).plusNanos(1));
Duration childDuration = childTimer.lap();
Mockito.when(mockClock.instant()).thenReturn(Instant.EPOCH.plusMillis(16).plusNanos(2));
Duration parentDuration3 = parentTimer.lap();
Assert.assertTrue(parentDuration2.compareTo(parentDuration1) > 0);
Assert.assertTrue(parentDuration1.compareTo(parentDuration3) > 0);
Assert.assertTrue(parentDuration3.compareTo(childDuration) > 0);
} |
public MethodDescriptor getMethod(String methodName, String params) {
Map<String, MethodDescriptor> methods = descToMethods.get(methodName);
if (CollectionUtils.isNotEmptyMap(methods)) {
return methods.get(params);
}
return null;
} | @Test
void getMethod() {
String desc = ReflectUtils.getDesc(String.class);
Assertions.assertNotNull(service.getMethod("sayHello", desc));
} |
String getProfileViewResponseFromBody(String responseBody) {
String template = (String) DEFAULT_GSON.fromJson(responseBody, Map.class).get("template");
if (StringUtils.isBlank(template)) {
throw new RuntimeException("Template was blank!");
}
return template;
} | @Test
public void shouldUnJSONizeGetProfileViewResponseFromBody() {
String template = new ElasticAgentExtensionConverterV4().getProfileViewResponseFromBody("{\"template\":\"foo\"}");
assertThat(template, is("foo"));
} |
public static PrivilegedOperation squashCGroupOperations
(List<PrivilegedOperation> ops) throws PrivilegedOperationException {
if (ops.isEmpty()) {
return null;
}
StringBuilder finalOpArg = new StringBuilder(PrivilegedOperation
.CGROUP_ARG_PREFIX);
boolean noTasks = true;
for (PrivilegedOperation op : ops) {
if (!op.getOperationType()
.equals(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP)) {
LOG.warn("Unsupported operation type: " + op.getOperationType());
throw new PrivilegedOperationException("Unsupported operation type:"
+ op.getOperationType());
}
List<String> args = op.getArguments();
if (args.size() != 1) {
LOG.warn("Invalid number of args: " + args.size());
throw new PrivilegedOperationException("Invalid number of args: "
+ args.size());
}
String arg = args.get(0);
String tasksFile = StringUtils.substringAfter(arg,
PrivilegedOperation.CGROUP_ARG_PREFIX);
if (tasksFile == null || tasksFile.isEmpty()) {
LOG.warn("Invalid argument: " + arg);
throw new PrivilegedOperationException("Invalid argument: " + arg);
}
if (tasksFile.equals(PrivilegedOperation.CGROUP_ARG_NO_TASKS)) {
//Don't append to finalOpArg
continue;
}
if (!noTasks) {
//We have already appended at least one tasks file.
finalOpArg.append(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR);
finalOpArg.append(tasksFile);
} else {
finalOpArg.append(tasksFile);
noTasks = false;
}
}
if (noTasks) {
finalOpArg.append(PrivilegedOperation.CGROUP_ARG_NO_TASKS); // there were no tasks files to append
}
PrivilegedOperation finalOp = new PrivilegedOperation(
PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, finalOpArg
.toString());
return finalOp;
} | @Test
public void testSquashCGroupOperationsWithInvalidOperations() {
List<PrivilegedOperation> ops = new ArrayList<>();
//Ensure that disallowed ops are rejected
ops.add(opTasksNone);
ops.add(opDisallowed);
try {
PrivilegedOperationExecutor.squashCGroupOperations(ops);
Assert.fail("Expected squash operation to fail with an exception!");
} catch (PrivilegedOperationException e) {
LOG.info("Caught expected exception : " + e);
}
//Ensure that invalid strings are rejected
ops.clear();
ops.add(opTasksNone);
ops.add(opTasksInvalid);
try {
PrivilegedOperationExecutor.squashCGroupOperations(ops);
Assert.fail("Expected squash operation to fail with an exception!");
} catch (PrivilegedOperationException e) {
LOG.info("Caught expected exception : " + e);
}
} |
public void isEqualTo(@Nullable Object expected) {
standardIsEqualTo(expected);
} | @SuppressWarnings("TruthSelfEquals")
@Test
public void isEqualToSameInstanceBadEqualsImplementation() {
Object o = new ThrowsOnEquals();
assertThat(o).isEqualTo(o);
} |
@Override
public Counter counter(String name) {
return NoopCounter.INSTANCE;
} | @Test
public void accessingACustomCounterRegistersAndReusesTheCounter() {
final MetricRegistry.MetricSupplier<Counter> supplier = () -> counter;
final Counter counter1 = registry.counter("thing", supplier);
final Counter counter2 = registry.counter("thing", supplier);
assertThat(counter1).isExactlyInstanceOf(NoopMetricRegistry.NoopCounter.class);
assertThat(counter2).isExactlyInstanceOf(NoopMetricRegistry.NoopCounter.class);
assertThat(counter1).isSameAs(counter2);
verify(listener, never()).onCounterAdded("thing", counter1);
} |
public void selectPhyOffset(final List<Long> phyOffsets, final String key, final int maxNum,
final long begin, final long end) {
if (this.mappedFile.hold()) {
int keyHash = indexKeyHashMethod(key);
int slotPos = keyHash % this.hashSlotNum;
int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
try {
int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()
|| this.indexHeader.getIndexCount() <= 1) {
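// no index entries hashed to this slot (or the index is empty): leave phyOffsets untouched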
} else {
for (int nextIndexToRead = slotValue; ; ) {
if (phyOffsets.size() >= maxNum) {
break;
}
int absIndexPos =
IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
+ nextIndexToRead * indexSize;
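// index entry layout: keyHash (4 bytes) + phyOffset (8) + timeDiff (4) + prevIndexPos (4)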
int keyHashRead = this.mappedByteBuffer.getInt(absIndexPos);
long phyOffsetRead = this.mappedByteBuffer.getLong(absIndexPos + 4);
long timeDiff = this.mappedByteBuffer.getInt(absIndexPos + 4 + 8);
int prevIndexRead = this.mappedByteBuffer.getInt(absIndexPos + 4 + 8 + 4);
if (timeDiff < 0) {
break;
}
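// timeDiff is stored in seconds relative to the file's begin timestamp; convert to millis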
timeDiff *= 1000L;
long timeRead = this.indexHeader.getBeginTimestamp() + timeDiff;
boolean timeMatched = timeRead >= begin && timeRead <= end;
if (keyHash == keyHashRead && timeMatched) {
phyOffsets.add(phyOffsetRead);
}
if (prevIndexRead <= invalidIndex
|| prevIndexRead > this.indexHeader.getIndexCount()
|| prevIndexRead == nextIndexToRead || timeRead < begin) {
break;
}
nextIndexToRead = prevIndexRead;
}
}
} catch (Exception e) {
log.error("selectPhyOffset exception ", e);
} finally {
this.mappedFile.release();
}
}
} | @Test
public void testSelectPhyOffset() throws Exception {
IndexFile indexFile = new IndexFile("200", HASH_SLOT_NUM, INDEX_NUM, 0, 0);
for (long i = 0; i < (INDEX_NUM - 1); i++) {
boolean putResult = indexFile.putKey(Long.toString(i), i, System.currentTimeMillis());
assertThat(putResult).isTrue();
}
// putting a key beyond the index file capacity should fail.
boolean putResult = indexFile.putKey(Long.toString(400), 400, System.currentTimeMillis());
assertThat(putResult).isFalse();
final List<Long> phyOffsets = new ArrayList<>();
indexFile.selectPhyOffset(phyOffsets, "60", 10, 0, Long.MAX_VALUE);
assertThat(phyOffsets).isNotEmpty();
assertThat(phyOffsets.size()).isEqualTo(1);
indexFile.destroy(0);
File file = new File("200");
UtilAll.deleteFile(file);
} |
@Override
public Collection<Member> getMemberList() {
return client.getClientClusterService().getMemberList();
} | @Test
public void testGetMemberList() {
Collection<Member> memberList = context.getMemberList();
assertNotNull(memberList);
assertEquals(1, memberList.size());
} |
@Override
public long timestamp() {
throw new UnsupportedOperationException("StateStores can't access timestamp.");
} | @Test
public void shouldThrowOnTimestamp() {
assertThrows(UnsupportedOperationException.class, () -> context.timestamp());
} |
public static SqlPrimitiveType of(final String typeName) {
switch (typeName.toUpperCase()) {
case INT:
return SqlPrimitiveType.of(SqlBaseType.INTEGER);
case VARCHAR:
return SqlPrimitiveType.of(SqlBaseType.STRING);
default:
try {
final SqlBaseType sqlType = SqlBaseType.valueOf(typeName.toUpperCase());
return SqlPrimitiveType.of(sqlType);
} catch (final IllegalArgumentException e) {
throw new SchemaException("Unknown primitive type: " + typeName, e);
}
}
} | @Test
public void shouldSupportAlternativeSqlPrimitiveTypeNames() {
// Given:
final java.util.Map<String, SqlBaseType> primitives = ImmutableMap.of(
"InT", SqlBaseType.INTEGER,
"VarchaR", SqlBaseType.STRING
);
primitives.forEach((string, expected) ->
// Then:
assertThat(SqlPrimitiveType.of(string).baseType(), is(expected))
);
} |
public static String getPID() {
String pid = System.getProperty("pid");
if (pid == null) {
// first, reliable with sun jdk (http://golesny.de/wiki/code:javahowtogetpid)
final RuntimeMXBean rtb = ManagementFactory.getRuntimeMXBean();
final String processName = rtb.getName();
/* tested on: */
/* - windows xp sp 2, java 1.5.0_13 */
/* - mac os x 10.4.10, java 1.5.0 */
/* - debian linux, java 1.5.0_13 */
/* all return pid@host, e.g. 2204@antonius */
if (processName.indexOf('@') != -1) {
pid = processName.substring(0, processName.indexOf('@'));
} else {
pid = getPIDFromOS();
}
System.setProperty("pid", pid);
}
return pid;
} | @Test
public void testGetPID() {
assertNotNull("getPID", PID.getPID());
} |
public static void main(String[] args) throws Exception {
Arguments arguments = new Arguments();
CommandLine commander = new CommandLine(arguments);
try {
commander.parseArgs(args);
if (arguments.help) {
commander.usage(commander.getOut());
return;
}
if (arguments.generateDocs) {
CmdGenerateDocs cmd = new CmdGenerateDocs("pulsar");
cmd.addCommand("delete-cluster-metadata", commander);
cmd.run(null);
return;
}
} catch (Exception e) {
commander.getErr().println(e);
throw e;
}
@Cleanup
MetadataStoreExtended metadataStore = MetadataStoreExtended.create(arguments.zookeeper,
MetadataStoreConfig.builder()
.sessionTimeoutMillis(arguments.zkSessionTimeoutMillis)
.metadataStoreName(MetadataStoreConfig.METADATA_STORE)
.build());
if (arguments.bkMetadataServiceUri != null) {
@Cleanup
BookKeeper bookKeeper =
new BookKeeper(new ClientConfiguration().setMetadataServiceUri(arguments.bkMetadataServiceUri));
@Cleanup("shutdown")
ManagedLedgerFactory managedLedgerFactory = new ManagedLedgerFactoryImpl(metadataStore, bookKeeper);
deleteManagedLedgers(metadataStore, managedLedgerFactory);
deleteSchemaLedgers(metadataStore, bookKeeper);
}
for (String localZkNode : localZkNodes) {
deleteRecursively(metadataStore, "/" + localZkNode).join();
}
if (arguments.configurationStore != null && arguments.cluster != null) {
// Should it be done by REST API before broker is down?
@Cleanup
MetadataStore configMetadataStore = MetadataStoreFactory.create(arguments.configurationStore,
MetadataStoreConfig.builder().sessionTimeoutMillis(arguments.zkSessionTimeoutMillis)
.metadataStoreName(MetadataStoreConfig.CONFIGURATION_METADATA_STORE).build());
deleteRecursively(configMetadataStore, "/admin/clusters/" + arguments.cluster).join();
}
log.info("Cluster metadata for '{}' teardown.", arguments.cluster);
} | @Test
public void testMainGenerateDocs() throws Exception {
PrintStream oldStream = System.out;
try {
ByteArrayOutputStream baoStream = new ByteArrayOutputStream();
System.setOut(new PrintStream(baoStream));
Class argumentsClass =
Class.forName("org.apache.pulsar.PulsarClusterMetadataTeardown$Arguments");
PulsarClusterMetadataTeardown.main(new String[]{"-zk", "zk", "-g"});
String message = baoStream.toString();
Field[] fields = argumentsClass.getDeclaredFields();
for (Field field : fields) {
boolean fieldHasAnno = field.isAnnotationPresent(Option.class);
if (fieldHasAnno) {
Option fieldAnno = field.getAnnotation(Option.class);
String[] names = fieldAnno.names();
if (names.length == 0) {
continue;
}
String nameStr = Arrays.asList(names).toString();
nameStr = nameStr.substring(1, nameStr.length() - 1);
assertTrue(message.indexOf(nameStr) > 0);
}
}
} finally {
System.setOut(oldStream);
}
} |
public void fillMaxSpeed(Graph graph, EncodingManager em) {
// In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info,
// but now we do, so we can fill in the country-dependent max_speed value where it is missing.
EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL);
} | @Test
public void testLivingStreetWithWalk() {
ReaderWay way = new ReaderWay(0L);
way.setTag("country", Country.AUT);
way.setTag("highway", "living_street");
EdgeIteratorState edge = createEdge(way).set(urbanDensity, CITY);
calc.fillMaxSpeed(graph, em);
assertEquals(6, edge.get(maxSpeedEnc), 1);
} |
public List<MemorySegment> requestBuffers() throws Exception {
List<MemorySegment> allocated = new ArrayList<>(numBuffersPerRequest);
synchronized (buffers) {
checkState(!destroyed, "Buffer pool is already destroyed.");
if (!initialized) {
initialize();
}
Deadline deadline = Deadline.fromNow(WAITING_TIME);
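// block until a full batch is available; return the (empty) list if the deadline expires first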
while (buffers.size() < numBuffersPerRequest) {
checkState(!destroyed, "Buffer pool is already destroyed.");
buffers.wait(WAITING_TIME.toMillis());
if (!deadline.hasTimeLeft()) {
return allocated; // return the empty list
}
}
while (allocated.size() < numBuffersPerRequest) {
allocated.add(buffers.poll());
}
lastBufferOperationTimestamp = System.currentTimeMillis();
}
return allocated;
} | @Test
void testRequestBuffers() throws Exception {
BatchShuffleReadBufferPool bufferPool = createBufferPool();
List<MemorySegment> buffers = new ArrayList<>();
try {
buffers.addAll(bufferPool.requestBuffers());
assertThat(buffers).hasSize(bufferPool.getNumBuffersPerRequest());
} finally {
bufferPool.recycle(buffers);
bufferPool.destroy();
}
} |
@SuppressWarnings("unchecked")
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
return register(MetricName.build(name), metric);
} | @Test
public void registeringATimerTriggersANotification() throws Exception {
assertThat(registry.register(THING, timer))
.isEqualTo(timer);
verify(listener).onTimerAdded(THING, timer);
} |
@Override
public void validate(CruiseConfig cruiseConfig) {
ServerConfig serverConfig = cruiseConfig.server();
String artifactDir = serverConfig.artifactsDir();
if (isEmpty(artifactDir)) {
throw new RuntimeException("Please provide a not empty value for artifactsdir");
}
if (StringUtils.equals(".", artifactDir) || new File("").getAbsolutePath().equals(
new File(artifactDir).getAbsolutePath())) {
throw new RuntimeException("artifactsdir should not point to the root of sand box [" +
new File(artifactDir).getAbsolutePath()
+ "]");
}
} | @Test
public void shouldNotThrowExceptionWhenUserProvidesValidPath() throws Exception {
File file = new File("");
CruiseConfig cruiseConfig = new BasicCruiseConfig();
cruiseConfig.setServerConfig(new ServerConfig(file.getAbsolutePath() + "/logs", null));
ArtifactDirValidator dirValidator = new ArtifactDirValidator();
dirValidator.validate(cruiseConfig);
} |
@Override
public String toString() {
return pathService.toString(this);
} | @Test
public void testPathParsing_windowsStylePaths() throws IOException {
PathService windowsPathService = PathServiceTest.fakeWindowsPathService();
assertEquals("C:\\", pathService.parsePath("C:\\").toString());
assertEquals("C:\\foo", windowsPathService.parsePath("C:\\foo").toString());
assertEquals("C:\\foo", windowsPathService.parsePath("C:\\", "foo").toString());
assertEquals("C:\\foo", windowsPathService.parsePath("C:", "\\foo").toString());
assertEquals("C:\\foo", windowsPathService.parsePath("C:", "foo").toString());
assertEquals("C:\\foo\\bar", windowsPathService.parsePath("C:", "foo/bar").toString());
} |
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
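// the column meta gives the width in bytes of the BLOB length prefix (e.g. meta=2 -> a 2-byte length, as in the test below)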
return payload.readStringFixByBytes(readLengthFromMeta(columnDef.getColumnMeta(), payload));
} | @Test
void assertReadWithMeta2() {
columnDef.setColumnMeta(2);
when(payload.getByteBuf()).thenReturn(byteBuf);
when(byteBuf.readUnsignedShortLE()).thenReturn(0xffff);
when(payload.readStringFixByBytes(0xffff)).thenReturn(new byte[65535]);
assertThat(new MySQLBlobBinlogProtocolValue().read(columnDef, payload), is(new byte[65535]));
} |
List<String> getStatements(int oldVersion, int newVersion) {
List<String> result = new ArrayList<>();
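// collect the statements for every version in the range (oldVersion, newVersion]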
for (int i = oldVersion + 1; i <= newVersion; ++i) {
List<String> list = statementsMap.get(i);
if (list != null) {
result.addAll(list);
}
}
return result;
} | @Test
public void testGetStatements() {
MSQLiteBuilder builder = new MSQLiteBuilder()
.version(1)
.statement("11")
.statement("12")
.version(3)
.statement("31")
.statement("32");
List<String> list = new ArrayList<>();
list.add("31");
list.add("32");
assertEquals(list, builder.getStatements(1, 3));
} |
public static String wrap(String input, Formatter formatter) throws FormatterException {
return StringWrapper.wrap(Formatter.MAX_LINE_LENGTH, input, formatter);
} | @Test
public void textBlockTrailingWhitespace() throws Exception {
assumeTrue(Runtime.version().feature() >= 15);
String input =
lines(
"public class T {",
" String s =",
" \"\"\"",
" lorem ",
" ipsum",
" \"\"\";",
"}");
String expected =
lines(
"public class T {",
" String s =",
" \"\"\"",
" lorem",
" ipsum",
" \"\"\";",
"}");
String actual = StringWrapper.wrap(100, input, new Formatter());
assertThat(actual).isEqualTo(expected);
} |
@Config
public static PrintStream fallbackLogger()
{
final String fallbackLoggerName = getProperty(FALLBACK_LOGGER_PROP_NAME, "stderr");
switch (fallbackLoggerName)
{
case "stdout":
return System.out;
case "no_op":
return NO_OP_LOGGER;
case "stderr":
default:
return System.err;
}
} | @Test
void fallbackLoggerReturnsNoOpLoggerIfConfigured()
{
System.setProperty(FALLBACK_LOGGER_PROP_NAME, "no_op");
try
{
final PrintStream logger = CommonContext.fallbackLogger();
assertNotNull(logger);
assertNotSame(System.err, logger);
assertNotSame(System.out, logger);
assertSame(logger, CommonContext.fallbackLogger());
}
finally
{
System.clearProperty(FALLBACK_LOGGER_PROP_NAME);
}
} |
public static boolean isCoastedRadarHit(CenterRadarHit centerRh) {
String cmsFieldValue = centerRh.cmsField153A();
return nonNull(cmsFieldValue) && CENTER_COASTED_FLAGS.contains(cmsFieldValue);
} | @Test
public void testIsCoastedRadarHit() {
CenterRadarHit notCoasted = (CenterRadarHit) parse(NON_COASTED_RH);
CenterRadarHit coasted = (CenterRadarHit) parse(COASTED_RH);
assertFalse(CenterSmoothing.isCoastedRadarHit(notCoasted));
assertTrue(CenterSmoothing.isCoastedRadarHit(coasted));
} |
@Override
public Map<String, String> convertToEntityAttribute(String dbData) {
return GSON.fromJson(dbData, TYPE);
} | @Test
void convertToEntityAttribute_null_twoElement() throws IOException {
Map<String, String> map = new HashMap<>(8);
map.put("a", "1");
map.put("disableCheck", "true");
String content = readAllContentOf("json/converter/element.2.json");
assertEquals(map, this.converter.convertToEntityAttribute(content));
} |
public OkHttpClientBuilder setResponseTimeoutMs(long l) {
if (l < 0) {
throw new IllegalArgumentException("Response timeout must be positive. Got " + l);
}
this.responseTimeoutMs = l;
return this;
} | @Test
public void build_throws_IAE_if_response_timeout_is_negative() {
assertThatThrownBy(() -> underTest.setResponseTimeoutMs(-10))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Response timeout must be positive. Got -10");
} |
public void fillMaxSpeed(Graph graph, EncodingManager em) {
// In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info,
// but now we do, so we can fill in the country-dependent max_speed value where it is missing.
EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL);
} | @Test
public void testLivingStreetWithMaxSpeed() {
ReaderWay way = new ReaderWay(0L);
way.setTag("country", Country.DEU);
way.setTag("highway", "living_street");
way.setTag("maxspeed", "30");
EdgeIteratorState edge = createEdge(way).set(urbanDensity, CITY);
calc.fillMaxSpeed(graph, em);
assertEquals(30, edge.get(maxSpeedEnc), 1);
assertEquals(30, edge.getReverse(maxSpeedEnc), 1);
} |
@Override
public void write(int b) throws IOException {
if (pos >= writeBuffer.length) {
flush();
}
writeBuffer[pos++] = (byte) b;
} | @Test
void testWriteLargerThanBufferSize() throws IOException {
final Path workingDir = new Path(TempDirUtils.newFolder(temporaryFolder).getAbsolutePath());
final Path file = new Path(workingDir, "test-file");
TestingFsBatchFlushOutputStream outputStream =
new TestingFsBatchFlushOutputStream(
file.getFileSystem(), file, FileSystem.WriteMode.NO_OVERWRITE, BUFFER_SIZE);
assertThat(outputStream.flushCount).isZero();
outputStream.write(new byte[BUFFER_SIZE + 1]);
assertThat(outputStream.flushCount).isEqualTo(1);
} |
@Override
void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception {
ObjectUtil.checkNotNull(headerBlock, "headerBlock");
ObjectUtil.checkNotNull(frame, "frame");
if (cumulation == null) {
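// no bytes left over from a previous fragment: decode directly and buffer any unread remainder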
decodeHeaderBlock(headerBlock, frame);
if (headerBlock.isReadable()) {
cumulation = alloc.buffer(headerBlock.readableBytes());
cumulation.writeBytes(headerBlock);
}
} else {
cumulation.writeBytes(headerBlock);
decodeHeaderBlock(cumulation, frame);
if (cumulation.isReadable()) {
cumulation.discardReadBytes();
} else {
releaseBuffer();
}
}
} | @Test
public void testIllegalValueStartsWithNull() throws Exception {
ByteBuf headerBlock = Unpooled.buffer(22);
headerBlock.writeInt(1);
headerBlock.writeInt(4);
headerBlock.writeBytes(nameBytes);
headerBlock.writeInt(6);
headerBlock.writeByte(0);
headerBlock.writeBytes(valueBytes);
decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame);
assertFalse(headerBlock.isReadable());
assertTrue(frame.isInvalid());
assertEquals(0, frame.headers().names().size());
headerBlock.release();
} |
@Override
public SortedSet<IndexRange> findAll() {
try (DBCursor<MongoIndexRange> cursor = collection.find(DBQuery.notExists("start"))) {
return ImmutableSortedSet.copyOf(IndexRange.COMPARATOR, (Iterator<? extends IndexRange>) cursor);
}
} | @Test
@MongoDBFixtures("MongoIndexRangeServiceTest-LegacyIndexRanges.json")
public void findAllReturnsAllIgnoresLegacyIndexRanges() throws Exception {
assertThat(indexRangeService.findAll()).hasSize(1);
} |
@Override
protected String buildUndoSQL() {
TableRecords beforeImage = sqlUndoLog.getBeforeImage();
List<Row> beforeImageRows = beforeImage.getRows();
if (CollectionUtils.isEmpty(beforeImageRows)) {
throw new ShouldNeverHappenException("Invalid UNDO LOG"); // TODO
}
Row row = beforeImageRows.get(0);
List<Field> nonPkFields = row.nonPrimaryKeys();
// In an UPDATE undo log, every before-image field comes from the table meta, so escaping must be added.
// see BaseTransactionalExecutor#buildTableRecords
String updateColumns = nonPkFields.stream().map(
field -> ColumnUtils.addEscape(field.getName(), JdbcConstants.ORACLE) + " = ?").collect(
Collectors.joining(", "));
List<String> pkNameList = getOrderedPkList(beforeImage, row, JdbcConstants.ORACLE).stream().map(
e -> e.getName()).collect(Collectors.toList());
String whereSql = SqlGenerateUtils.buildWhereConditionByPKs(pkNameList, JdbcConstants.ORACLE);
return String.format(UPDATE_SQL_TEMPLATE, sqlUndoLog.getTableName(), updateColumns, whereSql);
} | @Test
public void buildUndoSQLByUpperCase() {
OracleUndoUpdateExecutor executor = upperCaseSQL();
String sql = executor.buildUndoSQL();
Assertions.assertNotNull(sql);
Assertions.assertTrue(sql.contains("UPDATE"));
Assertions.assertTrue(sql.contains("ID"));
Assertions.assertTrue(sql.contains("AGE"));
Assertions.assertTrue(sql.contains("TABLE_NAME"));
} |
@Nonnull
public ResourceProfile merge(final ResourceProfile other) {
checkNotNull(other, "Cannot merge with null resources");
if (equals(ANY) || other.equals(ANY)) {
return ANY;
}
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
Map<String, ExternalResource> resultExtendedResource = new HashMap<>(extendedResources);
other.extendedResources.forEach(
(String name, ExternalResource resource) -> {
resultExtendedResource.compute(
name,
(ignored, oldResource) ->
oldResource == null ? resource : oldResource.merge(resource));
});
return new ResourceProfile(
cpuCores.merge(other.cpuCores),
taskHeapMemory.add(other.taskHeapMemory),
taskOffHeapMemory.add(other.taskOffHeapMemory),
managedMemory.add(other.managedMemory),
networkMemory.add(other.networkMemory),
resultExtendedResource);
} | @Test
void testMerge() {
final ResourceProfile rp1 =
ResourceProfile.newBuilder()
.setCpuCores(1.0)
.setTaskHeapMemoryMB(100)
.setTaskOffHeapMemoryMB(100)
.setManagedMemoryMB(100)
.setNetworkMemoryMB(100)
.build();
final ResourceProfile rp2 =
ResourceProfile.newBuilder()
.setCpuCores(2.0)
.setTaskHeapMemoryMB(200)
.setTaskOffHeapMemoryMB(200)
.setManagedMemoryMB(200)
.setNetworkMemoryMB(200)
.setExtendedResource(new ExternalResource(EXTERNAL_RESOURCE_NAME, 2.0))
.build();
final ResourceProfile rp1MergeRp1 =
ResourceProfile.newBuilder()
.setCpuCores(2.0)
.setTaskHeapMemoryMB(200)
.setTaskOffHeapMemoryMB(200)
.setManagedMemoryMB(200)
.setNetworkMemoryMB(200)
.build();
final ResourceProfile rp1MergeRp2 =
ResourceProfile.newBuilder()
.setCpuCores(3.0)
.setTaskHeapMemoryMB(300)
.setTaskOffHeapMemoryMB(300)
.setManagedMemoryMB(300)
.setNetworkMemoryMB(300)
.setExtendedResource(new ExternalResource(EXTERNAL_RESOURCE_NAME, 2.0))
.build();
final ResourceProfile rp2MergeRp2 =
ResourceProfile.newBuilder()
.setCpuCores(4.0)
.setTaskHeapMemoryMB(400)
.setTaskOffHeapMemoryMB(400)
.setManagedMemoryMB(400)
.setNetworkMemoryMB(400)
.setExtendedResource(new ExternalResource(EXTERNAL_RESOURCE_NAME, 4.0))
.build();
assertThat(rp1.merge(rp1)).isEqualTo(rp1MergeRp1);
assertThat(rp1.merge(rp2)).isEqualTo(rp1MergeRp2);
assertThat(rp2.merge(rp1)).isEqualTo(rp1MergeRp2);
assertThat(rp2.merge(rp2)).isEqualTo(rp2MergeRp2);
assertThat(rp1.merge(ResourceProfile.UNKNOWN)).isEqualTo(ResourceProfile.UNKNOWN);
assertThat(ResourceProfile.UNKNOWN.merge(rp1)).isEqualTo(ResourceProfile.UNKNOWN);
assertThat(ResourceProfile.UNKNOWN.merge(ResourceProfile.UNKNOWN))
.isEqualTo(ResourceProfile.UNKNOWN);
assertThat(rp1.merge(ResourceProfile.ANY)).isEqualTo(ResourceProfile.ANY);
assertThat(ResourceProfile.ANY.merge(rp1)).isEqualTo(ResourceProfile.ANY);
assertThat(ResourceProfile.ANY.merge(ResourceProfile.ANY)).isEqualTo(ResourceProfile.ANY);
} |
public TolerantDoubleComparison isWithin(double tolerance) {
return new TolerantDoubleComparison() {
@Override
public void of(double expected) {
Double actual = DoubleSubject.this.actual;
checkNotNull(
actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
checkTolerance(tolerance);
if (!equalWithinTolerance(actual, expected, tolerance)) {
failWithoutActual(
fact("expected", doubleToString(expected)),
butWas(),
fact("outside tolerance", doubleToString(tolerance)));
}
}
};
} | @Test
public void isWithinOfZero() {
assertThat(+0.0).isWithin(0.00001).of(+0.0);
assertThat(+0.0).isWithin(0.00001).of(-0.0);
assertThat(-0.0).isWithin(0.00001).of(+0.0);
assertThat(-0.0).isWithin(0.00001).of(-0.0);
assertThat(+0.0).isWithin(0.0).of(+0.0);
assertThat(+0.0).isWithin(0.0).of(-0.0);
assertThat(-0.0).isWithin(0.0).of(+0.0);
assertThat(-0.0).isWithin(0.0).of(-0.0);
} |
public void add(Boolean bool) {
elements.add(bool == null ? JsonNull.INSTANCE : new JsonPrimitive(bool));
} | @Test
public void testStringPrimitiveAddition() {
JsonArray jsonArray = new JsonArray();
jsonArray.add("Hello");
jsonArray.add("Goodbye");
jsonArray.add("Thank you");
jsonArray.add((String) null);
jsonArray.add("Yes");
assertThat(jsonArray.toString())
.isEqualTo("[\"Hello\",\"Goodbye\",\"Thank you\",null,\"Yes\"]");
} |
@Override
public void open() throws Exception {
this.timerService =
getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this);
this.keySet = new HashSet<>();
super.open();
} | @Test
void testEndInput() throws Exception {
AtomicInteger firstInputCounter = new AtomicInteger();
AtomicInteger secondInputCounter = new AtomicInteger();
KeyedTwoInputNonBroadcastProcessOperator<Long, Integer, Long, Long> processOperator =
new KeyedTwoInputNonBroadcastProcessOperator<>(
new TwoInputNonBroadcastStreamProcessFunction<Integer, Long, Long>() {
@Override
public void processRecordFromFirstInput(
Integer record,
Collector<Long> output,
PartitionedContext ctx) {
// do nothing.
}
@Override
public void processRecordFromSecondInput(
Long record, Collector<Long> output, PartitionedContext ctx) {
// do nothing.
}
@Override
public void endFirstInput(NonPartitionedContext<Long> ctx) {
try {
ctx.applyToAllPartitions(
(out, context) -> {
firstInputCounter.incrementAndGet();
Long currentKey =
context.getStateManager().getCurrentKey();
out.collect(currentKey);
});
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void endSecondInput(NonPartitionedContext<Long> ctx) {
try {
ctx.applyToAllPartitions(
(out, context) -> {
secondInputCounter.incrementAndGet();
Long currentKey =
context.getStateManager().getCurrentKey();
out.collect(currentKey);
});
} catch (Exception e) {
throw new RuntimeException(e);
}
}
});
try (KeyedTwoInputStreamOperatorTestHarness<Long, Integer, Long, Long> testHarness =
new KeyedTwoInputStreamOperatorTestHarness<>(
processOperator,
(KeySelector<Integer, Long>) Long::valueOf,
(KeySelector<Long, Long>) value -> value,
Types.LONG)) {
testHarness.open();
testHarness.processElement1(new StreamRecord<>(1)); // key is 1L
testHarness.processElement2(new StreamRecord<>(2L)); // key is 2L
testHarness.endInput1();
assertThat(firstInputCounter).hasValue(2);
Collection<StreamRecord<Long>> recordOutput = testHarness.getRecordOutput();
assertThat(recordOutput)
.containsExactly(new StreamRecord<>(1L), new StreamRecord<>(2L));
testHarness.processElement2(new StreamRecord<>(3L)); // key is 3L
testHarness.getOutput().clear();
testHarness.endInput2();
assertThat(secondInputCounter).hasValue(3);
recordOutput = testHarness.getRecordOutput();
assertThat(recordOutput)
.containsExactly(
new StreamRecord<>(1L), new StreamRecord<>(2L), new StreamRecord<>(3L));
}
} |
public static long readUint32BE(ByteBuffer buf) throws BufferUnderflowException {
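// reads four bytes as an unsigned big-endian value; note it also switches the buffer's byte order as a side effect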
return Integer.toUnsignedLong(buf.order(ByteOrder.BIG_ENDIAN).getInt());
} | @Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadUint32BEThrowsException2() {
ByteUtils.readUint32BE(new byte[]{1, 2, 3, 4, 5}, 2);
} |
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
if(containerService.isContainer(folder)) {
return super.mkdir(folder, status);
}
else {
status.setChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status));
return super.mkdir(folder, status);
}
} | @Test
public void testCreatePlaceholder() throws Exception {
final String bucketname = new AlphanumericRandomStringService().random();
final String name = new AlphanumericRandomStringService().random();
final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
new Path(bucketname, EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
new Path(container, name, EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new SpectraFindFeature(session).find(test));
assertTrue(new DefaultFindFeature(session).find(test));
new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
// Get the mime4j configuration, or use a default one
MimeConfig config =
new MimeConfig.Builder().setMaxLineLen(100000).setMaxHeaderLen(100000).build();
config = context.get(MimeConfig.class, config);
Detector localDetector = context.get(Detector.class);
if (localDetector == null) {
//lazily load this if necessary
if (detector == null) {
EmbeddedDocumentUtil embeddedDocumentUtil = new EmbeddedDocumentUtil(context);
detector = embeddedDocumentUtil.getDetector();
}
localDetector = detector;
}
MimeStreamParser parser =
new MimeStreamParser(config, null, new DefaultBodyDescriptorBuilder());
XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
MailContentHandler mch = new MailContentHandler(xhtml, localDetector, metadata, context,
config.isStrictParsing(), extractAllAlternatives);
parser.setContentHandler(mch);
parser.setContentDecoding(true);
parser.setNoRecurse();
xhtml.startDocument();
TikaInputStream tstream = TikaInputStream.get(stream);
try {
parser.parse(tstream);
} catch (IOException e) {
tstream.throwIfCauseOf(e);
throw new TikaException("Failed to parse an email message", e);
} catch (MimeException e) {
// Unwrap the exception in case it was not thrown by mime4j
Throwable cause = e.getCause();
if (cause instanceof TikaException) {
throw (TikaException) cause;
} else if (cause instanceof SAXException) {
throw (SAXException) cause;
} else {
throw new TikaException("Failed to parse an email message", e);
}
}
xhtml.endDocument();
} | @Test
public void testSimple() throws Exception {
Metadata metadata = new Metadata();
InputStream stream = getStream("test-documents/testRFC822");
ContentHandler handler = mock(DefaultHandler.class);
try {
EXTRACT_ALL_ALTERNATIVES_PARSER.parse(stream, handler, metadata, new ParseContext());
verify(handler).startDocument();
//just one body
verify(handler).startElement(eq(XHTMLContentHandler.XHTML), eq("p"), eq("p"),
any(Attributes.class));
verify(handler).endElement(XHTMLContentHandler.XHTML, "p", "p");
//no multi-part body parts
verify(handler, never())
.startElement(eq(XHTMLContentHandler.XHTML), eq("div"), eq("div"),
any(Attributes.class));
verify(handler, never()).endElement(XHTMLContentHandler.XHTML, "div", "div");
verify(handler).endDocument();
//note no leading spaces, and no quotes
assertEquals("Julien Nioche (JIRA) <jira@apache.org>",
metadata.get(TikaCoreProperties.CREATOR));
assertEquals("[jira] Commented: (TIKA-461) RFC822 messages not parsed",
metadata.get(TikaCoreProperties.TITLE));
assertEquals("[jira] Commented: (TIKA-461) RFC822 messages not parsed",
metadata.get(TikaCoreProperties.SUBJECT));
} catch (Exception e) {
fail("Exception thrown: " + e.getMessage());
}
} |
@VisibleForTesting
static String getDataKeyName(UUID jobId, String key) {
return String.format("%s-%s", jobId, key);
} | @Test
public void getDataKeyName() throws Exception {
assertEquals(
JOB_ID + "-tempTaskData",
GoogleJobStore.getDataKeyName(JOB_ID, "tempTaskData"));
assertEquals(
JOB_ID + "-tempCalendarData",
GoogleJobStore.getDataKeyName(JOB_ID, "tempCalendarData"));
} |
@Override
public Object getDateValue(final ResultSet resultSet, final int columnIndex) throws SQLException {
if (isYearDataType(resultSet.getMetaData().getColumnTypeName(columnIndex))) {
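// YEAR columns are returned as raw objects (or null) rather than via getDate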
return resultSet.wasNull() ? null : resultSet.getObject(columnIndex);
}
return resultSet.getDate(columnIndex);
} | @Test
void assertGetDateValueWithYearDataTypeAndNotNullValue() throws SQLException {
when(resultSet.getMetaData().getColumnTypeName(1)).thenReturn("YEAR");
Object expectedObject = new Object();
when(resultSet.getObject(1)).thenReturn(expectedObject);
assertThat(dialectResultSetMapper.getDateValue(resultSet, 1), is(expectedObject));
} |
public static String toJson(UpdateRequirement updateRequirement) {
return toJson(updateRequirement, false);
} | @Test
public void testAssertCurrentSchemaIdToJson() {
String requirementType = UpdateRequirementParser.ASSERT_CURRENT_SCHEMA_ID;
int schemaId = 4;
String expected =
String.format("{\"type\":\"%s\",\"current-schema-id\":%d}", requirementType, schemaId);
UpdateRequirement actual = new UpdateRequirement.AssertCurrentSchemaID(schemaId);
assertThat(UpdateRequirementParser.toJson(actual))
.as("AssertCurrentSchemaId should convert to the correct JSON value")
.isEqualTo(expected);
} |
@Override
public int launch(AgentLaunchDescriptor descriptor) {
LogConfigurator logConfigurator = new LogConfigurator("agent-launcher-logback.xml");
return logConfigurator.runWithLogger(() -> doLaunch(descriptor));
} | @Test
public void shouldNotThrowException_insteadReturnAppropriateErrorCode_whenSomethingGoesWrongInLaunch() {
AgentLaunchDescriptor launchDesc = mock(AgentLaunchDescriptor.class);
when(launchDesc.context().get(AgentBootstrapperArgs.SERVER_URL)).thenThrow(new RuntimeException("Ouch!"));
assertThat("should not have blown up, because it directly interfaces with bootstrapper",
new AgentLauncherImpl().launch(launchDesc), is(-273));
} |
public static String executeDockerCommand(DockerCommand dockerCommand,
String containerId, Map<String, String> env,
PrivilegedOperationExecutor privilegedOperationExecutor,
boolean disableFailureLogging, Context nmContext)
throws ContainerExecutionException {
PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(
dockerCommand, containerId, env, nmContext);
if (disableFailureLogging) {
dockerOp.disableFailureLogging();
}
LOG.debug("Running docker command: {}", dockerCommand);
try {
String result = privilegedOperationExecutor
.executePrivilegedOperation(null, dockerOp, null,
env, true, false);
if (result != null && !result.isEmpty()) {
result = result.trim();
}
return result;
} catch (PrivilegedOperationException e) {
throw new ContainerExecutionException("Docker operation failed",
e.getExitCode(), e.getOutput(), e.getErrorOutput());
}
} | @Test
public void testExecuteDockerStop() throws Exception {
DockerStopCommand dockerCommand = new DockerStopCommand(MOCK_CONTAINER_ID);
DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID,
env, mockExecutor, false, nmContext);
List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
.capturePrivilegedOperations(mockExecutor, 1, true);
List<String> dockerCommands = getValidatedDockerCommands(ops);
assertEquals(1, ops.size());
assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
ops.get(0).getOperationType().name());
assertEquals(3, dockerCommands.size());
assertEquals("[docker-command-execution]", dockerCommands.get(0));
assertEquals(" docker-command=stop", dockerCommands.get(1));
assertEquals(" name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
} |
@Override
public boolean equals(Object o) {
if (!(o instanceof AtomicValueEvent)) {
return false;
}
AtomicValueEvent that = (AtomicValueEvent) o;
return Objects.equals(this.name, that.name) &&
Objects.equals(this.newValue, that.newValue) &&
Objects.equals(this.oldValue, that.oldValue);
} | @Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(event1)
.addEqualityGroup(event2, sameAsEvent2)
.addEqualityGroup(event3)
.testEquals();
} |
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
Map<String, String> mdcContextMap = getMdcContextMap();
return super.schedule(ContextPropagator.decorateRunnable(contextPropagators, () -> {
try {
setMDCContext(mdcContextMap);
command.run();
} finally {
MDC.clear();
}
}), delay, unit);
} | @Test
public void testThreadFactory() {
final ScheduledFuture<String> schedule = schedulerService.schedule(() -> Thread.currentThread().getName(), 0, TimeUnit.MILLISECONDS);
waitAtMost(1, TimeUnit.SECONDS).until(matches(() ->
assertThat(schedule.get()).contains("ContextAwareScheduledThreadPool")));
} |
public static void setCommitDirectory(Job job, Path commitDirectory) {
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
commitDirectory.toString());
} | @Test
public void testSetCommitDirectory() {
try {
Job job = Job.getInstance(new Configuration());
Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, "");
Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));
Path directory = new Path("/tmp/test");
CopyOutputFormat.setCommitDirectory(job, directory);
Assert.assertEquals(directory, CopyOutputFormat.getCommitDirectory(job));
Assert.assertEquals(directory.toString(), job.getConfiguration().
get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
} catch (IOException e) {
LOG.error("Exception encountered while running test", e);
Assert.fail("Failed while testing for set Commit Directory");
}
} |
@Override
public Set<DatabaseTableName> getCachedTableNames() {
return cachingMetastore.getCachedTableNames();
} | @Test
public void testGetCachedTable(@Mocked ConnectContext connectContext) {
new Expectations() {
{
ConnectContext.get();
result = connectContext;
minTimes = 0;
connectContext.getCommand();
result = MysqlCommand.COM_QUERY;
minTimes = 0;
}
};
new MockUp<DeltaUtils>() {
@mockit.Mock
public DeltaLakeTable convertDeltaToSRTable(String catalog, String dbName, String tblName, String path,
Engine deltaEngine, long createTime) {
return new DeltaLakeTable(1, "delta0", "db1", "table1",
Lists.newArrayList(), Lists.newArrayList("ts"), null,
"s3://bucket/path/to/table", null, 0);
}
};
DeltaLakeCacheUpdateProcessor deltaLakeCacheUpdateProcessor =
new DeltaLakeCacheUpdateProcessor(cachingDeltaLakeMetastore);
Table table = cachingDeltaLakeMetastore.getTable("db1", "table1");
Assert.assertEquals(1, deltaLakeCacheUpdateProcessor.getCachedTableNames().size());
Assert.assertTrue(deltaLakeCacheUpdateProcessor.getCachedTableNames().
contains(DatabaseTableName.of("db1", "table1")));
Assert.assertTrue(table instanceof DeltaLakeTable);
} |
public Future<Void> maybeRollingUpdate(Reconciliation reconciliation, int replicas, Labels selectorLabels, Function<Pod, List<String>> podRestart, TlsPemIdentity coTlsPemIdentity) {
String namespace = reconciliation.namespace();
// Prepare the list of expected Pods: we must account for pods which might be missing,
// and wait for them before rolling any running pods to avoid problems.
List<String> expectedPodNames = new ArrayList<>();
for (int i = 0; i < replicas; i++) {
expectedPodNames.add(KafkaResources.zookeeperPodName(reconciliation.name(), i));
}
return podOperator.listAsync(namespace, selectorLabels)
.compose(pods -> {
ZookeeperClusterRollContext clusterRollContext = new ZookeeperClusterRollContext();
for (String podName : expectedPodNames) {
Pod pod = pods.stream().filter(p -> podName.equals(p.getMetadata().getName())).findFirst().orElse(null);
if (pod != null) {
List<String> restartReasons = podRestart.apply(pod);
final boolean ready = podOperator.isReady(namespace, pod.getMetadata().getName());
ZookeeperPodContext podContext = new ZookeeperPodContext(podName, restartReasons, true, ready);
if (restartReasons != null && !restartReasons.isEmpty()) {
LOGGER.debugCr(reconciliation, "Pod {} should be rolled due to {}", podContext.getPodName(), restartReasons);
} else {
LOGGER.debugCr(reconciliation, "Pod {} does not need to be rolled", podContext.getPodName());
}
clusterRollContext.add(podContext);
} else {
// Pod does not exist, but we still add it to the roll context because we should not roll
// any other pods before it is ready
LOGGER.debugCr(reconciliation, "Pod {} does not exist and cannot be rolled", podName);
ZookeeperPodContext podContext = new ZookeeperPodContext(podName, null, false, false);
clusterRollContext.add(podContext);
}
}
if (clusterRollContext.requiresRestart()) {
return Future.succeededFuture(clusterRollContext);
} else {
return Future.succeededFuture(null);
}
}).compose(clusterRollContext -> {
if (clusterRollContext != null) {
Promise<Void> promise = Promise.promise();
Future<String> leaderFuture = leaderFinder.findZookeeperLeader(reconciliation, clusterRollContext.podNames(), coTlsPemIdentity);
leaderFuture.compose(leader -> {
LOGGER.debugCr(reconciliation, "Zookeeper leader is " + (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) ? "unknown" : "pod " + leader));
Future<Void> fut = Future.succeededFuture();
// Then roll each non-leader pod => the leader is rolled last
for (ZookeeperPodContext podContext : clusterRollContext.getPodContextsWithNonExistingAndNonReadyFirst()) {
if (podContext.requiresRestart() && !podContext.getPodName().equals(leader)) {
LOGGER.debugCr(reconciliation, "Pod {} needs to be restarted", podContext.getPodName());
// roll the pod and wait until it is ready
// this prevents rolling into faulty state (note: this applies just for ZK pods)
fut = fut.compose(ignore -> restartPod(reconciliation, podContext.getPodName(), podContext.reasonsToRestart));
} else {
if (podContext.requiresRestart()) {
LOGGER.debugCr(reconciliation, "Deferring restart of leader {}", podContext.getPodName());
} else {
LOGGER.debugCr(reconciliation, "Pod {} does not need to be restarted", podContext.getPodName());
}
fut = fut.compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), podContext.getPodName(), READINESS_POLLING_INTERVAL_MS, operationTimeoutMs));
}
}
// Check if we have a leader and if it needs rolling
if (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) || clusterRollContext.get(leader) == null || !clusterRollContext.get(leader).requiresRestart()) {
return fut;
} else {
// Roll the leader pod
return fut.compose(ar -> {
// the leader is rolled last
LOGGER.debugCr(reconciliation, "Restarting leader pod (previously deferred) {}", leader);
return restartPod(reconciliation, leader, clusterRollContext.get(leader).reasonsToRestart);
});
}
}).onComplete(promise);
return promise.future();
} else {
return Future.succeededFuture();
}
});
} | @Test
public void testOnlySomePodsAreRolled(VertxTestContext context) {
PodOperator podOperator = mock(PodOperator.class);
when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS));
when(podOperator.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class);
when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(ZookeeperLeaderFinder.UNKNOWN_LEADER));
MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L);
Function<Pod, List<String>> shouldRoll = pod -> {
if (!"name-zookeeper-1".equals(pod.getMetadata().getName())) {
return List.of("Should restart");
} else {
return List.of();
}
};
roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRoll, DUMMY_IDENTITY)
.onComplete(context.succeeding(v -> context.verify(() -> {
assertThat(roller.podRestarts.size(), is(2));
assertThat(roller.podRestarts.contains("name-zookeeper-0"), is(true));
assertThat(roller.podRestarts.contains("name-zookeeper-2"), is(true));
context.completeNow();
})));
} |
Optional<String> getQueriesFile(final Map<String, String> properties) {
if (queriesFile != null) {
return Optional.of(queriesFile);
}
return Optional.ofNullable(properties.get(QUERIES_FILE_CONFIG));
} | @Test
public void shouldNotHaveQueriesFileIfNotInPropertiesOrCommandLine() {
assertThat(serverOptions.getQueriesFile(emptyMap()), is(Optional.empty()));
} |
@Override
public float getFloat(int index) {
return Float.intBitsToFloat(getInt(index));
} | @Test
public void testGetFloatAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().getFloat(0);
}
});
} |
public RunResponse restartDirectly(
RunResponse restartStepInfo, RunRequest runRequest, boolean blocking) {
WorkflowInstance instance = restartStepInfo.getInstance();
String stepId = restartStepInfo.getStepId();
validateStepId(instance, stepId, Actions.StepInstanceAction.RESTART);
StepInstance stepInstance =
getStepInstanceAndValidate(instance, stepId, runRequest.getRestartConfig());
// prepare payload and then add to db
StepAction stepAction = StepAction.createRestart(stepInstance, runRequest);
saveAction(stepInstance, stepAction);
if (blocking) {
return waitResponseWithTimeout(stepInstance, stepAction);
} else {
return RunResponse.from(stepInstance, stepAction.toTimelineEvent());
}
} | @Test
public void testRestartDirectlyWithTerminatedStep() {
stepInstance.getRuntimeState().setStatus(StepInstance.Status.FATALLY_FAILED);
// emulate the restarted step having finished
stepInstance.getRuntimeState().setCreateTime(System.currentTimeMillis() + 3600 * 1000);
stepInstance.getStepRetry().setRetryable(false);
((TypedStep) stepInstance.getDefinition()).setFailureMode(FailureMode.FAIL_AFTER_RUNNING);
stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true);
RunResponse restartStepInfo = RunResponse.builder().instance(instance).stepId("job1").build();
RunRequest runRequest =
RunRequest.builder()
.requester(user)
.currentPolicy(RunPolicy.RESTART_FROM_SPECIFIC)
.stepRunParams(
Collections.singletonMap(
"job1",
Collections.singletonMap(
"foo", ParamDefinition.buildParamDefinition("foo", "bar"))))
.build();
RunResponse response = actionDao.restartDirectly(restartStepInfo, runRequest, true);
Assert.assertEquals("sample-dag-test-3", response.getWorkflowId());
Assert.assertEquals(1, response.getWorkflowInstanceId());
Assert.assertEquals(1, response.getWorkflowRunId());
Assert.assertEquals("job1", response.getStepId());
Assert.assertEquals(2L, response.getStepAttemptId().longValue());
Assert.assertEquals(
"User [tester] take action [RESTART] on the step",
response.getTimelineEvent().getMessage());
Mockito.verify(publisher, Mockito.times(1)).publish(any(StepInstanceWakeUpEvent.class));
} |
public static ParsedCommand parse(
// CHECKSTYLE_RULES.ON: CyclomaticComplexity
final String sql, final Map<String, String> variables) {
validateSupportedStatementType(sql);
final String substituted;
try {
substituted = VariableSubstitutor.substitute(KSQL_PARSER.parse(sql).get(0), variables);
} catch (ParseFailedException e) {
throw new MigrationException(String.format(
"Failed to parse the statement. Statement: %s. Reason: %s",
sql, e.getMessage()));
}
final SqlBaseParser.SingleStatementContext statementContext = KSQL_PARSER.parse(substituted)
.get(0).getStatement();
final boolean isStatement = StatementType.get(statementContext.statement().getClass())
== StatementType.STATEMENT;
return new ParsedCommand(substituted,
isStatement ? Optional.empty() : Optional.of(new AstBuilder(TypeRegistry.EMPTY)
.buildStatement(statementContext)));
} | @Test
public void shouldThrowOnInvalidInsertValues() {
// When:
final MigrationException e = assertThrows(MigrationException.class,
() -> parse("insert into foo values (this_should_not_here) ('val');"));
// Then:
assertThat(e.getMessage(), containsString("Failed to parse the statement"));
} |
public int exceptionCount() {
return this.exceptionCount;
} | @Test
public void testFileWithManyFlaws() {
NopParser parser = new NopParser(new File(FILE_WITH_LOTS_OF_BAD_LINES));
assertDoesNotThrow(() -> parseAllMessages(parser));
assertEquals(10000, parser.exceptionCount());
} |
public static CommandExecutor newInstance(final PostgreSQLCommandPacketType commandPacketType, final PostgreSQLCommandPacket commandPacket,
final ConnectionSession connectionSession, final PortalContext portalContext) throws SQLException {
if (commandPacket instanceof SQLReceivedPacket) {
log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
} else {
log.debug("Execute packet type: {}", commandPacketType);
}
if (!(commandPacket instanceof PostgreSQLAggregatedCommandPacket)) {
return getCommandExecutor(commandPacketType, commandPacket, connectionSession, portalContext);
}
PostgreSQLAggregatedCommandPacket aggregatedCommandPacket = (PostgreSQLAggregatedCommandPacket) commandPacket;
if (aggregatedCommandPacket.isContainsBatchedStatements()) {
return new PostgreSQLAggregatedCommandExecutor(getExecutorsOfAggregatedBatchedStatements(aggregatedCommandPacket, connectionSession, portalContext));
}
List<CommandExecutor> result = new ArrayList<>(aggregatedCommandPacket.getPackets().size());
for (PostgreSQLCommandPacket each : aggregatedCommandPacket.getPackets()) {
result.add(getCommandExecutor((PostgreSQLCommandPacketType) each.getIdentifier(), each, connectionSession, portalContext));
}
return new PostgreSQLAggregatedCommandExecutor(result);
} | @Test
void assertAggregatedPacketIsBatchedStatements() throws SQLException {
PostgreSQLComParsePacket parsePacket = mock(PostgreSQLComParsePacket.class);
when(parsePacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.PARSE_COMMAND);
PostgreSQLComBindPacket bindPacket = mock(PostgreSQLComBindPacket.class);
PostgreSQLComDescribePacket describePacket = mock(PostgreSQLComDescribePacket.class);
PostgreSQLComExecutePacket executePacket = mock(PostgreSQLComExecutePacket.class);
PostgreSQLComSyncPacket syncPacket = mock(PostgreSQLComSyncPacket.class);
when(syncPacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.SYNC_COMMAND);
PostgreSQLAggregatedCommandPacket packet = mock(PostgreSQLAggregatedCommandPacket.class);
when(packet.isContainsBatchedStatements()).thenReturn(true);
when(packet.getPackets()).thenReturn(Arrays.asList(parsePacket, bindPacket, describePacket, executePacket, bindPacket, describePacket, executePacket, syncPacket));
when(packet.getBatchPacketBeginIndex()).thenReturn(1);
when(packet.getBatchPacketEndIndex()).thenReturn(6);
CommandExecutor actual = PostgreSQLCommandExecutorFactory.newInstance(null, packet, connectionSession, portalContext);
assertThat(actual, instanceOf(PostgreSQLAggregatedCommandExecutor.class));
Iterator<CommandExecutor> actualPacketsIterator = getExecutorsFromAggregatedCommandExecutor((PostgreSQLAggregatedCommandExecutor) actual).iterator();
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComParseExecutor.class));
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLAggregatedBatchedStatementsCommandExecutor.class));
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComSyncExecutor.class));
assertFalse(actualPacketsIterator.hasNext());
} |
private MergeSortedPages() {} | @Test
public void testDifferentTypes()
throws Exception
{
List<Type> types = ImmutableList.of(DOUBLE, VARCHAR, INTEGER);
MaterializedResult actual = mergeSortedPages(
types,
ImmutableList.of(2, 0, 1),
ImmutableList.of(DESC_NULLS_LAST, DESC_NULLS_FIRST, ASC_NULLS_FIRST),
ImmutableList.of(
rowPagesBuilder(types)
.row(16.0, "a1", 16)
.row(8.0, "b1", 16)
.pageBreak()
.row(4.0, "c1", 16)
.row(4.0, "d1", 16)
.row(null, "d1", 8)
.row(16.0, "a1", 8)
.row(16.0, "b1", 8)
.row(16.0, "c1", 4)
.row(8.0, "d1", 4)
.row(16.0, "a1", 2)
.row(null, "a1", null)
.row(16.0, "a1", null)
.build(),
rowPagesBuilder(types)
.row(15.0, "a2", 17)
.row(9.0, "b2", 17)
.pageBreak()
.row(5.0, "c2", 17)
.row(5.0, "d2", 17)
.row(null, "d2", 8)
.row(17.0, "a0", 8)
.row(17.0, "b0", 8)
.row(17.0, "c0", 5)
.row(9.0, "d0", 5)
.row(17.0, "a0", 3)
.row(null, "a0", null)
.row(17.0, "a0", null)
.build()));
MaterializedResult expected = resultBuilder(TEST_SESSION, types)
.row(15.0, "a2", 17)
.row(9.0, "b2", 17)
.row(5.0, "c2", 17)
.row(5.0, "d2", 17)
.row(16.0, "a1", 16)
.row(8.0, "b1", 16)
.row(4.0, "c1", 16)
.row(4.0, "d1", 16)
.row(null, "d1", 8)
.row(null, "d2", 8)
.row(17.0, "a0", 8)
.row(17.0, "b0", 8)
.row(16.0, "a1", 8)
.row(16.0, "b1", 8)
.row(17.0, "c0", 5)
.row(9.0, "d0", 5)
.row(16.0, "c1", 4)
.row(8.0, "d1", 4)
.row(17.0, "a0", 3)
.row(16.0, "a1", 2)
.row(null, "a0", null)
.row(null, "a1", null)
.row(17.0, "a0", null)
.row(16.0, "a1", null)
.build();
assertEquals(actual, expected);
} |
@Udf(description = "Returns a masked version of the input string. All characters except for the"
+ " first n will be replaced according to the default masking rules.")
@SuppressWarnings("MethodMayBeStatic") // Invoked via reflection
public String mask(
@UdfParameter("input STRING to be masked") final String input,
@UdfParameter("number of characters to keep unmasked at the start") final int numChars
) {
return doMask(new Masker(), input, numChars);
} | @Test
public void shouldNotMaskFirstNChars() {
final String result = udf.mask("AbCd#$123xy Z", 5);
assertThat(result, is("AbCd#-nnnxx-X"));
} |
@Override
public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath, Time timeout) {
final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
return CompletableFuture.supplyAsync(
() -> {
log.info("Disposing savepoint {}.", savepointPath);
try {
Checkpoints.disposeSavepoint(
savepointPath, configuration, classLoader, log);
} catch (IOException | FlinkException e) {
throw new CompletionException(
new FlinkException(
String.format(
"Could not dispose savepoint %s.", savepointPath),
e));
}
return Acknowledge.get();
},
jobManagerSharedServices.getIoExecutor());
} | @Test
public void testSavepointDisposal() throws Exception {
final URI externalPointer = createTestingSavepoint();
final Path savepointPath = Paths.get(externalPointer);
dispatcher =
createAndStartDispatcher(
heartbeatServices,
haServices,
new ExpectedJobIdJobManagerRunnerFactory(jobId));
final DispatcherGateway dispatcherGateway =
dispatcher.getSelfGateway(DispatcherGateway.class);
assertThat(Files.exists(savepointPath), is(true));
dispatcherGateway.disposeSavepoint(externalPointer.toString(), TIMEOUT).get();
assertThat(Files.exists(savepointPath), is(false));
} |
public static Map<String, Set<String>> getVarListInString(String input) {
Map<String, Set<String>> varMap = new HashMap<>();
Matcher matcher = VAR_PATTERN_IN_DEST.matcher(input);
while (matcher.find()) {
// $var or ${var}
String varName = matcher.group(0);
// var or {var}
String strippedVarName = matcher.group(1);
if (strippedVarName.startsWith("{")) {
// {varName} = > varName
strippedVarName =
strippedVarName.substring(1, strippedVarName.length() - 1);
}
varMap.putIfAbsent(strippedVarName, new HashSet<>());
varMap.get(strippedVarName).add(varName);
}
return varMap;
} | @Test
public void testGetVarListInString() throws IOException {
String srcRegex = "/(\\w+)";
String target = "/$0/${1}/$1/${2}/${2}";
RegexMountPoint regexMountPoint =
new RegexMountPoint(inodeTree, srcRegex, target, null);
regexMountPoint.initialize();
Map<String, Set<String>> varMap = regexMountPoint.getVarInDestPathMap();
Assert.assertEquals(varMap.size(), 3);
Assert.assertEquals(varMap.get("0").size(), 1);
Assert.assertTrue(varMap.get("0").contains("$0"));
Assert.assertEquals(varMap.get("1").size(), 2);
Assert.assertTrue(varMap.get("1").contains("${1}"));
Assert.assertTrue(varMap.get("1").contains("$1"));
Assert.assertEquals(varMap.get("2").size(), 1);
Assert.assertTrue(varMap.get("2").contains("${2}"));
} |
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
final String json = gelfMessage.getJSON(decompressSizeLimit, charset);
final JsonNode node;
try {
node = objectMapper.readTree(json);
if (node == null) {
throw new IOException("null result");
}
} catch (final Exception e) {
log.error("Could not parse JSON, first 400 characters: " +
StringUtils.abbreviate(json, 403), e);
throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
}
try {
validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
} catch (IllegalArgumentException e) {
log.trace("Invalid GELF message <{}>", node);
throw e;
}
// Timestamp.
final double messageTimestamp = timestampValue(node);
final DateTime timestamp;
if (messageTimestamp <= 0) {
timestamp = rawMessage.getTimestamp();
} else {
// we treat this as a unix timestamp
timestamp = Tools.dateTimeFromDouble(messageTimestamp);
}
final Message message = messageFactory.createMessage(
stringValue(node, "short_message"),
stringValue(node, "host"),
timestamp
);
message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));
final String file = stringValue(node, "file");
if (file != null && !file.isEmpty()) {
message.addField("file", file);
}
final long line = longValue(node, "line");
if (line > -1) {
message.addField("line", line);
}
// Level is set by server if not specified by client.
final int level = intValue(node, "level");
if (level > -1) {
message.addField("level", level);
}
// Facility is set by server if not specified by client.
final String facility = stringValue(node, "facility");
if (facility != null && !facility.isEmpty()) {
message.addField("facility", facility);
}
// Add additional data if there is some.
final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
while (fields.hasNext()) {
final Map.Entry<String, JsonNode> entry = fields.next();
String key = entry.getKey();
// Do not index useless GELF "version" field.
if ("version".equals(key)) {
continue;
}
// Don't include GELF syntax underscore in message field key.
if (key.startsWith("_") && key.length() > 1) {
key = key.substring(1);
}
// We already set short_message and host as message and source. Do not add as fields again.
if ("short_message".equals(key) || "host".equals(key)) {
continue;
}
// Skip standard or already set fields.
if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) {
continue;
}
// Convert JSON containers to Strings, and pick a suitable number representation.
final JsonNode value = entry.getValue();
final Object fieldValue;
if (value.isContainerNode()) {
fieldValue = value.toString();
} else if (value.isFloatingPointNumber()) {
fieldValue = value.asDouble();
} else if (value.isIntegralNumber()) {
fieldValue = value.asLong();
} else if (value.isNull()) {
log.debug("Field [{}] is NULL. Skipping.", key);
continue;
} else if (value.isTextual()) {
fieldValue = value.asText();
} else {
log.debug("Field [{}] has unknown value type. Skipping.", key);
continue;
}
message.addField(key, fieldValue);
}
return message;
} | @Test
public void decodeFailsWithEmptyHost() throws Exception {
final String json = "{"
+ "\"version\": \"1.1\","
+ "\"host\": \"\","
+ "\"short_message\": \"A short message that helps you identify what is going on\""
+ "}";
final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8));
assertThatIllegalArgumentException().isThrownBy(() -> codec.decode(rawMessage))
.withNoCause()
.withMessageMatching("GELF message <[0-9a-f-]+> has empty mandatory \"host\" field.");
} |
public TaskManagerJobMetricGroup addJob(JobID jobId, String jobName) {
Preconditions.checkNotNull(jobId);
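// Fall back to the job ID when no human-readable job name was provided.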
String resolvedJobName = jobName == null || jobName.isEmpty() ? jobId.toString() : jobName;
TaskManagerJobMetricGroup jobGroup;
synchronized (this) { // synchronization isn't strictly necessary as of FLINK-24864
jobGroup = jobs.get(jobId);
if (jobGroup == null) {
jobGroup = new TaskManagerJobMetricGroup(registry, this, jobId, resolvedJobName);
jobs.put(jobId, jobGroup);
}
}
return jobGroup;
} | @Test
void testCloseWithoutRemoval() {
TaskManagerJobMetricGroup jobGroup = metricGroup.addJob(JOB_ID, JOB_NAME);
metricGroup.close();
assertThat(jobGroup.isClosed()).isTrue();
} |
public void resetPositionsIfNeeded() {
Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();
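// Nothing to do unless at least one partition has requested an offset reset.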
if (offsetResetTimestamps.isEmpty())
return;
resetPositionsAsync(offsetResetTimestamps);
} | @Test
public void testEarlierOffsetResetArrivesLate() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST);
offsetFetcher.resetPositionsIfNeeded();
client.prepareResponse(req -> {
if (listOffsetMatchesExpectedReset(tp0, OffsetResetStrategy.EARLIEST, req)) {
// Before the response is handled, we get a request to reset to the latest offset
subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);
return true;
} else {
return false;
}
}, listOffsetResponse(Errors.NONE, 1L, 0L));
consumerClient.pollNoWakeup();
// The list offset result should be ignored
assertTrue(subscriptions.isOffsetResetNeeded(tp0));
assertEquals(OffsetResetStrategy.LATEST, subscriptions.resetStrategy(tp0));
offsetFetcher.resetPositionsIfNeeded();
client.prepareResponse(
req -> listOffsetMatchesExpectedReset(tp0, OffsetResetStrategy.LATEST, req),
listOffsetResponse(Errors.NONE, 1L, 10L)
);
consumerClient.pollNoWakeup();
assertFalse(subscriptions.isOffsetResetNeeded(tp0));
assertEquals(10, subscriptions.position(tp0).offset);
} |
@GET
@Path("{id}")
@ApiOperation("Get a single view")
public ViewDTO get(@ApiParam(name = "id") @PathParam("id") @NotEmpty String id, @Context SearchUser searchUser) {
if ("default".equals(id)) {
// If the user is not permitted to access the default view, return a 404
return dbService.getDefault()
.filter(searchUser::canReadView)
.orElseThrow(() -> new NotFoundException("Default view doesn't exist"));
}
// Attempt to resolve the view from optional view resolvers before using the default database lookup.
// The view resolvers must be used first, because the ID may not be a valid hex ID string.
return resolveView(searchUser, id);
} | @Test
public void invalidObjectIdReturnsViewNotFoundException() {
final ViewsResource viewsResource = createViewsResource(
mock(ViewService.class),
mock(StartPageService.class),
mock(RecentActivityService.class),
mock(ClusterEventBus.class),
new ReferencedSearchFiltersHelper(),
EMPTY_SEARCH_FILTER_VISIBILITY_CHECKER,
EMPTY_VIEW_RESOLVERS
);
assertThatThrownBy(() -> viewsResource.get("invalid", SEARCH_USER))
.isInstanceOf(NotFoundException.class);
} |
@Override
public void execute(Exchange exchange) throws SmppException {
byte[] message = getShortMessage(exchange.getIn());
ReplaceSm replaceSm = createReplaceSmTempate(exchange);
replaceSm.setShortMessage(message);
if (log.isDebugEnabled()) {
log.debug("Sending replacement command for a short message for exchange id '{}' and message id '{}'",
exchange.getExchangeId(), replaceSm.getMessageId());
}
try {
session.replaceShortMessage(
replaceSm.getMessageId(),
TypeOfNumber.valueOf(replaceSm.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(replaceSm.getSourceAddrNpi()),
replaceSm.getSourceAddr(),
replaceSm.getScheduleDeliveryTime(),
replaceSm.getValidityPeriod(),
new RegisteredDelivery(replaceSm.getRegisteredDelivery()),
replaceSm.getSmDefaultMsgId(),
replaceSm.getShortMessage());
} catch (Exception e) {
throw new SmppException(e);
}
if (log.isDebugEnabled()) {
log.debug("Sent replacement command for a short message for exchange id '{}' and message id '{}'",
exchange.getExchangeId(), replaceSm.getMessageId());
}
Message rspMsg = ExchangeHelper.getResultMessage(exchange);
rspMsg.setHeader(SmppConstants.ID, replaceSm.getMessageId());
} | @Test
public void executeWithValidityPeriodAsString() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm");
exchange.getIn().setHeader(SmppConstants.ID, "1");
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
exchange.getIn().setHeader(SmppConstants.SCHEDULE_DELIVERY_TIME, new Date(1111111));
exchange.getIn().setHeader(SmppConstants.VALIDITY_PERIOD, "000003000000000R"); // three days
exchange.getIn().setHeader(SmppConstants.REGISTERED_DELIVERY,
new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE).value());
exchange.getIn().setBody("new short message body");
command.execute(exchange);
verify(session).replaceShortMessage(eq("1"), eq(TypeOfNumber.NATIONAL), eq(NumberingPlanIndicator.NATIONAL), eq("1818"),
eq("-300101001831100+"), eq("000003000000000R"),
eq(new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE)), eq((byte) 0), eq("new short message body".getBytes()));
assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
} |
public Path find(final Path selected) {
if(null == selected) {
return workdir;
}
if(selected.getType().contains(Path.Type.volume)) {
return selected;
}
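// Otherwise upload next to the selection, i.e. into its parent directory.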
return selected.getParent();
} | @Test
public void testFindContainerSelected() {
assertEquals(new Path("/container", EnumSet.of(Path.Type.directory, Path.Type.volume)),
new UploadTargetFinder(new Path("/", EnumSet.of(Path.Type.directory)))
.find(new Path("/container", EnumSet.of(Path.Type.directory, Path.Type.volume))));
assertEquals(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)),
new UploadTargetFinder(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)))
.find(null));
} |
public void writeMethodDescriptor(MethodReference methodReference) throws IOException {
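// Emits a dex-style descriptor of the form Ldefining/class;->name(ParamTypes)ReturnType.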
writeType(methodReference.getDefiningClass());
writer.write("->");
writeSimpleName(methodReference.getName());
writer.write('(');
for (CharSequence paramType: methodReference.getParameterTypes()) {
writeType(paramType);
}
writer.write(')');
writeType(methodReference.getReturnType());
} | @Test
public void testWriteMethodDescriptor() throws IOException {
DexFormattedWriter writer = new DexFormattedWriter(output);
writer.writeMethodDescriptor(getMethodReference());
Assert.assertEquals("Ldefining/class;->methodName(Lparam1;Lparam2;)Lreturn/type;", output.toString());
} |
public <T> Future<Iterable<Map.Entry<ByteString, Iterable<T>>>> multimapFetchAllFuture(
boolean omitValues, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
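// MULTIMAP_ALL fetches every entry under the tag; omitValues requests entry names without their values.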
StateTag<ByteString> stateTag =
StateTag.<ByteString>of(Kind.MULTIMAP_ALL, encodedTag, stateFamily)
.toBuilder()
.setOmitValues(omitValues)
.build();
return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
} | @Test
public void testReadMultimapAllEntries() throws Exception {
Future<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> future =
underTest.multimapFetchAllFuture(false, STATE_KEY_1, STATE_FAMILY, INT_CODER);
Mockito.verifyNoMoreInteractions(mockWindmill);
Windmill.KeyedGetDataRequest.Builder expectedRequest =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addMultimapsToFetch(
Windmill.TagMultimapFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.setFetchEntryNamesOnly(false)
.setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_MULTIMAP_BYTES));
Windmill.KeyedGetDataResponse.Builder response =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagMultimaps(
Windmill.TagMultimapFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.addValues(intData(1))
.addValues(intData(2)))
.addEntries(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_2)
.addValues(intData(10))
.addValues(intData(20))));
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
.thenReturn(response.build());
Iterable<Map.Entry<ByteString, Iterable<Integer>>> results = future.get();
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest.build());
int foundEntries = 0;
for (Map.Entry<ByteString, Iterable<Integer>> entry : results) {
if (entry.getKey().equals(STATE_MULTIMAP_KEY_1)) {
foundEntries++;
assertThat(entry.getValue(), Matchers.containsInAnyOrder(1, 2));
} else {
foundEntries++;
assertEquals(STATE_MULTIMAP_KEY_2, entry.getKey());
assertThat(entry.getValue(), Matchers.containsInAnyOrder(10, 20));
}
}
assertEquals(2, foundEntries);
Mockito.verifyNoMoreInteractions(mockWindmill);
assertNoReader(future);
} |
@VisibleForTesting
boolean parseArguments(String[] args) throws IOException {
Options opts = new Options();
opts.addOption(Option.builder("h").build());
opts.addOption(Option.builder("help").build());
opts.addOption(Option.builder("input")
.desc("Input class path. Defaults to the default classpath.")
.hasArg().build());
opts.addOption(Option.builder("whitelist")
.desc(
"Regex specifying the full path of jars to include in the" +
" framework tarball. Default is a hardcoded set of jars" +
" considered necessary to include")
.hasArg().build());
opts.addOption(Option.builder("blacklist")
.desc(
"Regex specifying the full path of jars to exclude in the" +
" framework tarball. Default is a hardcoded set of jars" +
" considered unnecessary to include")
.hasArg().build());
opts.addOption(Option.builder("fs")
.desc(
"Target file system to upload to." +
" Example: hdfs://foo.com:8020")
.hasArg().build());
opts.addOption(Option.builder("target")
.desc(
"Target file to upload to with a reference name." +
" Example: /usr/mr-framework.tar.gz#mr-framework")
.hasArg().build());
opts.addOption(Option.builder("initialReplication")
.desc(
"Desired initial replication count. Default 3.")
.hasArg().build());
opts.addOption(Option.builder("finalReplication")
.desc(
"Desired final replication count. Default 10.")
.hasArg().build());
opts.addOption(Option.builder("acceptableReplication")
.desc(
"Desired acceptable replication count. Default 9.")
.hasArg().build());
opts.addOption(Option.builder("timeout")
.desc(
"Desired timeout for the acceptable" +
" replication in seconds. Default 10")
.hasArg().build());
opts.addOption(Option.builder("nosymlink")
.desc("Ignore symlinks into the same directory")
.build());
GenericOptionsParser parser = new GenericOptionsParser(opts, args);
if (parser.getCommandLine().hasOption("help") ||
parser.getCommandLine().hasOption("h")) {
printHelp(opts);
return false;
}
input = parser.getCommandLine().getOptionValue(
"input", System.getProperty("java.class.path"));
whitelist = parser.getCommandLine().getOptionValue(
"whitelist", DefaultJars.DEFAULT_MR_JARS);
blacklist = parser.getCommandLine().getOptionValue(
"blacklist", DefaultJars.DEFAULT_EXCLUDED_MR_JARS);
initialReplication =
Short.parseShort(parser.getCommandLine().getOptionValue(
"initialReplication", "3"));
finalReplication =
Short.parseShort(parser.getCommandLine().getOptionValue(
"finalReplication", "10"));
acceptableReplication =
Short.parseShort(
parser.getCommandLine().getOptionValue(
"acceptableReplication", "9"));
timeout =
Integer.parseInt(
parser.getCommandLine().getOptionValue("timeout", "10"));
if (parser.getCommandLine().hasOption("nosymlink")) {
ignoreSymlink = true;
}
String fs = parser.getCommandLine()
.getOptionValue("fs", null);
String path = parser.getCommandLine().getOptionValue("target",
"/usr/lib/mr-framework.tar.gz#mr-framework");
boolean isFullPath =
path.startsWith("hdfs://") ||
path.startsWith("file://");
if (fs == null) {
fs = conf.getTrimmed(FS_DEFAULT_NAME_KEY);
if (fs == null && !isFullPath) {
LOG.error("No filesystem specified in either fs or target.");
printHelp(opts);
return false;
} else {
LOG.info(String.format(
"Target file system not specified. Using default %s", fs));
}
}
if (path.isEmpty()) {
LOG.error("Target directory not specified");
printHelp(opts);
return false;
}
StringBuilder absolutePath = new StringBuilder();
if (!isFullPath) {
absolutePath.append(fs);
absolutePath.append(path.startsWith("/") ? "" : "/");
}
absolutePath.append(path);
target = absolutePath.toString();
if (parser.getRemainingArgs().length > 0) {
LOG.warn("Unexpected parameters");
printHelp(opts);
return false;
}
return true;
} | @Test
void testWrongArgument() throws IOException {
String[] args = new String[]{"-unexpected"};
FrameworkUploader uploader = new FrameworkUploader();
boolean success = uploader.parseArguments(args);
assertFalse(success, "Expected to print help");
} |
@SuppressFBWarnings(value = "DMI_RANDOM_USED_ONLY_ONCE")
public static LocalCommands open(
final KsqlEngine ksqlEngine,
final File directory
) {
if (!directory.exists()) {
if (!directory.mkdirs()) {
throw new KsqlServerException("Couldn't create the local commands directory: "
+ directory.getPath()
+ "\n Make sure the directory exists and is readable/writable for KSQL server "
+ "\n or its parent directory is readable/writable by KSQL server"
+ "\n or change it to a readable/writable directory by setting '"
+ KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
+ "' config in the properties file."
);
}
try {
Files.setPosixFilePermissions(directory.toPath(),
PosixFilePermissions.fromString("rwx------"));
} catch (final IOException e) {
throw new KsqlServerException(String.format(
"Couldn't set POSIX permissions on the backups directory: %s. Error = %s",
directory.getPath(), e.getMessage()));
}
}
if (!directory.isDirectory()) {
throw new KsqlServerException(directory.getPath()
+ " is not a directory."
+ "\n Make sure the directory exists and is readable/writable for KSQL server "
+ "\n or its parent directory is readable/writable by KSQL server"
+ "\n or change it to a readable/writable directory by setting '"
+ KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
+ "' config in the properties file."
);
}
if (!directory.canWrite() || !directory.canRead() || !directory.canExecute()) {
throw new KsqlServerException("The local commands directory is not readable/writable "
+ "for KSQL server: "
+ directory.getPath()
+ "\n Make sure the directory exists and is readable/writable for KSQL server "
+ "\n or change it to a readable/writable directory by setting '"
+ KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
+ "' config in the properties file."
);
}
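// File name combines the current time with a random hex suffix to avoid collisions between server instances.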
final File file = new File(directory, String.format("local_commands_%d_%s%s",
System.currentTimeMillis(), Integer.toHexString(RANDOM.nextInt()),
LOCAL_COMMANDS_FILE_SUFFIX));
return new LocalCommands(directory, ksqlEngine, LocalCommandsFile.createWriteable(file));
} | @Test
public void shouldThrowWhenCommandLocationIsNotDirectory() throws IOException {
// Given
File file = commandsDir.newFile();
// When
final Exception e = assertThrows(
KsqlServerException.class,
() -> LocalCommands.open(ksqlEngine, file)
);
// Then
assertThat(e.getMessage(), containsString(String.format(
"%s is not a directory.",
file.getAbsolutePath()
)));
} |
public static CharSequence escapeCsv(CharSequence value) {
return escapeCsv(value, false);
} | @Test
public void escapeCsvAlreadyQuoted() {
CharSequence value = "\"something\"";
CharSequence expected = "\"something\"";
escapeCsv(value, expected);
} |
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) {
initialize(conf, true);
} | @Test (timeout = 30000)
public void testConstructorWithRules() throws Exception {
// security off, but use rules if explicitly set
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[1:$1@$0](.*@OTHER.REALM)s/(.*)@.*/other-$1/");
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL_MECHANISM, "hadoop");
UserGroupInformation.setConfiguration(conf);
testConstructorSuccess("user1", "user1");
testConstructorSuccess("user4@OTHER.REALM", "other-user4");
// failure test
testConstructorFailures("user2@DEFAULT.REALM");
testConstructorFailures("user3/cron@DEFAULT.REALM");
testConstructorFailures("user5/cron@OTHER.REALM");
// with MIT
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL_MECHANISM, "mit");
UserGroupInformation.setConfiguration(conf);
testConstructorSuccess("user2@DEFAULT.REALM", "user2@DEFAULT.REALM");
testConstructorSuccess("user3/cron@DEFAULT.REALM", "user3/cron@DEFAULT.REALM");
testConstructorSuccess("user5/cron@OTHER.REALM", "user5/cron@OTHER.REALM");
// failures
testConstructorFailures("user6@example.com@OTHER.REALM");
testConstructorFailures("user7@example.com@DEFAULT.REALM");
testConstructorFailures(null);
testConstructorFailures("");
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL_MECHANISM, "hadoop");
} |
@Override
public void transferBufferOwnership(Object oldOwner, Object newOwner, Buffer buffer) {
checkState(buffer.isBuffer(), "Only buffer supports transfer ownership.");
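// Move the usage accounting to the new owner and recycle against it when the buffer is released.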
decNumRequestedBuffer(oldOwner);
incNumRequestedBuffer(newOwner);
buffer.setRecycler(memorySegment -> recycleBuffer(newOwner, memorySegment));
} | @Test
void testCanNotTransferOwnershipForEvent() throws IOException {
TieredStorageMemoryManagerImpl memoryManager =
createStorageMemoryManager(
1, Collections.singletonList(new TieredStorageMemorySpec(this, 0)));
BufferConsumer bufferConsumer =
BufferBuilderTestUtils.createEventBufferConsumer(1, Buffer.DataType.EVENT_BUFFER);
Buffer buffer = bufferConsumer.build();
bufferConsumer.close();
assertThatThrownBy(() -> memoryManager.transferBufferOwnership(this, new Object(), buffer))
.isInstanceOf(IllegalStateException.class);
} |
public static Http2Headers toHttp2Headers(HttpMessage in, boolean validateHeaders) {
HttpHeaders inHeaders = in.headers();
final Http2Headers out = new DefaultHttp2Headers(validateHeaders, inHeaders.size());
if (in instanceof HttpRequest) {
HttpRequest request = (HttpRequest) in;
String host = inHeaders.getAsString(HttpHeaderNames.HOST);
if (isOriginForm(request.uri()) || isAsteriskForm(request.uri())) {
out.path(new AsciiString(request.uri()));
setHttp2Scheme(inHeaders, out);
} else {
URI requestTargetUri = URI.create(request.uri());
out.path(toHttp2Path(requestTargetUri));
// Take from the request-line if HOST header was empty
host = isNullOrEmpty(host) ? requestTargetUri.getAuthority() : host;
setHttp2Scheme(inHeaders, requestTargetUri, out);
}
setHttp2Authority(host, out);
out.method(request.method().asciiName());
} else if (in instanceof HttpResponse) {
HttpResponse response = (HttpResponse) in;
out.status(response.status().codeAsText());
}
// Add the HTTP headers which have not been consumed above
toHttp2Headers(inHeaders, out);
return out;
} | @Test
public void stripConnectionHeadersAndNominees() {
HttpHeaders inHeaders = new DefaultHttpHeaders();
inHeaders.add(CONNECTION, "foo");
inHeaders.add("foo", "bar");
Http2Headers out = new DefaultHttp2Headers();
HttpConversionUtil.toHttp2Headers(inHeaders, out);
assertTrue(out.isEmpty());
} |
public String cloneNote(String sourceNoteId, String newNotePath, AuthenticationInfo subject)
throws IOException {
return cloneNote(sourceNoteId, "", newNotePath, subject);
} | @Test
void testCloneNote() throws Exception {
String noteId = notebook.createNote("note1", anonymous);
notebook.processNote(noteId,
note -> {
final Paragraph p = note.addNewParagraph(AuthenticationInfo.ANONYMOUS);
p.setText("hello world");
try {
note.runAll(anonymous, true, false, new HashMap<>());
} catch (Exception e) {
fail();
}
p.setStatus(Status.RUNNING);
return null;
});
String cloneNoteId = notebook.cloneNote(noteId, "clone note", anonymous);
notebook.processNote(noteId,
note -> {
Paragraph p = note.getParagraph(0);
notebook.processNote(cloneNoteId,
cloneNote -> {
Paragraph cp = cloneNote.getParagraph(0);
assertEquals(Status.READY, cp.getStatus());
// Keep same ParagraphId
assertEquals(cp.getId(), p.getId());
assertEquals(cp.getText(), p.getText());
assertEquals(cp.getReturn().message().get(0).getData(), p.getReturn().message().get(0).getData());
return null;
});
return null;
});
// Verify clone note with subject
AuthenticationInfo subject = new AuthenticationInfo("user1");
String cloneNote2Id = notebook.cloneNote(noteId, "clone note2", subject);
assertNotNull(authorizationService.getOwners(cloneNote2Id));
assertEquals(1, authorizationService.getOwners(cloneNote2Id).size());
Set<String> owners = new HashSet<>();
owners.add("user1");
assertEquals(owners, authorizationService.getOwners(cloneNote2Id));
notebook.removeNote(noteId, anonymous);
notebook.removeNote(cloneNoteId, anonymous);
notebook.removeNote(cloneNote2Id, anonymous);
} |
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
// we will do the validation / topic-creation in a loop, until we have confirmed all topics
// have existed with the expected number of partitions, or some create topic returns fatal errors.
log.debug("Starting to validate internal topics {} in partition assignor.", topics);
long currentWallClockMs = time.milliseconds();
final long deadlineMs = currentWallClockMs + retryTimeoutMs;
Set<String> topicsNotReady = new HashSet<>(topics.keySet());
final Set<String> newlyCreatedTopics = new HashSet<>();
while (!topicsNotReady.isEmpty()) {
final Set<String> tempUnknownTopics = new HashSet<>();
topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
newlyCreatedTopics.addAll(topicsNotReady);
if (!topicsNotReady.isEmpty()) {
final Set<NewTopic> newTopics = new HashSet<>();
for (final String topicName : topicsNotReady) {
if (tempUnknownTopics.contains(topicName)) {
// for the tempUnknownTopics, don't create topic for them
// we'll check again later if remaining retries > 0
continue;
}
final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
final Map<String, String> topicConfig = internalTopicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
log.debug("Going to create topic {} with {} partitions and config {}.",
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
topicConfig);
newTopics.add(
new NewTopic(
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
Optional.of(replicationFactor))
.configs(topicConfig));
}
// it's possible that some topics are not ready yet because they are
// temporarily not available, not because they do not exist; in this case
// the new topics to create may be empty and hence we can skip here
if (!newTopics.isEmpty()) {
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
final String topicName = createTopicResult.getKey();
try {
createTopicResult.getValue().get();
topicsNotReady.remove(topicName);
} catch (final InterruptedException fatalException) {
// this should not happen; if it ever happens it indicate a bug
Thread.currentThread().interrupt();
log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
} catch (final ExecutionException executionException) {
final Throwable cause = executionException.getCause();
if (cause instanceof TopicExistsException) {
// This topic didn't exist earlier or its leader not known before; just retain it for next round of validation.
log.info(
"Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n"
+
"Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n"
+
"Error message was: {}", topicName, retryBackOffMs,
cause.toString());
} else {
log.error("Unexpected error during topic creation for {}.\n" +
"Error message was: {}", topicName, cause.toString());
if (cause instanceof UnsupportedVersionException) {
final String errorMessage = cause.getMessage();
if (errorMessage != null &&
errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
throw new StreamsException(String.format(
"Could not create topic %s, because brokers don't support configuration replication.factor=-1."
+ " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
topicName)
);
}
} else if (cause instanceof TimeoutException) {
log.error("Creating topic {} timed out.\n" +
"Error message was: {}", topicName, cause.toString());
} else {
throw new StreamsException(
String.format("Could not create topic %s.", topicName),
cause
);
}
}
}
}
}
}
if (!topicsNotReady.isEmpty()) {
currentWallClockMs = time.milliseconds();
if (currentWallClockMs >= deadlineMs) {
final String timeoutError = String.format("Could not create topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
log.error(timeoutError);
throw new TimeoutException(timeoutError);
}
log.info(
"Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
topicsNotReady,
retryBackOffMs,
deadlineMs - currentWallClockMs
);
Utils.sleep(retryBackOffMs);
}
}
log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);
return newlyCreatedTopics;
} | @Test
public void shouldExhaustRetriesOnMarkedForDeletionTopic() {
mockAdminClient.addTopic(
false,
topic1,
Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList())),
null);
mockAdminClient.markTopicForDeletion(topic1);
final MockTime time = new MockTime(
(Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 15
);
final InternalTopicManager internalTopicManager =
new InternalTopicManager(time, mockAdminClient, new StreamsConfig(config));
final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
internalTopicConfig.setNumberOfPartitions(1);
final TimeoutException exception = assertThrows(
TimeoutException.class,
() -> internalTopicManager.makeReady(Collections.singletonMap(topic1, internalTopicConfig))
);
assertNull(exception.getCause());
assertThat(
exception.getMessage(),
equalTo("Could not create topics within 50 milliseconds." +
" This can happen if the Kafka cluster is temporarily not available.")
);
} |
public static <T> Flattened<T> flattenedSchema() {
return new AutoValue_Select_Flattened.Builder<T>()
.setNameFn(CONCAT_FIELD_NAMES)
.setNameOverrides(Collections.emptyMap())
.build();
} | @Test
@Category(NeedsRunner.class)
public void testClashingNameWithRenameFlatten() {
List<Row> bottomRow =
IntStream.rangeClosed(0, 2)
.mapToObj(i -> Row.withSchema(SIMPLE_SCHEMA).addValues(i, Integer.toString(i)).build())
.collect(Collectors.toList());
List<Row> rows =
bottomRow.stream()
.map(r -> Row.withSchema(NESTED_SCHEMA).addValues(r, r).build())
.collect(Collectors.toList());
PCollection<Row> unnested =
pipeline
.apply(Create.of(rows).withRowSchema(NESTED_SCHEMA))
.apply(
Select.<Row>flattenedSchema()
.keepMostNestedFieldName()
.withFieldNameAs("nested2.field1", "n2field1")
.withFieldNameAs("nested2.field2", "n2field2"));
assertEquals(CLASHING_NAME_UNNESTED_SCHEMA, unnested.getSchema());
pipeline.run();
} |
public synchronized ListenableFuture<?> waitForMinimumWorkers()
{
if (currentWorkerCount >= workerMinCount) {
return immediateFuture(null);
}
SettableFuture<?> future = SettableFuture.create();
workerSizeFutures.add(future);
// if future does not finish in wait period, complete with an exception
ScheduledFuture<?> timeoutTask = executor.schedule(
() -> {
synchronized (this) {
future.setException(new PrestoException(
GENERIC_INSUFFICIENT_RESOURCES,
format("Insufficient active worker nodes. Waited %s for at least %s workers, but only %s workers are active", executionMaxWait, workerMinCount, currentWorkerCount)));
}
},
executionMaxWait.toMillis(),
MILLISECONDS);
// remove future if finished (e.g., canceled, timed out)
future.addListener(() -> {
timeoutTask.cancel(true);
removeWorkerFuture(future);
}, executor);
return future;
} | @Test(timeOut = 10_000)
public void testTimeoutWaitingForWorkers()
throws InterruptedException
{
waitForMinimumWorkers();
assertFalse(workersTimeout.get());
addWorker(nodeManager);
assertFalse(workersTimeout.get());
assertEquals(minWorkersLatch.getCount(), 1);
Thread.sleep(SECONDS.toMillis(5));
assertTrue(workersTimeout.get());
assertEquals(minWorkersLatch.getCount(), 0);
} |
public static String generateFileName(String string) {
string = StringUtils.stripAccents(string);
StringBuilder buf = new StringBuilder();
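// Keep only whitelisted characters and collapse consecutive whitespace.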
for (int i = 0; i < string.length(); i++) {
char c = string.charAt(i);
if (Character.isSpaceChar(c)
&& (buf.length() == 0 || Character.isSpaceChar(buf.charAt(buf.length() - 1)))) {
continue;
}
if (ArrayUtils.contains(validChars, c)) {
buf.append(c);
}
}
String filename = buf.toString().trim();
if (TextUtils.isEmpty(filename)) {
return randomString(8);
} else if (filename.length() >= MAX_FILENAME_LENGTH) {
return filename.substring(0, MAX_FILENAME_LENGTH - MD5_HEX_LENGTH - 1) + "_" + md5(filename);
} else {
return filename;
}
} | @Test
public void testFeedTitleContainsDash() {
String result = FileNameGenerator.generateFileName("Left - Right");
assertEquals("Left - Right", result);
} |
public EclipseProfile getUserProfile(String accessToken) {
checkApiUrl();
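// Bearer-token authenticated GET against the Eclipse profile endpoint.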
var headers = new HttpHeaders();
headers.setBearerAuth(accessToken);
headers.setAccept(Arrays.asList(MediaType.APPLICATION_JSON));
var requestUrl = UrlUtil.createApiUrl(eclipseApiUrl, "openvsx", "profile");
var request = new RequestEntity<>(headers, HttpMethod.GET, URI.create(requestUrl));
try {
var response = restTemplate.exchange(request, String.class);
return parseEclipseProfile(response);
} catch (RestClientException exc) {
logger.error("Get request failed with URL: " + requestUrl, exc);
throw new ErrorResultException("Request for retrieving user profile failed: " + exc.getMessage(),
HttpStatus.INTERNAL_SERVER_ERROR);
}
} | @Test
public void testGetUserProfile() throws Exception {
Mockito.when(restTemplate.exchange(any(RequestEntity.class), eq(String.class)))
.thenReturn(mockProfileResponse());
var profile = eclipse.getUserProfile("12345");
assertThat(profile).isNotNull();
assertThat(profile.name).isEqualTo("test");
assertThat(profile.githubHandle).isEqualTo("test");
assertThat(profile.publisherAgreements).isNotNull();
assertThat(profile.publisherAgreements.openVsx).isNotNull();
assertThat(profile.publisherAgreements.openVsx.version).isEqualTo("1");
} |
@Override
public Double getDoubleAndRemove(K name) {
return null;
} | @Test
public void testGetDoubleAndRemoveDefault() {
assertEquals(1, HEADERS.getDoubleAndRemove("name1", 1), 0);
} |
public void shutdown(ComponentGraph graph) {
shutdownConfigRetriever();
if (graph != null) {
// As we are shutting down, there is no need to uninstall bundles.
deconstructComponentsAndBundles(graph.generation(), List.of(), graph.allConstructedComponentsAndProviders());
destructor.shutdown();
}
} | @Test
void providers_are_invoked_only_when_needed() {
writeBootstrapConfigs("id1", FailOnGetProvider.class);
Container container = newContainer(dirConfigSource);
ComponentGraph oldGraph = getNewComponentGraph(container);
container.shutdown(oldGraph);
} |
public JType generate(JCodeModel codeModel, String className, String packageName, URL schemaUrl) {
JPackage jpackage = codeModel._package(packageName);
ObjectNode schemaNode = readSchema(schemaUrl);
return ruleFactory.getSchemaRule().apply(className, schemaNode, null, jpackage, new Schema(null, schemaNode, null));
} | @Test
public void generateCreatesSchemaFromSchemaAsStringInput() throws IOException {
String schemaContent = IOUtils.toString(this.getClass().getResourceAsStream("/schema/address.json"));
final SchemaRule mockSchemaRule = mock(SchemaRule.class);
final RuleFactory mockRuleFactory = mock(RuleFactory.class);
when(mockRuleFactory.getSchemaRule()).thenReturn(mockSchemaRule);
when(mockRuleFactory.getGenerationConfig()).thenReturn(new DefaultGenerationConfig());
new SchemaMapper(mockRuleFactory, new SchemaGenerator()).generate(new JCodeModel(), "Address", "com.example.package", schemaContent);
ArgumentCaptor<JPackage> capturePackage = ArgumentCaptor.forClass(JPackage.class);
ArgumentCaptor<JsonNode> captureNode = ArgumentCaptor.forClass(JsonNode.class);
verify(mockSchemaRule).apply(eq("Address"), captureNode.capture(), eq(null), capturePackage.capture(), Mockito.isA(Schema.class));
assertThat(capturePackage.getValue().name(), is("com.example.package"));
assertThat(captureNode.getValue(), is(notNullValue()));
} |
@VisibleForTesting
List<ScheduledFuture<?>> getScheduledFutures() {
return scheduledFutures;
} | @Test
void requestClientToClose() throws Exception {
doHandshakeComplete();
Mockito.reset(eventLoopSpy);
authenticateChannel();
verify(eventLoopSpy).schedule(scheduledCaptor.capture(), anyLong(), eq(TimeUnit.SECONDS));
Runnable requestClientToClose = scheduledCaptor.getValue();
int taskListSize = handler.getScheduledFutures().size();
doReturn(true).when(channel).isActive();
requestClientToClose.run();
assertEquals(taskListSize + 1, handler.getScheduledFutures().size());
Object capture = writeCaptor.getValue();
assertTrue(capture instanceof TextWebSocketFrame);
TextWebSocketFrame frame = (TextWebSocketFrame) capture;
assertEquals("_CLOSE_", frame.text());
} |
@Override
public Map<String, String> evaluate(FunctionArgs args, EvaluationContext context) {
final String value = valueParam.required(args, context);
if (Strings.isNullOrEmpty(value)) {
return Collections.emptyMap();
}
final CharMatcher kvPairsMatcher = splitParam.optional(args, context).orElse(CharMatcher.whitespace());
final CharMatcher kvDelimMatcher = valueSplitParam.optional(args, context).orElse(CharMatcher.anyOf("="));
Splitter outerSplitter = Splitter.on(DelimiterCharMatcher.withQuoteHandling(kvPairsMatcher))
.omitEmptyStrings()
.trimResults();
final Splitter entrySplitter = Splitter.on(kvDelimMatcher)
.omitEmptyStrings()
.limit(2)
.trimResults();
return new MapSplitter(outerSplitter,
entrySplitter,
ignoreEmptyValuesParam.optional(args, context).orElse(true),
trimCharactersParam.optional(args, context).orElse(CharMatcher.none()),
trimValueCharactersParam.optional(args, context).orElse(CharMatcher.none()),
allowDupeKeysParam.optional(args, context).orElse(true),
duplicateHandlingParam.optional(args, context).orElse(TAKE_FIRST))
.split(value);
} | @Test
void testConcatDelimiter() {
final Map<String, Expression> arguments = Map.of("value", valueExpression, "handle_dup_keys",
new StringExpression(new CommonToken(0), ","));
Map<String, String> result = classUnderTest.evaluate(new FunctionArgs(classUnderTest, arguments), evaluationContext);
Map<String, String> expectedResult = new HashMap<>();
expectedResult.put("test", "do,remi");
expectedResult.put("number", "12345");
assertThat(result).containsExactlyInAnyOrderEntriesOf(expectedResult);
} |
@Override
public TransformResultMetadata getResultMetadata() {
return BOOLEAN_SV_NO_DICTIONARY_METADATA;
} | @Test
public void testLogicalOperatorTransformFunction() {
ExpressionContext intEqualsExpr =
RequestContextUtils.getExpression(String.format("EQUALS(%s, %d)", INT_SV_COLUMN, _intSVValues[0]));
ExpressionContext longEqualsExpr =
RequestContextUtils.getExpression(String.format("EQUALS(%s, %d)", LONG_SV_COLUMN, _longSVValues[0]));
String functionName = getFunctionName();
ExpressionContext expression = ExpressionContext.forFunction(
new FunctionContext(FunctionContext.Type.TRANSFORM, functionName,
Arrays.asList(intEqualsExpr, longEqualsExpr)));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertEquals(transformFunction.getName(), functionName);
TransformResultMetadata resultMetadata = transformFunction.getResultMetadata();
assertEquals(resultMetadata.getDataType(), FieldSpec.DataType.BOOLEAN);
assertTrue(resultMetadata.isSingleValue());
assertFalse(resultMetadata.hasDictionary());
boolean[] expectedValues = new boolean[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = getExpectedValue(_intSVValues[i] == _intSVValues[0], _longSVValues[i] == _longSVValues[0]);
}
testTransformFunction(transformFunction, expectedValues);
} |
@Override
public Result<V, E> search(Graph<V, E> graph, V src, V dst,
EdgeWeigher<V, E> weigher, int maxPaths) {
checkArguments(graph, src, dst);
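// Substitute a uniform default edge weigher when none is supplied.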
return internalSearch(graph, src, dst,
weigher != null ? weigher : new DefaultEdgeWeigher<>(),
maxPaths);
} | @Test(expected = NullPointerException.class)
public void nullGraphArgument() {
graphSearch().search(null, A, H, weigher, 1);
} |
@Nonnull
@SuppressWarnings("checkstyle:MagicNumber") // number of hours per day isn't magic :)
public static String formatJobDuration(long durationMs) {
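// Long.MIN_VALUE cannot be negated without overflow, so it is rendered verbatim.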
if (durationMs == Long.MIN_VALUE) {
return "" + Long.MIN_VALUE;
}
String sign = "";
if (durationMs < 0) {
sign = "-";
durationMs = -durationMs;
}
long millis = durationMs % 1000;
durationMs /= 1000;
long seconds = durationMs % 60;
durationMs /= 60;
long minutes = durationMs % 60;
durationMs /= 60;
long hours = durationMs % 24;
durationMs /= 24;
String textUpToHours = format("%02d:%02d:%02d.%03d", hours, minutes, seconds, millis);
return sign + (durationMs > 0 ? durationMs + "d " : "") + textUpToHours;
} | @Test
public void test_formatJobDuration() {
assertEquals("20:19:02.855", formatJobDuration(73_142_855));
assertEquals("00:00:00.120", formatJobDuration(120));
assertEquals("00:00:05.120", formatJobDuration(5120));
assertEquals("13d 13:52:22.855", formatJobDuration(1173_142_855));
assertEquals("2d 00:05:42.855", formatJobDuration(173_142_855));
assertEquals("00:12:22.855", formatJobDuration(742_855));
assertEquals("106751991167d 07:12:55.807", formatJobDuration(Long.MAX_VALUE));
assertEquals("-9223372036854775808", formatJobDuration(Long.MIN_VALUE));
} |
@VisibleForTesting
ExportResult<MediaContainerResource> exportOneDrivePhotos(TokensAndUrlAuthData authData,
Optional<IdOnlyContainerResource> albumData, Optional<PaginationData> paginationData,
UUID jobId) throws IOException {
Optional<String> albumId = Optional.empty();
if (albumData.isPresent()) {
albumId = Optional.of(albumData.get().getId());
}
Optional<String> paginationUrl = getDrivePaginationToken(paginationData);
MicrosoftDriveItemsResponse driveItemsResponse;
if (paginationData.isPresent() || albumData.isPresent()) {
driveItemsResponse =
getOrCreateMediaInterface(authData).getDriveItems(albumId, paginationUrl);
} else {
driveItemsResponse = getOrCreateMediaInterface(authData).getDriveItemsFromSpecialFolder(
MicrosoftSpecialFolder.FolderType.photos);
}
PaginationData nextPageData = setNextPageToken(driveItemsResponse);
ContinuationData continuationData = new ContinuationData(nextPageData);
MediaContainerResource containerResource;
MicrosoftDriveItem[] driveItems = driveItemsResponse.getDriveItems();
List<MediaAlbum> albums = new ArrayList<>();
List<PhotoModel> photos = new ArrayList<>();
List<VideoModel> videos = new ArrayList<>();
if (driveItems != null && driveItems.length > 0) {
for (MicrosoftDriveItem driveItem : driveItems) {
MediaAlbum album = tryConvertDriveItemToMediaAlbum(driveItem, jobId);
if (album != null) {
albums.add(album);
continuationData.addContainerResource(new IdOnlyContainerResource(driveItem.id));
continue;
}
PhotoModel photo = tryConvertDriveItemToPhotoModel(albumId, driveItem, jobId);
if (photo != null) {
photos.add(photo);
continue;
}
VideoModel video = tryConvertDriveItemToVideoModel(albumId, driveItem, jobId);
if (video != null) {
videos.add(video);
continue;
}
}
}
ExportResult.ResultType result =
nextPageData == null ? ExportResult.ResultType.END : ExportResult.ResultType.CONTINUE;
containerResource = new MediaContainerResource(albums, photos, videos);
return new ExportResult<>(result, containerResource, continuationData);
} | @Test
public void exportMediaWithNextPage() throws IOException {
// Setup
when(driveItemsResponse.getDriveItems()).thenReturn(new MicrosoftDriveItem[] {
setUpSinglePhoto(PHOTO_FILENAME, PHOTO_URI, PHOTO_ID),
setUpSingleVideo(VIDEO_FILENAME, VIDEO_URI, VIDEO_ID)
});
when(driveItemsResponse.getNextPageLink()).thenReturn(DRIVE_PAGE_URL);
IdOnlyContainerResource idOnlyContainerResource = new IdOnlyContainerResource(FOLDER_ID);
// Run
ExportResult<MediaContainerResource> result = microsoftMediaExporter.exportOneDrivePhotos(
null, Optional.of(idOnlyContainerResource), Optional.empty(), uuid);
// Verify method calls
verify(mediaInterface).getDriveItems(Optional.of(FOLDER_ID), Optional.empty());
verify(driveItemsResponse).getDriveItems();
// Verify pagination token is set
ContinuationData continuationData = result.getContinuationData();
StringPaginationToken paginationToken =
(StringPaginationToken) continuationData.getPaginationData();
assertThat(paginationToken.getToken()).isEqualTo(DRIVE_TOKEN_PREFIX + DRIVE_PAGE_URL);
// Verify no albums are exported
Collection<MediaAlbum> actualAlbums = result.getExportedData().getAlbums();
assertThat(actualAlbums).isEmpty();
// Verify one photo (in an album) should be exported
Collection<PhotoModel> actualPhotos = result.getExportedData().getPhotos();
assertThat(actualPhotos.size()).isEqualTo(1);
assertThat(actualPhotos.stream().map(PhotoModel::getFetchableUrl).collect(Collectors.toList()))
.containsExactly(PHOTO_URI);
assertThat(actualPhotos.stream().map(PhotoModel::getAlbumId).collect(Collectors.toList()))
.containsExactly(FOLDER_ID);
assertThat(actualPhotos.stream().map(PhotoModel::getTitle).collect(Collectors.toList()))
.containsExactly(PHOTO_FILENAME);
// Verify one video (in an album) should be exported
Collection<VideoModel> actualVideos = result.getExportedData().getVideos();
assertThat(actualVideos.size()).isEqualTo(1);
assertThat(actualVideos.stream().map(VideoModel::getFetchableUrl).collect(Collectors.toList()))
.containsExactly(VIDEO_URI);
assertThat(actualVideos.stream().map(VideoModel::getAlbumId).collect(Collectors.toList()))
.containsExactly(FOLDER_ID);
assertThat(actualVideos.stream().map(VideoModel::getName).collect(Collectors.toList()))
.containsExactly(VIDEO_FILENAME);
// Verify there are no containers ready for sub-processing
List<ContainerResource> actualResources = continuationData.getContainerResources();
assertThat(actualResources).isEmpty();
} |
@ApolloAuditLog(type = OpType.CREATE, name = "AppNamespace.create", description = "createDefaultAppNamespace")
@Transactional
public void createDefaultAppNamespace(String appId) {
if (!isAppNamespaceNameUnique(appId, ConfigConsts.NAMESPACE_APPLICATION)) {
throw new BadRequestException("App already has application namespace. AppId = %s", appId);
}
AppNamespace appNs = new AppNamespace();
appNs.setAppId(appId);
appNs.setName(ConfigConsts.NAMESPACE_APPLICATION);
appNs.setComment("default app namespace");
appNs.setFormat(ConfigFileFormat.Properties.getValue());
String userId = userInfoHolder.getUser().getUserId();
appNs.setDataChangeCreatedBy(userId);
appNs.setDataChangeLastModifiedBy(userId);
appNamespaceRepository.save(appNs);
} | @Test
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testCreateDefaultAppNamespace() {
appNamespaceService.createDefaultAppNamespace(APP);
AppNamespace appNamespace = appNamespaceService.findByAppIdAndName(APP, ConfigConsts.NAMESPACE_APPLICATION);
Assert.assertNotNull(appNamespace);
Assert.assertEquals(ConfigFileFormat.Properties.getValue(), appNamespace.getFormat());
} |
Object getCellValue(Cell cell, Schema.FieldType type) {
ByteString cellValue = cell.getValue();
int valueSize = cellValue.size();
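// Fixed-width numeric types are validated against their exact byte length and decoded big-endian.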
switch (type.getTypeName()) {
case BOOLEAN:
checkArgument(valueSize == 1, message("Boolean", 1));
return cellValue.toByteArray()[0] != 0;
case BYTE:
checkArgument(valueSize == 1, message("Byte", 1));
return cellValue.toByteArray()[0];
case INT16:
checkArgument(valueSize == 2, message("Int16", 2));
return Shorts.fromByteArray(cellValue.toByteArray());
case INT32:
checkArgument(valueSize == 4, message("Int32", 4));
return Ints.fromByteArray(cellValue.toByteArray());
case INT64:
checkArgument(valueSize == 8, message("Int64", 8));
return Longs.fromByteArray(cellValue.toByteArray());
case FLOAT:
checkArgument(valueSize == 4, message("Float", 4));
return Float.intBitsToFloat(Ints.fromByteArray(cellValue.toByteArray()));
case DOUBLE:
checkArgument(valueSize == 8, message("Double", 8));
return Double.longBitsToDouble(Longs.fromByteArray(cellValue.toByteArray()));
case DATETIME:
return DateTime.parse(cellValue.toStringUtf8());
case STRING:
return cellValue.toStringUtf8();
case BYTES:
return cellValue.toByteArray();
case LOGICAL_TYPE:
String identifier = checkArgumentNotNull(type.getLogicalType()).getIdentifier();
throw new IllegalStateException("Unsupported logical type: " + identifier);
default:
throw new IllegalArgumentException(
String.format("Unsupported cell value type '%s'.", type.getTypeName()));
}
} | @Test
public void shouldParseBytesType() {
byte[] value = new byte[] {1, 2, 3, 4, 5};
assertEquals(
ByteString.copyFrom(value),
ByteString.copyFrom((byte[]) PARSER.getCellValue(cell(value), BYTES)));
} |
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
if(proxy.isSupported(source, target)) {
return proxy.copy(source, target, status, callback, listener);
}
// Copy between encrypted and unencrypted data room
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// File key must be set for new upload
status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
}
final Path result = copy.copy(source, target, status, callback, listener);
nodeid.cache(target, null);
return result.withAttributes(new SDSAttributesFinderFeature(session, nodeid).find(result));
} | @Test
public void testCopyFileToDifferentDataRoom() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room1 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path room2 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path source = new SDSTouchFeature(session, nodeid).touch(new Path(room1, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Path target = new SDSTouchFeature(session, nodeid).touch(new Path(room2, source.getName(), EnumSet.of(Path.Type.file)), new TransferStatus());
final SDSDelegatingCopyFeature feature = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
assertTrue(feature.isSupported(source, target));
assertNotNull(feature.copy(source, target, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener()).attributes().getVersionId());
assertTrue(new SDSFindFeature(session, nodeid).find(source));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
new SDSDeleteFeature(session, nodeid).delete(Arrays.asList(room1, room2), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static boolean hasAnnotation(Class clazz, String annotationClass) {
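// Compares by fully qualified name, so the annotation class need not be loadable in this classloader.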
for (Annotation annot : clazz.getDeclaredAnnotations())
if (annot.annotationType().getName().equals(annotationClass))
return true;
return false;
} | @Test
public void testHasAnnotationJavassist() throws Exception {
ClassPool ctPool = ClassPool.getDefault();
CtClass ctClass = ctPool.getCtClass(AnonymousClassPatchPlugin.class.getName());
assertTrue(AnnotationHelper.hasAnnotation(ctClass, "org.hotswap.agent.annotation.Plugin"));
assertFalse(AnnotationHelper.hasAnnotation(ctClass, "xxxx"));
} |
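A self-contained usage sketch of the same name-based check via plain reflection (the Legacy class is illustrative; @Deprecated works here because it is retained at runtime):

import java.lang.annotation.Annotation;

public class AnnotationNameCheckDemo {
    @Deprecated // runtime-retained, so visible to getDeclaredAnnotations()
    static class Legacy {
    }

    public static void main(String[] args) {
        // the same check as the helper above, inlined for a runnable demo
        boolean found = false;
        for (Annotation annot : Legacy.class.getDeclaredAnnotations()) {
            if (annot.annotationType().getName().equals("java.lang.Deprecated")) {
                found = true;
            }
        }
        System.out.println(found); // true
    }
}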
@Override
public String rendering() {
final StringBuilder ladderSB = new StringBuilder();
int deep = 0;
for (String item : items) {
            // the first item gets no indentation or ladder prefix
if (deep == 0) {
ladderSB.append(item).append(System.lineSeparator());
}
            // subsequent items are indented and prefixed with the ladder marker
else {
ladderSB.append(repeat(STEP_CHAR, deep * INDENT_STEP))
.append(LADDER_CHAR)
.append(item)
.append(System.lineSeparator());
}
deep++;
}
return ladderSB.toString();
} | @Test
void testRendering() throws Exception {
TLadder ladder = new TLadder();
ladder.addItem("1");
ladder.addItem("2");
ladder.addItem("3");
ladder.addItem("4");
String result = ladder.rendering();
String expected = "1" + System.lineSeparator() + " `-2"
+ System.lineSeparator() + " `-3"
+ System.lineSeparator() + " `-4"
+ System.lineSeparator();
assertThat(result, equalTo(expected));
System.out.println(result);
} |
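A runnable sketch of the ladder layout with assumed constants: STEP_CHAR, LADDER_CHAR, and INDENT_STEP are not shown in the snippet above, so the values below are guesses chosen to reproduce the shape in the test and may differ from the real class.

import java.util.Arrays;
import java.util.List;

public class LadderSketch {
    private static final String STEP_CHAR = " ";    // assumed indent character
    private static final String LADDER_CHAR = "`-"; // assumed branch marker
    private static final int INDENT_STEP = 1;       // assumed indent width per level

    static String render(List<String> items) {
        StringBuilder sb = new StringBuilder();
        int deep = 0;
        for (String item : items) {
            if (deep == 0) {
                sb.append(item).append(System.lineSeparator());
            } else {
                // indentation grows with depth, producing the ladder shape
                sb.append(STEP_CHAR.repeat(deep * INDENT_STEP))
                        .append(LADDER_CHAR)
                        .append(item)
                        .append(System.lineSeparator());
            }
            deep++;
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.print(render(Arrays.asList("1", "2", "3", "4")));
    }
}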
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
if(directory.isRoot()) {
final AttributedList<Path> list = new AttributedList<>();
list.add(MYFILES_NAME);
list.add(SHARED_NAME);
listener.chunk(directory, list);
return list;
}
else if(new SimplePathPredicate(SHARED_NAME).test(directory)) {
return new SharedWithMeListService(session, fileid).list(directory, listener);
}
else {
return new GraphItemListService(session, fileid).list(directory, listener);
}
} | @Test
public void testListShared() throws Exception {
final AttributedList<Path> list = new OneDriveListService(session, fileid).list(OneDriveListService.SHARED_NAME, new DisabledListProgressListener());
assertFalse(list.isEmpty());
for(Path f : list) {
assertEquals(OneDriveListService.SHARED_NAME, f.getParent());
}
} |
@Override
public String version() {
return AppInfoParser.getVersion();
} | @Test
public void testTimestampRouterVersionRetrievedFromAppInfoParser() {
assertEquals(AppInfoParser.getVersion(), xform.version());
} |
public static <V> Values<V> create() {
return new Values<>();
} | @Test
@Category(NeedsRunner.class)
public void testValuesEmpty() {
PCollection<KV<String, Integer>> input =
p.apply(
Create.of(Arrays.asList(EMPTY_TABLE))
.withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())));
PCollection<Integer> output = input.apply(Values.create());
PAssert.that(output).empty();
p.run();
} |
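Outside a test, the same transform drops keys from any KV-typed collection. A minimal pipeline sketch, assuming the Beam Java SDK with the default direct runner on the classpath:

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.Values;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;

public class ValuesExample {
    public static void main(String[] args) {
        Pipeline p = Pipeline.create(PipelineOptionsFactory.create());
        // Values.create() maps each KV<K, V> element to its value, dropping the key
        PCollection<Integer> values =
                p.apply(Create.of(KV.of("a", 1), KV.of("b", 2)))
                        .apply(Values.create()); // yields 1 and 2, in no particular order
        p.run().waitUntilFinish();
    }
}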
public static AliyunClientFactory from(Map<String, String> properties) {
String factoryImpl =
PropertyUtil.propertyAsString(
properties,
AliyunProperties.CLIENT_FACTORY,
DefaultAliyunClientFactory.class.getName());
return loadClientFactory(factoryImpl, properties);
} | @Test
public void testLoadCustom() {
Map<String, String> properties = Maps.newHashMap();
properties.put(AliyunProperties.CLIENT_FACTORY, CustomFactory.class.getName());
assertThat(AliyunClientFactories.from(properties))
.as("Should load custom class")
.isInstanceOf(CustomFactory.class);
} |
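The loadClientFactory helper is not shown; below is a hedged sketch of the reflective load-by-class-name pattern it presumably wraps (the real helper likely also passes the properties map to an initialize hook, omitted here; all names are illustrative):

public class FactoryLoader {
    interface ClientFactory {
        // marker interface standing in for AliyunClientFactory
    }

    static ClientFactory load(String impl) {
        try {
            // requires a public no-arg constructor on the implementation class
            return (ClientFactory) Class.forName(impl)
                    .getDeclaredConstructor()
                    .newInstance();
        } catch (ReflectiveOperationException e) {
            throw new IllegalArgumentException("Cannot instantiate factory: " + impl, e);
        }
    }
}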
@Override
public void ignoreAutoTrackActivity(Class<?> activity) {
} | @Test
public void ignoreAutoTrackActivity() {
mSensorsAPI.ignoreAutoTrackActivity(EmptyActivity.class);
Assert.assertTrue(mSensorsAPI.isActivityAutoTrackAppClickIgnored(EmptyActivity.class));
} |
public void matches(@Nullable String regex) {
checkNotNull(regex);
if (actual == null) {
failWithActual("expected a string that matches", regex);
} else if (!actual.matches(regex)) {
if (regex.equals(actual)) {
failWithoutActual(
fact("expected to match", regex),
fact("but was", actual),
simpleFact("Looks like you want to use .isEqualTo() for an exact equality assertion."));
} else if (Platform.containsMatch(actual, regex)) {
failWithoutActual(
fact("expected to match", regex),
fact("but was", actual),
simpleFact("Did you mean to call containsMatch() instead of match()?"));
} else {
failWithActual("expected to match", regex);
}
}
} | @Test
public void stringMatchesStringWithFail() {
expectFailureWhenTestingThat("abcaqadev").matches(".*aaa.*");
assertFailureValue("expected to match", ".*aaa.*");
} |
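The two hint branches reflect a real asymmetry in java.util.regex semantics: String.matches() succeeds only when the pattern covers the entire input, while a containsMatch-style check needs the pattern to hit just a substring. A small sketch:

import java.util.regex.Pattern;

public class MatchVsFindDemo {
    public static void main(String[] args) {
        String actual = "abcaqadev";
        // full match: the whole string must satisfy the pattern
        System.out.println(actual.matches("aqa"));                         // false
        System.out.println(actual.matches(".*aqa.*"));                     // true
        // containsMatch semantics: a matching substring is enough
        System.out.println(Pattern.compile("aqa").matcher(actual).find()); // true
    }
}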
@Override
public PageResult<SmsTemplateDO> getSmsTemplatePage(SmsTemplatePageReqVO pageReqVO) {
return smsTemplateMapper.selectPage(pageReqVO);
} | @Test
public void testGetSmsTemplatePage() {
        // mock data
        SmsTemplateDO dbSmsTemplate = randomPojo(SmsTemplateDO.class, o -> { // the record expected in the query result
o.setType(SmsTemplateTypeEnum.PROMOTION.getType());
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
o.setCode("tudou");
o.setContent("芋道源码");
o.setApiTemplateId("yunai");
o.setChannelId(1L);
o.setCreateTime(buildTime(2021, 11, 11));
});
smsTemplateMapper.insert(dbSmsTemplate);
        // test: type mismatch
smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setType(SmsTemplateTypeEnum.VERIFICATION_CODE.getType())));
        // test: status mismatch
smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
        // test: code mismatch
smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setCode("yuanma")));
        // test: content mismatch
smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setContent("源码")));
        // test: apiTemplateId mismatch
smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setApiTemplateId("nai")));
        // test: channelId mismatch
smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setChannelId(2L)));
        // test: createTime mismatch
smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setCreateTime(buildTime(2021, 12, 12))));
        // prepare parameters
SmsTemplatePageReqVO reqVO = new SmsTemplatePageReqVO();
reqVO.setType(SmsTemplateTypeEnum.PROMOTION.getType());
reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
reqVO.setCode("tu");
reqVO.setContent("芋道");
reqVO.setApiTemplateId("yu");
reqVO.setChannelId(1L);
reqVO.setCreateTime(buildBetweenTime(2021, 11, 1, 2021, 12, 1));
        // invoke
PageResult<SmsTemplateDO> pageResult = smsTemplateService.getSmsTemplatePage(reqVO);
        // assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbSmsTemplate, pageResult.getList().get(0));
} |