focal_method: string, 13 to 60.9k characters
test_case: string, 25 to 109k characters
public static UForAll create(List<UTypeVar> typeVars, UType quantifiedType) { return new AutoValue_UForAll(ImmutableList.copyOf(typeVars), quantifiedType); }
@Test public void serialization() { UType nullType = UPrimitiveType.create(TypeKind.NULL); UType objectType = UClassType.create("java.lang.Object", ImmutableList.<UType>of()); UTypeVar eType = UTypeVar.create("E", nullType, objectType); UType listOfEType = UClassType.create("java.util.List", ImmutableList.<UType>of(eType)); SerializableTester.reserializeAndAssert(UForAll.create(ImmutableList.of(eType), listOfEType)); }
public static void unzip(Path archive, Path destination) throws IOException { unzip(archive, destination, false); }
@Test public void testUnzip_modificationTimePreserved() throws URISyntaxException, IOException { Path archive = Paths.get(Resources.getResource("plugins-common/test-archives/test.zip").toURI()); Path destination = tempFolder.getRoot().toPath(); ZipUtil.unzip(archive, destination); assertThat(Files.getLastModifiedTime(destination.resolve("file1.txt"))) .isEqualTo(FileTime.from(Instant.parse("2018-08-30T14:53:05Z"))); assertThat(Files.getLastModifiedTime(destination.resolve("my-zip/file2.txt"))) .isEqualTo(FileTime.from(Instant.parse("2018-08-30T14:53:44Z"))); assertThat(Files.getLastModifiedTime(destination.resolve("my-zip"))) .isEqualTo(FileTime.from(Instant.parse("2018-08-30T15:15:48Z"))); assertThat(Files.getLastModifiedTime(destination.resolve("my-zip/some"))) .isEqualTo(FileTime.from(Instant.parse("2018-08-30T14:53:38Z"))); assertThat(Files.getLastModifiedTime(destination.resolve("my-zip/some/sub"))) .isEqualTo(FileTime.from(Instant.parse("2018-08-30T14:53:38Z"))); assertThat(Files.getLastModifiedTime(destination.resolve("my-zip/some/sub/folder"))) .isEqualTo(FileTime.from(Instant.parse("2018-08-30T15:16:11Z"))); assertThat(Files.getLastModifiedTime(destination.resolve("my-zip/some/sub/folder/file3.txt"))) .isEqualTo(FileTime.from(Instant.parse("2018-08-30T15:16:12Z"))); }
public static String stringifyException(final Throwable e) { if (e == null) { return STRINGIFIED_NULL_EXCEPTION; } try { StringWriter stm = new StringWriter(); PrintWriter wrt = new PrintWriter(stm); e.printStackTrace(wrt); wrt.close(); return stm.toString(); } catch (Throwable t) { return e.getClass().getName() + " (error while printing stack trace)"; } }
@Test void testStringifyNullException() { assertThat(ExceptionUtils.STRINGIFIED_NULL_EXCEPTION) .isEqualTo(ExceptionUtils.stringifyException(null)); }
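A complementary usage sketch for the non-null path of stringifyException; the exception message and the AssertJ-style contains check are illustrative and not taken from the source test suite:

// Hedged sketch: the stringified form of a real exception should contain its class name and message.
String stringified = ExceptionUtils.stringifyException(new RuntimeException("boom"));
assertThat(stringified).contains("java.lang.RuntimeException: boom");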
public static void initializeBootstrapProperties( Properties properties, Optional<String> bootstrapServer, Optional<String> bootstrapControllers ) { if (bootstrapServer.isPresent()) { if (bootstrapControllers.isPresent()) { throw new InitializeBootstrapException("You cannot specify both " + "--bootstrap-controller and --bootstrap-server."); } properties.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer.get()); properties.remove(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG); } else if (bootstrapControllers.isPresent()) { properties.remove(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); properties.setProperty(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, bootstrapControllers.get()); } else { throw new InitializeBootstrapException("You must specify either --bootstrap-controller " + "or --bootstrap-server."); } }
@Test public void testInitializeBootstrapPropertiesWithControllerBootstrap() { Properties props = createTestProps(); CommandLineUtils.initializeBootstrapProperties(props, Optional.empty(), Optional.of("127.0.0.2:9094")); assertNull(props.getProperty("bootstrap.servers")); assertEquals("127.0.0.2:9094", props.getProperty("bootstrap.controllers")); }
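The test above calls a createTestProps() helper that is not shown in this pair. A minimal sketch of what it could look like, assuming it only seeds bootstrap.servers so the controller branch has a property to remove; the seeded address is a placeholder, not taken from the source:

// Hypothetical helper assumed by the test: pre-populates bootstrap.servers (java.util.Properties)
// so that initializeBootstrapProperties(..., bootstrapControllers) is forced to remove it.
private static Properties createTestProps() {
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    return props;
}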
public static final void saveAttributesMap( DataNode dataNode, AttributesInterface attributesInterface ) throws KettleException { saveAttributesMap( dataNode, attributesInterface, NODE_ATTRIBUTE_GROUPS ); }
@Test public void testSaveAttributesMap_DefaultTag() throws Exception { try( MockedStatic<AttributesMapUtil> mockedAttributesMapUtil = mockStatic( AttributesMapUtil.class) ) { mockedAttributesMapUtil.when( () -> AttributesMapUtil.saveAttributesMap( any( DataNode.class ), any( AttributesInterface.class ) ) ).thenCallRealMethod(); mockedAttributesMapUtil.when( () -> AttributesMapUtil.saveAttributesMap( any( DataNode.class ), any( AttributesInterface.class ), anyString() ) ).thenCallRealMethod(); JobEntryCopy jobEntryCopy = new JobEntryCopy(); jobEntryCopy.setAttributesMap( new HashMap<>() ); jobEntryCopy.setAttributes( A_GROUP, new HashMap<>() ); jobEntryCopy.setAttribute( A_GROUP, A_KEY, A_VALUE ); DataNode dataNode = new DataNode( CNST_DUMMY ); AttributesMapUtil.saveAttributesMap( dataNode, jobEntryCopy ); assertNotNull( dataNode.getNode( AttributesMapUtil.NODE_ATTRIBUTE_GROUPS ) ); assertNotNull( dataNode.getNode( AttributesMapUtil.NODE_ATTRIBUTE_GROUPS ).getNode( A_GROUP ) ); assertNotNull( dataNode.getNode( AttributesMapUtil.NODE_ATTRIBUTE_GROUPS ).getNode( A_GROUP ).getProperty( A_KEY ) ); assertEquals( A_VALUE, dataNode.getNode( AttributesMapUtil.NODE_ATTRIBUTE_GROUPS ).getNode( A_GROUP ).getProperty( A_KEY ) .getString() ); } }
public static ProxyBackendHandler newInstance(final DatabaseType databaseType, final String sql, final SQLStatement sqlStatement, final ConnectionSession connectionSession, final HintValueContext hintValueContext) throws SQLException { if (sqlStatement instanceof EmptyStatement) { return new SkipBackendHandler(sqlStatement); } SQLStatementContext sqlStatementContext = sqlStatement instanceof DistSQLStatement ? new DistSQLStatementContext((DistSQLStatement) sqlStatement) : new SQLBindEngine(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData(), connectionSession.getCurrentDatabaseName(), hintValueContext).bind(sqlStatement, Collections.emptyList()); QueryContext queryContext = new QueryContext(sqlStatementContext, sql, Collections.emptyList(), hintValueContext, connectionSession.getConnectionContext(), ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData()); connectionSession.setQueryContext(queryContext); return newInstance(databaseType, queryContext, connectionSession, false); }
@Disabled("FIXME") @Test void assertNewInstanceWithQuery() throws SQLException { String sql = "SELECT * FROM t_order limit 1"; ProxyContext proxyContext = ProxyContext.getInstance(); when(proxyContext.getAllDatabaseNames()).thenReturn(new HashSet<>(Collections.singletonList("db"))); when(proxyContext.getContextManager().getDatabase("db").containsDataSource()).thenReturn(true); SQLStatement sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession); ProxyBackendHandler actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext()); assertThat(actual, instanceOf(DatabaseConnector.class)); sql = "SELECT * FROM information_schema.schemata LIMIT 1"; sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession); actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext()); assertThat(actual, instanceOf(DatabaseAdminQueryBackendHandler.class)); }
@Override public int getUnsignedMedium(int index) { checkIndex(index, 3); return _getUnsignedMedium(index); }
@Test public void testGetUnsignedMediumAfterRelease() { assertThrows(IllegalReferenceCountException.class, new Executable() { @Override public void execute() { releasedBuffer().getUnsignedMedium(0); } }); }
public static double[] toDoubleArray(String name, Object value) { try { if (value instanceof BigDecimal[]) { return Arrays.stream((BigDecimal[]) value).mapToDouble(BigDecimal::doubleValue).toArray(); } else if (value instanceof double[]) { return (double[]) value; } else if (value instanceof List) { return ((List<?>) value) .stream().mapToDouble(d -> new BigDecimal(String.valueOf(d)).doubleValue()).toArray(); } else { throw new MaestroInternalError( "Param [%s] has an invalid evaluated result [%s]", name, toTruncateString(value)); } } catch (NumberFormatException nfe) { throw new MaestroInternalError( nfe, "Invalid number format for evaluated result: %s for param [%s]", toTruncateString(value), name); } }
@Test public void testInvalidToDoubleArray() { AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Invalid number format for evaluated result: [true, 5.6]", () -> ParamHelper.toDoubleArray("foo", Arrays.asList(true, 5.6))); AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Invalid number format for evaluated result: [3.4abc, 5.6]", () -> ParamHelper.toDoubleArray("foo", Arrays.asList("3.4abc", 5.6))); AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Invalid number format for evaluated result: [null, 5.6]", () -> ParamHelper.toDoubleArray("foo", Arrays.asList(null, 5.6))); AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Param [foo] has an invalid evaluated result [null]", () -> ParamHelper.toDoubleArray("foo", null)); }
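AssertHelper.assertThrows is a project-local utility rather than a JUnit or AssertJ method. A hedged sketch of the contract the test appears to rely on, assuming it checks the exception type and that the message contains the given fragment; the parameter order mirrors the calls above, everything else is an assumption:

// Hypothetical shape of AssertHelper.assertThrows: run the action, expect the given
// exception type, and verify the message contains the expected fragment.
public static void assertThrows(
    String description, Class<? extends Throwable> expectedType, String expectedMessageFragment, Runnable action) {
  try {
    action.run();
  } catch (Throwable t) {
    if (!expectedType.isInstance(t)) {
      throw new AssertionError(description + ": unexpected exception type " + t.getClass().getName(), t);
    }
    if (t.getMessage() == null || !t.getMessage().contains(expectedMessageFragment)) {
      throw new AssertionError(description + ": unexpected message [" + t.getMessage() + "]", t);
    }
    return;
  }
  throw new AssertionError(description + ": expected " + expectedType.getName() + " to be thrown");
}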
@SuppressWarnings("unchecked") @Override public <T> Attribute<T> attr(AttributeKey<T> key) { ObjectUtil.checkNotNull(key, "key"); DefaultAttribute newAttribute = null; for (;;) { final DefaultAttribute[] attributes = this.attributes; final int index = searchAttributeByKey(attributes, key); final DefaultAttribute[] newAttributes; if (index >= 0) { final DefaultAttribute attribute = attributes[index]; assert attribute.key() == key; if (!attribute.isRemoved()) { return attribute; } // let's try replace the removed attribute with a new one if (newAttribute == null) { newAttribute = new DefaultAttribute<T>(this, key); } final int count = attributes.length; newAttributes = Arrays.copyOf(attributes, count); newAttributes[index] = newAttribute; } else { if (newAttribute == null) { newAttribute = new DefaultAttribute<T>(this, key); } final int count = attributes.length; newAttributes = new DefaultAttribute[count + 1]; orderedCopyOnInsert(attributes, count, newAttributes, newAttribute); } if (ATTRIBUTES_UPDATER.compareAndSet(this, attributes, newAttributes)) { return newAttribute; } } }
@Test public void testGetSetInt() { AttributeKey<Integer> key = AttributeKey.valueOf("Nada"); Attribute<Integer> one = map.attr(key); assertSame(one, map.attr(key)); one.setIfAbsent(3653); assertEquals(Integer.valueOf(3653), one.get()); one.setIfAbsent(1); assertNotSame(1, one.get()); one.remove(); assertNull(one.get()); }
public static <R> R callConstructor( Class<? extends R> clazz, ClassParameter<?>... classParameters) { perfStatsCollector.incrementCount("ReflectionHelpers.callConstructor-" + clazz.getName()); try { final Class<?>[] classes = ClassParameter.getClasses(classParameters); final Object[] values = ClassParameter.getValues(classParameters); Constructor<? extends R> constructor = clazz.getDeclaredConstructor(classes); constructor.setAccessible(true); return constructor.newInstance(values); } catch (InstantiationException e) { throw new RuntimeException("error instantiating " + clazz.getName(), e); } catch (InvocationTargetException e) { if (e.getTargetException() instanceof RuntimeException) { throw (RuntimeException) e.getTargetException(); } if (e.getTargetException() instanceof Error) { throw (Error) e.getTargetException(); } throw new RuntimeException(e.getTargetException()); } catch (Exception e) { throw new RuntimeException(e); } }
@Test public void callConstructorReflectively_rethrowsUncheckedException() { try { ReflectionHelpers.callConstructor(ThrowsUncheckedException.class); fail("Expected exception not thrown"); } catch (TestRuntimeException e) { } catch (RuntimeException e) { throw new RuntimeException("Incorrect exception thrown", e); } }
public static Pair<DataSchema, int[]> getResultTableDataSchemaAndColumnIndices(QueryContext queryContext, DataSchema dataSchema) { List<ExpressionContext> selectExpressions = queryContext.getSelectExpressions(); int numSelectExpressions = selectExpressions.size(); ColumnDataType[] columnDataTypesInDataSchema = dataSchema.getColumnDataTypes(); int numColumnsInDataSchema = columnDataTypesInDataSchema.length; // No order-by expression // NOTE: Order-by expressions are ignored for queries with LIMIT 0. List<OrderByExpressionContext> orderByExpressions = queryContext.getOrderByExpressions(); if (orderByExpressions == null || queryContext.getLimit() == 0) { // For 'SELECT *', use the server response data schema as the final results data schema. if ((numSelectExpressions == 1 && selectExpressions.get(0).equals(IDENTIFIER_STAR))) { int[] columnIndices = new int[numColumnsInDataSchema]; for (int i = 0; i < numColumnsInDataSchema; i++) { columnIndices[i] = i; } return Pair.of(dataSchema, columnIndices); } // For select without duplicate columns, the order of the final selection columns is the same as the order of the // columns in the data schema. if (numSelectExpressions == numColumnsInDataSchema) { String[] columnNames = new String[numSelectExpressions]; int[] columnIndices = new int[numSelectExpressions]; for (int i = 0; i < numSelectExpressions; i++) { columnNames[i] = selectExpressions.get(i).toString(); columnIndices[i] = i; } return Pair.of(new DataSchema(columnNames, columnDataTypesInDataSchema), columnIndices); } // For select with duplicate columns, construct a map from expression to index with the same order as the data // schema, then look up the selection expressions. Object2IntOpenHashMap<ExpressionContext> expressionIndexMap = new Object2IntOpenHashMap<>(numColumnsInDataSchema); for (ExpressionContext selectExpression : selectExpressions) { expressionIndexMap.putIfAbsent(selectExpression, expressionIndexMap.size()); } Preconditions.checkState(expressionIndexMap.size() == numColumnsInDataSchema, "BUG: Expect same number of deduped columns in SELECT clause and in data schema, got %s before dedup and %s" + " after dedup in SELECT clause, %s in data schema", numSelectExpressions, expressionIndexMap.size(), numColumnsInDataSchema); String[] columnNames = new String[numSelectExpressions]; ColumnDataType[] columnDataTypes = new ColumnDataType[numSelectExpressions]; int[] columnIndices = new int[numSelectExpressions]; for (int i = 0; i < numSelectExpressions; i++) { ExpressionContext selectExpression = selectExpressions.get(i); int columnIndex = expressionIndexMap.getInt(selectExpression); columnNames[i] = selectExpression.toString(); columnDataTypes[i] = columnDataTypesInDataSchema[columnIndex]; columnIndices[i] = columnIndex; } return Pair.of(new DataSchema(columnNames, columnDataTypes), columnIndices); } // For 'SELECT *' with order-by, exclude transform functions from the returned columns and sort. 
if (numSelectExpressions == 1 && selectExpressions.get(0).equals(IDENTIFIER_STAR)) { String[] columnNamesInDataSchema = dataSchema.getColumnNames(); List<Integer> columnIndexList = new ArrayList<>(columnNamesInDataSchema.length); for (int i = 0; i < columnNamesInDataSchema.length; i++) { if (columnNamesInDataSchema[i].indexOf('(') == -1) { columnIndexList.add(i); } } columnIndexList.sort(Comparator.comparing(o -> columnNamesInDataSchema[o])); int numColumns = columnIndexList.size(); String[] columnNames = new String[numColumns]; ColumnDataType[] columnDataTypes = new ColumnDataType[numColumns]; int[] columnIndices = new int[numColumns]; for (int i = 0; i < numColumns; i++) { int columnIndex = columnIndexList.get(i); columnNames[i] = columnNamesInDataSchema[columnIndex]; columnDataTypes[i] = columnDataTypesInDataSchema[columnIndex]; columnIndices[i] = columnIndex; } return Pair.of(new DataSchema(columnNames, columnDataTypes), columnIndices); } // For other order-by queries, construct a map from expression to index with the same order as the data schema, // then look up the selection expressions. Object2IntOpenHashMap<ExpressionContext> expressionIndexMap = new Object2IntOpenHashMap<>(numColumnsInDataSchema); // NOTE: Order-by expressions are already deduped in QueryContext. for (OrderByExpressionContext orderByExpression : orderByExpressions) { expressionIndexMap.put(orderByExpression.getExpression(), expressionIndexMap.size()); } for (ExpressionContext selectExpression : selectExpressions) { expressionIndexMap.putIfAbsent(selectExpression, expressionIndexMap.size()); } String[] columnNames = new String[numSelectExpressions]; ColumnDataType[] columnDataTypes = new ColumnDataType[numSelectExpressions]; int[] columnIndices = new int[numSelectExpressions]; if (expressionIndexMap.size() == numColumnsInDataSchema) { for (int i = 0; i < numSelectExpressions; i++) { ExpressionContext selectExpression = selectExpressions.get(i); int columnIndex = expressionIndexMap.getInt(selectExpression); columnNames[i] = selectExpression.toString(); columnDataTypes[i] = columnDataTypesInDataSchema[columnIndex]; columnIndices[i] = columnIndex; } } else { // When all segments are pruned on the server side, the data schema will only contain the columns in the SELECT // clause, and data type for all columns are set to STRING. See ResultBlocksUtils for details. for (int i = 0; i < numSelectExpressions; i++) { columnNames[i] = selectExpressions.get(i).toString(); columnDataTypes[i] = ColumnDataType.STRING; columnIndices[i] = i; } } return Pair.of(new DataSchema(columnNames, columnDataTypes), columnIndices); }
@Test public void testGetResultTableColumnIndices() { // Select * without order-by QueryContext queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable"); DataSchema dataSchema = new DataSchema(new String[]{"col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE }); Pair<DataSchema, int[]> pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{0, 1, 2}); // Select * without order-by, all the segments are pruned on the server side dataSchema = new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING}); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING})); // Select * with order-by but LIMIT 0 queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable ORDER BY col1 LIMIT 0"); dataSchema = new DataSchema(new String[]{"col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{0, 1, 2}); // Select * with order-by but LIMIT 0, all the segments are pruned on the server side dataSchema = new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING}); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING})); // Select columns without order-by queryContext = QueryContextConverterUtils.getQueryContext("SELECT col1 + 1, col2 + 2 FROM testTable"); // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col1+1)", "add(col2+2)"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.DOUBLE }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "plus(col2,'2')"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{0, 1}); // Select columns without order-by, all the segments are pruned on the server side // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col1+1)", "add(col2+2)"}, new ColumnDataType[]{ ColumnDataType.STRING, ColumnDataType.STRING }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "plus(col2,'2')"}, new ColumnDataType[]{ ColumnDataType.STRING, ColumnDataType.STRING })); // Select duplicate columns without order-by queryContext = QueryContextConverterUtils.getQueryContext("SELECT col1 + 1, col2 + 2, col1 + 1 FROM testTable"); // Intentionally make data schema not matching the 
string representation of the expression dataSchema = new DataSchema(new String[]{"add(col1+1)", "add(col2+2)"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.DOUBLE }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "plus(col2,'2')", "plus(col1,'1')"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{0, 1, 0}); // Select duplicate columns without order-by, all the segments are pruned on the server side // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col1+1)", "add(col2+2)", "add(col1+1)"}, new ColumnDataType[]{ ColumnDataType.STRING, ColumnDataType.STRING, ColumnDataType.STRING }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "plus(col2,'2')", "plus(col1,'1')"}, new ColumnDataType[]{ ColumnDataType.STRING, ColumnDataType.STRING, ColumnDataType.STRING })); // Select * with order-by queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable ORDER BY col3"); dataSchema = new DataSchema(new String[]{"col3", "col1", "col2"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.INT, ColumnDataType.LONG }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{1, 2, 0}); // Select * with order-by, all the segments are pruned on the server side dataSchema = new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING}); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING})); // Select * ordering on function queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable ORDER BY col1 + col2"); // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col1+col2)", "col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{1, 2, 3}); // Select * ordering on function, all the segments are pruned on the server side dataSchema = new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING}); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING})); // Select * ordering on both column and function queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable ORDER BY col1 + col2, col2"); // Intentionally make data schema not matching the string representation of 
the expression dataSchema = new DataSchema(new String[]{"add(col1+col2)", "col2", "col1", "col3"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.LONG, ColumnDataType.INT, ColumnDataType.DOUBLE }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"col1", "col2", "col3"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{2, 1, 3}); // Select * ordering on both column and function, all the segments are pruned on the server side dataSchema = new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING}); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"*"}, new ColumnDataType[]{ColumnDataType.STRING})); // Select columns with order-by queryContext = QueryContextConverterUtils.getQueryContext( "SELECT col1 + 1, col3, col2 + 2 FROM testTable ORDER BY col2 + 2, col4"); // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col2+2)", "col4", "add(col1+1)", "col3"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.STRING, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "col3", "plus(col2,'2')"}, new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE})); assertEquals(pair.getRight(), new int[]{2, 3, 0}); // Select columns with order-by, all the segments are pruned on the server side // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col1+1)", "col3", "add(col2+2)"}, new ColumnDataType[]{ ColumnDataType.STRING, ColumnDataType.STRING, ColumnDataType.STRING }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "col3", "plus(col2,'2')"}, new ColumnDataType[]{ColumnDataType.STRING, ColumnDataType.STRING, ColumnDataType.STRING})); // Select duplicate columns with order-by queryContext = QueryContextConverterUtils.getQueryContext( "SELECT col1 + 1, col2 + 2, col1 + 1 FROM testTable ORDER BY col2 + 2, col4"); // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col2+2)", "col4", "add(col1+1)"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.STRING, ColumnDataType.DOUBLE }); pair = SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "plus(col2,'2')", "plus(col1,'1')"}, new ColumnDataType[]{ ColumnDataType.DOUBLE, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE })); assertEquals(pair.getRight(), new int[]{2, 0, 2}); // Select duplicate columns with order-by, all the segments are pruned on the server side // Intentionally make data schema not matching the string representation of the expression dataSchema = new DataSchema(new String[]{"add(col1+1)", "add(col2+2)", "add(col1+1)"}, new ColumnDataType[]{ ColumnDataType.STRING, ColumnDataType.STRING, ColumnDataType.STRING }); pair = 
SelectionOperatorUtils.getResultTableDataSchemaAndColumnIndices(queryContext, dataSchema); assertEquals(pair.getLeft(), new DataSchema(new String[]{"plus(col1,'1')", "plus(col2,'2')", "plus(col1,'1')"}, new ColumnDataType[]{ ColumnDataType.STRING, ColumnDataType.STRING, ColumnDataType.STRING })); }
boolean sendRecords() { int processed = 0; recordBatch(toSend.size()); final SourceRecordWriteCounter counter = toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup); for (final SourceRecord preTransformRecord : toSend) { ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord); final SourceRecord record = transformationChain.apply(context, preTransformRecord); final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record); if (producerRecord == null || context.failed()) { counter.skipRecord(); recordDropped(preTransformRecord); processed++; continue; } log.trace("{} Appending record to the topic {} with key {}, value {}", this, record.topic(), record.key(), record.value()); Optional<SubmittedRecords.SubmittedRecord> submittedRecord = prepareToSendRecord(preTransformRecord, producerRecord); try { final String topic = producerRecord.topic(); maybeCreateTopic(topic); producer.send( producerRecord, (recordMetadata, e) -> { if (e != null) { if (producerClosed) { log.trace("{} failed to send record to {}; this is expected as the producer has already been closed", AbstractWorkerSourceTask.this, topic, e); } else { log.error("{} failed to send record to {}: ", AbstractWorkerSourceTask.this, topic, e); } log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord); producerSendFailed(context, false, producerRecord, preTransformRecord, e); if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) { counter.skipRecord(); submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack); } } else { counter.completeRecord(); log.trace("{} Wrote record successfully: topic {} partition {} offset {}", AbstractWorkerSourceTask.this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset()); recordSent(preTransformRecord, producerRecord, recordMetadata); submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack); if (topicTrackingEnabled) { recordActiveTopic(producerRecord.topic()); } } }); // Note that this will cause retries to take place within a transaction } catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) { log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ", this, producerRecord.topic(), producerRecord.partition(), e); toSend = toSend.subList(processed, toSend.size()); submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop); counter.retryRemaining(); return false; } catch (ConnectException e) { log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ", this, producerRecord.topic(), producerRecord.partition(), e); log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e); throw e; } catch (KafkaException e) { producerSendFailed(context, true, producerRecord, preTransformRecord, e); } processed++; recordDispatched(preTransformRecord); } toSend = null; batchDispatched(); return true; }
@Test public void testSendRecordsRetriableException() { createWorkerTask(); SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); SourceRecord record3 = new SourceRecord(PARTITION, OFFSET, TOPIC, 3, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); when(transformationChain.apply(any(), eq(record1))).thenReturn(null); when(transformationChain.apply(any(), eq(record2))).thenReturn(null); when(transformationChain.apply(any(), eq(record3))).thenReturn(record3); TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); when(producer.send(any(), any())).thenThrow(new RetriableException("Retriable exception")).thenReturn(null); workerTask.toSend = Arrays.asList(record1, record2, record3); // The first two records are filtered out / dropped by the transformation chain; only the third record will be attempted to be sent. // The producer throws a RetriableException the first time we try to send the third record assertFalse(workerTask.sendRecords()); // The next attempt to send the third record should succeed assertTrue(workerTask.sendRecords()); // Ensure that the first two records that were filtered out by the transformation chain // aren't re-processed when we retry the call to sendRecords() verify(transformationChain, times(1)).apply(any(), eq(record1)); verify(transformationChain, times(1)).apply(any(), eq(record2)); verify(transformationChain, times(2)).apply(any(), eq(record3)); }
@Override public Double getLocalValue() { if (this.count == 0) { return 0.0; } return this.sum / this.count; }
@Test void testGet() { AverageAccumulator average = new AverageAccumulator(); assertThat(average.getLocalValue()).isCloseTo(0.0, within(0.0)); }
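The test above only covers the empty-accumulator branch. A small complementary sketch of the non-empty path, assuming AverageAccumulator exposes an add(...) method as in the Flink accumulator API; this check is illustrative, not part of the source test:

// Hedged sketch: after three values the local value should be their average.
AverageAccumulator average = new AverageAccumulator();
average.add(1.0);
average.add(2.0);
average.add(3.0);
assertThat(average.getLocalValue()).isCloseTo(2.0, within(1e-9));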
@Override public BulkOperationResponse executeBulkOperation(final BulkOperationRequest bulkOperationRequest, final C userContext, final AuditParams params) { if (bulkOperationRequest.entityIds() == null || bulkOperationRequest.entityIds().isEmpty()) { throw new BadRequestException(NO_ENTITY_IDS_ERROR); } List<BulkOperationFailure> capturedFailures = new LinkedList<>(); for (String entityId : bulkOperationRequest.entityIds()) { try { T entityModel = singleEntityOperationExecutor.execute(entityId, userContext); try { if (params != null) { auditEventSender.success(getAuditActor(userContext), params.eventType(), successAuditLogContextCreator.create(entityModel, params.entityClass())); } } catch (Exception auditLogStoreException) { //exception on audit log storing should not result in failure report, as the operation itself is successful LOG.error("Failed to store in the audit log information about successful entity removal via bulk action ", auditLogStoreException); } } catch (Exception ex) { capturedFailures.add(new BulkOperationFailure(entityId, ex.getMessage())); try { if (params != null) { auditEventSender.failure(getAuditActor(userContext), params.eventType(), failureAuditLogContextCreator.create(params.entityIdInPathParam(), entityId)); } } catch (Exception auditLogStoreException) { //exception on audit log storing should not result in failure report, as the operation itself is successful LOG.error("Failed to store in the audit log information about failed entity removal via bulk action ", auditLogStoreException); } } } return new BulkOperationResponse( bulkOperationRequest.entityIds().size() - capturedFailures.size(), capturedFailures); }
@Test void throwsBadRequestExceptionOnEmptyEntityIdsList() { assertThrows(BadRequestException.class, () -> toTest.executeBulkOperation(new BulkOperationRequest(List.of()), context, params), NO_ENTITY_IDS_ERROR); }
@Override public Object toConnectRow(final Object ksqlData) { final Object compatible = ConnectSchemas.withCompatibleSchema(avroCompatibleSchema, ksqlData); return innerTranslator.toConnectRow(compatible); }
@Test public void shouldUseExplicitSchemaName() { // Given: final Schema schema = SchemaBuilder.struct() .field("COLUMN_NAME", Schema.OPTIONAL_INT64_SCHEMA) .optional() .build(); final String schemaFullName = "com.custom.schema"; final AvroDataTranslator dataTranslator = new AvroDataTranslator(schema, schemaFullName); final Struct ksqlRow = new Struct(schema) .put("COLUMN_NAME", 123L); // When: final Struct struct = (Struct)dataTranslator.toConnectRow(ksqlRow); // Then: assertThat(struct.schema().name(), equalTo(schemaFullName)); }
public static Predicate parse(String expression) { final Stack<Predicate> predicateStack = new Stack<>(); final Stack<Character> operatorStack = new Stack<>(); final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll(""); final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true); boolean isTokenMode = true; while (true) { final Character operator; final String token; if (isTokenMode) { if (tokenizer.hasMoreTokens()) { token = tokenizer.nextToken(); } else { break; } if (OPERATORS.contains(token)) { operator = token.charAt(0); } else { operator = null; } } else { operator = operatorStack.pop(); token = null; } isTokenMode = true; if (operator == null) { try { predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance()); } catch (ClassCastException e) { throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e); } catch (Exception e) { throw new RuntimeException(e); } } else { if (operatorStack.empty() || operator == '(') { operatorStack.push(operator); } else if (operator == ')') { while (operatorStack.peek() != '(') { evaluate(predicateStack, operatorStack); } operatorStack.pop(); } else { if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) { evaluate(predicateStack, operatorStack); isTokenMode = false; } operatorStack.push(operator); } } } while (!operatorStack.empty()) { evaluate(predicateStack, operatorStack); } if (predicateStack.size() > 1) { throw new RuntimeException("Invalid logical expression"); } return predicateStack.pop(); }
@Test public void testAndAnd() { final Predicate parsed = PredicateExpressionParser.parse("com.linkedin.data.it.AlwaysTruePredicate & com.linkedin.data.it.AlwaysTruePredicate & com.linkedin.data.it.AlwaysFalsePredicate"); Assert.assertEquals(parsed.getClass(), AndPredicate.class); final List<Predicate> children = ((AndPredicate) parsed).getChildPredicates(); Assert.assertEquals(children.get(0).getClass(), AlwaysTruePredicate.class); Assert.assertEquals(children.get(1).getClass(), AlwaysTruePredicate.class); Assert.assertEquals(children.get(2).getClass(), AlwaysFalsePredicate.class); }
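parse(...) above reduces its stacks through an evaluate(predicateStack, operatorStack) helper that is not included in this pair. A hedged sketch of one plausible reduction step, assuming AndPredicate/OrPredicate can be built from a list of children (consistent with the getChildPredicates() accessor used in the test, which expects chained '&' operands to be flattened into one predicate) and that NotPredicate wraps a single child; these constructors are assumptions, not taken from the source:

// Assumed reduction step: pop one operator and combine the predicate(s) it applies to.
private static void evaluate(Stack<Predicate> predicateStack, Stack<Character> operatorStack) {
  char operator = operatorStack.pop();
  if (operator == '!') {
    predicateStack.push(new NotPredicate(predicateStack.pop()));
    return;
  }
  Predicate right = predicateStack.pop();
  Predicate left = predicateStack.pop();
  List<Predicate> children = new ArrayList<>();
  if (operator == '&' && left instanceof AndPredicate) {
    // Flatten chained ANDs so "a & b & c" yields one AndPredicate with three children,
    // matching the expectation in the test above.
    children.addAll(((AndPredicate) left).getChildPredicates());
  } else {
    children.add(left);
  }
  children.add(right);
  predicateStack.push(operator == '&' ? new AndPredicate(children) : new OrPredicate(children));
}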
@Override public void addTask(Object key, AbstractDelayTask newTask) { super.addTask(key, newTask); MetricsMonitor.getDumpTaskMonitor().set(tasks.size()); }
@Test void testRemoveProcessor() throws InterruptedException { when(taskProcessor.process(abstractTask)).thenReturn(true); taskManager.addProcessor("test", testTaskProcessor); taskManager.removeProcessor("test"); taskManager.addTask("test", abstractTask); TimeUnit.MILLISECONDS.sleep(200); verify(testTaskProcessor, never()).process(abstractTask); verify(taskProcessor).process(abstractTask); }
@Override public void onActivityCreated(Activity activity, Bundle bundle) { }
@Test public void onActivityCreated() { mActivityLifecycle.onActivityCreated(mActivity, null); }
public PipelineOptions get() { return options; }
@Test public void testSerializationAndDeserialization() throws Exception { PipelineOptions options = PipelineOptionsFactory.fromArgs("--foo=testValue", "--ignoredField=overridden") .as(MyOptions.class); SerializablePipelineOptions serializableOptions = new SerializablePipelineOptions(options); assertEquals("testValue", serializableOptions.get().as(MyOptions.class).getFoo()); assertEquals("overridden", serializableOptions.get().as(MyOptions.class).getIgnoredField()); SerializablePipelineOptions copy = SerializableUtils.clone(serializableOptions); assertEquals("testValue", copy.get().as(MyOptions.class).getFoo()); assertEquals("not overridden", copy.get().as(MyOptions.class).getIgnoredField()); }
public static void removeDupes( final List<CharSequence> suggestions, List<CharSequence> stringsPool) { if (suggestions.size() < 2) return; int i = 1; // Don't cache suggestions.size(), since we may be removing items while (i < suggestions.size()) { final CharSequence cur = suggestions.get(i); // Compare each suggestion with each previous suggestion for (int j = 0; j < i; j++) { CharSequence previous = suggestions.get(j); if (TextUtils.equals(cur, previous)) { removeSuggestion(suggestions, i, stringsPool); i--; break; } } i++; } }
@Test public void testRemoveDupesOneItemTwoTypes() throws Exception { ArrayList<CharSequence> list = new ArrayList<>(Arrays.<CharSequence>asList("typed", "something")); IMEUtil.removeDupes(list, mStringPool); Assert.assertEquals(2, list.size()); Assert.assertEquals("typed", list.get(0)); Assert.assertEquals("something", list.get(1)); }
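removeDupes delegates to a removeSuggestion(suggestions, index, stringsPool) helper that is not part of this pair. A minimal sketch under the assumption that the pool exists to recycle StringBuilder-backed suggestions; only the call signature comes from the code above, the recycling rule is an assumption:

// Assumed helper: drop the duplicate at the given index and, if it is backed by a
// StringBuilder, reset it and return it to the pool for reuse.
private static void removeSuggestion(
    List<CharSequence> suggestions, int index, List<CharSequence> stringsPool) {
  CharSequence removed = suggestions.remove(index);
  if (removed instanceof StringBuilder) {
    StringBuilder builder = (StringBuilder) removed;
    builder.setLength(0);
    stringsPool.add(builder);
  }
}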
@Override public void init(File dataFile, @Nullable Set<String> fieldsToRead, @Nullable RecordReaderConfig recordReaderConfig) throws IOException { _dataFile = dataFile; CSVRecordReaderConfig config = (CSVRecordReaderConfig) recordReaderConfig; Character multiValueDelimiter = null; if (config == null) { _format = CSVFormat.DEFAULT.builder().setDelimiter(CSVRecordReaderConfig.DEFAULT_DELIMITER).setHeader().build(); multiValueDelimiter = CSVRecordReaderConfig.DEFAULT_MULTI_VALUE_DELIMITER; } else { CSVFormat format; String formatString = config.getFileFormat(); if (formatString == null) { format = CSVFormat.DEFAULT; } else { switch (formatString.toUpperCase()) { case "EXCEL": format = CSVFormat.EXCEL; break; case "MYSQL": format = CSVFormat.MYSQL; break; case "RFC4180": format = CSVFormat.RFC4180; break; case "TDF": format = CSVFormat.TDF; break; default: format = CSVFormat.DEFAULT; break; } } char delimiter = config.getDelimiter(); format = format.builder().setDelimiter(delimiter).build(); if (config.isSkipUnParseableLines()) { _useLineIterator = true; } _isHeaderProvided = config.getHeader() != null; _skipHeaderRecord = config.isSkipHeader(); _format = format.builder() .setHeader() .setSkipHeaderRecord(config.isSkipHeader()) .setCommentMarker(config.getCommentMarker()) .setEscape(config.getEscapeCharacter()) .setIgnoreEmptyLines(config.isIgnoreEmptyLines()) .setIgnoreSurroundingSpaces(config.isIgnoreSurroundingSpaces()) .setQuote(config.getQuoteCharacter()) .build(); if (config.getQuoteMode() != null) { _format = _format.builder().setQuoteMode(QuoteMode.valueOf(config.getQuoteMode())).build(); } if (config.getRecordSeparator() != null) { _format = _format.builder().setRecordSeparator(config.getRecordSeparator()).build(); } String nullString = config.getNullStringValue(); if (nullString != null) { _format = _format.builder().setNullString(nullString).build(); } if (_isHeaderProvided) { _headerMap = parseLineAsHeader(config.getHeader()); _format = _format.builder().setHeader(_headerMap.keySet().toArray(new String[0])).build(); if (!_useLineIterator) { validateHeaderForDelimiter(delimiter, config.getHeader(), _format); } } if (config.isMultiValueDelimiterEnabled()) { multiValueDelimiter = config.getMultiValueDelimiter(); } } _recordExtractor = new CSVRecordExtractor(); init(); CSVRecordExtractorConfig recordExtractorConfig = new CSVRecordExtractorConfig(); recordExtractorConfig.setMultiValueDelimiter(multiValueDelimiter); recordExtractorConfig.setColumnNames(_headerMap.keySet()); _recordExtractor.init(fieldsToRead, recordExtractorConfig); }
@Test public void testInvalidDelimiterInHeader() { // setup CSVRecordReaderConfig csvRecordReaderConfig = new CSVRecordReaderConfig(); csvRecordReaderConfig.setMultiValueDelimiter(CSV_MULTI_VALUE_DELIMITER); csvRecordReaderConfig.setHeader("col1;col2;col3;col4;col5;col6;col7;col8;col9;col10"); csvRecordReaderConfig.setDelimiter(','); CSVRecordReader csvRecordReader = new CSVRecordReader(); //execute and assert Assert.assertThrows(IllegalArgumentException.class, () -> csvRecordReader.init(_dataFile, null, csvRecordReaderConfig)); }
@Override public Processor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>> get() { return new ContextualProcessor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>>() { private KTableValueGetter<KO, VO> foreignValues; @Override public void init(final ProcessorContext<K, SubscriptionResponseWrapper<VO>> context) { super.init(context); foreignValues = foreignValueGetterSupplier.get(); foreignValues.init(context); } @Override public void process(final Record<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> record) { Objects.requireNonNull(record.key(), "This processor should never see a null key."); Objects.requireNonNull(record.value(), "This processor should never see a null value."); final ValueAndTimestamp<SubscriptionWrapper<K>> valueAndTimestamp = record.value().newValue; Objects.requireNonNull(valueAndTimestamp, "This processor should never see a null newValue."); final SubscriptionWrapper<K> value = valueAndTimestamp.value(); if (value.getVersion() > SubscriptionWrapper.CURRENT_VERSION) { //Guard against modifications to SubscriptionWrapper. Need to ensure that there is compatibility //with previous versions to enable rolling upgrades. Must develop a strategy for upgrading //from older SubscriptionWrapper versions to newer versions. throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version."); } final ValueAndTimestamp<VO> foreignValueAndTime = record.key().getForeignKey() == null ? null : foreignValues.get(record.key().getForeignKey()); final long resultTimestamp = foreignValueAndTime == null ? valueAndTimestamp.timestamp() : Math.max(valueAndTimestamp.timestamp(), foreignValueAndTime.timestamp()); switch (value.getInstruction()) { case DELETE_KEY_AND_PROPAGATE: context().forward( record.withKey(record.key().getPrimaryKey()) .withValue(new SubscriptionResponseWrapper<VO>( value.getHash(), null, value.getPrimaryPartition() )) .withTimestamp(resultTimestamp) ); break; case PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE: //This one needs to go through regardless of LEFT or INNER join, since the extracted FK was //changed and there is no match for it. We must propagate the (key, null) to ensure that the //downstream consumers are alerted to this fact. final VO valueToSend = foreignValueAndTime == null ? null : foreignValueAndTime.value(); context().forward( record.withKey(record.key().getPrimaryKey()) .withValue(new SubscriptionResponseWrapper<>( value.getHash(), valueToSend, value.getPrimaryPartition() )) .withTimestamp(resultTimestamp) ); break; case PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE: if (foreignValueAndTime != null) { context().forward( record.withKey(record.key().getPrimaryKey()) .withValue(new SubscriptionResponseWrapper<>( value.getHash(), foreignValueAndTime.value(), value.getPrimaryPartition() )) .withTimestamp(resultTimestamp) ); } break; case DELETE_KEY_NO_PROPAGATE: break; default: throw new IllegalStateException("Unhandled instruction: " + value.getInstruction()); } } }; }
@Test public void shouldPropagateNullIfNoFKAvailableV0() { final MockProcessorContext<String, SubscriptionResponseWrapper<String>> context = new MockProcessorContext<>(); processor.init(context); final SubscriptionWrapper<String> newValue = new SubscriptionWrapper<>( new long[]{1L}, Instruction.PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, "pk1", SubscriptionWrapper.VERSION_0, null ); Record<CombinedKey<String, String>, Change<ValueAndTimestamp<SubscriptionWrapper<String>>>> record = new Record<>( new CombinedKey<>("fk1", "pk1"), new Change<>(ValueAndTimestamp.make(newValue, 1L), null), 1L ); processor.process(record); // propagate matched FK List<CapturedForward<? extends String, ? extends SubscriptionResponseWrapper<String>>> forwarded = context.forwarded(); assertEquals(1, forwarded.size()); assertEquals( new Record<>( "pk1", new SubscriptionResponseWrapper<>( newValue.getHash(), "foo", null ), 1L ), forwarded.get(0).record()); record = new Record<>( new CombinedKey<>("fk9000", "pk1"), new Change<>(ValueAndTimestamp.make(newValue, 1L), null), 1L ); processor.process(record); // propagate null if there is no match forwarded = context.forwarded(); assertEquals(2, forwarded.size()); assertEquals( new Record<>( "pk1", new SubscriptionResponseWrapper<>( newValue.getHash(), null, null ), 1L ), forwarded.get(1).record()); }
public static <T> Mono<Long> writeAll(Writer writer, Flux<T> values) throws IOException { return writeAll(DEFAULT_OBJECT_MAPPER, writer, values); }
@Test void writeAll_fromEmptySource() throws IOException { final Path outputTempFilePath = createTempFile(); final Long outputCount = FileSerde.writeAll(Files.newBufferedWriter(outputTempFilePath), Flux.empty()).block(); assertThat(outputCount, is(0L)); }
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) { if (statsEnabled) { stats.log(deviceStateServiceMsg); } stateService.onQueueMsg(deviceStateServiceMsg, callback); }
@Test public void givenStatsDisabled_whenForwardingDisconnectMsgToStateService_thenStatsAreNotRecorded() { // GIVEN ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "stats", statsMock); ReflectionTestUtils.setField(defaultTbCoreConsumerServiceMock, "statsEnabled", false); var disconnectMsg = TransportProtos.DeviceDisconnectProto.newBuilder() .setTenantIdMSB(tenantId.getId().getMostSignificantBits()) .setTenantIdLSB(tenantId.getId().getLeastSignificantBits()) .setDeviceIdMSB(deviceId.getId().getMostSignificantBits()) .setDeviceIdLSB(deviceId.getId().getLeastSignificantBits()) .setLastDisconnectTime(time) .build(); doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(disconnectMsg, tbCallbackMock); // WHEN defaultTbCoreConsumerServiceMock.forwardToStateService(disconnectMsg, tbCallbackMock); // THEN then(statsMock).should(never()).log(disconnectMsg); }
@Override public String lowerKey(String key) { return complete(treeMap.lowerKey(key)); }
@Test(expected = ConsistentMapException.Timeout.class) public void testTimeout() { ConsistentTreeMapWithError<String> consistentMap = new ConsistentTreeMapWithError<>(); consistentMap.setErrorState(TestingCompletableFutures.ErrorState.TIMEOUT_EXCEPTION); DefaultConsistentTreeMap<String> map = new DefaultConsistentTreeMap<>(consistentMap, 1000); map.lowerKey(KEY1); }
public synchronized boolean createIndex(String indexName) throws ElasticsearchResourceManagerException { LOG.info("Creating index using name '{}'.", indexName); try { // Check to see if the index exists if (indexExists(indexName)) { return false; } managedIndexNames.add(indexName); return elasticsearchClient .indices() .create(new CreateIndexRequest(indexName), RequestOptions.DEFAULT) .isAcknowledged(); } catch (Exception e) { throw new ElasticsearchResourceManagerException("Error creating index.", e); } }
@Test public void testCreateCollectionShouldThrowErrorWhenElasticsearchFailsToGetDB() throws IOException { when(elasticsearchClient .indices() .exists(any(GetIndexRequest.class), eq(RequestOptions.DEFAULT))) .thenThrow(IllegalArgumentException.class); assertThrows( ElasticsearchResourceManagerException.class, () -> testManager.createIndex(INDEX_NAME)); }
@Override public WidgetType findByTenantIdAndFqn(UUID tenantId, String fqn) { return DaoUtil.getData(widgetTypeRepository.findWidgetTypeByTenantIdAndFqn(tenantId, fqn)); }
@Test public void testFindByTenantIdAndFqn() { WidgetType result = widgetTypeList.get(0); assertNotNull(result); WidgetType widgetType = widgetTypeDao.findByTenantIdAndFqn(TenantId.SYS_TENANT_ID.getId(), "FQN_0"); assertEquals(result.getId(), widgetType.getId()); }
@Override public Algorithm getEncryption(final Path file) throws BackgroundException { if(containerService.isContainer(file)) { final String key = String.format("s3.encryption.key.%s", containerService.getContainer(file).getName()); if(StringUtils.isNotBlank(new HostPreferences(session.getHost()).getProperty(key))) { return Algorithm.fromString(new HostPreferences(session.getHost()).getProperty(key)); } } return super.getEncryption(file); }
@Test public void testSetEncryptionKMSDefaultKeySignatureVersionV4() throws Exception { final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory)); final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); final Path test = new S3TouchFeature(session, acl).touch(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); final S3EncryptionFeature feature = new S3EncryptionFeature(session, acl); feature.setEncryption(test, KMSEncryptionFeature.SSE_KMS_DEFAULT); final Encryption.Algorithm value = feature.getEncryption(test); assertEquals("aws:kms", value.algorithm); assertNotNull(value.key); final PathAttributes attr = new S3AttributesFinderFeature(session, acl).find(test); assertNotEquals(Checksum.NONE, attr.getChecksum()); assertNotNull(attr.getETag()); assertNotEquals(Checksum.NONE, Checksum.parse(attr.getETag())); // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted using SSE-S3. // If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is not the MD5 of the object data. assertNotEquals("d41d8cd98f00b204e9800998ecf8427e", Checksum.parse(attr.getETag()).hash); new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static CoordinatorRecord newConsumerGroupSubscriptionMetadataRecord( String groupId, Map<String, TopicMetadata> newSubscriptionMetadata ) { ConsumerGroupPartitionMetadataValue value = new ConsumerGroupPartitionMetadataValue(); newSubscriptionMetadata.forEach((topicName, topicMetadata) -> { List<ConsumerGroupPartitionMetadataValue.PartitionMetadata> partitionMetadata = new ArrayList<>(); // If the partition rack information map is empty, store an empty list in the record. if (!topicMetadata.partitionRacks().isEmpty()) { topicMetadata.partitionRacks().forEach((partition, racks) -> partitionMetadata.add(new ConsumerGroupPartitionMetadataValue.PartitionMetadata() .setPartition(partition) .setRacks(new ArrayList<>(racks)) ) ); } value.topics().add(new ConsumerGroupPartitionMetadataValue.TopicMetadata() .setTopicId(topicMetadata.id()) .setTopicName(topicMetadata.name()) .setNumPartitions(topicMetadata.numPartitions()) .setPartitionMetadata(partitionMetadata) ); }); return new CoordinatorRecord( new ApiMessageAndVersion( new ConsumerGroupPartitionMetadataKey() .setGroupId(groupId), (short) 4 ), new ApiMessageAndVersion( value, (short) 0 ) ); }
@Test public void testNewConsumerGroupSubscriptionMetadataRecord() { Uuid fooTopicId = Uuid.randomUuid(); Uuid barTopicId = Uuid.randomUuid(); Map<String, TopicMetadata> subscriptionMetadata = new LinkedHashMap<>(); subscriptionMetadata.put("foo", new TopicMetadata( fooTopicId, "foo", 10, mkMapOfPartitionRacks(10) )); subscriptionMetadata.put("bar", new TopicMetadata( barTopicId, "bar", 20, mkMapOfPartitionRacks(20) )); CoordinatorRecord expectedRecord = new CoordinatorRecord( new ApiMessageAndVersion( new ConsumerGroupPartitionMetadataKey() .setGroupId("group-id"), (short) 4 ), new ApiMessageAndVersion( new ConsumerGroupPartitionMetadataValue() .setTopics(Arrays.asList( new ConsumerGroupPartitionMetadataValue.TopicMetadata() .setTopicId(fooTopicId) .setTopicName("foo") .setNumPartitions(10) .setPartitionMetadata(mkListOfPartitionRacks(10)), new ConsumerGroupPartitionMetadataValue.TopicMetadata() .setTopicId(barTopicId) .setTopicName("bar") .setNumPartitions(20) .setPartitionMetadata(mkListOfPartitionRacks(20)))), (short) 0)); assertRecordEquals(expectedRecord, newConsumerGroupSubscriptionMetadataRecord( "group-id", subscriptionMetadata )); }
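mkMapOfPartitionRacks and mkListOfPartitionRacks are test helpers not included in this pair. A hedged sketch of the shape they would need, assuming each partition i is tagged with two placeholder racks; the exact rack names are an assumption, and the essential property is only that both helpers describe the same racks so the record built from the map equals the explicitly constructed expected value:

// Hypothetical helpers assumed by the test; both must agree on the racks per partition.
private static Map<Integer, Set<String>> mkMapOfPartitionRacks(int numPartitions) {
    Map<Integer, Set<String>> partitionRacks = new LinkedHashMap<>(numPartitions);
    for (int i = 0; i < numPartitions; i++) {
        partitionRacks.put(i, new LinkedHashSet<>(Arrays.asList("rack" + i, "rack" + (i + 1))));
    }
    return partitionRacks;
}

private static List<ConsumerGroupPartitionMetadataValue.PartitionMetadata> mkListOfPartitionRacks(int numPartitions) {
    List<ConsumerGroupPartitionMetadataValue.PartitionMetadata> partitionRacks = new ArrayList<>(numPartitions);
    for (int i = 0; i < numPartitions; i++) {
        partitionRacks.add(new ConsumerGroupPartitionMetadataValue.PartitionMetadata()
            .setPartition(i)
            .setRacks(Arrays.asList("rack" + i, "rack" + (i + 1))));
    }
    return partitionRacks;
}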
@Override public KTable<Windowed<K>, Long> count() { return count(NamedInternal.empty()); }
@Test public void shouldMaterializeCount() { windowedStream.count( Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("count-store") .withKeySerde(Serdes.String()) .withValueSerde(Serdes.Long())); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); { final WindowStore<String, Long> windowStore = driver.getWindowStore("count-store"); final List<KeyValue<Windowed<String>, Long>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), 1L), KeyValue.pair(new Windowed<>("1", new TimeWindow(50, 150)), 2L), KeyValue.pair(new Windowed<>("1", new TimeWindow(101, 201)), 1L), KeyValue.pair(new Windowed<>("1", new TimeWindow(400, 500)), 1L), KeyValue.pair(new Windowed<>("2", new TimeWindow(50, 150)), 1L), KeyValue.pair(new Windowed<>("2", new TimeWindow(100, 200)), 2L), KeyValue.pair(new Windowed<>("2", new TimeWindow(151, 251)), 1L)))); } { final WindowStore<String, ValueAndTimestamp<Long>> windowStore = driver.getTimestampedWindowStore("count-store"); final List<KeyValue<Windowed<String>, ValueAndTimestamp<Long>>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), ValueAndTimestamp.make(1L, 100L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(50, 150)), ValueAndTimestamp.make(2L, 150L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(101, 201)), ValueAndTimestamp.make(1L, 150L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(400, 500)), ValueAndTimestamp.make(1L, 500L)), KeyValue.pair(new Windowed<>("2", new TimeWindow(50, 150)), ValueAndTimestamp.make(1L, 150L)), KeyValue.pair(new Windowed<>("2", new TimeWindow(100, 200)), ValueAndTimestamp.make(2L, 200L)), KeyValue.pair(new Windowed<>("2", new TimeWindow(151, 251)), ValueAndTimestamp.make(1L, 200L))))); } } }
public static long getNextScheduledTime(final String cronEntry, long currentTime) throws MessageFormatException { long result = 0; if (cronEntry == null || cronEntry.length() == 0) { return result; } // Handle the once per minute case "* * * * *" // starting the next event at the top of the minute. if (cronEntry.equals("* * * * *")) { result = currentTime + 60 * 1000; result = result / 60000 * 60000; return result; } List<String> list = tokenize(cronEntry); List<CronEntry> entries = buildCronEntries(list); Calendar working = Calendar.getInstance(); working.setTimeInMillis(currentTime); working.set(Calendar.SECOND, 0); CronEntry minutes = entries.get(MINUTES); CronEntry hours = entries.get(HOURS); CronEntry dayOfMonth = entries.get(DAY_OF_MONTH); CronEntry month = entries.get(MONTH); CronEntry dayOfWeek = entries.get(DAY_OF_WEEK); // Start at the top of the next minute, cron is only guaranteed to be // run on the minute. int timeToNextMinute = 60 - working.get(Calendar.SECOND); working.add(Calendar.SECOND, timeToNextMinute); // If it's already too late in the day this will roll us over to tomorrow // so we'll need to check again when done updating month and day. int currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } int currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } // We can roll into the next month here which might violate the cron setting // rules so we check once then recheck again after applying the month settings. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Start by checking if we are in the right month, if not then calculations // need to start from the beginning of the month to ensure that we don't end // up on the wrong day. (Can happen when DAY_OF_WEEK is set and current time // is ahead of the day of the week to execute on). doUpdateCurrentMonth(working, month); // Now check day of week and day of month together since they can be specified // together in one entry, if both "day of month" and "day of week" are restricted // (not "*"), then either the "day of month" field (3) or the "day of week" field // (5) must match the current day or the Calendar must be advanced. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Now we can choose the correct hour and minute of the day in question. currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } result = working.getTimeInMillis(); if (result <= currentTime) { throw new ArithmeticException("Unable to compute next scheduled execution time."); } return result; }
@Test public void testgetNextTimeMonthVariant() throws MessageFormatException { // using an absolute date so that result will be absolute - Monday 7 March 2011 Calendar current = Calendar.getInstance(); current.set(2011, Calendar.MARCH, 7, 9, 15, 30); LOG.debug("start:" + current.getTime()); String test = "0 20 * 4,5 0"; long next = CronParser.getNextScheduledTime(test, current.getTimeInMillis()); Calendar result = Calendar.getInstance(); result.setTimeInMillis(next); LOG.debug("next:" + result.getTime()); assertEquals(0,result.get(Calendar.SECOND)); assertEquals(0,result.get(Calendar.MINUTE)); assertEquals(20,result.get(Calendar.HOUR_OF_DAY)); // expecting Sunday 3rd of April assertEquals(Calendar.APRIL,result.get(Calendar.MONTH)); assertEquals(3,result.get(Calendar.DAY_OF_MONTH)); assertEquals(Calendar.SUNDAY,result.get(Calendar.DAY_OF_WEEK)); assertEquals(2011,result.get(Calendar.YEAR)); current = Calendar.getInstance(); current.set(2011, Calendar.APRIL, 30, 22, 0, 30); LOG.debug("update:" + current.getTime()); next = CronParser.getNextScheduledTime(test, current.getTimeInMillis()); result = Calendar.getInstance(); result.setTimeInMillis(next); LOG.debug("next:" + result.getTime()); assertEquals(0,result.get(Calendar.SECOND)); assertEquals(0,result.get(Calendar.MINUTE)); assertEquals(20,result.get(Calendar.HOUR_OF_DAY)); // expecting Sunday 1st of May assertEquals(1,result.get(Calendar.DAY_OF_MONTH)); assertEquals(Calendar.SUNDAY,result.get(Calendar.DAY_OF_WEEK)); assertEquals(Calendar.MAY,result.get(Calendar.MONTH)); assertEquals(2011,result.get(Calendar.YEAR)); // Move past last time and see if reschedule to next year works. current = Calendar.getInstance(); current.set(2011, Calendar.MAY, 30, 22, 0, 30); LOG.debug("update:" + current.getTime()); next = CronParser.getNextScheduledTime(test, current.getTimeInMillis()); result = Calendar.getInstance(); result.setTimeInMillis(next); LOG.debug("next:" + result.getTime()); assertEquals(0,result.get(Calendar.SECOND)); assertEquals(0,result.get(Calendar.MINUTE)); assertEquals(20,result.get(Calendar.HOUR_OF_DAY)); // expecting Sunday 1st of April - 2012 assertEquals(1,result.get(Calendar.DAY_OF_MONTH)); assertEquals(Calendar.SUNDAY,result.get(Calendar.DAY_OF_WEEK)); assertEquals(Calendar.APRIL,result.get(Calendar.MONTH)); assertEquals(2012,result.get(Calendar.YEAR)); }
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, InvalidAlgorithmParameterException, KeyException, IOException { return toPrivateKey(keyFile, keyPassword, true); }
@Test public void testEncryptedEmptyPassword() throws Exception { PrivateKey key = SslContext.toPrivateKey( ResourcesUtil.getFile(getClass(), "test_encrypted_empty_pass.pem"), ""); assertNotNull(key); }
@Operation(summary = "queryEnvironmentListPaging", description = "QUERY_ENVIRONMENT_LIST_PAGING_NOTES") @Parameters({ @Parameter(name = "searchVal", description = "SEARCH_VAL", schema = @Schema(implementation = String.class)), @Parameter(name = "pageSize", description = "PAGE_SIZE", required = true, schema = @Schema(implementation = int.class, example = "20")), @Parameter(name = "pageNo", description = "PAGE_NO", required = true, schema = @Schema(implementation = int.class, example = "1")) }) @GetMapping(value = "/list-paging") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_ENVIRONMENT_ERROR) public Result queryEnvironmentListPaging(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "searchVal", required = false) String searchVal, @RequestParam("pageSize") Integer pageSize, @RequestParam("pageNo") Integer pageNo) { checkPageParams(pageNo, pageSize); searchVal = ParameterUtils.handleEscapes(searchVal); return environmentService.queryEnvironmentListPaging(loginUser, pageNo, pageSize, searchVal); }
@Test public void testQueryEnvironmentListPaging() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("searchVal", "test"); paramsMap.add("pageSize", "2"); paramsMap.add("pageNo", "2"); MvcResult mvcResult = mockMvc.perform(get("/environment/list-paging") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); logger.info(result.toString()); Assertions.assertTrue(result != null && result.isSuccess()); logger.info("query list-paging environment return result:{}", mvcResult.getResponse().getContentAsString()); }
public abstract int status(HttpServletResponse response);
@Test void servlet25_status_cachesUpToTenTypes() { assertThat(servlet25.status(new Response1())) .isEqualTo(200); assertThat(servlet25.status(new Response2())) .isEqualTo(200); assertThat(servlet25.status(new Response3())) .isEqualTo(200); assertThat(servlet25.status(new Response4())) .isEqualTo(200); assertThat(servlet25.status(new Response5())) .isEqualTo(200); assertThat(servlet25.status(new Response6())) .isEqualTo(200); assertThat(servlet25.status(new Response7())) .isEqualTo(200); assertThat(servlet25.status(new Response8())) .isEqualTo(200); assertThat(servlet25.status(new Response9())) .isEqualTo(200); assertThat(servlet25.status(new Response10())) .isEqualTo(200); assertThat(servlet25.status(new Response11())) .isZero(); }
@Override public <R> R run(Action<R, C, E> action) throws E, InterruptedException { return run(action, retryByDefault); }
@Test public void testNoRetryingWhenDisabled() { try (MockClientPoolImpl mockClientPool = new MockClientPoolImpl(2, RetryableException.class, false, 3)) { assertThatThrownBy(() -> mockClientPool.run(client -> client.succeedAfter(3))) .isInstanceOf(RetryableException.class); assertThat(mockClientPool.reconnectionAttempts()).isEqualTo(0); } }
public static <T> Map<String, T> translateDeprecatedConfigs(Map<String, T> configs, String[][] aliasGroups) { return translateDeprecatedConfigs(configs, Stream.of(aliasGroups) .collect(Collectors.toMap(x -> x[0], x -> Stream.of(x).skip(1).collect(Collectors.toList())))); }
@Test public void testDuplicateSynonyms() { Map<String, String> config = new HashMap<>(); config.put("foo.bar", "baz"); config.put("foo.bar.deprecated", "derp"); Map<String, String> newConfig = ConfigUtils.translateDeprecatedConfigs(config, new String[][]{ {"foo.bar", "foo.bar.deprecated"}, {"chicken", "foo.bar.deprecated"} }); assertNotNull(newConfig); assertEquals("baz", newConfig.get("foo.bar")); assertEquals("derp", newConfig.get("chicken")); assertNull(newConfig.get("foo.bar.deprecated")); }
@Retries.RetryTranslated public void retry(String action, String path, boolean idempotent, Retried retrying, InvocationRaisingIOE operation) throws IOException { retry(action, path, idempotent, retrying, () -> { operation.apply(); return null; }); }
@Test public void testRetryOnThrottle() throws Throwable { final AtomicInteger counter = new AtomicInteger(0); invoker.retry("test", null, false, () -> { if (counter.incrementAndGet() < 5) { throw newThrottledException(); } }); }
@Override public String toString() { return Stream.of( elements ).filter(Objects::nonNull).collect(toList() ).toString(); }
@Test public void testShuffled() { long time = System.currentTimeMillis(); for (int k = 0; k < 10; k++) { for (Integer[] perm : perms) { Group group = new Group("group"); for (Integer i : perm) { Item item = new Item(group, i); group.add(item); } InternalMatch[] elems = group.getQueue().toArray(new InternalMatch[0]); for (InternalMatch elem : elems) { Item item = (Item) elem; // System.out.print( " " + item.getSalience() + "/" + item.getActivationNumber() + "/" + item.getQueueIndex() ); if (item.getQueueIndex() % 2 == 0) { group.remove(item); group.add(item); } } boolean ok = true; StringBuilder sb = new StringBuilder("queue:"); for (int i = max - 1; i >= 0; i--) { int sal = group.getNext().getSalience(); sb.append(" ").append(sal); if (sal != i) { ok = false; } } assertThat(ok).as("incorrect order in " + sb.toString()).isTrue(); // System.out.println( sb.toString() ); } } System.out.println("time:" + (System.currentTimeMillis() - time)); }
@Override public Map<K, V> getAll(Set<? extends K> keys) { checkNotClosed(); if (keys == null) { throw new NullPointerException(); } for (K key : keys) { checkKey(key); } if (!atomicExecution && !config.isReadThrough()) { long startTime = currentNanoTime(); boolean exists = false; for (K key : keys) { if (containsKey(key)) { exists = true; break; } } if (!exists) { cacheManager.getStatBean(this).addGetTime(currentNanoTime() - startTime); return Collections.emptyMap(); } } RFuture<Map<K, V>> result = getAllAsync(keys); return sync(result); }
@Test public void testGetAll() throws Exception { URL configUrl = getClass().getResource("redisson-jcache.yaml"); Config cfg = Config.fromYAML(configUrl); Configuration<String, String> config = RedissonConfiguration.fromConfig(cfg); Cache<String, String> cache = Caching.getCachingProvider().getCacheManager() .createCache("test", config); cache.put("1", "2"); cache.put("3", "4"); Map<String, String> entries = cache.getAll(new HashSet<String>(Arrays.asList("1", "3", "7"))); Map<String, String> expected = new HashMap<String, String>(); expected.put("1", "2"); expected.put("3", "4"); assertThat(entries).isEqualTo(expected); cache.close(); }
@SuppressWarnings("ChainOfInstanceofChecks") public OpenFileInformation prepareToOpenFile( final Path path, final OpenFileParameters parameters, final long blockSize) throws IOException { Configuration options = parameters.getOptions(); Set<String> mandatoryKeys = parameters.getMandatoryKeys(); // S3 Select is not supported in this release if (options.get(SelectConstants.SELECT_SQL, null) != null) { if (mandatoryKeys.contains(SelectConstants.SELECT_SQL)) { // mandatory option: fail with a specific message. throw new UnsupportedOperationException(SelectConstants.SELECT_UNSUPPORTED); } else { // optional; log once and continue LOG_NO_SQL_SELECT.warn(SelectConstants.SELECT_UNSUPPORTED); } } // choice of keys depends on open type rejectUnknownMandatoryKeys( mandatoryKeys, InternalConstants.S3A_OPENFILE_KEYS, "for " + path + " in file I/O"); // where does a read end? long fileLength = LENGTH_UNKNOWN; // was a status passed in via a withStatus() invocation in // the builder API? FileStatus providedStatus = parameters.getStatus(); S3AFileStatus fileStatus = null; if (providedStatus != null) { // there's a file status // make sure the file name matches -the rest of the path // MUST NOT be checked. Path providedStatusPath = providedStatus.getPath(); checkArgument(path.getName().equals(providedStatusPath.getName()), "Filename mismatch between file being opened %s and" + " supplied filestatus %s", path, providedStatusPath); // make sure the status references a file if (providedStatus.isDirectory()) { throw new FileNotFoundException( "Supplied status references a directory " + providedStatus); } // build up the values long len = providedStatus.getLen(); long modTime = providedStatus.getModificationTime(); String versionId; String eTag; // can use this status to skip our own probes, LOG.debug("File was opened with a supplied FileStatus;" + " skipping getFileStatus call in open() operation: {}", providedStatus); // what type is the status (and hence: what information does it contain?) if (providedStatus instanceof S3AFileStatus) { // is it an S3AFileSystem status? S3AFileStatus st = (S3AFileStatus) providedStatus; versionId = st.getVersionId(); eTag = st.getEtag(); } else if (providedStatus instanceof S3ALocatedFileStatus) { // S3ALocatedFileStatus instance may supply etag and version. S3ALocatedFileStatus st = (S3ALocatedFileStatus) providedStatus; versionId = st.getVersionId(); eTag = st.getEtag(); } else { // it is another type. // build a status struct without etag or version. LOG.debug("Converting file status {}", providedStatus); versionId = null; eTag = null; } // Construct a new file status with the real path of the file. fileStatus = new S3AFileStatus( len, modTime, path, blockSize, username, eTag, versionId); // set the end of the read to the file length fileLength = fileStatus.getLen(); } FSBuilderSupport builderSupport = new FSBuilderSupport(options); // determine start and end of file. 
long splitStart = builderSupport.getPositiveLong(FS_OPTION_OPENFILE_SPLIT_START, 0); // split end long splitEnd = builderSupport.getLong( FS_OPTION_OPENFILE_SPLIT_END, LENGTH_UNKNOWN); if (splitStart > 0 && splitStart > splitEnd) { LOG.warn("Split start {} is greater than split end {}, resetting", splitStart, splitEnd); splitStart = 0; } // read end is the open file value fileLength = builderSupport.getPositiveLong(FS_OPTION_OPENFILE_LENGTH, fileLength); // if the read end has come from options, use that // in creating a file status if (fileLength >= 0 && fileStatus == null) { fileStatus = createStatus(path, fileLength, blockSize); } // Build up the input policy. // seek policy from default, s3a opt or standard option // read from the FS standard option. Collection<String> policies = options.getStringCollection(FS_OPTION_OPENFILE_READ_POLICY); if (policies.isEmpty()) { // fall back to looking at the S3A-specific option. policies = options.getStringCollection(INPUT_FADVISE); } return new OpenFileInformation() .withAsyncDrainThreshold( builderSupport.getPositiveLong(ASYNC_DRAIN_THRESHOLD, defaultReadAhead)) .withBufferSize( (int)builderSupport.getPositiveLong( FS_OPTION_OPENFILE_BUFFER_SIZE, defaultBufferSize)) .withChangePolicy(changePolicy) .withFileLength(fileLength) .withInputPolicy( S3AInputPolicy.getFirstSupportedPolicy(policies, defaultInputPolicy)) .withReadAheadRange( builderSupport.getPositiveLong(READAHEAD_RANGE, defaultReadAhead)) .withSplitStart(splitStart) .withSplitEnd(splitEnd) .withStatus(fileStatus) .build(); }
@Test public void testFileLength() throws Throwable { ObjectAssert<OpenFileSupport.OpenFileInformation> asst = assertFileInfo(prepareToOpenFile( params(FS_OPTION_OPENFILE_LENGTH, "8192") .withStatus(null))); asst.extracting(f -> f.getStatus()) .isNotNull(); asst.extracting(f -> f.getStatus().getPath()) .isEqualTo(TESTPATH); asst.extracting(f -> f.getStatus().getLen()) .isEqualTo(8192L); }
@Override public URL select(List<URL> urls, String serviceId, String tag, String requestKey) { String key = tag == null ? serviceId : serviceId + "|" + tag; // search for a URL in the same ip first List<URL> localUrls = searchLocalUrls(urls, ip); if(localUrls.size() > 0) { if(localUrls.size() == 1) { return localUrls.get(0); } else { // round robin within localUrls return doSelect(localUrls, key); } } else { // round robin within urls return doSelect(urls, key); } }
@Test public void testSelectWithEmptyList() throws Exception { List<URL> urls = new ArrayList<>(); URL url = loadBalance.select(urls, "serviceId", "tag", null); Assert.assertNull(url); }
public static void main(String[] args) throws IOException { if (args.length < 2) { _log.error("Usage: AvroSchemaGenerator targetDirectoryPath [sourceFile or sourceDirectory or schemaName]+"); System.exit(1); } String resolverPath = System.getProperty(AbstractGenerator.GENERATOR_RESOLVER_PATH); if (resolverPath != null && ArgumentFileProcessor.isArgFile(resolverPath)) { // The resolver path is an arg file, prefixed with '@' and containing the actual resolverPath String[] argFileContents = ArgumentFileProcessor.getContentsAsArray(resolverPath); resolverPath = argFileContents.length > 0 ? argFileContents[0] : null; } run(resolverPath, System.getProperty(GENERATOR_AVRO_TRANSLATE_OPTIONAL_DEFAULT), System.getProperty(GENERATOR_AVRO_TYPEREF_PROPERTY_EXCLUDE), Boolean.parseBoolean(System.getProperty(GENERATOR_AVRO_NAMESPACE_OVERRIDE)), args[0], Arrays.copyOfRange(args, 1, args.length)); }
@Test(dataProvider = "toAvroSchemaDataBeforeReferree") public void testReferrerBeforeReferreeInArgs(Map<String, String> testSchemas, String testPath, boolean override) throws IOException { Map<File, Map.Entry<String,String>> files = TestUtil.createSchemaFiles(_testDir, testSchemas, _debug); Collection<String> testPaths = computePathFromRelativePaths(_testDir, Arrays.asList(testPath)); Map.Entry<File, Map.Entry<String,String>> referrer2 = findEntryForPdsc(buildSystemIndependentPath("a3", "b", "c", "referrer2.pdsc"), files); Map.Entry<File, Map.Entry<String,String>> referree2 = findEntryForPdsc(buildSystemIndependentPath("a3", "b", "c", "referree2.pdsc"), files); File targetDir = setup(testPaths, override); String targetPath = targetDir.getCanonicalPath() + (override ? ("/" + AVRO_PREFIX) : ""); File[] expectedOutputFiles = { schemaOutputFile(targetPath, referrer2.getValue().getValue()), schemaOutputFile(targetPath, referree2.getValue().getValue()) }; // make sure files do not exists for (File f : expectedOutputFiles) { assertFalse(f.exists()); } // referrer before referree in arg list String args[] = { targetDir.getAbsolutePath(), referrer2.getKey().getCanonicalPath(), referree2.getKey().getCanonicalPath(), }; Exception exc = null; try { AvroSchemaGenerator.main(args); } catch (Exception e) { exc = e; } assertNull(exc); // make sure expected file is generated for (File f : expectedOutputFiles) { assertTrue(f.exists(), f + " expected to exist"); f.delete(); } }
@SuppressWarnings("unchecked") public Mono<RateLimiterResponse> isAllowed(final String id, final RateLimiterHandle limiterHandle) { double replenishRate = limiterHandle.getReplenishRate(); double burstCapacity = limiterHandle.getBurstCapacity(); double requestCount = limiterHandle.getRequestCount(); RateLimiterAlgorithm<?> rateLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance(limiterHandle.getAlgorithmName()); RedisScript<?> script = rateLimiterAlgorithm.getScript(); List<String> keys = rateLimiterAlgorithm.getKeys(id); List<String> scriptArgs = Stream.of(replenishRate, burstCapacity, Instant.now().getEpochSecond(), requestCount).map(String::valueOf).collect(Collectors.toList()); Flux<List<Long>> resultFlux = Singleton.INST.get(ReactiveRedisTemplate.class).execute(script, keys, scriptArgs); return resultFlux.onErrorResume(throwable -> Flux.just(Arrays.asList(1L, -1L))) .reduce(new ArrayList<Long>(), (longs, l) -> { longs.addAll(l); return longs; }).map(results -> { boolean allowed = results.get(0) == 1L; Long tokensLeft = results.get(1); return new RateLimiterResponse(allowed, tokensLeft, keys); }) .doOnError(throwable -> { rateLimiterAlgorithm.callback(rateLimiterAlgorithm.getScript(), keys, scriptArgs); LOG.error("Error occurred while judging if user is allowed by RedisRateLimiter:{}", throwable.getMessage()); }); }
@Test public void leakyBucketNotAllowedTest() { leakyBucketPreInit(0L, 300L); rateLimiterHandle.setAlgorithmName("leakyBucket"); Mono<RateLimiterResponse> responseMono = redisRateLimiter.isAllowed(DEFAULT_TEST_ID, rateLimiterHandle); StepVerifier.create(responseMono).assertNext(r -> { assertThat(r.getTokensRemaining(), is((long) DEFAULT_TEST_BURST_CAPACITY)); assertFalse(r.isAllowed()); }).verifyComplete(); }
public List<SchemaChangeEvent> applySchemaChange(SchemaChangeEvent schemaChangeEvent) { List<SchemaChangeEvent> events = new ArrayList<>(); TableId originalTable = schemaChangeEvent.tableId(); boolean noRouteMatched = true; for (Tuple3<Selectors, String, String> route : routes) { // Check routing table if (!route.f0.isMatch(originalTable)) { continue; } noRouteMatched = false; // Matched a routing rule TableId derivedTable = resolveReplacement(originalTable, route); Set<TableId> originalTables = derivationMapping.computeIfAbsent(derivedTable, t -> new HashSet<>()); originalTables.add(originalTable); if (originalTables.size() == 1) { // single source mapping, replace the table ID directly SchemaChangeEvent derivedSchemaChangeEvent = ChangeEventUtils.recreateSchemaChangeEvent(schemaChangeEvent, derivedTable); events.add(derivedSchemaChangeEvent); } else { // multiple source mapping (merging tables) Schema derivedTableSchema = schemaManager.getLatestEvolvedSchema(derivedTable).get(); events.addAll( Objects.requireNonNull( SchemaChangeEventVisitor.visit( schemaChangeEvent, addColumnEvent -> handleAddColumnEvent( addColumnEvent, derivedTableSchema, derivedTable), alterColumnTypeEvent -> handleAlterColumnTypeEvent( alterColumnTypeEvent, derivedTableSchema, derivedTable), createTableEvent -> handleCreateTableEvent( createTableEvent, derivedTableSchema, derivedTable), dropColumnEvent -> Collections.emptyList(), // Column drop shouldn't be // spread to route // destination. dropTableEvent -> Collections.emptyList(), // Table drop shouldn't be // spread to route // destination. renameColumnEvent -> handleRenameColumnEvent( renameColumnEvent, derivedTableSchema, derivedTable), truncateTableEvent -> Collections.emptyList() // // Table truncation // shouldn't be spread to route // destination. ))); } } if (noRouteMatched) { // No routes are matched, leave it as-is return Collections.singletonList(schemaChangeEvent); } else { return events; } }
@Test void testIncompatibleTypes() { SchemaManager schemaManager = new SchemaManager(); SchemaDerivation schemaDerivation = new SchemaDerivation(schemaManager, ROUTES, new HashMap<>()); // Create table 1 List<SchemaChangeEvent> derivedChangesAfterCreateTable = schemaDerivation.applySchemaChange(new CreateTableEvent(TABLE_1, SCHEMA)); assertThat(derivedChangesAfterCreateTable).hasSize(1); assertThat(derivedChangesAfterCreateTable.get(0)) .asCreateTableEvent() .hasTableId(MERGED_TABLE) .hasSchema(SCHEMA); derivedChangesAfterCreateTable.forEach(schemaManager::applyEvolvedSchemaChange); // Create table 2 assertThatThrownBy( () -> schemaDerivation.applySchemaChange( new CreateTableEvent(TABLE_2, INCOMPATIBLE_SCHEMA))) .isInstanceOf(IllegalStateException.class) .hasMessage("Incompatible types: \"INT\" and \"STRING\""); }
@Override public Metrics toDay() { MaxFunction metrics = (MaxFunction) createNew(); metrics.setEntityId(getEntityId()); metrics.setTimeBucket(toTimeBucketInDay()); metrics.setServiceId(getServiceId()); metrics.setValue(getValue()); return metrics; }
@Test public void testToDay() { function.setTimeBucket(TimeBucket.getMinuteTimeBucket(System.currentTimeMillis())); function.accept(MeterEntity.newService("service-test", Layer.GENERAL), LARGE_VALUE); function.accept(MeterEntity.newService("service-test", Layer.GENERAL), SMALL_VALUE); function.calculate(); final MaxFunction dayFunction = (MaxFunction) function.toDay(); dayFunction.calculate(); assertThat(dayFunction.getValue()).isEqualTo(LARGE_VALUE); }
public IssuesChangesNotification newIssuesChangesNotification(Set<DefaultIssue> issues, Map<String, UserDto> assigneesByUuid) { AnalysisChange change = new AnalysisChange(analysisMetadataHolder.getAnalysisDate()); Set<ChangedIssue> changedIssues = issues.stream() .map(issue -> new ChangedIssue.Builder(issue.key()) .setAssignee(getAssignee(issue.assignee(), assigneesByUuid)) .setNewStatus(issue.status()) .setNewIssueStatus(issue.status() != null ? IssueStatus.of(issue.status(), issue.resolution()) : null) .setRule(getRuleByRuleKey(issue.ruleKey())) .setProject(getProject()) .build()) .collect(Collectors.toSet()); return issuesChangesSerializer.serialize(new IssuesChangesNotificationBuilder(changedIssues, change)); }
@Test public void newIssuesChangesNotification_fails_with_ISE_if_issue_has_assignee_not_in_assigneesByUuid() { RuleKey ruleKey = RuleKey.of("foo", "bar"); String assigneeUuid = randomAlphabetic(40); DefaultIssue issue = new DefaultIssue() .setRuleKey(ruleKey) .setKey("issueKey") .setStatus(STATUS_OPEN) .setAssigneeUuid(assigneeUuid); Map<String, UserDto> assigneesByUuid = Collections.emptyMap(); ReportComponent project = ReportComponent.builder(PROJECT, 1).build(); ruleRepository.add(ruleKey); treeRootHolder.setRoot(project); analysisMetadata.setAnalysisDate(new Random().nextLong()); analysisMetadata.setBranch(newNonMainBranch(BranchType.BRANCH, randomAlphabetic(12))); assertThatThrownBy(() -> underTest.newIssuesChangesNotification(ImmutableSet.of(issue), assigneesByUuid)) .isInstanceOf(IllegalStateException.class) .hasMessage("Can not find DTO for assignee uuid " + assigneeUuid); }
Integer calculateFeePrice(Integer withdrawPrice, Integer percent) { Integer feePrice = 0; if (percent != null && percent > 0) { feePrice = MoneyUtils.calculateRatePrice(withdrawPrice, Double.valueOf(percent)); } return feePrice; }
@Test public void testCalculateFeePrice() { Integer withdrawPrice = 100; // 测试手续费比例未设置 Integer percent = null; assertEquals(brokerageWithdrawService.calculateFeePrice(withdrawPrice, percent), 0); // 测试手续费给为0 percent = 0; assertEquals(brokerageWithdrawService.calculateFeePrice(withdrawPrice, percent), 0); // 测试手续费 percent = 1; assertEquals(brokerageWithdrawService.calculateFeePrice(withdrawPrice, percent), 1); }
@Override public Future<Map<ByteBuffer, ByteBuffer>> get(Collection<ByteBuffer> keys) { CompletableFuture<Void> endFuture = new CompletableFuture<>(); readToEnd(endFuture); return endFuture.thenApply(ignored -> { Map<ByteBuffer, ByteBuffer> values = new HashMap<>(); for (ByteBuffer key : keys) { ByteBuffer value = data.get(key); if (null != value) { values.put(key, value); } } return values; }); }
@Test public void testGetFromEmpty() throws Exception { testOffsetBackingStore(false); assertTrue(offsetBackingStore.get( Arrays.asList(ByteBuffer.wrap("empty-key".getBytes(UTF_8))) ).get().isEmpty()); }
public static Set<String> verifyTopologyOptimizationConfigs(final String config) { final List<String> configs = Arrays.asList(config.split("\\s*,\\s*")); final Set<String> verifiedConfigs = new HashSet<>(); // Verify it doesn't contain none or all plus a list of optimizations if (configs.contains(NO_OPTIMIZATION) || configs.contains(OPTIMIZE)) { if (configs.size() > 1) { throw new ConfigException("\"" + config + "\" is not a valid optimization config. " + CONFIG_ERROR_MSG); } } for (final String conf: configs) { if (!TOPOLOGY_OPTIMIZATION_CONFIGS.contains(conf)) { throw new ConfigException("Unrecognized config. " + CONFIG_ERROR_MSG); } } if (configs.contains(OPTIMIZE)) { verifiedConfigs.add(REUSE_KTABLE_SOURCE_TOPICS); verifiedConfigs.add(MERGE_REPARTITION_TOPICS); verifiedConfigs.add(SINGLE_STORE_SELF_JOIN); } else if (!configs.contains(NO_OPTIMIZATION)) { verifiedConfigs.addAll(configs); } return verifiedConfigs; }
@Test public void shouldEnableAllOptimizationsWithOptimizeConfig() { final Set<String> configs = StreamsConfig.verifyTopologyOptimizationConfigs(StreamsConfig.OPTIMIZE); assertEquals(3, configs.size()); assertTrue(configs.contains(StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS)); assertTrue(configs.contains(StreamsConfig.MERGE_REPARTITION_TOPICS)); assertTrue(configs.contains(StreamsConfig.SINGLE_STORE_SELF_JOIN)); }
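For orientation only (separate from the paired test above): a minimal sketch of how the optimization string that verifyTopologyOptimizationConfigs validates is typically supplied to a Streams application. It assumes the StreamsConfig constants referenced by the method above (TOPOLOGY_OPTIMIZATION_CONFIG, OPTIMIZE and the granular flags); the class name is illustrative.

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public final class OptimizationConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // "all": expanded by verifyTopologyOptimizationConfigs into the three granular flags checked above.
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
        // Alternatively, individual optimizations can be listed as a comma-separated string:
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG,
            StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS + "," + StreamsConfig.SINGLE_STORE_SELF_JOIN);
        // Prints the verified set of granular optimization names.
        System.out.println(StreamsConfig.verifyTopologyOptimizationConfigs(
            (String) props.get(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG)));
    }
}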
@Override public long getPos() throws IOException { return position; }
@Test public void shouldConstructStream() throws IOException { if (empty) { assertEquals(0, fsDataOutputStream.getPos()); } else { assertEquals(position, fsDataOutputStream.getPos()); } }
@PostMapping("add-to-favourites") public Mono<String> addProductToFavourites(@ModelAttribute("product") Mono<Product> productMono) { return productMono .map(Product::id) .flatMap(productId -> this.favouriteProductsClient.addProductToFavourites(productId) .thenReturn("redirect:/customer/products/%d".formatted(productId)) .onErrorResume(exception -> { log.error(exception.getMessage(), exception); return Mono.just("redirect:/customer/products/%d".formatted(productId)); })); }
@Test void addProductToFavourites_RequestIsValid_RedirectsToProductPage() { // given doReturn(Mono.just(new FavouriteProduct(UUID.fromString("25ec67b4-cbac-11ee-adc8-4bd80e8171c4"), 1))) .when(this.favouriteProductsClient).addProductToFavourites(1); // when StepVerifier.create(this.controller.addProductToFavourites( Mono.just(new Product(1, "Товар №1", "Описание товара №1")))) // then .expectNext("redirect:/customer/products/1") .verifyComplete(); verify(this.favouriteProductsClient).addProductToFavourites(1); verifyNoMoreInteractions(this.favouriteProductsClient); verifyNoInteractions(this.productReviewsClient, this.productsClient); }
public DecommissioningNodesWatcher(RMContext rmContext) { this.rmContext = rmContext; pollTimer = new Timer(true); mclock = new MonotonicClock(); }
@Test public void testDecommissioningNodesWatcher() throws Exception { Configuration conf = new Configuration(); conf.set(YarnConfiguration.RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT, "40"); rm = new MockRM(conf); rm.start(); DecommissioningNodesWatcher watcher = new DecommissioningNodesWatcher(rm.getRMContext()); MockNM nm1 = rm.registerNode("host1:1234", 10240); RMNodeImpl node1 = (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm1.getNodeId()); NodeId id1 = nm1.getNodeId(); rm.waitForState(id1, NodeState.RUNNING); RMApp app = MockRMAppSubmitter.submitWithMemory(2000, rm); MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); NodeStatus nodeStatus = createNodeStatus(id1, app, 3); node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); // Setup nm1 as DECOMMISSIONING for DecommissioningNodesWatcher. rm.sendNodeGracefulDecommission(nm1, YarnConfiguration.DEFAULT_RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT); rm.waitForState(id1, NodeState.DECOMMISSIONING); // Update status with decreasing number of running containers until 0. nodeStatus = createNodeStatus(id1, app, 3); node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); watcher.update(node1, nodeStatus); nodeStatus = createNodeStatus(id1, app, 2); node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); watcher.update(node1, nodeStatus); Assert.assertFalse(watcher.checkReadyToBeDecommissioned(id1)); nodeStatus = createNodeStatus(id1, app, 1); node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); watcher.update(node1, nodeStatus); Assert.assertEquals(DecommissioningNodeStatus.WAIT_CONTAINER, watcher.checkDecommissioningStatus(id1)); nodeStatus = createNodeStatus(id1, app, 0); watcher.update(node1, nodeStatus); node1.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeStatus)); Assert.assertEquals(DecommissioningNodeStatus.WAIT_APP, watcher.checkDecommissioningStatus(id1)); // Set app to be FINISHED and verified DecommissioningNodeStatus is READY. MockRM.finishAMAndVerifyAppState(app, rm, nm1, am); rm.waitForState(app.getApplicationId(), RMAppState.FINISHED); watcher.update(node1, nodeStatus); Assert.assertEquals(DecommissioningNodeStatus.READY, watcher.checkDecommissioningStatus(id1)); }
public static void createTopics( Logger log, String bootstrapServers, Map<String, String> commonClientConf, Map<String, String> adminClientConf, Map<String, NewTopic> topics, boolean failOnExisting) throws Throwable { // this method wraps the call to createTopics() that takes admin client, so that we can // unit test the functionality with MockAdminClient. The exception is caught and // re-thrown so that admin client is closed when the method returns. try (Admin adminClient = createAdminClient(bootstrapServers, commonClientConf, adminClientConf)) { createTopics(log, adminClient, topics, failOnExisting); } catch (Exception e) { log.warn("Failed to create or verify topics {}", topics, e); throw e; } }
@Test public void testCreateRetriesOnTimeout() throws Throwable { adminClient.timeoutNextRequest(1); WorkerUtils.createTopics( log, adminClient, Collections.singletonMap(TEST_TOPIC, NEW_TEST_TOPIC), true); assertEquals( new TopicDescription( TEST_TOPIC, false, Collections.singletonList( new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList(), Collections.emptyList(), Collections.emptyList()))), adminClient.describeTopics( Collections.singleton(TEST_TOPIC)).topicNameValues().get(TEST_TOPIC).get() ); }
@Description("compute sha1 hash") @ScalarFunction @SqlType(StandardTypes.VARBINARY) public static Slice sha1(@SqlType(StandardTypes.VARBINARY) Slice slice) { return computeHash(Hashing.sha1(), slice); }
@Test public void testSha1() { assertFunction("sha1(CAST('' AS VARBINARY))", VARBINARY, sqlVarbinaryHex("DA39A3EE5E6B4B0D3255BFEF95601890AFD80709")); assertFunction("sha1(CAST('hashme' AS VARBINARY))", VARBINARY, sqlVarbinaryHex("FB78992E561929A6967D5328F49413FA99048D06")); }
public ProcessContinuation run( PartitionRecord partitionRecord, RestrictionTracker<StreamProgress, StreamProgress> tracker, OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator) throws IOException { BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator = new BytesThroughputEstimator<>(sizeEstimator, Instant.now()); // Lock the partition if (tracker.currentRestriction().isEmpty()) { boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord); // Clean up NewPartition on the first run regardless of locking result. If locking fails it // means this partition is being streamed, then cleaning up NewPartitions avoids lingering // NewPartitions. for (NewPartition newPartition : partitionRecord.getParentPartitions()) { metadataTableDao.deleteNewPartition(newPartition); } if (!lockedPartition) { LOG.info( "RCSP {} : Could not acquire lock with uid: {}, because this is a " + "duplicate and another worker is working on this partition already.", formatByteStringRange(partitionRecord.getPartition()), partitionRecord.getUuid()); StreamProgress streamProgress = new StreamProgress(); streamProgress.setFailToLock(true); metrics.decPartitionStreamCount(); tracker.tryClaim(streamProgress); return ProcessContinuation.stop(); } } else if (tracker.currentRestriction().getCloseStream() == null && !metadataTableDao.doHoldLock( partitionRecord.getPartition(), partitionRecord.getUuid())) { // We only verify the lock if we are not holding CloseStream because if this is a retry of // CloseStream we might have already cleaned up the lock in a previous attempt. // Failed correctness check on this worker holds the lock on this partition. This shouldn't // fail because there's a restriction tracker which means this worker has already acquired the // lock and once it has acquired the lock it shouldn't fail the lock check. LOG.warn( "RCSP {} : Subsequent run that doesn't hold the lock {}. This is not unexpected and " + "should probably be reviewed.", formatByteStringRange(partitionRecord.getPartition()), partitionRecord.getUuid()); StreamProgress streamProgress = new StreamProgress(); streamProgress.setFailToLock(true); metrics.decPartitionStreamCount(); tracker.tryClaim(streamProgress); return ProcessContinuation.stop(); } // Process CloseStream if it exists CloseStream closeStream = tracker.currentRestriction().getCloseStream(); if (closeStream != null) { LOG.debug("RCSP: Processing CloseStream"); metrics.decPartitionStreamCount(); if (closeStream.getStatus().getCode() == Status.Code.OK) { // We need to update watermark here. We're terminating this stream because we have reached // endTime. Instant.now is greater or equal to endTime. The goal here is // DNP will need to know this stream has passed the endTime so DNP can eventually terminate. 
Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE); Instant endTime = partitionRecord.getEndTime(); if (endTime != null) { terminatingWatermark = endTime; } watermarkEstimator.setWatermark(terminatingWatermark); metadataTableDao.updateWatermark( partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null); LOG.info( "RCSP {}: Reached end time, terminating...", formatByteStringRange(partitionRecord.getPartition())); return ProcessContinuation.stop(); } if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) { LOG.error( "RCSP {}: Reached unexpected terminal state: {}", formatByteStringRange(partitionRecord.getPartition()), closeStream.getStatus()); return ProcessContinuation.stop(); } // Release the lock only if the uuid matches. In normal operation this doesn't change // anything. However, it's possible for this RCSP to crash while processing CloseStream but // after the side effects of writing the new partitions to the metadata table. New partitions // can be created while this RCSP restarts from the previous checkpoint and processes the // CloseStream again. In certain race scenarios the child partitions may merge back to this // partition, but as a new RCSP. The new partition (same as this partition) would write the // exact same content to the metadata table but with a different uuid. We don't want to // accidentally delete the StreamPartition because it now belongs to the new RCSP. // If the uuid is the same (meaning this race scenario did not take place) we release the lock // and mark the StreamPartition to be deleted, so we can delete it after we have written the // NewPartitions. metadataTableDao.releaseStreamPartitionLockForDeletion( partitionRecord.getPartition(), partitionRecord.getUuid()); // The partitions in the continuation tokens must cover the same key space as this partition. // If there's only 1 token, then the token's partition is equals to this partition. // If there are more than 1 tokens, then the tokens form a continuous row range equals to this // partition. List<ByteStringRange> childPartitions = new ArrayList<>(); List<ByteStringRange> tokenPartitions = new ArrayList<>(); // Check if NewPartitions field exists, if not we default to using just the // ChangeStreamContinuationTokens. boolean useNewPartitionsField = closeStream.getNewPartitions().size() == closeStream.getChangeStreamContinuationTokens().size(); for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) { ByteStringRange childPartition; if (useNewPartitionsField) { childPartition = closeStream.getNewPartitions().get(i); } else { childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition(); } childPartitions.add(childPartition); ChangeStreamContinuationToken token = getTokenWithCorrectPartition( partitionRecord.getPartition(), closeStream.getChangeStreamContinuationTokens().get(i)); tokenPartitions.add(token.getPartition()); metadataTableDao.writeNewPartition( new NewPartition( childPartition, Collections.singletonList(token), watermarkEstimator.getState())); } LOG.info( "RCSP {}: Split/Merge into {}", formatByteStringRange(partitionRecord.getPartition()), partitionsToString(childPartitions)); if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) { LOG.warn( "RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace", formatByteStringRange(partitionRecord.getPartition()), partitionsToString(tokenPartitions)); } // Perform the real cleanup. 
// This step is no op if the race mentioned above occurs (splits and // merges results back to this partition again) because when we register the "new" partition, // we unset the deletion bit. metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition()); return ProcessContinuation.stop(); } // Update the metadata table with the watermark metadataTableDao.updateWatermark( partitionRecord.getPartition(), watermarkEstimator.getState(), tracker.currentRestriction().getCurrentToken()); // Start to stream the partition. ServerStream<ChangeStreamRecord> stream = null; try { stream = changeStreamDao.readChangeStreamPartition( partitionRecord, tracker.currentRestriction(), partitionRecord.getEndTime(), heartbeatDuration); for (ChangeStreamRecord record : stream) { Optional<ProcessContinuation> result = changeStreamAction.run( partitionRecord, record, tracker, receiver, watermarkEstimator, throughputEstimator); // changeStreamAction will usually return Optional.empty() except for when a checkpoint // (either runner or pipeline initiated) is required. if (result.isPresent()) { return result.get(); } } } catch (Exception e) { throw e; } finally { if (stream != null) { stream.cancel(); } } return ProcessContinuation.resume(); }
@Test public void testCloseStreamNewPartitionMerge() throws IOException { // Force lock fail because CloseStream should not depend on locking when(metadataTableDao.doHoldLock(partition, uuid)).thenReturn(false); // NewPartitions field includes the merge target. ChangeStreamContinuationToken's partition may // not be the same as the new partition. // AC merging into AD. The new partition is AD and the corresponding // ChangeStreamContinuationToken is for AC ByteStringRange childPartitionAD = ByteStringRange.create("A", "D"); ChangeStreamContinuationToken parentTokenAC = ChangeStreamContinuationToken.create(partition, "AC"); CloseStream mockCloseStream = Mockito.mock(CloseStream.class); Status statusProto = Status.newBuilder().setCode(11).build(); Mockito.when(mockCloseStream.getStatus()) .thenReturn(com.google.cloud.bigtable.common.Status.fromProto(statusProto)); Mockito.when(mockCloseStream.getChangeStreamContinuationTokens()) .thenReturn(Collections.singletonList(parentTokenAC)); Mockito.when(mockCloseStream.getNewPartitions()) .thenReturn(Collections.singletonList(childPartitionAD)); when(restriction.isEmpty()).thenReturn(false); when(restriction.getCloseStream()).thenReturn(mockCloseStream); when(restriction.isEmpty()).thenReturn(false); final DoFn.ProcessContinuation result = action.run(partitionRecord, tracker, receiver, watermarkEstimator); assertEquals(DoFn.ProcessContinuation.stop(), result); // Should terminate before reaching processing stream partition responses. verify(changeStreamAction, never()).run(any(), any(), any(), any(), any(), any()); // Should not try claim any restriction when processing CloseStream verify(tracker, (never())).tryClaim(any()); // Should decrement the metric on termination. verify(metrics).decPartitionStreamCount(); // Write the new partitions. NewPartition newPartitionAD = new NewPartition( childPartitionAD, Collections.singletonList(parentTokenAC), watermarkEstimator.getState()); verify(metadataTableDao).writeNewPartition(eq(newPartitionAD)); verify(metadataTableDao, times(1)) .releaseStreamPartitionLockForDeletion( partitionRecord.getPartition(), partitionRecord.getUuid()); verify(metadataTableDao, times(1)).deleteStreamPartitionRow(partitionRecord.getPartition()); }
public static void load(Configuration conf, InputStream is) throws IOException { conf.addResource(is); }
@Test public void constructors3() throws Exception { InputStream is = new ByteArrayInputStream( "<xxx><property name=\"key1\" value=\"val1\"/></xxx>".getBytes()); Configuration conf = new Configuration(false); ConfigurationUtils.load(conf, is); assertEquals("val1", conf.get("key1")); }
public static Schema create(Type type) { switch (type) { case STRING: return new StringSchema(); case BYTES: return new BytesSchema(); case INT: return new IntSchema(); case LONG: return new LongSchema(); case FLOAT: return new FloatSchema(); case DOUBLE: return new DoubleSchema(); case BOOLEAN: return new BooleanSchema(); case NULL: return new NullSchema(); default: throw new AvroRuntimeException("Can't create a: " + type); } }
@Test void doubleAsFloatDefaultValue() { Schema.Field field = new Schema.Field("myField", Schema.create(Schema.Type.FLOAT), "doc", 1.0d); assertTrue(field.hasDefaultValue()); assertEquals(1.0f, field.defaultVal()); assertEquals(1.0f, GenericData.get().getDefaultValue(field)); }
@Override public KsMaterializedQueryResult<WindowedRow> get( final GenericKey key, final int partition, final Range<Instant> windowStart, final Range<Instant> windowEnd, final Optional<Position> position ) { try { final ReadOnlySessionStore<GenericKey, GenericRow> store = stateStore .store(QueryableStoreTypes.sessionStore(), partition); return KsMaterializedQueryResult.rowIterator( findSession(store, key, windowStart, windowEnd).iterator()); } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
@Test public void shouldReturnValueIfSessionEndsBetweenBounds() { // Given: final Instant wstart = LOWER_INSTANT.minusMillis(5); final Instant wend = UPPER_INSTANT.minusMillis(1); givenSingleSession(wstart, wend); // When: final Iterator<WindowedRow> rowIterator = table.get(A_KEY, PARTITION, Range.all(), WINDOW_END_BOUNDS).rowIterator; // Then: assertThat(rowIterator.next(), is( WindowedRow.of( SCHEMA, sessionKey(wstart, wend), A_VALUE, wend.toEpochMilli() ) )); }
@Override public Double parse(final String value) { return Double.parseDouble(value); }
@Test void assertParse() { assertThat(new PostgreSQLDoubleValueParser().parse("1"), is(1D)); }
public static Rating computeRating(@Nullable Double percent) { if (percent == null || percent >= 80.0D) { return A; } else if (percent >= 70.0D) { return B; } else if (percent >= 50.0D) { return C; } else if (percent >= 30.0D) { return D; } return E; }
@Test @UseDataProvider("values") public void compute_rating(double percent, Rating expectedRating) { assertThat(computeRating(percent)).isEqualTo(expectedRating); }
public void setTitle(CharSequence title) { TextView cta = findViewById(R.id.cta_title); cta.setText(title); }
@Test public void testSetsTheTitleFromAttribute() { Context context = ApplicationProvider.getApplicationContext(); final var rootTest = LayoutInflater.from(context).inflate(R.layout.test_search_layout, null); final AddOnStoreSearchView underTest = rootTest.findViewById(R.id.test_search_view); final TextView title = underTest.findViewById(R.id.cta_title); Assert.assertEquals("Search for add-ons", title.getText().toString()); underTest.setTitle("now this"); Assert.assertEquals("now this", title.getText().toString()); }
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR if (splittee == null || splitChar == null) { return new String[0]; } final String EMPTY_ELEMENT = ""; int spot; final int splitLength = splitChar.length(); final String adjacentSplit = splitChar + splitChar; final int adjacentSplitLength = adjacentSplit.length(); if (truncate) { while ((spot = splittee.indexOf(adjacentSplit)) != -1) { splittee = splittee.substring(0, spot + splitLength) + splittee.substring(spot + adjacentSplitLength, splittee.length()); } if (splittee.startsWith(splitChar)) { splittee = splittee.substring(splitLength); } if (splittee.endsWith(splitChar)) { // Remove trailing splitter splittee = splittee.substring(0, splittee.length() - splitLength); } } List<String> returns = new ArrayList<>(); final int length = splittee.length(); // This is the new length int start = 0; spot = 0; while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) { if (spot > 0) { returns.add(splittee.substring(start, spot)); } else { returns.add(EMPTY_ELEMENT); } start = spot + splitLength; } if (start < length) { returns.add(splittee.substring(start)); } else if (spot == length - splitLength) {// Found splitChar at end of line returns.add(EMPTY_ELEMENT); } return returns.toArray(new String[returns.size()]); }
@Test public void testSplitStringNullString() { Assertions.assertThrows( NullPointerException.class, () -> JOrphanUtils.split("a,bc,,", null, "?")); }
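A small illustrative sketch (separate from the test above) of how the truncate flag changes the result of this split overload; it assumes only that JOrphanUtils from org.apache.jorphan.util is on the classpath, and the class name is illustrative.

import java.util.Arrays;
import org.apache.jorphan.util.JOrphanUtils;

public final class SplitSketch {
    public static void main(String[] args) {
        // truncate=true collapses adjacent separators and trims leading/trailing ones: prints [a, b]
        System.out.println(Arrays.toString(JOrphanUtils.split("a,,b,", ",", true)));
        // truncate=false keeps the empty elements produced by adjacent and trailing separators: prints [a, , b, ]
        System.out.println(Arrays.toString(JOrphanUtils.split("a,,b,", ",", false)));
    }
}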
@Override public void execute(SensorContext context) { for (InputFile file : context.fileSystem().inputFiles(context.fileSystem().predicates().hasLanguages(Xoo.KEY))) { processSignificantCodeFile(file, context); } }
@Test public void testExecution() throws IOException { File significantCode = new File(baseDir, "src/foo.xoo.significantCode"); FileUtils.write(significantCode, "1,1,4\n2,2,5"); context.fileSystem().add(inputFile); sensor.execute(context); assertThat(context.significantCodeTextRange("foo:src/foo.xoo", 1)).isEqualTo(range(1, 1, 4)); assertThat(context.significantCodeTextRange("foo:src/foo.xoo", 2)).isEqualTo(range(2, 2, 5)); }
public Preference<Boolean> getBoolean(@StringRes int prefKey, @BoolRes int defaultValue) { return mRxSharedPreferences.getBoolean( mResources.getString(prefKey), mResources.getBoolean(defaultValue)); }
@Test public void testConvertBottomGenericRow() { SharedPrefsHelper.setPrefsValue( "settings_key_ext_kbd_bottom_row_key", "3DFFC2AD-8BC8-47F3-962A-918156AD8DD0"); SharedPrefsHelper.setPrefsValue(RxSharedPrefs.CONFIGURATION_VERSION, 10); SharedPreferences preferences = PreferenceManager.getDefaultSharedPreferences(getApplicationContext()); Assert.assertFalse( preferences.contains("ext_kbd_enabled_1_3DFFC2AD-8BC8-47F3-962A-918156AD8DD0")); new RxSharedPrefs(getApplicationContext(), this::testRestoreFunction); Assert.assertTrue( preferences.contains("ext_kbd_enabled_1_3DFFC2AD-8BC8-47F3-962A-918156AD8DD0")); Assert.assertTrue( preferences.getBoolean("ext_kbd_enabled_1_3DFFC2AD-8BC8-47F3-962A-918156AD8DD0", false)); }
public SpeedStat getReadSpeedStat() { return mReadSpeedStat; }
@Test public void statJson() throws Exception { IOTaskResult result = new IOTaskResult(); // Reading 200MB took 1s result.addPoint(new IOTaskResult.Point(IOTaskResult.IOMode.READ, 1L, 200 * 1024 * 1024)); // Reading 196MB took 1s result.addPoint(new IOTaskResult.Point(IOTaskResult.IOMode.READ, 1L, 196 * 1024 * 1024)); IOTaskSummary summary = new IOTaskSummary(result); IOTaskSummary.SpeedStat stat = summary.getReadSpeedStat(); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(stat); IOTaskSummary.SpeedStat other = mapper.readValue(json, IOTaskSummary.SpeedStat.class); checkEquality(stat, other); }
public static String notEmpty(String str, String name) { if (str == null) { throw new IllegalArgumentException(name + " cannot be null"); } if (str.length() == 0) { throw new IllegalArgumentException(name + " cannot be empty"); } return str; }
@Test public void notEmptyNotEmtpy() { assertEquals(Check.notEmpty("value", "name"), "value"); }
@Override @Transactional public void updateProduct(Integer id, String title, String details) { this.productRepository.findById(id) .ifPresentOrElse(product -> { product.setTitle(title); product.setDetails(details); }, () -> { throw new NoSuchElementException(); }); }
@Test void updateProduct_ProductDoesNotExist_ThrowsNoSuchElementException() { // given var productId = 1; var title = "Новое название"; var details = "Новое описание"; // when assertThrows(NoSuchElementException.class, () -> this.service .updateProduct(productId, title, details)); // then verify(this.productRepository).findById(productId); verifyNoMoreInteractions(this.productRepository); }
@Override public long extractWatermark(IcebergSourceSplit split) { return split.task().files().stream() .map( scanTask -> { Preconditions.checkArgument( scanTask.file().lowerBounds() != null && scanTask.file().lowerBounds().get(eventTimeFieldId) != null, "Missing statistics for column name = %s in file = %s", eventTimeFieldName, eventTimeFieldId, scanTask.file()); return timeUnit.toMillis( Conversions.fromByteBuffer( Types.LongType.get(), scanTask.file().lowerBounds().get(eventTimeFieldId))); }) .min(Comparator.comparingLong(l -> l)) .get(); }
@TestTemplate public void testTimeUnit() throws IOException { assumeThat(columnName).isEqualTo("long_column"); ColumnStatsWatermarkExtractor extractor = new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MICROSECONDS); assertThat(extractor.extractWatermark(split(0))) .isEqualTo(MIN_VALUES.get(0).get(columnName) / 1000L); }
public boolean isChangeExpiryOnUpdate() { return changeExpiryOnUpdate; }
@Test public void changeExpiryOnUpdate_is_true_when_default() { State state = new State(null, null); assertTrue(state.isChangeExpiryOnUpdate()); }
public static PostgreSQLCommandPacket newInstance(final CommandPacketType commandPacketType, final PostgreSQLPacketPayload payload) { if (!OpenGaussCommandPacketType.isExtendedProtocolPacketType(commandPacketType)) { payload.getByteBuf().skipBytes(1); return getCommandPacket(commandPacketType, payload); } List<PostgreSQLCommandPacket> result = new ArrayList<>(); while (payload.hasCompletePacket()) { CommandPacketType type = OpenGaussCommandPacketType.valueOf(payload.readInt1()); int length = payload.getByteBuf().getInt(payload.getByteBuf().readerIndex()); PostgreSQLPacketPayload slicedPayload = new PostgreSQLPacketPayload(payload.getByteBuf().readSlice(length), payload.getCharset()); result.add(getCommandPacket(type, slicedPayload)); } return new PostgreSQLAggregatedCommandPacket(result); }
@Test void assertNewOpenGaussComBatchBindPacket() { when(payload.getByteBuf()).thenReturn(mock(ByteBuf.class)); assertThat(OpenGaussCommandPacketFactory.newInstance(OpenGaussCommandPacketType.BATCH_BIND_COMMAND, payload), instanceOf(PostgreSQLAggregatedCommandPacket.class)); }
@Override public ProtobufSystemInfo.Section toProtobuf() { ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder(); protobuf.setName("System"); setAttribute(protobuf, "Server ID", server.getId()); setAttribute(protobuf, "Version", getVersion()); setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel()); setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode()); setAttribute(protobuf, "Container", containerSupport.isRunningInContainer()); setAttribute(protobuf, "External Users and Groups Provisioning", commonSystemInformation.getManagedInstanceProviderName()); setAttribute(protobuf, "External User Authentication", commonSystemInformation.getExternalUserAuthentication()); addIfNotEmpty(protobuf, "Accepted external identity providers", commonSystemInformation.getEnabledIdentityProviders()); addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up", commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders()); setAttribute(protobuf, "High Availability", false); setAttribute(protobuf, "Official Distribution", officialDistribution.check()); setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication()); setAttribute(protobuf, "Home Dir", config.get(PATH_HOME.getKey()).orElse(null)); setAttribute(protobuf, "Data Dir", config.get(PATH_DATA.getKey()).orElse(null)); setAttribute(protobuf, "Temp Dir", config.get(PATH_TEMP.getKey()).orElse(null)); setAttribute(protobuf, "Processors", Runtime.getRuntime().availableProcessors()); return protobuf.build(); }
@Test public void return_Lines_of_Codes_from_StatisticsSupport(){ when(statisticsSupport.getLinesOfCode()).thenReturn(17752L); ProtobufSystemInfo.Section protobuf = underTest.toProtobuf(); assertThatAttributeIs(protobuf,"Lines of Code", 17752L); }
public static Map<String, String> resolveMessagesForTemplate(final Locale locale, ThemeContext theme) {
    // Compute all the resource names we should use: *_gl_ES-gheada.properties, *_gl_ES
    // .properties, _gl.properties...
    // The order here is important: as we will let values from more specific files
    // overwrite those in less specific,
    // (e.g. a value for gl_ES will have more precedence than a value for gl). So we will
    // iterate these resource
    // names from less specific to more specific.
    final List<String> messageResourceNames = computeMessageResourceNamesFromBase(locale);

    // Build the combined messages
    Map<String, String> combinedMessages = null;
    for (final String messageResourceName : messageResourceNames) {
        try {
            final Reader messageResourceReader = messageReader(messageResourceName, theme);
            if (messageResourceReader != null) {
                final Properties messageProperties = readMessagesResource(messageResourceReader);
                if (messageProperties != null && !messageProperties.isEmpty()) {
                    if (combinedMessages == null) {
                        combinedMessages = new HashMap<>(20);
                    }
                    for (final Map.Entry<Object, Object> propertyEntry : messageProperties.entrySet()) {
                        combinedMessages.put((String) propertyEntry.getKey(), (String) propertyEntry.getValue());
                    }
                }
            }
        } catch (final IOException ignored) {
            // File might not exist, simply try the next one
        }
    }
    if (combinedMessages == null) {
        return EMPTY_MESSAGES;
    }
    return Collections.unmodifiableMap(combinedMessages);
}
@Test void resolveMessagesForTemplateForDefault() throws URISyntaxException { Map<String, String> properties = ThemeMessageResolutionUtils.resolveMessagesForTemplate(Locale.CHINESE, getTheme()); assertThat(properties).hasSize(1); assertThat(properties).containsEntry("index.welcome", "欢迎来到首页"); }
@Override public void onWorkflowFinalized(Workflow workflow) { WorkflowSummary summary = StepHelper.retrieveWorkflowSummary(objectMapper, workflow.getInput()); WorkflowRuntimeSummary runtimeSummary = retrieveWorkflowRuntimeSummary(workflow); String reason = workflow.getReasonForIncompletion(); LOG.info( "Workflow {} with execution_id [{}] is finalized with internal state [{}] and reason [{}]", summary.getIdentity(), workflow.getWorkflowId(), workflow.getStatus(), reason); metrics.counter( MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC, getClass(), TYPE_TAG, "onWorkflowFinalized", MetricConstants.STATUS_TAG, workflow.getStatus().name()); if (reason != null && workflow.getStatus() == Workflow.WorkflowStatus.FAILED && reason.startsWith(MaestroStartTask.DEDUP_FAILURE_PREFIX)) { LOG.info( "Workflow {} with execution_id [{}] has not actually started, thus skip onWorkflowFinalized.", summary.getIdentity(), workflow.getWorkflowId()); return; // special case doing nothing } WorkflowInstance.Status instanceStatus = instanceDao.getWorkflowInstanceStatus( summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId()); if (instanceStatus == null || (instanceStatus.isTerminal() && workflow.getStatus().isTerminal())) { LOG.info( "Workflow {} with execution_id [{}] does not exist or already " + "in a terminal state [{}] with internal state [{}], thus skip onWorkflowFinalized.", summary.getIdentity(), workflow.getWorkflowId(), instanceStatus, workflow.getStatus()); return; } Map<String, Task> realTaskMap = TaskHelper.getUserDefinedRealTaskMap(workflow); // cancel internally failed tasks realTaskMap.values().stream() .filter(task -> !StepHelper.retrieveStepStatus(task.getOutputData()).isTerminal()) .forEach(task -> maestroTask.cancel(workflow, task, null)); WorkflowRuntimeOverview overview = TaskHelper.computeOverview( objectMapper, summary, runtimeSummary.getRollupBase(), realTaskMap); try { validateAndUpdateOverview(overview, summary); switch (workflow.getStatus()) { case TERMINATED: // stopped due to stop request if (reason != null && reason.startsWith(FAILURE_REASON_PREFIX)) { update(workflow, WorkflowInstance.Status.FAILED, summary, overview); } else { update(workflow, WorkflowInstance.Status.STOPPED, summary, overview); } break; case TIMED_OUT: update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview); break; default: // other status (FAILED, COMPLETED, PAUSED, RUNNING) to be handled here. Optional<Task.Status> done = TaskHelper.checkProgress(realTaskMap, summary, overview, true); switch (done.orElse(Task.Status.IN_PROGRESS)) { /** * This is a special status to indicate that the workflow has succeeded. Check {@link * TaskHelper#checkProgress} for more details. 
*/ case FAILED_WITH_TERMINAL_ERROR: WorkflowInstance.Status nextStatus = AggregatedViewHelper.deriveAggregatedStatus( instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview); if (!nextStatus.isTerminal()) { throw new MaestroInternalError( "Invalid status: [%s], expecting a terminal one", nextStatus); } update(workflow, nextStatus, summary, overview); break; case FAILED: case CANCELED: // due to step failure update(workflow, WorkflowInstance.Status.FAILED, summary, overview); break; case TIMED_OUT: update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview); break; // all other status are invalid default: metrics.counter( MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC, getClass(), TYPE_TAG, "invalidStatusOnWorkflowFinalized"); throw new MaestroInternalError( "Invalid status [%s] onWorkflowFinalized", workflow.getStatus()); } break; } } catch (MaestroInternalError | IllegalArgumentException e) { // non-retryable error and still fail the instance LOG.warn("onWorkflowFinalized is failed with a non-retryable error", e); metrics.counter( MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC, getClass(), TYPE_TAG, "nonRetryableErrorOnWorkflowFinalized"); update( workflow, WorkflowInstance.Status.FAILED, summary, overview, Details.create( e.getMessage(), "onWorkflowFinalized is failed with non-retryable error.")); } }
@Test public void testWorkflowFinalizedTimedOut() { when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.TIMED_OUT); when(instanceDao.getWorkflowInstanceStatus(eq("test-workflow-id"), anyLong(), anyLong())) .thenReturn(WorkflowInstance.Status.IN_PROGRESS); statusListener.onWorkflowFinalized(workflow); Assert.assertEquals( 1L, metricRepo .getCounter( MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC, MaestroWorkflowStatusListener.class, "type", "onWorkflowFinalized", "status", "TIMED_OUT") .count()); verify(instanceDao, times(1)) .updateWorkflowInstance( any(), any(), any(), eq(WorkflowInstance.Status.TIMED_OUT), anyLong()); verify(publisher, times(1)).publishOrThrow(any(), any()); }
@Override public double sd() { return Math.sqrt(2 * nu); }
@Test public void testSd() { System.out.println("sd"); ChiSquareDistribution instance = new ChiSquareDistribution(20); instance.rand(); assertEquals(Math.sqrt(40), instance.sd(), 1E-7); }
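Editor's note: the expected value in the test above follows directly from the chi-square variance formula, Var = 2ν for ν degrees of freedom, so sd() = sqrt(2ν) and sqrt(2 * 20) = sqrt(40). A minimal, hypothetical sanity check of that relationship (not part of the original suite, using only the constructor and sd() shown above):

// Hypothetical check, not from the original test suite:
// sd() should equal sqrt(2 * nu) for any degrees of freedom nu.
ChiSquareDistribution chisq = new ChiSquareDistribution(7);
assertEquals(Math.sqrt(2 * 7), chisq.sd(), 1E-7);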
@Override public List<LogicalSlot> peakSlotsToAllocate(SlotTracker slotTracker) { updateOptionsPeriodically(); List<LogicalSlot> slotsToAllocate = Lists.newArrayList(); int curNumAllocatedSmallSlots = numAllocatedSmallSlots; for (SlotContext slotContext : requiringSmallSlots.values()) { LogicalSlot slot = slotContext.getSlot(); if (curNumAllocatedSmallSlots + slot.getNumPhysicalSlots() > opts.v2().getTotalSmallSlots()) { break; } requiringQueue.remove(slotContext); slotsToAllocate.add(slot); slotContext.setAllocateAsSmallSlot(); curNumAllocatedSmallSlots += slot.getNumPhysicalSlots(); } int numAllocatedSlots = slotTracker.getNumAllocatedSlots() - numAllocatedSmallSlots; while (!requiringQueue.isEmpty()) { SlotContext slotContext = requiringQueue.peak(); if (!isGlobalSlotAvailable(numAllocatedSlots, slotContext.getSlot())) { break; } requiringQueue.poll(); slotsToAllocate.add(slotContext.getSlot()); numAllocatedSlots += slotContext.getSlot().getNumPhysicalSlots(); } return slotsToAllocate; }
@Test public void testSmallSlot() { QueryQueueOptions opts = QueryQueueOptions.createFromEnv(); SlotSelectionStrategyV2 strategy = new SlotSelectionStrategyV2(); SlotTracker slotTracker = new SlotTracker(ImmutableList.of(strategy)); LogicalSlot largeSlot = generateSlot(opts.v2().getTotalSlots() - 1); List<LogicalSlot> smallSlots = IntStream.range(0, NUM_CORES + 2) .mapToObj(i -> generateSlot(1)) .collect(Collectors.toList()); // 1. Require and allocate the large slot with `totalSlots - 1` slots. slotTracker.requireSlot(largeSlot); List<LogicalSlot> peakSlots = strategy.peakSlotsToAllocate(slotTracker); assertThat(peakSlots).containsExactly(largeSlot); slotTracker.allocateSlot(largeSlot); // 2. Require `NUM_CORES + 2` small slots. for (LogicalSlot smallSlot : smallSlots) { slotTracker.requireSlot(smallSlot); } // 3. Could peak and allocate `NUM_CORES + 1` small slots: // - `NUM_CORES` small slot are allocated as small slots. // - 1 small slot is allocated as a non-small slot. List<LogicalSlot> peakSmallSlots = strategy.peakSlotsToAllocate(slotTracker); assertThat(peakSmallSlots).hasSize(NUM_CORES + 1); for (LogicalSlot peakSmallSlot : peakSmallSlots) { slotTracker.allocateSlot(peakSmallSlot); } // 4. Release the large slot and then the rest one small slot could be peaked. peakSlots = strategy.peakSlotsToAllocate(slotTracker); assertThat(peakSlots).isEmpty(); slotTracker.releaseSlot(largeSlot.getSlotId()); peakSlots = strategy.peakSlotsToAllocate(slotTracker); assertThat(peakSlots).hasSize(1); slotTracker.allocateSlot(peakSlots.get(0)); assertThat(slotTracker.getNumAllocatedSlots()).isEqualTo(smallSlots.size()); // 5. Require `10+numAvailableSlots` small slots. final int numAvailableSlots = opts.v2().getTotalSlots() + opts.v2().getTotalSmallSlots() - smallSlots.size(); List<LogicalSlot> smallSlots2 = IntStream.range(0, 10 + numAvailableSlots) .mapToObj(i -> generateSlot(1)) .collect(Collectors.toList()); smallSlots2.forEach(slotTracker::requireSlot); // 6. Could peak and allocate `numAvailableSlots` small slots. peakSmallSlots = strategy.peakSlotsToAllocate(slotTracker); peakSmallSlots.forEach(slotTracker::allocateSlot); assertThat(peakSmallSlots).hasSize(numAvailableSlots); assertThat(slotTracker.getNumAllocatedSlots()).isEqualTo(opts.v2().getTotalSlots() + opts.v2().getTotalSmallSlots()); // 7. Release 10 small slots and then the rest small slots could be peaked and allocated. smallSlots.stream().limit(10).forEach(slot -> slotTracker.releaseSlot(slot.getSlotId())); peakSmallSlots = strategy.peakSlotsToAllocate(slotTracker); peakSmallSlots.forEach(slotTracker::allocateSlot); assertThat(peakSmallSlots).hasSize(10); assertThat(slotTracker.getNumAllocatedSlots()).isEqualTo(opts.v2().getTotalSlots() + opts.v2().getTotalSmallSlots()); }
@GetMapping("/readiness") public ResponseEntity<String> readiness(HttpServletRequest request) { ReadinessResult result = ModuleHealthCheckerHolder.getInstance().checkReadiness(); if (result.isSuccess()) { return ResponseEntity.ok().body("OK"); } return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(result.getResultMessage()); }
@Test
void testReadinessBothFailure() {
    // Config and Naming are not in readiness
    Mockito.when(configInfoPersistService.configInfoCount(any(String.class)))
            .thenThrow(new RuntimeException("HealthControllerTest.testReadiness"));
    Mockito.when(serverStatusManager.getServerStatus()).thenThrow(new RuntimeException("HealthControllerTest.testReadiness"));
    ResponseEntity<String> response = healthController.readiness(null);
    assertEquals(500, response.getStatusCodeValue());
    assertEquals("naming and config not in readiness", response.getBody());
}
@Override
public VersionedKeyValueStore<K, V> build() {
    final KeyValueStore<Bytes, byte[]> store = storeSupplier.get();
    if (!(store instanceof VersionedBytesStore)) {
        throw new IllegalStateException("VersionedBytesStoreSupplier.get() must return an instance of VersionedBytesStore");
    }
    return new MeteredVersionedKeyValueStore<>(
        maybeWrapLogging((VersionedBytesStore) store), // no caching layer for versioned stores
        storeSupplier.metricsScope(),
        time,
        keySerde,
        valueSerde);
}
@Test public void shouldNotHaveChangeLoggingStoreWhenDisabled() { setUp(); final VersionedKeyValueStore<String, String> store = builder .withLoggingDisabled() .build(); assertThat(store, instanceOf(MeteredVersionedKeyValueStore.class)); final StateStore next = ((WrappedStateStore) store).wrapped(); assertThat(next, equalTo(inner)); }
@Override
public Long createSmsLog(String mobile, Long userId, Integer userType, Boolean isSend,
                         SmsTemplateDO template, String templateContent, Map<String, Object> templateParams) {
    SmsLogDO.SmsLogDOBuilder logBuilder = SmsLogDO.builder();
    // Set the send status based on whether the message should actually be sent
    logBuilder.sendStatus(Objects.equals(isSend, true) ? SmsSendStatusEnum.INIT.getStatus()
            : SmsSendStatusEnum.IGNORE.getStatus());
    // Set the mobile-related fields
    logBuilder.mobile(mobile).userId(userId).userType(userType);
    // Set the template-related fields
    logBuilder.templateId(template.getId()).templateCode(template.getCode()).templateType(template.getType());
    logBuilder.templateContent(templateContent).templateParams(templateParams)
            .apiTemplateId(template.getApiTemplateId());
    // Set the channel-related fields
    logBuilder.channelId(template.getChannelId()).channelCode(template.getChannelCode());
    // Set the receive-related fields
    logBuilder.receiveStatus(SmsReceiveStatusEnum.INIT.getStatus());
    // Insert into the database
    SmsLogDO logDO = logBuilder.build();
    smsLogMapper.insert(logDO);
    return logDO.getId();
}
@Test
public void testCreateSmsLog() {
    // Prepare parameters
    String mobile = randomString();
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    Boolean isSend = randomBoolean();
    SmsTemplateDO templateDO = randomPojo(SmsTemplateDO.class,
            o -> o.setType(randomEle(SmsTemplateTypeEnum.values()).getType()));
    String templateContent = randomString();
    Map<String, Object> templateParams = randomTemplateParams();
    // Mock methods
    // Invoke
    Long logId = smsLogService.createSmsLog(mobile, userId, userType, isSend, templateDO, templateContent, templateParams);
    // Assert
    SmsLogDO logDO = smsLogMapper.selectById(logId);
    assertEquals(isSend ? SmsSendStatusEnum.INIT.getStatus() : SmsSendStatusEnum.IGNORE.getStatus(),
            logDO.getSendStatus());
    assertEquals(mobile, logDO.getMobile());
    assertEquals(userType, logDO.getUserType());
    assertEquals(userId, logDO.getUserId());
    assertEquals(templateDO.getId(), logDO.getTemplateId());
    assertEquals(templateDO.getCode(), logDO.getTemplateCode());
    assertEquals(templateDO.getType(), logDO.getTemplateType());
    assertEquals(templateDO.getChannelId(), logDO.getChannelId());
    assertEquals(templateDO.getChannelCode(), logDO.getChannelCode());
    assertEquals(templateContent, logDO.getTemplateContent());
    assertEquals(templateParams, logDO.getTemplateParams());
    assertEquals(SmsReceiveStatusEnum.INIT.getStatus(), logDO.getReceiveStatus());
}
@Override
public ExecuteContext after(ExecuteContext context) {
    // Higher versions of Properties will call the Map method to avoid duplicate entries
    if (MarkUtils.getMark() != null) {
        return context;
    }
    if (handler != null) {
        handler.doAfter(context);
    }
    cacheKafkaConsumer(context);
    return context;
}
@Test public void testAfter() { ExecuteContext context = ExecuteContext.forMemberMethod(mockConsumer, null, null, null, null); interceptor.after(context); KafkaConsumerWrapper wrapper = KafkaConsumerController.getKafkaConsumerCache() .get(mockConsumer.hashCode()); Assert.assertNotNull(wrapper); Assert.assertEquals(mockConsumer, wrapper.getKafkaConsumer()); Assert.assertEquals("default", wrapper.getApplication()); Assert.assertEquals("default", wrapper.getService()); Assert.assertEquals("default", wrapper.getZone()); Assert.assertEquals("default", wrapper.getProject()); Assert.assertEquals("", wrapper.getEnvironment()); Assert.assertFalse(wrapper.isAssign()); Assert.assertFalse(wrapper.getIsConfigChanged().get()); Assert.assertEquals(new HashSet<>(), wrapper.getOriginalTopics()); Assert.assertEquals(new HashSet<>(), wrapper.getOriginalPartitions()); Assert.assertEquals(NetworkUtils.getMachineIp(), wrapper.getServerAddress()); }
@Override public String get(String url, String path) throws RestClientException { return get(url, path, String.class); }
@Test public void testGetWithType() throws RestClientException{ Pet pet = restClientTemplate.get("https://localhost:9991", "/v1/pets/1", Pet.class); assertTrue(pet.getId()==1); }
public void init(Collection<T> allObjectsToTest) { }
@Test @UseDataProvider("conditionCombinations") public void join_inits_all_conditions(ConditionCombination combination) { ConditionWithInitAndFinish one = someCondition("one"); ConditionWithInitAndFinish two = someCondition("two"); combination.combine(one, two).init(singleton("init")); assertThat(one.allObjectsToTest).containsExactly("init"); assertThat(two.allObjectsToTest).containsExactly("init"); }
public static <T> T decodeFromByteArray(Coder<T> coder, byte[] encodedValue) throws CoderException { return decodeFromByteArray(coder, encodedValue, Coder.Context.OUTER); }
@Test public void testClosingCoderFailsWhenDecodingByteArray() throws Exception { expectedException.expect(UnsupportedOperationException.class); expectedException.expectMessage("Caller does not own the underlying"); CoderUtils.decodeFromByteArray(new ClosingCoder(), new byte[0]); }
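A hedged usage sketch for the happy path of decodeFromByteArray, pairing it with CoderUtils.encodeToByteArray (assumed to be the matching encode helper in the same utility class) and Beam's StringUtf8Coder:

// Round-trip sketch: encode a value, then decode it back with the same coder.
// Assumes CoderUtils.encodeToByteArray and StringUtf8Coder are available, as in the Beam SDK.
StringUtf8Coder coder = StringUtf8Coder.of();
byte[] bytes = CoderUtils.encodeToByteArray(coder, "hello");
String decoded = CoderUtils.decodeFromByteArray(coder, bytes);
assertEquals("hello", decoded);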
@Override @MethodNotAvailable public CompletionStage<Void> setAsync(K key, V value) { throw new MethodNotAvailableException(); }
@Test(expected = MethodNotAvailableException.class) public void testSetAsyncWithExpiryPolicy() { ExpiryPolicy expiryPolicy = new HazelcastExpiryPolicy(1, 1, 1, TimeUnit.MILLISECONDS); adapter.setAsync(42, "value", expiryPolicy); }
@Override public QuoteCharacter getQuoteCharacter() { return QuoteCharacter.QUOTE; }
@Test void assertGetQuoteCharacter() { assertThat(dialectDatabaseMetaData.getQuoteCharacter(), is(QuoteCharacter.QUOTE)); }
@Override
public Headers headers() {
    if (recordContext == null) {
        // This is only exposed via the deprecated ProcessorContext,
        // in which case, we're preserving the pre-existing behavior
        // of returning dummy values when the record context is undefined.
        // For headers, the dummy value is an empty headers collection.
        return new RecordHeaders();
    } else {
        return recordContext.headers();
    }
}
@Test public void shouldReturnHeadersFromRecordContext() { assertThat(context.headers(), equalTo(recordContext.headers())); }
@Override
public List<String> readFilesWithRetries(Sleeper sleeper, BackOff backOff)
    throws IOException, InterruptedException {
  IOException lastException = null;
  do {
    try {
      // Match inputPath which may contain a glob
      Collection<Metadata> files =
          Iterables.getOnlyElement(FileSystems.match(Collections.singletonList(filePattern)))
              .metadata();
      LOG.debug("Found {} file(s) by matching the path: {}", files.size(), filePattern);
      if (files.isEmpty() || !checkTotalNumOfFiles(files)) {
        continue;
      }
      // Read data from file paths
      return readLines(files);
    } catch (IOException e) {
      // Ignore and retry
      lastException = e;
      LOG.warn("Error in file reading. Ignore and retry.");
    }
  } while (BackOffUtils.next(sleeper, backOff));
  // Failed after max retries
  throw new IOException(
      String.format("Unable to read file(s) after retrying %d times", MAX_READ_RETRIES),
      lastException);
}
@Test public void testReadWithRetriesFailsWhenTemplateIncorrect() throws Exception { File tmpFile = tmpFolder.newFile(); Files.asCharSink(tmpFile, StandardCharsets.UTF_8).write("Test for file checksum verifier."); NumberedShardedFile shardedFile = new NumberedShardedFile(filePattern, Pattern.compile("incorrect-template")); thrown.expect(IOException.class); thrown.expectMessage( containsString( "Unable to read file(s) after retrying " + NumberedShardedFile.MAX_READ_RETRIES)); shardedFile.readFilesWithRetries(fastClock, backOff); }
public static boolean isFloatingNumber(String text) { final int startPos = findStartPosition(text); if (startPos < 0) { return false; } boolean dots = false; for (int i = startPos; i < text.length(); i++) { char ch = text.charAt(i); if (!Character.isDigit(ch)) { if (ch == '.') { if (dots) { return false; } dots = true; } else { return false; } } } return true; }
@Test @DisplayName("Tests that isFloatingNumber returns true for integers") void isFloatingNumberIntegers() { assertTrue(ObjectHelper.isFloatingNumber("1234")); assertTrue(ObjectHelper.isFloatingNumber("-1234")); assertTrue(ObjectHelper.isFloatingNumber("1")); assertTrue(ObjectHelper.isFloatingNumber("0")); }
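The test above only exercises integer inputs; a hedged sketch of additional cases implied by the implementation (a single dot is accepted, a second dot or any other non-digit rejects the input, and the leading sign is assumed to be handled by findStartPosition):

// Hypothetical extra cases, mirroring the dot-handling logic in isFloatingNumber.
assertTrue(ObjectHelper.isFloatingNumber("12.34"));   // single dot is accepted
assertTrue(ObjectHelper.isFloatingNumber("-0.5"));    // sign assumed handled by findStartPosition
assertFalse(ObjectHelper.isFloatingNumber("1.2.3"));  // second dot rejects the input
assertFalse(ObjectHelper.isFloatingNumber("12a4"));   // non-digit rejects the input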
public static JobIndexInfo getIndexInfo(String jhFileName) throws IOException {
  String fileName = jhFileName.substring(0, jhFileName.indexOf(JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION));
  JobIndexInfo indexInfo = new JobIndexInfo();
  String[] jobDetails = fileName.split(DELIMITER);
  JobID oldJobId = JobID.forName(decodeJobHistoryFileName(jobDetails[JOB_ID_INDEX]));
  JobId jobId = TypeConverter.toYarn(oldJobId);
  indexInfo.setJobId(jobId);
  // Do not fail if there are some minor parse errors
  try {
    try {
      indexInfo.setSubmitTime(Long.parseLong(
          decodeJobHistoryFileName(jobDetails[SUBMIT_TIME_INDEX])));
    } catch (NumberFormatException e) {
      LOG.warn("Unable to parse submit time from job history file "
          + jhFileName + " : " + e);
    }
    indexInfo.setUser(
        decodeJobHistoryFileName(jobDetails[USER_INDEX]));
    indexInfo.setJobName(
        decodeJobHistoryFileName(jobDetails[JOB_NAME_INDEX]));
    try {
      indexInfo.setFinishTime(Long.parseLong(
          decodeJobHistoryFileName(jobDetails[FINISH_TIME_INDEX])));
    } catch (NumberFormatException e) {
      LOG.warn("Unable to parse finish time from job history file "
          + jhFileName + " : " + e);
    }
    try {
      indexInfo.setNumMaps(Integer.parseInt(
          decodeJobHistoryFileName(jobDetails[NUM_MAPS_INDEX])));
    } catch (NumberFormatException e) {
      LOG.warn("Unable to parse num maps from job history file "
          + jhFileName + " : " + e);
    }
    try {
      indexInfo.setNumReduces(Integer.parseInt(
          decodeJobHistoryFileName(jobDetails[NUM_REDUCES_INDEX])));
    } catch (NumberFormatException e) {
      LOG.warn("Unable to parse num reduces from job history file "
          + jhFileName + " : " + e);
    }
    indexInfo.setJobStatus(
        decodeJobHistoryFileName(jobDetails[JOB_STATUS_INDEX]));
    indexInfo.setQueueName(
        decodeJobHistoryFileName(jobDetails[QUEUE_NAME_INDEX]));
    try {
      if (jobDetails.length <= JOB_START_TIME_INDEX) {
        indexInfo.setJobStartTime(indexInfo.getSubmitTime());
      } else {
        indexInfo.setJobStartTime(Long.parseLong(
            decodeJobHistoryFileName(jobDetails[JOB_START_TIME_INDEX])));
      }
    } catch (NumberFormatException e) {
      LOG.warn("Unable to parse start time from job history file "
          + jhFileName + " : " + e);
    }
  } catch (IndexOutOfBoundsException e) {
    LOG.warn("Parsing job history file with partial data encoded into name: "
        + jhFileName);
  }
  return indexInfo;
}
@Test public void testUserNamePercentDecoding() throws IOException { String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER, JOB_ID, SUBMIT_TIME, USER_NAME_WITH_DELIMITER_ESCAPE, JOB_NAME, FINISH_TIME, NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME, JOB_START_TIME); JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile); assertEquals(USER_NAME_WITH_DELIMITER, info.getUser(), "User name doesn't match"); }
@Override public InterpreterResult interpret(String st, InterpreterContext context) { return helper.interpret(session, st, context); }
@Test
void should_display_statistics_for_non_select_statement() {
    // Given
    String query = "USE zeppelin;\nCREATE TABLE IF NOT EXISTS no_select(id int PRIMARY KEY);";
    final String rawResult = reformatHtml(readTestResource(
        "/scalate/NoResultWithExecutionInfo.html"));

    // When
    final InterpreterResult actual = interpreter.interpret(query, intrContext);
    final int port = EmbeddedCassandraServerHelper.getNativeTransportPort();
    final String address = EmbeddedCassandraServerHelper.getHost();

    // Then
    final String expected = rawResult.replaceAll("TRIED_HOSTS", address + ":" + port)
        .replaceAll("QUERIED_HOSTS", address + ":" + port);
    assertEquals(Code.SUCCESS, actual.code());
    assertEquals(expected, reformatHtml(actual.message().get(0).getData()));
}
public Schema getSchema() { return context.getSchema(); }
@Test public void testEnumSchema() { ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(EnumMessage.getDescriptor()); Schema schema = schemaProvider.getSchema(); assertEquals(ENUM_SCHEMA, schema); }
public List<Pipeline> parsePipelines(String pipelines) throws ParseException { final ParseContext parseContext = new ParseContext(false); final SyntaxErrorListener errorListener = new SyntaxErrorListener(parseContext); final RuleLangLexer lexer = new RuleLangLexer(new ANTLRInputStream(pipelines)); lexer.removeErrorListeners(); lexer.addErrorListener(errorListener); final RuleLangParser parser = new RuleLangParser(new CommonTokenStream(lexer)); parser.setErrorHandler(new DefaultErrorStrategy()); parser.removeErrorListeners(); parser.addErrorListener(errorListener); final RuleLangParser.PipelineDeclsContext pipelineDeclsContext = parser.pipelineDecls(); WALKER.walk(new PipelineAstBuilder(parseContext), pipelineDeclsContext); if (parseContext.getErrors().isEmpty()) { return parseContext.pipelines; } throw new ParseException(parseContext.getErrors()); }
@Test void pipelineDeclaration() throws Exception { final List<Pipeline> pipelines = parser.parsePipelines(ruleForTest()); assertEquals(1, pipelines.size()); final Pipeline pipeline = Iterables.getOnlyElement(pipelines); assertEquals("cisco", pipeline.name()); assertEquals(2, pipeline.stages().size()); final Stage stage1 = pipeline.stages().first(); final Stage stage2 = pipeline.stages().last(); assertEquals(Stage.Match.ALL, stage1.match()); assertEquals(1, stage1.stage()); assertArrayEquals(new Object[]{"check_ip_whitelist", "cisco_device"}, stage1.ruleReferences().toArray()); assertEquals(Stage.Match.EITHER, stage2.match()); assertEquals(2, stage2.stage()); assertArrayEquals(new Object[]{"parse_cisco_time", "extract_src_dest", "normalize_src_dest", "lookup_ips", "resolve_ips"}, stage2.ruleReferences().toArray()); }
public UiTopoLayout parent(UiTopoLayoutId parentId) {
    if (isRoot()) {
        throw new IllegalArgumentException(E_ROOT_PARENT);
    }
    // TODO: consider checking ancestry chain to prevent loops
    parent = parentId;
    return this;
}
@Test public void setParentOnOther() { mkOtherLayout(); layout.parent(OTHER_ID); assertEquals("wrong parent", OTHER_ID, layout.parent()); layout.parent(null); assertEquals("non-null parent", null, layout.parent()); }
String filenameValidatorForInputFiles( String filename ) { Repository rep = getTransMeta().getRepository(); if ( rep != null && rep.isConnected() && filename .contains( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ) { return environmentSubstitute( filename.replace( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); } else { return environmentSubstitute( filename ); } }
@Test public void testFilenameValidatorForInputFilesConnectedToRep() { CsvInput csvInput = mock( CsvInput.class ); String internalEntryVariable = "internalEntryVariable"; String internalTransformationVariable = "internalTransformationVariable"; String filename = Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ; csvInput.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, internalEntryVariable ); csvInput.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, internalTransformationVariable ); TransMeta transmeta = mock( TransMeta.class ); Repository rep = mock( Repository.class ); when( csvInput.getTransMeta() ).thenReturn( transmeta ); when( transmeta.getRepository() ).thenReturn( rep ); when( rep.isConnected() ).thenReturn( true ); when( csvInput.filenameValidatorForInputFiles( any() ) ).thenCallRealMethod(); when( csvInput.environmentSubstitute( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ).thenReturn( internalEntryVariable ); when( csvInput.environmentSubstitute( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ).thenReturn( internalTransformationVariable ); String finalFilename = csvInput.filenameValidatorForInputFiles(filename); assertEquals( internalTransformationVariable, finalFilename ); }
private List<Class<?>> scanForClassesInPackage(String packageName, Predicate<Class<?>> classFilter) { requireValidPackageName(packageName); requireNonNull(classFilter, "classFilter must not be null"); List<URI> rootUris = getUrisForPackage(getClassLoader(), packageName); return findClassesForUris(rootUris, packageName, classFilter); }
@Test void scanForClassesInNonExistingPackage() { List<Class<?>> classes = scanner.scanForClassesInPackage("io.cucumber.core.resource.does.not.exist"); assertThat(classes, empty()); }
@Override public Path find() throws BackgroundException { return new MantaAccountHomeInfo(host.getCredentials().getUsername(), host.getDefaultPath()).getNormalizedHomePath(); }
@Test public void testHomeFeature() throws BackgroundException { final Path drive = new MantaHomeFinderFeature(session.getHost()).find(); assertNotNull(drive); assertFalse(drive.isRoot()); assertTrue(drive.isPlaceholder()); assertNotEquals("null", drive.getName()); assertFalse(StringUtils.isEmpty(drive.getName())); }
@Override // FsDatasetSpi public ReplicaHandler append(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException { try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME, b.getBlockPoolId(), getStorageUuidForLock(b))) { // If the block was successfully finalized because all packets // were successfully processed at the Datanode but the ack for // some of the packets were not received by the client. The client // re-opens the connection and retries sending those packets. // The other reason is that an "append" is occurring to this block. // check the validity of the parameter if (newGS < b.getGenerationStamp()) { throw new IOException("The new generation stamp " + newGS + " should be greater than the replica " + b + "'s generation stamp"); } ReplicaInfo replicaInfo = getReplicaInfo(b); LOG.info("Appending to " + replicaInfo); if (replicaInfo.getState() != ReplicaState.FINALIZED) { throw new ReplicaNotFoundException( ReplicaNotFoundException.UNFINALIZED_REPLICA + b); } if (replicaInfo.getNumBytes() != expectedBlockLen) { throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaInfo.getNumBytes() + " expected length is " + expectedBlockLen); } FsVolumeReference ref = replicaInfo.getVolume().obtainReference(); ReplicaInPipeline replica = null; try { replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes()); } catch (IOException e) { IOUtils.cleanupWithLogger(null, ref); throw e; } return new ReplicaHandler(replica, ref); } }
@Test(timeout = 30000) public void testAppend() { MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1) .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK}) .storagesPerDatanode(2) .build(); FileSystem fs = cluster.getFileSystem(); DataNode dataNode = cluster.getDataNodes().get(0); // Create test file Path filePath = new Path("testData"); FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset(); DFSTestUtil.createFile(fs, filePath, 100, (short) 1, 0); ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath); ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block); long oldMetaLength = replicaInfo.getMetadataLength(); long oldDfsUsed = fsDataSetImpl.getDfsUsed(); // Append to file int appendLength = 100; DFSTestUtil.appendFile(fs, filePath, appendLength); block = DFSTestUtil.getFirstBlock(fs, filePath); replicaInfo = fsDataSetImpl.getReplicaInfo(block); long newMetaLength = replicaInfo.getMetadataLength(); long newDfsUsed = fsDataSetImpl.getDfsUsed(); assert newDfsUsed == oldDfsUsed + appendLength + (newMetaLength - oldMetaLength) : "When appending a file, the dfsused statistics of datanode are incorrect."; } catch (Exception ex) { LOG.info("Exception in testAppend ", ex); fail("Exception while testing testAppend "); } finally { if (cluster.isClusterUp()) { cluster.shutdown(); } } }
public static String capitaliseFirstLetter(String string) { if (string == null || string.length() == 0) { return string; } else { return string.substring(0, 1).toUpperCase() + string.substring(1); } }
@Test public void testCapitaliseFirstLetter() { assertEquals(capitaliseFirstLetter(""), ("")); assertEquals(capitaliseFirstLetter("a"), ("A")); assertEquals(capitaliseFirstLetter("aa"), ("Aa")); assertEquals(capitaliseFirstLetter("A"), ("A")); assertEquals(capitaliseFirstLetter("Ab"), ("Ab")); }
public boolean isSevere() { return severe; }
@Test public void testIsSevere() throws Exception { assertTrue( exception.isSevere() ); }
public static DocumentBuilderFactory newSecureDocumentBuilderFactory() throws ParserConfigurationException { DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); dbf.setFeature(DISALLOW_DOCTYPE_DECL, true); dbf.setFeature(LOAD_EXTERNAL_DECL, false); dbf.setFeature(EXTERNAL_GENERAL_ENTITIES, false); dbf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false); dbf.setFeature(CREATE_ENTITY_REF_NODES, false); return dbf; }
@Test public void testSecureDocumentBuilderFactory() throws Exception { DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder(); Document doc = db.parse(new InputSource(new StringReader("<root/>"))); Assertions.assertThat(doc).describedAs("parsed document").isNotNull(); }
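Because the factory enables disallow-doctype-decl, a document that declares a DOCTYPE should be rejected; a hedged negative-path sketch (the exact exception type can vary by parser, SAXParseException is typical):

// Negative-path sketch: the hardened factory should refuse DOCTYPE declarations.
DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
try {
  db.parse(new InputSource(new StringReader(
      "<!DOCTYPE root [<!ENTITY xxe \"boom\">]><root>&xxe;</root>")));
  fail("expected the DOCTYPE declaration to be rejected");
} catch (SAXParseException expected) {
  // disallow-doctype-decl is in effect
}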
@VisibleForTesting
ZonedDateTime parseZoned(final String text, final ZoneId zoneId) {
  final TemporalAccessor parsed = formatter.parse(text);
  final ZoneId parsedZone = parsed.query(TemporalQueries.zone());

  ZonedDateTime resolved = DEFAULT_ZONED_DATE_TIME.apply(
      ObjectUtils.defaultIfNull(parsedZone, zoneId));

  for (final TemporalField override : ChronoField.values()) {
    if (parsed.isSupported(override)) {
      if (!resolved.isSupported(override)) {
        throw new KsqlException(
            "Unsupported temporal field in timestamp: " + text + " (" + override + ")");
      }

      final long value = parsed.getLong(override);
      if (override == ChronoField.DAY_OF_YEAR && value == LEAP_DAY_OF_THE_YEAR) {
        if (!parsed.isSupported(ChronoField.YEAR)) {
          throw new KsqlException("Leap day cannot be parsed without supplying the year field");
        }
        // eagerly override year, to avoid mismatch with epoch year, which is not a leap year
        resolved = resolved.withYear(parsed.get(ChronoField.YEAR));
      }
      resolved = resolved.with(override, value);
    }
  }

  return resolved;
}
@Test
public void shouldResolveDefaultsForDayOfYear() {
  // Given
  final String format = "DDD";
  final String timestamp = "100";

  // When
  final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID);

  // Then
  assertThat(ts, is(sameInstant(EPOCH.withDayOfYear(100).withZoneSameInstant(ZID))));
}
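The leap-day branch in parseZoned is not covered by the test above; a hedged sketch follows, assuming LEAP_DAY_OF_THE_YEAR corresponds to day 60 (Feb 29 of a leap year), so that a bare day-of-year pattern cannot disambiguate it without a year field and the guard throws:

// Hypothetical negative case, assuming day-of-year 60 trips the leap-day guard:
// "Leap day cannot be parsed without supplying the year field".
assertThrows(
    KsqlException.class,
    () -> new StringToTimestampParser("DDD").parseZoned("060", ZID));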