code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 3 to 1.05M | stringlengths 4 to 116 | stringlengths 4 to 991 | stringclasses 9 values | stringclasses 15 values | int32 3 to 1.05M
package com.modesteam.urutau.model.system;
public class MemberPager {
private Integer page;
private Integer limit;
public Integer getPage() {
return page;
}
public void setPage(Integer page) {
this.page = page;
}
public Integer getLimit() {
return limit;
}
public void setLimit(Integer limit) {
this.limit = limit;
}
}
| Modesteam/Urutau | src/main/java/com/modesteam/urutau/model/system/MemberPager.java | Java | apache-2.0 | 342 |
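A minimal sketch of how a pager bean like the one above is typically consumed, turning page and limit into a query offset. The 1-based page convention and the `MemberService` call are assumptions for illustration only, not part of the repository above.

```java
import com.modesteam.urutau.model.system.MemberPager;

public class MemberPagerExample {
    public static void main(String[] args) {
        MemberPager pager = new MemberPager();
        pager.setPage(2);   // assumed 1-based page index
        pager.setLimit(20); // items per page

        // Convert page/limit into the offset a paged query would use.
        int offset = (pager.getPage() - 1) * pager.getLimit();
        System.out.println("offset=" + offset + ", limit=" + pager.getLimit()); // offset=20, limit=20
        // memberService.listMembers(offset, pager.getLimit()); // hypothetical service call
    }
}
```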
#pragma warning disable 109, 114, 219, 429, 168, 162
namespace haxe.lang{
public class Exceptions {
public Exceptions(){
unchecked {
#line 25 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
{
}
}
#line default
}
[System.ThreadStaticAttribute]
public static global::System.Exception exception;
}
}
#pragma warning disable 109, 114, 219, 429, 168, 162
namespace haxe.lang{
public class HaxeException : global::System.Exception {
public HaxeException(object obj) : base(){
unchecked {
#line 41 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
if (( obj is global::haxe.lang.HaxeException )) {
#line 43 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
global::haxe.lang.HaxeException _obj = ((global::haxe.lang.HaxeException) (obj) );
obj = _obj.getObject();
}
#line 46 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
this.obj = obj;
}
#line default
}
public static global::System.Exception wrap(object obj){
unchecked {
#line 61 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
if (( obj is global::System.Exception )) {
#line 61 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
return ((global::System.Exception) (obj) );
}
#line 63 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
return new global::haxe.lang.HaxeException(((object) (obj) ));
}
#line default
}
public object obj;
public virtual object getObject(){
unchecked {
#line 51 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
return this.obj;
}
#line default
}
public virtual string toString(){
unchecked {
#line 56 "C:\\HaxeToolkit\\haxe\\std\\cs\\internal\\Exceptions.hx"
return global::haxe.lang.Runtime.concat("Haxe Exception: ", global::Std.@string(this.obj));
}
#line default
}
public override string ToString(){
return this.toString();
}
}
}
| Espigah/HaxeRepo | Learning/HaxeClient/unity/out/src/cs/internal/Exceptions.cs | C# | apache-2.0 | 2,036 |
/**
* Identify the names of all functions invoked within a given Expression
*
* Descend through the expression, identify every instance of a Function
* expression, and record the name of every function encountered.
*
* General usage is of the form
* ScanForFunctions.scan(myExpression)
*/
package mimir.context;
import java.util.ArrayList;
import java.util.List;
import net.sf.jsqlparser.expression.*;
public class ScanForFunctions extends ExpressionScan {
ArrayList<String> functions = new ArrayList<String>();
public ScanForFunctions(){ super(false); }
public void visit(Function fn) {
functions.add(fn.getName());
super.visit(fn);
}
public List<String> getFunctions(){ return functions; }
/**
* Compute a list of all function names in the given expression
*
* @param e An arbitrary expression
* @return A list of all function names in e
*/
public static List<String> scan(Expression e){
ScanForFunctions scan = new ScanForFunctions();
e.accept(scan);
return scan.getFunctions();
}
/**
* Determine if a given expression contains any aggregate function calls
*
* @param e An arbitrary expression
* @return true if e contains any aggregate functions as determined by
* the isAggregate method.
*/
public static boolean hasAggregate(Expression e){
for(String fn : scan(e)){
if(isAggregate(fn)){ return true; }
}
return false;
}
/**
* Determine if the given function name corresponds to a standard aggregate
* function.
*
* @param fn The name of a function
* @return true if fn corresponds to the name of an aggregate function.
*/
public static boolean isAggregate(String fn)
{
fn = fn.toUpperCase();
return "SUM".equals(fn)
|| "COUNT".equals(fn)
|| "AVG".equals(fn)
|| "STDDEV".equals(fn)
|| "MAX".equals(fn)
|| "MIN".equals(fn);
}
} | sophieyoung717/mimir | src/main/java/mimir/context/ScanForFunctions.java | Java | apache-2.0 | 1,951 |
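A hedged usage sketch for the `scan` and `hasAggregate` helpers above. The expression is built here with JSqlParser's `CCJSqlParserUtil`, which may not exist in the JSqlParser version this repository pins; treat the parsing call and the sample SQL as assumptions.

```java
import mimir.context.ScanForFunctions;
import net.sf.jsqlparser.expression.Expression;
import net.sf.jsqlparser.parser.CCJSqlParserUtil; // assumed available in this JSqlParser version

public class ScanForFunctionsExample {
    public static void main(String[] args) throws Exception {
        // Parse an arbitrary SQL expression containing two function calls.
        Expression e = CCJSqlParserUtil.parseExpression("SUM(price) + ABS(discount)");

        System.out.println(ScanForFunctions.scan(e));         // e.g. [SUM, ABS]
        System.out.println(ScanForFunctions.hasAggregate(e)); // true, because SUM is an aggregate
    }
}
```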
//// [invalidTaggedTemplateEscapeSequences.ts]
function tag (str: any, ...args: any[]): any {
return str
}
const a = tag`123`
const b = tag`123 ${100}`
const x = tag`\u{hello} ${ 100 } \xtraordinary ${ 200 } wonderful ${ 300 } \uworld`;
const y = `\u{hello} ${ 100 } \xtraordinary ${ 200 } wonderful ${ 300 } \uworld`; // should error with NoSubstitutionTemplate
const z = tag`\u{hello} \xtraordinary wonderful \uworld` // should work with Tagged NoSubstitutionTemplate
const a1 = tag`${ 100 }\0` // \0
const a2 = tag`${ 100 }\00` // \\00
const a3 = tag`${ 100 }\u` // \\u
const a4 = tag`${ 100 }\u0` // \\u0
const a5 = tag`${ 100 }\u00` // \\u00
const a6 = tag`${ 100 }\u000` // \\u000
const a7 = tag`${ 100 }\u0000` // \u0000
const a8 = tag`${ 100 }\u{` // \\u{
const a9 = tag`${ 100 }\u{10FFFF}` // \\u{10FFFF
const a10 = tag`${ 100 }\u{1f622` // \\u{1f622
const a11 = tag`${ 100 }\u{1f622}` // \u{1f622}
const a12 = tag`${ 100 }\x` // \\x
const a13 = tag`${ 100 }\x0` // \\x0
const a14 = tag`${ 100 }\x00` // \x00
//// [invalidTaggedTemplateEscapeSequences.js]
var __makeTemplateObject = (this && this.__makeTemplateObject) || function (cooked, raw) {
if (Object.defineProperty) { Object.defineProperty(cooked, "raw", { value: raw }); } else { cooked.raw = raw; }
return cooked;
};
function tag(str) {
var args = [];
for (var _i = 1; _i < arguments.length; _i++) {
args[_i - 1] = arguments[_i];
}
return str;
}
var a = tag(__makeTemplateObject(["123"], ["123"]));
var b = tag(__makeTemplateObject(["123 ", ""], ["123 ", ""]), 100);
var x = tag(__makeTemplateObject([void 0, void 0, " wonderful ", void 0], ["\\u{hello} ", " \\xtraordinary ", " wonderful ", " \\uworld"]), 100, 200, 300);
var y = "hello} ".concat(100, " traordinary ").concat(200, " wonderful ").concat(300, " world"); // should error with NoSubstitutionTemplate
var z = tag(__makeTemplateObject([void 0], ["\\u{hello} \\xtraordinary wonderful \\uworld"])); // should work with Tagged NoSubstitutionTemplate
var a1 = tag(__makeTemplateObject(["", "\0"], ["", "\\0"]), 100); // \0
var a2 = tag(__makeTemplateObject(["", void 0], ["", "\\00"]), 100); // \\00
var a3 = tag(__makeTemplateObject(["", void 0], ["", "\\u"]), 100); // \\u
var a4 = tag(__makeTemplateObject(["", void 0], ["", "\\u0"]), 100); // \\u0
var a5 = tag(__makeTemplateObject(["", void 0], ["", "\\u00"]), 100); // \\u00
var a6 = tag(__makeTemplateObject(["", void 0], ["", "\\u000"]), 100); // \\u000
var a7 = tag(__makeTemplateObject(["", "\0"], ["", "\\u0000"]), 100); // \u0000
var a8 = tag(__makeTemplateObject(["", void 0], ["", "\\u{"]), 100); // \\u{
var a9 = tag(__makeTemplateObject(["", "\uDBFF\uDFFF"], ["", "\\u{10FFFF}"]), 100); // \\u{10FFFF
var a10 = tag(__makeTemplateObject(["", void 0], ["", "\\u{1f622"]), 100); // \\u{1f622
var a11 = tag(__makeTemplateObject(["", "\uD83D\uDE22"], ["", "\\u{1f622}"]), 100); // \u{1f622}
var a12 = tag(__makeTemplateObject(["", void 0], ["", "\\x"]), 100); // \\x
var a13 = tag(__makeTemplateObject(["", void 0], ["", "\\x0"]), 100); // \\x0
var a14 = tag(__makeTemplateObject(["", "\0"], ["", "\\x00"]), 100); // \x00
| microsoft/TypeScript | tests/baselines/reference/invalidTaggedTemplateEscapeSequences(target=es5).js | JavaScript | apache-2.0 | 3,182 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from typing import Any, Dict, Optional, TYPE_CHECKING
from superset import is_feature_enabled
from superset.db_engine_specs.base import BaseEngineSpec
from superset.exceptions import SupersetException
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.connectors.sqla.models import TableColumn
from superset.models.core import Database
logger = logging.getLogger()
class DruidEngineSpec(BaseEngineSpec):
"""Engine spec for Druid.io"""
engine = "druid"
engine_name = "Apache Druid"
allows_joins = is_feature_enabled("DRUID_JOINS")
allows_subqueries = True
_time_grain_expressions = {
None: "{col}",
"PT1S": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT1S')",
"PT5S": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT5S')",
"PT30S": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT30S')",
"PT1M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT1M')",
"PT5M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT5M')",
"PT10M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT10M')",
"PT15M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT15M')",
"PT30M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT30M')",
"PT1H": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT1H')",
"PT6H": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT6H')",
"P1D": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1D')",
"P1W": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1W')",
"P1M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1M')",
"P3M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P3M')",
"P1Y": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1Y')",
"P1W/1970-01-03T00:00:00Z": (
"TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST({col} AS TIMESTAMP), "
"'P1D', 1), 'P1W'), 'P1D', 5)"
),
"1969-12-28T00:00:00Z/P1W": (
"TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST({col} AS TIMESTAMP), "
"'P1D', 1), 'P1W'), 'P1D', -1)"
),
}
@classmethod
def alter_new_orm_column(cls, orm_col: "TableColumn") -> None:
if orm_col.column_name == "__time":
orm_col.is_dttm = True
@staticmethod
def get_extra_params(database: "Database") -> Dict[str, Any]:
"""
For Druid, the path to an SSL certificate is placed in `connect_args`.
:param database: database instance from which to extract extras
:raises CertificateException: If certificate is not valid/unparseable
:raises SupersetException: If database extra json payload is unparseable
"""
try:
extra = json.loads(database.extra or "{}")
except json.JSONDecodeError as ex:
raise SupersetException("Unable to parse database extras") from ex
if database.server_cert:
engine_params = extra.get("engine_params", {})
connect_args = engine_params.get("connect_args", {})
connect_args["scheme"] = "https"
path = utils.create_ssl_cert_file(database.server_cert)
connect_args["ssl_verify_cert"] = path
engine_params["connect_args"] = connect_args
extra["engine_params"] = engine_params
return extra
@classmethod
def convert_dttm(
cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"CAST(TIME_PARSE('{dttm.date().isoformat()}') AS DATE)"
if tt in (utils.TemporalType.DATETIME, utils.TemporalType.TIMESTAMP):
return f"""TIME_PARSE('{dttm.isoformat(timespec="seconds")}')"""
return None
@classmethod
def epoch_to_dttm(cls) -> str:
"""
Convert from number of seconds since the epoch to a timestamp.
"""
return "MILLIS_TO_TIMESTAMP({col} * 1000)"
@classmethod
def epoch_ms_to_dttm(cls) -> str:
"""
Convert from number of milliseconds since the epoch to a timestamp.
"""
return "MILLIS_TO_TIMESTAMP({col})"
| apache/incubator-superset | superset/db_engine_specs/druid.py | Python | apache-2.0 | 4,908 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.lucene;
import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.camel.Exchange;
import org.apache.camel.converter.IOConverter;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class LuceneIndexer {
private static final transient Logger LOG = LoggerFactory.getLogger(LuceneIndexer.class);
private File sourceDirectory;
private Analyzer analyzer;
private NIOFSDirectory niofsDirectory;
private IndexWriter indexWriter;
private boolean sourceDirectoryIndexed;
private boolean indexCreated;
public LuceneIndexer(File sourceDirectory, File indexDirectory, Analyzer analyzer) throws Exception {
if (indexDirectory != null) {
if (!indexDirectory.exists()) {
indexDirectory.mkdir();
}
this.setNiofsDirectory(new NIOFSDirectory(indexDirectory));
} else {
this.setNiofsDirectory(new NIOFSDirectory(new File("./indexDirectory")));
}
this.setAnalyzer(analyzer);
if ((sourceDirectory != null) && (!sourceDirectoryIndexed)) {
this.setSourceDirectory(sourceDirectory);
add(getSourceDirectory());
sourceDirectoryIndexed = true;
}
}
public void index(Exchange exchange) throws Exception {
LOG.debug("Indexing {}", exchange);
openIndexWriter();
Map<String, Object> headers = exchange.getIn().getHeaders();
add("exchangeId", exchange.getExchangeId(), true);
for (Entry<String, Object> entry : headers.entrySet()) {
String field = entry.getKey();
String value = exchange.getContext().getTypeConverter().convertTo(String.class, entry.getValue());
add(field, value, true);
}
add("contents", exchange.getIn().getMandatoryBody(String.class), true);
closeIndexWriter();
}
public NIOFSDirectory getNiofsDirectory() {
return niofsDirectory;
}
public void setNiofsDirectory(NIOFSDirectory niofsDirectory) {
this.niofsDirectory = niofsDirectory;
}
public File getSourceDirectory() {
return sourceDirectory;
}
public void setSourceDirectory(File sourceDirectory) {
this.sourceDirectory = sourceDirectory;
}
public Analyzer getAnalyzer() {
return analyzer;
}
public void setAnalyzer(Analyzer analyzer) {
this.analyzer = analyzer;
}
private void add(String field, String value, boolean analyzed) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Adding field: {}", field);
LOG.trace(" value: {}", value);
}
Document doc = new Document();
if (!analyzed) {
doc.add(new Field(field, value, Field.Store.YES, Field.Index.NOT_ANALYZED));
} else {
doc.add(new Field(field, value, Field.Store.YES, Field.Index.ANALYZED));
}
indexWriter.addDocument(doc);
}
private void add(File file) throws IOException {
if (file.canRead()) {
if (file.isDirectory()) {
String[] files = file.list();
if (files != null) {
for (String child : files) {
add(new File(file.getAbsolutePath() + "/" + child));
}
}
} else {
LOG.trace("Adding {}", file);
openIndexWriter();
add("path", file.getPath(), false);
add("contents", new String(IOConverter.toByteArray(file)), true);
closeIndexWriter();
LOG.trace("Added {} successfully", file);
}
} else {
LOG.warn("Directory/File " + file.getAbsolutePath() + " could not be read."
+ " This directory will not be indexed. Please check permissions and rebuild indexes.");
}
}
private void openIndexWriter() throws IOException {
IndexWriterConfig indexWriterConfig;
if (!indexCreated) {
indexWriterConfig = new IndexWriterConfig(Version.LUCENE_35, getAnalyzer()).setOpenMode(OpenMode.CREATE);
indexWriter = new IndexWriter(niofsDirectory, indexWriterConfig);
indexCreated = true;
return;
}
indexWriterConfig = new IndexWriterConfig(Version.LUCENE_35, getAnalyzer()).setOpenMode(OpenMode.APPEND);
indexWriter = new IndexWriter(niofsDirectory, indexWriterConfig);
}
private void closeIndexWriter() throws IOException {
indexWriter.commit();
indexWriter.close();
}
}
| aaronwalker/camel | components/camel-lucene/src/main/java/org/apache/camel/component/lucene/LuceneIndexer.java | Java | apache-2.0 | 5,933 |
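A minimal construction sketch for the indexer above, assuming Lucene 3.5 to match the `Version.LUCENE_35` constant used in the class. The directory paths are placeholders; the constructor recursively indexes every readable file under the source directory, as the `add(File)` method shows.

```java
import java.io.File;
import org.apache.camel.component.lucene.LuceneIndexer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.Version;

public class LuceneIndexerExample {
    public static void main(String[] args) throws Exception {
        // Index every readable file under ./docs into ./indexDirectory (placeholder paths).
        LuceneIndexer indexer = new LuceneIndexer(
                new File("./docs"),
                new File("./indexDirectory"),
                new StandardAnalyzer(Version.LUCENE_35));

        System.out.println("Indexed source directory: " + indexer.getSourceDirectory());
    }
}
```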
using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
using System.Globalization;
using System.Linq;
using System.Reflection;
namespace ZipCodeFinal.Areas.HelpPage
{
/// <summary>
/// This class will create an object of a given type and populate it with sample data.
/// </summary>
public class ObjectGenerator
{
internal const int DefaultCollectionSize = 2;
private readonly SimpleTypeObjectGenerator SimpleObjectGenerator = new SimpleTypeObjectGenerator();
/// <summary>
/// Generates an object for a given type. The type needs to be public, have a public default constructor and settable public properties/fields. Currently it supports the following types:
/// Simple types: <see cref="int"/>, <see cref="string"/>, <see cref="Enum"/>, <see cref="DateTime"/>, <see cref="Uri"/>, etc.
/// Complex types: POCO types.
/// Nullables: <see cref="Nullable{T}"/>.
/// Arrays: arrays of simple types or complex types.
/// Key value pairs: <see cref="KeyValuePair{TKey,TValue}"/>
/// Tuples: <see cref="Tuple{T1}"/>, <see cref="Tuple{T1,T2}"/>, etc
/// Dictionaries: <see cref="IDictionary{TKey,TValue}"/> or anything deriving from <see cref="IDictionary{TKey,TValue}"/>.
/// Collections: <see cref="IList{T}"/>, <see cref="IEnumerable{T}"/>, <see cref="ICollection{T}"/>, <see cref="IList"/>, <see cref="IEnumerable"/>, <see cref="ICollection"/> or anything deriving from <see cref="ICollection{T}"/> or <see cref="IList"/>.
/// Queryables: <see cref="IQueryable"/>, <see cref="IQueryable{T}"/>.
/// </summary>
/// <param name="type">The type.</param>
/// <returns>An object of the given type.</returns>
public object GenerateObject(Type type)
{
return GenerateObject(type, new Dictionary<Type, object>());
}
[SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes", Justification = "Here we just want to return null if anything goes wrong.")]
private object GenerateObject(Type type, Dictionary<Type, object> createdObjectReferences)
{
try
{
if (SimpleTypeObjectGenerator.CanGenerateObject(type))
{
return SimpleObjectGenerator.GenerateObject(type);
}
if (type.IsArray)
{
return GenerateArray(type, DefaultCollectionSize, createdObjectReferences);
}
if (type.IsGenericType)
{
return GenerateGenericType(type, DefaultCollectionSize, createdObjectReferences);
}
if (type == typeof(IDictionary))
{
return GenerateDictionary(typeof(Hashtable), DefaultCollectionSize, createdObjectReferences);
}
if (typeof(IDictionary).IsAssignableFrom(type))
{
return GenerateDictionary(type, DefaultCollectionSize, createdObjectReferences);
}
if (type == typeof(IList) ||
type == typeof(IEnumerable) ||
type == typeof(ICollection))
{
return GenerateCollection(typeof(ArrayList), DefaultCollectionSize, createdObjectReferences);
}
if (typeof(IList).IsAssignableFrom(type))
{
return GenerateCollection(type, DefaultCollectionSize, createdObjectReferences);
}
if (type == typeof(IQueryable))
{
return GenerateQueryable(type, DefaultCollectionSize, createdObjectReferences);
}
if (type.IsEnum)
{
return GenerateEnum(type);
}
if (type.IsPublic || type.IsNestedPublic)
{
return GenerateComplexObject(type, createdObjectReferences);
}
}
catch
{
// Returns null if anything fails
return null;
}
return null;
}
private static object GenerateGenericType(Type type, int collectionSize, Dictionary<Type, object> createdObjectReferences)
{
Type genericTypeDefinition = type.GetGenericTypeDefinition();
if (genericTypeDefinition == typeof(Nullable<>))
{
return GenerateNullable(type, createdObjectReferences);
}
if (genericTypeDefinition == typeof(KeyValuePair<,>))
{
return GenerateKeyValuePair(type, createdObjectReferences);
}
if (IsTuple(genericTypeDefinition))
{
return GenerateTuple(type, createdObjectReferences);
}
Type[] genericArguments = type.GetGenericArguments();
if (genericArguments.Length == 1)
{
if (genericTypeDefinition == typeof(IList<>) ||
genericTypeDefinition == typeof(IEnumerable<>) ||
genericTypeDefinition == typeof(ICollection<>))
{
Type collectionType = typeof(List<>).MakeGenericType(genericArguments);
return GenerateCollection(collectionType, collectionSize, createdObjectReferences);
}
if (genericTypeDefinition == typeof(IQueryable<>))
{
return GenerateQueryable(type, collectionSize, createdObjectReferences);
}
Type closedCollectionType = typeof(ICollection<>).MakeGenericType(genericArguments[0]);
if (closedCollectionType.IsAssignableFrom(type))
{
return GenerateCollection(type, collectionSize, createdObjectReferences);
}
}
if (genericArguments.Length == 2)
{
if (genericTypeDefinition == typeof(IDictionary<,>))
{
Type dictionaryType = typeof(Dictionary<,>).MakeGenericType(genericArguments);
return GenerateDictionary(dictionaryType, collectionSize, createdObjectReferences);
}
Type closedDictionaryType = typeof(IDictionary<,>).MakeGenericType(genericArguments[0], genericArguments[1]);
if (closedDictionaryType.IsAssignableFrom(type))
{
return GenerateDictionary(type, collectionSize, createdObjectReferences);
}
}
if (type.IsPublic || type.IsNestedPublic)
{
return GenerateComplexObject(type, createdObjectReferences);
}
return null;
}
private static object GenerateTuple(Type type, Dictionary<Type, object> createdObjectReferences)
{
Type[] genericArgs = type.GetGenericArguments();
object[] parameterValues = new object[genericArgs.Length];
bool failedToCreateTuple = true;
ObjectGenerator objectGenerator = new ObjectGenerator();
for (int i = 0; i < genericArgs.Length; i++)
{
parameterValues[i] = objectGenerator.GenerateObject(genericArgs[i], createdObjectReferences);
failedToCreateTuple &= parameterValues[i] == null;
}
if (failedToCreateTuple)
{
return null;
}
object result = Activator.CreateInstance(type, parameterValues);
return result;
}
private static bool IsTuple(Type genericTypeDefinition)
{
return genericTypeDefinition == typeof(Tuple<>) ||
genericTypeDefinition == typeof(Tuple<,>) ||
genericTypeDefinition == typeof(Tuple<,,>) ||
genericTypeDefinition == typeof(Tuple<,,,>) ||
genericTypeDefinition == typeof(Tuple<,,,,>) ||
genericTypeDefinition == typeof(Tuple<,,,,,>) ||
genericTypeDefinition == typeof(Tuple<,,,,,,>) ||
genericTypeDefinition == typeof(Tuple<,,,,,,,>);
}
private static object GenerateKeyValuePair(Type keyValuePairType, Dictionary<Type, object> createdObjectReferences)
{
Type[] genericArgs = keyValuePairType.GetGenericArguments();
Type typeK = genericArgs[0];
Type typeV = genericArgs[1];
ObjectGenerator objectGenerator = new ObjectGenerator();
object keyObject = objectGenerator.GenerateObject(typeK, createdObjectReferences);
object valueObject = objectGenerator.GenerateObject(typeV, createdObjectReferences);
if (keyObject == null && valueObject == null)
{
// Failed to create key and values
return null;
}
object result = Activator.CreateInstance(keyValuePairType, keyObject, valueObject);
return result;
}
private static object GenerateArray(Type arrayType, int size, Dictionary<Type, object> createdObjectReferences)
{
Type type = arrayType.GetElementType();
Array result = Array.CreateInstance(type, size);
bool areAllElementsNull = true;
ObjectGenerator objectGenerator = new ObjectGenerator();
for (int i = 0; i < size; i++)
{
object element = objectGenerator.GenerateObject(type, createdObjectReferences);
result.SetValue(element, i);
areAllElementsNull &= element == null;
}
if (areAllElementsNull)
{
return null;
}
return result;
}
private static object GenerateDictionary(Type dictionaryType, int size, Dictionary<Type, object> createdObjectReferences)
{
Type typeK = typeof(object);
Type typeV = typeof(object);
if (dictionaryType.IsGenericType)
{
Type[] genericArgs = dictionaryType.GetGenericArguments();
typeK = genericArgs[0];
typeV = genericArgs[1];
}
object result = Activator.CreateInstance(dictionaryType);
MethodInfo addMethod = dictionaryType.GetMethod("Add") ?? dictionaryType.GetMethod("TryAdd");
MethodInfo containsMethod = dictionaryType.GetMethod("Contains") ?? dictionaryType.GetMethod("ContainsKey");
ObjectGenerator objectGenerator = new ObjectGenerator();
for (int i = 0; i < size; i++)
{
object newKey = objectGenerator.GenerateObject(typeK, createdObjectReferences);
if (newKey == null)
{
// Cannot generate a valid key
return null;
}
bool containsKey = (bool)containsMethod.Invoke(result, new object[] { newKey });
if (!containsKey)
{
object newValue = objectGenerator.GenerateObject(typeV, createdObjectReferences);
addMethod.Invoke(result, new object[] { newKey, newValue });
}
}
return result;
}
private static object GenerateEnum(Type enumType)
{
Array possibleValues = Enum.GetValues(enumType);
if (possibleValues.Length > 0)
{
return possibleValues.GetValue(0);
}
return null;
}
private static object GenerateQueryable(Type queryableType, int size, Dictionary<Type, object> createdObjectReferences)
{
bool isGeneric = queryableType.IsGenericType;
object list;
if (isGeneric)
{
Type listType = typeof(List<>).MakeGenericType(queryableType.GetGenericArguments());
list = GenerateCollection(listType, size, createdObjectReferences);
}
else
{
list = GenerateArray(typeof(object[]), size, createdObjectReferences);
}
if (list == null)
{
return null;
}
if (isGeneric)
{
Type argumentType = typeof(IEnumerable<>).MakeGenericType(queryableType.GetGenericArguments());
MethodInfo asQueryableMethod = typeof(Queryable).GetMethod("AsQueryable", new[] { argumentType });
return asQueryableMethod.Invoke(null, new[] { list });
}
return Queryable.AsQueryable((IEnumerable)list);
}
private static object GenerateCollection(Type collectionType, int size, Dictionary<Type, object> createdObjectReferences)
{
Type type = collectionType.IsGenericType ?
collectionType.GetGenericArguments()[0] :
typeof(object);
object result = Activator.CreateInstance(collectionType);
MethodInfo addMethod = collectionType.GetMethod("Add");
bool areAllElementsNull = true;
ObjectGenerator objectGenerator = new ObjectGenerator();
for (int i = 0; i < size; i++)
{
object element = objectGenerator.GenerateObject(type, createdObjectReferences);
addMethod.Invoke(result, new object[] { element });
areAllElementsNull &= element == null;
}
if (areAllElementsNull)
{
return null;
}
return result;
}
private static object GenerateNullable(Type nullableType, Dictionary<Type, object> createdObjectReferences)
{
Type type = nullableType.GetGenericArguments()[0];
ObjectGenerator objectGenerator = new ObjectGenerator();
return objectGenerator.GenerateObject(type, createdObjectReferences);
}
private static object GenerateComplexObject(Type type, Dictionary<Type, object> createdObjectReferences)
{
object result = null;
if (createdObjectReferences.TryGetValue(type, out result))
{
// The object has been created already, just return it. This will handle the circular reference case.
return result;
}
if (type.IsValueType)
{
result = Activator.CreateInstance(type);
}
else
{
ConstructorInfo defaultCtor = type.GetConstructor(Type.EmptyTypes);
if (defaultCtor == null)
{
// Cannot instantiate the type because it doesn't have a default constructor
return null;
}
result = defaultCtor.Invoke(new object[0]);
}
createdObjectReferences.Add(type, result);
SetPublicProperties(type, result, createdObjectReferences);
SetPublicFields(type, result, createdObjectReferences);
return result;
}
private static void SetPublicProperties(Type type, object obj, Dictionary<Type, object> createdObjectReferences)
{
PropertyInfo[] properties = type.GetProperties(BindingFlags.Public | BindingFlags.Instance);
ObjectGenerator objectGenerator = new ObjectGenerator();
foreach (PropertyInfo property in properties)
{
if (property.CanWrite)
{
object propertyValue = objectGenerator.GenerateObject(property.PropertyType, createdObjectReferences);
property.SetValue(obj, propertyValue, null);
}
}
}
private static void SetPublicFields(Type type, object obj, Dictionary<Type, object> createdObjectReferences)
{
FieldInfo[] fields = type.GetFields(BindingFlags.Public | BindingFlags.Instance);
ObjectGenerator objectGenerator = new ObjectGenerator();
foreach (FieldInfo field in fields)
{
object fieldValue = objectGenerator.GenerateObject(field.FieldType, createdObjectReferences);
field.SetValue(obj, fieldValue);
}
}
private class SimpleTypeObjectGenerator
{
private long _index = 0;
private static readonly Dictionary<Type, Func<long, object>> DefaultGenerators = InitializeGenerators();
[SuppressMessage("Microsoft.Maintainability", "CA1502:AvoidExcessiveComplexity", Justification = "These are simple type factories and cannot be split up.")]
private static Dictionary<Type, Func<long, object>> InitializeGenerators()
{
return new Dictionary<Type, Func<long, object>>
{
{ typeof(Boolean), index => true },
{ typeof(Byte), index => (Byte)64 },
{ typeof(Char), index => (Char)65 },
{ typeof(DateTime), index => DateTime.Now },
{ typeof(DateTimeOffset), index => new DateTimeOffset(DateTime.Now) },
{ typeof(DBNull), index => DBNull.Value },
{ typeof(Decimal), index => (Decimal)index },
{ typeof(Double), index => (Double)(index + 0.1) },
{ typeof(Guid), index => Guid.NewGuid() },
{ typeof(Int16), index => (Int16)(index % Int16.MaxValue) },
{ typeof(Int32), index => (Int32)(index % Int32.MaxValue) },
{ typeof(Int64), index => (Int64)index },
{ typeof(Object), index => new object() },
{ typeof(SByte), index => (SByte)64 },
{ typeof(Single), index => (Single)(index + 0.1) },
{
typeof(String), index =>
{
return String.Format(CultureInfo.CurrentCulture, "sample string {0}", index);
}
},
{
typeof(TimeSpan), index =>
{
return TimeSpan.FromTicks(1234567);
}
},
{ typeof(UInt16), index => (UInt16)(index % UInt16.MaxValue) },
{ typeof(UInt32), index => (UInt32)(index % UInt32.MaxValue) },
{ typeof(UInt64), index => (UInt64)index },
{
typeof(Uri), index =>
{
return new Uri(String.Format(CultureInfo.CurrentCulture, "http://webapihelppage{0}.com", index));
}
},
};
}
public static bool CanGenerateObject(Type type)
{
return DefaultGenerators.ContainsKey(type);
}
public object GenerateObject(Type type)
{
return DefaultGenerators[type](++_index);
}
}
}
} | awslabs/aws-sdk-net-samples | Talks/vslive-2015/ZipCodes/ZipCodeFinal/Areas/HelpPage/SampleGeneration/ObjectGenerator.cs | C# | apache-2.0 | 19,494 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Compute.V1.Snippets
{
// [START compute_v1_generated_InstanceGroupManagers_Insert_async]
using Google.Cloud.Compute.V1;
using System.Threading.Tasks;
using lro = Google.LongRunning;
public sealed partial class GeneratedInstanceGroupManagersClientSnippets
{
/// <summary>Snippet for InsertAsync</summary>
/// <remarks>
/// This snippet has been automatically generated for illustrative purposes only.
/// It may require modifications to work in your environment.
/// </remarks>
public async Task InsertRequestObjectAsync()
{
// Create client
InstanceGroupManagersClient instanceGroupManagersClient = await InstanceGroupManagersClient.CreateAsync();
// Initialize request argument(s)
InsertInstanceGroupManagerRequest request = new InsertInstanceGroupManagerRequest
{
Zone = "",
RequestId = "",
Project = "",
InstanceGroupManagerResource = new InstanceGroupManager(),
};
// Make the request
lro::Operation<Operation, Operation> response = await instanceGroupManagersClient.InsertAsync(request);
// Poll until the returned long-running operation is complete
lro::Operation<Operation, Operation> completedResponse = await response.PollUntilCompletedAsync();
// Retrieve the operation result
Operation result = completedResponse.Result;
// Or get the name of the operation
string operationName = response.Name;
// This name can be stored, then the long-running operation retrieved later by name
lro::Operation<Operation, Operation> retrievedResponse = await instanceGroupManagersClient.PollOnceInsertAsync(operationName);
// Check if the retrieved long-running operation has completed
if (retrievedResponse.IsCompleted)
{
// If it has completed, then access the result
Operation retrievedResult = retrievedResponse.Result;
}
}
}
// [END compute_v1_generated_InstanceGroupManagers_Insert_async]
}
| googleapis/google-cloud-dotnet | apis/Google.Cloud.Compute.V1/Google.Cloud.Compute.V1.GeneratedSnippets/InstanceGroupManagersClient.InsertRequestObjectAsyncSnippet.g.cs | C# | apache-2.0 | 2,871 |
<?php
return array (
'<strong>Module</strong> details' => '<strong>Modul</strong> Informationen ',
'This module doesn\'t provide further informations.' => 'Dieses Modul stellt keine weiteren Informationen zur Verfügung.',
);
| calonso-conabio/intranet | protected/humhub/modules/admin/messages/de/views_module_info.php | PHP | apache-2.0 | 230 |
package de.terrestris.shoguncore.dao;
import de.terrestris.shoguncore.model.layer.source.LayerDataSource;
import org.springframework.stereotype.Repository;
@Repository("layerDataSourceDao")
public class LayerDataSourceDao<E extends LayerDataSource> extends
GenericHibernateDao<E, Integer> {
/**
* Public default constructor for this DAO.
*/
@SuppressWarnings("unchecked")
public LayerDataSourceDao() {
super((Class<E>) LayerDataSource.class);
}
/**
* Constructor that has to be called by subclasses.
*
* @param clazz the concrete entity class managed by the subclass DAO
*/
protected LayerDataSourceDao(Class<E> clazz) {
super(clazz);
}
}
| ahennr/shogun2 | src/shogun-core-main/src/main/java/de/terrestris/shoguncore/dao/LayerDataSourceDao.java | Java | apache-2.0 | 668 |
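A sketch of why the protected constructor above exists: a subclass DAO bound to a more specific source type hands its own entity class up to the generic parent. The `ImageWmsLayerDataSource` entity named here is assumed for illustration and may differ from the project's actual model classes.

```java
import de.terrestris.shoguncore.dao.LayerDataSourceDao;
import org.springframework.stereotype.Repository;

// Hypothetical subclass DAO for an assumed ImageWmsLayerDataSource entity.
@Repository("imageWmsLayerDataSourceDao")
public class ImageWmsLayerDataSourceDao<E extends ImageWmsLayerDataSource>
        extends LayerDataSourceDao<E> {

    @SuppressWarnings("unchecked")
    public ImageWmsLayerDataSourceDao() {
        // Pass the concrete entity class to the generic parent DAO.
        super((Class<E>) ImageWmsLayerDataSource.class);
    }
}
```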
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"context"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
goruntime "runtime"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/klog"
utiltrace "k8s.io/utils/trace"
)
// RequestScope encapsulates common fields across all RESTful handler methods.
type RequestScope struct {
Namer ScopeNamer
Serializer runtime.NegotiatedSerializer
runtime.ParameterCodec
Creater runtime.ObjectCreater
Convertor runtime.ObjectConvertor
Defaulter runtime.ObjectDefaulter
Typer runtime.ObjectTyper
UnsafeConvertor runtime.ObjectConvertor
Authorizer authorizer.Authorizer
Trace *utiltrace.Trace
TableConvertor rest.TableConvertor
FieldManager *fieldmanager.FieldManager
Resource schema.GroupVersionResource
Kind schema.GroupVersionKind
Subresource string
MetaGroupVersion schema.GroupVersion
// HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling.
HubGroupVersion schema.GroupVersion
MaxRequestBodyBytes int64
}
func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {
responsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)
}
func (scope *RequestScope) AllowsConversion(gvk schema.GroupVersionKind) bool {
// TODO: this is temporary, replace with an abstraction calculated at endpoint installation time
if gvk.GroupVersion() == metav1beta1.SchemeGroupVersion {
switch gvk.Kind {
case "Table":
return scope.TableConvertor != nil
case "PartialObjectMetadata", "PartialObjectMetadataList":
// TODO: should delineate between lists and non-list endpoints
return true
default:
return false
}
}
return false
}
func (scope *RequestScope) AllowsServerVersion(version string) bool {
return version == scope.MetaGroupVersion.Version
}
func (scope *RequestScope) AllowsStreamSchema(s string) bool {
return s == "watch"
}
// ConnectResource returns a function that handles a connect request on a rest.Storage object.
func ConnectResource(connecter rest.Connecter, scope RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
if isDryRun(req.URL) {
scope.err(errors.NewBadRequest("dryRun is not supported"), w, req)
return
}
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
ctx := req.Context()
ctx = request.WithNamespace(ctx, namespace)
ae := request.AuditEventFrom(ctx)
admit = admission.WithAudit(admit, ae)
opts, subpath, subpathKey := connecter.NewConnectOptions()
if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
if admit != nil && admit.Handles(admission.Connect) {
userInfo, _ := request.UserFrom(ctx)
// TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok {
err = mutatingAdmission.Admit(admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, false, userInfo))
if err != nil {
scope.err(err, w, req)
return
}
}
if validatingAdmission, ok := admit.(admission.ValidationInterface); ok {
err = validatingAdmission.Validate(admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, false, userInfo))
if err != nil {
scope.err(err, w, req)
return
}
}
}
requestInfo, _ := request.RequestInfoFrom(ctx)
metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {
handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w})
if err != nil {
scope.err(err, w, req)
return
}
handler.ServeHTTP(w, req)
})
}
}
// responder implements rest.Responder for assisting a connector in writing objects or errors.
type responder struct {
scope RequestScope
req *http.Request
w http.ResponseWriter
}
func (r *responder) Object(statusCode int, obj runtime.Object) {
responsewriters.WriteObject(statusCode, r.scope.Kind.GroupVersion(), r.scope.Serializer, obj, r.w, r.req)
}
func (r *responder) Error(err error) {
r.scope.err(err, r.w, r.req)
}
// resultFunc is a function that returns a rest result and can be run in a goroutine
type resultFunc func() (runtime.Object, error)
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// An api.Status object with status != success is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
// when the select statement reads something other than the one the goroutine sends on.
ch := make(chan runtime.Object, 1)
errCh := make(chan error, 1)
panicCh := make(chan interface{}, 1)
go func() {
// panics don't cross goroutine boundaries, so we have to handle ourselves
defer func() {
panicReason := recover()
if panicReason != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:goruntime.Stack(buf, false)]
panicReason = strings.TrimSuffix(fmt.Sprintf("%v\n%s", panicReason, string(buf)), "\n")
// Propagate to parent goroutine
panicCh <- panicReason
}
}()
if result, err := fn(); err != nil {
errCh <- err
} else {
ch <- result
}
}()
select {
case result = <-ch:
if status, ok := result.(*metav1.Status); ok {
if status.Status != metav1.StatusSuccess {
return nil, errors.FromObject(status)
}
}
return result, nil
case err = <-errCh:
return nil, err
case p := <-panicCh:
panic(p)
case <-time.After(timeout):
return nil, errors.NewTimeoutError(fmt.Sprintf("request did not complete within requested timeout %s", timeout), 0)
}
}
// transformDecodeError adds additional information when a decode fails.
func transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {
objGVKs, _, err := typer.ObjectKinds(into)
if err != nil {
return err
}
objGVK := objGVKs[0]
if gvk != nil && len(gvk.Kind) > 0 {
return errors.NewBadRequest(fmt.Sprintf("%s in version %q cannot be handled as a %s: %v", gvk.Kind, gvk.Version, objGVK.Kind, baseErr))
}
summary := summarizeData(body, 30)
return errors.NewBadRequest(fmt.Sprintf("the object provided is unrecognized (must be of type %s): %v (%s)", objGVK.Kind, baseErr, summary))
}
// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request
// plus the path and query generated by the provided linkFunc
func setSelfLink(obj runtime.Object, requestInfo *request.RequestInfo, namer ScopeNamer) error {
// TODO: SelfLink generation should return a full URL?
uri, err := namer.GenerateLink(requestInfo, obj)
if err != nil {
return nil
}
return namer.SetSelfLink(obj, uri)
}
func hasUID(obj runtime.Object) (bool, error) {
if obj == nil {
return false, nil
}
accessor, err := meta.Accessor(obj)
if err != nil {
return false, errors.NewInternalError(err)
}
if len(accessor.GetUID()) == 0 {
return false, nil
}
return true, nil
}
// checkName checks the provided name against the request
func checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {
objNamespace, objName, err := namer.ObjectName(obj)
if err != nil {
return errors.NewBadRequest(fmt.Sprintf(
"the name of the object (%s based on URL) was undeterminable: %v", name, err))
}
if objName != name {
return errors.NewBadRequest(fmt.Sprintf(
"the name of the object (%s) does not match the name on the URL (%s)", objName, name))
}
if len(namespace) > 0 {
if len(objNamespace) > 0 && objNamespace != namespace {
return errors.NewBadRequest(fmt.Sprintf(
"the namespace of the object (%s) does not match the namespace on the request (%s)", objNamespace, namespace))
}
}
return nil
}
// setObjectSelfLink sets the self link of an object as needed.
func setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error {
if !meta.IsListType(obj) {
requestInfo, ok := request.RequestInfoFrom(ctx)
if !ok {
return fmt.Errorf("missing requestInfo")
}
return setSelfLink(obj, requestInfo, namer)
}
uri, err := namer.GenerateListLink(req)
if err != nil {
return err
}
if err := namer.SetSelfLink(obj, uri); err != nil {
klog.V(4).Infof("Unable to set self link on object: %v", err)
}
requestInfo, ok := request.RequestInfoFrom(ctx)
if !ok {
return fmt.Errorf("missing requestInfo")
}
count := 0
err = meta.EachListItem(obj, func(obj runtime.Object) error {
count++
return setSelfLink(obj, requestInfo, namer)
})
if count == 0 {
if err := meta.SetList(obj, []runtime.Object{}); err != nil {
return err
}
}
return err
}
func summarizeData(data []byte, maxLength int) string {
switch {
case len(data) == 0:
return "<empty>"
case data[0] == '{':
if len(data) > maxLength {
return string(data[:maxLength]) + " ..."
}
return string(data)
default:
if len(data) > maxLength {
return hex.EncodeToString(data[:maxLength]) + " ..."
}
return hex.EncodeToString(data)
}
}
func limitedReadBody(req *http.Request, limit int64) ([]byte, error) {
defer req.Body.Close()
if limit <= 0 {
return ioutil.ReadAll(req.Body)
}
lr := &io.LimitedReader{
R: req.Body,
N: limit + 1,
}
data, err := ioutil.ReadAll(lr)
if err != nil {
return nil, err
}
if lr.N <= 0 {
return nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf("limit is %d", limit))
}
return data, nil
}
func parseTimeout(str string) time.Duration {
if str != "" {
timeout, err := time.ParseDuration(str)
if err == nil {
return timeout
}
klog.Errorf("Failed to parse %q: %v", str, err)
}
return 30 * time.Second
}
func isDryRun(url *url.URL) bool {
return len(url.Query()["dryRun"]) != 0
}
| vmware/kubernetes | staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go | GO | apache-2.0 | 11,462 |
/*
* Copyright (C) 2017 Ignite Realtime Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.jivesoftware.openfire.pep;
import org.jivesoftware.openfire.pubsub.PubSubServiceInfo;
import org.xmpp.packet.JID;
/**
* A PubSubService manager that is specific to the implementation of XEP-0163: Personal Eventing Protocol.
*
* @author Guus der Kinderen, guus.der.kinderen@gmail.com
*/
public class PEPServiceInfo extends PubSubServiceInfo
{
public PEPServiceInfo( JID owner )
{
super( new PEPServiceManager().getPEPService( owner.toBareJID() ) );
}
}
| speedy01/Openfire | xmppserver/src/main/java/org/jivesoftware/openfire/pep/PEPServiceInfo.java | Java | apache-2.0 | 1,128 |
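A small sketch of constructing the wrapper above for a user's PEP service. The bare JID string is a placeholder, and the lookup only makes sense inside a running Openfire server, since `PEPServiceManager` resolves the service through the live XMPP server.

```java
import org.jivesoftware.openfire.pep.PEPServiceInfo;
import org.xmpp.packet.JID;

public class PEPServiceInfoExample {
    public static void main(String[] args) {
        // Only meaningful inside a running Openfire instance; the JID is a placeholder.
        JID owner = new JID("juliet@example.org");
        PEPServiceInfo info = new PEPServiceInfo(owner);
    }
}
```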
package pl.temomuko.autostoprace.ui.teamslocationsmap.adapter.map;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.TextView;
import com.google.android.gms.maps.GoogleMap;
import com.google.android.gms.maps.model.Marker;
import javax.inject.Inject;
import butterknife.BindView;
import butterknife.ButterKnife;
import pl.temomuko.autostoprace.R;
import pl.temomuko.autostoprace.injection.AppContext;
/**
* Created by Rafał Naniewicz on 02.04.2016.
*/
public class TeamLocationInfoWindowAdapter implements GoogleMap.InfoWindowAdapter {
@BindView(R.id.tv_location_record_date) TextView mLocationRecordDateTextView;
@BindView(R.id.tv_location_record_message) TextView mLocationRecordMessageTextView;
private final View mContentsView;
@Inject
public TeamLocationInfoWindowAdapter(@AppContext Context context) {
mContentsView = LayoutInflater.from(context).inflate(R.layout.adapter_team_location_info_window, null, false);
ButterKnife.bind(this, mContentsView);
}
@Override
public View getInfoWindow(Marker marker) {
return null;
}
@Override
public View getInfoContents(Marker marker) {
mLocationRecordMessageTextView.setText(marker.getTitle());
mLocationRecordDateTextView.setText(marker.getSnippet());
return mContentsView;
}
}
| TeMoMuKo/AutoStopRace | app/src/main/java/pl/temomuko/autostoprace/ui/teamslocationsmap/adapter/map/TeamLocationInfoWindowAdapter.java | Java | apache-2.0 | 1,400 |
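A sketch of wiring the adapter above into a `GoogleMap`. In the real project the adapter is provided by Dagger injection, so the direct construction and the AndroidX activity base class here are illustrative assumptions only.

```java
import androidx.appcompat.app.AppCompatActivity;
import com.google.android.gms.maps.GoogleMap;
import com.google.android.gms.maps.OnMapReadyCallback;
import pl.temomuko.autostoprace.ui.teamslocationsmap.adapter.map.TeamLocationInfoWindowAdapter;

// Illustrative activity; the real project obtains the adapter through dependency injection.
public class TeamMapActivity extends AppCompatActivity implements OnMapReadyCallback {

    @Override
    public void onMapReady(GoogleMap googleMap) {
        TeamLocationInfoWindowAdapter adapter =
                new TeamLocationInfoWindowAdapter(getApplicationContext());
        googleMap.setInfoWindowAdapter(adapter);
        // Markers added afterwards render their title/snippet through the adapter's custom layout.
    }
}
```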
/**
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.core.framework.persistence.jpa.metadata;
import javax.persistence.CascadeType;
/**
* @author Kuali Rice Team (rice.collab@kuali.org)
*/
public class ManyToOneDescriptor extends ObjectDescriptor implements java.io.Serializable {
private static final long serialVersionUID = -1277621663465909764L;
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("ManyToOneDescriptor = [ ");
sb.append("targetEntity:").append(targetEntity.getName()).append(", ");
sb.append("cascade = { ");
for (CascadeType ct : cascade) {
sb.append(ct).append(" ");
}
sb.append("}, ");
sb.append("fetch:").append(fetch).append(", ");
sb.append("optional:").append(optional);
if (!joinColumnDescriptors.isEmpty()) {
sb.append(", join columns = { ");
for (JoinColumnDescriptor joinColumnDescriptor : joinColumnDescriptors) {
sb.append(" jc = { ");
sb.append("name:").append(joinColumnDescriptor.getName()).append(", ");
sb.append("insertable:").append(joinColumnDescriptor.isInsertable()).append(", ");
sb.append("nullable:").append(joinColumnDescriptor.isNullable()).append(", ");
sb.append("unique:").append(joinColumnDescriptor.isUnique()).append(", ");
sb.append("updateable:").append(joinColumnDescriptor.isUpdateable());
sb.append(" }");
}
sb.append(" } ");
}
sb.append(" ]");
return sb.toString();
}
}
| ua-eas/ksd-kc5.2.1-rice2.3.6-ua | rice-middleware/core/framework/src/main/java/org/kuali/rice/core/framework/persistence/jpa/metadata/ManyToOneDescriptor.java | Java | apache-2.0 | 2,029 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.felix.obrplugin;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.reflect.Method;
import java.net.URI;
import java.util.regex.Pattern;
import org.apache.felix.bundlerepository.Resource;
import org.apache.felix.bundlerepository.impl.DataModelHelperImpl;
import org.apache.felix.bundlerepository.impl.PullParser;
import org.apache.felix.bundlerepository.impl.RepositoryImpl;
import org.apache.felix.bundlerepository.impl.RepositoryParser;
import org.apache.felix.bundlerepository.impl.ResourceImpl;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.logging.Log;
import org.apache.maven.project.MavenProject;
import org.codehaus.plexus.util.FileUtils;
import org.kxml2.io.KXmlParser;
import org.xmlpull.v1.XmlPullParser;
/**
* This class parses the old repository.xml file, builds the bundle resource description and updates the repository.
* @author <a href="mailto:dev@felix.apache.org">Felix Project Team</a>
*/
public class ObrUpdate
{
private static Pattern TIMESTAMP = Pattern.compile( "-[0-9]{8}\\.[0-9]{6}-[0-9]+" );
private static Method setURI;
static
{
try
{
setURI = RepositoryImpl.class.getDeclaredMethod( "setURI", String.class );
setURI.setAccessible( true );
}
catch ( Exception e )
{
setURI = null;
}
}
/**
* logger for this plugin.
*/
private Log m_logger;
/**
* name and path to the repository descriptor file.
*/
private URI m_repositoryXml;
/**
* name and path to the obr.xml file.
*/
private URI m_obrXml;
/**
* maven project description.
*/
private MavenProject m_project;
/**
* user configuration information.
*/
private Config m_userConfig;
/**
* root on parent document.
*/
private RepositoryImpl m_repository;
/**
* used to store bundle information.
*/
private ResourceImpl m_resourceBundle;
/**
* base URI used to relativize bundle URIs.
*/
private URI m_baseURI;
/**
* initialize information.
* @param repositoryXml path to the repository descriptor file
* @param obrXml path and filename to the obr.xml file
* @param project maven project description
* @param mavenRepositoryPath path to the local maven repository
* @param userConfig user information
* @param logger plugin logger
*/
public ObrUpdate( URI repositoryXml, URI obrXml, MavenProject project, String mavenRepositoryPath,
Config userConfig, Log logger )
{
m_repositoryXml = repositoryXml;
m_obrXml = obrXml;
m_project = project;
m_logger = logger;
m_userConfig = userConfig;
if ( userConfig.isRemoteFile() )
{
m_baseURI = ObrUtils.toFileURI( mavenRepositoryPath );
}
else
{
m_baseURI = m_repositoryXml;
}
}
/**
* Update the repository descriptor file: parse the old repository descriptor file,
* get the old reference of the bundle or determine the id for a new bundle, extract
* information from bindex, set the new information in the descriptor file and save it.
*
* @param bundleJar path to the bundle jar file
* @param sourceJar path to the source jar file
* @param docJar path to the docs jar file
*
* @throws MojoExecutionException if the plugin failed
*/
public void updateRepository( URI bundleJar, URI sourceJar, URI docJar ) throws MojoExecutionException
{
m_logger.debug( " (f) repositoryXml = " + m_repositoryXml );
m_logger.debug( " (f) bundleJar = " + bundleJar );
m_logger.debug( " (f) sourceJar = " + sourceJar );
m_logger.debug( " (f) docJar = " + docJar );
m_logger.debug( " (f) obrXml = " + m_obrXml );
if ( m_repository == null )
{
return;
}
// get the file size
File bundleFile = new File( bundleJar );
if ( !bundleFile.exists() )
{
String snapshot = TIMESTAMP.matcher( bundleFile.getName() ).replaceFirst( "-SNAPSHOT" );
bundleFile = new File( bundleFile.getParentFile(), snapshot );
}
if ( bundleFile.exists() )
{
URI resourceURI = m_userConfig.getRemoteBundle();
if ( null == resourceURI )
{
resourceURI = bundleJar;
if ( m_userConfig.isPathRelative() )
{
resourceURI = ObrUtils.getRelativeURI( m_baseURI, resourceURI );
}
}
if ( m_userConfig.isRemoteFile() )
{
m_logger.info( "Deploying " + resourceURI );
}
else
{
m_logger.info( "Installing " + resourceURI );
}
try
{
m_resourceBundle = ( ResourceImpl ) new DataModelHelperImpl().createResource( bundleFile.toURI().toURL() );
if ( m_resourceBundle == null )
{
return;
}
}
catch ( IOException e )
{
throw new MojoExecutionException( "Unable to load resource information", e );
}
m_resourceBundle.put( Resource.SIZE, String.valueOf( bundleFile.length() ) );
m_resourceBundle.put( Resource.URI, resourceURI.toASCIIString() );
}
else
{
m_logger.error( "file doesn't exist: " + bundleJar );
return;
}
// parse the obr.xml file
if ( m_obrXml != null )
{
m_logger.info( "Adding " + m_obrXml );
// URL url = getClass().getResource("/SchemaObr.xsd");
// TODO validate obr.xml file
// add contents to resource bundle
parseObrXml();
}
String sourcePath = relativisePath( sourceJar );
String docPath = relativisePath( docJar );
// m_resourceBundle.construct( m_project, bindexExtractor, sourcePath, docPath );
// TODO: rebuild wrt m_project
m_repository.addResource( m_resourceBundle );
m_repository.setLastModified( System.currentTimeMillis() );
}
private String relativisePath( URI uri )
{
if ( null != uri )
{
if ( m_userConfig.isPathRelative() )
{
return ObrUtils.getRelativeURI( m_baseURI, uri ).toASCIIString();
}
return uri.toASCIIString();
}
return null;
}
public void writeRepositoryXml() throws MojoExecutionException
{
m_logger.info( "Writing OBR metadata" );
File file = null;
Writer writer;
try
{
file = File.createTempFile( "repository", ".xml" );
writer = new OutputStreamWriter( new FileOutputStream( file ) );
}
catch ( IOException e )
{
m_logger.error( "Unable to write to file: " + file.getName() );
e.printStackTrace();
throw new MojoExecutionException( "Unable to write to file: " + file.getName() + " : " + e.getMessage() );
}
try
{
new DataModelHelperImpl().writeRepository( m_repository, writer );
}
catch ( IOException e )
{
throw new MojoExecutionException( "Unable to write repository xml", e );
}
try
{
writer.flush();
writer.close();
File outputFile = new File( m_repositoryXml );
outputFile.getParentFile().mkdirs();
FileUtils.rename( file, outputFile );
}
catch ( IOException e )
{
e.printStackTrace();
throw new MojoExecutionException( "IOException" );
}
}
/**
* Parse the repository descriptor file.
*
* @throws MojoExecutionException if the plugin failed
*/
public void parseRepositoryXml() throws MojoExecutionException
{
File fout = new File( m_repositoryXml );
if ( !fout.exists() )
{
m_repository = new RepositoryImpl();
writeRepositoryXml();
}
else
{
try
{
m_repository = ( RepositoryImpl ) new DataModelHelperImpl().repository( m_repositoryXml.toURL() );
if ( setURI != null )
{
setURI.invoke( m_repository, ( String ) null );
}
}
catch ( Exception e )
{
throw new MojoExecutionException( "Unable to read repository xml: " + m_repositoryXml, e );
}
}
}
/**
     * Put the information from obr.xml into the resourceBundle object.
*/
private void parseObrXml() throws MojoExecutionException
{
try
{
InputStream is = new FileInputStream( new File( m_obrXml ) );
try
{
KXmlParser kxp = new KXmlParser();
kxp.setInput( is, null );
kxp.nextTag(); // skip top level element
kxp.nextTag(); // go to first child element
parseObrXml( kxp );
}
finally
{
is.close();
}
}
catch ( Exception e )
{
throw new MojoExecutionException( "Unable to parse obr xml: " + m_obrXml, e );
}
}
private void parseObrXml( KXmlParser kxp ) throws Exception
{
PullParser parser = new PullParser();
while ( kxp.getEventType() == XmlPullParser.START_TAG )
{
if ( RepositoryParser.CATEGORY.equals( kxp.getName() ) )
{
m_resourceBundle.addCategory( parser.parseCategory( kxp ) );
}
else if ( RepositoryParser.REQUIRE.equals( kxp.getName() ) )
{
m_resourceBundle.addRequire( parser.parseRequire( kxp ) );
}
else if ( RepositoryParser.CAPABILITY.equals( kxp.getName() ) )
{
m_resourceBundle.addCapability( parser.parseCapability( kxp ) );
}
else
{
kxp.nextTag();
parseObrXml( kxp );
}
kxp.nextTag();
}
}
}
| boneman1231/org.apache.felix | trunk/bundleplugin/src/main/java/org/apache/felix/obrplugin/ObrUpdate.java | Java | apache-2.0 | 11,514 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tachyon.worker;
/**
* The worker space counter, in charge of counting and granting spaces in a worker daemon.
*/
public class WorkerSpaceCounter {
private final long CAPACITY_BYTES;
private long mUsedBytes;
/**
* @param capacityBytes
* The maximum memory space the TachyonWorker can use, in bytes
*/
public WorkerSpaceCounter(long capacityBytes) {
CAPACITY_BYTES = capacityBytes;
mUsedBytes = 0;
}
/**
* @return The available space size, in bytes
*/
public synchronized long getAvailableBytes() {
return CAPACITY_BYTES - mUsedBytes;
}
/**
* @return The maximum memory space the TachyonWorker can use, in bytes
*/
public long getCapacityBytes() {
return CAPACITY_BYTES;
}
/**
* @return The bytes that have been used
*/
public synchronized long getUsedBytes() {
return mUsedBytes;
}
/**
* Request space
*
* @param requestSpaceBytes
* The requested space size, in bytes
   * @return true if the requested space was granted, false otherwise
*/
public synchronized boolean requestSpaceBytes(long requestSpaceBytes) {
if (getAvailableBytes() < requestSpaceBytes) {
return false;
}
mUsedBytes += requestSpaceBytes;
return true;
}
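  /*
   * Illustrative usage only (not part of the original class): a caller requests space before
   * writing a block and gives it back when the block is evicted or the write is aborted. The
   * sizes used here are arbitrary.
   *
   *   WorkerSpaceCounter counter = new WorkerSpaceCounter(64 * 1024 * 1024);
   *   if (counter.requestSpaceBytes(4096)) {
   *     // ... write the 4 KB block ...
   *   } else {
   *     // not enough free space; counter.getAvailableBytes() < 4096
   *   }
   *   counter.returnUsedBytes(4096); // when the block is removed
   */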
/**
* Return used space size
*
* @param returnUsedBytes
* The returned space size, in bytes
*/
public synchronized void returnUsedBytes(long returnUsedBytes) {
mUsedBytes -= returnUsedBytes;
}
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder("WorkerSpaceCounter(");
sb.append(" TOTAL_BYTES: ").append(CAPACITY_BYTES);
sb.append(", mUsedBytes: ").append(mUsedBytes);
sb.append(", mAvailableBytes: ").append(CAPACITY_BYTES - mUsedBytes);
sb.append(" )");
return sb.toString();
}
/**
* Update the used bytes
*
* @param usedBytes
* The new used bytes
*/
public synchronized void updateUsedBytes(long usedBytes) {
mUsedBytes = usedBytes;
}
}
| gsoundar/mambo-ec2-deploy | packages/tachyon-0.5.0/core/src/main/java/tachyon/worker/WorkerSpaceCounter.java | Java | apache-2.0 | 2787 |
/*
* Copyright 2012-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.test.context;
import java.lang.annotation.Annotation;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.context.properties.bind.Bindable;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.context.properties.source.ConfigurationPropertySource;
import org.springframework.boot.context.properties.source.MapConfigurationPropertySource;
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
import org.springframework.core.annotation.AnnotatedElementUtils;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.core.env.Environment;
import org.springframework.core.io.support.SpringFactoriesLoader;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.ContextConfigurationAttributes;
import org.springframework.test.context.ContextHierarchy;
import org.springframework.test.context.ContextLoader;
import org.springframework.test.context.MergedContextConfiguration;
import org.springframework.test.context.TestContext;
import org.springframework.test.context.TestContextBootstrapper;
import org.springframework.test.context.TestExecutionListener;
import org.springframework.test.context.support.DefaultTestContextBootstrapper;
import org.springframework.test.context.support.TestPropertySourceUtils;
import org.springframework.test.context.web.WebAppConfiguration;
import org.springframework.test.context.web.WebMergedContextConfiguration;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
/**
* {@link TestContextBootstrapper} for Spring Boot. Provides support for
* {@link SpringBootTest @SpringBootTest} and may also be used directly or subclassed.
* Provides the following features over and above {@link DefaultTestContextBootstrapper}:
* <ul>
* <li>Uses {@link SpringBootContextLoader} as the
* {@link #getDefaultContextLoaderClass(Class) default context loader}.</li>
* <li>Automatically searches for a
* {@link SpringBootConfiguration @SpringBootConfiguration} when required.</li>
* <li>Allows custom {@link Environment} {@link #getProperties(Class)} to be defined.</li>
* <li>Provides support for different {@link WebEnvironment webEnvironment} modes.</li>
* </ul>
*
* @author Phillip Webb
* @author Andy Wilkinson
* @author Brian Clozel
* @author Madhura Bhave
* @since 1.4.0
* @see SpringBootTest
* @see TestConfiguration
*/
public class SpringBootTestContextBootstrapper extends DefaultTestContextBootstrapper {
private static final String[] WEB_ENVIRONMENT_CLASSES = { "javax.servlet.Servlet",
"org.springframework.web.context.ConfigurableWebApplicationContext" };
private static final String REACTIVE_WEB_ENVIRONMENT_CLASS = "org.springframework."
+ "web.reactive.DispatcherHandler";
private static final String MVC_WEB_ENVIRONMENT_CLASS = "org.springframework."
+ "web.servlet.DispatcherServlet";
private static final String ACTIVATE_SERVLET_LISTENER = "org.springframework.test."
+ "context.web.ServletTestExecutionListener.activateListener";
private static final Log logger = LogFactory
.getLog(SpringBootTestContextBootstrapper.class);
@Override
public TestContext buildTestContext() {
TestContext context = super.buildTestContext();
verifyConfiguration(context.getTestClass());
WebEnvironment webEnvironment = getWebEnvironment(context.getTestClass());
if (webEnvironment == WebEnvironment.MOCK
&& deduceWebApplicationType() == WebApplicationType.SERVLET) {
context.setAttribute(ACTIVATE_SERVLET_LISTENER, true);
}
else if (webEnvironment != null && webEnvironment.isEmbedded()) {
context.setAttribute(ACTIVATE_SERVLET_LISTENER, false);
}
return context;
}
@Override
protected Set<Class<? extends TestExecutionListener>> getDefaultTestExecutionListenerClasses() {
Set<Class<? extends TestExecutionListener>> listeners = super.getDefaultTestExecutionListenerClasses();
List<DefaultTestExecutionListenersPostProcessor> postProcessors = SpringFactoriesLoader
.loadFactories(DefaultTestExecutionListenersPostProcessor.class,
getClass().getClassLoader());
for (DefaultTestExecutionListenersPostProcessor postProcessor : postProcessors) {
listeners = postProcessor.postProcessDefaultTestExecutionListeners(listeners);
}
return listeners;
}
@Override
protected ContextLoader resolveContextLoader(Class<?> testClass,
List<ContextConfigurationAttributes> configAttributesList) {
Class<?>[] classes = getClasses(testClass);
if (!ObjectUtils.isEmpty(classes)) {
for (ContextConfigurationAttributes configAttributes : configAttributesList) {
addConfigAttributesClasses(configAttributes, classes);
}
}
return super.resolveContextLoader(testClass, configAttributesList);
}
private void addConfigAttributesClasses(
ContextConfigurationAttributes configAttributes, Class<?>[] classes) {
List<Class<?>> combined = new ArrayList<>();
combined.addAll(Arrays.asList(classes));
if (configAttributes.getClasses() != null) {
combined.addAll(Arrays.asList(configAttributes.getClasses()));
}
configAttributes.setClasses(ClassUtils.toClassArray(combined));
}
@Override
protected Class<? extends ContextLoader> getDefaultContextLoaderClass(
Class<?> testClass) {
return SpringBootContextLoader.class;
}
@Override
protected MergedContextConfiguration processMergedContextConfiguration(
MergedContextConfiguration mergedConfig) {
Class<?>[] classes = getOrFindConfigurationClasses(mergedConfig);
List<String> propertySourceProperties = getAndProcessPropertySourceProperties(
mergedConfig);
mergedConfig = createModifiedConfig(mergedConfig, classes,
StringUtils.toStringArray(propertySourceProperties));
WebEnvironment webEnvironment = getWebEnvironment(mergedConfig.getTestClass());
if (webEnvironment != null && isWebEnvironmentSupported(mergedConfig)) {
WebApplicationType webApplicationType = getWebApplicationType(mergedConfig);
if (webApplicationType == WebApplicationType.SERVLET
&& (webEnvironment.isEmbedded()
|| webEnvironment == WebEnvironment.MOCK)) {
WebAppConfiguration webAppConfiguration = AnnotatedElementUtils
.findMergedAnnotation(mergedConfig.getTestClass(),
WebAppConfiguration.class);
String resourceBasePath = (webAppConfiguration != null
? webAppConfiguration.value() : "src/main/webapp");
mergedConfig = new WebMergedContextConfiguration(mergedConfig,
resourceBasePath);
}
else if (webApplicationType == WebApplicationType.REACTIVE
&& (webEnvironment.isEmbedded()
|| webEnvironment == WebEnvironment.MOCK)) {
return new ReactiveWebMergedContextConfiguration(mergedConfig);
}
}
return mergedConfig;
}
private WebApplicationType getWebApplicationType(
MergedContextConfiguration configuration) {
ConfigurationPropertySource source = new MapConfigurationPropertySource(
TestPropertySourceUtils.convertInlinedPropertiesToMap(
configuration.getPropertySourceProperties()));
Binder binder = new Binder(source);
return binder
.bind("spring.main.web-application-type",
Bindable.of(WebApplicationType.class))
.orElseGet(this::deduceWebApplicationType);
}
private WebApplicationType deduceWebApplicationType() {
if (ClassUtils.isPresent(REACTIVE_WEB_ENVIRONMENT_CLASS, null)
&& !ClassUtils.isPresent(MVC_WEB_ENVIRONMENT_CLASS, null)) {
return WebApplicationType.REACTIVE;
}
for (String className : WEB_ENVIRONMENT_CLASSES) {
if (!ClassUtils.isPresent(className, null)) {
return WebApplicationType.NONE;
}
}
return WebApplicationType.SERVLET;
}
private boolean isWebEnvironmentSupported(MergedContextConfiguration mergedConfig) {
Class<?> testClass = mergedConfig.getTestClass();
ContextHierarchy hierarchy = AnnotationUtils.getAnnotation(testClass,
ContextHierarchy.class);
if (hierarchy == null || hierarchy.value().length == 0) {
return true;
}
ContextConfiguration[] configurations = hierarchy.value();
return isFromConfiguration(mergedConfig,
configurations[configurations.length - 1]);
}
private boolean isFromConfiguration(MergedContextConfiguration candidateConfig,
ContextConfiguration configuration) {
ContextConfigurationAttributes attributes = new ContextConfigurationAttributes(
candidateConfig.getTestClass(), configuration);
Set<Class<?>> configurationClasses = new HashSet<>(
Arrays.asList(attributes.getClasses()));
for (Class<?> candidate : candidateConfig.getClasses()) {
if (configurationClasses.contains(candidate)) {
return true;
}
}
return false;
}
protected Class<?>[] getOrFindConfigurationClasses(
MergedContextConfiguration mergedConfig) {
Class<?>[] classes = mergedConfig.getClasses();
if (containsNonTestComponent(classes) || mergedConfig.hasLocations()) {
return classes;
}
Class<?> found = new SpringBootConfigurationFinder()
.findFromClass(mergedConfig.getTestClass());
Assert.state(found != null,
"Unable to find a @SpringBootConfiguration, you need to use "
+ "@ContextConfiguration or @SpringBootTest(classes=...) "
+ "with your test");
logger.info("Found @SpringBootConfiguration " + found.getName() + " for test "
+ mergedConfig.getTestClass());
return merge(found, classes);
}
private boolean containsNonTestComponent(Class<?>[] classes) {
for (Class<?> candidate : classes) {
if (!AnnotatedElementUtils.isAnnotated(candidate, TestConfiguration.class)) {
return true;
}
}
return false;
}
private Class<?>[] merge(Class<?> head, Class<?>[] existing) {
Class<?>[] result = new Class<?>[existing.length + 1];
result[0] = head;
System.arraycopy(existing, 0, result, 1, existing.length);
return result;
}
private List<String> getAndProcessPropertySourceProperties(
MergedContextConfiguration mergedConfig) {
List<String> propertySourceProperties = new ArrayList<>(
Arrays.asList(mergedConfig.getPropertySourceProperties()));
String differentiator = getDifferentiatorPropertySourceProperty();
if (differentiator != null) {
propertySourceProperties.add(differentiator);
}
processPropertySourceProperties(mergedConfig, propertySourceProperties);
return propertySourceProperties;
}
/**
* Return a "differentiator" property to ensure that there is something to
* differentiate regular tests and bootstrapped tests. Without this property a cached
* context could be returned that wasn't created by this bootstrapper. By default uses
* the bootstrapper class as a property.
* @return the differentiator or {@code null}
*/
protected String getDifferentiatorPropertySourceProperty() {
return getClass().getName() + "=true";
}
/**
* Post process the property source properties, adding or removing elements as
* required.
* @param mergedConfig the merged context configuration
* @param propertySourceProperties the property source properties to process
*/
protected void processPropertySourceProperties(
MergedContextConfiguration mergedConfig,
List<String> propertySourceProperties) {
Class<?> testClass = mergedConfig.getTestClass();
String[] properties = getProperties(testClass);
if (!ObjectUtils.isEmpty(properties)) {
// Added first so that inlined properties from @TestPropertySource take
// precedence
propertySourceProperties.addAll(0, Arrays.asList(properties));
}
if (getWebEnvironment(testClass) == WebEnvironment.RANDOM_PORT) {
propertySourceProperties.add("server.port=0");
}
}
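	/*
	 * Illustrative only (not part of the original class): for a hypothetical test annotated with
	 * @SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT, properties = "app.flag=true")
	 * and no @TestPropertySource entries, the processed property source properties end up roughly as
	 *
	 *   app.flag=true               (from @SpringBootTest properties, inserted at index 0)
	 *   <bootstrapper class>=true   (differentiator, see getDifferentiatorPropertySourceProperty())
	 *   server.port=0               (added because of RANDOM_PORT)
	 *
	 * Pre-existing entries keep their relative order; only the additions above are made here.
	 */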
/**
* Return the {@link WebEnvironment} type for this test or null if undefined.
* @param testClass the source test class
* @return the {@link WebEnvironment} or {@code null}
*/
protected WebEnvironment getWebEnvironment(Class<?> testClass) {
SpringBootTest annotation = getAnnotation(testClass);
return (annotation != null ? annotation.webEnvironment() : null);
}
protected Class<?>[] getClasses(Class<?> testClass) {
SpringBootTest annotation = getAnnotation(testClass);
return (annotation != null ? annotation.classes() : null);
}
protected String[] getProperties(Class<?> testClass) {
SpringBootTest annotation = getAnnotation(testClass);
return (annotation != null ? annotation.properties() : null);
}
protected SpringBootTest getAnnotation(Class<?> testClass) {
return AnnotatedElementUtils.getMergedAnnotation(testClass, SpringBootTest.class);
}
protected void verifyConfiguration(Class<?> testClass) {
SpringBootTest springBootTest = getAnnotation(testClass);
if (springBootTest != null
&& (springBootTest.webEnvironment() == WebEnvironment.DEFINED_PORT
|| springBootTest.webEnvironment() == WebEnvironment.RANDOM_PORT)
&& getAnnotation(WebAppConfiguration.class, testClass) != null) {
throw new IllegalStateException("@WebAppConfiguration should only be used "
+ "with @SpringBootTest when @SpringBootTest is configured with a "
+ "mock web environment. Please remove @WebAppConfiguration or "
+ "reconfigure @SpringBootTest.");
}
}
private <T extends Annotation> T getAnnotation(Class<T> annotationType,
Class<?> testClass) {
return AnnotatedElementUtils.getMergedAnnotation(testClass, annotationType);
}
/**
* Create a new {@link MergedContextConfiguration} with different classes.
* @param mergedConfig the source config
* @param classes the replacement classes
* @return a new {@link MergedContextConfiguration}
*/
protected final MergedContextConfiguration createModifiedConfig(
MergedContextConfiguration mergedConfig, Class<?>[] classes) {
return createModifiedConfig(mergedConfig, classes,
mergedConfig.getPropertySourceProperties());
}
/**
* Create a new {@link MergedContextConfiguration} with different classes and
* properties.
* @param mergedConfig the source config
* @param classes the replacement classes
* @param propertySourceProperties the replacement properties
* @return a new {@link MergedContextConfiguration}
*/
protected final MergedContextConfiguration createModifiedConfig(
MergedContextConfiguration mergedConfig, Class<?>[] classes,
String[] propertySourceProperties) {
return new MergedContextConfiguration(mergedConfig.getTestClass(),
mergedConfig.getLocations(), classes,
mergedConfig.getContextInitializerClasses(),
mergedConfig.getActiveProfiles(),
mergedConfig.getPropertySourceLocations(), propertySourceProperties,
mergedConfig.getContextCustomizers(), mergedConfig.getContextLoader(),
getCacheAwareContextLoaderDelegate(), mergedConfig.getParent());
}
}
| bclozel/spring-boot | spring-boot-project/spring-boot-test/src/main/java/org/springframework/boot/test/context/SpringBootTestContextBootstrapper.java | Java | apache-2.0 | 15,659 |
<!DOCTYPE html>
<html>
<head>
<title>Sales Report</title>
<link rel="stylesheet" href="{{asset('core/themes/appui-backend/css/bootstrap.min.css')}}">
</head>
<body>
<div class="h3 text-center">Sales Report</div>
<div class="text-center">Period : {{ $period }}</div>
<br>
<table class="table tabble-striped table-bordered" cellpadding="0" cellspacing="0" border="0" >
<thead>
<tr>
<th width="30px" class="text-center">No.</th>
<th width="100px">Form Number</th>
<th width="125px">Form Date</th>
<th>Customer</th>
<th>Sales</th>
<th class="text-right">Total</th>
</tr>
</thead>
<tbody>
            <?php $total_sales = 0; $i = 0; ?>
@foreach($list_sales as $sales)
<tr id="list-{{$sales->id}}" @if($sales->formulir->form_status == -1) style="text-decoration: line-through;" @endif>
<td class="text-center">{{++$i}}</td>
<td>{{ $sales->formulir->form_number }}</td>
<td>{{ date_format_view($sales->formulir->form_date, true) }}</td>
<td>{{ $sales->customer->codeName }}</td>
<td>{{ $sales->formulir->createdBy->name }}</td>
<td class="text-right">{{ number_format_accounting($sales->total) }}</td>
</tr>
@if($sales->formulir->form_status != -1)
<?php $total_sales += $sales->total;?>
@endif
@endforeach
</tbody>
<tfoot>
<tr>
<td colspan="5" class="text-right"><strong>Total</strong></td>
<td class="text-right"><strong>{{ number_format_accounting($total_sales) }}</strong></td>
</tr>
</tfoot>
</table>
</body>
</html>
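{{--
    Illustrative note only (not part of the original view): this template expects a $period string and
    a $list_sales collection whose items expose formulir (form_number, form_date, form_status, createdBy->name),
    customer->codeName and total. A hypothetical controller might render it like:

        return view('sales.point.pos.report.pdf', [
            'period' => $period,
            'list_sales' => $list_sales,
        ]);

    The view name above is a guess for illustration; it is not taken from the source.
--}}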
| bgd-point/point-app-test | packages/point/point-sales/src/views/app/sales/point/pos/report/pdf.blade.php | PHP | apache-2.0 | 1,787 |
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.roots.impl;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.extensions.Extensions;
import com.intellij.openapi.fileTypes.FileTypeRegistry;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.module.ModuleManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.roots.*;
import com.intellij.openapi.roots.impl.libraries.LibraryEx;
import com.intellij.openapi.roots.libraries.Library;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.vfs.VirtualFileWithId;
import com.intellij.openapi.vfs.newvfs.events.VFileEvent;
import com.intellij.util.CollectionQuery;
import com.intellij.util.Query;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.containers.MultiMap;
import com.intellij.util.containers.SLRUMap;
import gnu.trove.TObjectIntHashMap;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jps.model.module.JpsModuleSourceRootType;
import java.util.*;
public class RootIndex {
public static final Comparator<OrderEntry> BY_OWNER_MODULE = (o1, o2) -> {
String name1 = o1.getOwnerModule().getName();
String name2 = o2.getOwnerModule().getName();
return name1.compareTo(name2);
};
private static final Logger LOG = Logger.getInstance("#com.intellij.openapi.roots.impl.RootIndex");
private static final FileTypeRegistry ourFileTypes = FileTypeRegistry.getInstance();
private final Map<VirtualFile, String> myPackagePrefixByRoot = ContainerUtil.newHashMap();
private final InfoCache myInfoCache;
private final List<JpsModuleSourceRootType<?>> myRootTypes = ContainerUtil.newArrayList();
private final TObjectIntHashMap<JpsModuleSourceRootType<?>> myRootTypeId = new TObjectIntHashMap<>();
@NotNull private final Project myProject;
private final PackageDirectoryCache myPackageDirectoryCache;
private OrderEntryGraph myOrderEntryGraph;
// made public for Upsource
public RootIndex(@NotNull Project project, @NotNull InfoCache cache) {
myProject = project;
myInfoCache = cache;
final RootInfo info = buildRootInfo(project);
MultiMap<String, VirtualFile> rootsByPackagePrefix = MultiMap.create();
Set<VirtualFile> allRoots = info.getAllRoots();
for (VirtualFile root : allRoots) {
List<VirtualFile> hierarchy = getHierarchy(root, allRoots, info);
Pair<DirectoryInfo, String> pair = hierarchy != null
? calcDirectoryInfo(root, hierarchy, info)
: new Pair<>(NonProjectDirectoryInfo.IGNORED, null);
cacheInfos(root, root, pair.first);
rootsByPackagePrefix.putValue(pair.second, root);
myPackagePrefixByRoot.put(root, pair.second);
}
myPackageDirectoryCache = new PackageDirectoryCache(rootsByPackagePrefix) {
@Override
protected boolean isPackageDirectory(@NotNull VirtualFile dir, @NotNull String packageName) {
return getInfoForFile(dir).isInProject() && packageName.equals(getPackageName(dir));
}
};
}
public void onLowMemory() {
myPackageDirectoryCache.onLowMemory();
}
@NotNull
private RootInfo buildRootInfo(@NotNull Project project) {
final RootInfo info = new RootInfo();
for (final Module module : ModuleManager.getInstance(project).getModules()) {
final ModuleRootManager moduleRootManager = ModuleRootManager.getInstance(module);
for (final VirtualFile contentRoot : moduleRootManager.getContentRoots()) {
if (!info.contentRootOf.containsKey(contentRoot) && ensureValid(contentRoot, module)) {
info.contentRootOf.put(contentRoot, module);
}
}
for (ContentEntry contentEntry : moduleRootManager.getContentEntries()) {
if (!(contentEntry instanceof ContentEntryImpl) || !((ContentEntryImpl)contentEntry).isDisposed()) {
for (VirtualFile excludeRoot : contentEntry.getExcludeFolderFiles()) {
if (!ensureValid(excludeRoot, contentEntry)) continue;
info.excludedFromModule.put(excludeRoot, module);
}
}
// Init module sources
for (final SourceFolder sourceFolder : contentEntry.getSourceFolders()) {
final VirtualFile sourceFolderRoot = sourceFolder.getFile();
if (sourceFolderRoot != null && ensureValid(sourceFolderRoot, sourceFolder)) {
info.rootTypeId.put(sourceFolderRoot, getRootTypeId(sourceFolder.getRootType()));
info.classAndSourceRoots.add(sourceFolderRoot);
info.sourceRootOf.putValue(sourceFolderRoot, module);
info.packagePrefix.put(sourceFolderRoot, sourceFolder.getPackagePrefix());
}
}
}
for (OrderEntry orderEntry : moduleRootManager.getOrderEntries()) {
if (orderEntry instanceof LibraryOrSdkOrderEntry) {
final LibraryOrSdkOrderEntry entry = (LibraryOrSdkOrderEntry)orderEntry;
final VirtualFile[] sourceRoots = entry.getRootFiles(OrderRootType.SOURCES);
final VirtualFile[] classRoots = entry.getRootFiles(OrderRootType.CLASSES);
// Init library sources
for (final VirtualFile sourceRoot : sourceRoots) {
if (!ensureValid(sourceRoot, entry)) continue;
info.classAndSourceRoots.add(sourceRoot);
info.libraryOrSdkSources.add(sourceRoot);
info.packagePrefix.put(sourceRoot, "");
}
// init library classes
for (final VirtualFile classRoot : classRoots) {
if (!ensureValid(classRoot, entry)) continue;
info.classAndSourceRoots.add(classRoot);
info.libraryOrSdkClasses.add(classRoot);
info.packagePrefix.put(classRoot, "");
}
if (orderEntry instanceof LibraryOrderEntry) {
Library library = ((LibraryOrderEntry)orderEntry).getLibrary();
if (library != null) {
for (VirtualFile root : ((LibraryEx)library).getExcludedRoots()) {
if (!ensureValid(root, library)) continue;
info.excludedFromLibraries.putValue(root, library);
}
for (VirtualFile root : sourceRoots) {
if (!ensureValid(root, library)) continue;
info.sourceOfLibraries.putValue(root, library);
}
for (VirtualFile root : classRoots) {
if (!ensureValid(root, library)) continue;
info.classOfLibraries.putValue(root, library);
}
}
}
}
}
}
for (AdditionalLibraryRootsProvider provider : Extensions.getExtensions(AdditionalLibraryRootsProvider.EP_NAME)) {
Collection<SyntheticLibrary> libraries = provider.getAdditionalProjectLibraries(project);
for (SyntheticLibrary descriptor : libraries) {
for (VirtualFile root : descriptor.getSourceRoots()) {
if (!ensureValid(root, project)) continue;
info.libraryOrSdkSources.add(root);
info.classAndSourceRoots.add(root);
info.sourceOfLibraries.putValue(root, descriptor);
}
for (VirtualFile file : descriptor.getExcludedRoots()) {
if (!ensureValid(file, project)) continue;
info.excludedFromLibraries.putValue(file, descriptor);
}
}
}
for (DirectoryIndexExcludePolicy policy : Extensions.getExtensions(DirectoryIndexExcludePolicy.EP_NAME, project)) {
info.excludedFromProject.addAll(ContainerUtil.filter(policy.getExcludeRootsForProject(), file -> ensureValid(file, policy)));
}
return info;
}
private static boolean ensureValid(@NotNull VirtualFile file, @NotNull Object container) {
if (!(file instanceof VirtualFileWithId)) {
//skip roots from unsupported file systems (e.g. http)
return false;
}
if (!file.isValid()) {
LOG.error("Invalid root " + file + " in " + container);
return false;
}
return true;
}
@NotNull
private synchronized OrderEntryGraph getOrderEntryGraph() {
if (myOrderEntryGraph == null) {
RootInfo rootInfo = buildRootInfo(myProject);
myOrderEntryGraph = new OrderEntryGraph(myProject, rootInfo);
}
return myOrderEntryGraph;
}
/**
* A reverse dependency graph of (library, jdk, module, module source) -> (module).
*
* <p>Each edge carries with it the associated OrderEntry that caused the dependency.
*/
private static class OrderEntryGraph {
private static class Edge {
Module myKey;
ModuleOrderEntry myOrderEntry; // Order entry from myKey -> the node containing the edge
boolean myRecursive; // Whether this edge should be descended into during graph walk
public Edge(Module key, ModuleOrderEntry orderEntry, boolean recursive) {
myKey = key;
myOrderEntry = orderEntry;
myRecursive = recursive;
}
@Override
public String toString() {
return myOrderEntry.toString();
}
}
private static class Node {
Module myKey;
List<Edge> myEdges = new ArrayList<>();
@Override
public String toString() {
return myKey.toString();
}
}
private static class Graph {
Map<Module, Node> myNodes = new HashMap<>();
}
final Project myProject;
final RootInfo myRootInfo;
final Set<VirtualFile> myAllRoots;
Graph myGraph;
MultiMap<VirtualFile, Node> myRoots; // Map of roots to their root nodes, eg. library jar -> library node
final SynchronizedSLRUCache<VirtualFile, List<OrderEntry>> myCache;
private MultiMap<VirtualFile, OrderEntry> myLibClassRootEntries;
private MultiMap<VirtualFile, OrderEntry> myLibSourceRootEntries;
public OrderEntryGraph(Project project, RootInfo rootInfo) {
myProject = project;
myRootInfo = rootInfo;
myAllRoots = myRootInfo.getAllRoots();
int cacheSize = Math.max(25, (myAllRoots.size() / 100) * 2);
myCache = new SynchronizedSLRUCache<VirtualFile, List<OrderEntry>>(cacheSize, cacheSize) {
@NotNull
@Override
public List<OrderEntry> createValue(VirtualFile key) {
return collectOrderEntries(key);
}
};
initGraph();
initLibraryRoots();
}
private void initGraph() {
Graph graph = new Graph();
MultiMap<VirtualFile, Node> roots = MultiMap.createSmart();
for (final Module module : ModuleManager.getInstance(myProject).getModules()) {
final ModuleRootManager moduleRootManager = ModuleRootManager.getInstance(module);
List<OrderEnumerationHandler> handlers = OrderEnumeratorBase.getCustomHandlers(module);
for (OrderEntry orderEntry : moduleRootManager.getOrderEntries()) {
if (orderEntry instanceof ModuleOrderEntry) {
ModuleOrderEntry moduleOrderEntry = (ModuleOrderEntry)orderEntry;
final Module depModule = moduleOrderEntry.getModule();
if (depModule != null) {
Node node = graph.myNodes.get(depModule);
OrderEnumerator en = OrderEnumerator.orderEntries(depModule).exportedOnly();
if (node == null) {
node = new Node();
node.myKey = depModule;
graph.myNodes.put(depModule, node);
VirtualFile[] importedClassRoots = en.classes().usingCache().getRoots();
for (VirtualFile importedClassRoot : importedClassRoots) {
roots.putValue(importedClassRoot, node);
}
VirtualFile[] importedSourceRoots = en.sources().usingCache().getRoots();
for (VirtualFile sourceRoot : importedSourceRoots) {
roots.putValue(sourceRoot, node);
}
}
boolean shouldRecurse = en.recursively().shouldRecurse(moduleOrderEntry, handlers);
node.myEdges.add(new Edge(module, moduleOrderEntry, shouldRecurse));
}
}
}
}
myGraph = graph;
myRoots = roots;
}
private void initLibraryRoots() {
MultiMap<VirtualFile, OrderEntry> libClassRootEntries = MultiMap.createSmart();
MultiMap<VirtualFile, OrderEntry> libSourceRootEntries = MultiMap.createSmart();
for (final Module module : ModuleManager.getInstance(myProject).getModules()) {
final ModuleRootManager moduleRootManager = ModuleRootManager.getInstance(module);
for (OrderEntry orderEntry : moduleRootManager.getOrderEntries()) {
if (orderEntry instanceof LibraryOrSdkOrderEntry) {
final LibraryOrSdkOrderEntry entry = (LibraryOrSdkOrderEntry)orderEntry;
for (final VirtualFile sourceRoot : entry.getRootFiles(OrderRootType.SOURCES)) {
libSourceRootEntries.putValue(sourceRoot, orderEntry);
}
for (final VirtualFile classRoot : entry.getRootFiles(OrderRootType.CLASSES)) {
libClassRootEntries.putValue(classRoot, orderEntry);
}
}
}
}
myLibClassRootEntries = libClassRootEntries;
myLibSourceRootEntries = libSourceRootEntries;
}
private List<OrderEntry> getOrderEntries(@NotNull VirtualFile file) {
return myCache.get(file);
}
/**
* Traverses the graph from the given file, collecting all encountered order entries.
*/
private List<OrderEntry> collectOrderEntries(@NotNull VirtualFile file) {
List<VirtualFile> roots = getHierarchy(file, myAllRoots, myRootInfo);
if (roots == null) {
return Collections.emptyList();
}
List<OrderEntry> result = new ArrayList<>();
Stack<Node> stack = new Stack<>();
for (VirtualFile root : roots) {
Collection<Node> nodes = myRoots.get(root);
for (Node node : nodes) {
stack.push(node);
}
}
Set<Node> seen = new HashSet<>();
while (!stack.isEmpty()) {
Node node = stack.pop();
if (seen.contains(node)) {
continue;
}
seen.add(node);
for (Edge edge : node.myEdges) {
result.add(edge.myOrderEntry);
if (edge.myRecursive) {
Node targetNode = myGraph.myNodes.get(edge.myKey);
if (targetNode != null) {
stack.push(targetNode);
}
}
}
}
@Nullable VirtualFile libraryClassRoot = myRootInfo.findLibraryRootInfo(roots, false);
@Nullable VirtualFile librarySourceRoot = myRootInfo.findLibraryRootInfo(roots, true);
result.addAll(myRootInfo.getLibraryOrderEntries(roots, libraryClassRoot, librarySourceRoot, myLibClassRootEntries, myLibSourceRootEntries));
VirtualFile moduleContentRoot = myRootInfo.findNearestContentRoot(roots);
if (moduleContentRoot != null) {
ContainerUtil.addIfNotNull(result, myRootInfo.getModuleSourceEntry(roots, moduleContentRoot, myLibClassRootEntries));
}
Collections.sort(result, BY_OWNER_MODULE);
return result;
}
}
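  /*
   * Illustrative sketch only (not part of the original class): if module "app" declares an
   * exported dependency on module "lib", the graph built above contains a node keyed by "lib"
   * whose single edge carries the app->lib ModuleOrderEntry, and myRoots maps each class/source
   * root exported by "lib" to that node. collectOrderEntries(file) for a file under one of those
   * roots therefore starts at the "lib" node, collects the app->lib entry (recursing into "app"
   * when the edge is marked recursive) and then appends library/SDK and module-source entries.
   */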
private int getRootTypeId(@NotNull JpsModuleSourceRootType<?> rootType) {
if (myRootTypeId.containsKey(rootType)) {
return myRootTypeId.get(rootType);
}
int id = myRootTypes.size();
if (id > DirectoryInfoImpl.MAX_ROOT_TYPE_ID) {
LOG.error("Too many different types of module source roots (" + id + ") registered: " + myRootTypes);
}
myRootTypes.add(rootType);
myRootTypeId.put(rootType, id);
return id;
}
@NotNull
public DirectoryInfo getInfoForFile(@NotNull VirtualFile file) {
if (!file.isValid()) {
return NonProjectDirectoryInfo.INVALID;
}
VirtualFile dir;
if (!file.isDirectory()) {
DirectoryInfo info = myInfoCache.getCachedInfo(file);
if (info != null) {
return info;
}
if (ourFileTypes.isFileIgnored(file)) {
return NonProjectDirectoryInfo.IGNORED;
}
dir = file.getParent();
}
else {
dir = file;
}
int count = 0;
for (VirtualFile root = dir; root != null; root = root.getParent()) {
if (++count > 1000) {
throw new IllegalStateException("Possible loop in tree, started at " + dir.getName());
}
DirectoryInfo info = myInfoCache.getCachedInfo(root);
if (info != null) {
if (!dir.equals(root)) {
cacheInfos(dir, root, info);
}
return info;
}
if (ourFileTypes.isFileIgnored(root)) {
return cacheInfos(dir, root, NonProjectDirectoryInfo.IGNORED);
}
}
return cacheInfos(dir, null, NonProjectDirectoryInfo.NOT_UNDER_PROJECT_ROOTS);
}
@NotNull
private DirectoryInfo cacheInfos(VirtualFile dir, @Nullable VirtualFile stopAt, @NotNull DirectoryInfo info) {
while (dir != null) {
myInfoCache.cacheInfo(dir, info);
if (dir.equals(stopAt)) {
break;
}
dir = dir.getParent();
}
return info;
}
@NotNull
public Query<VirtualFile> getDirectoriesByPackageName(@NotNull final String packageName, final boolean includeLibrarySources) {
// Note that this method is used in upsource as well, hence, don't reduce this method's visibility.
List<VirtualFile> result = myPackageDirectoryCache.getDirectoriesByPackageName(packageName);
if (!includeLibrarySources) {
result = ContainerUtil.filter(result, file -> {
DirectoryInfo info = getInfoForFile(file);
return info.isInProject() && (!info.isInLibrarySource() || info.isInModuleSource() || info.hasLibraryClassRoot());
});
}
return new CollectionQuery<>(result);
}
@Nullable
public String getPackageName(@NotNull final VirtualFile dir) {
if (dir.isDirectory()) {
if (ourFileTypes.isFileIgnored(dir)) {
return null;
}
if (myPackagePrefixByRoot.containsKey(dir)) {
return myPackagePrefixByRoot.get(dir);
}
final VirtualFile parent = dir.getParent();
if (parent != null) {
return getPackageNameForSubdir(getPackageName(parent), dir.getName());
}
}
return null;
}
@Nullable
protected static String getPackageNameForSubdir(@Nullable String parentPackageName, @NotNull String subdirName) {
if (parentPackageName == null) return null;
return parentPackageName.isEmpty() ? subdirName : parentPackageName + "." + subdirName;
}
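  /*
   * Illustrative only: getPackageNameForSubdir("com.example", "util") -> "com.example.util",
   * getPackageNameForSubdir("", "com") -> "com", and a null parent package name -> null.
   */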
@Nullable
public JpsModuleSourceRootType<?> getSourceRootType(@NotNull DirectoryInfo directoryInfo) {
return myRootTypes.get(directoryInfo.getSourceRootTypeId());
}
boolean resetOnEvents(@NotNull List<? extends VFileEvent> events) {
for (VFileEvent event : events) {
VirtualFile file = event.getFile();
if (file == null || file.isDirectory()) {
return true;
}
}
return false;
}
@Nullable("returns null only if dir is under ignored folder")
private static List<VirtualFile> getHierarchy(VirtualFile dir, @NotNull Set<VirtualFile> allRoots, @NotNull RootInfo info) {
List<VirtualFile> hierarchy = ContainerUtil.newArrayList();
boolean hasContentRoots = false;
while (dir != null) {
hasContentRoots |= info.contentRootOf.get(dir) != null;
if (!hasContentRoots && ourFileTypes.isFileIgnored(dir)) {
return null;
}
if (allRoots.contains(dir)) {
hierarchy.add(dir);
}
dir = dir.getParent();
}
return hierarchy;
}
private static class RootInfo {
// getDirectoriesByPackageName used to be in this order, some clients might rely on that
@NotNull final LinkedHashSet<VirtualFile> classAndSourceRoots = ContainerUtil.newLinkedHashSet();
@NotNull final Set<VirtualFile> libraryOrSdkSources = ContainerUtil.newHashSet();
@NotNull final Set<VirtualFile> libraryOrSdkClasses = ContainerUtil.newHashSet();
@NotNull final Map<VirtualFile, Module> contentRootOf = ContainerUtil.newHashMap();
@NotNull final MultiMap<VirtualFile, Module> sourceRootOf = MultiMap.createSet();
@NotNull final TObjectIntHashMap<VirtualFile> rootTypeId = new TObjectIntHashMap<>();
@NotNull final MultiMap<VirtualFile, /*Library|SyntheticLibrary*/ Object> excludedFromLibraries = MultiMap.createSmart();
@NotNull final MultiMap<VirtualFile, Library> classOfLibraries = MultiMap.createSmart();
@NotNull final MultiMap<VirtualFile, /*Library|SyntheticLibrary*/ Object> sourceOfLibraries = MultiMap.createSmart();
@NotNull final Set<VirtualFile> excludedFromProject = ContainerUtil.newHashSet();
@NotNull final Map<VirtualFile, Module> excludedFromModule = ContainerUtil.newHashMap();
@NotNull final Map<VirtualFile, String> packagePrefix = ContainerUtil.newHashMap();
@NotNull
Set<VirtualFile> getAllRoots() {
LinkedHashSet<VirtualFile> result = ContainerUtil.newLinkedHashSet();
result.addAll(classAndSourceRoots);
result.addAll(contentRootOf.keySet());
result.addAll(excludedFromLibraries.keySet());
result.addAll(excludedFromModule.keySet());
result.addAll(excludedFromProject);
return result;
}
/**
* Returns nearest content root for a file by its parent directories hierarchy. If the file is excluded (i.e. located under an excluded
* root and there are no source roots on the path to the excluded root) returns {@code null}.
*/
@Nullable
private VirtualFile findNearestContentRoot(@NotNull List<VirtualFile> hierarchy) {
Collection<Module> sourceRootOwners = null;
boolean underExcludedSourceRoot = false;
for (VirtualFile root : hierarchy) {
Module module = contentRootOf.get(root);
Module excludedFrom = excludedFromModule.get(root);
if (module != null && (excludedFrom != module || underExcludedSourceRoot && sourceRootOwners.contains(module))) {
return root;
}
if (excludedFrom != null || excludedFromProject.contains(root)) {
if (sourceRootOwners != null) {
underExcludedSourceRoot = true;
}
else {
return null;
}
}
if (!underExcludedSourceRoot && sourceRootOf.containsKey(root)) {
Collection<Module> modulesForSourceRoot = sourceRootOf.get(root);
if (!modulesForSourceRoot.isEmpty()) {
if (sourceRootOwners == null) {
sourceRootOwners = modulesForSourceRoot;
}
else {
sourceRootOwners = ContainerUtil.union(sourceRootOwners, modulesForSourceRoot);
}
}
}
}
return null;
}
@Nullable
private VirtualFile findNearestContentRootForExcluded(@NotNull List<VirtualFile> hierarchy) {
for (VirtualFile root : hierarchy) {
if (contentRootOf.containsKey(root)) {
return root;
}
}
return null;
}
@Nullable
private VirtualFile findLibraryRootInfo(@NotNull List<VirtualFile> hierarchy, boolean source) {
Set<Object> librariesToIgnore = ContainerUtil.newHashSet();
for (VirtualFile root : hierarchy) {
librariesToIgnore.addAll(excludedFromLibraries.get(root));
if (source && libraryOrSdkSources.contains(root) &&
(!sourceOfLibraries.containsKey(root) || !librariesToIgnore.containsAll(sourceOfLibraries.get(root)))) {
return root;
}
else if (!source && libraryOrSdkClasses.contains(root) &&
(!classOfLibraries.containsKey(root) || !librariesToIgnore.containsAll(classOfLibraries.get(root)))) {
return root;
}
}
return null;
}
private String calcPackagePrefix(@NotNull VirtualFile root,
@NotNull List<VirtualFile> hierarchy,
VirtualFile moduleContentRoot,
VirtualFile libraryClassRoot,
VirtualFile librarySourceRoot) {
VirtualFile packageRoot = findPackageRootInfo(hierarchy, moduleContentRoot, libraryClassRoot, librarySourceRoot);
String prefix = packagePrefix.get(packageRoot);
if (prefix != null && !root.equals(packageRoot)) {
assert packageRoot != null;
String relative = VfsUtilCore.getRelativePath(root, packageRoot, '.');
prefix = StringUtil.isEmpty(prefix) ? relative : prefix + '.' + relative;
}
return prefix;
}
@Nullable
private VirtualFile findPackageRootInfo(@NotNull List<VirtualFile> hierarchy,
VirtualFile moduleContentRoot,
VirtualFile libraryClassRoot,
VirtualFile librarySourceRoot) {
for (VirtualFile root : hierarchy) {
if (moduleContentRoot != null &&
sourceRootOf.get(root).contains(contentRootOf.get(moduleContentRoot)) &&
librarySourceRoot == null) {
return root;
}
if (root.equals(libraryClassRoot) || root.equals(librarySourceRoot)) {
return root;
}
if (root.equals(moduleContentRoot) && !sourceRootOf.containsKey(root) && librarySourceRoot == null && libraryClassRoot == null) {
return null;
}
}
return null;
}
@NotNull
private LinkedHashSet<OrderEntry> getLibraryOrderEntries(@NotNull List<VirtualFile> hierarchy,
@Nullable VirtualFile libraryClassRoot,
@Nullable VirtualFile librarySourceRoot,
@NotNull MultiMap<VirtualFile, OrderEntry> libClassRootEntries,
@NotNull MultiMap<VirtualFile, OrderEntry> libSourceRootEntries) {
LinkedHashSet<OrderEntry> orderEntries = ContainerUtil.newLinkedHashSet();
for (VirtualFile root : hierarchy) {
if (root.equals(libraryClassRoot) && !sourceRootOf.containsKey(root)) {
orderEntries.addAll(libClassRootEntries.get(root));
}
if (root.equals(librarySourceRoot) && libraryClassRoot == null) {
orderEntries.addAll(libSourceRootEntries.get(root));
}
if (libClassRootEntries.containsKey(root) || sourceRootOf.containsKey(root) && librarySourceRoot == null) {
break;
}
}
return orderEntries;
}
@Nullable
private ModuleSourceOrderEntry getModuleSourceEntry(@NotNull List<VirtualFile> hierarchy,
@NotNull VirtualFile moduleContentRoot,
@NotNull MultiMap<VirtualFile, OrderEntry> libClassRootEntries) {
Module module = contentRootOf.get(moduleContentRoot);
for (VirtualFile root : hierarchy) {
if (sourceRootOf.get(root).contains(module)) {
return ContainerUtil.findInstance(ModuleRootManager.getInstance(module).getOrderEntries(), ModuleSourceOrderEntry.class);
}
if (libClassRootEntries.containsKey(root)) {
return null;
}
}
return null;
}
}
@NotNull
private static Pair<DirectoryInfo, String> calcDirectoryInfo(@NotNull final VirtualFile root,
@NotNull final List<VirtualFile> hierarchy,
@NotNull RootInfo info) {
VirtualFile moduleContentRoot = info.findNearestContentRoot(hierarchy);
VirtualFile libraryClassRoot = info.findLibraryRootInfo(hierarchy, false);
VirtualFile librarySourceRoot = info.findLibraryRootInfo(hierarchy, true);
boolean inProject = moduleContentRoot != null || libraryClassRoot != null || librarySourceRoot != null;
VirtualFile nearestContentRoot;
if (inProject) {
nearestContentRoot = moduleContentRoot;
}
else {
nearestContentRoot = info.findNearestContentRootForExcluded(hierarchy);
if (nearestContentRoot == null) {
return new Pair<>(NonProjectDirectoryInfo.EXCLUDED, null);
}
}
VirtualFile sourceRoot = info.findPackageRootInfo(hierarchy, moduleContentRoot, null, librarySourceRoot);
VirtualFile moduleSourceRoot = info.findPackageRootInfo(hierarchy, moduleContentRoot, null, null);
boolean inModuleSources = moduleSourceRoot != null;
boolean inLibrarySource = librarySourceRoot != null;
int typeId = moduleSourceRoot != null ? info.rootTypeId.get(moduleSourceRoot) : 0;
Module module = info.contentRootOf.get(nearestContentRoot);
DirectoryInfo directoryInfo =
new DirectoryInfoImpl(root, module, nearestContentRoot, sourceRoot, libraryClassRoot, inModuleSources, inLibrarySource, !inProject, typeId);
String packagePrefix = info.calcPackagePrefix(root, hierarchy, moduleContentRoot, libraryClassRoot, librarySourceRoot);
return Pair.create(directoryInfo, packagePrefix);
}
@NotNull
public List<OrderEntry> getOrderEntries(@NotNull DirectoryInfo info) {
if (!(info instanceof DirectoryInfoImpl)) return Collections.emptyList();
return getOrderEntryGraph().getOrderEntries(((DirectoryInfoImpl)info).getRoot());
}
public interface InfoCache {
@Nullable
DirectoryInfo getCachedInfo(@NotNull VirtualFile dir);
void cacheInfo(@NotNull VirtualFile dir, @NotNull DirectoryInfo info);
}
/**
* An LRU cache with synchronization around the primary cache operations (get() and insertion
* of a newly created value). Other map operations are not synchronized.
*/
abstract static class SynchronizedSLRUCache<K, V> extends SLRUMap<K,V> {
protected final Object myLock = new Object();
protected SynchronizedSLRUCache(final int protectedQueueSize, final int probationalQueueSize) {
super(protectedQueueSize, probationalQueueSize);
}
@NotNull
public abstract V createValue(K key);
@Override
@NotNull
public V get(K key) {
V value;
synchronized (myLock) {
value = super.get(key);
if (value != null) {
return value;
}
}
value = createValue(key);
synchronized (myLock) {
put(key, value);
}
return value;
}
}
}
| semonte/intellij-community | platform/projectModel-impl/src/com/intellij/openapi/roots/impl/RootIndex.java | Java | apache-2.0 | 31,099 |
L.Polygon.polygonEditor = L.Polygon.extend({
_prepareMapIfNeeded: function() {
var that = this;
if(this._map._editablePolygons != null) {
return;
}
        // Container for all editable polygons on this map:
this._map._editablePolygons = [];
        // Click anywhere on the map to add a new polygon:
if(this._options.newPolygons) {
// console.log('click na map');
that._map.on('click', function(event) {
// console.log('click, target=' + (event.target == that._map) + ' type=' + event.type);
if(that.isBusy())
return;
that._setBusy(true);
var latLng = event.latlng;
if(that._options.newPolygonConfirmMessage)
if(!confirm(that._options.newPolygonConfirmMessage))
return
var contexts = [{'originalPolygonNo': null, 'originalPointNo': null}];
L.Polygon.PolygonEditor([latLng], that._options, contexts).addTo(that._map);
that._setBusy(false);
that._showBoundMarkers();
});
}
},
/**
     * Will add all needed methods to this polygon.
*/
_addMethods: function() {
var that = this;
this._init = function(options, contexts) {
this._prepareMapIfNeeded();
/*
             * Utility method added to this map to retrieve the editable
             * polygons.
*/
if(!this._map.getEditablePolylines) {
this._map.getEditablePolylines = function() {
return that._map._editablePolygons;
}
}
/**
             * Since all point editing is done by marker events, markers
             * are the main holders of the polygon point locations.
             * Every marker holds a reference to the newPointMarker on the
             * edge *before* it (for the first marker this is the edge back
             * to the last point, since the polygon is closed).
*/
this._parseOptions(options);
this._setMarkers();
var map = this._map;
this._map.on("zoomend", function(e) {
that._showBoundMarkers();
});
this._map.on("moveend", function(e) {
that._showBoundMarkers();
});
this._lastMouseEvent = undefined;
if('_desiredPolygonNo' in this) {
this._map._editablePolygons.splice(this._desiredPolygonNo, 0, this);
} else {
this._map._editablePolygons.push(this);
}
};
/**
         * Check if there is *any* busy editable polygon on this map.
*/
this.isBusy = function() {
for(var i = 0; i < that._map._editablePolygons.length; i++)
if(that._map._editablePolygons[i]._isBusy())
return true;
return false;
};
/**
         * Check if this polygon is busy adding/moving nodes. Note that there
         * may be *other* editable polygons on the same map which *are* busy.
*/
this._isBusy = function() {
return that._busy;
};
this._setBusy = function(busy) {
that._busy = busy;
};
/**
         * Get the edit markers for this polygon.
*/
this.getPoints = function() {
return this._markers;
};
this._parseOptions = function(options) {
if(!options)
options = {};
// Do not show edit markers if more than maxMarkers would be shown:
if(!('maxMarkers' in options)) {
options.maxMarkers = 100;
}
this.maxMarkers = options.maxMarkers;
// Do not allow edges to be destroyed (split polygon in two)
if(!('deletableEdges' in options)) {
options.deletableEdges = false;
}
this.deletableEdges = options.deletableEdges;
// Icons:
if(options.pointIcon) {
this.pointIcon = options.pointIcon;
} else {
this.pointIcon = L.icon({ iconUrl: 'editmarker.png', iconSize: [11, 11], iconAnchor: [6, 6] });
}
if(options.newPointIcon) {
this.newPointIcon = options.newPointIcon;
} else {
this.newPointIcon = L.icon({ iconUrl: 'editmarker2.png', iconSize: [11, 11], iconAnchor: [6, 6] });
}
};
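        /*
         * Illustrative only (not part of the original source): a hypothetical options object
         * accepted by _parseOptions above and by L.Polygon.PolygonEditor might look like this.
         *
         *   var options = {
         *       maxMarkers: 100,              // hide edit markers when more would be visible
         *       deletableEdges: false,        // parsed here; the edge-splitting handler is commented out below
         *       newPolygons: true,            // allow click-on-map to start a new polygon
         *       newPolygonConfirmMessage: 'Create a new polygon here?',
         *       pointIcon: L.icon({iconUrl: 'editmarker.png', iconSize: [11, 11], iconAnchor: [6, 6]}),
         *       newPointIcon: L.icon({iconUrl: 'editmarker2.png', iconSize: [11, 11], iconAnchor: [6, 6]})
         *   };
         *   var polygon = L.Polygon.PolygonEditor(latLngs, options, contexts).addTo(map);
         */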
/**
         * Show only the markers in the current map bounds, and only if no more
         * than maxMarkers would be shown. This method is called on events that change map
* bounds.
*/
this._showBoundMarkers = function() {
if(that.isBusy()) {
//console.log('Do not show because busy!');
return;
}
var bounds = that._map.getBounds();
var found = 0;
for(var polygonNo in that._map._editablePolygons) {
var polyline = that._map._editablePolygons[polygonNo];
for(var markerNo in polyline._markers) {
var marker = polyline._markers[markerNo];
if(bounds.contains(marker.getLatLng()))
found += 1;
}
}
//console.log('found=' + found);
for(var polygonNo in that._map._editablePolygons) {
var polyline = that._map._editablePolygons[polygonNo];
for(var markerNo in polyline._markers) {
var marker = polyline._markers[markerNo];
if(found < that.maxMarkers) {
that._setMarkerVisible(marker, bounds.contains(marker.getLatLng()));
that._setMarkerVisible(marker.newPointMarker, bounds.contains(marker.getLatLng()));
} else {
that._setMarkerVisible(marker, false);
that._setMarkerVisible(marker.newPointMarker, false);
}
}
}
};
/**
         * Used when adding/moving points in order to prevent the user from messing
         * with other markers (and to make it easier to decide where to put the point
* without too many markers).
*/
this._hideAll = function(except) {
for(var polygonNo in that._map._editablePolygons) {
//console.log("hide " + polygonNo + " markers");
var polyline = that._map._editablePolygons[polygonNo];
for(var markerNo in polyline._markers) {
var marker = polyline._markers[markerNo];
if(except == null || except != marker)
polyline._setMarkerVisible(marker, false);
if(except == null || except != marker.newPointMarker)
polyline._setMarkerVisible(marker.newPointMarker, false);
}
}
}
/**
* Show/hide marker.
*/
this._setMarkerVisible = function(marker, show) {
if(!marker)
return;
var map = this._map;
if(show) {
if(!marker._visible) {
                    if(!marker._map) { // First show of this marker:
marker.addTo(map);
} else { // Marker was already shown and hidden:
map.addLayer(marker);
}
marker._map = map;
}
marker._visible = true;
} else {
if(marker._visible) {
map.removeLayer(marker);
}
marker._visible = false;
}
};
this.updateLatLngs = function (latlngs) {
this._eraseMarkers();
this.setLatLngs(latlngs);
that._setMarkers();
this._reloadPolygon();
return this;
}
/**
         * Reload the polygon. If it is busy, then the bound markers will not be
* shown. Call _setBusy(false) before this method!
*/
this._reloadPolygon = function(fixAroundPointNo) {
// that._setMarkers();
that.setLatLngs(that._getMarkerLatLngs());
if(fixAroundPointNo != null)
that._fixNeighbourPositions(fixAroundPointNo);
that._showBoundMarkers();
}
/**
         * Create the edit markers (and their new-point markers) for every
         * point of this polygon, attaching editing context to each marker.
*/
this._setMarkers = function() {
this._markers = [];
var that = this;
var points = this.getLatLngs();
var length = points.length;
for(var i = 0; i < length; i++) {
var marker = this._addMarkers(i, points[i]);
if(! ('context' in marker)) {
marker.context = {}
if(that._contexts != null) {
                        marker.context = that._contexts[i];
}
}
if(marker.context && ! ('originalPointNo' in marker.context))
marker.context.originalPointNo = i;
if(marker.context && ! ('originalPolygonNo' in marker.context))
marker.context.originalPolygonNo = that._map._editablePolygons.length;
}
}
/**
         * Remove all edit markers (and their new-point markers) of this
         * polygon from the map.
*/
this._eraseMarkers = function() {
var that = this;
var points = this._markers;
var length = points.length;
for(var i = 0; i < length; i++) {
var marker = points[i];
this._map.removeLayer(marker.newPointMarker);
this._map.removeLayer(marker);
}
this._markers = [];
}
/**
         * Add two markers (a point marker and its newPointMarker) for a
         * single point.
         *
         * Markers are not added to the map here; marker.addTo(map) is called
         * only later, the first time it is needed, for performance reasons.
*/
this._addMarkers = function(pointNo, latLng, fixNeighbourPositions) {
var that = this;
var points = this.getLatLngs();
var marker = L.marker(latLng, {draggable: true, icon: this.pointIcon});
marker.newPointMarker = null;
marker.on('mousedown', function (e) {
that._lastMouseEvent = e.originalEvent;
});
marker.on('dragstart', function(event) {
var pointNo = that._getPointNo(event.target);
//console.log("pointNo", pointNo);
var previousPoint = pointNo == null ? null : (pointNo - 1 >= 0 ? that._markers[pointNo - 1].getLatLng() : that._markers[that._markers.length - 1].getLatLng());
var nextPoint = pointNo < that._markers.length - 1 ? that._markers[pointNo + 1].getLatLng() : that._markers[0].getLatLng();
that._edited = true;
that._setupDragLines(marker, previousPoint, nextPoint);
that._setBusy(true);
that._hideAll(marker);
});
marker.on('dragend', function(event) {
that._lastMouseEvent = undefined;
var marker = event.target;
var pointNo = that._getPointNo(event.target);
setTimeout(function() {
that._setBusy(false);
that._reloadPolygon(pointNo);
}, 25);
});
            // delete on contextmenu and dblclick so touch devices can tap to remove points
marker.on('contextmenu dblclick', function(event) {
var corners = that._markers.length;
if (corners <= 3)
return;
var marker = event.target;
var pointNo = that._getPointNo(event.target);
//console.log("corners:", corners, "pointNo:", pointNo);
that._edited = true;
that._map.removeLayer(marker);
that._map.removeLayer(newPointMarker);
that._markers.splice(pointNo, 1);
that._reloadPolygon(pointNo);
});
var previousPoint = points[pointNo == 0 ? points.length - 1 : pointNo - 1];
var newPointMarker = L.marker([(latLng.lat + previousPoint.lat) / 2.,
(latLng.lng + previousPoint.lng) / 2.],
{draggable: true, icon: this.newPointIcon});
marker.newPointMarker = newPointMarker;
newPointMarker.on('dragstart', function(event) {
that._lastMouseEvent = event.originalEvent;
var pointNo = that._getPointNo(event.target);
//console.log("pointNo", pointNo);
var previousPoint = pointNo - 1 >= 0 ? that._markers[pointNo - 1].getLatLng() : that._markers[that._markers.length - 1].getLatLng();
var nextPoint = that._markers[pointNo].getLatLng();
that._edited = true;
that._setupDragLines(marker.newPointMarker, previousPoint, nextPoint);
that._setBusy(true);
that._hideAll(marker.newPointMarker);
});
newPointMarker.on('dragend', function(event) {
// console.log("dragend", event);
var marker = event.target;
var pointNo = that._getPointNo(event.target);
that._addMarkers(pointNo, marker.getLatLng(), true);
setTimeout(function() {
that._setBusy(false);
that._reloadPolygon();
}, 25);
});
newPointMarker.on('click', function(event) {
// console.log("click", event);
var marker = event.target;
var pointNo = that._getPointNo(event.target);
that._addMarkers(pointNo, marker.getLatLng(), true);
setTimeout(function() {
that._reloadPolygon();
}, 25);
});
// if (this._options.deletableEdges) {
// newPointMarker.on('contextmenu', function(event) {
// // 1. Remove this polyline from map
// var marker = event.target;
// var pointNo = that._getPointNo(marker);
// var markers = that.getPoints();
// that._hideAll();
// var secondPartMarkers = that._markers.slice(pointNo, pointNo.length);
// that._markers.splice(pointNo, that._markers.length - pointNo);
// that._reloadPolygon();
// var points = [];
// var contexts = [];
// for(var i = 0; i < secondPartMarkers.length; i++) {
// var marker = secondPartMarkers[i];
// points.push(marker.getLatLng());
// contexts.push(marker.context);
// }
// //console.log('points:' + points);
// //console.log('contexts:' + contexts);
// // Need to know the current polyline order numbers, because
// // the splitted one need to be inserted immediately after:
// var originalPolygonNo = that._map._editablePolygons.indexOf(that);
// var newPolygon = L.Polygon.PolygonEditor(points, that._options, contexts, originalPolygonNo + 1)
// .addTo(that._map);
// that._showBoundMarkers();
// //console.log('Done split, _editablePolygons now:' + that._map._editablePolygons.length);
// });
// }
this._markers.splice(pointNo, 0, marker);
if(fixNeighbourPositions) {
this._fixNeighbourPositions(pointNo);
}
return marker;
};
/**
* Fix nearby new point markers when the new point is created.
*/
this._fixNeighbourPositions = function(pointNo) {
var previousMarker = pointNo == 0 ? this._markers[this._markers.length - 1] : this._markers[pointNo - 1];
var marker = this._markers[pointNo];
var nextMarker = pointNo < this._markers.length - 1 ? this._markers[pointNo + 1] : this._markers[0];
//console.log("_fixNeighbourPositions:", pointNo, this._markers.length);
//console.log("markers:", marker, previousMarker, nextMarker);
if(!marker && previousMarker && nextMarker) {
// //console.log("last point deleted!");
nextMarker.newPointMarker.setLatLng([(previousMarker.getLatLng().lat + nextMarker.getLatLng().lat) / 2.,
(previousMarker.getLatLng().lng + nextMarker.getLatLng().lng) / 2.]);
}
if(marker && previousMarker) {
// //console.log("marker && previousMarker");
marker.newPointMarker.setLatLng([(previousMarker.getLatLng().lat + marker.getLatLng().lat) / 2.,
(previousMarker.getLatLng().lng + marker.getLatLng().lng) / 2.]);
}
if(marker && nextMarker) {
// //console.log("marker && nextMarker");
nextMarker.newPointMarker.setLatLng([(marker.getLatLng().lat + nextMarker.getLatLng().lat) / 2.,
(marker.getLatLng().lng + nextMarker.getLatLng().lng) / 2.]);
}
};
/**
* Find the order number of the marker.
*/
this._getPointNo = function(marker) {
for(var i = 0; i < this._markers.length; i++) {
if(marker == this._markers[i] || marker == this._markers[i].newPointMarker) {
return i;
}
}
return -1;
};
/**
* Get polyline latLngs based on marker positions.
*/
this._getMarkerLatLngs = function() {
var result = [];
for(var i = 0; i < this._markers.length; i++)
result.push(this._markers[i].getLatLng());
return result;
};
this._setupDragLines = function(marker, point1, point2) {
// //console.log("_setupDragLines", marker, point1, point2);
var line1 = null;
var line2 = null;
var markerLatlng = marker.getLatLng();
var offsetLat = 0;
var offsetLng = 0;
if (this._lastMouseEvent) {
var mousePoint = this._map.mouseEventToLatLng(this._lastMouseEvent);
offsetLat = markerLatlng.lat - mousePoint.lat;
offsetLng = markerLatlng.lng - mousePoint.lng;
// console.log(markerLatlng, mouseLatlng);
}
// console.log(markerLatlng, this._lastMouseEvent);
if(point1) line1 = L.polyline([markerLatlng, point1], {dashArray: "5,5", weight: 1})
.addTo(that._map);
        if(point2) line2 = L.polyline([markerLatlng, point2], {dashArray: "5,5", weight: 1})
.addTo(that._map);
var moveHandler = function(event) {
// add the offsets from the marker
// so aux lines appear in the tip of the marker
var latlngPoint = L.latLng(event.latlng.lat + offsetLat, event.latlng.lng + offsetLng);
if(line1)
line1.setLatLngs([latlngPoint, point1]);
if(line2)
line2.setLatLngs([latlngPoint, point2]);
};
var stopHandler = function(event) {
that._map.off('mousemove', moveHandler);
marker.off('dragend', stopHandler);
if(line1) that._map.removeLayer(line1);
if(line2) that._map.removeLayer(line2);
//console.log('STOPPED');
if(event.target != that._map) {
that._map.fire('click', event);
}
};
that._map.on('mousemove', moveHandler);
marker.on('dragend', stopHandler);
that._map.once('click', stopHandler);
marker.once('click', stopHandler);
if(line1) line1.once('click', stopHandler);
if(line2) line2.once('click', stopHandler);
}
}
});
L.Polygon.polygonEditor.addInitHook(function () {
// Hack to keep reference to map:
this.originalAddTo = this.addTo;
this.addTo = function(map) {
this.originalAddTo(map);
this._map = map;
this._addMethods();
/**
     * When adding a new point we must prevent the user from messing with other
* markers. One way is to check everywhere if the user is busy. The
* other is to just remove other markers when the user is doing
     * something.
*
* TODO: Decide the right way to do this and then leave only _busy or
* _hideAll().
*/
this._busy = false;
this._initialized = false;
this._edited = false;
this._init(this._options, this._contexts);
this._initialized = true;
return this;
};
});
/**
 * Construct a new editable polygon.
 *
 * latlngs   ... a list of points (or two-element tuples with coordinates)
 * options   ... polygon options
 * contexts  ... custom contexts for every point in the polygon. Must have the
 *               same number of elements as latlngs; this data will be
 *               preserved when new points are added or polygons are split.
 * polygonNo ... insert this polygon in a specific order (used when splitting).
 *
 * More about contexts:
 * This is an array of objects that will be kept as "context" for every
 * point. Marker will keep this value as marker.context. New markers will
 * have context set to null.
 *
 * Contexts must be the same size as the polygon size!
 *
 * By default, even without calling this method -- every marker will have
 * context with one value: marker.context.originalPointNo with the
 * original order number of this point. The order may change if some
 * markers before this one are deleted or new ones added.
 */
L.Polygon.PolygonEditor = function(latlngs, options, contexts, polygonNo) {
var result = new L.Polygon.polygonEditor(latlngs, options);
result._options = options;
result._contexts = contexts;
    result._desiredPolygonNo = polygonNo;
return result;
};
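// A minimal usage sketch (not part of the original plugin): it assumes an existing Leaflet
// map in `map`; the coordinates, style options and context values below are made up.
//
//   var latlngs  = [[45.00, 15.00], [45.10, 15.10], [45.00, 15.20]];
//   var contexts = [{id: 1}, {id: 2}, {id: 3}];
//   var polygon  = L.Polygon.PolygonEditor(latlngs, {color: 'red'}, contexts).addTo(map);
//
//   // Later, replace the points and refresh the bound markers:
//   // polygon.updateLatLngs([[45.02, 15.01], [45.12, 15.11], [45.02, 15.21]]);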
| NYPL/building-inspector | app/assets/javascripts/lib/vendor/leaflet-editable-polygon.js | JavaScript | apache-2.0 | 23,188 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the Apache 2 License.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ServiceFabricPersistence.Interfaces;
using Microsoft.ServiceFabric;
using Microsoft.ServiceFabric.Actors;
namespace ServiceFabricPersistence
{
public class ServiceFabricSnapshotStore : Actor<ServiceFabricSnapshotStoreState>, IServiceFabricSnapshotStore
{
public override Task OnActivateAsync()
{
if (this.State == null)
{
this.State = new ServiceFabricSnapshotStoreState();
}
ActorEventSource.Current.ActorMessage(this, "State initialized to {0}", this.State);
return Task.FromResult(true);
}
public Task<SnapshotEntry> SelectSnapshotAsync(long maxSequenceNr, DateTime maxTimeStamp)
{
IEnumerable<KeyValuePair<long, SnapshotEntry>> snapshots = State.snapshotStore;
ActorEventSource.Current.ActorMessage(this, "selectSnapshotAsync {0}-{1}", maxSequenceNr, maxTimeStamp);
if (maxSequenceNr > 0 && maxSequenceNr < long.MaxValue)
{
snapshots = from e in this.State.snapshotStore
where e.Key <= maxSequenceNr
select e;
}
if(maxTimeStamp > DateTime.MinValue && maxTimeStamp < DateTime.MaxValue)
{
snapshots = from e in snapshots
                            where e.Value.Timestamp <= maxTimeStamp
select e;
}
//TODO: Double-check selection criteria
var snapshot = snapshots.ToList<KeyValuePair<long, SnapshotEntry>>();
var retValue = snapshot.Any() ? snapshot.Last().Value : null;
return Task.FromResult(retValue);
}
public Task WriteSnapshotAsync(SnapshotEntry s)
{
ActorEventSource.Current.ActorMessage(this, "writeSnapshot {0}-{1}", s.SequenceNr, s.Timestamp);
State.snapshotStore.Add(s.SequenceNr, s);
return Task.FromResult(true);
}
public Task DeleteSnapshotAsync(long maxSequenceNr, DateTime maxTimeStamp)
{
IEnumerable<KeyValuePair<long, SnapshotEntry>> snapshots = State.snapshotStore;
ActorEventSource.Current.ActorMessage(this, "deleteSnapshot {0}-{1}", maxSequenceNr, maxTimeStamp);
ActorEventSource.Current.ActorMessage(this, "DeleteSnapshot {0}-{1}-{2}", maxSequenceNr, maxTimeStamp);
if (maxSequenceNr > 0 && maxSequenceNr < long.MaxValue)
{
snapshots = from e in this.State.snapshotStore
where e.Key <= maxSequenceNr
select e;
}
if (maxTimeStamp > DateTime.MinValue && maxTimeStamp < DateTime.MaxValue)
{
snapshots = from e in snapshots
                            where e.Value.Timestamp <= maxTimeStamp
select e;
}
            // Materialize the query first so the dictionary is not modified while it is being enumerated.
            foreach (var s in snapshots.ToList())
                State.snapshotStore.Remove(s.Key);
return Task.FromResult(true);
}
public Task DeleteSnapshotManyAsync(long maxSequenceNr, DateTime maxTimeStamp)
{
ActorEventSource.Current.ActorMessage(this, "DeleteSnapshotMany {0}-{1}", maxSequenceNr, maxTimeStamp);
if (maxSequenceNr > 0 && maxSequenceNr < long.MaxValue)
{
var snapshot = from e in this.State.snapshotStore
where e.Key == maxSequenceNr
select e;
State.snapshotStore.Remove(snapshot.First().Key);
}
if (maxTimeStamp > DateTime.MinValue && maxTimeStamp < DateTime.MaxValue)
{
var snapshot = from e in this.State.snapshotStore
where e.Value.Timestamp == maxTimeStamp
select e;
State.snapshotStore.Remove(snapshot.First().Key);
}
return Task.FromResult(true);
}
}
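    // Rough usage sketch (not part of this file): callers would normally reach this actor
    // through an ActorProxy. The actor id and application name below are placeholders, and
    // the exact proxy API depends on the Service Fabric Actors SDK version in use.
    //
    //   var store = ActorProxy.Create<IServiceFabricSnapshotStore>(
    //       new ActorId("journal-1"), "fabric:/PersistenceApp");
    //   await store.WriteSnapshotAsync(snapshotEntry);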
}
| yonglehou/Akka.Persistence.ServiceFabric | src/ServiceFabricPersistence/ServiceFabricSnapshotStore.cs | C# | apache-2.0 | 4,298 |
/*
* Copyright 2011 Vincent Behar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.rundeck.api.domain;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Result of importing some jobs into RunDeck
*
* @author Vincent Behar
*/
public class RundeckJobsImportResult implements Serializable {
private static final long serialVersionUID = 1L;
private final List<RundeckJob> succeededJobs = new ArrayList<RundeckJob>();
private final List<RundeckJob> skippedJobs = new ArrayList<RundeckJob>();
private final Map<RundeckJob, String> failedJobs = new HashMap<RundeckJob, String>();
public void addSucceededJob(RundeckJob job) {
succeededJobs.add(job);
}
public void addSkippedJob(RundeckJob job) {
skippedJobs.add(job);
}
public void addFailedJob(RundeckJob job, String errorMessage) {
failedJobs.put(job, errorMessage);
}
public List<RundeckJob> getSucceededJobs() {
return succeededJobs;
}
public List<RundeckJob> getSkippedJobs() {
return skippedJobs;
}
public Map<RundeckJob, String> getFailedJobs() {
return failedJobs;
}
@Override
public String toString() {
return "RundeckJobsImportResult [succeededJobs=" + succeededJobs + ", skippedJobs=" + skippedJobs
+ ", failedJobs=" + failedJobs + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((failedJobs == null) ? 0 : failedJobs.hashCode());
result = prime * result + ((skippedJobs == null) ? 0 : skippedJobs.hashCode());
result = prime * result + ((succeededJobs == null) ? 0 : succeededJobs.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
RundeckJobsImportResult other = (RundeckJobsImportResult) obj;
if (failedJobs == null) {
if (other.failedJobs != null)
return false;
} else if (!failedJobs.equals(other.failedJobs))
return false;
if (skippedJobs == null) {
if (other.skippedJobs != null)
return false;
} else if (!skippedJobs.equals(other.skippedJobs))
return false;
if (succeededJobs == null) {
if (other.succeededJobs != null)
return false;
} else if (!succeededJobs.equals(other.succeededJobs))
return false;
return true;
}
}
| vbehar/rundeck-api-java-client | src/main/java/org/rundeck/api/domain/RundeckJobsImportResult.java | Java | apache-2.0 | 3,268 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "forge-"
cfg.versionfile_source = "forge/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
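# Illustrative example (not from the original file): with
#   pieces = {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234", "dirty": True}
# render_pep440(pieces) returns "1.2.0+3.gabc1234.dirty".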
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| sipplified/forge | forge/_version.py | Python | apache-2.0 | 18,448 |
(function () {
'use strict';
angular.module('horizon.framework.widgets.help-panel', [])
.directive('helpPanel', ['horizon.framework.widgets.basePath',
function (path) {
return {
templateUrl: path + 'help-panel/help-panel.html',
transclude: true
};
}
]);
})();
| Hodorable/0602 | horizon/static/framework/widgets/help-panel/help-panel.js | JavaScript | apache-2.0 | 321 |
<script>
$(document).ready(function () {
$(function() {
$( "#datepicker" ).datepicker({
changeMonth: true,
changeYear: true,
dateFormat: "yy-mm-dd",
yearRange: "c-50,c"
});
});
});
function check_input(){
var temp = $('#milestone').val();
var temp1 = $('#datepicker').val();
if(temp != '' && temp1 != '')
return true;
else
return false;
}
</script>
<div id="com_proj_container">
<div id="title_container" class="title_post">
<div id="title">Edit Project Milestone Details</div>
</div>
<br />
<div class="body_post">
<?php $str = str_replace(' ','_',$committee_info->name); $str1 = str_replace(' ','_',$project_info->project);?>
<?php $attr = array('onsubmit' => 'return check_input()'); echo form_open('committees/project_milestones/'.$str.'/'.$str1.'/update/'.$proj_milestone->id, $attr);?>
<table style="width:100%">
<tr>
<td colspan="3">
<?php
echo form_label('Milestone:', 'milestone').' ';
echo form_input('milestone', $proj_milestone->milestone, 'id="milestone" style="width:540px;"');
?>
</td>
</tr>
<tr>
<td>
<?php
echo form_label('Date Due:', 'date_due').' ';
echo form_input('date_due', $proj_milestone->date_due, 'id="datepicker" style="width:110px;"');
?>
</td>
<td>
<?php
echo form_label('Faci:', 'user_list').' ';
echo form_dropdown('user_list',$user_list,$faci_id, 'class="user_dropdown" style="width:200px;"');
?>
</td>
<td>
<?php
$status = array(
'Pending' => 'Pending',
'Done' => 'Done'
);
echo form_label('Status:', 'status').' ';
echo form_dropdown('status',$status,$proj_milestone->status, 'class="status" style="width:110px;"');
?>
</td>
</tr>
<tr>
<td>
</td>
</tr>
<tr>
<td colspan='3' align="center">
<input class="readmore_button" type="submit" value="GO!" />
</td>
</tr>
</table>
<?php echo '</form>';?>
</div>
</div> | niknokseyer/formdev | application/views/proj_milestones_edit.php | PHP | apache-2.0 | 2,067 |
/*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is
* regenerated.
*/
'use strict';
/**
* The Operations Management Suite (OMS) parameters.
*
*/
class ClusterMonitoringRequest {
/**
* Create a ClusterMonitoringRequest.
* @property {string} [workspaceId] The Operations Management Suite (OMS)
* workspace ID.
* @property {string} [primaryKey] The Operations Management Suite (OMS)
* workspace key.
*/
constructor() {
}
/**
* Defines the metadata of ClusterMonitoringRequest
*
* @returns {object} metadata of ClusterMonitoringRequest
*
*/
mapper() {
return {
required: false,
serializedName: 'ClusterMonitoringRequest',
type: {
name: 'Composite',
className: 'ClusterMonitoringRequest',
modelProperties: {
workspaceId: {
required: false,
serializedName: 'workspaceId',
type: {
name: 'String'
}
},
primaryKey: {
required: false,
serializedName: 'primaryKey',
type: {
name: 'String'
}
}
}
}
};
}
}
module.exports = ClusterMonitoringRequest;
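// Illustrative usage (not part of the generated file; the workspace values are placeholders):
//
//   const ClusterMonitoringRequest = require('./clusterMonitoringRequest');
//   const request = new ClusterMonitoringRequest();
//   request.workspaceId = '<OMS workspace id>';
//   request.primaryKey = '<OMS workspace key>';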
| xingwu1/azure-sdk-for-node | lib/services/hdInsightManagement/lib/models/clusterMonitoringRequest.js | JavaScript | apache-2.0 | 1,464 |
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package armyc2.c2sd.renderer.utilities;
/**
*
* @author michael.spinelli
*/
public class SinglePointLookupInfo {
private String _SymbolID = "";
private String _Description = "";
private int _mappingP = 0;
private int _mappingA = 0;
private int _height = 0;
private int _width = 0;
public SinglePointLookupInfo(String basicSymbolID, String description,
String mappingP, String mappingA,String width,String height)
{
_SymbolID = basicSymbolID;
_Description = description;
if(mappingP != null && mappingP.equals("") == false)
_mappingP = Integer.valueOf(mappingP);
if(mappingA != null && mappingA.equals("") == false)
_mappingA = Integer.valueOf(mappingA);
if(height != null && height.equals("") == false)
_height = Integer.valueOf(height);
if(width != null && width.equals("") == false)
_width = Integer.valueOf(width);
}
public String getBasicSymbolID()
{
return _SymbolID;
}
public String getDescription()
{
return _Description;
}
public int getMappingP()
{
return _mappingP;
}
public int getMappingA()
{
return _mappingA;
}
public int getHeight()
{
return _height;
}
public int getWidth()
{
return _width;
}
/**
*
* @return The newly cloned SPSymbolDef
*/
@Override
public SinglePointLookupInfo clone()
{
SinglePointLookupInfo defReturn;
defReturn = new SinglePointLookupInfo(_SymbolID, _Description,
String.valueOf(getMappingP()),
String.valueOf(getMappingA()),
String.valueOf(getWidth()),
String.valueOf(getHeight()));
return defReturn;
}
public String toXML()
{
String symbolId = "<SYMBOLID>" + getBasicSymbolID() + "</SYMBOLID>";
String mappingP = "<MAPPINGP>" + String.valueOf(getMappingP()) + "</MAPPINGP>";
String mappingA = "<MAPPINGA>" + String.valueOf(getMappingA()) + "</MAPPINGA>";
String description = "<DESCRIPTION>" + getDescription() + "</DESCRIPTION>";
String width = "<WIDTH>" + String.valueOf(getWidth()) + "</WIDTH>";
String height = "<HEIGHT>" + String.valueOf(getHeight()) + "</HEIGHT>";
String xml = symbolId + mappingP + mappingA + description + width + height;
return xml;
}
}
| spyhunter99/mil-sym-android | renderer/src/main/java/armyc2/c2sd/renderer/utilities/SinglePointLookupInfo.java | Java | apache-2.0 | 2,634 |
<?php
/*
* user_info
*/
function get_user_info($config, $oauth_data) {
$aConfig = array (
'appid' => $config['appid'],
'appkey' => $config['appkey'],
'api' => 'get_user_info,add_topic,add_one_blog,add_album,upload_pic,list_album,add_share,check_page_fans,add_t,add_pic_t,del_t,get_repost_list,get_info,get_other_info,get_fanslist,get_idollist,add_idol,del_idol,get_tenpay_addr'
);
$sUrl = "https://graph.qq.com/user/get_user_info";
$aGetParam = array(
"access_token" => $oauth_data["access_token"],
"oauth_consumer_key" => $aConfig["appid"],
"openid" => $oauth_data["oauth_openid"],
"format" => "json"
);
$sContent = get($sUrl, $aGetParam);
if($sContent!==FALSE){
$user = json_decode($sContent, true);
return array("name"=>$user["nickname"], "avatar"=>$user["figureurl_1"]);
}
}
/*
* Logout
*/
function oauth_logout() {
unset($_SESSION["state"]);
unset($_SESSION["URI"]);
$session = new session();
$session->delete('oauth_data');
}
/*
* Login
*/
function oauth_login($config) {
if (!function_exists("curl_init")) {
echo "<h1>腾讯开放平台提示:请先开启curl支持</h1>";
echo "
开启php curl函数库的步骤(for windows)<br />
1).去掉windows/php.ini 文件里;extension=php_curl.dll前面的; /*用 echo phpinfo();查看php.ini的路径*/<br />
2).把php5/libeay32.dll,ssleay32.dll复制到系统目录windows/下<br />
3).重启apache<br />
";
exit();
}
$aConfig = array (
'appid' => $config['appid'],
'appkey' => $config['appkey'],
'api' => 'get_user_info,add_topic,add_one_blog,add_album,upload_pic,list_album,add_share,check_page_fans,add_t,add_pic_t,del_t,get_repost_list,get_info,get_other_info,get_fanslist,get_idollist,add_idol,del_idol,get_tenpay_addr'
);
$sState = md5(date('YmdHis' . getip()));
$_SESSION['state'] = $sState;
$server_name = strtolower($_SERVER['SERVER_NAME']);
$server_port = ($_SERVER['SERVER_PORT'] == '80') ? '' : ':' . (int)$_SERVER['SERVER_PORT'];
$secure = (isset($_SERVER['HTTPS']) && $_SERVER['HTTPS'] == 'on') ? 1 : 0;
$callback = ($secure ? 'https://' : 'http://') . $server_name . $server_port;
$callback = $callback . url('member/register/callback', array('app'=>'qq'));
$_SESSION['URI'] = $callback;
$aParam = array(
"response_type" => 'code',
"client_id" => $aConfig["appid"],
"redirect_uri" => $callback,
"scope" => $aConfig["api"],
"state" => $sState
);
$aGet = array();
foreach($aParam as $key=>$val){
$aGet[] = $key . '=' . urlencode($val);
}
$sUrl = "https://graph.qq.com/oauth2.0/authorize?";
$sUrl .= join("&", $aGet);
header("location:" . $sUrl);
}
/*
* callback
*/
function oauth_callback($config) {
$aConfig = array (
'appid' => $config['appid'],
'appkey' => $config['appkey'],
'api' => 'get_user_info,add_topic,add_one_blog,add_album,upload_pic,list_album,add_share,check_page_fans,add_t,add_pic_t,del_t,get_repost_list,get_info,get_other_info,get_fanslist,get_idollist,add_idol,del_idol,get_tenpay_addr'
);
$sUrl = "https://graph.qq.com/oauth2.0/token";
$aGetParam = array(
"grant_type" => "authorization_code",
"client_id" => $aConfig["appid"],
"client_secret" => $aConfig["appkey"],
"code" => $_GET["code"],
"state" => $_GET["state"],
"redirect_uri" => $_SESSION["URI"]
);
unset($_SESSION["state"]);
unset($_SESSION["URI"]);
$sContent = get($sUrl,$aGetParam);
if($sContent!==FALSE){
$aTemp = explode("&", $sContent);
$aParam = $oauth_data = array();
foreach($aTemp as $val){
$aTemp2 = explode("=", $val);
$aParam[$aTemp2[0]] = $aTemp2[1];
}
$oauth_data["access_token"] = $aParam["access_token"];
$sUrl = "https://graph.qq.com/oauth2.0/me";
$aGetParam = array(
"access_token" => $aParam["access_token"]
);
$sContent = get($sUrl, $aGetParam);
if($sContent!==FALSE){
$aTemp = array();
preg_match('/callback\(\s+(.*?)\s+\)/i', $sContent,$aTemp);
$aResult = json_decode($aTemp[1],true);
$session = new session();
$oauth_data['oauth_openid'] = $aResult["openid"];
$session->set('oauth_data', $oauth_data);
}
}
}
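/*
 * Rough end-to-end sketch (not part of the original file; the controller routing and the
 * session getter are assumptions): a typical QQ login round-trip looks like
 *
 *   oauth_login($config);                      // redirect the user to QQ
 *   // ... QQ redirects back to the registered callback URL ...
 *   oauth_callback($config);                   // exchange the code and store oauth_data in the session
 *   $session = new session();
 *   $user = get_user_info($config, $session->get('oauth_data'));
 */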
/*
 * Get the client IP
*/
function getip() {
if (isset ( $_SERVER )) {
if (isset ( $_SERVER ['HTTP_X_FORWARDED_FOR'] )) {
$aIps = explode ( ',', $_SERVER ['HTTP_X_FORWARDED_FOR'] );
foreach ( $aIps as $sIp ) {
$sIp = trim ( $sIp );
if ($sIp != 'unknown') {
$sRealIp = $sIp;
break;
}
}
} elseif (isset ( $_SERVER ['HTTP_CLIENT_IP'] )) {
$sRealIp = $_SERVER ['HTTP_CLIENT_IP'];
} else {
if (isset ( $_SERVER ['REMOTE_ADDR'] )) {
$sRealIp = $_SERVER ['REMOTE_ADDR'];
} else {
$sRealIp = '0.0.0.0';
}
}
} else {
if (getenv ( 'HTTP_X_FORWARDED_FOR' )) {
$sRealIp = getenv ( 'HTTP_X_FORWARDED_FOR' );
} elseif (getenv ( 'HTTP_CLIENT_IP' )) {
$sRealIp = getenv ( 'HTTP_CLIENT_IP' );
} else {
$sRealIp = getenv ( 'REMOTE_ADDR' );
}
}
return $sRealIp;
}
/*
 * GET request
*/
function get($sUrl,$aGetParam){
global $aConfig;
$oCurl = curl_init();
if(stripos($sUrl,"https://")!==FALSE){
curl_setopt($oCurl, CURLOPT_SSL_VERIFYPEER, FALSE);
curl_setopt($oCurl, CURLOPT_SSL_VERIFYHOST, FALSE);
}
$aGet = array();
foreach($aGetParam as $key=>$val){
$aGet[] = $key."=".urlencode($val);
}
curl_setopt($oCurl, CURLOPT_URL, $sUrl."?".join("&",$aGet));
curl_setopt($oCurl, CURLOPT_RETURNTRANSFER, 1 );
$sContent = curl_exec($oCurl);
$aStatus = curl_getinfo($oCurl);
curl_close($oCurl);
if(intval($aConfig["debug"])===1){
echo "<tr><td class='narrow-label'>请求地址:</td><td><pre>".$sUrl."</pre></td></tr>";
echo "<tr><td class='narrow-label'>GET参数:</td><td><pre>".var_export($aGetParam,true)."</pre></td></tr>";
echo "<tr><td class='narrow-label'>请求信息:</td><td><pre>".var_export($aStatus,true)."</pre></td></tr>";
if(intval($aStatus["http_code"])==200){
echo "<tr><td class='narrow-label'>返回结果:</td><td><pre>".$sContent."</pre></td></tr>";
if((@$aResult = json_decode($sContent,true))){
echo "<tr><td class='narrow-label'>结果集合解析:</td><td><pre>".var_export($aResult,true)."</pre></td></tr>";
}
}
}
if(intval($aStatus["http_code"])==200){
return $sContent;
}else{
echo "<tr><td class='narrow-label'>返回出错:</td><td><pre>".$aStatus["http_code"].",请检查参数或者确实是腾讯服务器出错咯。</pre></td></tr>";
return FALSE;
}
}
/*
 * POST request
*/
function post($sUrl,$aPOSTParam){
global $aConfig;
$oCurl = curl_init();
if(stripos($sUrl,"https://")!==FALSE){
curl_setopt($oCurl, CURLOPT_SSL_VERIFYPEER, FALSE);
curl_setopt($oCurl, CURLOPT_SSL_VERIFYHOST, false);
}
$aPOST = array();
foreach($aPOSTParam as $key=>$val){
$aPOST[] = $key."=".urlencode($val);
}
curl_setopt($oCurl, CURLOPT_URL, $sUrl);
curl_setopt($oCurl, CURLOPT_RETURNTRANSFER, 1 );
curl_setopt($oCurl, CURLOPT_POST,true);
curl_setopt($oCurl, CURLOPT_POSTFIELDS, join("&", $aPOST));
$sContent = curl_exec($oCurl);
$aStatus = curl_getinfo($oCurl);
curl_close($oCurl);
if(intval($aConfig["debug"])===1){
echo "<tr><td class='narrow-label'>请求地址:</td><td><pre>".$sUrl."</pre></td></tr>";
echo "<tr><td class='narrow-label'>POST参数:</td><td><pre>".var_export($aPOSTParam,true)."</pre></td></tr>";
echo "<tr><td class='narrow-label'>请求信息:</td><td><pre>".var_export($aStatus,true)."</pre></td></tr>";
if(intval($aStatus["http_code"])==200){
echo "<tr><td class='narrow-label'>返回结果:</td><td><pre>".$sContent."</pre></td></tr>";
if((@$aResult = json_decode($sContent,true))){
echo "<tr><td class='narrow-label'>结果集合解析:</td><td><pre>".var_export($aResult,true)."</pre></td></tr>";
}
}
}
if(intval($aStatus["http_code"])==200){
return $sContent;
}else{
echo "<tr><td class='narrow-label'>返回出错:</td><td><pre>".$aStatus["http_code"].",请检查参数或者确实是腾讯服务器出错咯。</pre></td></tr>";
return FALSE;
}
}
/*
 * Upload an image
*/
function upload($sUrl,$aPOSTParam,$aFileParam){
//防止请求超时
global $aConfig;
set_time_limit(0);
$oCurl = curl_init();
if(stripos($sUrl,"https://")!==FALSE){
curl_setopt($oCurl, CURLOPT_SSL_VERIFYPEER, FALSE);
curl_setopt($oCurl, CURLOPT_SSL_VERIFYHOST, false);
}
$aPOSTField = array();
foreach($aPOSTParam as $key=>$val){
$aPOSTField[$key]= $val;
}
foreach($aFileParam as $key=>$val){
$aPOSTField[$key] = "@".$val; //此处对应的是文件的绝对地址
}
curl_setopt($oCurl, CURLOPT_URL, $sUrl);
curl_setopt($oCurl, CURLOPT_POST, true);
curl_setopt($oCurl, CURLOPT_RETURNTRANSFER, 1 );
curl_setopt($oCurl, CURLOPT_POSTFIELDS, $aPOSTField);
$sContent = curl_exec($oCurl);
$aStatus = curl_getinfo($oCurl);
curl_close($oCurl);
if(intval($aConfig["debug"])===1){
echo "<tr><td class='narrow-label'>请求地址:</td><td><pre>".$sUrl."</pre></td></tr>";
echo "<tr><td class='narrow-label'>POST参数:</td><td><pre>".var_export($aPOSTParam,true)."</pre></td></tr>";
echo "<tr><td class='narrow-label'>文件参数:</td><td><pre>".var_export($aFileParam,true)."</pre></td></tr>";
echo "<tr><td class='narrow-label'>请求信息:</td><td><pre>".var_export($aStatus,true)."</pre></td></tr>";
if(intval($aStatus["http_code"])==200){
echo "<tr><td class='narrow-label'>返回结果:</td><td><pre>".$sContent."</pre></td></tr>";
if((@$aResult = json_decode($sContent,true))){
echo "<tr><td class='narrow-label'>结果集合解析:</td><td><pre>".var_export($aResult,true)."</pre></td></tr>";
}
}
}
if(intval($aStatus["http_code"])==200){
return $sContent;
}else{
echo "<tr><td class='narrow-label'>返回出错:</td><td><pre>".$aStatus["http_code"].",请检查参数或者确实是腾讯服务器出错咯。</pre></td></tr>";
return FALSE;
}
}
function download($sUrl,$sFileName){
	global $aConfig;
	set_time_limit(0);
	$oCurl = curl_init();
if(stripos($sUrl,"https://")!==FALSE){
curl_setopt($oCurl, CURLOPT_SSL_VERIFYPEER, FALSE);
curl_setopt($oCurl, CURLOPT_SSL_VERIFYHOST, false);
}
	curl_setopt($oCurl, CURLOPT_USERAGENT, !empty($_SERVER["HTTP_USER_AGENT"]) ? $_SERVER["HTTP_USER_AGENT"] : "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.7) Gecko/20100625 Firefox/3.6.7");
curl_setopt($oCurl, CURLOPT_URL, $sUrl);
curl_setopt($oCurl, CURLOPT_REFERER, $sUrl);
curl_setopt($oCurl, CURLOPT_AUTOREFERER, true);
curl_setopt($oCurl, CURLOPT_RETURNTRANSFER, 1 );
$sContent = curl_exec($oCurl);
$aStatus = curl_getinfo($oCurl);
curl_close($oCurl);
file_put_contents($sFileName,$sContent);
if(intval($aConfig["debug"])===1){
echo "<tr><td class='narrow-label'>请求地址:</td><td><pre>".$sUrl."</pre></td></tr>";
echo "<tr><td class='narrow-label'>请求信息:</td><td><pre>".var_export($aStatus,true)."</pre></td></tr>";
}
return(intval($aStatus["http_code"])==200);
} | huahuajjh/ppx | src/extensions/oauth/qq.php | PHP | apache-2.0 | 11,607 |
/*
* The Apache Software License, Version 1.1
*
* Copyright (c) 1999 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution, if
* any, must include the following acknowlegement:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowlegement may appear in the software itself,
* if and wherever such third-party acknowlegements normally appear.
*
* 4. The names "The Jakarta Project", "Tomcat", and "Apache Software
* Foundation" must not be used to endorse or promote products derived
* from this software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache"
* nor may "Apache" appear in their names without prior written
* permission of the Apache Group.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
* ====================================================================
*
* This source code implements specifications defined by the Java
* Community Process. In order to remain compliant with the specification
* DO NOT add / change / or delete method signatures!
*/
package javax.servlet.http;
import javax.servlet.ServletRequest;
import java.util.Enumeration;
/**
*
* Extends the {@link javax.servlet.ServletRequest} interface
* to provide request information for HTTP servlets.
*
* <p>The servlet container creates an <code>HttpServletRequest</code>
* object and passes it as an argument to the servlet's service
* methods (<code>doGet</code>, <code>doPost</code>, etc).
*
*
* @author Various
* @version $Version$
*
*
*/
public interface HttpServletRequest extends ServletRequest {
/**
* String identifier for Basic authentication. Value "BASIC"
*/
public static final String BASIC_AUTH = "BASIC";
/**
* String identifier for Form authentication. Value "FORM"
*/
public static final String FORM_AUTH = "FORM";
/**
* String identifier for Client Certificate authentication. Value "CLIENT_CERT"
*/
public static final String CLIENT_CERT_AUTH = "CLIENT_CERT";
/**
* String identifier for Digest authentication. Value "DIGEST"
*/
public static final String DIGEST_AUTH = "DIGEST";
/**
* Returns the name of the authentication scheme used to protect
* the servlet. All servlet containers support basic, form and client
* certificate authentication, and may additionally support digest
* authentication.
* If the servlet is not authenticated <code>null</code> is returned.
*
* <p>Same as the value of the CGI variable AUTH_TYPE.
*
*
* @return one of the static members BASIC_AUTH,
* FORM_AUTH, CLIENT_CERT_AUTH, DIGEST_AUTH
* (suitable for == comparison) or
* the container-specific string indicating
* the authentication scheme, or
* <code>null</code> if the request was
* not authenticated.
*
*/
public String getAuthType();
/**
*
* Returns an array containing all of the <code>Cookie</code>
* objects the client sent with this request.
* This method returns <code>null</code> if no cookies were sent.
*
* @return an array of all the <code>Cookies</code>
* included with this request, or <code>null</code>
* if the request has no cookies
*
*
*/
public Cookie[] getCookies();
/**
*
* Returns the value of the specified request header
* as a <code>long</code> value that represents a
* <code>Date</code> object. Use this method with
* headers that contain dates, such as
* <code>If-Modified-Since</code>.
*
* <p>The date is returned as
* the number of milliseconds since January 1, 1970 GMT.
* The header name is case insensitive.
*
* <p>If the request did not have a header of the
* specified name, this method returns -1. If the header
* can't be converted to a date, the method throws
* an <code>IllegalArgumentException</code>.
*
* @param name a <code>String</code> specifying the
* name of the header
*
* @return a <code>long</code> value
* representing the date specified
* in the header expressed as
* the number of milliseconds
* since January 1, 1970 GMT,
* or -1 if the named header
* was not included with the
* request
*
* @exception IllegalArgumentException If the header value
* can't be converted
* to a date
*
*/
public long getDateHeader(String name);
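    // Illustrative (not part of the Servlet specification text): a typical conditional-GET
    // check inside a servlet's doGet method; getLastModified() is a hypothetical helper.
    //
    //   long ifModifiedSince = request.getDateHeader("If-Modified-Since");
    //   if (ifModifiedSince != -1 && ifModifiedSince >= getLastModified()) {
    //       response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
    //       return;
    //   }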
/**
*
* Returns the value of the specified request header
* as a <code>String</code>. If the request did not include a header
* of the specified name, this method returns <code>null</code>.
* If there are multiple headers with the same name, this method
     * returns the first header in the request.
* The header name is case insensitive. You can use
* this method with any request header.
*
* @param name a <code>String</code> specifying the
* header name
*
* @return a <code>String</code> containing the
* value of the requested
* header, or <code>null</code>
* if the request does not
* have a header of that name
*
*/
public String getHeader(String name);
/**
*
* Returns all the values of the specified request header
* as an <code>Enumeration</code> of <code>String</code> objects.
*
* <p>Some headers, such as <code>Accept-Language</code> can be sent
* by clients as several headers each with a different value rather than
* sending the header as a comma separated list.
*
* <p>If the request did not include any headers
* of the specified name, this method returns an empty
* <code>Enumeration</code>.
* The header name is case insensitive. You can use
* this method with any request header.
*
* @param name a <code>String</code> specifying the
* header name
*
* @return an <code>Enumeration</code> containing
* the values of the requested header. If
* the request does not have any headers of
* that name return an empty
* enumeration. If
* the container does not allow access to
* header information, return null
*
*/
public Enumeration getHeaders(String name);
/**
*
* Returns an enumeration of all the header names
* this request contains. If the request has no
* headers, this method returns an empty enumeration.
*
* <p>Some servlet containers do not allow
* servlets to access headers using this method, in
* which case this method returns <code>null</code>
*
* @return an enumeration of all the
* header names sent with this
* request; if the request has
* no headers, an empty enumeration;
* if the servlet container does not
* allow servlets to use this method,
* <code>null</code>
*
*
*/
public Enumeration getHeaderNames();
/**
*
* Returns the value of the specified request header
* as an <code>int</code>. If the request does not have a header
* of the specified name, this method returns -1. If the
* header cannot be converted to an integer, this method
* throws a <code>NumberFormatException</code>.
*
* <p>The header name is case insensitive.
*
* @param name a <code>String</code> specifying the name
* of a request header
*
* @return an integer expressing the value
* of the request header or -1
* if the request doesn't have a
* header of this name
*
* @exception NumberFormatException If the header value
* can't be converted
* to an <code>int</code>
*/
public int getIntHeader(String name);
/**
*
* Returns the name of the HTTP method with which this
* request was made, for example, GET, POST, or PUT.
* Same as the value of the CGI variable REQUEST_METHOD.
*
* @return a <code>String</code>
* specifying the name
* of the method with which
* this request was made
*
*/
public String getMethod();
/**
*
* Returns any extra path information associated with
* the URL the client sent when it made this request.
* The extra path information follows the servlet path
* but precedes the query string and will start with
* a "/" character.
*
* <p>This method returns <code>null</code> if there
* was no extra path information.
*
* <p>Same as the value of the CGI variable PATH_INFO.
*
*
* @return a <code>String</code>, decoded by the
* web container, specifying
* extra path information that comes
* after the servlet path but before
* the query string in the request URL;
* or <code>null</code> if the URL does not have
* any extra path information
*
*/
public String getPathInfo();
/**
*
* Returns any extra path information after the servlet name
* but before the query string, and translates it to a real
* path. Same as the value of the CGI variable PATH_TRANSLATED.
*
     * <p>This method returns <code>null</code> if the URL does not have
     * any extra path information, or if the servlet container cannot
     * translate the virtual path to a real path for any reason
     * (such as when the web application is executed from an archive).
*
* The web container does not decode this string.
*
*
* @return a <code>String</code> specifying the
* real path, or <code>null</code> if
* the URL does not have any extra path
* information
*
*
*/
public String getPathTranslated();
/**
*
* Returns the portion of the request URI that indicates the context
* of the request. The context path always comes first in a request
* URI. The path starts with a "/" character but does not end with a "/"
* character. For servlets in the default (root) context, this method
* returns "". The container does not decode this string.
*
*
* @return a <code>String</code> specifying the
* portion of the request URI that indicates the context
* of the request
*
*
*/
public String getContextPath();
/**
*
* Returns the query string that is contained in the request
* URL after the path. This method returns <code>null</code>
* if the URL does not have a query string. Same as the value
* of the CGI variable QUERY_STRING.
*
* @return a <code>String</code> containing the query
* string or <code>null</code> if the URL
* contains no query string. The value is not
* decoded by the container.
*
*/
public String getQueryString();
/**
*
* Returns the login of the user making this request, if the
* user has been authenticated, or <code>null</code> if the user
* has not been authenticated.
* Whether the user name is sent with each subsequent request
* depends on the browser and type of authentication. Same as the
* value of the CGI variable REMOTE_USER.
*
* @return a <code>String</code> specifying the login
* of the user making this request, or <code>null</code>
* if the user login is not known
*
*/
public String getRemoteUser();
/**
*
* Returns a boolean indicating whether the authenticated user is included
* in the specified logical "role". Roles and role membership can be
* defined using deployment descriptors. If the user has not been
* authenticated, the method returns <code>false</code>.
*
* @param role a <code>String</code> specifying the name
* of the role
*
* @return a <code>boolean</code> indicating whether
* the user making this request belongs to a given role;
* <code>false</code> if the user has not been
* authenticated
*
*/
public boolean isUserInRole(String role);
/**
*
* Returns a <code>java.security.Principal</code> object containing
* the name of the current authenticated user. If the user has not been
* authenticated, the method returns <code>null</code>.
*
* @return a <code>java.security.Principal</code> containing
* the name of the user making this request;
* <code>null</code> if the user has not been
* authenticated
*
*/
public java.security.Principal getUserPrincipal();
/**
*
* Returns the session ID specified by the client. This may
* not be the same as the ID of the current valid session
* for this request.
* If the client did not specify a session ID, this method returns
* <code>null</code>.
*
*
* @return a <code>String</code> specifying the session
* ID, or <code>null</code> if the request did
* not specify a session ID
*
* @see #isRequestedSessionIdValid
*
*/
public String getRequestedSessionId();
/**
*
* Returns the part of this request's URL from the protocol
* name up to the query string in the first line of the HTTP request.
* The web container does not decode this String.
* For example:
*
*
     * <table summary="Examples of Returned Values">
     * <tr align=left><th>First line of HTTP request</th>
     * <th>Returned Value</th></tr>
     * <tr><td>POST /some/path.html HTTP/1.1</td><td>/some/path.html</td></tr>
     * <tr><td>GET http://foo.bar/a.html HTTP/1.0</td><td>/a.html</td></tr>
     * <tr><td>HEAD /xyz?a=b HTTP/1.1</td><td>/xyz</td></tr>
     * </table>
*
* <p>To reconstruct an URL with a scheme and host, use
* {@link HttpUtils#getRequestURL}.
*
* @return a <code>String</code> containing
* the part of the URL from the
* protocol name up to the query string
*
* @see HttpUtils#getRequestURL
*
*/
public String getRequestURI();
/**
*
* Reconstructs the URL the client used to make the request.
* The returned URL contains a protocol, server name, port
* number, and server path, but it does not include query
* string parameters.
*
* <p>Because this method returns a <code>StringBuffer</code>,
* not a string, you can modify the URL easily, for example,
* to append query parameters.
*
* <p>This method is useful for creating redirect messages
* and for reporting errors.
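     *
     * <p>For example (an illustrative sketch; <code>request</code> and
     * <code>response</code> stand for this request and its associated
     * response):
     * <pre>
     *     StringBuffer url = request.getRequestURL();
     *     url.append("?from=homepage");
     *     response.sendRedirect(url.toString());
     * </pre>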
*
* @return a <code>StringBuffer</code> object containing
* the reconstructed URL
*
*/
public StringBuffer getRequestURL();
/**
*
* Returns the part of this request's URL that calls
* the servlet. This path starts with a "/" character
* and includes either the servlet name or a path to
* the servlet, but does not include any extra path
* information or a query string. Same as the value of
* the CGI variable SCRIPT_NAME.
*
* <p>This method will return an empty string ("") if the
* servlet used to process this request was matched using
* the "/*" pattern.
*
* @return a <code>String</code> containing
* the name or path of the servlet being
* called, as specified in the request URL,
* decoded, or an empty string if the servlet
* used to process the request is matched
* using the "/*" pattern.
*
*/
public String getServletPath();
/**
*
* Returns the current <code>HttpSession</code>
* associated with this request or, if there is no
* current session and <code>create</code> is true, returns
* a new session.
*
* <p>If <code>create</code> is <code>false</code>
* and the request has no valid <code>HttpSession</code>,
* this method returns <code>null</code>.
*
* <p>To make sure the session is properly maintained,
* you must call this method before
* the response is committed. If the container is using cookies
* to maintain session integrity and is asked to create a new session
* when the response is committed, an IllegalStateException is thrown.
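     *
     * <p>For example (an illustrative sketch; <code>request</code> and
     * <code>response</code> stand for this request and its associated
     * response):
     * <pre>
     *     HttpSession session = request.getSession(false);
     *     if (session == null) {
     *         // no existing session, and we chose not to create one
     *         response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
     *     } else {
     *         session.setAttribute("lastVisit", new java.util.Date());
     *     }
     * </pre>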
*
*
*
*
* @param create <code>true</code> to create
* a new session for this request if necessary;
* <code>false</code> to return <code>null</code>
* if there's no current session
*
*
* @return the <code>HttpSession</code> associated
* with this request or <code>null</code> if
* <code>create</code> is <code>false</code>
* and the request has no valid session
*
* @see #getSession()
*
*
*/
public HttpSession getSession(boolean create);
/**
*
* Returns the current session associated with this request,
* or if the request does not have a session, creates one.
*
* @return the <code>HttpSession</code> associated
* with this request
*
* @see #getSession(boolean)
*
*/
public HttpSession getSession();
/**
*
* Checks whether the requested session ID is still valid.
*
* @return <code>true</code> if this
* request has an id for a valid session
* in the current session context;
* <code>false</code> otherwise
*
* @see #getRequestedSessionId
* @see #getSession
* @see HttpSessionContext
*
*/
public boolean isRequestedSessionIdValid();
/**
*
* Checks whether the requested session ID came in as a cookie.
*
* @return <code>true</code> if the session ID
* came in as a
* cookie; otherwise, <code>false</code>
*
*
* @see #getSession
*
*/
public boolean isRequestedSessionIdFromCookie();
/**
*
* Checks whether the requested session ID came in as part of the
* request URL.
*
* @return <code>true</code> if the session ID
* came in as part of a URL; otherwise,
* <code>false</code>
*
*
* @see #getSession
*
*/
public boolean isRequestedSessionIdFromURL();
/**
*
* @deprecated As of Version 2.1 of the Java Servlet
* API, use {@link #isRequestedSessionIdFromURL}
* instead.
*
*/
public boolean isRequestedSessionIdFromUrl();
}
| devjin24/howtomcatworks | bookrefer/jakarta-tomcat-5.0.18-src/jakarta-servletapi-5/jsr154/src/share/javax/servlet/http/HttpServletRequest.java | Java | apache-2.0 | 20,909 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.internal;
/**
* @deprecated Use {@link org.openqa.selenium.BuildInfo} instead.
*/
@Deprecated
public class BuildInfo extends org.openqa.selenium.BuildInfo {
}
| 5hawnknight/selenium | java/client/src/org/openqa/selenium/internal/BuildInfo.java | Java | apache-2.0 | 995 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2_grpc
class WorkflowTemplateServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.dataproc.v1beta2 WorkflowTemplateService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
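    Example (an illustrative sketch; credentials are resolved from the
    environment, and the request/response protos live in
    ``workflow_templates_pb2``, which is not imported by this module):
        transport = WorkflowTemplateServiceGrpcTransport()
        list_stub = transport.list_workflow_templates
        # ``list_stub`` is the raw gRPC callable; it accepts a
        # ``ListWorkflowTemplatesRequest`` proto and returns the
        # corresponding response proto.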
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="dataproc.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"workflow_template_service_stub": workflow_templates_pb2_grpc.WorkflowTemplateServiceStub(
channel
)
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel
)
@classmethod
def create_channel(cls, address="dataproc.googleapis.com:443", credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def create_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates new workflow template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate
@property
def get_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Retrieves the latest workflow template.
Can retrieve previously instantiated template by specifying optional
version parameter.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate
@property
def instantiate_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Instantiates a template and begins execution.
The returned Operation can be used to track execution of workflow by
polling ``operations.get``. The Operation will complete when entire
workflow is finished.
The running workflow can be aborted via ``operations.cancel``. This will
cause any inflight jobs to be cancelled and workflow-owned clusters to
be deleted.
The ``Operation.metadata`` will be ``WorkflowMetadata``.
On successful completion, ``Operation.response`` will be ``Empty``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].InstantiateWorkflowTemplate
@property
def instantiate_inline_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Instantiates a template and begins execution.
This method is equivalent to executing the sequence
``CreateWorkflowTemplate``, ``InstantiateWorkflowTemplate``,
``DeleteWorkflowTemplate``.
The returned Operation can be used to track execution of workflow by
polling ``operations.get``. The Operation will complete when entire
workflow is finished.
The running workflow can be aborted via ``operations.cancel``. This will
cause any inflight jobs to be cancelled and workflow-owned clusters to
be deleted.
The ``Operation.metadata`` will be ``WorkflowMetadata``.
On successful completion, ``Operation.response`` will be ``Empty``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs[
"workflow_template_service_stub"
].InstantiateInlineWorkflowTemplate
@property
def update_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates (replaces) workflow template. The updated template
must contain version that matches the current server version.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].UpdateWorkflowTemplate
@property
def list_workflow_templates(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists workflows that match the specified filter in the request.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].ListWorkflowTemplates
@property
def delete_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes a workflow template. It does not cancel in-progress workflows.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].DeleteWorkflowTemplate
| dhermes/google-cloud-python | dataproc/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py | Python | apache-2.0 | 8,646 |
#include "OccupancyGrid/occgrid.hpp"
#include "OccupancyGrid/cvmat_serialization.h"
#include <opencv2/opencv.hpp>
#include <boost/format.hpp>
/// Override class to override the is_occupied function so that it can update
/// the log-odds map every time a laser ray crosses a cell of the ground truth map.
template<typename real_t, typename int_t>
class OccupancyGrid2DInverseSensor : public OccupancyGrid2D<real_t, int_t> {
public:
using OccupancyGrid2D<real_t, int_t>::og_;
using OccupancyGrid2D<real_t, int_t>::cell_size_;
using OccupancyGrid2D<real_t, int_t>::min_pt_;
using OccupancyGrid2D<real_t, int_t>::FREE;
int_t observed_manh_range_;
cv::Vec<int_t, 2> robot_position_;
cv::Mat_<real_t> log_odds_map_;
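  // Log-odds increments applied per traced cell (values are set in the
  // constructor): +/-1.3863 ~= ln(4), i.e. an assumed inverse sensor model with
  // P(occupied) ~= 0.8 for the cell at the measured range and ~= 0.2 for cells
  // the ray passes through.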
const real_t LOG_ODDS_OCCUPIED;
const real_t LOG_ODDS_FREE;
OccupancyGrid2DInverseSensor(real_t min_x, real_t min_y, real_t cell_size_x, real_t
cell_size_y, int_t ncells_x, int_t ncells_y) :
OccupancyGrid2D<real_t, int_t>(min_x, min_y, cell_size_x, cell_size_y,
ncells_x, ncells_y),
observed_manh_range_(),
robot_position_(),
log_odds_map_(ncells_x, ncells_y, 0.0L),
LOG_ODDS_OCCUPIED(1.3863),
LOG_ODDS_FREE(-1.3863)
{
};
void set_up_ray_trace(
real_t px,
real_t py,
real_t ptheta,
real_t observed_range) {
robot_position_(0) =
(int)floor((px - min_pt_(0)) / cell_size_(0));
robot_position_(1) =
(int)floor((py - min_pt_(1)) / cell_size_(1));
real_t dx_abs = fabs(cos(ptheta));
real_t dy_abs = fabs(sin(ptheta));
real_t dmag = sqrt(dx_abs * dx_abs + dy_abs * dy_abs);
observed_manh_range_ =
floor(observed_range * dx_abs / dmag / cell_size_(0)) +
floor(observed_range * dy_abs / dmag / cell_size_(1));
//printf("-----------------\n");
}
inline int_t manh_distance(int_t i, int_t j) {
return std::abs(i - robot_position_(0)) + std::abs(j - robot_position_(1));
}
virtual bool is_occupied(int_t i, int_t j) {
uint8_t val = og_.ptr(i)[j];
bool retval = (val != FREE);
int_t d = manh_distance(i, j);
// update step
// printf("%d < %d\n", d, observed_manh_range_);
log_odds_map_(i, j) +=
(d < observed_manh_range_) ? LOG_ODDS_FREE
: (d == observed_manh_range_) ? LOG_ODDS_OCCUPIED
: 0; // unknown
return retval;
}
inline void show(int r) {
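    // Convert the accumulated log odds to a grayscale image: with L = log odds
    // of occupancy, 1/(1 + exp(L)) is the probability that a cell is free, so
    // free space renders bright and occupied cells render dark.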
cv::Mat vis;
cv::exp(log_odds_map_, vis);
vis = 1 / (1 + vis);
vis *= 255;
vis.convertTo(vis, CV_8U);
cv::imshow("c", vis);
//cv::imwrite((boost::format("out-%d.png") % r).str(), vis);
cv::waitKey(1);
cv::imwrite("/tmp/two_assumption_algo.png", vis);
}
};
int main(int argc, char** argv) {
if (argc != 4) {
std::cout << "Sample Usage:" << std::endl;
std::cout << "bin/two_assumption_alg Data/player_sim/laser_pose_all.bin Data/player_sim/laser_range_all.bin Data/player_sim/scan_angles_all.bin" << std::endl;
exit(1);
}
cv::Mat laser_pose;
loadMat(laser_pose, argv[1]);
cv::Mat laser_ranges;
loadMat(laser_ranges, argv[2]);
cv::Mat scan_angles;
loadMat(scan_angles, argv[3]);
cv::Vec2d min_pt(-9, -9);
cv::Vec2d range = -2 * min_pt;
cv::Vec2i gridsize(100, 100);
cv::Vec2d cellsize;
cv::divide(range, gridsize, cellsize);
//std::cout << cellsize(0) << cellsize(1) << std::endl;
cv::Vec2i ncells;
cv::divide(min_pt, cellsize, ncells, -2);
//std::cout << ncells(0) << ncells(1) << std::endl;
OccupancyGrid2DInverseSensor<double, int> map(
min_pt(0),
min_pt(1),
cellsize(0),
cellsize(1),
ncells(0),
ncells(1));
double MAX_RANGE = 8;
int r;
for (r = 0; r < laser_pose.rows; r++) {
double* pose = laser_pose.ptr<double>(r);
double* ranges = laser_ranges.ptr<double>(r);
double* angles = scan_angles.ptr<double>(r);
double robot_angle = pose[2];
for (int c = 0; c < scan_angles.cols; c++) {
double total_angle = robot_angle + angles[c];
cv::Vec2d final_pos;
map.set_up_ray_trace(pose[0], pose[1], total_angle, ranges[c]);
bool reflectance;
map.ray_trace(pose[0], pose[1], total_angle, MAX_RANGE, final_pos, reflectance);
}
}
map.show(r);
}
| wecacuee/modern-occupancy-grid | src/two_assumption_alg.cpp | C++ | apache-2.0 | 4,475 |
/**
* Copyright 2010-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ibatis.migration.commands;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.util.Properties;
public final class InfoCommand implements Command {
private final PrintStream out;
public InfoCommand(PrintStream out) {
this.out = out;
}
public void execute(String... params) {
Properties properties = new Properties();
InputStream input = getClass().getClassLoader().getResourceAsStream(
"META-INF/maven/org.mybatis/mybatis-migrations/pom.properties");
if (input != null) {
try {
properties.load(input);
} catch (IOException e) {
// ignore, just don't load the properties
} finally {
try {
input.close();
} catch (IOException e) {
// close quietly
}
}
}
out.printf("%s %s (%s)%n",
properties.getProperty("name"),
properties.getProperty("version"),
properties.getProperty("build"));
out.printf("Java version: %s, vendor: %s%n",
System.getProperty("java.version"),
System.getProperty("java.vendor"));
out.printf("Java home: %s%n", System.getProperty("java.home"));
out.printf("Default locale: %s_%s, platform encoding: %s%n",
System.getProperty("user.language"),
System.getProperty("user.country"),
System.getProperty("sun.jnu.encoding"));
out.printf("OS name: \"%s\", version: \"%s\", arch: \"%s\", family: \"%s\"%n",
System.getProperty("os.name"),
System.getProperty("os.version"),
System.getProperty("os.arch"),
getOsFamily());
}
private static final String getOsFamily() {
String osName = System.getProperty("os.name").toLowerCase();
String pathSep = System.getProperty("path.separator");
if (osName.indexOf("windows") != -1) {
return "windows";
} else if (osName.indexOf("os/2") != -1) {
return "os/2";
} else if (osName.indexOf("z/os") != -1 || osName.indexOf("os/390") != -1) {
return "z/os";
} else if (osName.indexOf("os/400") != -1) {
return "os/400";
} else if (pathSep.equals(";")) {
return "dos";
} else if (osName.indexOf("mac") != -1) {
if (osName.endsWith("x")) {
return "mac"; // MACOSX
}
return "unix";
} else if (osName.indexOf("nonstop_kernel") != -1) {
return "tandem";
} else if (osName.indexOf("openvms") != -1) {
return "openvms";
} else if (pathSep.equals(":")) {
return "unix";
}
return "undefined";
}
}
| bradsokol/migrations | src/main/java/org/apache/ibatis/migration/commands/InfoCommand.java | Java | apache-2.0 | 3,217 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.resource;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.PodCluster;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.host.Host;
import com.cloud.host.Host.Type;
import com.cloud.host.HostStats;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceState.Event;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.Pair;
import com.cloud.utils.fsm.NoTransitionException;
/**
* ResourceManager manages how physical resources are organized within the
* CloudStack. It also manages the life cycle of the physical resources.
*/
public interface ResourceManager extends ResourceService{
/**
* Register a listener for different types of resource life cycle events.
* There can only be one type of listener per type of host.
*
     * @param event the event type (see ResourceListener.java); multiple event types may be combined.
* @param listener the listener to notify.
*/
public void registerResourceEvent(Integer event, ResourceListener listener);
public void unregisterResourceEvent(ResourceListener listener);
/**
*
* @param name of adapter
* @param adapter
     * @param hates a list of adapter names which will be eliminated (overridden) by this adapter, especially for
     *        the case where only one adapter may respond to an event, e.g. startupCommand
*/
public void registerResourceStateAdapter(String name, ResourceStateAdapter adapter);
public void unregisterResourceStateAdapter(String name);
public Host createHostAndAgent(Long hostId, ServerResource resource, Map<String, String> details, boolean old, List<String> hostTags,
boolean forRebalance);
public Host addHost(long zoneId, ServerResource resource, Type hostType, Map<String, String> hostDetails);
public HostVO createHostVOForConnectedAgent(StartupCommand[] cmds);
public void checkCIDR(HostPodVO pod, DataCenterVO dc, String serverPrivateIP, String serverPrivateNetmask);
public HostVO fillRoutingHostVO(HostVO host, StartupRoutingCommand ssCmd, HypervisorType hyType, Map<String, String> details, List<String> hostTags);
public void deleteRoutingHost(HostVO host, boolean isForced, boolean forceDestroyStorage) throws UnableDeleteHostException;
public boolean executeUserRequest(long hostId, ResourceState.Event event) throws AgentUnavailableException;
boolean resourceStateTransitTo(Host host, Event event, long msId) throws NoTransitionException;
boolean umanageHost(long hostId);
boolean maintenanceFailed(long hostId);
public boolean maintain(final long hostId) throws AgentUnavailableException;
@Override
public boolean deleteHost(long hostId, boolean isForced, boolean isForceDeleteStorage);
public List<HostVO> findDirectlyConnectedHosts();
public List<HostVO> listAllUpAndEnabledHosts(Host.Type type, Long clusterId, Long podId, long dcId);
public List<HostVO> listAllHostsInCluster(long clusterId);
public List<HostVO> listHostsInClusterByStatus(long clusterId, Status status);
public List<HostVO> listAllUpAndEnabledHostsInOneZoneByType(Host.Type type, long dcId);
public List<HostVO> listAllHostsInOneZoneByType(Host.Type type, long dcId);
public List<HostVO> listAllHostsInAllZonesByType(Type type);
public List<HypervisorType> listAvailHypervisorInZone(Long hostId, Long zoneId);
public HostVO findHostByGuid(String guid);
public HostVO findHostByName(String name);
public List<HostVO> listHostsByNameLike(String name);
/**
* Find a pod based on the user id, template, and data center.
*
* @param template
* @param dc
* @param userId
* @return
*/
Pair<HostPodVO, Long> findPod(VirtualMachineTemplate template, ServiceOfferingVO offering, DataCenterVO dc, long accountId, Set<Long> avoids);
HostStats getHostStatistics(long hostId);
Long getGuestOSCategoryId(long hostId);
String getHostTags(long hostId);
List<PodCluster> listByDataCenter(long dcId);
List<HostVO> listAllNotInMaintenanceHostsInOneZone(Type type, Long dcId);
HypervisorType getDefaultHypervisor(long zoneId);
HypervisorType getAvailableHypervisor(long zoneId);
Discoverer getMatchingDiscover(HypervisorType hypervisorType);
List<HostVO> findHostByGuid(long dcId, String guid);
/**
* @param type
* @param clusterId
* @param podId
* @param dcId
* @return
*/
List<HostVO> listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId);
}
| argv0/cloudstack | server/src/com/cloud/resource/ResourceManager.java | Java | apache-2.0 | 5,753 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/securityhub/model/AwsEcsTaskDefinitionInferenceAcceleratorsDetails.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace SecurityHub
{
namespace Model
{
AwsEcsTaskDefinitionInferenceAcceleratorsDetails::AwsEcsTaskDefinitionInferenceAcceleratorsDetails() :
m_deviceNameHasBeenSet(false),
m_deviceTypeHasBeenSet(false)
{
}
AwsEcsTaskDefinitionInferenceAcceleratorsDetails::AwsEcsTaskDefinitionInferenceAcceleratorsDetails(JsonView jsonValue) :
m_deviceNameHasBeenSet(false),
m_deviceTypeHasBeenSet(false)
{
*this = jsonValue;
}
AwsEcsTaskDefinitionInferenceAcceleratorsDetails& AwsEcsTaskDefinitionInferenceAcceleratorsDetails::operator =(JsonView jsonValue)
{
if(jsonValue.ValueExists("DeviceName"))
{
m_deviceName = jsonValue.GetString("DeviceName");
m_deviceNameHasBeenSet = true;
}
if(jsonValue.ValueExists("DeviceType"))
{
m_deviceType = jsonValue.GetString("DeviceType");
m_deviceTypeHasBeenSet = true;
}
return *this;
}
JsonValue AwsEcsTaskDefinitionInferenceAcceleratorsDetails::Jsonize() const
{
JsonValue payload;
if(m_deviceNameHasBeenSet)
{
payload.WithString("DeviceName", m_deviceName);
}
if(m_deviceTypeHasBeenSet)
{
payload.WithString("DeviceType", m_deviceType);
}
return payload;
}
} // namespace Model
} // namespace SecurityHub
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-securityhub/source/model/AwsEcsTaskDefinitionInferenceAcceleratorsDetails.cpp | C++ | apache-2.0 | 1,594 |
package mil.nga.giat.geowave.analytics.kmeans.mapreduce;
import java.io.IOException;
import java.util.List;
import mil.nga.giat.geowave.accumulo.mapreduce.GeoWaveWritableInputMapper;
import mil.nga.giat.geowave.accumulo.mapreduce.input.GeoWaveInputKey;
import mil.nga.giat.geowave.analytics.clustering.CentroidManagerGeoWave;
import mil.nga.giat.geowave.analytics.clustering.CentroidPairing;
import mil.nga.giat.geowave.analytics.clustering.NestedGroupCentroidAssignment;
import mil.nga.giat.geowave.analytics.extract.CentroidExtractor;
import mil.nga.giat.geowave.analytics.extract.SimpleFeatureCentroidExtractor;
import mil.nga.giat.geowave.analytics.kmeans.AssociationNotification;
import mil.nga.giat.geowave.analytics.parameters.CentroidParameters;
import mil.nga.giat.geowave.analytics.parameters.JumpParameters;
import mil.nga.giat.geowave.analytics.tools.AnalyticItemWrapper;
import mil.nga.giat.geowave.analytics.tools.AnalyticItemWrapperFactory;
import mil.nga.giat.geowave.analytics.tools.ConfigurationWrapper;
import mil.nga.giat.geowave.analytics.tools.SimpleFeatureItemWrapperFactory;
import mil.nga.giat.geowave.analytics.tools.mapreduce.CountofDoubleWritable;
import mil.nga.giat.geowave.analytics.tools.mapreduce.JobContextConfigurationWrapper;
import mil.nga.giat.geowave.index.StringUtils;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.vividsolutions.jts.geom.Point;
/**
 * Calculate the distortion.
* <p/>
* See Catherine A. Sugar and Gareth M. James (2003).
* "Finding the number of clusters in a data set: An information theoretic approach"
* Journal of the American Statistical Association 98 (January): 750–763
*
* @formatter:off Context configuration parameters include:
* <p/>
* "KMeansDistortionMapReduce.Common.DistanceFunctionClass" ->
* {@link mil.nga.giat.geowave.analytics.distance.DistanceFn}
* used to determine distance to centroid
* <p/>
* "KMeansDistortionMapReduce.Centroid.WrapperFactoryClass" ->
* {@link AnalyticItemWrapperFactory} to extract wrap spatial
* objects with Centroid management functions
* <p/>
* "KMeansDistortionMapReduce.Centroid.ExtractorClass" ->
* {@link mil.nga.giat.geowave.analytics.extract.CentroidExtractor}
* <p/>
* "KMeansDistortionMapReduce.Jump.CountOfCentroids" -> May be
* different from actual.
* @formatter:on
* @see CentroidManagerGeoWave
*/
public class KMeansDistortionMapReduce
{
protected static final Logger LOGGER = LoggerFactory.getLogger(KMeansDistortionMapReduce.class);
public static class KMeansDistortionMapper extends
GeoWaveWritableInputMapper<Text, CountofDoubleWritable>
{
private NestedGroupCentroidAssignment<Object> nestedGroupCentroidAssigner;
private final Text outputKeyWritable = new Text(
"1");
private final CountofDoubleWritable outputValWritable = new CountofDoubleWritable();
private CentroidExtractor<Object> centroidExtractor;
private AnalyticItemWrapperFactory<Object> itemWrapperFactory;
AssociationNotification<Object> centroidAssociationFn = new AssociationNotification<Object>() {
@Override
public void notify(
final CentroidPairing<Object> pairing ) {
outputKeyWritable.set(pairing.getCentroid().getGroupID());
final double extraFromItem[] = pairing.getPairedItem().getDimensionValues();
final double extraCentroid[] = pairing.getCentroid().getDimensionValues();
final Point p = centroidExtractor.getCentroid(pairing.getPairedItem().getWrappedItem());
final Point centroid = centroidExtractor.getCentroid(pairing.getCentroid().getWrappedItem());
// calculate error for dp
				// using the identity matrix for the common covariance, therefore
				// E[(p - c)^T * cov^-1 * (p - c)] => (px - cx)^2 + (py - cy)^2
double expectation = 0.0;
for (int i = 0; i < extraCentroid.length; i++) {
expectation += Math.pow(
extraFromItem[i] - extraCentroid[i],
2);
}
expectation += (Math.pow(
p.getCoordinate().x - centroid.getCoordinate().x,
2) + Math.pow(
p.getCoordinate().y - centroid.getCoordinate().y,
2));
// + Math.pow(
// p.getCoordinate().z - centroid.getCoordinate().z,
// 2));
outputValWritable.set(
expectation,
1);
}
};
@Override
protected void mapNativeValue(
final GeoWaveInputKey key,
final Object value,
final org.apache.hadoop.mapreduce.Mapper<GeoWaveInputKey, ObjectWritable, Text, CountofDoubleWritable>.Context context )
throws IOException,
InterruptedException {
nestedGroupCentroidAssigner.findCentroidForLevel(
itemWrapperFactory.create(value),
centroidAssociationFn);
context.write(
outputKeyWritable,
outputValWritable);
}
@SuppressWarnings("unchecked")
@Override
protected void setup(
final Mapper<GeoWaveInputKey, ObjectWritable, Text, CountofDoubleWritable>.Context context )
throws IOException,
InterruptedException {
super.setup(context);
final ConfigurationWrapper config = new JobContextConfigurationWrapper(
context,
KMeansDistortionMapReduce.LOGGER);
try {
nestedGroupCentroidAssigner = new NestedGroupCentroidAssignment<Object>(
config);
}
catch (final Exception e1) {
throw new IOException(
e1);
}
try {
centroidExtractor = config.getInstance(
CentroidParameters.Centroid.EXTRACTOR_CLASS,
KMeansDistortionMapReduce.class,
CentroidExtractor.class,
SimpleFeatureCentroidExtractor.class);
}
catch (final Exception e1) {
throw new IOException(
e1);
}
try {
itemWrapperFactory = config.getInstance(
CentroidParameters.Centroid.WRAPPER_FACTORY_CLASS,
KMeansDistortionMapReduce.class,
AnalyticItemWrapperFactory.class,
SimpleFeatureItemWrapperFactory.class);
}
catch (final Exception e1) {
throw new IOException(
e1);
}
}
}
public static class KMeansDistorationCombiner extends
Reducer<Text, CountofDoubleWritable, Text, CountofDoubleWritable>
{
final CountofDoubleWritable outputValue = new CountofDoubleWritable();
@Override
public void reduce(
final Text key,
final Iterable<CountofDoubleWritable> values,
final Reducer<Text, CountofDoubleWritable, Text, CountofDoubleWritable>.Context context )
throws IOException,
InterruptedException {
double expectation = 0;
double ptCount = 0;
for (final CountofDoubleWritable value : values) {
expectation += value.getValue();
ptCount += value.getCount();
}
outputValue.set(
expectation,
ptCount);
context.write(
key,
outputValue);
}
}
public static class KMeansDistortionReduce extends
Reducer<Text, CountofDoubleWritable, Text, Mutation>
{
private String expectedK = null;
final protected Text output = new Text(
"");
private CentroidManagerGeoWave<Object> centroidManager;
@Override
public void reduce(
final Text key,
final Iterable<CountofDoubleWritable> values,
final Reducer<Text, CountofDoubleWritable, Text, Mutation>.Context context )
throws IOException,
InterruptedException {
double expectation = 0.0;
final List<AnalyticItemWrapper<Object>> centroids = centroidManager.getCentroidsForGroup(key.toString());
			// it is possible that the number of items in a group is smaller
			// than the expected cluster count
final String kCount = expectedK == null ? Integer.toString(centroids.size()) : expectedK;
if (centroids.size() == 0) {
return;
}
			final double numDimensions = 2 + centroids.get(
0).getExtraDimensions().length;
double ptCount = 0;
for (final CountofDoubleWritable value : values) {
expectation += value.getValue();
ptCount += value.getCount();
}
if (ptCount > 0) {
expectation /= ptCount;
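				// Sugar & James: average the squared error per point, normalize by the
				// number of dimensions, then raise to the power -(p/2) to obtain the
				// distortion value used by the jump method.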
final Double distortion = Math.pow(
						expectation / numDimensions,
						-(numDimensions / 2));
// key: group ID | "DISTORTION" | K
// value: distortion value
final Mutation m = new Mutation(
key.toString());
m.put(
new Text(
"dt"),
new Text(
kCount),
new Value(
distortion.toString().getBytes(
StringUtils.UTF8_CHAR_SET)));
// write distortion to accumulo, defaults to table given to
// AccumuloOutputFormat, in driver
context.write(
output, // default table
m);
}
}
@Override
protected void setup(
final Reducer<Text, CountofDoubleWritable, Text, Mutation>.Context context )
throws IOException,
InterruptedException {
super.setup(context);
final ConfigurationWrapper config = new JobContextConfigurationWrapper(
context,
KMeansDistortionMapReduce.LOGGER);
final int k = config.getInt(
JumpParameters.Jump.COUNT_OF_CENTROIDS,
KMeansDistortionMapReduce.class,
-1);
if (k > 0) {
expectedK = Integer.toString(k);
}
try {
centroidManager = new CentroidManagerGeoWave<Object>(
config);
}
catch (final Exception e) {
KMeansDistortionMapReduce.LOGGER.warn(
"Unable to initialize centroid manager",
e);
throw new IOException(
"Unable to initialize centroid manager");
}
}
}
}
| viggyprabhu/geowave | geowave-analytics/src/main/java/mil/nga/giat/geowave/analytics/kmeans/mapreduce/KMeansDistortionMapReduce.java | Java | apache-2.0 | 9,638 |
/*
* Copyright (c) 2015-2016 Tapglue (https://www.tapglue.com/). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.tapglue.managers;
import android.content.Context;
import android.content.SharedPreferences;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import com.tapglue.Tapglue;
import com.tapglue.model.TGEventsList;
import com.tapglue.model.TGFeed;
import com.tapglue.model.TGFeedCount;
import com.tapglue.model.TGPostsList;
import com.tapglue.model.queries.TGQuery;
import com.tapglue.networking.requests.TGRequestCallback;
import com.tapglue.networking.requests.TGRequestErrorType;
public class TGFeedManagerImpl extends AbstractTGManager implements TGFeedManager {
private static final String CACHE_KEY = "FEED_CACHE";
public TGFeedManagerImpl(Tapglue instance) {
super(instance);
}
/**
* Get feed from cache
*
* @param callback
*/
@Override
public void cachedFeedForCurrentUser(@NonNull final TGRequestCallback<TGFeed> callback) {
if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
getCachedFeedIfAvailable(callback);
}
/**
* Return cached feed
*
* @param callback
*/
@Override
public void getCachedFeedIfAvailable(@NonNull final TGRequestCallback<TGFeed> callback) {
SharedPreferences cache = instance.getContext().getSharedPreferences(TGFeedManagerImpl.class.toString(), Context.MODE_PRIVATE);
if (!cache.contains(CACHE_KEY)) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.NO_CACHE_OBJECT));
return;
}
TGFeed feed = new Gson().fromJson(cache.getString(CACHE_KEY, null), new TypeToken<TGFeed>() {}.getType());
callback.onRequestFinished(feed, false);
}
@Override
public void retrieveEventsFeedForCurrentUser(@NonNull final TGRequestCallback<TGEventsList> callback) {
retrieveEventsFeedForCurrentUser(null, callback);
}
@Override
public void retrieveEventsFeedForCurrentUser(@Nullable TGQuery whereParameters, @NonNull final TGRequestCallback<TGEventsList> callback) {
if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getEvents(whereParameters, callback);
}
@Override
public void retrieveEventsForCurrentUser(@NonNull final TGRequestCallback<TGEventsList> callback) {
retrieveEventsForCurrentUser(null, callback);
}
@Override
public void retrieveEventsForCurrentUser(@Nullable TGQuery whereParameters, @NonNull final TGRequestCallback<TGEventsList> callback) {
if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getEvents(whereParameters, callback);
}
@Override
public void retrieveEventsForUser(@NonNull Long userId, @NonNull final TGRequestCallback<TGEventsList> callback) {
retrieveEventsForUser(userId, null, callback);
}
@Override
public void retrieveEventsForUser(@NonNull Long userId, TGQuery whereParameters, @NonNull final TGRequestCallback<TGEventsList> callback) {
if (userId == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.NULL_INPUT));
return;
}
else if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getEvents(userId, whereParameters, callback);
}
@Override
public void retrieveNewsFeedForCurrentUser(@NonNull final TGRequestCallback<TGFeed> callback) {
retrieveNewsFeedForCurrentUser(null, callback);
}
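    /**
     * Retrieves the news feed and, on success, stores it in the local cache read by
     * {@link #getCachedFeedIfAvailable(TGRequestCallback)}.
     *
     * @param whereParameters optional query constraints, may be null
     * @param callback callback notified with the retrieved feed
     */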
@Override
public void retrieveNewsFeedForCurrentUser(@Nullable TGQuery whereParameters, @NonNull final TGRequestCallback<TGFeed> callback) {
instance.createRequest().getFeed(whereParameters, new TGRequestCallback<TGFeed>() {
@Override
public boolean callbackIsEnabled() {
return callback.callbackIsEnabled();
}
@Override
public void onRequestError(TGRequestErrorType cause) {
callback.onRequestError(cause);
}
@Override
public void onRequestFinished(TGFeed output, boolean changeDoneOnline) {
saveFeedToCache(output);
callback.onRequestFinished(output, changeDoneOnline);
}
});
}
@Override
public void retrievePostsFeedForCurrentUser(@NonNull final TGRequestCallback<TGPostsList> callback) {
if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getMyPosts(callback);
}
@Override
public void retrievePostsForCurrentUser(@NonNull final TGRequestCallback<TGPostsList> callback) {
if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getMyPosts(callback);
}
@Override
public void retrievePostsForUser(@NonNull Long userId, @NonNull final TGRequestCallback<TGPostsList> callback) {
if (userId == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.NULL_INPUT));
return;
}
else if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getUserPosts(userId, callback);
}
@Override
public void retrieveUnreadCountForCurrentUser(@NonNull final TGRequestCallback<TGFeedCount> callback) {
if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getFeedCount(callback);
}
@Override
public void retrieveUnreadFeedForCurrentUser(@NonNull final TGRequestCallback<TGFeed> callback) {
if (instance.getUserManager().getCurrentUser() == null) {
callback.onRequestError(new TGRequestErrorType(TGRequestErrorType.ErrorType.USER_NOT_LOGGED_IN));
return;
}
instance.createRequest().getUnreadFeed(callback);
}
/**
* Save feed to cache
*
* @param output
*/
private void saveFeedToCache(@Nullable TGFeed output) {
SharedPreferences cache = instance.getContext().getSharedPreferences(TGFeedManagerImpl.class.toString(), Context.MODE_PRIVATE);
if (output == null) {
if (cache.contains(CACHE_KEY)) {
cache.edit().remove(CACHE_KEY).apply();
}
}
else {
cache.edit().putString(CACHE_KEY, new Gson().toJson(output, new TypeToken<TGFeed>() {
}.getType())).apply();
}
}
}
| tapglue/android_sdk | v1/tapglue-android-sdk/tapglue-android-sdk/src/main/java/com/tapglue/managers/TGFeedManagerImpl.java | Java | apache-2.0 | 8,313 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines various data containers for plotting a transect.
This file is not used in the current version of `geotransect` but is kept here
in case it's useful later.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import sys
import os
import numpy as np
from scipy import fft
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import obspy
from plot_utils import add_subplot_axes
filename = sys.argv[1]
segyfile = os.path.basename(filename)
# Read all traces.
section = obspy.read(filename)
r_elevs = []
s_elevs = []
esp = [] # energy source point number
ens = [] # ensemble number
for t in section.traces:
nsamples = t.stats.segy.trace_header.number_of_samples_in_this_trace
dt = t.stats.segy.trace_header.sample_interval_in_ms_for_this_trace
if dt > 100:
dt /= 1000.
r_elevs.append(t.stats.segy.trace_header.datum_elevation_at_receiver_group)
s_elevs.append(t.stats.segy.trace_header.receiver_group_elevation)
esp.append(t.stats.segy.trace_header.energy_source_point_number)
ens.append(t.stats.segy.trace_header.ensemble_number)
ntraces = len(section.traces)
tbase = np.arange(0, nsamples * dt, dt)
tstart = 0
tend = tbase[-1]
aspect = float(ntraces) / float(nsamples)
nf = 1.0
print 'ntraces', ntraces
print 'nsamples', nsamples
print 'dt', dt/nf
data = np.zeros((nsamples, ntraces))
for i, trace in enumerate(section.traces):
data[:, i] = trace.data
line_extents = {'first_trace': 1,
'last_trace': ntraces,
'start_time': tstart,
'end_time': tend
}
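# Clip the colour scale at the 99th percentile so a handful of strong
# amplitudes do not wash out the display.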
clip_val = np.percentile(data, 99.0)
print "clip_val", clip_val
print "max_val", np.amax(data)
print "min_val", np.amin(data)
print "tstart", tstart
print "tend", tend
largest = max(np.amax(data), abs(np.amin(data)))
# MAIN PLOT
h = (tend-tstart) / 250.0
w = ntraces / 250.0
fig = plt.figure(figsize=(10, 10), facecolor='w')
# Seismic data
ax = fig.add_axes([0.05, 0.05, 0.9, 0.95])
im = ax.imshow(data, cmap=cm.gray, origin='upper',
vmin=-clip_val,
vmax=clip_val,
extent=(line_extents['first_trace'],
line_extents['last_trace'],
line_extents['end_time'],
line_extents['start_time']),
aspect = aspect * 0.5
)
ax.set_ylabel('Two-way time [ms]')
ax.set_xlabel('Trace no.')
ax.grid()
ax.set_title(segyfile)
# Colourbar
extreme = max(np.amax(data), abs(np.amin(data)))
colorbar_ax = add_subplot_axes(ax, [0.075, 0.075, 0.025, 0.15])
fig.colorbar(im, cax=colorbar_ax)
colorbar_ax.text(1.15, 1.1, '%3.0f' % -extreme,
transform=colorbar_ax.transAxes,
ha='left',
va='top')
colorbar_ax.text(1.15, -0.05, '%3.0f' % extreme,
transform=colorbar_ax.transAxes,
ha='left', fontsize=10)
colorbar_ax.set_axis_off()
# Power spectrum
S = abs(fft(data[:, 1]))
faxis = np.fft.fftfreq(len(data[:, 1]), d=(1/nf)*dt*1e-6)
spec_ax = add_subplot_axes(ax, [0.50, 0.075, 0.2, 0.15])
spec_ax.plot(faxis[:len(faxis)//4],
np.log10(S[0:len(faxis)//4]),
'b', lw=2)
spec_ax.set_xlabel('frequency [Hz]', fontsize=10)
spec_ax.set_xticklabels([0, 100, 200, 300], fontsize=10)
# spec_ax.set_xticklabels(spec_ax.get_xticks(), fontsize=10)
spec_ax.set_yticklabels(spec_ax.get_yticks(), fontsize=10)
spec_ax.set_yticks([])
spec_ax.set_yticklabels([])
spec_ax.text(.95, .9, 'Power spectrum',
horizontalalignment='right',
transform=spec_ax.transAxes, fontsize=10
)
spec_ax.grid('on')
# Histogram
hist_ax = add_subplot_axes(ax, [0.75, 0.075, 0.2, 0.15])
hist_line = hist_ax.hist(np.ravel(data),
bins=int(100.0 / (clip_val/largest)))
hist_ax.set_xlim(-clip_val, clip_val)
# hist_ax.set_xticklabels([])
hist_ax.set_yticks([])
hist_ax.set_xticklabels([])
hist_ax.set_ylim(hist_ax.get_ylim()[0], hist_ax.get_ylim()[1]),
hist_ax.set_yticks([])
hist_ax.text(.95, .9, 'Histogram',
horizontalalignment='right',
transform=hist_ax.transAxes, fontsize=10
)
plt.show()
| kinverarity1/geotransect | profile_plot.py | Python | apache-2.0 | 4,222 |
#
# Cookbook Name:: icinga2
# Resource:: idomysqlconnection
#
# Copyright 2014, Virender Khatri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
actions :create, :delete
default_action :create
attribute :library, :kind_of => String, :default => 'db_ido_mysql'
attribute :host, :kind_of => String, :default => 'localhost'
attribute :port, :kind_of => [String, Integer], :default => '3306'
attribute :user, :kind_of => String, :default => 'icinga'
attribute :password, :kind_of => String, :default => 'icinga'
attribute :database, :kind_of => String, :default => 'icinga'
attribute :table_prefix, :kind_of => String, :default => 'icinga_'
attribute :instance_name, :kind_of => String, :default => 'default'
attribute :instance_description, :kind_of => String, :default => nil
attribute :enable_ha, :kind_of => [TrueClass, FalseClass], :default => nil
attribute :failover_timeout, :kind_of => String, :default => '60s'
attribute :cleanup, :kind_of => Hash, :default => nil
attribute :categories, :kind_of => Array, :default => nil
| vkhatri/chef-icinga2 | resources/idomysqlconnection.rb | Ruby | apache-2.0 | 1,543 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2019 the original author or authors.
*/
package org.assertj.core.util;
import java.util.Comparator;
public class CaseInsensitiveStringComparator implements Comparator<String> {
public final static CaseInsensitiveStringComparator instance = new CaseInsensitiveStringComparator();
@Override
public int compare(String s1, String s2) {
if (s1 == null && s2 == null) return 0;
if (s1 == null) return -1;
if (s2 == null) return 1;
return s1.toLowerCase().compareTo(s2.toLowerCase());
}
} | xasx/assertj-core | src/test/java/org/assertj/core/util/CaseInsensitiveStringComparator.java | Java | apache-2.0 | 1,076 |
# Copyright:: Copyright (c) 2012, 2014 Megam Systems
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Megam
class Components < Megam::ServerAPI
def initialize(email=nil, api_key=nil, host=nil)
@id = nil
@name =nil
@tosca_type = nil
@inputs = []
@outputs = []
@artifacts = {}
@artifact_type = nil
@content = nil
@artifact_requirements = []
@related_components = []
@operations = []
@status = nil
@created_at = nil
super(email, api_key, host)
end
def components
self
end
def id(arg=nil)
if arg != nil
@id = arg
else
@id
end
end
def name(arg=nil)
if arg != nil
@name = arg
else
@name
end
end
def tosca_type(arg=nil)
if arg != nil
@tosca_type = arg
else
@tosca_type
end
end
def inputs(arg=[])
if arg != []
@inputs = arg
else
@inputs
end
end
def outputs(arg=[])
if arg != []
@outputs = arg
else
@outputs
end
end
def artifacts(arg=nil)
if arg != nil
@artifacts = arg
else
@artifacts
end
end
def artifact_type(arg=nil)
if arg != nil
@artifact_type = arg
else
@artifact_type
end
end
def content(arg=nil)
if arg != nil
@content = arg
else
@content
end
end
def artifact_requirements(arg=[])
if arg != []
@artifact_requirements = arg
else
@artifact_requirements
end
end
def related_components(arg=[])
if arg != []
@related_components = arg
else
@related_components
end
end
def operations(arg=[])
if arg != []
@operations = arg
else
@operations
end
end
def status(arg=nil)
if arg != nil
@status = arg
else
@status
end
end
def created_at(arg=nil)
if arg != nil
@created_at = arg
else
@created_at
end
end
def error?
crocked = true if (some_msg.has_key?(:msg_type) && some_msg[:msg_type] == "error")
end
# Transform the ruby obj -> to a Hash
def to_hash
index_hash = Hash.new
index_hash["json_claz"] = self.class.name
index_hash["id"] = id
index_hash["name"] = name
index_hash["tosca_type"] = tosca_type
index_hash["inputs"] = inputs
index_hash["outputs"] = outputs
index_hash["artifacts"] = artifacts
index_hash["related_components"] = related_components
index_hash["operations"] = operations
index_hash["status"] = status
index_hash["created_at"] = created_at
index_hash
end
# Serialize this object as a hash: called from JsonCompat.
    # Verify whether this is called from JsonCompat during testing.
def to_json(*a)
for_json.to_json(*a)
end
def for_json
result = {
"id" => id,
"name" => name,
"tosca_type" => tosca_type,
"inputs" => inputs,
"outputs" => outputs,
"artifacts" => artifacts,
"related_components" => related_components,
"operations" => operations,
"status" => status,
"created_at" => created_at
}
result
end
def self.json_create(o)
asm = new
asm.id(o["id"]) if o.has_key?("id")
asm.name(o["name"]) if o.has_key?("name")
asm.tosca_type(o["tosca_type"]) if o.has_key?("tosca_type")
asm.inputs(o["inputs"]) if o.has_key?("inputs")
asm.outputs(o["outputs"]) if o.has_key?("outputs")
ar = o["artifacts"]
asm.artifacts[:artifact_type] = ar["artifact_type"] if ar && ar.has_key?("artifact_type")
asm.artifacts[:content] = ar["content"] if ar && ar.has_key?("content")
asm.artifacts[:artifact_requirements] = ar["artifact_requirements"] if ar && ar.has_key?("artifact_requirements")
asm.related_components(o["related_components"]) if o.has_key?("related_components")
asm.operations(o["operations"]) if o.has_key?("operations")
asm.status(o["status"]) if o.has_key?("status")
asm.created_at(o["created_at"]) if o.has_key?("created_at")
asm
end
def self.from_hash(o,tmp_email=nil, tmp_api_key=nil, tmp_host=nil)
asm = self.new(tmp_email, tmp_api_key, tmp_host)
asm.from_hash(o)
asm
end
def from_hash(o)
@id = o["id"] if o.has_key?("id")
@name = o["name"] if o.has_key?("name")
@tosca_type = o["tosca_type"] if o.has_key?("tosca_type")
@inputs = o["inputs"] if o.has_key?("inputs")
@outputs = o["outputs"] if o.has_key?("outputs")
@artifacts = o["artifacts"] if o.has_key?("artifacts")
@related_components = o["related_components"] if o.has_key?("related_components")
@operations = o["operations"] if o.has_key?("operations")
@status = o["status"] if o.has_key?("status")
@created_at = o["created_at"] if o.has_key?("created_at")
self
end
def self.create(params)
asm = from_hash(params, params["email"], params["api_key"], params["host"])
asm.create
end
    # Load a component by id
def self.show(params)
asm = self.new(params["email"], params["api_key"], params["host"])
asm.megam_rest.get_components(params["id"])
end
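    # Minimal usage sketch for show (illustrative: credential, host and id
    # values are placeholders):
    #
    #   Megam::Components.show(
    #     "email"   => "user@example.com",
    #     "api_key" => "secret",
    #     "host"    => "https://api.megam.example",
    #     "id"      => "COM0000001")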
def self.update(params)
asm = from_hash(params, params["email"] || params[:email], params["api_key"] || params[:api_key], params["host"] || params[:host])
asm.update
end
# Create the node via the REST API
def update
megam_rest.update_component(to_hash)
end
def to_s
Megam::Stuff.styled_hash(to_hash)
end
end
end | arunkumarsekar06/megam_api | lib/megam/core/components.rb | Ruby | apache-2.0 | 6,597 |
/*
* Copyright 2015 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.samples.apps.iosched.provider;
import android.app.SearchManager;
import android.content.ContentProvider;
import android.content.ContentProviderOperation;
import android.content.ContentProviderResult;
import android.content.ContentValues;
import android.content.Context;
import android.content.OperationApplicationException;
import android.database.Cursor;
import android.database.MatrixCursor;
import android.database.sqlite.SQLiteConstraintException;
import android.database.sqlite.SQLiteDatabase;
import android.net.Uri;
import android.os.ParcelFileDescriptor;
import android.provider.BaseColumns;
import android.text.TextUtils;
import android.util.Log;
import com.google.samples.apps.iosched.Config;
import com.google.samples.apps.iosched.appwidget.ScheduleWidgetProvider;
import com.google.samples.apps.iosched.provider.ScheduleContract.Announcements;
import com.google.samples.apps.iosched.provider.ScheduleContract.Blocks;
import com.google.samples.apps.iosched.provider.ScheduleContract.Feedback;
import com.google.samples.apps.iosched.provider.ScheduleContract.HashtagColumns;
import com.google.samples.apps.iosched.provider.ScheduleContract.Hashtags;
import com.google.samples.apps.iosched.provider.ScheduleContract.MyFeedbackSubmitted;
import com.google.samples.apps.iosched.provider.ScheduleContract.MyReservationColumns;
import com.google.samples.apps.iosched.provider.ScheduleContract.MyReservations;
import com.google.samples.apps.iosched.provider.ScheduleContract.MySchedule;
import com.google.samples.apps.iosched.provider.ScheduleContract.MyScheduleColumns;
import com.google.samples.apps.iosched.provider.ScheduleContract.MyViewedVideos;
import com.google.samples.apps.iosched.provider.ScheduleContract.Rooms;
import com.google.samples.apps.iosched.provider.ScheduleContract.SearchSuggest;
import com.google.samples.apps.iosched.provider.ScheduleContract.SearchTopicsSessions;
import com.google.samples.apps.iosched.provider.ScheduleContract.Sessions;
import com.google.samples.apps.iosched.provider.ScheduleContract.Speakers;
import com.google.samples.apps.iosched.provider.ScheduleContract.Tags;
import com.google.samples.apps.iosched.provider.ScheduleContract.VideoColumns;
import com.google.samples.apps.iosched.provider.ScheduleContract.Videos;
import com.google.samples.apps.iosched.provider.ScheduleDatabase.SessionsSearchColumns;
import com.google.samples.apps.iosched.provider.ScheduleDatabase.SessionsSpeakers;
import com.google.samples.apps.iosched.provider.ScheduleDatabase.Tables;
import com.google.samples.apps.iosched.settings.SettingsUtils;
import com.google.samples.apps.iosched.util.AccountUtils;
import com.google.samples.apps.iosched.util.SelectionBuilder;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static com.google.samples.apps.iosched.util.LogUtils.LOGD;
import static com.google.samples.apps.iosched.util.LogUtils.LOGE;
import static com.google.samples.apps.iosched.util.LogUtils.LOGV;
import static com.google.samples.apps.iosched.util.LogUtils.makeLogTag;
/**
* {@link android.content.ContentProvider} that stores {@link ScheduleContract} data. Data is
* usually inserted by {@link com.google.samples.apps.iosched.sync.SyncHelper}, and queried using
* {@link android.support.v4.app.LoaderManager} pattern.
*/
public class ScheduleProvider extends ContentProvider {
private static final String TAG = makeLogTag(ScheduleProvider.class);
private ScheduleDatabase mOpenHelper;
private ScheduleProviderUriMatcher mUriMatcher;
/**
* Providing important state information to be included in bug reports.
*
* !!! Remember !!! Any important data logged to {@code writer} shouldn't contain personally
* identifiable information as it can be seen in bugreports.
*/
@Override
public void dump(FileDescriptor fd, PrintWriter writer, String[] args) {
Context context = getContext();
// Using try/catch block in case there are issues retrieving information to log.
try {
            // Appending in multiple calls is typically better than creating new strings to
            // pass to method invocations.
writer.print("Last sync attempted: ");
writer.println(new java.util.Date(SettingsUtils.getLastSyncAttemptedTime(context)));
writer.print("Last sync successful: ");
writer.println(new java.util.Date(SettingsUtils.getLastSyncSucceededTime(context)));
writer.print("Current sync interval: ");
writer.println(SettingsUtils.getCurSyncInterval(context));
writer.print("Is an account active: ");
writer.println(AccountUtils.hasActiveAccount(context));
boolean canGetAuthToken = !TextUtils.isEmpty(AccountUtils.getAuthToken(context));
writer.print("Can an auth token be retrieved: ");
writer.println(canGetAuthToken);
} catch (Exception exception) {
writer.append("Exception while dumping state: ");
exception.printStackTrace(writer);
}
}
@Override
public boolean onCreate() {
mOpenHelper = new ScheduleDatabase(getContext());
mUriMatcher = new ScheduleProviderUriMatcher();
return true;
}
private void deleteDatabase() {
// TODO: wait for content provider operations to finish, then tear down
mOpenHelper.close();
Context context = getContext();
ScheduleDatabase.deleteDatabase(context);
mOpenHelper = new ScheduleDatabase(getContext());
}
/** {@inheritDoc} */
@Override
public String getType(Uri uri) {
ScheduleUriEnum matchingUriEnum = mUriMatcher.matchUri(uri);
return matchingUriEnum.contentType;
}
/**
* Returns a tuple of question marks. For example, if {@code count} is 3, returns "(?,?,?)".
*/
private String makeQuestionMarkTuple(int count) {
if (count < 1) {
return "()";
}
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("(?");
for (int i = 1; i < count; i++) {
stringBuilder.append(",?");
}
stringBuilder.append(")");
return stringBuilder.toString();
}
/**
* Adds the {@code tagsFilter} query parameter to the given {@code builder}. This query
* parameter is used when the user makes a selection containing multiple filters.
*/
private void addTagsFilter(SelectionBuilder builder, String tagsFilter, String numCategories) {
// Note: for context, remember that session queries are done on a join of sessions
// and the sessions_tags relationship table, and are GROUP'ed BY the session ID.
String[] requiredTags = tagsFilter.split(",");
if (requiredTags.length == 0) {
// filtering by 0 tags -- no-op
return;
} else if (requiredTags.length == 1) {
// filtering by only one tag, so a simple WHERE clause suffices
builder.where(Tags.TAG_ID + "=?", requiredTags[0]);
} else {
// Filtering by multiple tags, so we must add a WHERE clause with an IN operator,
// and add a HAVING statement to exclude groups that fall short of the number
// of required tags. For example, if requiredTags is { "X", "Y", "Z" }, and a certain
// session only has tags "X" and "Y", it will be excluded by the HAVING statement.
int categories = 1;
if (numCategories != null && TextUtils.isDigitsOnly(numCategories)) {
try {
categories = Integer.parseInt(numCategories);
LOGD(TAG, "Categories being used " + categories);
} catch (Exception ex) {
LOGE(TAG, "exception parsing categories ", ex);
}
}
String questionMarkTuple = makeQuestionMarkTuple(requiredTags.length);
builder.where(Tags.TAG_ID + " IN " + questionMarkTuple, requiredTags);
builder.having(
"COUNT(" + Qualified.SESSIONS_SESSION_ID + ") >= " + categories);
}
}
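    // Illustrative example (the tag IDs and category count are made up, not real data):
    // with tagsFilter = "TRACK_ANDROID,TYPE_SESSION" and numCategories = "2", the code
    // above roughly produces
    //     WHERE tag_id IN (?,?)                       -- bound to TRACK_ANDROID, TYPE_SESSION
    //     HAVING COUNT(sessions.session_id) >= 2
    // so only sessions carrying a tag from both required categories survive the grouping.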
/** {@inheritDoc} */
@Override
public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs,
String sortOrder) {
final SQLiteDatabase db = mOpenHelper.getReadableDatabase();
String tagsFilter = uri.getQueryParameter(Sessions.QUERY_PARAMETER_TAG_FILTER);
String categories = uri.getQueryParameter(Sessions.QUERY_PARAMETER_CATEGORIES);
ScheduleUriEnum matchingUriEnum = mUriMatcher.matchUri(uri);
// Avoid the expensive string concatenation below if not loggable.
if (Log.isLoggable(TAG, Log.VERBOSE)) {
LOGV(TAG, "uri=" + uri + " code=" + matchingUriEnum.code + " proj=" +
Arrays.toString(projection) + " selection=" + selection + " args="
+ Arrays.toString(selectionArgs) + ")");
}
switch (matchingUriEnum) {
default: {
// Most cases are handled with simple SelectionBuilder.
final SelectionBuilder builder = buildExpandedSelection(uri, matchingUriEnum.code);
// If a special filter was specified, try to apply it.
if (!TextUtils.isEmpty(tagsFilter) && !TextUtils.isEmpty(categories)) {
addTagsFilter(builder, tagsFilter, categories);
}
boolean distinct = ScheduleContractHelper.isQueryDistinct(uri);
Cursor cursor = builder
.where(selection, selectionArgs)
.query(db, distinct, projection, sortOrder, null);
Context context = getContext();
if (null != context) {
cursor.setNotificationUri(context.getContentResolver(), uri);
}
return cursor;
}
case SEARCH_SUGGEST: {
final SelectionBuilder builder = new SelectionBuilder();
// Adjust incoming query to become SQL text match.
selectionArgs[0] = selectionArgs[0] + "%";
builder.table(Tables.SEARCH_SUGGEST);
builder.where(selection, selectionArgs);
builder.map(SearchManager.SUGGEST_COLUMN_QUERY,
SearchManager.SUGGEST_COLUMN_TEXT_1);
projection = new String[]{
BaseColumns._ID,
SearchManager.SUGGEST_COLUMN_TEXT_1,
SearchManager.SUGGEST_COLUMN_QUERY
};
final String limit = uri.getQueryParameter(SearchManager.SUGGEST_PARAMETER_LIMIT);
return builder.query(db, false, projection, SearchSuggest.DEFAULT_SORT, limit);
}
case SEARCH_TOPICS_SESSIONS: {
if (selectionArgs == null || selectionArgs.length == 0) {
return createMergedSearchCursor(null, null);
}
String selectionArg = selectionArgs[0] == null ? "" : selectionArgs[0];
// First we query the Tags table to find any tags that match the given query
Cursor tags = query(Tags.CONTENT_URI, SearchTopicsSessions.TOPIC_TAG_PROJECTION,
SearchTopicsSessions.TOPIC_TAG_SELECTION,
new String[] {Config.Tags.CATEGORY_TRACK, selectionArg + "%"},
SearchTopicsSessions.TOPIC_TAG_SORT);
// Then we query the sessions_search table and get a list of sessions that match
// the given keywords.
Cursor search = null;
                if (selectionArgs[0] != null) { // don't query if there was no selectionArg.
search = query(ScheduleContract.Sessions.buildSearchUri(selectionArg),
SearchTopicsSessions.SEARCH_SESSIONS_PROJECTION,
null, null,
ScheduleContract.Sessions.SORT_BY_TYPE_THEN_TIME);
}
// Now that we have two cursors, we merge the cursors and return a unified view
// of the two result sets.
return createMergedSearchCursor(tags, search);
}
}
}
/**
* Create a {@link MatrixCursor} given the tags and search cursors.
*
* @param tags Cursor with the projection {@link SearchTopicsSessions#TOPIC_TAG_PROJECTION}.
* @param search Cursor with the projection
* {@link SearchTopicsSessions#SEARCH_SESSIONS_PROJECTION}.
* @return Returns a MatrixCursor always with {@link SearchTopicsSessions#DEFAULT_PROJECTION}
*/
private Cursor createMergedSearchCursor(Cursor tags, Cursor search) {
// How big should our MatrixCursor be?
int maxCount = (tags == null ? 0 : tags.getCount()) +
(search == null ? 0 : search.getCount());
MatrixCursor matrixCursor = new MatrixCursor(
SearchTopicsSessions.DEFAULT_PROJECTION, maxCount);
// Iterate over the tags cursor and add rows.
if (tags != null && tags.moveToFirst()) {
do {
matrixCursor.addRow(
new Object[]{
tags.getLong(0),
tags.getString(1), /*tag_id*/
"{" + tags.getString(2) + "}", /*search_snippet*/
1}); /*is_topic_tag*/
} while (tags.moveToNext());
}
// Iterate over the search cursor and add rows.
if (search != null && search.moveToFirst()) {
do {
matrixCursor.addRow(
new Object[]{
search.getLong(0),
search.getString(1),
search.getString(2), /*search_snippet*/
0}); /*is_topic_tag*/
} while (search.moveToNext());
}
return matrixCursor;
}
/** {@inheritDoc} */
@Override
public Uri insert(Uri uri, ContentValues values) {
LOGV(TAG, "insert(uri=" + uri + ", values=" + values.toString()
+ ", account=" + getCurrentAccountName(uri, false) + ")");
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
ScheduleUriEnum matchingUriEnum = mUriMatcher.matchUri(uri);
if (matchingUriEnum.table != null) {
try {
db.insertOrThrow(matchingUriEnum.table, null, values);
notifyChange(uri);
} catch (SQLiteConstraintException exception) {
                // Leaving this here as it's handy to breakpoint on this throw when debugging a
                // bootstrap file issue.
throw exception;
}
}
switch (matchingUriEnum) {
case BLOCKS: {
return Blocks.buildBlockUri(values.getAsString(Blocks.BLOCK_ID));
}
case CARDS: {
return ScheduleContract.Cards.buildCardUri(values.getAsString(
ScheduleContract.Cards.CARD_ID));
}
case TAGS: {
return Tags.buildTagUri(values.getAsString(Tags.TAG_ID));
}
case ROOMS: {
return Rooms.buildRoomUri(values.getAsString(Rooms.ROOM_ID));
}
case SESSIONS: {
return Sessions.buildSessionUri(values.getAsString(Sessions.SESSION_ID));
}
case SESSIONS_ID_SPEAKERS: {
return Speakers.buildSpeakerUri(values.getAsString(SessionsSpeakers.SPEAKER_ID));
}
case SESSIONS_ID_TAGS: {
return Tags.buildTagUri(values.getAsString(Tags.TAG_ID));
}
case SESSIONS_ID_RELATED: {
values.put(Sessions.SESSION_ID, Sessions.getSessionId(uri));
db.insertOrThrow(Tables.RELATED_SESSIONS, null, values);
notifyChange(uri);
return uri;
}
case MY_SCHEDULE: {
values.put(MySchedule.MY_SCHEDULE_ACCOUNT_NAME, getCurrentAccountName(uri, false));
db.insertOrThrow(Tables.MY_SCHEDULE, null, values);
notifyChange(uri);
Uri sessionUri = Sessions.buildSessionUri(
values.getAsString(MyScheduleColumns.SESSION_ID));
notifyChange(sessionUri);
// Queries for sessions in user's schedule are affected by this change.
notifyChange(Sessions.CONTENT_MY_SCHEDULE_URI);
return sessionUri;
}
case MY_RESERVATIONS: {
values.put(MyReservations.MY_RESERVATION_ACCOUNT_NAME, getCurrentAccountName(uri, false));
db.insertOrThrow(Tables.MY_RESERVATIONS, null, values);
notifyChange(uri);
Uri sessionUri = Sessions.buildSessionUri(
values.getAsString(MyReservationColumns.SESSION_ID));
notifyChange(sessionUri);
// Queries for sessions in user's schedule are affected by this change.
notifyChange(Sessions.CONTENT_MY_SCHEDULE_URI);
return sessionUri;
}
case MY_VIEWED_VIDEOS: {
values.put(MyViewedVideos.MY_VIEWED_VIDEOS_ACCOUNT_NAME,
getCurrentAccountName(uri, false));
db.insertOrThrow(Tables.MY_VIEWED_VIDEO, null, values);
notifyChange(uri);
Uri videoUri = Videos.buildVideoUri(
values.getAsString(MyViewedVideos.VIDEO_ID));
notifyChange(videoUri);
return videoUri;
}
case MY_FEEDBACK_SUBMITTED: {
values.put(MyFeedbackSubmitted.MY_FEEDBACK_SUBMITTED_ACCOUNT_NAME,
getCurrentAccountName(uri, false));
db.insertOrThrow(Tables.MY_FEEDBACK_SUBMITTED, null, values);
notifyChange(uri);
Uri sessionUri = Sessions.buildSessionUri(
values.getAsString(MyFeedbackSubmitted.SESSION_ID));
notifyChange(sessionUri);
return sessionUri;
}
case SPEAKERS: {
return Speakers.buildSpeakerUri(values.getAsString(Speakers.SPEAKER_ID));
}
case ANNOUNCEMENTS: {
return Announcements.buildAnnouncementUri(values
.getAsString(Announcements.ANNOUNCEMENT_ID));
}
case SEARCH_SUGGEST: {
return SearchSuggest.CONTENT_URI;
}
case MAPGEOJSON: {
return ScheduleContract.MapGeoJson.buildGeoJsonUri();
}
case MAPTILES: {
return ScheduleContract.MapTiles.buildFloorUri(values.getAsString(
ScheduleContract.MapTiles.TILE_FLOOR));
}
case FEEDBACK_FOR_SESSION: {
return Feedback.buildFeedbackUri(values.getAsString(Feedback.SESSION_ID));
}
case HASHTAGS: {
return Hashtags.buildHashtagUri(values.getAsString(Hashtags.HASHTAG_NAME));
}
case VIDEOS: {
return Videos.buildVideoUri(values.getAsString(Videos.VIDEO_ID));
}
default: {
throw new UnsupportedOperationException("Unknown insert uri: " + uri);
}
}
}
/** {@inheritDoc} */
@Override
public int update(Uri uri, ContentValues values, String selection, String[] selectionArgs) {
String accountName = getCurrentAccountName(uri, false);
Uri notifyUri = null;
LOGV(TAG, "update(uri=" + uri + ", values=" + values.toString()
+ ", account=" + accountName + ")");
boolean isAccountUpdateAllowed = ScheduleContractHelper.isAccountUpdateAllowed(uri);
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
ScheduleUriEnum matchingUriEnum = mUriMatcher.matchUri(uri);
if (matchingUriEnum == ScheduleUriEnum.SEARCH_INDEX) {
// update the search index
ScheduleDatabase.updateSessionSearchIndex(db);
return 1;
}
final SelectionBuilder builder = buildSimpleSelection(uri);
if (matchingUriEnum == ScheduleUriEnum.SESSIONS_ID_RELATED) {
// update not supported
return 0;
}
if (matchingUriEnum == ScheduleUriEnum.MY_SCHEDULE) {
if (!isAccountUpdateAllowed) {
values.remove(MySchedule.MY_SCHEDULE_ACCOUNT_NAME);
}
builder.where(MySchedule.MY_SCHEDULE_ACCOUNT_NAME + "=?", accountName);
// Also notify session listeners.
notifyUri = Sessions.CONTENT_MY_SCHEDULE_URI;
}
if (matchingUriEnum == ScheduleUriEnum.MY_RESERVATIONS) {
values.remove(MyReservations.MY_RESERVATION_ACCOUNT_NAME);
builder.where(MyReservations.MY_RESERVATION_ACCOUNT_NAME + "=?", accountName);
}
if (matchingUriEnum == ScheduleUriEnum.MY_VIEWED_VIDEOS) {
values.remove(MyViewedVideos.MY_VIEWED_VIDEOS_ACCOUNT_NAME);
builder.where(MyViewedVideos.MY_VIEWED_VIDEOS_ACCOUNT_NAME + "=?", accountName);
}
if (matchingUriEnum == ScheduleUriEnum.MY_FEEDBACK_SUBMITTED) {
if (!isAccountUpdateAllowed) {
values.remove(MyFeedbackSubmitted.MY_FEEDBACK_SUBMITTED_ACCOUNT_NAME);
}
builder.where(MyFeedbackSubmitted.MY_FEEDBACK_SUBMITTED_ACCOUNT_NAME + "=?",
accountName);
}
int retVal = builder.where(selection, selectionArgs).update(db, values);
if (retVal > 0) {
notifyChange(uri);
if (null != notifyUri) notifyChange(notifyUri);
}
return retVal;
}
/** {@inheritDoc} */
@Override
public int delete(Uri uri, String selection, String[] selectionArgs) {
String accountName = getCurrentAccountName(uri, false);
LOGV(TAG, "delete(uri=" + uri + ", account=" + accountName + ")");
if (uri == ScheduleContract.BASE_CONTENT_URI) {
// Handle whole database deletes (e.g. when signing out)
deleteDatabase();
notifyChange(uri);
return 1;
}
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final SelectionBuilder builder = buildSimpleSelection(uri);
ScheduleUriEnum matchingUriEnum = mUriMatcher.matchUri(uri);
if (matchingUriEnum == ScheduleUriEnum.MY_SCHEDULE) {
builder.where(MySchedule.MY_SCHEDULE_ACCOUNT_NAME + "=?", accountName);
}
if (matchingUriEnum == ScheduleUriEnum.MY_RESERVATIONS) {
builder.where(MyReservations.MY_RESERVATION_ACCOUNT_NAME + "=?", accountName);
}
if (matchingUriEnum == ScheduleUriEnum.MY_VIEWED_VIDEOS) {
builder.where(MyViewedVideos.MY_VIEWED_VIDEOS_ACCOUNT_NAME + "=?", accountName);
}
if (matchingUriEnum == ScheduleUriEnum.MY_FEEDBACK_SUBMITTED) {
builder.where(
MyFeedbackSubmitted.MY_FEEDBACK_SUBMITTED_ACCOUNT_NAME + "=?", accountName);
}
int retVal = builder.where(selection, selectionArgs).delete(db);
if (retVal > 0) {
notifyChange(uri);
}
return retVal;
}
/**
* Notifies the system that the given {@code uri} data has changed.
* <p/>
     * We only notify changes if the uri wasn't called by the sync adapter, to avoid issuing a large
     * number of notifications while doing a sync. The
* {@link com.google.samples.apps.iosched.sync.ConferenceDataHandler} notifies all top level
* conference paths once the conference data sync is done, and the
* {@link com.google.samples.apps.iosched.sync.userdata.AbstractUserDataSyncHelper} notifies all
* user data related paths once the user data sync is done.
*/
private void notifyChange(Uri uri) {
if (!ScheduleContractHelper.isUriCalledFromSyncAdapter(uri)) {
Context context = getContext();
context.getContentResolver().notifyChange(uri, null);
// Widgets can't register content observers so we refresh widgets separately.
context.sendBroadcast(ScheduleWidgetProvider.getRefreshBroadcastIntent(context, false));
}
}
/**
* Apply the given set of {@link ContentProviderOperation}, executing inside
* a {@link SQLiteDatabase} transaction. All changes will be rolled back if
* any single one fails.
*/
@Override
public ContentProviderResult[] applyBatch(ArrayList<ContentProviderOperation> operations)
throws OperationApplicationException {
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
db.beginTransaction();
try {
final int numOperations = operations.size();
final ContentProviderResult[] results = new ContentProviderResult[numOperations];
for (int i = 0; i < numOperations; i++) {
results[i] = operations.get(i).apply(this, results, i);
}
db.setTransactionSuccessful();
return results;
} finally {
db.endTransaction();
}
}
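    // Illustrative client-side usage of applyBatch (a sketch only; the tag value is made
    // up and error handling is omitted):
    //     ArrayList<ContentProviderOperation> ops = new ArrayList<>();
    //     ops.add(ContentProviderOperation.newInsert(ScheduleContract.Tags.CONTENT_URI)
    //             .withValue(ScheduleContract.Tags.TAG_ID, "TAG_EXAMPLE")
    //             .build());
    //     getContentResolver().applyBatch(ScheduleContract.CONTENT_AUTHORITY, ops);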
/**
* Build a simple {@link SelectionBuilder} to match the requested
* {@link Uri}. This is usually enough to support {@link #insert},
* {@link #update}, and {@link #delete} operations.
*/
private SelectionBuilder buildSimpleSelection(Uri uri) {
final SelectionBuilder builder = new SelectionBuilder();
ScheduleUriEnum matchingUriEnum = mUriMatcher.matchUri(uri);
        // The main Uris, corresponding to the root of each type of Uri, do not have any selection
        // criteria so the full table is used. The others apply specific selection criteria.
switch (matchingUriEnum) {
case BLOCKS:
case CARDS:
case TAGS:
case ROOMS:
case SESSIONS:
case SPEAKERS:
case ANNOUNCEMENTS:
case MAPGEOJSON:
case MAPTILES:
case SEARCH_SUGGEST:
case HASHTAGS:
case VIDEOS:
return builder.table(matchingUriEnum.table);
case BLOCKS_ID: {
final String blockId = Blocks.getBlockId(uri);
return builder.table(Tables.BLOCKS)
.where(Blocks.BLOCK_ID + "=?", blockId);
}
case TAGS_ID: {
final String tagId = Tags.getTagId(uri);
return builder.table(Tables.TAGS)
.where(Tags.TAG_ID + "=?", tagId);
}
case ROOMS_ID: {
final String roomId = Rooms.getRoomId(uri);
return builder.table(Tables.ROOMS)
.where(Rooms.ROOM_ID + "=?", roomId);
}
case SESSIONS_ID: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.SESSIONS)
.where(Sessions.SESSION_ID + "=?", sessionId);
}
case SESSIONS_ID_SPEAKERS: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.SESSIONS_SPEAKERS)
.where(Sessions.SESSION_ID + "=?", sessionId);
}
case SESSIONS_ID_TAGS: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.SESSIONS_TAGS)
.where(Sessions.SESSION_ID + "=?", sessionId);
}
case SESSIONS_ID_RELATED: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.RELATED_SESSIONS)
.where(Sessions.SESSION_ID + "=?", sessionId);
}
case SESSIONS_MY_SCHEDULE: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.MY_SCHEDULE)
.where(ScheduleContract.MyScheduleColumns.SESSION_ID + "=?", sessionId);
}
case MY_SCHEDULE: {
return builder.table(Tables.MY_SCHEDULE)
.where(MySchedule.MY_SCHEDULE_ACCOUNT_NAME + "=?",
getCurrentAccountName(uri, false));
}
case MY_RESERVATIONS: {
return builder.table(Tables.MY_RESERVATIONS)
.where(MyReservations.MY_RESERVATION_ACCOUNT_NAME + "=?",
getCurrentAccountName(uri, false));
}
case MY_VIEWED_VIDEOS: {
return builder.table(Tables.MY_VIEWED_VIDEO)
.where(MyViewedVideos.MY_VIEWED_VIDEOS_ACCOUNT_NAME + "=?",
getCurrentAccountName(uri, false));
}
case MY_FEEDBACK_SUBMITTED: {
return builder.table(Tables.MY_FEEDBACK_SUBMITTED)
.where(MyFeedbackSubmitted.MY_FEEDBACK_SUBMITTED_ACCOUNT_NAME + "=?",
getCurrentAccountName(uri, false));
}
case SPEAKERS_ID: {
final String speakerId = Speakers.getSpeakerId(uri);
return builder.table(Tables.SPEAKERS)
.where(Speakers.SPEAKER_ID + "=?", speakerId);
}
case ANNOUNCEMENTS_ID: {
final String announcementId = Announcements.getAnnouncementId(uri);
return builder.table(Tables.ANNOUNCEMENTS)
.where(Announcements.ANNOUNCEMENT_ID + "=?", announcementId);
}
case FEEDBACK_FOR_SESSION: {
final String session_id = Feedback.getSessionId(uri);
return builder.table(Tables.FEEDBACK)
.where(Feedback.SESSION_ID + "=?", session_id);
}
case FEEDBACK_ALL: {
return builder.table(Tables.FEEDBACK);
}
case HASHTAGS_NAME: {
final String hashtagName = Hashtags.getHashtagName(uri);
return builder.table(Tables.HASHTAGS)
.where(Hashtags.HASHTAG_NAME + "=?", hashtagName);
}
case VIDEOS_ID: {
final String videoId = Videos.getVideoId(uri);
return builder.table(Tables.VIDEOS).where(Videos.VIDEO_ID + "=?", videoId);
}
default: {
throw new UnsupportedOperationException("Unknown uri for " + uri);
}
}
}
private String getCurrentAccountName(Uri uri, boolean sanitize) {
String accountName = ScheduleContractHelper.getOverrideAccountName(uri);
if (accountName == null) {
accountName = AccountUtils.getActiveAccountName(getContext());
}
if (sanitize) {
// sanitize accountName when concatenating (http://xkcd.com/327/)
accountName = (accountName != null) ? accountName.replace("'", "''") : null;
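            // e.g. "o'hara@example.com" becomes "o''hara@example.com" (illustrative value).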
}
return accountName;
}
/**
* Build an advanced {@link SelectionBuilder} to match the requested
* {@link Uri}. This is usually only used by {@link #query}, since it
* performs table joins useful for {@link Cursor} data.
*/
private SelectionBuilder buildExpandedSelection(Uri uri, int match) {
final SelectionBuilder builder = new SelectionBuilder();
ScheduleUriEnum matchingUriEnum = mUriMatcher.matchCode(match);
if (matchingUriEnum == null) {
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
String accountName = getCurrentAccountName(uri, true);
switch (matchingUriEnum) {
case BLOCKS: {
return builder.table(Tables.BLOCKS);
}
case BLOCKS_BETWEEN: {
final List<String> segments = uri.getPathSegments();
final String startTime = segments.get(2);
final String endTime = segments.get(3);
return builder.table(Tables.BLOCKS)
.where(Blocks.BLOCK_START + ">=?", startTime)
.where(Blocks.BLOCK_START + "<=?", endTime);
}
case BLOCKS_ID: {
final String blockId = Blocks.getBlockId(uri);
return builder.table(Tables.BLOCKS)
.where(Blocks.BLOCK_ID + "=?", blockId);
}
case CARDS: {
return builder.table(Tables.CARDS);
}
case TAGS: {
return builder.table(Tables.TAGS);
}
case TAGS_ID: {
final String tagId = Tags.getTagId(uri);
return builder.table(Tables.TAGS)
.where(Tags.TAG_ID + "=?", tagId);
}
case ROOMS: {
return builder.table(Tables.ROOMS);
}
case ROOMS_ID: {
final String roomId = Rooms.getRoomId(uri);
return builder.table(Tables.ROOMS)
.where(Rooms.ROOM_ID + "=?", roomId);
}
case ROOMS_ID_SESSIONS: {
final String roomId = Rooms.getRoomId(uri);
return builder.table(Tables.SESSIONS_JOIN_ROOMS, accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.where(Qualified.SESSIONS_ROOM_ID + "=?", roomId)
.groupBy(Qualified.SESSIONS_SESSION_ID);
}
case SESSIONS: {
// We query sessions on the joined table of sessions with rooms and tags.
// Since there may be more than one tag per session, we GROUP BY session ID.
// The starred sessions ("my schedule") are associated with a user, so we
// use the current user to select them properly. Reserved sessions are handled
// similarly.
return builder
.table(Tables.SESSIONS_JOIN_ROOMS_TAGS, accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + MyReservations.
MY_RESERVATION_STATUS + ", -1)")
.groupBy(Qualified.SESSIONS_SESSION_ID);
}
case SESSIONS_COUNTER: {
return builder
.table(Tables.SESSIONS_JOIN_MYSCHEDULE, accountName, accountName)
.map(Sessions.SESSION_INTERVAL_COUNT, "count(1)")
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + MyReservations.
MY_RESERVATION_STATUS + ", -1)")
.groupBy(Sessions.SESSION_START + ", " + Sessions.SESSION_END);
}
case SESSIONS_MY_SCHEDULE: {
return builder.table(Tables.SESSIONS_JOIN_ROOMS_TAGS_FEEDBACK_MYSCHEDULE,
accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.map(Sessions.HAS_GIVEN_FEEDBACK, Subquery.SESSION_HAS_GIVEN_FEEDBACK)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + MyReservations.
MY_RESERVATION_STATUS + ", -1)")
.where(Sessions.IN_SCHEDULE_SELECTION)
.groupBy(Qualified.SESSIONS_SESSION_ID);
}
case SESSIONS_UNSCHEDULED: {
final long[] interval = Sessions.getInterval(uri);
return builder.table(Tables.SESSIONS_JOIN_ROOMS_TAGS_FEEDBACK_MYSCHEDULE,
accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + MyReservations.
MY_RESERVATION_STATUS + ", -1)")
.where(Sessions.NOT_IN_SCHEDULE_SELECTION)
.where(Sessions.SESSION_START + ">=?", String.valueOf(interval[0]))
.where(Sessions.SESSION_START + "<?", String.valueOf(interval[1]))
.groupBy(Qualified.SESSIONS_SESSION_ID);
}
case SESSIONS_SEARCH: {
final String query = Sessions.getSearchQuery(uri);
return builder.table(Tables.SESSIONS_SEARCH_JOIN_SESSIONS_ROOMS,
accountName, accountName)
.map(Sessions.SEARCH_SNIPPET, Subquery.SESSIONS_SNIPPET)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + MyReservations.
MY_RESERVATION_STATUS + ", -1)")
.where(SessionsSearchColumns.BODY + " MATCH ?", query);
}
case SESSIONS_AT: {
final List<String> segments = uri.getPathSegments();
final String time = segments.get(2);
return builder.table(Tables.SESSIONS_JOIN_ROOMS, accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.where(Sessions.SESSION_START + "<=?", time)
.where(Sessions.SESSION_END + ">=?", time);
}
case SESSIONS_ID: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.SESSIONS_JOIN_ROOMS, accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + MyReservations.
MY_RESERVATION_STATUS + ", -1)")
.where(Qualified.SESSIONS_SESSION_ID + "=?", sessionId);
}
case SESSIONS_ID_SPEAKERS: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.SESSIONS_SPEAKERS_JOIN_SPEAKERS)
.mapToTable(Speakers._ID, Tables.SPEAKERS)
.mapToTable(Speakers.SPEAKER_ID, Tables.SPEAKERS)
.where(Qualified.SESSIONS_SPEAKERS_SESSION_ID + "=?", sessionId);
}
case SESSIONS_ID_TAGS: {
final String sessionId = Sessions.getSessionId(uri);
return builder.table(Tables.SESSIONS_TAGS_JOIN_TAGS)
.mapToTable(Tags._ID, Tables.TAGS)
.mapToTable(Tags.TAG_ID, Tables.TAGS)
.where(Qualified.SESSIONS_TAGS_SESSION_ID + "=?", sessionId);
}
case SESSIONS_ID_RELATED: {
final String sessionId = Sessions.getSessionId(uri);
return builder
.table(Tables.SESSIONS_JOIN_ROOMS_TAGS, accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + ScheduleContract
.MyReservations.MY_RESERVATION_STATUS + ", -1)")
.map(Sessions.HAS_GIVEN_FEEDBACK, Subquery.SESSION_HAS_GIVEN_FEEDBACK)
.where(Subquery.RELATED_SESSIONS_SELECTION, sessionId)
.groupBy(Qualified.SESSIONS_SESSION_ID);
}
case SESSIONS_ROOM_AFTER: {
final String room = Sessions.getRoom(uri);
final String time = Sessions.getAfterForRoom(uri);
return builder.table(Tables.SESSIONS_JOIN_ROOMS_TAGS, accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.where(Qualified.SESSIONS_ROOM_ID + "=?", room)
.where("(" + Sessions.SESSION_START + "<= ? AND " + Sessions.SESSION_END +
" >= ?) OR (" + Sessions.SESSION_START + " >= ?)", time,
time,
time)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + ScheduleContract
.MyReservations.MY_RESERVATION_STATUS + ", -1)")
.groupBy(Qualified.SESSIONS_SESSION_ID);
}
case SESSIONS_AFTER: {
final String time = Sessions.getAfter(uri);
return builder.table(Tables.SESSIONS_JOIN_ROOMS_TAGS, accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + ScheduleContract
.MyReservations.MY_RESERVATION_STATUS + ", -1)")
.where("(" + Sessions.SESSION_START + "<= ? AND " + Sessions.SESSION_END +
" >= ?) OR (" + Sessions.SESSION_START + " >= ?)", time,
time, time)
.groupBy(Qualified.SESSIONS_SESSION_ID);
}
case SPEAKERS: {
return builder.table(Tables.SPEAKERS);
}
case MY_SCHEDULE: {
                // Force a where condition to avoid leaking schedule info to another account.
                // Note that, since SelectionBuilder always joins multiple where calls using AND,
                // even malicious code specifying additional conditions on account_name won't
                // be able to fetch data from a different account.
return builder.table(Tables.MY_SCHEDULE)
.where(MySchedule.MY_SCHEDULE_ACCOUNT_NAME + "=?",
accountName);
}
case MY_RESERVATIONS: {
                // Force a where condition to avoid leaking reservation info to another account.
                // Note that, since SelectionBuilder always joins multiple where calls using AND,
                // even malicious code specifying additional conditions on account_name won't
                // be able to fetch data from a different account.
return builder.table(Tables.MY_RESERVATIONS)
.where(MyReservations.MY_RESERVATION_ACCOUNT_NAME + "=?",
accountName);
}
case MY_FEEDBACK_SUBMITTED: {
                // Force a where condition to avoid leaking feedback info to another account.
                // Note that, since SelectionBuilder always joins multiple where calls using AND,
                // even malicious code specifying additional conditions on account_name won't
                // be able to fetch data from a different account.
return builder.table(Tables.MY_FEEDBACK_SUBMITTED)
.where(MyFeedbackSubmitted.MY_FEEDBACK_SUBMITTED_ACCOUNT_NAME + "=?",
accountName);
}
case MY_VIEWED_VIDEOS: {
                // Force a where condition to avoid leaking viewed-video info to another account.
                // Note that, since SelectionBuilder always joins multiple where calls using AND,
                // even malicious code specifying additional conditions on account_name won't
                // be able to fetch data from a different account.
return builder.table(Tables.MY_VIEWED_VIDEO)
.where(MyViewedVideos.MY_VIEWED_VIDEOS_ACCOUNT_NAME + "=?",
accountName);
}
case SPEAKERS_ID: {
final String speakerId = Speakers.getSpeakerId(uri);
return builder.table(Tables.SPEAKERS)
.where(Speakers.SPEAKER_ID + "=?", speakerId);
}
case SPEAKERS_ID_SESSIONS: {
final String speakerId = Speakers.getSpeakerId(uri);
return builder.table(Tables.SESSIONS_SPEAKERS_JOIN_SESSIONS_ROOMS,
accountName, accountName)
.mapToTable(Sessions._ID, Tables.SESSIONS)
.mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
.mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
.map(Sessions.SESSION_IN_MY_SCHEDULE, "IFNULL(in_schedule, 0)")
.map(Sessions.SESSION_RESERVATION_STATUS, "IFNULL(" + MyReservations.
MY_RESERVATION_STATUS + ", -1)")
.where(Qualified.SESSIONS_SPEAKERS_SPEAKER_ID + "=?", speakerId);
}
case ANNOUNCEMENTS: {
return builder.table(Tables.ANNOUNCEMENTS);
}
case ANNOUNCEMENTS_ID: {
final String announcementId = Announcements.getAnnouncementId(uri);
return builder.table(Tables.ANNOUNCEMENTS)
.where(Announcements.ANNOUNCEMENT_ID + "=?", announcementId);
}
case MAPGEOJSON: {
return builder.table(Tables.MAPGEOJSON);
}
case MAPTILES: {
return builder.table(Tables.MAPTILES);
}
case FEEDBACK_FOR_SESSION: {
final String sessionId = Feedback.getSessionId(uri);
return builder.table(Tables.FEEDBACK)
.where(Feedback.SESSION_ID + "=?", sessionId);
}
case FEEDBACK_ALL: {
return builder.table(Tables.FEEDBACK);
}
case HASHTAGS: {
return builder.table(Tables.HASHTAGS);
}
case HASHTAGS_NAME: {
final String hashtagName = Hashtags.getHashtagName(uri);
return builder.table(Tables.HASHTAGS)
.where(HashtagColumns.HASHTAG_NAME + "=?", hashtagName);
}
case VIDEOS: {
return builder.table(Tables.VIDEOS);
}
case VIDEOS_ID: {
final String videoId = Videos.getVideoId(uri);
return builder.table(Tables.VIDEOS)
.where(VideoColumns.VIDEO_ID + "=?", videoId);
}
default: {
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
}
}
@Override
public ParcelFileDescriptor openFile(Uri uri, String mode) throws FileNotFoundException {
throw new UnsupportedOperationException("openFile is not supported for " + uri);
}
private interface Subquery {
String SESSION_HAS_GIVEN_FEEDBACK = "(SELECT COUNT(1) FROM "
+ Tables.FEEDBACK + " WHERE " + Qualified.FEEDBACK_SESSION_ID + "="
+ Qualified.SESSIONS_SESSION_ID + ")";
String SESSIONS_SNIPPET = "snippet(" + Tables.SESSIONS_SEARCH + ",'{','}','\u2026')";
String RELATED_SESSIONS_SELECTION = Qualified.SESSIONS_SESSION_ID + " IN (SELECT "
+ Sessions.RELATED_SESSION_ID + " FROM " + Tables.RELATED_SESSIONS + " WHERE "
+ Sessions.SESSION_ID + " = ?)";
}
/**
* {@link ScheduleContract} fields that are fully qualified with a specific
* parent {@link Tables}. Used when needed to work around SQL ambiguity.
*/
private interface Qualified {
String SESSIONS_SESSION_ID = Tables.SESSIONS + "." + Sessions.SESSION_ID;
String SESSIONS_ROOM_ID = Tables.SESSIONS + "." + Sessions.ROOM_ID;
String SESSIONS_TAGS_SESSION_ID = Tables.SESSIONS_TAGS + "."
+ ScheduleDatabase.SessionsTags.SESSION_ID;
String SESSIONS_SPEAKERS_SESSION_ID = Tables.SESSIONS_SPEAKERS + "."
+ SessionsSpeakers.SESSION_ID;
String SESSIONS_SPEAKERS_SPEAKER_ID = Tables.SESSIONS_SPEAKERS + "."
+ SessionsSpeakers.SPEAKER_ID;
String FEEDBACK_SESSION_ID = Tables.FEEDBACK + "." + Feedback.SESSION_ID;
}
}
| WeRockStar/iosched | lib/src/main/java/com/google/samples/apps/iosched/provider/ScheduleProvider.java | Java | apache-2.0 | 51,535 |
/*
* Copyright 2014-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import subscribing from '@/mixins/subscribing';
import {timer} from '@/utils/rxjs';
import moment from 'moment';
export default {
props: ['value'],
mixins: [subscribing],
data: () => ({
startTs: null,
offset: null
}),
render() {
return this._v(this.clock);
},
computed: {
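    // Illustrative: with value + offset = 90061000 ms the clock reads "1d 1h 1m 1s".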
clock() {
if (!this.value) {
return null;
}
const duration = moment.duration(this.value + this.offset);
return `${Math.floor(duration.asDays())}d ${duration.hours()}h ${duration.minutes()}m ${duration.seconds()}s`;
}
},
watch: {
value: 'subscribe'
},
methods: {
createSubscription() {
if (this.value) {
const vm = this;
vm.startTs = moment();
vm.offset = 0;
return timer(0, 1000).subscribe({
next: () => {
vm.offset = moment().valueOf() - vm.startTs.valueOf();
}
})
}
}
}
}
| codecentric/spring-boot-admin | spring-boot-admin-server-ui/src/main/frontend/views/instances/details/process-uptime.js | JavaScript | apache-2.0 | 1,540 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ngrinder.script.handler;
import freemarker.template.Configuration;
import freemarker.template.DefaultObjectWrapper;
import freemarker.template.Template;
import org.apache.commons.io.FilenameUtils;
import org.ngrinder.common.constant.ControllerConstants;
import org.ngrinder.common.util.FileUtils;
import org.ngrinder.common.util.PathUtils;
import org.ngrinder.common.util.PropertiesWrapper;
import org.ngrinder.model.User;
import org.ngrinder.script.model.FileEntry;
import org.ngrinder.script.model.FileType;
import org.ngrinder.script.repository.FileEntryRepository;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.ClassPathResource;
import java.io.File;
import java.io.StringWriter;
import java.util.List;
import java.util.Map;
import static org.apache.commons.lang.StringUtils.startsWithIgnoreCase;
import static org.ngrinder.common.util.CollectionUtils.newArrayList;
import static org.ngrinder.common.util.ExceptionUtils.processException;
/**
 * Per-language script handler. This is the superclass for all sub
 * {@link ScriptHandler}s, which implement the specific processing of each
 * language.
*
* @author JunHo Yoon
* @since 3.2
*/
public abstract class ScriptHandler implements ControllerConstants {
protected static final Logger LOGGER = LoggerFactory.getLogger(JythonScriptHandler.class);
private final String codemirrorKey;
private final String title;
private final String extension;
private final String key;
/**
* Constructor.
*
* @param key key of the script handler
* @param extension extension
* @param title title of the handler
* @param codeMirrorKey code mirror key
*/
public ScriptHandler(String key, String extension, String title, String codeMirrorKey) {
this.key = key;
this.extension = extension;
this.title = title;
this.codemirrorKey = codeMirrorKey;
}
@Autowired
private FileEntryRepository fileEntryRepository;
/**
* Get the display order of {@link ScriptHandler}s.
*
* @return order
*/
public abstract Integer displayOrder();
public String getCodemirrorKey() {
return codemirrorKey;
}
/**
* Check if the given fileEntry can be handled by this handler.
*
* @param fileEntry fileEntry to be checked
* @return true if the given fileEntry can be handled
*/
public boolean canHandle(FileEntry fileEntry) {
return FilenameUtils.isExtension(fileEntry.getPath(), getExtension());
}
public String getExtension() {
return extension;
}
/**
* Get the handler resolution order.
* <p/>
	 * A lower value means a higher priority.
*
* @return the order of handler resolution
*/
protected abstract Integer order();
@SuppressWarnings("SpellCheckingInspection")
public boolean isValidatable() {
return true;
}
/**
* Return if it's project handler which implements {@link ProjectHandler}.
*
* @return true if it is.
*/
@SuppressWarnings("UnusedDeclaration")
public boolean isProjectHandler() {
return (this instanceof ProjectHandler);
}
/**
* Prepare the distribution.
*
* @param testCaseId id of the test case. This is for the log identification.
* @param user user who will distribute the script.
* @param scriptEntry script to be distributed.
* @param distDir distribution target dir.
* @param properties properties set which is used for detailed distribution control.
* @param processingResult processing result holder.
*/
public void prepareDist(Long testCaseId,
User user, //
FileEntry scriptEntry, File distDir, PropertiesWrapper properties,
ProcessingResultPrintStream processingResult) {
prepareDefaultFile(distDir, properties);
List<FileEntry> fileEntries = getLibAndResourceEntries(user, scriptEntry, -1);
if (scriptEntry.getRevision() != 0) {
fileEntries.add(scriptEntry);
}
String basePath = getBasePath(scriptEntry);
// Distribute each files in that folder.
for (FileEntry each : fileEntries) {
// Directory is not subject to be distributed.
if (each.getFileType() == FileType.DIR) {
continue;
}
File toDir = new File(distDir, calcDistSubPath(basePath, each));
processingResult.printf("%s is being written.\n", each.getPath());
LOGGER.info("{} is being written in {} for test {}", new Object[]{each.getPath(), toDir, testCaseId});
getFileEntryRepository().writeContentTo(user, each.getPath(), toDir);
}
processingResult.setSuccess(true);
prepareDistMore(testCaseId, user, scriptEntry, distDir, properties, processingResult);
}
/**
	 * Prepare script creation. This method may be extended by
	 * subclasses.
	 * <p/>
	 * This method is the right place to include any additional files
	 * that are necessary.
*
* @param user user
* @param path base path
* @param fileName fileName
* @param name name
* @param url url
* @param createLibAndResources true if lib and resources should be created
* @return true if process more.
*/
public boolean prepareScriptEnv(User user, String path, String fileName, String name, String url,
boolean createLibAndResources, String scriptContent) {
return true;
}
/**
	 * Perform additional distribution preparation. This method may be extended
	 * by subclasses.
*
* @param testCaseId test case id. This is for the log identification.
* @param user user
* @param script script entry to be distributed.
* @param distDir distribution directory
* @param properties properties
* @param processingResult processing result holder
*/
protected void prepareDistMore(Long testCaseId, User user, FileEntry script, File distDir,
PropertiesWrapper properties, ProcessingResultPrintStream processingResult) {
}
/**
	 * Get the appropriate distribution path for the given file entry.
*
* @param basePath distribution base path
* @param fileEntry fileEntry to be distributed
* @return the resolved destination path.
*/
protected String calcDistSubPath(String basePath, FileEntry fileEntry) {
String path = FilenameUtils.getPath(fileEntry.getPath());
path = path.substring(basePath.length());
return path;
}
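	// Illustrative only (the paths are made up): with a base path of "svn/user/script/"
	// and a file entry at "svn/user/script/resources/data.txt", this returns "resources/".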
/**
* Get all resources and lib entries belonging to the given user and
* scriptEntry.
*
* @param user user
* @param scriptEntry script entry
* @param revision revision of the script entry.
* @return file entry list
*/
public List<FileEntry> getLibAndResourceEntries(User user, FileEntry scriptEntry, long revision) {
String path = FilenameUtils.getPath(scriptEntry.getPath());
List<FileEntry> fileList = newArrayList();
for (FileEntry eachFileEntry : getFileEntryRepository().findAll(user, path + "lib/", revision, true)) {
// Skip jython 2.5... it's already included.
if (startsWithIgnoreCase(eachFileEntry.getFileName(), "jython-2.5.")
|| startsWithIgnoreCase(eachFileEntry.getFileName(), "jython-standalone-2.5.")) {
continue;
}
FileType fileType = eachFileEntry.getFileType();
if (fileType.isLibDistributable()) {
fileList.add(eachFileEntry);
}
}
for (FileEntry eachFileEntry : getFileEntryRepository().findAll(user, path + "resources/", revision, true)) {
FileType fileType = eachFileEntry.getFileType();
if (fileType.isResourceDistributable()) {
fileList.add(eachFileEntry);
}
}
return fileList;
}
protected void prepareDefaultFile(File distDir, PropertiesWrapper properties) {
if (properties.getPropertyBoolean(PROP_CONTROLLER_DIST_LOGBACK)) {
FileUtils.copyResourceToFile("/logback/logback-worker.xml", new File(distDir, "logback-worker.xml"));
}
}
protected String getBasePath(FileEntry script) {
return getBasePath(script.getPath());
}
/**
* Get the base path of the given path.
*
* @param path path
* @return base path
*/
public String getBasePath(String path) {
return FilenameUtils.getPath(path);
}
/**
* Get executable script path.
*
* @param svnPath path in svn
	 * @return the path used to execute the script on the agent.
*/
public String getScriptExecutePath(String svnPath) {
return FilenameUtils.getName(svnPath);
}
/**
* Check syntax errors for the given content.
*
* @param path path
* @param content content
* @return syntax error messages. null if none.
*/
public abstract String checkSyntaxErrors(String path, String content);
/**
* Get the initial script with the given value map.
*
* @param values map of initial script referencing values.
* @return generated string
*/
public String getScriptTemplate(Map<String, Object> values) {
try {
Configuration freemarkerConfig = new Configuration();
ClassPathResource cpr = new ClassPathResource("script_template");
freemarkerConfig.setDirectoryForTemplateLoading(cpr.getFile());
freemarkerConfig.setObjectWrapper(new DefaultObjectWrapper());
Template template = freemarkerConfig.getTemplate("basic_template_" + getExtension() + ".ftl");
StringWriter writer = new StringWriter();
template.process(values, writer);
return writer.toString();
} catch (Exception e) {
throw processException("Error while fetching the script template.", e);
}
}
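	// Illustrative only -- the template variable name below is an assumption made for the
	// sake of the example, not taken from the bundled templates:
	//     Map<String, Object> values = new HashMap<>();
	//     values.put("name", "sample_test");
	//     String script = handler.getScriptTemplate(values);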
public String getTitle() {
return title;
}
public String getKey() {
return key;
}
FileEntryRepository getFileEntryRepository() {
return fileEntryRepository;
}
void setFileEntryRepository(FileEntryRepository fileEntryRepository) {
this.fileEntryRepository = fileEntryRepository;
}
/**
* Get the default quick test file.
*
* @param basePath base path
* @return quick test file
*/
public FileEntry getDefaultQuickTestFilePath(String basePath) {
FileEntry fileEntry = new FileEntry();
fileEntry.setPath(PathUtils.join(basePath, "TestRunner." + getExtension()));
return fileEntry;
}
}
| songeunwoo/ngrinder | ngrinder-controller/src/main/java/org/ngrinder/script/handler/ScriptHandler.java | Java | apache-2.0 | 10,936 |
module.exports = function(ctx) {
var fs = ctx.requireCordovaModule('fs'),
path = ctx.requireCordovaModule('path'),
os = require("os"),
readline = require("readline"),
deferral = ctx.requireCordovaModule('q').defer();
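    // This hook copies platforms/android/build.gradle line by line into a temporary
    // ./build.gradle and, immediately after a line containing "dependencies {", injects
    // the google-services classpath entry; the rewritten file then replaces the original.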
var lineReader = readline.createInterface({
terminal: false,
input : fs.createReadStream('platforms/android/build.gradle')
});
lineReader.on("line", function(line) {
fs.appendFileSync('./build.gradle', line.toString() + os.EOL);
if (/.*\ dependencies \{.*/.test(line)) {
fs.appendFileSync('./build.gradle', '\t\tclasspath "com.google.gms:google-services:3.0.0"' + os.EOL);
}
}).on("close", function () {
fs.rename('./build.gradle', 'platforms/android/build.gradle', deferral.resolve);
});
return deferral.promise;
};
| alfredo777/btmglobalconsulting | plugins/phonegap-plugin-push/scripts/copyAndroidFile.js | JavaScript | apache-2.0 | 856 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.sparql.exec.http;
import static org.apache.jena.http.HttpLib.*;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpRequest.BodyPublishers;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.TimeUnit;
import org.apache.jena.atlas.RuntimeIOException;
import org.apache.jena.atlas.io.IO;
import org.apache.jena.atlas.iterator.Iter;
import org.apache.jena.atlas.json.JSON;
import org.apache.jena.atlas.json.JsonArray;
import org.apache.jena.atlas.json.JsonObject;
import org.apache.jena.atlas.lib.InternalErrorException;
import org.apache.jena.atlas.lib.Pair;
import org.apache.jena.atlas.logging.Log;
import org.apache.jena.atlas.web.HttpException;
import org.apache.jena.graph.Graph;
import org.apache.jena.graph.Triple;
import org.apache.jena.http.HttpEnv;
import org.apache.jena.http.HttpLib;
import org.apache.jena.query.*;
import org.apache.jena.riot.*;
import org.apache.jena.riot.resultset.ResultSetLang;
import org.apache.jena.riot.resultset.ResultSetReaderRegistry;
import org.apache.jena.riot.web.HttpNames;
import org.apache.jena.sparql.ARQException;
import org.apache.jena.sparql.core.DatasetGraph;
import org.apache.jena.sparql.core.DatasetGraphFactory;
import org.apache.jena.sparql.core.Quad;
import org.apache.jena.sparql.engine.http.HttpParams;
import org.apache.jena.sparql.engine.http.QueryExceptionHTTP;
import org.apache.jena.sparql.exec.QueryExec;
import org.apache.jena.sparql.exec.RowSet;
import org.apache.jena.sparql.util.Context;
/**
* A {@link QueryExec} implementation where queries are executed against a remote
* service over HTTP.
*/
public class QueryExecHTTP implements QueryExec {
/** @deprecated Use {@link #newBuilder} */
@Deprecated
public static QueryExecHTTPBuilder create() { return newBuilder() ; }
public static QueryExecHTTPBuilder newBuilder() { return QueryExecHTTPBuilder.create(); }
public static QueryExecHTTPBuilder service(String serviceURL) {
return QueryExecHTTP.newBuilder().endpoint(serviceURL);
}
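    // Illustrative usage (a minimal sketch; the builder methods shown are those of
    // QueryExecHTTPBuilder and may differ between versions, and the endpoint URL is made up):
    //     try ( QueryExec qExec = QueryExecHTTP.service("http://example.org/sparql")
    //                                           .query("SELECT * { ?s ?p ?o }")
    //                                           .build() ) {
    //         RowSet rowSet = qExec.select();
    //         rowSet.forEachRemaining(System.out::println);
    //     }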
//public static final String QUERY_MIME_TYPE = WebContent.contentTypeSPARQLQuery;
private final Query query;
private final String queryString;
private final String service;
private final Context context;
// Params
private Params params = null;
private final QuerySendMode sendMode;
private int urlLimit = HttpEnv.urlLimit;
// Protocol
private List<String> defaultGraphURIs = new ArrayList<>();
private List<String> namedGraphURIs = new ArrayList<>();
private boolean closed = false;
// Timeout of query execution.
private long readTimeout = -1;
private TimeUnit readTimeoutUnit = TimeUnit.MILLISECONDS;
// Content Types: these list the standard formats and also include */*.
private final String selectAcceptheader = WebContent.defaultSparqlResultsHeader;
private final String askAcceptHeader = WebContent.defaultSparqlAskHeader;
private final String describeAcceptHeader = WebContent.defaultGraphAcceptHeader;
private final String constructAcceptHeader = WebContent.defaultGraphAcceptHeader;
private final String datasetAcceptHeader = WebContent.defaultDatasetAcceptHeader;
// If this is non-null, it overrides the use of any Content-Type above.
private String appProvidedAcceptHeader = null;
// Received content type
private String httpResponseContentType = null;
// Releasing HTTP input streams is important. We remember this for SELECT result
// set streaming, and will close it when the execution is closed
private InputStream retainedConnection = null;
private HttpClient httpClient = HttpEnv.getDftHttpClient();
private Map<String, String> httpHeaders;
public QueryExecHTTP(String serviceURL, Query query, String queryString, int urlLimit,
HttpClient httpClient, Map<String, String> httpHeaders, Params params, Context context,
List<String> defaultGraphURIs, List<String> namedGraphURIs,
QuerySendMode sendMode, String explicitAcceptHeader,
long timeout, TimeUnit timeoutUnit) {
this.context = ( context == null ) ? ARQ.getContext().copy() : context.copy();
this.service = serviceURL;
this.query = query;
this.queryString = queryString;
this.urlLimit = urlLimit;
this.httpHeaders = httpHeaders;
this.defaultGraphURIs = defaultGraphURIs;
this.namedGraphURIs = namedGraphURIs;
this.sendMode = Objects.requireNonNull(sendMode);
this.appProvidedAcceptHeader = explicitAcceptHeader;
// Important - handled as special case because the defaults vary by query type.
if ( httpHeaders.containsKey(HttpNames.hAccept) ) {
if ( this.appProvidedAcceptHeader != null )
this.appProvidedAcceptHeader = httpHeaders.get(HttpNames.hAccept);
this.httpHeaders.remove(HttpNames.hAccept);
}
this.httpHeaders = httpHeaders;
this.params = params;
this.readTimeout = timeout;
this.readTimeoutUnit = timeoutUnit;
this.httpClient = HttpLib.dft(httpClient, HttpEnv.getDftHttpClient());
}
/** The Content-Type response header received (null before the remote operation is attempted). */
public String getHttpResponseContentType() {
return httpResponseContentType;
}
@Override
public RowSet select() {
checkNotClosed();
check(QueryType.SELECT);
RowSet rs = execRowSet();
return rs;
}
private RowSet execRowSet() {
// Use the explicitly given header or the default selectAcceptheader
String thisAcceptHeader = dft(appProvidedAcceptHeader, selectAcceptheader);
HttpResponse<InputStream> response = performQuery(thisAcceptHeader);
InputStream in = HttpLib.getInputStream(response);
// Don't assume the endpoint actually gives back the content type we asked for
String actualContentType = responseHeader(response, HttpNames.hContentType);
// Remember the response.
httpResponseContentType = actualContentType;
// More reliable to use the format-defined charsets e.g. JSON -> UTF-8
actualContentType = removeCharset(actualContentType);
if (false) {
byte b[] = IO.readWholeFile(in);
String str = new String(b);
System.out.println(str);
in = new ByteArrayInputStream(b);
}
retainedConnection = in; // This will be closed on close()
if (actualContentType == null || actualContentType.equals(""))
actualContentType = WebContent.contentTypeResultsXML;
// Map to lang, with pragmatic alternatives.
Lang lang = WebContent.contentTypeToLangResultSet(actualContentType);
if ( lang == null )
throw new QueryException("Endpoint returned Content-Type: " + actualContentType + " which is not recognized for SELECT queries");
if ( !ResultSetReaderRegistry.isRegistered(lang) )
throw new QueryException("Endpoint returned Content-Type: " + actualContentType + " which is not supported for SELECT queries");
// This returns a streaming result set for some formats.
// Do not close the InputStream at this point.
ResultSet result = ResultSetMgr.read(in, lang);
return RowSet.adapt(result);
}
@Override
public boolean ask() {
checkNotClosed();
check(QueryType.ASK);
String thisAcceptHeader = dft(appProvidedAcceptHeader, askAcceptHeader);
HttpResponse<InputStream> response = performQuery(thisAcceptHeader);
InputStream in = HttpLib.getInputStream(response);
String actualContentType = responseHeader(response, HttpNames.hContentType);
httpResponseContentType = actualContentType;
actualContentType = removeCharset(actualContentType);
// If the server fails to return a Content-Type then we will assume
// the server returned the type we asked for
if (actualContentType == null || actualContentType.equals(""))
actualContentType = askAcceptHeader;
Lang lang = RDFLanguages.contentTypeToLang(actualContentType);
if ( lang == null ) {
// Any specials :
// application/xml for application/sparql-results+xml
// application/json for application/sparql-results+json
if (actualContentType.equals(WebContent.contentTypeXML))
lang = ResultSetLang.RS_XML;
else if ( actualContentType.equals(WebContent.contentTypeJSON))
lang = ResultSetLang.RS_JSON;
}
if ( lang == null )
throw new QueryException("Endpoint returned Content-Type: " + actualContentType + " which is not supported for ASK queries");
boolean result = ResultSetMgr.readBoolean(in, lang);
finish(in);
return result;
}
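// Strip any parameters (typically ";charset=...") from a Content-Type value,
// e.g. "application/sparql-results+json;charset=utf-8" becomes "application/sparql-results+json".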
private String removeCharset(String contentType) {
int idx = contentType.indexOf(';');
if ( idx < 0 )
return contentType;
return contentType.substring(0,idx);
}
@Override
public Graph construct(Graph graph) {
checkNotClosed();
check(QueryType.CONSTRUCT);
return execGraph(graph, constructAcceptHeader);
}
@Override
public Iterator<Triple> constructTriples() {
checkNotClosed();
check(QueryType.CONSTRUCT);
return execTriples(constructAcceptHeader);
}
@Override
public Iterator<Quad> constructQuads(){
checkNotClosed();
return execQuads();
}
@Override
public DatasetGraph constructDataset(){
checkNotClosed();
return constructDataset(DatasetGraphFactory.createTxnMem());
}
@Override
public DatasetGraph constructDataset(DatasetGraph dataset){
checkNotClosed();
check(QueryType.CONSTRUCT);
return execDataset(dataset);
}
@Override
public Graph describe(Graph graph) {
checkNotClosed();
check(QueryType.DESCRIBE);
return execGraph(graph, describeAcceptHeader);
}
@Override
public Iterator<Triple> describeTriples() {
checkNotClosed();
return execTriples(describeAcceptHeader);
}
private Graph execGraph(Graph graph, String acceptHeader) {
Pair<InputStream, Lang> p = execRdfWorker(acceptHeader, WebContent.contentTypeRDFXML);
InputStream in = p.getLeft();
Lang lang = p.getRight();
try {
RDFDataMgr.read(graph, in, lang);
} catch (RiotException ex) {
HttpLib.finish(in);
throw ex;
}
return graph;
}
private DatasetGraph execDataset(DatasetGraph dataset) {
Pair<InputStream, Lang> p = execRdfWorker(datasetAcceptHeader, WebContent.contentTypeNQuads);
InputStream in = p.getLeft();
Lang lang = p.getRight();
try {
RDFDataMgr.read(dataset, in, lang);
} catch (RiotException ex) {
finish(in);
throw ex;
}
return dataset;
}
@SuppressWarnings("deprecation")
private Iterator<Triple> execTriples(String acceptHeader) {
Pair<InputStream, Lang> p = execRdfWorker(acceptHeader, WebContent.contentTypeRDFXML);
InputStream input = p.getLeft();
Lang lang = p.getRight();
// Base URI?
// Unless N-Triples, this creates a thread.
Iterator<Triple> iter = RDFDataMgr.createIteratorTriples(input, lang, null);
return Iter.onCloseIO(iter, input);
}
@SuppressWarnings("deprecation")
private Iterator<Quad> execQuads() {
checkNotClosed();
Pair<InputStream, Lang> p = execRdfWorker(datasetAcceptHeader, WebContent.contentTypeNQuads);
InputStream input = p.getLeft();
Lang lang = p.getRight();
// Unless N-Quads, this creates a thread.
Iterator<Quad> iter = RDFDataMgr.createIteratorQuads(input, lang, null);
return Iter.onCloseIO(iter, input);
}
// Any RDF data back (CONSTRUCT, DESCRIBE, QUADS)
// ifNoContentType - some wild guess at the content type.
private Pair<InputStream, Lang> execRdfWorker(String contentType, String ifNoContentType) {
checkNotClosed();
String thisAcceptHeader = dft(appProvidedAcceptHeader, contentType);
HttpResponse<InputStream> response = performQuery(thisAcceptHeader);
InputStream in = HttpLib.getInputStream(response);
// Don't assume the endpoint actually gives back the content type we asked for
String actualContentType = responseHeader(response, HttpNames.hContentType);
httpResponseContentType = actualContentType;
actualContentType = removeCharset(actualContentType);
// If the server fails to return a Content-Type then we will assume
// the server returned the type we asked for
if (actualContentType == null || actualContentType.equals(""))
actualContentType = ifNoContentType;
Lang lang = RDFLanguages.contentTypeToLang(actualContentType);
if ( ! RDFLanguages.isQuads(lang) && ! RDFLanguages.isTriples(lang) )
throw new QueryException("Endpoint returned Content Type: "
+ actualContentType
+ " which is not a valid RDF syntax");
return Pair.create(in, lang);
}
@Override
public JsonArray execJson() {
checkNotClosed();
check(QueryType.CONSTRUCT_JSON);
String thisAcceptHeader = dft(appProvidedAcceptHeader, WebContent.contentTypeJSON);
HttpResponse<InputStream> response = performQuery(thisAcceptHeader);
InputStream in = HttpLib.getInputStream(response);
try {
return JSON.parseAny(in).getAsArray();
} finally { finish(in); }
}
@Override
public Iterator<JsonObject> execJsonItems() {
JsonArray array = execJson().getAsArray();
List<JsonObject> x = new ArrayList<>(array.size());
array.forEach(elt->{
if ( ! elt.isObject())
throw new QueryExecException("Item in an array from a JSON query isn't an object");
x.add(elt.getAsObject());
});
return x.iterator();
}
private void checkNotClosed() {
if ( closed )
throw new QueryExecException("HTTP QueryExecHTTP has been closed");
}
private void check(QueryType queryType) {
if ( query == null ) {
// Pass through the queryString.
return;
}
if ( query.queryType() != queryType )
throw new QueryExecException("Not the right form of query. Expected "+queryType+" but got "+query.queryType());
}
@Override
public Context getContext() {
return context;
}
@Override
public DatasetGraph getDataset() {
return null;
}
// This may be null - if we were created from a query string,
// we don't guarantee to parse it so we let through non-SPARQL
// extensions to the far end.
@Override
public Query getQuery() {
if ( query != null )
return query;
if ( queryString != null ) {
// Object not created with a Query object, may be because there is foreign
// syntax in the query or may be because the query string was available and the app
// didn't want the overhead of parsing it every time.
// Try to parse it else return null;
try { return QueryFactory.create(queryString, Syntax.syntaxARQ); }
catch (QueryParseException ex) {}
return null;
}
return null;
}
/**
* Return the query string. If this was supplied as a string,
* there is no guarantee this is legal SPARQL syntax.
*/
@Override
public String getQueryString() {
return queryString;
}
private static long asMillis(long duration, TimeUnit timeUnit) {
return (duration < 0) ? duration : timeUnit.toMillis(duration);
}
/**
* Make a query over HTTP.
* The response is returned after status code processing so the caller can assume the
 * query execution was successful and returned 200.
* Use {@link HttpLib#getInputStream} to access the body.
*/
private HttpResponse<InputStream> performQuery(String reqAcceptHeader) {
if (closed)
throw new ARQException("HTTP execution already closed");
// SERVICE specials.
Params thisParams = Params.create(params);
if ( defaultGraphURIs != null ) {
for ( String dft : defaultGraphURIs )
thisParams.add( HttpParams.pDefaultGraph, dft );
}
if ( namedGraphURIs != null ) {
for ( String name : namedGraphURIs )
thisParams.add( HttpParams.pNamedGraph, name );
}
HttpLib.modifyByService(service, context, thisParams, httpHeaders);
HttpRequest request = makeRequest(thisParams, reqAcceptHeader);
return executeQuery(request);
}
private HttpRequest makeRequest(Params thisParams, String reqAcceptHeader) {
QuerySendMode actualSendMode = actualSendMode();
HttpRequest.Builder requestBuilder;
switch(actualSendMode) {
case asGetAlways :
requestBuilder = executeQueryGet(thisParams, reqAcceptHeader);
break;
case asPostForm :
requestBuilder = executeQueryPostForm(thisParams, reqAcceptHeader);
break;
case asPost :
requestBuilder = executeQueryPostBody(thisParams, reqAcceptHeader);
break;
default :
// Should not happen!
throw new InternalErrorException("Invalid value for 'actualSendMode' "+actualSendMode);
}
return requestBuilder.build();
}
private HttpResponse<InputStream> executeQuery(HttpRequest request) {
logQuery(queryString, request);
try {
HttpResponse<InputStream> response = execute(httpClient, request);
HttpLib.handleHttpStatusCode(response);
return response;
} catch (HttpException httpEx) {
throw QueryExceptionHTTP.rewrap(httpEx);
}
}
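// Decide how the query will actually be sent. The fixed modes (always GET, POST form, POST body)
// pass through unchanged; the "GET with limit" modes estimate the final URL length
// (service + "?query=" + encoded query + other params) and switch to the corresponding POST
// variant when that estimate exceeds the configured urlLimit.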
private QuerySendMode actualSendMode() {
int thisLengthLimit = urlLimit;
switch(sendMode) {
case asGetAlways :
case asPostForm :
case asPost :
return sendMode;
case asGetWithLimitBody :
case asGetWithLimitForm :
break;
}
// Only QuerySendMode.asGetWithLimitBody and QuerySendMode.asGetWithLimitForm here.
String requestURL = service;
// Other params (query= has not been added at this point)
int paramsLength = params.httpString().length();
int qEncodedLength = calcEncodeStringLength(queryString);
// URL Length, including service (for safety)
int length = service.length()
+ /* ?query= */ 1 + HttpParams.pQuery.length()
+ /* encoded query */ qEncodedLength
+ /* &other params*/ 1 + paramsLength;
if ( length <= thisLengthLimit )
return QuerySendMode.asGetAlways;
return (sendMode==QuerySendMode.asGetWithLimitBody) ? QuerySendMode.asPost : QuerySendMode.asPostForm;
}
private static int calcEncodeStringLength(String str) {
// Could approximate by counting the characters that need percent-encoding and adding twice that count to the length of the string.
String qs = HttpLib.urlEncodeQueryString(str);
int encodedLength = qs.length();
return encodedLength;
}
private HttpRequest.Builder executeQueryGet(Params thisParams, String acceptHeader) {
thisParams.add(HttpParams.pQuery, queryString);
String requestURL = requestURL(service, thisParams.httpString());
HttpRequest.Builder builder = HttpLib.requestBuilder(requestURL, httpHeaders, readTimeout, readTimeoutUnit);
acceptHeader(builder, acceptHeader);
return builder.GET();
}
private HttpRequest.Builder executeQueryPostForm(Params thisParams, String acceptHeader) {
thisParams.add(HttpParams.pQuery, queryString);
String requestURL = service;
String formBody = thisParams.httpString();
HttpRequest.Builder builder = HttpLib.requestBuilder(requestURL, httpHeaders, readTimeout, readTimeoutUnit);
acceptHeader(builder, acceptHeader);
// Use an HTML form.
contentTypeHeader(builder, WebContent.contentTypeHTMLForm);
// Already UTF-8 encoded to ASCII.
return builder.POST(BodyPublishers.ofString(formBody, StandardCharsets.US_ASCII));
}
// Use SPARQL query body and MIME type.
private HttpRequest.Builder executeQueryPostBody(Params thisParams, String acceptHeader) {
// Use thisParams (for default-graph-uri etc)
String requestURL = requestURL(service, thisParams.httpString());
HttpRequest.Builder builder = HttpLib.requestBuilder(requestURL, httpHeaders, readTimeout, readTimeoutUnit);
contentTypeHeader(builder, WebContent.contentTypeSPARQLQuery);
acceptHeader(builder, acceptHeader);
return builder.POST(BodyPublishers.ofString(queryString));
}
private static void logQuery(String queryString, HttpRequest request) {}
/**
* Cancel query evaluation
*/
public void cancel() {
closed = true;
}
@Override
public void abort() {
try {
close();
} catch (Exception ex) {
Log.warn(this, "Error during abort", ex);
}
}
@Override
public void close() {
closed = true;
if (retainedConnection != null) {
try {
// This call may take a long time if the response has not been consumed
// as HTTP client will consume the remaining response so it can re-use the
// connection. If we're closing when we're not at the end of the stream then
// issue a warning to the logs
if (retainedConnection.read() != -1)
Log.warn(this, "HTTP response not fully consumed, if HTTP Client is reusing connections (its default behaviour) then it will consume the remaining response data which may take a long time and cause this application to become unresponsive");
retainedConnection.close();
} catch (RuntimeIOException | java.io.IOException e) {
// If we are closing early and the underlying stream is chunk encoded
// the close() can result in a IOException. TypedInputStream catches
// and re-wraps that and we want to suppress both forms.
} finally {
retainedConnection = null;
}
}
}
@Override
public boolean isClosed() { return closed; }
}
| apache/jena | jena-arq/src/main/java/org/apache/jena/sparql/exec/http/QueryExecHTTP.java | Java | apache-2.0 | 24,139 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import io.netty.handler.codec.CodecException;
import org.apache.tinkerpop.gremlin.driver.exception.ConnectionException;
import org.apache.tinkerpop.gremlin.driver.message.RequestMessage;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelPromise;
import io.netty.channel.socket.nio.NioSocketChannel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
/**
* A single connection to a Gremlin Server instance.
*
* @author Stephen Mallette (http://stephen.genoprime.com)
*/
final class Connection {
private static final Logger logger = LoggerFactory.getLogger(Connection.class);
private final Channel channel;
private final URI uri;
private final ConcurrentMap<UUID, ResultQueue> pending = new ConcurrentHashMap<>();
private final Cluster cluster;
private final Client client;
private final ConnectionPool pool;
private final long keepAliveInterval;
public static final int MAX_IN_PROCESS = 4;
public static final int MIN_IN_PROCESS = 1;
public static final int MAX_WAIT_FOR_CONNECTION = 3000;
public static final int MAX_WAIT_FOR_SESSION_CLOSE = 3000;
public static final int MAX_CONTENT_LENGTH = 65536;
public static final int RECONNECT_INTERVAL = 1000;
public static final int RESULT_ITERATION_BATCH_SIZE = 64;
public static final long KEEP_ALIVE_INTERVAL = 1800000;
/**
* When a {@code Connection} is borrowed from the pool, this number is incremented to indicate the number of
* times it has been taken and is decremented when it is returned. This number is one indication as to how
* busy a particular {@code Connection} is.
*/
public final AtomicInteger borrowed = new AtomicInteger(0);
private final AtomicReference<Class<Channelizer>> channelizerClass = new AtomicReference<>(null);
private volatile boolean isDead = false;
private final int maxInProcess;
private final String connectionLabel;
private final Channelizer channelizer;
private final AtomicReference<CompletableFuture<Void>> closeFuture = new AtomicReference<>();
private final AtomicBoolean shutdownInitiated = new AtomicBoolean(false);
private final AtomicReference<ScheduledFuture> keepAliveFuture = new AtomicReference<>();
public Connection(final URI uri, final ConnectionPool pool, final int maxInProcess) throws ConnectionException {
this.uri = uri;
this.cluster = pool.getCluster();
this.client = pool.getClient();
this.pool = pool;
this.maxInProcess = maxInProcess;
this.keepAliveInterval = pool.settings().keepAliveInterval;
connectionLabel = String.format("Connection{host=%s}", pool.host);
if (cluster.isClosing()) throw new IllegalStateException("Cannot open a connection with the cluster after close() is called");
final Bootstrap b = this.cluster.getFactory().createBootstrap();
try {
if (channelizerClass.get() == null) {
channelizerClass.compareAndSet(null, (Class<Channelizer>) Class.forName(cluster.connectionPoolSettings().channelizer));
}
channelizer = channelizerClass.get().newInstance();
channelizer.init(this);
b.channel(NioSocketChannel.class).handler(channelizer);
channel = b.connect(uri.getHost(), uri.getPort()).sync().channel();
channelizer.connected();
logger.info("Created new connection for {}", uri);
} catch (Exception ie) {
logger.debug("Error opening connection on {}", uri);
throw new ConnectionException(uri, "Could not open connection", ie);
}
}
/**
* A connection can only have so many things in process happening on it at once, where "in process" refers to
* the maximum number of in-process requests less the number of pending responses.
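* For example, with a {@code maxInProcess} of 4 and 3 pending responses, one more request can be put
* in process; the returned value is never negative.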
*/
public int availableInProcess() {
// no need for a negative available amount - not sure that the pending size can ever exceed maximum, but
// better to avoid the negatives that would ensue if it did
return Math.max(0, maxInProcess - pending.size());
}
public boolean isDead() {
return isDead;
}
boolean isClosing() {
return closeFuture.get() != null;
}
URI getUri() {
return uri;
}
Cluster getCluster() {
return cluster;
}
Client getClient() {
return client;
}
ConcurrentMap<UUID, ResultQueue> getPending() {
return pending;
}
public synchronized CompletableFuture<Void> closeAsync() {
if (isClosing()) return closeFuture.get();
final CompletableFuture<Void> future = new CompletableFuture<>();
closeFuture.set(future);
// stop any pings being sent at the server for keep-alive
final ScheduledFuture keepAlive = keepAliveFuture.get();
if (keepAlive != null) keepAlive.cancel(true);
// make sure all requests in the queue are fully processed before killing. if they are then shutdown
// can be immediate. if not this method will signal the readCompleted future defined in the write()
// operation to check if it can close. in this way the connection no longer receives writes, but
// can continue to read. If a request never comes back the future won't get fulfilled and the connection
// will maintain a "pending" request, that won't quite ever go away. The build up of such a dead requests
// on a connection in the connection pool will force the pool to replace the connection for a fresh one.
if (isOkToClose()) {
if (null == channel)
future.complete(null);
else
shutdown(future);
} else {
// there may be some pending requests. schedule a job to wait for those to complete and then shutdown
new CheckForPending(future).runUntilDone(cluster.executor(), 1000, TimeUnit.MILLISECONDS);
}
return future;
}
public void close() {
try {
closeAsync().get();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
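// Write a request to the channel: registers a ResultQueue keyed by the request id, completes the
// caller's future with a ResultSet once the write succeeds, and (where the channelizer supports it)
// schedules keep-alive pings so an idle connection is not dropped.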
public ChannelPromise write(final RequestMessage requestMessage, final CompletableFuture<ResultSet> future) {
// once there is a completed write, then create a traverser for the result set and complete
// the promise so that the client knows that it can start checking for results.
final Connection thisConnection = this;
final ChannelPromise requestPromise = channel.newPromise()
.addListener(f -> {
if (!f.isSuccess()) {
if (logger.isDebugEnabled())
logger.debug(String.format("Write on connection %s failed", thisConnection.getConnectionInfo()), f.cause());
thisConnection.isDead = true;
thisConnection.returnToPool();
cluster.executor().submit(() -> future.completeExceptionally(f.cause()));
} else {
final LinkedBlockingQueue<Result> resultLinkedBlockingQueue = new LinkedBlockingQueue<>();
final CompletableFuture<Void> readCompleted = new CompletableFuture<>();
// the callback for when the read was successful, meaning that ResultQueue.markComplete()
// was called
readCompleted.thenAcceptAsync(v -> {
thisConnection.returnToPool();
tryShutdown();
}, cluster.executor());
// the callback for when the read failed. a failed read means the request went to the server
// and came back with a server-side error of some sort. it means the server is responsive
// so this isn't going to be like a dead host situation which is handled above on a failed
// write operation.
//
// in the event of an IOException (typically means that the Connection might have
// been closed from the server side - this is typical in situations like when a request is
// sent that exceeds maxContentLength and the server closes the channel on its side) or other
// exceptions that indicate a non-recoverable state for the Connection object
// (a netty CorruptedFrameException is a good example of that), the Connection cannot simply
// be returned to the pool as future uses will end with refusal from the server and make it
// appear as a dead host as the write will not succeed. instead, the Connection needs to be
// replaced in these scenarios which destroys the dead channel on the client and allows a new
// one to be reconstructed.
readCompleted.exceptionally(t -> {
if (t instanceof IOException || t instanceof CodecException) {
if (pool != null) pool.replaceConnection(thisConnection);
} else {
thisConnection.returnToPool();
}
// close was signaled in closeAsync() but there were pending messages at that time. attempt
// the shutdown if the returned result cleared up the last pending message
tryShutdown();
return null;
});
final ResultQueue handler = new ResultQueue(resultLinkedBlockingQueue, readCompleted);
pending.put(requestMessage.getRequestId(), handler);
cluster.executor().submit(() -> future.complete(
new ResultSet(handler, cluster.executor(), readCompleted, requestMessage, pool.host)));
}
});
channel.writeAndFlush(requestMessage, requestPromise);
// try to keep the connection alive if the channel allows such things - websockets will
if (channelizer.supportsKeepAlive() && keepAliveInterval > 0) {
final ScheduledFuture oldKeepAliveFuture = keepAliveFuture.getAndSet(cluster.executor().scheduleAtFixedRate(() -> {
logger.debug("Request sent to server to keep {} alive", thisConnection);
try {
channel.writeAndFlush(channelizer.createKeepAliveMessage());
} catch (Exception ex) {
// will just log this for now - a future real request can be responsible for the failure that
// marks the host as dead. this also may not mean the host is actually dead. more robust handling
// is in play for real requests, not this simple ping
logger.warn(String.format("Keep-alive did not succeed on %s", thisConnection), ex);
}
}, keepAliveInterval, keepAliveInterval, TimeUnit.MILLISECONDS));
// try to cancel the old future if it's still un-executed - no need to ping since a new write has come
// through on the connection
if (oldKeepAliveFuture != null) oldKeepAliveFuture.cancel(true);
}
return requestPromise;
}
public void returnToPool() {
try {
if (pool != null) pool.returnConnection(this);
} catch (ConnectionException ce) {
if (logger.isDebugEnabled())
logger.debug("Returned {} connection to {} but an error occurred - {}", this.getConnectionInfo(), pool, ce.getMessage());
}
}
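// Closing is acceptable when nothing is pending, when the channel is already closed, or when the
// host is no longer available.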
private boolean isOkToClose() {
return pending.isEmpty() || (channel !=null && !channel.isOpen()) || !pool.host.isAvailable();
}
/**
* Close was signaled in closeAsync() but there were pending messages at that time. This method attempts the
* shutdown if the returned result cleared up the last pending message.
*/
private void tryShutdown() {
if (isClosing() && isOkToClose())
shutdown(closeFuture.get());
}
private synchronized void shutdown(final CompletableFuture<Void> future) {
// shutdown can be called directly from closeAsync() or after write() and therefore this method should only
// be called once. once shutdown is initiated, it shouldn't be executed a second time or else it sends more
// messages at the server and leads to ugly log messages over there.
if (shutdownInitiated.compareAndSet(false, true)) {
// maybe this should be delegated back to the Client implementation??? kinda weird to instanceof here.....
if (client instanceof Client.SessionedClient) {
final boolean forceClose = client.getSettings().getSession().get().isForceClosed();
final RequestMessage closeMessage = client.buildMessage(
RequestMessage.build(Tokens.OPS_CLOSE).addArg(Tokens.ARGS_FORCE, forceClose)).create();
final CompletableFuture<ResultSet> closed = new CompletableFuture<>();
write(closeMessage, closed);
try {
// make sure we get a response here to validate that things closed as expected. on error, we'll let
// the server try to clean up on its own. the primary error here should probably be related to
// protocol issues which should not be something a user has to fuss with.
closed.join().all().get(cluster.connectionPoolSettings().maxWaitForSessionClose, TimeUnit.MILLISECONDS);
} catch (TimeoutException ex) {
final String msg = String.format(
"Timeout while trying to close connection on %s - force closing - server will close session on shutdown or expiration.",
((Client.SessionedClient) client).getSessionId());
logger.warn(msg, ex);
} catch (Exception ex) {
final String msg = String.format(
"Encountered an error trying to close connection on %s - force closing - server will close session on shutdown or expiration.",
((Client.SessionedClient) client).getSessionId());
logger.warn(msg, ex);
}
}
channelizer.close(channel);
final ChannelPromise promise = channel.newPromise();
promise.addListener(f -> {
if (f.cause() != null)
future.completeExceptionally(f.cause());
else
future.complete(null);
});
channel.close(promise);
}
}
public String getConnectionInfo() {
return String.format("Connection{host=%s, isDead=%s, borrowed=%s, pending=%s}",
pool.host, isDead, borrowed, pending.size());
}
@Override
public String toString() {
return connectionLabel;
}
/**
* Self-cancelling task that periodically checks for the pending queue to clear before shutting down the
* {@code Connection}. Once it does, it cancels its own scheduled job in the executor.
*/
private final class CheckForPending implements Runnable {
private volatile ScheduledFuture<?> self;
private final CompletableFuture<Void> future;
CheckForPending(final CompletableFuture<Void> future) {
this.future = future;
}
@Override
public void run() {
logger.info("Checking for pending messages to complete before close on {}", this);
if (isOkToClose()) {
shutdown(future);
boolean interrupted = false;
try {
while(null == self) {
try {
Thread.sleep(1);
} catch (InterruptedException e) {
interrupted = true;
}
}
self.cancel(false);
} finally {
if(interrupted) {
Thread.currentThread().interrupt();
}
}
}
}
void runUntilDone(final ScheduledExecutorService executor, final long period, final TimeUnit unit) {
self = executor.scheduleAtFixedRate(this, period, period, unit);
}
}
}
| jorgebay/tinkerpop | gremlin-driver/src/main/java/org/apache/tinkerpop/gremlin/driver/Connection.java | Java | apache-2.0 | 18,349 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.milton.http.values;
import io.milton.http.XmlWriter;
import io.milton.http.XmlWriter.Element;
import io.milton.http.webdav.WebDavProtocol;
import java.util.Map;
/**
* Supports HrefList objects, and writes them out as a list of <href>...</href> elements
*
* Currently readonly, but should support writing
*
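* As an illustrative sketch (the property name and hrefs are made up), writing an HrefList of
* "/users/a" and "/users/b" for a property "group-member-set" produces markup along the lines of:
* <pre>
* <D:group-member-set>
* <D:href>/users/a</D:href>
* <D:href>/users/b</D:href>
* </D:group-member-set>
* </pre>
* The outer element uses the prefix and localName passed to writeValue; each list entry becomes a
* DAV-prefixed href element.
*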
* @author brad
*/
public class HrefListValueWriter implements ValueWriter {
@Override
public boolean supports(String nsUri, String localName, Class c) {
boolean b = HrefList.class.isAssignableFrom(c);
return b;
}
@Override
public void writeValue(XmlWriter writer, String nsUri, String prefix, String localName, Object val, String href, Map<String, String> nsPrefixes) {
if (val instanceof HrefList) {
Element outerEl = writer.begin(prefix, localName).open();
HrefList list = (HrefList) val;
if (list != null) {
for (String s : list) {
Element hrefEl = writer.begin(WebDavProtocol.DAV_PREFIX + ":href").open(false);
hrefEl.writeText(s);
hrefEl.close();
}
}
outerEl.close();
} else {
if (val != null) {
throw new RuntimeException("Value is not correct type. Is a: " + val.getClass());
}
}
}
@Override
public Object parse(String namespaceURI, String localPart, String value) {
throw new UnsupportedOperationException("Not supported yet.");
}
}
| skoulouzis/lobcder | milton2/milton-server-ce/src/main/java/io/milton/http/values/HrefListValueWriter.java | Java | apache-2.0 | 2,213 |
/* Generated By:JavaCC: Do not edit this line. SelectorParserImpl.java */
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.qpid.jms.selector.parser;
import java.io.*;
import java.util.*;
import org.apache.qpid.jms.selector.filter.*;
/**
* JMS Selector Parser generated by JavaCC
*
* Do not edit this .java file directly - it is generated from SelectorParserImpl.jj
* Edit SelectorParserImpl.jj and rebuild with the 'generate-selector-parser' profile.
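*
* A minimal usage sketch (the selector string below is only an example):
* <pre>
* SelectorParserImpl parser = new SelectorParserImpl(new java.io.StringReader("age > 30 AND name LIKE 'J%'"));
* BooleanExpression selector = parser.JmsSelector(); // throws ParseException on invalid selectors
* </pre>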
*/
public class SelectorParserImpl implements SelectorParserImplConstants {
private BooleanExpression asBooleanExpression(Expression value) throws ParseException {
if (value instanceof BooleanExpression) {
return (BooleanExpression) value;
}
if (value instanceof PropertyExpression) {
return UnaryExpression.createBooleanCast( value );
}
throw new ParseException("Expression will not result in a boolean value: " + value);
}
// ----------------------------------------------------------------------------
// Grammar
// ----------------------------------------------------------------------------
final public BooleanExpression JmsSelector() throws ParseException {
Expression left=null;
left = orExpression();
{if (true) return asBooleanExpression(left);}
throw new Error("Missing return statement in function");
}
final public Expression orExpression() throws ParseException {
Expression left;
Expression right;
left = andExpression();
label_1:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case OR:
;
break;
default:
break label_1;
}
jj_consume_token(OR);
right = andExpression();
left = LogicExpression.createOR(asBooleanExpression(left), asBooleanExpression(right));
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression andExpression() throws ParseException {
Expression left;
Expression right;
left = equalityExpression();
label_2:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case AND:
;
break;
default:
break label_2;
}
jj_consume_token(AND);
right = equalityExpression();
left = LogicExpression.createAND(asBooleanExpression(left), asBooleanExpression(right));
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression equalityExpression() throws ParseException {
Expression left;
Expression right;
left = comparisonExpression();
label_3:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case IS:
case 27:
case 28:
;
break;
default:
break label_3;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 27:
jj_consume_token(27);
right = comparisonExpression();
left = ComparisonExpression.createEqual(left, right);
break;
case 28:
jj_consume_token(28);
right = comparisonExpression();
left = ComparisonExpression.createNotEqual(left, right);
break;
default:
if (jj_2_1(2)) {
jj_consume_token(IS);
jj_consume_token(NULL);
left = ComparisonExpression.createIsNull(left);
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case IS:
jj_consume_token(IS);
jj_consume_token(NOT);
jj_consume_token(NULL);
left = ComparisonExpression.createIsNotNull(left);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression comparisonExpression() throws ParseException {
Expression left;
Expression right;
Expression low;
Expression high;
String t, u;
boolean not;
ArrayList list;
left = addExpression();
label_4:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case NOT:
case BETWEEN:
case LIKE:
case IN:
case 29:
case 30:
case 31:
case 32:
;
break;
default:
break label_4;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 29:
jj_consume_token(29);
right = addExpression();
left = ComparisonExpression.createGreaterThan(left, right);
break;
case 30:
jj_consume_token(30);
right = addExpression();
left = ComparisonExpression.createGreaterThanEqual(left, right);
break;
case 31:
jj_consume_token(31);
right = addExpression();
left = ComparisonExpression.createLessThan(left, right);
break;
case 32:
jj_consume_token(32);
right = addExpression();
left = ComparisonExpression.createLessThanEqual(left, right);
break;
case LIKE:
u=null;
jj_consume_token(LIKE);
t = stringLitteral();
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ESCAPE:
jj_consume_token(ESCAPE);
u = stringLitteral();
break;
default:
;
}
left = ComparisonExpression.createLike(left, t, u);
break;
default:
if (jj_2_2(2)) {
u=null;
jj_consume_token(NOT);
jj_consume_token(LIKE);
t = stringLitteral();
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ESCAPE:
jj_consume_token(ESCAPE);
u = stringLitteral();
break;
default:
;
}
left = ComparisonExpression.createNotLike(left, t, u);
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case BETWEEN:
jj_consume_token(BETWEEN);
low = addExpression();
jj_consume_token(AND);
high = addExpression();
left = ComparisonExpression.createBetween(left, low, high);
break;
default:
if (jj_2_3(2)) {
jj_consume_token(NOT);
jj_consume_token(BETWEEN);
low = addExpression();
jj_consume_token(AND);
high = addExpression();
left = ComparisonExpression.createNotBetween(left, low, high);
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case IN:
jj_consume_token(IN);
jj_consume_token(33);
t = stringLitteral();
list = new ArrayList();
list.add( t );
label_5:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 34:
;
break;
default:
break label_5;
}
jj_consume_token(34);
t = stringLitteral();
list.add( t );
}
jj_consume_token(35);
left = ComparisonExpression.createInFilter(left, list);
break;
default:
if (jj_2_4(2)) {
jj_consume_token(NOT);
jj_consume_token(IN);
jj_consume_token(33);
t = stringLitteral();
list = new ArrayList();
list.add( t );
label_6:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 34:
;
break;
default:
break label_6;
}
jj_consume_token(34);
t = stringLitteral();
list.add( t );
}
jj_consume_token(35);
left = ComparisonExpression.createNotInFilter(left, list);
} else {
jj_consume_token(-1);
throw new ParseException();
}
}
}
}
}
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression addExpression() throws ParseException {
Expression left;
Expression right;
left = multExpr();
label_7:
while (true) {
if (jj_2_5(2147483647)) {
;
} else {
break label_7;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 36:
jj_consume_token(36);
right = multExpr();
left = ArithmeticExpression.createPlus(left, right);
break;
case 37:
jj_consume_token(37);
right = multExpr();
left = ArithmeticExpression.createMinus(left, right);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression multExpr() throws ParseException {
Expression left;
Expression right;
left = unaryExpr();
label_8:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 38:
case 39:
case 40:
;
break;
default:
break label_8;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 38:
jj_consume_token(38);
right = unaryExpr();
left = ArithmeticExpression.createMultiply(left, right);
break;
case 39:
jj_consume_token(39);
right = unaryExpr();
left = ArithmeticExpression.createDivide(left, right);
break;
case 40:
jj_consume_token(40);
right = unaryExpr();
left = ArithmeticExpression.createMod(left, right);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression unaryExpr() throws ParseException {
String s=null;
Expression left=null;
if (jj_2_6(2147483647)) {
jj_consume_token(36);
left = unaryExpr();
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 37:
jj_consume_token(37);
left = unaryExpr();
left = UnaryExpression.createNegate(left);
break;
case NOT:
jj_consume_token(NOT);
left = unaryExpr();
left = UnaryExpression.createNOT( asBooleanExpression(left) );
break;
case TRUE:
case FALSE:
case NULL:
case DECIMAL_LITERAL:
case HEX_LITERAL:
case OCTAL_LITERAL:
case FLOATING_POINT_LITERAL:
case STRING_LITERAL:
case ID:
case QUOTED_ID:
case 33:
left = primaryExpr();
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression primaryExpr() throws ParseException {
Expression left=null;
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case TRUE:
case FALSE:
case NULL:
case DECIMAL_LITERAL:
case HEX_LITERAL:
case OCTAL_LITERAL:
case FLOATING_POINT_LITERAL:
case STRING_LITERAL:
left = literal();
break;
case ID:
case QUOTED_ID:
left = variable();
break;
case 33:
jj_consume_token(33);
left = orExpression();
jj_consume_token(35);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public ConstantExpression literal() throws ParseException {
Token t;
String s;
ConstantExpression left=null;
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case STRING_LITERAL:
s = stringLitteral();
left = new ConstantExpression(s);
break;
case DECIMAL_LITERAL:
t = jj_consume_token(DECIMAL_LITERAL);
left = ConstantExpression.createFromDecimal(t.image);
break;
case HEX_LITERAL:
t = jj_consume_token(HEX_LITERAL);
left = ConstantExpression.createFromHex(t.image);
break;
case OCTAL_LITERAL:
t = jj_consume_token(OCTAL_LITERAL);
left = ConstantExpression.createFromOctal(t.image);
break;
case FLOATING_POINT_LITERAL:
t = jj_consume_token(FLOATING_POINT_LITERAL);
left = ConstantExpression.createFloat(t.image);
break;
case TRUE:
jj_consume_token(TRUE);
left = ConstantExpression.TRUE;
break;
case FALSE:
jj_consume_token(FALSE);
left = ConstantExpression.FALSE;
break;
case NULL:
jj_consume_token(NULL);
left = ConstantExpression.NULL;
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public String stringLitteral() throws ParseException {
Token t;
StringBuffer rc = new StringBuffer();
boolean first=true;
t = jj_consume_token(STRING_LITERAL);
// Decode the string value.
String image = t.image;
for( int i=1; i < image.length()-1; i++ ) {
char c = image.charAt(i);
if( c == '\u005c'' )
i++;
rc.append(c);
}
{if (true) return rc.toString();}
throw new Error("Missing return statement in function");
}
final public PropertyExpression variable() throws ParseException {
Token t;
PropertyExpression left=null;
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ID:
t = jj_consume_token(ID);
left = new PropertyExpression(t.image);
break;
case QUOTED_ID:
t = jj_consume_token(QUOTED_ID);
// Decode the string value.
StringBuffer rc = new StringBuffer();
String image = t.image;
for( int i=1; i < image.length()-1; i++ ) {
char c = image.charAt(i);
if( c == '"' )
i++;
rc.append(c);
}
{if (true) return new PropertyExpression(rc.toString());}
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
private boolean jj_2_1(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_1(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_2(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_2(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_3(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_3(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_4(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_4(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_5(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_5(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_6(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_6(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_3R_53() {
if (jj_scan_token(LIKE)) return true;
if (jj_3R_38()) return true;
Token xsp;
xsp = jj_scanpos;
if (jj_3R_58()) jj_scanpos = xsp;
return false;
}
private boolean jj_3R_27() {
if (jj_scan_token(DECIMAL_LITERAL)) return true;
return false;
}
private boolean jj_3R_36() {
if (jj_3R_39()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_40()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_52() {
if (jj_scan_token(32)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_57() {
if (jj_scan_token(37)) return true;
if (jj_3R_9()) return true;
return false;
}
private boolean jj_3_5() {
Token xsp;
xsp = jj_scanpos;
if (jj_scan_token(36)) {
jj_scanpos = xsp;
if (jj_scan_token(37)) return true;
}
if (jj_3R_9()) return true;
return false;
}
private boolean jj_3R_26() {
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3R_51() {
if (jj_scan_token(31)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_35() {
if (jj_scan_token(QUOTED_ID)) return true;
return false;
}
private boolean jj_3R_56() {
if (jj_scan_token(36)) return true;
if (jj_3R_9()) return true;
return false;
}
private boolean jj_3R_37() {
if (jj_scan_token(OR)) return true;
if (jj_3R_36()) return true;
return false;
}
private boolean jj_3R_23() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_26()) {
jj_scanpos = xsp;
if (jj_3R_27()) {
jj_scanpos = xsp;
if (jj_3R_28()) {
jj_scanpos = xsp;
if (jj_3R_29()) {
jj_scanpos = xsp;
if (jj_3R_30()) {
jj_scanpos = xsp;
if (jj_3R_31()) {
jj_scanpos = xsp;
if (jj_3R_32()) {
jj_scanpos = xsp;
if (jj_3R_33()) return true;
}
}
}
}
}
}
}
return false;
}
private boolean jj_3R_50() {
if (jj_scan_token(30)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_34() {
if (jj_scan_token(ID)) return true;
return false;
}
private boolean jj_3R_48() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_56()) {
jj_scanpos = xsp;
if (jj_3R_57()) return true;
}
return false;
}
private boolean jj_3R_49() {
if (jj_scan_token(29)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_44() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_49()) {
jj_scanpos = xsp;
if (jj_3R_50()) {
jj_scanpos = xsp;
if (jj_3R_51()) {
jj_scanpos = xsp;
if (jj_3R_52()) {
jj_scanpos = xsp;
if (jj_3R_53()) {
jj_scanpos = xsp;
if (jj_3_2()) {
jj_scanpos = xsp;
if (jj_3R_54()) {
jj_scanpos = xsp;
if (jj_3_3()) {
jj_scanpos = xsp;
if (jj_3R_55()) {
jj_scanpos = xsp;
if (jj_3_4()) return true;
}
}
}
}
}
}
}
}
}
return false;
}
private boolean jj_3R_24() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_34()) {
jj_scanpos = xsp;
if (jj_3R_35()) return true;
}
return false;
}
private boolean jj_3R_43() {
if (jj_3R_9()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_48()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_25() {
if (jj_3R_36()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_37()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_22() {
if (jj_scan_token(33)) return true;
if (jj_3R_25()) return true;
if (jj_scan_token(35)) return true;
return false;
}
private boolean jj_3R_21() {
if (jj_3R_24()) return true;
return false;
}
private boolean jj_3R_61() {
if (jj_scan_token(34)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3R_20() {
if (jj_3R_23()) return true;
return false;
}
private boolean jj_3R_19() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_20()) {
jj_scanpos = xsp;
if (jj_3R_21()) {
jj_scanpos = xsp;
if (jj_3R_22()) return true;
}
}
return false;
}
private boolean jj_3R_41() {
if (jj_3R_43()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_44()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_38() {
if (jj_scan_token(STRING_LITERAL)) return true;
return false;
}
private boolean jj_3R_15() {
if (jj_3R_19()) return true;
return false;
}
private boolean jj_3R_59() {
if (jj_scan_token(ESCAPE)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3_4() {
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(IN)) return true;
if (jj_scan_token(33)) return true;
if (jj_3R_38()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_61()) { jj_scanpos = xsp; break; }
}
if (jj_scan_token(35)) return true;
return false;
}
private boolean jj_3_6() {
if (jj_scan_token(36)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_14() {
if (jj_scan_token(NOT)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_60() {
if (jj_scan_token(34)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3R_47() {
if (jj_scan_token(IS)) return true;
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(NULL)) return true;
return false;
}
private boolean jj_3R_13() {
if (jj_scan_token(37)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_33() {
if (jj_scan_token(NULL)) return true;
return false;
}
private boolean jj_3_1() {
if (jj_scan_token(IS)) return true;
if (jj_scan_token(NULL)) return true;
return false;
}
private boolean jj_3R_12() {
if (jj_scan_token(36)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_46() {
if (jj_scan_token(28)) return true;
if (jj_3R_41()) return true;
return false;
}
private boolean jj_3R_10() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_12()) {
jj_scanpos = xsp;
if (jj_3R_13()) {
jj_scanpos = xsp;
if (jj_3R_14()) {
jj_scanpos = xsp;
if (jj_3R_15()) return true;
}
}
}
return false;
}
private boolean jj_3R_32() {
if (jj_scan_token(FALSE)) return true;
return false;
}
private boolean jj_3R_55() {
if (jj_scan_token(IN)) return true;
if (jj_scan_token(33)) return true;
if (jj_3R_38()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_60()) { jj_scanpos = xsp; break; }
}
if (jj_scan_token(35)) return true;
return false;
}
private boolean jj_3R_45() {
if (jj_scan_token(27)) return true;
if (jj_3R_41()) return true;
return false;
}
private boolean jj_3R_42() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_45()) {
jj_scanpos = xsp;
if (jj_3R_46()) {
jj_scanpos = xsp;
if (jj_3_1()) {
jj_scanpos = xsp;
if (jj_3R_47()) return true;
}
}
}
return false;
}
private boolean jj_3R_31() {
if (jj_scan_token(TRUE)) return true;
return false;
}
private boolean jj_3_3() {
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(BETWEEN)) return true;
if (jj_3R_43()) return true;
if (jj_scan_token(AND)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_18() {
if (jj_scan_token(40)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_30() {
if (jj_scan_token(FLOATING_POINT_LITERAL)) return true;
return false;
}
private boolean jj_3R_54() {
if (jj_scan_token(BETWEEN)) return true;
if (jj_3R_43()) return true;
if (jj_scan_token(AND)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_39() {
if (jj_3R_41()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_42()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_17() {
if (jj_scan_token(39)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_29() {
if (jj_scan_token(OCTAL_LITERAL)) return true;
return false;
}
private boolean jj_3R_58() {
if (jj_scan_token(ESCAPE)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3_2() {
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(LIKE)) return true;
if (jj_3R_38()) return true;
Token xsp;
xsp = jj_scanpos;
if (jj_3R_59()) jj_scanpos = xsp;
return false;
}
private boolean jj_3R_16() {
if (jj_scan_token(38)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_11() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_16()) {
jj_scanpos = xsp;
if (jj_3R_17()) {
jj_scanpos = xsp;
if (jj_3R_18()) return true;
}
}
return false;
}
private boolean jj_3R_40() {
if (jj_scan_token(AND)) return true;
if (jj_3R_39()) return true;
return false;
}
private boolean jj_3R_28() {
if (jj_scan_token(HEX_LITERAL)) return true;
return false;
}
private boolean jj_3R_9() {
if (jj_3R_10()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_11()) { jj_scanpos = xsp; break; }
}
return false;
}
/** Generated Token Manager. */
public SelectorParserImplTokenManager token_source;
SimpleCharStream jj_input_stream;
/** Current token. */
public Token token;
/** Next token. */
public Token jj_nt;
private int jj_ntk;
private Token jj_scanpos, jj_lastpos;
private int jj_la;
/** Constructor with InputStream. */
public SelectorParserImpl(java.io.InputStream stream) {
this(stream, null);
}
/** Constructor with InputStream and supplied encoding */
public SelectorParserImpl(java.io.InputStream stream, String encoding) {
try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
token_source = new SelectorParserImplTokenManager(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Reinitialise. */
public void ReInit(java.io.InputStream stream) {
ReInit(stream, null);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream stream, String encoding) {
try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
token_source.ReInit(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Constructor. */
public SelectorParserImpl(java.io.Reader stream) {
jj_input_stream = new SimpleCharStream(stream, 1, 1);
token_source = new SelectorParserImplTokenManager(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Reinitialise. */
public void ReInit(java.io.Reader stream) {
jj_input_stream.ReInit(stream, 1, 1);
token_source.ReInit(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Constructor with generated Token Manager. */
public SelectorParserImpl(SelectorParserImplTokenManager tm) {
token_source = tm;
token = new Token();
jj_ntk = -1;
}
/** Reinitialise. */
public void ReInit(SelectorParserImplTokenManager tm) {
token_source = tm;
token = new Token();
jj_ntk = -1;
}
private Token jj_consume_token(int kind) throws ParseException {
Token oldToken;
if ((oldToken = token).next != null) token = token.next;
else token = token.next = token_source.getNextToken();
jj_ntk = -1;
if (token.kind == kind) {
return token;
}
token = oldToken;
throw generateParseException();
}
static private final class LookaheadSuccess extends java.lang.Error { }
final private LookaheadSuccess jj_ls = new LookaheadSuccess();
private boolean jj_scan_token(int kind) {
if (jj_scanpos == jj_lastpos) {
jj_la--;
if (jj_scanpos.next == null) {
jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
} else {
jj_lastpos = jj_scanpos = jj_scanpos.next;
}
} else {
jj_scanpos = jj_scanpos.next;
}
if (jj_scanpos.kind != kind) return true;
if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
return false;
}
/** Get the next Token. */
final public Token getNextToken() {
if (token.next != null) token = token.next;
else token = token.next = token_source.getNextToken();
jj_ntk = -1;
return token;
}
/** Get the specific Token. */
final public Token getToken(int index) {
Token t = token;
for (int i = 0; i < index; i++) {
if (t.next != null) t = t.next;
else t = t.next = token_source.getNextToken();
}
return t;
}
private int jj_ntk() {
if ((jj_nt=token.next) == null)
return (jj_ntk = (token.next=token_source.getNextToken()).kind);
else
return (jj_ntk = jj_nt.kind);
}
/** Generate ParseException. */
public ParseException generateParseException() {
Token errortok = token.next;
int line = errortok.beginLine, column = errortok.beginColumn;
String mess = (errortok.kind == 0) ? tokenImage[0] : errortok.image;
return new ParseException("Parse error at line " + line + ", column " + column + ". Encountered: " + mess);
}
/** Enable tracing. */
final public void enable_tracing() {
}
/** Disable tracing. */
final public void disable_tracing() {
}
}
| avranju/qpid-jms | qpid-jms-client/src/main/java/org/apache/qpid/jms/selector/parser/SelectorParserImpl.java | Java | apache-2.0 | 30,987 |
package com.mentor.nucleus.bp.model.compare.contentmergeviewer;
//=====================================================================
//
//File: $RCSfile: ModelMergeViewer.java,v $
//Version: $Revision: 1.2 $
//Modified: $Date: 2013/01/17 03:35:34 $
//
//(c) Copyright 2013-2014 by Mentor Graphics Corp. All rights reserved.
//
//=====================================================================
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
//=====================================================================
import org.eclipse.jface.viewers.Viewer;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import com.mentor.nucleus.bp.core.Ooaofooa;
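/**
* Base class for the viewers shown in the model content merge editor. Subclasses create the SWT
* control and supply a title; the LEFT, RIGHT and ANCESTOR constants identify which side of the
* comparison a viewer instance displays, and compareRoot is the Ooaofooa root used for the comparison.
*/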
public abstract class ModelMergeViewer extends Viewer {
public static int LEFT = 0;
public static int RIGHT = 1;
public static int ANCESTOR = 2;
private Object key;
private int type;
private Ooaofooa compareRoot;
public abstract Control createControl(Composite parent);
public Object getKey() {
return key;
}
public int getType() {
return type;
}
public Ooaofooa getCompareRoot() {
return compareRoot;
}
public void setKey(Object key) {
this.key = key;
}
public void setType(int type) {
this.type = type;
}
public void setCompareRoot(Ooaofooa compareRoot) {
this.compareRoot = compareRoot;
}
public abstract String getTitle();
}
| HebaKhaled/bposs | src/com.mentor.nucleus.bp.model.compare/src/com/mentor/nucleus/bp/model/compare/contentmergeviewer/ModelMergeViewer.java | Java | apache-2.0 | 1,964 |
/**
* Created by Daniel Eaton on 12/11/2016.
*/
import {Component} from "@angular/core";
import {Router} from "@angular/router";
@Component({
templateUrl: "./templates/settings.php"
})
export class SettingsComponent { }
| DannyEaton/growify | app/components/settings-component.ts | TypeScript | apache-2.0 | 225 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Spanner.Admin.Database.V1.Snippets
{
// [START spanner_v1_generated_DatabaseAdmin_ListDatabases_sync]
using Google.Api.Gax;
using Google.Cloud.Spanner.Admin.Database.V1;
using Google.Cloud.Spanner.Common.V1;
using System;
public sealed partial class GeneratedDatabaseAdminClientSnippets
{
/// <summary>Snippet for ListDatabases</summary>
/// <remarks>
/// This snippet has been automatically generated for illustrative purposes only.
/// It may require modifications to work in your environment.
/// </remarks>
public void ListDatabasesRequestObject()
{
// Create client
DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.Create();
// Initialize request argument(s)
ListDatabasesRequest request = new ListDatabasesRequest
{
ParentAsInstanceName = InstanceName.FromProjectInstance("[PROJECT]", "[INSTANCE]"),
};
// Make the request
PagedEnumerable<ListDatabasesResponse, Database> response = databaseAdminClient.ListDatabases(request);
// Iterate over all response items, lazily performing RPCs as required
foreach (Database item in response)
{
// Do something with each item
Console.WriteLine(item);
}
// Or iterate over pages (of server-defined size), performing one RPC per page
foreach (ListDatabasesResponse page in response.AsRawResponses())
{
// Do something with each page of items
Console.WriteLine("A page of results:");
foreach (Database item in page)
{
// Do something with each item
Console.WriteLine(item);
}
}
// Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
int pageSize = 10;
Page<Database> singlePage = response.ReadPage(pageSize);
// Do something with the page of items
Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
foreach (Database item in singlePage)
{
// Do something with each item
Console.WriteLine(item);
}
// Store the pageToken, for when the next page is required.
string nextPageToken = singlePage.NextPageToken;
}
}
// [END spanner_v1_generated_DatabaseAdmin_ListDatabases_sync]
}
| googleapis/google-cloud-dotnet | apis/Google.Cloud.Spanner.Admin.Database.V1/Google.Cloud.Spanner.Admin.Database.V1.GeneratedSnippets/DatabaseAdminClient.ListDatabasesRequestObjectSnippet.g.cs | C# | apache-2.0 | 3,278 |
/**
* <copyright>
* </copyright>
*
*
*/
package eu.hyvar.mspl.manifest.resource.hymanifest.ui;
/**
 * The BracketHandler is responsible for handling the input of brackets. It
 * automatically adds the closing bracket when the opening counterpart is entered
 * in an editor. It also ignores the input of a closing bracket if that bracket
 * was inserted automatically right before.
*/
public interface IHymanifestBracketHandler {
/**
* If a closing bracket was added right before, this method returns true.
*/
public boolean addedClosingBracket();
/**
* Returns the last closing bracket that was added automatically.
*/
public String getClosingBracket();
}
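// Illustrative sketch only (not part of the generated resource API): a minimal
// implementation that records the last automatically inserted closing bracket.
// The class name and the openingBracketInserted(..) hook are hypothetical; the real
// implementation is wired into the editor's key-stroke handling.
class HymanifestBracketHandlerSketch implements IHymanifestBracketHandler {
	private boolean addedClosingBracket;
	private String closingBracket;
	// Hypothetical hook: called after the handler auto-inserts a closing bracket.
	void openingBracketInserted(String closing) {
		this.closingBracket = closing;
		this.addedClosingBracket = true;
	}
	public boolean addedClosingBracket() {
		return addedClosingBracket;
	}
	public String getClosingBracket() {
		return closingBracket;
	}
}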
| HyVar/DarwinSPL | plugins/eu.hyvar.mspl.manifest.resource.hymanifest.ui/src-gen/eu/hyvar/mspl/manifest/resource/hymanifest/ui/IHymanifestBracketHandler.java | Java | apache-2.0 | 675 |
package org.apache.maven.plugins.site;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.commons.lang.StringUtils;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.Parameter;
/**
* Abstract base class for staging mojos.
*
* @author hboutemy
* @since 3.3
*/
public abstract class AbstractStagingMojo
extends AbstractDeployMojo
{
/**
* Top distribution management site url, for manual configuration when auto-calculated value
* doesn't match expectations. Relative module directory will be calculated from this url.
*/
@Parameter( property = "topSiteURL" )
protected String topSiteURL;
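    // Illustrative configuration only (values are placeholders): the parameter can be set
    // on the command line, e.g. "mvn site:stage -DtopSiteURL=https://example.org/site/",
    // or as <topSiteURL> in the plugin's <configuration> section of the POM.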
/**
* The String "staging/".
*/
protected static final String DEFAULT_STAGING_DIRECTORY = "staging/";
/**
* By default, staging mojos will get their top distribution management site url by getting top parent
     * with the same site, which is a good heuristic. But in case the default value doesn't match
* expectations, <code>topSiteURL</code> can be configured: it will be used instead.
*/
@Override
protected String determineTopDistributionManagementSiteUrl()
throws MojoExecutionException
{
return ( StringUtils.isEmpty( topSiteURL ) ) ? getSite( getTopLevelProject( project ) ).getUrl() : topSiteURL;
}
}
| khmarbaise/maven-plugins | maven-site-plugin/src/main/java/org/apache/maven/plugins/site/AbstractStagingMojo.java | Java | apache-2.0 | 2,132 |
'''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class RespirationConstants(object):
class ExpirationType(object):
PASSIVE = 0
ACTIVE = 1
| OpenCMISS/neon | src/opencmiss/neon/core/problems/constants.py | Python | apache-2.0 | 714 |
import { IServerGroup } from '@spinnaker/core';
import { ICloudFoundrySpace, ICloudFoundryDroplet } from 'cloudfoundry/domain';
import { ICloudFoundryInstance } from 'cloudfoundry/domain/ICloudFoundryInstance';
export interface ICloudFoundryServerGroup extends IServerGroup {
appsManagerUri?: string;
diskQuota: number;
healthCheckType: string;
healthCheckHttpEndpoint: string;
state: 'STARTED' | 'STOPPED';
instances: ICloudFoundryInstance[];
metricsUri?: string;
memory: number;
space: ICloudFoundrySpace;
droplet?: ICloudFoundryDroplet;
serviceInstances: ICloudFoundryServiceInstance[];
env: ICloudFoundryEnvVar[];
ciBuild: ICloudFoundryBuildInfo;
appArtifact: ICloudFoundryArtifactInfo;
pipelineId: string;
}
export interface ICloudFoundryServiceInstance {
name: string;
plan: string;
service: string;
tags?: string[];
}
export interface ICloudFoundryEnvVar {
key: string;
value: string;
}
export interface ICloudFoundryBuildInfo {
jobName: string;
jobNumber: string;
jobUrl: string;
}
export interface ICloudFoundryArtifactInfo {
name: string;
version: string;
url: string;
}
| sgarlick987/deck | app/scripts/modules/cloudfoundry/src/domain/ICloudFoundryServerGroup.ts | TypeScript | apache-2.0 | 1,141 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tamaya;
import static org.assertj.core.api.Assertions.fail;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class InvocationRecorder {
private List<Invocation> invocations = new ArrayList<>();
private Object record(Object instance, Method method, Object[] args) throws Throwable {
Invocation invocation = new Invocation(method.getName(), args);
this.invocations.add(invocation);
return method.invoke(instance, args);
}
public <T> T createProxy(Object instance, Class<T>... types) {
return (T) Proxy.newProxyInstance(
getClass().getClassLoader(), types,
(proxy,method,params) -> this.record(instance, method, params));
}
public void recordMethodCall(Object... params) {
Exception e = new Exception();
String methodName = e.getStackTrace()[1].getMethodName();
invocations.add(new Invocation(methodName, params));
}
public static final class Invocation{
public String methodName;
public Object[] params;
public Invocation(String methodName, Object[] params) {
this.methodName = methodName;
this.params = params;
}
}
public List<Invocation> getInvocations(){
return invocations;
}
public void assertInvocation(String method, Object... params){
for(Invocation invocation:invocations){
if(invocation.methodName.equals(method)){
if(Arrays.equals(invocation.params, params)){
return;
}
}
}
fail("No such invocation: "+method + Arrays.toString(params));
}
}
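// Usage sketch (illustrative only): record the calls made through a proxy and assert on them
// afterwards. MyService and service are hypothetical placeholders for any interface and instance.
//
//   InvocationRecorder recorder = new InvocationRecorder();
//   MyService proxied = recorder.createProxy(service, MyService.class);
//   proxied.doWork("input");
//   recorder.assertInvocation("doWork", "input");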
| apache/incubator-tamaya | code/api/src/test/java/org/apache/tamaya/InvocationRecorder.java | Java | apache-2.0 | 2,591 |
// Package pb provides underlying implementation for qy and mp
package pb
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// AccessTokenResponse stores the normal result of access token fetching.
type AccessTokenResponse struct {
AccessToken string `json:"access_token"`
ExpiresIn float64 `json:"expires_in"`
}
// AccessTokenErrorResponse stores the error result of access token fetching.
type AccessTokenErrorResponse struct {
Errcode string
Errmsg string
}
// FetchAccessToken provides underlying access token fetching implementation.
func FetchAccessToken(requestLine string) (string, float64, error) {
	resp, err := http.Get(requestLine)
	if err != nil {
		return "", 0.0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Previously a non-200 status returned a nil error; report it explicitly instead.
		return "", 0.0, fmt.Errorf("fetch access token: unexpected HTTP status %d", resp.StatusCode)
	}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", 0.0, err
}
	// JSON decoding
if bytes.Contains(body, []byte("access_token")) {
atr := AccessTokenResponse{}
err = json.Unmarshal(body, &atr)
if err != nil {
return "", 0.0, err
}
return atr.AccessToken, atr.ExpiresIn, nil
}
ater := AccessTokenErrorResponse{}
err = json.Unmarshal(body, &ater)
if err != nil {
return "", 0.0, err
}
return "", 0.0, fmt.Errorf("%s", ater.Errmsg)
}
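// Example usage (illustrative only; the URL is a placeholder — real callers in the
// qy/mp packages build the request line from their own appid/secret):
//
//	token, expiresIn, err := pb.FetchAccessToken(
//		"https://api.example.com/token?appid=APPID&secret=SECRET")
//	if err != nil {
//		// handle error
//	}
//	_ = token
//	_ = expiresIn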
| wmydz1/gowechat | pb/accesstoken.go | GO | apache-2.0 | 1,252 |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.screens.projecteditor.backend.server;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.guvnor.common.services.backend.metadata.MetadataServerSideService;
import org.guvnor.common.services.backend.util.CommentedOptionFactory;
import org.guvnor.common.services.project.backend.server.utils.POMContentHandler;
import org.guvnor.common.services.project.model.GAV;
import org.guvnor.common.services.project.model.MavenRepositoryMetadata;
import org.guvnor.common.services.project.model.MavenRepositorySource;
import org.guvnor.common.services.project.model.POM;
import org.guvnor.common.services.project.model.ProjectRepositories;
import org.guvnor.common.services.project.service.DeploymentMode;
import org.guvnor.common.services.project.service.GAVAlreadyExistsException;
import org.guvnor.common.services.project.service.ProjectRepositoriesService;
import org.guvnor.common.services.project.service.ProjectRepositoryResolver;
import org.guvnor.common.services.shared.metadata.model.Metadata;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.kie.workbench.common.screens.defaulteditor.service.DefaultEditorContent;
import org.kie.workbench.common.screens.defaulteditor.service.DefaultEditorService;
import org.kie.workbench.common.screens.projecteditor.service.PomEditorService;
import org.kie.workbench.common.services.shared.project.KieProject;
import org.kie.workbench.common.services.shared.project.KieProjectService;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.uberfire.backend.vfs.Path;
import org.uberfire.io.IOService;
import org.uberfire.java.nio.base.options.CommentedOption;
import org.uberfire.java.nio.file.FileSystem;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
@RunWith(MockitoJUnitRunner.class)
public class PomEditorServiceImplTest {
@Mock
private IOService ioService;
@Mock
private DefaultEditorService defaultEditorService;
@Mock
private MetadataServerSideService metadataService;
@Mock
private CommentedOptionFactory commentedOptionFactory;
@Mock
private KieProjectService projectService;
@Mock
private ProjectRepositoryResolver repositoryResolver;
@Mock
private ProjectRepositoriesService projectRepositoriesService;
@Mock
private Path pomPath;
@Mock
private Metadata metaData;
@Mock
private KieProject project;
@Mock
private POM pom;
@Mock
private Path projectRepositoriesPath;
private PomEditorService service;
private String pomPathUri = "default://p0/pom.xml";
private Map<String, Object> attributes = new HashMap<String, Object>();
private DefaultEditorContent content = new DefaultEditorContent();
private POMContentHandler pomContentHandler = new POMContentHandler();
private String pomXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
"<project xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n" +
"<modelVersion>4.0.0</modelVersion>\n" +
"<groupId>groupId</groupId>\n" +
"<artifactId>artifactId</artifactId>\n" +
"<version>0.0.1</version>\n" +
"<name>name</name>\n" +
"<description>description</description>\n" +
"</project>";
private String comment = "comment";
@BeforeClass
public static void setupSystemProperties() {
//These are not needed for the tests
System.setProperty( "org.uberfire.nio.git.daemon.enabled",
"false" );
System.setProperty( "org.uberfire.nio.git.ssh.enabled",
"false" );
System.setProperty( "org.uberfire.sys.repo.monitor.disabled",
"true" );
}
@Before
public void setup() {
service = new PomEditorServiceImpl( ioService,
defaultEditorService,
metadataService,
commentedOptionFactory,
projectService,
pomContentHandler,
repositoryResolver,
projectRepositoriesService );
when( pomPath.toURI() ).thenReturn( pomPathUri );
when( defaultEditorService.loadContent( pomPath ) ).thenReturn( content );
when( metadataService.setUpAttributes( eq( pomPath ),
any( Metadata.class ) ) ).thenReturn( attributes );
when( projectService.resolveProject( pomPath ) ).thenReturn( project );
when( project.getRepositoriesPath() ).thenReturn( projectRepositoriesPath );
when( project.getPom() ).thenReturn( pom );
}
@Test
public void testLoad() {
final DefaultEditorContent content = service.loadContent( pomPath );
assertNotNull( content );
assertEquals( this.content,
content );
}
@Test
public void testSaveNonClashingGAVChangeToGAV() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>();
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
resolvedRepositoriesCaptor.capture() ) ).thenReturn( Collections.EMPTY_SET );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.2" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
times( 1 ) ).load( projectRepositoriesPath );
verify( repositoryResolver,
times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ) );
final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
assertNotNull( resolvedRepositories );
assertEquals( 0,
resolvedRepositories.size() );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveNonClashingGAVNoChangeToGAV() {
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( projectRepositoriesPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveNonClashingGAVFilteredChangeToGAV() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
add( new ProjectRepositories.ProjectRepository( true,
new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) ) );
}};
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
resolvedRepositoriesCaptor.capture() ) ).thenReturn( Collections.EMPTY_SET );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.2" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
times( 1 ) ).load( projectRepositoriesPath );
verify( repositoryResolver,
times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
assertNotNull( resolvedRepositories );
assertEquals( 1,
resolvedRepositories.size() );
final MavenRepositoryMetadata repositoryMetadata = resolvedRepositories.get( 0 );
assertEquals( "local-id",
repositoryMetadata.getId() );
assertEquals( "local-url",
repositoryMetadata.getUrl() );
assertEquals( MavenRepositorySource.LOCAL,
repositoryMetadata.getSource() );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveNonClashingGAVFilteredNoChangeToGAV() {
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( projectRepositoriesPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveClashingGAVChangeToGAV() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
add( new ProjectRepositories.ProjectRepository( true,
new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) ) );
}};
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final Set<MavenRepositoryMetadata> clashingRepositories = new HashSet<MavenRepositoryMetadata>() {{
add( new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) );
}};
final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
resolvedRepositoriesCaptor.capture() ) ).thenReturn( clashingRepositories );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.2" ) );
try {
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
} catch ( GAVAlreadyExistsException e ) {
// This is expected! We catch here rather than let JUnit handle it with
// @Test(expected = GAVAlreadyExistsException.class) so we can verify
// that only the expected methods have been invoked.
} catch ( Exception e ) {
fail( e.getMessage() );
}
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
times( 1 ) ).load( projectRepositoriesPath );
verify( repositoryResolver,
times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
assertNotNull( resolvedRepositories );
assertEquals( 1,
resolvedRepositories.size() );
final MavenRepositoryMetadata repositoryMetadata = resolvedRepositories.get( 0 );
assertEquals( "local-id",
repositoryMetadata.getId() );
assertEquals( "local-url",
repositoryMetadata.getUrl() );
assertEquals( MavenRepositorySource.LOCAL,
repositoryMetadata.getSource() );
verify( ioService,
never() ).startBatch( any( FileSystem.class ) );
verify( ioService,
never() ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
never() ).endBatch();
}
@Test
public void testSaveClashingGAVNoChangeToGAV() {
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
try {
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
} catch ( GAVAlreadyExistsException e ) {
            // This should not be thrown if the GAV has not changed.
fail( e.getMessage() );
}
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( projectRepositoriesPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveClashingGAVForced() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
add( new ProjectRepositories.ProjectRepository( true,
new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) ) );
}};
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final Set<MavenRepositoryMetadata> clashingRepositories = new HashSet<MavenRepositoryMetadata>() {{
add( new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) );
}};
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) ) ).thenReturn( clashingRepositories );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
try {
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.FORCED );
} catch ( GAVAlreadyExistsException e ) {
fail( e.getMessage() );
}
verify( projectService,
never() ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( pomPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
}
| dgutierr/kie-wb-common | kie-wb-common-screens/kie-wb-common-project-editor/kie-wb-common-project-editor-backend/src/test/java/org/kie/workbench/common/screens/projecteditor/backend/server/PomEditorServiceImplTest.java | Java | apache-2.0 | 21,647 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.RetryingCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
import org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
import org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
import org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
import org.apache.hadoop.hbase.replication.WALEntryFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.util.StringUtils;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.protobuf.ServiceException;
/**
 * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} implementation
 * which receives the edits from the WAL and sends them to the secondary replicas
 * of the corresponding regions.
*/
@InterfaceAudience.Private
public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
private static final Log LOG = LogFactory.getLog(RegionReplicaReplicationEndpoint.class);
private Configuration conf;
private ClusterConnection connection;
// Reuse WALSplitter constructs as a WAL pipe
private PipelineController controller;
private RegionReplicaOutputSink outputSink;
private EntryBuffers entryBuffers;
// Number of writer threads
private int numWriterThreads;
private int operationTimeout;
private ExecutorService pool;
@Override
public void init(Context context) throws IOException {
super.init(context);
this.conf = HBaseConfiguration.create(context.getConfiguration());
String codecClassName = conf
.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
this.numWriterThreads = this.conf.getInt(
"hbase.region.replica.replication.writer.threads", 3);
controller = new PipelineController();
entryBuffers = new EntryBuffers(controller,
this.conf.getInt("hbase.region.replica.replication.buffersize",
128*1024*1024));
// use the regular RPC timeout for replica replication RPC's
this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
}
@Override
protected void doStart() {
try {
connection = (ClusterConnection) HConnectionManager.createConnection(ctx.getConfiguration());
this.pool = getDefaultThreadPool(conf);
outputSink = new RegionReplicaOutputSink(controller, entryBuffers, connection, pool,
numWriterThreads, operationTimeout);
outputSink.startWriterThreads();
super.doStart();
} catch (IOException ex) {
LOG.warn("Received exception while creating connection :" + ex);
notifyFailed(ex);
}
}
@Override
protected void doStop() {
if (outputSink != null) {
try {
outputSink.finishWritingAndClose();
} catch (IOException ex) {
LOG.warn("Got exception while trying to close OutputSink");
LOG.warn(ex);
}
}
if (this.pool != null) {
this.pool.shutdownNow();
try {
// wait for 10 sec
boolean shutdown = this.pool.awaitTermination(10000, TimeUnit.MILLISECONDS);
if (!shutdown) {
LOG.warn("Failed to shutdown the thread pool after 10 seconds");
}
} catch (InterruptedException e) {
LOG.warn("Got interrupted while waiting for the thread pool to shut down" + e);
}
}
if (connection != null) {
try {
connection.close();
} catch (IOException ex) {
LOG.warn("Got exception closing connection :" + ex);
}
}
super.doStop();
}
/**
* Returns a Thread pool for the RPC's to region replicas. Similar to
* Connection's thread pool.
*/
private ExecutorService getDefaultThreadPool(Configuration conf) {
int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
int coreThreads = conf.getInt("hbase.region.replica.replication.threads.core", 16);
if (maxThreads == 0) {
maxThreads = Runtime.getRuntime().availableProcessors() * 8;
}
if (coreThreads == 0) {
coreThreads = Runtime.getRuntime().availableProcessors() * 8;
}
long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
LinkedBlockingQueue<Runnable> workQueue =
new LinkedBlockingQueue<Runnable>(maxThreads *
conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
ThreadPoolExecutor tpe = new ThreadPoolExecutor(
coreThreads,
maxThreads,
keepAliveTime,
TimeUnit.SECONDS,
workQueue,
Threads.newDaemonThreadFactory(this.getClass().toString() + "-rpc-shared-"));
tpe.allowCoreThreadTimeOut(true);
return tpe;
}
@Override
public boolean replicate(ReplicateContext replicateContext) {
/* A note on batching in RegionReplicaReplicationEndpoint (RRRE):
*
* RRRE relies on batching from two different mechanisms. The first is the batching from
* ReplicationSource since RRRE is a ReplicationEndpoint driven by RS. RS reads from a single
* WAL file filling up a buffer of heap size "replication.source.size.capacity"(64MB) or at most
* "replication.source.nb.capacity" entries or until it sees the end of file (in live tailing).
* Then RS passes all the buffered edits in this replicate() call context. RRRE puts the edits
* to the WALSplitter.EntryBuffers which is a blocking buffer space of up to
* "hbase.region.replica.replication.buffersize" (128MB) in size. This buffer splits the edits
* based on regions.
*
     * There are "hbase.region.replica.replication.writer.threads" (default 3) writer threads which
     * pick the largest per-region buffer and send it to the SinkWriter (see RegionReplicaOutputSink).
* The SinkWriter in this case will send the wal edits to all secondary region replicas in
* parallel via a retrying rpc call. EntryBuffers guarantees that while a buffer is
* being written to the sink, another buffer for the same region will not be made available to
     * writers, ensuring region edits are not replayed out of order.
*
* The replicate() call won't return until all the buffers are sent and ack'd by the sinks so
* that the replication can assume all edits are persisted. We may be able to do a better
* pipelining between the replication thread and output sinks later if it becomes a bottleneck.
*/
while (this.isRunning()) {
try {
for (Entry entry: replicateContext.getEntries()) {
entryBuffers.appendEntry(entry);
}
outputSink.flush(); // make sure everything is flushed
return true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
} catch (IOException e) {
        LOG.warn("Received IOException while trying to replicate "
+ StringUtils.stringifyException(e));
}
}
return false;
}
@Override
public boolean canReplicateToSameCluster() {
return true;
}
@Override
protected WALEntryFilter getScopeWALEntryFilter() {
// we do not care about scope. We replicate everything.
return null;
}
static class RegionReplicaOutputSink extends OutputSink {
private RegionReplicaSinkWriter sinkWriter;
public RegionReplicaOutputSink(PipelineController controller, EntryBuffers entryBuffers,
ClusterConnection connection, ExecutorService pool, int numWriters, int operationTimeout) {
super(controller, entryBuffers, numWriters);
this.sinkWriter = new RegionReplicaSinkWriter(this, connection, pool, operationTimeout);
}
@Override
public void append(RegionEntryBuffer buffer) throws IOException {
List<Entry> entries = buffer.getEntryBuffer();
if (entries.isEmpty() || entries.get(0).getEdit().getCells().isEmpty()) {
return;
}
sinkWriter.append(buffer.getTableName(), buffer.getEncodedRegionName(),
entries.get(0).getEdit().getCells().get(0).getRow(), entries);
}
@Override
public boolean flush() throws IOException {
// nothing much to do for now. Wait for the Writer threads to finish up
// append()'ing the data.
entryBuffers.waitUntilDrained();
return super.flush();
}
@Override
public List<Path> finishWritingAndClose() throws IOException {
finishWriting();
return null;
}
@Override
public Map<byte[], Long> getOutputCounts() {
return null; // only used in tests
}
@Override
public int getNumberOfRecoveredRegions() {
return 0;
}
AtomicLong getSkippedEditsCounter() {
return skippedEdits;
}
}
static class RegionReplicaSinkWriter extends SinkWriter {
RegionReplicaOutputSink sink;
ClusterConnection connection;
RpcControllerFactory rpcControllerFactory;
RpcRetryingCallerFactory rpcRetryingCallerFactory;
int operationTimeout;
ExecutorService pool;
Cache<TableName, Boolean> disabledAndDroppedTables;
public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection,
ExecutorService pool, int operationTimeout) {
this.sink = sink;
this.connection = connection;
this.operationTimeout = operationTimeout;
this.rpcRetryingCallerFactory
= RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
this.pool = pool;
int nonExistentTableCacheExpiryMs = connection.getConfiguration()
.getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
      // A cache for non-existent tables with a default expiry of 5 sec. This means that if the
      // table is created again with the same name, we might miss replicating edits for that amount of
      // time. But this cache prevents overloading meta requests for every edit from a deleted file.
disabledAndDroppedTables = CacheBuilder.newBuilder()
.expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
.initialCapacity(10)
.maximumSize(1000)
.build();
}
public void append(TableName tableName, byte[] encodedRegionName, byte[] row,
List<Entry> entries) throws IOException {
if (disabledAndDroppedTables.getIfPresent(tableName) != null) {
sink.getSkippedEditsCounter().incrementAndGet();
return;
}
// get the replicas of the primary region
RegionLocations locations = null;
try {
locations = getRegionLocations(connection, tableName, row, true, 0);
if (locations == null) {
throw new HBaseIOException("Cannot locate locations for "
+ tableName + ", row:" + Bytes.toStringBinary(row));
}
} catch (TableNotFoundException e) {
disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored
// skip this entry
sink.getSkippedEditsCounter().addAndGet(entries.size());
return;
}
if (locations.size() == 1) {
return;
}
ArrayList<Future<ReplicateWALEntryResponse>> tasks
= new ArrayList<Future<ReplicateWALEntryResponse>>(2);
// check whether we should still replay this entry. If the regions are changed, or the
      // entry is not coming from the primary region, filter it out.
HRegionLocation primaryLocation = locations.getDefaultRegionLocation();
if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(),
encodedRegionName)) {
sink.getSkippedEditsCounter().addAndGet(entries.size());
return;
}
// All passed entries should belong to one region because it is coming from the EntryBuffers
// split per region. But the regions might split and merge (unlike log recovery case).
for (int replicaId = 0; replicaId < locations.size(); replicaId++) {
HRegionLocation location = locations.getRegionLocation(replicaId);
if (!RegionReplicaUtil.isDefaultReplica(replicaId)) {
HRegionInfo regionInfo = location == null
? RegionReplicaUtil.getRegionInfoForReplica(
locations.getDefaultRegionLocation().getRegionInfo(), replicaId)
: location.getRegionInfo();
RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
rpcControllerFactory, tableName, location, regionInfo, row, entries,
sink.getSkippedEditsCounter());
Future<ReplicateWALEntryResponse> task = pool.submit(
new RetryingRpcCallable<ReplicateWALEntryResponse>(rpcRetryingCallerFactory,
callable, operationTimeout));
tasks.add(task);
}
}
boolean tasksCancelled = false;
for (Future<ReplicateWALEntryResponse> task : tasks) {
try {
task.get();
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof IOException) {
// The table can be disabled or dropped at this time. For disabled tables, we have no
// cheap mechanism to detect this case because meta does not contain this information.
// HConnection.isTableDisabled() is a zk call which we cannot do for every replay RPC.
// So instead we start the replay RPC with retries and
// check whether the table is dropped or disabled which might cause
// SocketTimeoutException, or RetriesExhaustedException or similar if we get IOE.
if (cause instanceof TableNotFoundException || connection.isTableDisabled(tableName)) {
disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later.
if (!tasksCancelled) {
sink.getSkippedEditsCounter().addAndGet(entries.size());
tasksCancelled = true; // so that we do not add to skipped counter again
}
continue;
}
// otherwise rethrow
throw (IOException)cause;
}
// unexpected exception
throw new IOException(cause);
}
}
}
}
static class RetryingRpcCallable<V> implements Callable<V> {
RpcRetryingCallerFactory factory;
RetryingCallable<V> callable;
int timeout;
public RetryingRpcCallable(RpcRetryingCallerFactory factory, RetryingCallable<V> callable,
int timeout) {
this.factory = factory;
this.callable = callable;
this.timeout = timeout;
}
@Override
public V call() throws Exception {
return factory.<V>newCaller().callWithRetries(callable, timeout);
}
}
/**
* Calls replay on the passed edits for the given set of entries belonging to the region. It skips
* the entry if the region boundaries have changed or the region is gone.
*/
static class RegionReplicaReplayCallable
extends RegionAdminServiceCallable<ReplicateWALEntryResponse> {
// replicaId of the region replica that we want to replicate to
private final int replicaId;
private final List<Entry> entries;
private final byte[] initialEncodedRegionName;
private final AtomicLong skippedEntries;
private final RpcControllerFactory rpcControllerFactory;
private boolean skip;
public RegionReplicaReplayCallable(ClusterConnection connection,
RpcControllerFactory rpcControllerFactory, TableName tableName,
HRegionLocation location, HRegionInfo regionInfo, byte[] row,List<Entry> entries,
AtomicLong skippedEntries) {
super(connection, location, tableName, row);
this.replicaId = regionInfo.getReplicaId();
this.entries = entries;
this.rpcControllerFactory = rpcControllerFactory;
this.skippedEntries = skippedEntries;
this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
}
@Override
public HRegionLocation getLocation(boolean useCache) throws IOException {
RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId);
if (rl == null) {
throw new HBaseIOException(getExceptionMessage());
}
location = rl.getRegionLocation(replicaId);
if (location == null) {
throw new HBaseIOException(getExceptionMessage());
}
// check whether we should still replay this entry. If the regions are changed, or the
      // entry is not coming from the primary region, filter it out because we do not need it.
// Regions can change because of (1) region split (2) region merge (3) table recreated
if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(),
initialEncodedRegionName)) {
skip = true;
return null;
}
return location;
}
@Override
public ReplicateWALEntryResponse call(int timeout) throws IOException {
return replayToServer(this.entries, timeout);
}
private ReplicateWALEntryResponse replayToServer(List<Entry> entries, int timeout)
throws IOException {
if (entries.isEmpty() || skip) {
skippedEntries.incrementAndGet();
return ReplicateWALEntryResponse.newBuilder().build();
}
Entry[] entriesArray = new Entry[entries.size()];
entriesArray = entries.toArray(entriesArray);
// set the region name for the target region replica
Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
ReplicationProtbufUtil.buildReplicateWALEntryRequest(
entriesArray, location.getRegionInfo().getEncodedNameAsBytes());
try {
PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond());
controller.setCallTimeout(timeout);
controller.setPriority(tableName);
return stub.replay(controller, p.getFirst());
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
}
@Override
protected String getExceptionMessage() {
return super.getExceptionMessage() + " table=" + tableName
+ " ,replica=" + replicaId + ", row=" + Bytes.toStringBinary(row);
}
}
private static RegionLocations getRegionLocations(
ClusterConnection connection, TableName tableName, byte[] row,
boolean useCache, int replicaId)
throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
RegionLocations rl;
try {
rl = connection.locateRegion(tableName, row, useCache, true, replicaId);
} catch (DoNotRetryIOException e) {
throw e;
} catch (RetriesExhaustedException e) {
throw e;
} catch (InterruptedIOException e) {
throw e;
} catch (IOException e) {
throw new RetriesExhaustedException("Can't get the location", e);
}
if (rl == null) {
throw new RetriesExhaustedException("Can't get the locations");
}
return rl;
}
}
| drewpope/hbase | hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java | Java | apache-2.0 | 22,360 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.spring.initializr.actuate.stat;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.ClientInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.DependencyInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.ErrorStateInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.VersionInformation;
import io.spring.initializr.generator.version.Version;
import io.spring.initializr.metadata.InitializrMetadata;
import io.spring.initializr.web.project.ProjectFailedEvent;
import io.spring.initializr.web.project.ProjectRequest;
import io.spring.initializr.web.project.ProjectRequestEvent;
import io.spring.initializr.web.project.WebProjectRequest;
import io.spring.initializr.web.support.Agent;
import org.springframework.util.StringUtils;
/**
* Create {@link ProjectRequestDocument} instances.
*
* @author Stephane Nicoll
*/
public class ProjectRequestDocumentFactory {
public ProjectRequestDocument createDocument(ProjectRequestEvent event) {
InitializrMetadata metadata = event.getMetadata();
ProjectRequest request = event.getProjectRequest();
ProjectRequestDocument document = new ProjectRequestDocument();
document.setGenerationTimestamp(event.getTimestamp());
document.setGroupId(request.getGroupId());
document.setArtifactId(request.getArtifactId());
document.setPackageName(request.getPackageName());
document.setVersion(determineVersionInformation(request));
document.setClient(determineClientInformation(request));
document.setJavaVersion(request.getJavaVersion());
if (StringUtils.hasText(request.getJavaVersion())
&& metadata.getJavaVersions().get(request.getJavaVersion()) == null) {
document.triggerError().setJavaVersion(true);
}
document.setLanguage(request.getLanguage());
if (StringUtils.hasText(request.getLanguage()) && metadata.getLanguages().get(request.getLanguage()) == null) {
document.triggerError().setLanguage(true);
}
document.setPackaging(request.getPackaging());
if (StringUtils.hasText(request.getPackaging())
&& metadata.getPackagings().get(request.getPackaging()) == null) {
document.triggerError().setPackaging(true);
}
document.setType(request.getType());
document.setBuildSystem(determineBuildSystem(request));
if (StringUtils.hasText(request.getType()) && metadata.getTypes().get(request.getType()) == null) {
document.triggerError().setType(true);
}
// Let's not rely on the resolved dependencies here
List<String> dependencies = new ArrayList<>(request.getDependencies());
List<String> validDependencies = dependencies.stream()
.filter((id) -> metadata.getDependencies().get(id) != null).collect(Collectors.toList());
document.setDependencies(new DependencyInformation(validDependencies));
List<String> invalidDependencies = dependencies.stream().filter((id) -> (!validDependencies.contains(id)))
.collect(Collectors.toList());
if (!invalidDependencies.isEmpty()) {
document.triggerError().triggerInvalidDependencies(invalidDependencies);
}
// Let's make sure that the document is flagged as invalid no matter what
if (event instanceof ProjectFailedEvent) {
ErrorStateInformation errorState = document.triggerError();
ProjectFailedEvent failed = (ProjectFailedEvent) event;
if (failed.getCause() != null) {
errorState.setMessage(failed.getCause().getMessage());
}
}
return document;
}
private String determineBuildSystem(ProjectRequest request) {
String type = request.getType();
String[] elements = type.split("-");
return (elements.length == 2) ? elements[0] : null;
}
private VersionInformation determineVersionInformation(ProjectRequest request) {
Version version = Version.safeParse(request.getBootVersion());
if (version != null && version.getMajor() != null) {
return new VersionInformation(version);
}
return null;
}
private ClientInformation determineClientInformation(ProjectRequest request) {
if (request instanceof WebProjectRequest) {
WebProjectRequest webProjectRequest = (WebProjectRequest) request;
Agent agent = determineAgent(webProjectRequest);
String ip = determineIp(webProjectRequest);
String country = determineCountry(webProjectRequest);
if (agent != null || ip != null || country != null) {
return new ClientInformation(agent, ip, country);
}
}
return null;
}
private Agent determineAgent(WebProjectRequest request) {
String userAgent = (String) request.getParameters().get("user-agent");
if (StringUtils.hasText(userAgent)) {
return Agent.fromUserAgent(userAgent);
}
return null;
}
private String determineIp(WebProjectRequest request) {
String candidate = (String) request.getParameters().get("cf-connecting-ip");
return (StringUtils.hasText(candidate)) ? candidate : (String) request.getParameters().get("x-forwarded-for");
}
private String determineCountry(WebProjectRequest request) {
String candidate = (String) request.getParameters().get("cf-ipcountry");
if (StringUtils.hasText(candidate) && !"xx".equalsIgnoreCase(candidate)) {
return candidate;
}
return null;
}
}
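// Usage sketch (illustrative only): typically driven by a listener on project generation events.
//
//   ProjectRequestDocumentFactory factory = new ProjectRequestDocumentFactory();
//   ProjectRequestDocument document = factory.createDocument(event); // event: ProjectRequestEvent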
| snicoll/initializr | initializr-actuator/src/main/java/io/spring/initializr/actuate/stat/ProjectRequestDocumentFactory.java | Java | apache-2.0 | 5,855 |
// HTMLParser Library - A java-based parser for HTML
// http://htmlparser.org
// Copyright (C) 2006 Claude Duguay
//
// Revision Control Information
//
// $URL: https://svn.sourceforge.net/svnroot/htmlparser/trunk/lexer/src/main/java/org/htmlparser/util/ParserException.java $
// $Author: derrickoswald $
// $Date: 2006-09-16 10:44:17 -0400 (Sat, 16 Sep 2006) $
// $Revision: 4 $
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the Common Public License; either
// version 1.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Common Public License for more details.
//
// You should have received a copy of the Common Public License
// along with this library; if not, the license is available from
// the Open Source Initiative (OSI) website:
// http://opensource.org/licenses/cpl1.0.php
package org.htmlparser.util;
/**
* Library-specific support for chained exceptions.
*
* @see ChainedException
**/
public class ParserException
extends ChainedException
{
public ParserException() {}
public ParserException(String message)
{
super(message);
}
public ParserException(Throwable throwable)
{
super(throwable);
}
public ParserException(String message, Throwable throwable)
{
super(message, throwable);
}
}
| patrickfav/tuwien | master/swt workspace/HTMLParser/src/org/htmlparser/util/ParserException.java | Java | apache-2.0 | 1,512 |
package at.jku.sea.cloud.exceptions;
public class ArtifactNotPushOrPullableException extends RuntimeException {
private static final long serialVersionUID = 1L;
public ArtifactNotPushOrPullableException(final long aid) {
super("artifact (id=" + aid + ") references (type, package, project) existing only in WS");
}
} | OnurKirkizoglu/master_thesis | at.jku.sea.cloud/src/main/java/at/jku/sea/cloud/exceptions/ArtifactNotPushOrPullableException.java | Java | apache-2.0 | 328 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedAction;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
/**
* Tool which allows the standby node's storage directories to be bootstrapped
* by copying the latest namespace snapshot from the active namenode. This is
* used when first configuring an HA cluster.
*/
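// Typical invocation (per the HDFS HA setup guide; the wrapper script may differ
// by distribution): run "hdfs namenode -bootstrapStandby" on the standby host,
// optionally with the [-force] [-nonInteractive] [-skipSharedEditsCheck] flags
// parsed in run() below.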
@InterfaceAudience.Private
public class BootstrapStandby implements Tool, Configurable {
private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
private String nsId;
private String nnId;
private String otherNNId;
private URL otherHttpAddr;
private InetSocketAddress otherIpcAddr;
private Collection<URI> dirsToFormat;
private List<URI> editUrisToFormat;
private List<URI> sharedEditsUris;
private Configuration conf;
private boolean force = false;
private boolean interactive = true;
private boolean skipSharedEditsCheck = false;
// Exit/return codes.
static final int ERR_CODE_FAILED_CONNECT = 2;
static final int ERR_CODE_INVALID_VERSION = 3;
// Skip 4 - was used in previous versions, but no longer returned.
static final int ERR_CODE_ALREADY_FORMATTED = 5;
static final int ERR_CODE_LOGS_UNAVAILABLE = 6;
@Override
public int run(String[] args) throws Exception {
parseArgs(args);
parseConfAndFindOtherNN();
NameNode.checkAllowFormat(conf);
InetSocketAddress myAddr = NameNode.getAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());
return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
return doRun();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
}
private void parseArgs(String[] args) {
for (String arg : args) {
if ("-force".equals(arg)) {
force = true;
} else if ("-nonInteractive".equals(arg)) {
interactive = false;
} else if ("-skipSharedEditsCheck".equals(arg)) {
skipSharedEditsCheck = true;
} else {
printUsage();
throw new HadoopIllegalArgumentException(
"Illegal argument: " + arg);
}
}
}
private void printUsage() {
System.err.println("Usage: " + this.getClass().getSimpleName() +
" [-force] [-nonInteractive] [-skipSharedEditsCheck]");
}
private NamenodeProtocol createNNProtocolProxy()
throws IOException {
return NameNodeProxies.createNonHAProxy(getConf(),
otherIpcAddr, NamenodeProtocol.class,
UserGroupInformation.getLoginUser(), true)
.getProxy();
}
private int doRun() throws IOException {
NamenodeProtocol proxy = createNNProtocolProxy();
NamespaceInfo nsInfo;
boolean isUpgradeFinalized;
try {
nsInfo = proxy.versionRequest();
isUpgradeFinalized = proxy.isUpgradeFinalized();
} catch (IOException ioe) {
LOG.fatal("Unable to fetch namespace information from active NN at " +
otherIpcAddr + ": " + ioe.getMessage());
if (LOG.isDebugEnabled()) {
LOG.debug("Full exception trace", ioe);
}
return ERR_CODE_FAILED_CONNECT;
}
if (!checkLayoutVersion(nsInfo)) {
LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
+ ") does not match " + "this node's layout version ("
+ HdfsConstants.NAMENODE_LAYOUT_VERSION + ")");
return ERR_CODE_INVALID_VERSION;
}
System.out.println(
"=====================================================\n" +
"About to bootstrap Standby ID " + nnId + " from:\n" +
" Nameservice ID: " + nsId + "\n" +
" Other Namenode ID: " + otherNNId + "\n" +
" Other NN's HTTP address: " + otherHttpAddr + "\n" +
" Other NN's IPC address: " + otherIpcAddr + "\n" +
" Namespace ID: " + nsInfo.getNamespaceID() + "\n" +
" Block pool ID: " + nsInfo.getBlockPoolID() + "\n" +
" Cluster ID: " + nsInfo.getClusterID() + "\n" +
" Layout version: " + nsInfo.getLayoutVersion() + "\n" +
" isUpgradeFinalized: " + isUpgradeFinalized + "\n" +
"=====================================================");
NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
if (!isUpgradeFinalized) {
// the remote NameNode is in upgrade state, this NameNode should also
// create the previous directory. First prepare the upgrade and rename
// the current dir to previous.tmp.
LOG.info("The active NameNode is in Upgrade. " +
"Prepare the upgrade for the standby NameNode as well.");
if (!doPreUpgrade(storage, nsInfo)) {
return ERR_CODE_ALREADY_FORMATTED;
}
} else if (!format(storage, nsInfo)) { // prompt the user to format storage
return ERR_CODE_ALREADY_FORMATTED;
}
// download the fsimage from active namenode
int download = downloadImage(storage, proxy);
if (download != 0) {
return download;
}
// finish the upgrade: rename previous.tmp to previous
if (!isUpgradeFinalized) {
doUpgrade(storage);
}
return 0;
}
/**
   * Iterate over all the storage directories, checking whether each should be
   * formatted. Format the storage if necessary and allowed by the user.
   * @return true if the storage was formatted, false if the user declined
*/
private boolean format(NNStorage storage, NamespaceInfo nsInfo)
throws IOException {
// Check with the user before blowing away data.
if (!Storage.confirmFormat(storage.dirIterable(null), force, interactive)) {
storage.close();
return false;
} else {
// Format the storage (writes VERSION file)
storage.format(nsInfo);
return true;
}
}
/**
* This is called when using bootstrapStandby for HA upgrade. The SBN should
* also create previous directory so that later when it starts, it understands
* that the cluster is in the upgrade state. This function renames the old
* current directory to previous.tmp.
*/
private boolean doPreUpgrade(NNStorage storage, NamespaceInfo nsInfo)
throws IOException {
boolean isFormatted = false;
Map<StorageDirectory, StorageState> dataDirStates =
new HashMap<StorageDirectory, StorageState>();
try {
isFormatted = FSImage.recoverStorageDirs(StartupOption.UPGRADE, storage,
dataDirStates);
if (dataDirStates.values().contains(StorageState.NOT_FORMATTED)) {
// recoverStorageDirs returns true if there is a formatted directory
isFormatted = false;
System.err.println("The original storage directory is not formatted.");
}
} catch (InconsistentFSStateException e) {
      // The storage is in a bad state; log it and fall through so it gets formatted below.
LOG.warn("The storage directory is in an inconsistent state", e);
} finally {
storage.unlockAll();
}
// if there is InconsistentFSStateException or the storage is not formatted,
// format the storage. Although this format is done through the new
// software, since in HA setup the SBN is rolled back through
// "-bootstrapStandby", we should still be fine.
if (!isFormatted && !format(storage, nsInfo)) {
return false;
}
// make sure there is no previous directory
FSImage.checkUpgrade(storage);
// Do preUpgrade for each directory
for (Iterator<StorageDirectory> it = storage.dirIterator(false);
it.hasNext();) {
StorageDirectory sd = it.next();
try {
NNUpgradeUtil.renameCurToTmp(sd);
} catch (IOException e) {
LOG.error("Failed to move aside pre-upgrade storage " +
"in image directory " + sd.getRoot(), e);
throw e;
}
}
storage.setStorageInfo(nsInfo);
storage.setBlockPoolID(nsInfo.getBlockPoolID());
return true;
}
private void doUpgrade(NNStorage storage) throws IOException {
for (Iterator<StorageDirectory> it = storage.dirIterator(false);
it.hasNext();) {
StorageDirectory sd = it.next();
NNUpgradeUtil.doUpgrade(sd, storage);
}
}
private int downloadImage(NNStorage storage, NamenodeProtocol proxy)
throws IOException {
// Load the newly formatted image, using all of the directories
// (including shared edits)
final long imageTxId = proxy.getMostRecentCheckpointTxId();
final long curTxId = proxy.getTransactionID();
FSImage image = new FSImage(conf);
try {
image.getStorage().setStorageInfo(storage);
image.initEditLog(StartupOption.REGULAR);
assert image.getEditLog().isOpenForRead() :
"Expected edit log to be open for read";
// Ensure that we have enough edits already in the shared directory to
// start up from the last checkpoint on the active.
if (!skipSharedEditsCheck &&
!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
return ERR_CODE_LOGS_UNAVAILABLE;
}
image.getStorage().writeTransactionIdFileToStorage(curTxId);
// Download that checkpoint into our storage directories.
MD5Hash hash = TransferFsImage.downloadImageToStorage(
otherHttpAddr, imageTxId, storage, true);
image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
hash);
} catch (IOException ioe) {
image.close();
throw ioe;
}
return 0;
}
private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
long curTxIdOnOtherNode) {
if (imageTxId == curTxIdOnOtherNode) {
// The other node hasn't written any logs since the last checkpoint.
// This can be the case if the NN was freshly formatted as HA, and
// then started in standby mode, so it has no edit logs at all.
return true;
}
long firstTxIdInLogs = imageTxId + 1;
assert curTxIdOnOtherNode >= firstTxIdInLogs :
"first=" + firstTxIdInLogs + " onOtherNode=" + curTxIdOnOtherNode;
try {
Collection<EditLogInputStream> streams =
image.getEditLog().selectInputStreams(
firstTxIdInLogs, curTxIdOnOtherNode, null, true);
for (EditLogInputStream stream : streams) {
IOUtils.closeStream(stream);
}
return true;
} catch (IOException e) {
String msg = "Unable to read transaction ids " +
firstTxIdInLogs + "-" + curTxIdOnOtherNode +
" from the configured shared edits storage " +
Joiner.on(",").join(sharedEditsUris) + ". " +
"Please copy these logs into the shared edits storage " +
"or call saveNamespace on the active node.\n" +
"Error: " + e.getLocalizedMessage();
if (LOG.isDebugEnabled()) {
LOG.fatal(msg, e);
} else {
LOG.fatal(msg);
}
return false;
}
}
private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
return (nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION);
}
private void parseConfAndFindOtherNN() throws IOException {
Configuration conf = getConf();
nsId = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(conf, nsId)) {
throw new HadoopIllegalArgumentException(
"HA is not enabled for this namenode.");
}
nnId = HAUtil.getNameNodeId(conf, nsId);
NameNode.initializeGenericKeys(conf, nsId, nnId);
if (!HAUtil.usesSharedEditsDir(conf)) {
throw new HadoopIllegalArgumentException(
"Shared edits storage is not enabled for this namenode.");
}
Configuration otherNode = HAUtil.getConfForOtherNode(conf);
otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
!otherIpcAddr.getAddress().isAnyLocalAddress(),
"Could not determine valid IPC address for other NameNode (%s)" +
", got: %s", otherNNId, otherIpcAddr);
final String scheme = DFSUtil.getHttpClientScheme(conf);
otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
otherIpcAddr.getHostName(), otherNode, scheme).toURL();
dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
conf, false);
sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
@Override
public void setConf(Configuration conf) {
this.conf = DFSHAAdmin.addSecurityConfiguration(conf);
}
@Override
public Configuration getConf() {
return conf;
}
public static int run(String[] argv, Configuration conf) throws IOException {
BootstrapStandby bs = new BootstrapStandby();
bs.setConf(conf);
try {
return ToolRunner.run(bs, argv);
} catch (Exception e) {
if (e instanceof IOException) {
throw (IOException)e;
} else {
throw new IOException(e);
}
}
}
}
| tecknowledgeable/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java | Java | apache-2.0 | 16,024 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
class SnapshotTest(base.EC2TestCase):
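    # Pattern used throughout these tests: register a cleanup immediately after
    # creating a resource, then cancel that cleanup once the test has deleted the
    # resource itself and waited for the deletion to complete.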
def test_create_delete_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
def test_describe_snapshots(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
ownerId = data['OwnerId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.describe_snapshots(SnapshotIds=[snapshot_id])
self.assertEqual(1, len(data['Snapshots']))
data = data['Snapshots'][0]
self.assertEqual(snapshot_id, data['SnapshotId'])
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.describe_snapshots(OwnerIds=[ownerId])
data = [s for s in data['Snapshots'] if s['SnapshotId'] == snapshot_id]
self.assertEqual(1, len(data))
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
self.assertRaises('InvalidSnapshot.NotFound',
self.client.describe_snapshots,
SnapshotIds=[snapshot_id])
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
def test_create_volume_from_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
vol1 = data
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
kwargs = {
'SnapshotId': snapshot_id,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id2 = data['VolumeId']
clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id2)
self.get_volume_waiter().wait_available(volume_id2)
self.assertNotEqual(volume_id, volume_id2)
self.assertEqual(vol1['Size'], data['Size'])
self.assertEqual(snapshot_id, data['SnapshotId'])
data = self.client.describe_volumes(
Filters=[{'Name': 'snapshot-id', 'Values': [snapshot_id]}])
self.assertEqual(1, len(data['Volumes']))
self.assertEqual(volume_id2, data['Volumes'][0]['VolumeId'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_volume(VolumeId=volume_id2)
self.cancelResourceCleanUp(clean_vol2)
self.get_volume_waiter().wait_delete(volume_id2)
def test_create_increased_volume_from_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
kwargs = {
'Size': 2,
'SnapshotId': snapshot_id,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id2 = data['VolumeId']
clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id2)
self.get_volume_waiter().wait_available(volume_id2)
self.assertNotEqual(volume_id, volume_id2)
self.assertEqual(2, data['Size'])
self.assertEqual(snapshot_id, data['SnapshotId'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_volume(VolumeId=volume_id2)
self.cancelResourceCleanUp(clean_vol2)
self.get_volume_waiter().wait_delete(volume_id2)
@testtools.skipUnless(CONF.aws.run_incompatible_tests,
"Openstack can't delete volume with snapshots")
def test_delete_volume_with_snapshots(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
| vishnu-kumar/ec2-api | ec2api/tests/functional/api/test_snapshots.py | Python | apache-2.0 | 10,696 |
#include <stdio.h>
#include "ali_api_core.h"
#include "ali_string_utils.h"
#include "ali_rds.h"
#include "json/value.h"
#include "json/reader.h"
using namespace aliyun;
namespace {
void Json2Type(const Json::Value& value, std::string* item);
void Json2Type(const Json::Value& value, RdsReleaseInstancePublicConnectionResponseType* item);
template<typename T>
class Json2Array {
public:
Json2Array(const Json::Value& value, std::vector<T>* vec) {
if(!value.isArray()) {
return;
}
for(int i = 0; i < value.size(); i++) {
T val;
Json2Type(value[i], &val);
vec->push_back(val);
}
}
};
void Json2Type(const Json::Value& value, std::string* item) {
*item = value.asString();
}
void Json2Type(const Json::Value& value, RdsReleaseInstancePublicConnectionResponseType* item) {
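  // Intentionally a no-op: this response type defines no fields to populate.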
}
}
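// Usage sketch (illustrative only; the client construction and field values below
// are assumptions, not taken from this file):
//   aliyun::Rds rds = ...;  // an initialized RDS client
//   RdsReleaseInstancePublicConnectionRequestType req;
//   req.db_instance_id = "rm-example";
//   req.current_connection_string = "rm-example.mysql.rds.aliyuncs.com";
//   RdsReleaseInstancePublicConnectionResponseType resp;
//   RdsErrorInfo err;
//   int status = rds.ReleaseInstancePublicConnection(req, &resp, &err);
//   // status is the HTTP status code (200 on success) or -1 on connect/parse
//   // failures, in which case err.code/err.message describe the problem.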
int Rds::ReleaseInstancePublicConnection(const RdsReleaseInstancePublicConnectionRequestType& req,
RdsReleaseInstancePublicConnectionResponseType* response,
RdsErrorInfo* error_info) {
std::string str_response;
int status_code;
int ret = 0;
bool parse_success = false;
  std::string scheme = this->use_tls_ ? "https" : "http";
  AliRpcRequest* req_rpc = new AliRpcRequest(version_,
                                             appid_,
                                             secret_,
                                             scheme + "://" + host_);
if((!this->use_tls_) && this->proxy_host_ && this->proxy_host_[0]) {
req_rpc->SetHttpProxy( this->proxy_host_);
}
Json::Value val;
Json::Reader reader;
req_rpc->AddRequestQuery("Action","ReleaseInstancePublicConnection");
if(!req.owner_id.empty()) {
req_rpc->AddRequestQuery("OwnerId", req.owner_id);
}
if(!req.resource_owner_account.empty()) {
req_rpc->AddRequestQuery("ResourceOwnerAccount", req.resource_owner_account);
}
if(!req.resource_owner_id.empty()) {
req_rpc->AddRequestQuery("ResourceOwnerId", req.resource_owner_id);
}
if(!req.db_instance_id.empty()) {
req_rpc->AddRequestQuery("DBInstanceId", req.db_instance_id);
}
if(!req.current_connection_string.empty()) {
req_rpc->AddRequestQuery("CurrentConnectionString", req.current_connection_string);
}
if(!req.owner_account.empty()) {
req_rpc->AddRequestQuery("OwnerAccount", req.owner_account);
}
if(this->region_id_ && this->region_id_[0]) {
req_rpc->AddRequestQuery("RegionId", this->region_id_);
}
if(req_rpc->CommitRequest() != 0) {
if(error_info) {
error_info->code = "connect to host failed";
}
ret = -1;
goto out;
}
status_code = req_rpc->WaitResponseHeaderComplete();
req_rpc->ReadResponseBody(str_response);
if(status_code > 0 && !str_response.empty()){
parse_success = reader.parse(str_response, val);
}
if(!parse_success) {
if(error_info) {
error_info->code = "parse response failed";
}
ret = -1;
goto out;
}
if(status_code!= 200 && error_info && parse_success) {
error_info->request_id = val.isMember("RequestId") ? val["RequestId"].asString(): "";
error_info->code = val.isMember("Code") ? val["Code"].asString(): "";
error_info->host_id = val.isMember("HostId") ? val["HostId"].asString(): "";
error_info->message = val.isMember("Message") ? val["Message"].asString(): "";
}
if(status_code== 200 && response) {
Json2Type(val, response);
}
ret = status_code;
out:
delete req_rpc;
return ret;
}
| zcy421593/aliyun-openapi-cpp-sdk | aliyun-api-rds/2014-08-15/src/ali_rds_release_instance_public_connection.cc | C++ | apache-2.0 | 3,520 |
/**
* This package contains classes for mapping between Particles and Tuples.
*/
package nl.tno.sensorstorm.particlemapper; | sensorstorm/SensorStorm | SensorStorm/src/nl/tno/sensorstorm/particlemapper/package-info.java | Java | apache-2.0 | 128 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.udf.generic;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter.TimestampConverter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
/**
* GenericUDFAddMonths.
*
* Add a number of months to the date. The time part of the string will be
* ignored.
*
*/
@Description(name = "add_months",
value = "_FUNC_(start_date, num_months) - Returns the date that is num_months after start_date.",
extended = "start_date is a string in the format 'yyyy-MM-dd HH:mm:ss' or"
+ " 'yyyy-MM-dd'. num_months is a number. The time part of start_date is "
+ "ignored.\n"
+ "Example:\n " + " > SELECT _FUNC_('2009-08-31', 1) FROM src LIMIT 1;\n" + " '2009-09-30'")
public class GenericUDFAddMonths extends GenericUDF {
private transient SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
private transient TimestampConverter timestampConverter;
private transient Converter textConverter;
private transient Converter dateWritableConverter;
private transient Converter intWritableConverter;
private transient PrimitiveCategory inputType1;
private transient PrimitiveCategory inputType2;
private final Calendar calendar = Calendar.getInstance();
private final Text output = new Text();
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
if (arguments.length != 2) {
      throw new UDFArgumentLengthException("add_months() requires 2 arguments, got "
+ arguments.length);
}
if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
throw new UDFArgumentTypeException(0, "Only primitive type arguments are accepted but "
          + arguments[0].getTypeName() + " is passed as the first argument");
}
if (arguments[1].getCategory() != ObjectInspector.Category.PRIMITIVE) {
throw new UDFArgumentTypeException(1, "Only primitive type arguments are accepted but "
          + arguments[1].getTypeName() + " is passed as the second argument");
}
inputType1 = ((PrimitiveObjectInspector) arguments[0]).getPrimitiveCategory();
ObjectInspector outputOI = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
switch (inputType1) {
case STRING:
case VARCHAR:
case CHAR:
inputType1 = PrimitiveCategory.STRING;
textConverter = ObjectInspectorConverters.getConverter(
(PrimitiveObjectInspector) arguments[0],
PrimitiveObjectInspectorFactory.writableStringObjectInspector);
break;
case TIMESTAMP:
timestampConverter = new TimestampConverter((PrimitiveObjectInspector) arguments[0],
PrimitiveObjectInspectorFactory.writableTimestampObjectInspector);
break;
case DATE:
dateWritableConverter = ObjectInspectorConverters.getConverter(
(PrimitiveObjectInspector) arguments[0],
PrimitiveObjectInspectorFactory.writableDateObjectInspector);
break;
default:
throw new UDFArgumentTypeException(0,
"ADD_MONTHS() only takes STRING/TIMESTAMP/DATEWRITABLE types as first argument, got "
+ inputType1);
}
inputType2 = ((PrimitiveObjectInspector) arguments[1]).getPrimitiveCategory();
if (inputType2 != PrimitiveCategory.INT) {
throw new UDFArgumentTypeException(1,
"ADD_MONTHS() only takes INT types as second argument, got " + inputType2);
}
intWritableConverter = ObjectInspectorConverters.getConverter(
(PrimitiveObjectInspector) arguments[1],
PrimitiveObjectInspectorFactory.writableIntObjectInspector);
return outputOI;
}
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
if (arguments[0].get() == null) {
return null;
}
IntWritable toBeAdded = (IntWritable) intWritableConverter.convert(arguments[1].get());
if (toBeAdded == null) {
return null;
}
Date date;
switch (inputType1) {
case STRING:
String dateString = textConverter.convert(arguments[0].get()).toString();
try {
date = formatter.parse(dateString.toString());
} catch (ParseException e) {
return null;
}
break;
case TIMESTAMP:
Timestamp ts = ((TimestampWritable) timestampConverter.convert(arguments[0].get()))
.getTimestamp();
date = ts;
break;
case DATE:
DateWritable dw = (DateWritable) dateWritableConverter.convert(arguments[0].get());
date = dw.get();
break;
default:
throw new UDFArgumentTypeException(0,
"ADD_MONTHS() only takes STRING/TIMESTAMP/DATEWRITABLE types, got " + inputType1);
}
int numMonth = toBeAdded.get();
addMonth(date, numMonth);
Date newDate = calendar.getTime();
output.set(formatter.format(newDate));
return output;
}
@Override
public String getDisplayString(String[] children) {
return getStandardDisplayString("add_months", children);
}
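  // Mirrors Oracle-style ADD_MONTHS semantics: when the input date falls on the
  // last day of its month, the result is pinned to the last day of the target
  // month (e.g. 2009-02-28 plus one month yields 2009-03-31).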
protected Calendar addMonth(Date d, int numMonths) {
calendar.setTime(d);
    boolean lastDayOfMonth = isLastDayOfMonth(calendar);
    calendar.add(Calendar.MONTH, numMonths);
    if (lastDayOfMonth) {
int maxDd = calendar.getActualMaximum(Calendar.DAY_OF_MONTH);
calendar.set(Calendar.DAY_OF_MONTH, maxDd);
}
return calendar;
}
protected boolean isLastDayOfMonth(Calendar cal) {
int maxDd = cal.getActualMaximum(Calendar.DAY_OF_MONTH);
int dd = cal.get(Calendar.DAY_OF_MONTH);
return dd == maxDd;
}
}
| WANdisco/amplab-hive | ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAddMonths.java | Java | apache-2.0 | 7,518 |
using System;
using ProtoBuf.Services.Serialization;
namespace ProtoBuf.Services.WebAPI
{
public interface IProtoMetaProvider
{
string GetMetaData(Type type);
TypeMetaData FromJson(byte[] json);
}
} | maingi4/ProtoBuf.Services | ProtoBuf.Services.WebAPI/IProtoMetaProvider.cs | C# | apache-2.0 | 227 |
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta3
import (
"fmt"
"reflect"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
)
func addConversionFuncs() {
// Add non-generated conversion functions
err := api.Scheme.AddConversionFuncs(
convert_v1beta3_Container_To_api_Container,
convert_api_Container_To_v1beta3_Container,
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
// Add field conversion funcs.
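	// Field label funcs map versioned field selectors (e.g. "metadata.name" or
	// "status.phase" in list/watch requests) to internal field names; any label
	// not whitelisted below is rejected as unsupported.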
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Pod",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"status.phase",
"spec.host":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Node",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
case "spec.unschedulable":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "ReplicationController",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"status.replicas":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Event",
func(label, value string) (string, string, error) {
switch label {
case "involvedObject.kind",
"involvedObject.namespace",
"involvedObject.name",
"involvedObject.uid",
"involvedObject.apiVersion",
"involvedObject.resourceVersion",
"involvedObject.fieldPath",
"reason",
"source":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Namespace",
func(label, value string) (string, string, error) {
switch label {
case "status.phase":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Secret",
func(label, value string) (string, string, error) {
switch label {
case "type":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "ServiceAccount",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
}
func convert_v1beta3_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*Container))(in)
}
out.Name = in.Name
out.Image = in.Image
if in.Command != nil {
out.Command = make([]string, len(in.Command))
for i := range in.Command {
out.Command[i] = in.Command[i]
}
}
if in.Args != nil {
out.Args = make([]string, len(in.Args))
for i := range in.Args {
out.Args[i] = in.Args[i]
}
}
out.WorkingDir = in.WorkingDir
if in.Ports != nil {
out.Ports = make([]api.ContainerPort, len(in.Ports))
for i := range in.Ports {
if err := convert_v1beta3_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
}
if in.Env != nil {
out.Env = make([]api.EnvVar, len(in.Env))
for i := range in.Env {
if err := convert_v1beta3_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
return err
}
}
}
if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
return err
}
if in.VolumeMounts != nil {
out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
for i := range in.VolumeMounts {
if err := convert_v1beta3_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
return err
}
}
}
if in.LivenessProbe != nil {
out.LivenessProbe = new(api.Probe)
if err := convert_v1beta3_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
return err
}
} else {
out.LivenessProbe = nil
}
if in.ReadinessProbe != nil {
out.ReadinessProbe = new(api.Probe)
if err := convert_v1beta3_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
return err
}
} else {
out.ReadinessProbe = nil
}
if in.Lifecycle != nil {
out.Lifecycle = new(api.Lifecycle)
if err := convert_v1beta3_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
return err
}
} else {
out.Lifecycle = nil
}
out.TerminationMessagePath = in.TerminationMessagePath
out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
if in.SecurityContext != nil {
if in.SecurityContext.Capabilities != nil {
if !reflect.DeepEqual(in.SecurityContext.Capabilities.Add, in.Capabilities.Add) ||
!reflect.DeepEqual(in.SecurityContext.Capabilities.Drop, in.Capabilities.Drop) {
return fmt.Errorf("container capability settings do not match security context settings, cannot convert")
}
}
if in.SecurityContext.Privileged != nil {
if in.Privileged != *in.SecurityContext.Privileged {
return fmt.Errorf("container privileged settings do not match security context settings, cannot convert")
}
}
}
if in.SecurityContext != nil {
out.SecurityContext = new(api.SecurityContext)
if err := convert_v1beta3_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
return nil
}
func convert_api_Container_To_v1beta3_Container(in *api.Container, out *Container, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.Container))(in)
}
out.Name = in.Name
out.Image = in.Image
if in.Command != nil {
out.Command = make([]string, len(in.Command))
for i := range in.Command {
out.Command[i] = in.Command[i]
}
}
if in.Args != nil {
out.Args = make([]string, len(in.Args))
for i := range in.Args {
out.Args[i] = in.Args[i]
}
}
out.WorkingDir = in.WorkingDir
if in.Ports != nil {
out.Ports = make([]ContainerPort, len(in.Ports))
for i := range in.Ports {
if err := convert_api_ContainerPort_To_v1beta3_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
}
if in.Env != nil {
out.Env = make([]EnvVar, len(in.Env))
for i := range in.Env {
if err := convert_api_EnvVar_To_v1beta3_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
return err
}
}
}
if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
return err
}
if in.VolumeMounts != nil {
out.VolumeMounts = make([]VolumeMount, len(in.VolumeMounts))
for i := range in.VolumeMounts {
if err := convert_api_VolumeMount_To_v1beta3_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
return err
}
}
}
if in.LivenessProbe != nil {
out.LivenessProbe = new(Probe)
if err := convert_api_Probe_To_v1beta3_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
return err
}
} else {
out.LivenessProbe = nil
}
if in.ReadinessProbe != nil {
out.ReadinessProbe = new(Probe)
if err := convert_api_Probe_To_v1beta3_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
return err
}
} else {
out.ReadinessProbe = nil
}
if in.Lifecycle != nil {
out.Lifecycle = new(Lifecycle)
if err := convert_api_Lifecycle_To_v1beta3_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
return err
}
} else {
out.Lifecycle = nil
}
out.TerminationMessagePath = in.TerminationMessagePath
out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy)
if in.SecurityContext != nil {
out.SecurityContext = new(SecurityContext)
if err := convert_api_SecurityContext_To_v1beta3_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
	// now that we've converted, mirror the security context's privileged flag back onto the container field
if out.SecurityContext != nil && out.SecurityContext.Privileged != nil {
out.Privileged = *out.SecurityContext.Privileged
}
	// now that we've converted, mirror the security context's capabilities back onto the container field
if out.SecurityContext != nil && out.SecurityContext.Capabilities != nil {
out.Capabilities = *out.SecurityContext.Capabilities
}
return nil
}
| bcbroussard/kubernetes | pkg/api/v1beta3/conversion.go | GO | apache-2.0 | 10,116 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.weex.ui.action;
import android.support.annotation.NonNull;
import android.support.annotation.RestrictTo;
import android.support.annotation.RestrictTo.Scope;
import android.support.annotation.WorkerThread;
import android.support.v4.util.ArrayMap;
import android.text.TextUtils;
import android.util.Log;
import org.apache.weex.BuildConfig;
import org.apache.weex.WXSDKInstance;
import org.apache.weex.WXSDKManager;
import org.apache.weex.common.WXErrorCode;
import org.apache.weex.dom.transition.WXTransition;
import org.apache.weex.performance.WXAnalyzerDataTransfer;
import org.apache.weex.performance.WXStateRecord;
import org.apache.weex.ui.component.WXComponent;
import org.apache.weex.ui.component.WXVContainer;
import org.apache.weex.utils.WXExceptionUtils;
import org.apache.weex.utils.WXLogUtils;
import org.apache.weex.utils.WXUtils;
import java.util.Arrays;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
public class GraphicActionAddElement extends GraphicActionAbstractAddElement {
private WXVContainer parent;
private WXComponent child;
private GraphicPosition layoutPosition;
private GraphicSize layoutSize;
private boolean isLayoutRTL;
public GraphicActionAddElement(@NonNull WXSDKInstance instance, String ref,
String componentType, String parentRef,
int index,
Map<String, String> style,
Map<String, String> attributes,
Set<String> events,
float[] margins,
float[] paddings,
float[] borders) {
super(instance, ref);
this.mComponentType = componentType;
this.mParentRef = parentRef;
this.mIndex = index;
this.mStyle = style;
this.mAttributes = attributes;
this.mEvents = events;
this.mPaddings = paddings;
this.mMargins = margins;
this.mBorders = borders;
if (instance.getContext() == null) {
return;
}
if (WXAnalyzerDataTransfer.isInteractionLogOpen()){
Log.d(WXAnalyzerDataTransfer.INTERACTION_TAG, "[client][addelementStart]"+instance.getInstanceId()+","+componentType+","+ref);
}
try {
parent = (WXVContainer) WXSDKManager.getInstance().getWXRenderManager()
.getWXComponent(getPageId(), mParentRef);
long start = WXUtils.getFixUnixTime();
BasicComponentData basicComponentData = new BasicComponentData(ref, mComponentType,
mParentRef);
child = createComponent(instance, parent, basicComponentData);
child.setTransition(WXTransition.fromMap(child.getStyles(), child));
long diff = WXUtils.getFixUnixTime()-start;
instance.getApmForInstance().componentCreateTime += diff;
if (null != parent && parent.isIgnoreInteraction){
child.isIgnoreInteraction = true;
}
if (!child.isIgnoreInteraction ){
Object flag = null;
if (null != child.getAttrs()){
flag = child.getAttrs().get("ignoreInteraction");
}
if ("false".equals(flag) || "0".equals(flag)){
child.isIgnoreInteraction = false;
}else if ("1".equals(flag) || "true".equals(flag) || child.isFixed()){
child.isIgnoreInteraction = true;
}
}
WXStateRecord.getInstance().recordAction(instance.getInstanceId(),"addElement");
} catch (ClassCastException e) {
Map<String, String> ext = new ArrayMap<>();
WXComponent parent = WXSDKManager.getInstance().getWXRenderManager()
.getWXComponent(getPageId(), mParentRef);
if (mStyle != null && !mStyle.isEmpty()) {
ext.put("child.style", mStyle.toString());
}
if (parent != null && parent.getStyles() != null && !parent.getStyles().isEmpty()) {
ext.put("parent.style", parent.getStyles().toString());
}
if (mAttributes != null && !mAttributes.isEmpty()) {
ext.put("child.attr", mAttributes.toString());
}
if (parent != null && parent.getAttrs() != null && !parent.getAttrs().isEmpty()) {
ext.put("parent.attr", parent.getAttrs().toString());
}
if (mEvents != null && !mEvents.isEmpty()) {
ext.put("child.event", mEvents.toString());
}
if (parent != null && parent.getEvents() != null && !parent.getEvents().isEmpty()) {
ext.put("parent.event", parent.getEvents().toString());
}
if (mMargins != null && mMargins.length > 0) {
ext.put("child.margin", Arrays.toString(mMargins));
}
if (parent != null && parent.getMargin() != null) {
ext.put("parent.margin", parent.getMargin().toString());
}
if (mPaddings != null && mPaddings.length > 0) {
ext.put("child.padding", Arrays.toString(mPaddings));
}
if (parent != null && parent.getPadding() != null) {
ext.put("parent.padding", parent.getPadding().toString());
}
if (mBorders != null && mBorders.length > 0) {
ext.put("child.border", Arrays.toString(mBorders));
}
if (parent != null && parent.getBorder() != null) {
ext.put("parent.border", parent.getBorder().toString());
}
WXExceptionUtils.commitCriticalExceptionRT(instance.getInstanceId(),
WXErrorCode.WX_RENDER_ERR_CONTAINER_TYPE,
"GraphicActionAddElement",
String.format(Locale.ENGLISH,"You are trying to add a %s to a %2$s, which is illegal as %2$s is not a container",
componentType,
WXSDKManager.getInstance().getWXRenderManager().getWXComponent(getPageId(), mParentRef).getComponentType()),
ext);
}
}
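  // The setters below run off the UI thread (hence @WorkerThread/@RestrictTo),
  // presumably invoked by the layout engine to stash the computed frame;
  // executeAction() later applies it to the component via setDemission().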
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setRTL(boolean isRTL){
this.isLayoutRTL = isRTL;
}
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setSize(GraphicSize graphicSize){
this.layoutSize = graphicSize;
}
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setPosition(GraphicPosition position){
this.layoutPosition = position;
}
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setIndex(int index){
mIndex = index;
}
@Override
public void executeAction() {
super.executeAction();
try {
      if (!TextUtils.equals(mComponentType, "video") && !TextUtils.equals(mComponentType, "videoplus")) {
        child.mIsAddElementToTree = true;
      }
long start = WXUtils.getFixUnixTime();
parent.addChild(child, mIndex);
parent.createChildViewAt(mIndex);
child.setIsLayoutRTL(isLayoutRTL);
if(layoutPosition !=null && layoutSize != null) {
child.setDemission(layoutSize, layoutPosition);
}
child.applyLayoutAndEvent(child);
child.bindData(child);
long diff = WXUtils.getFixUnixTime() - start;
if (null != getWXSDKIntance()){
getWXSDKIntance().getApmForInstance().viewCreateTime +=diff;
}
} catch (Exception e) {
WXLogUtils.e("add component failed.", e);
}
}
}
| alibaba/weex | android/sdk/src/main/java/org/apache/weex/ui/action/GraphicActionAddElement.java | Java | apache-2.0 | 7,884 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.ResourceManager.V3.Snippets
{
// [START cloudresourcemanager_v3_generated_TagKeys_TestIamPermissions_sync]
using Google.Api.Gax;
using Google.Cloud.Iam.V1;
using Google.Cloud.ResourceManager.V3;
public sealed partial class GeneratedTagKeysClientSnippets
{
/// <summary>Snippet for TestIamPermissions</summary>
/// <remarks>
/// This snippet has been automatically generated for illustrative purposes only.
/// It may require modifications to work in your environment.
/// </remarks>
public void TestIamPermissionsRequestObject()
{
// Create client
TagKeysClient tagKeysClient = TagKeysClient.Create();
// Initialize request argument(s)
TestIamPermissionsRequest request = new TestIamPermissionsRequest
{
ResourceAsResourceName = new UnparsedResourceName("a/wildcard/resource"),
Permissions = { "", },
};
// Make the request
TestIamPermissionsResponse response = tagKeysClient.TestIamPermissions(request);
}
}
// [END cloudresourcemanager_v3_generated_TagKeys_TestIamPermissions_sync]
}
| googleapis/google-cloud-dotnet | apis/Google.Cloud.ResourceManager.V3/Google.Cloud.ResourceManager.V3.GeneratedSnippets/TagKeysClient.TestIamPermissionsRequestObjectSnippet.g.cs | C# | apache-2.0 | 1,856 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.mallet;
import java.util.logging.Logger;
import java.util.Properties;
import java.io.*;
// Configuration parameters.
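// The constructor reads <baseDirectory>/conf/conf.properties. A minimal sketch of
// that file, with illustrative values only (the keys are the ones read below):
//   hiveServerHost=localhost
//   hiveServerPort=10000
//   numberOfStreams=4        (any even number >= 4)
//   scaleFactor=100          (one of 1,100,300,1000,3000,10000,30000,100000)
//   user=hdfs
//   malletDbDir=/user/hdfs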
public class Conf {
private static final Logger logger = Logger.getLogger(Conf.class.getName());
private static Conf conf = new Conf();
  private final String baseDirectory; // Base directory of this benchmark
private final String hiveServerHost;
private final String hiveServerPort;
private final int numberOfStreams;
private final String tpcDsToolDirectory;
private final String tempDirectory;
private final String malletDbDir;
private final int scale;
private final String user;
private boolean quickRunMode = false;
private boolean powerTestOnly = false;
private boolean singleQueryMode = false;
private int queryId;
private String dbSettings;
private String getProperty(Properties prop, String key) {
String value = prop.getProperty(key);
if (value == null) {
throw new ExceptionInInitializerError(key + " in conf file not found!");
}
return value;
}
private Conf() {
baseDirectory = System.getProperty("user.dir");
tempDirectory = System.getProperty("java.io.tmpdir");
tpcDsToolDirectory = baseDirectory + "/tools";
String confFile = baseDirectory + "/conf/conf.properties";
Properties prop = new Properties();
try {
FileInputStream in = new FileInputStream(confFile);
prop.load(in);
} catch (FileNotFoundException e) {
throw new ExceptionInInitializerError(e);
} catch (IOException e) {
throw new ExceptionInInitializerError(e);
}
hiveServerHost = getProperty(prop, "hiveServerHost");
hiveServerPort = getProperty(prop, "hiveServerPort");
numberOfStreams = Integer.parseInt(getProperty(prop, "numberOfStreams"));
// Multiple query streams are concurrently executed in a Throughput Test.
    // The number of streams must be an even number greater than or equal to 4.
    if (!(numberOfStreams >= 4 && ((numberOfStreams % 2) == 0))) {
      throw new ExceptionInInitializerError("Number of streams for Throughput Test must be an even number greater than or equal to 4.");
}
scale = Integer.parseInt(getProperty(prop, "scaleFactor"));
// Valid scale factors are 1,100,300,1000,3000,10000,30000,100000
int[] scaleFactors = {1, 100, 300, 1000, 3000, 10000, 30000, 100000};
int i;
for (i = 0; i < scaleFactors.length; i++) {
if (scale == scaleFactors[i]) {
break;
}
}
if (i >= scaleFactors.length) {
throw new ExceptionInInitializerError("Invalid scale factor.");
}
user = getProperty(prop, "user");
malletDbDir = getProperty(prop, "malletDbDir") + "/mallet/DATA";
}
public void parseCommandLine(String[] args) throws MalletException {
boolean argError = false;
for (int i = 0; i < args.length; i++) {
String arg = args[i];
if (arg.equalsIgnoreCase("--quickrun")) {
quickRunMode = true;
} else if (arg.equalsIgnoreCase("--powertest")) {
powerTestOnly = true;
} else if (arg.equalsIgnoreCase("--query")) {
powerTestOnly = true;
singleQueryMode = true;
if ((i + 1) >= args.length) {
argError = true;
break;
}
arg = args[i + 1];
try {
queryId = Integer.parseInt(arg);
} catch (NumberFormatException e) {
argError = true;
break;
}
if (queryId < 1 || queryId > 99) {
argError = true;
break;
}
i++;
} else {
argError = true;
break;
}
}
if (argError) {
throw new MalletException("Invalid command line arguments.");
}
}
public static Conf getConf() {
return conf;
}
public String getBaseDirectory() {
return baseDirectory;
}
public String getHiveServerHost() {
return hiveServerHost;
}
public String getHiveServerPort() {
return hiveServerPort;
}
public int getNumberOfStreams() {
return numberOfStreams;
}
public String getTpcDsToolDirectory() {
return tpcDsToolDirectory;
}
public String getTempDirectory() {
return tempDirectory;
}
public String getMalletDbDirectory() {
return malletDbDir;
}
public int getScale() {
return scale;
}
public String getUser() {
return user;
}
public boolean isQuickRunMode() {
return quickRunMode;
}
public boolean isPowerTestOnly() {
return powerTestOnly;
}
public boolean isSingleQueryMode() {
return singleQueryMode;
}
public int getQueryId() {
return queryId;
}
public String getDbSettings() {
if (dbSettings != null) {
return dbSettings;
}
String dbSettingsFile = getBaseDirectory() + "/conf/hive_settings.hql";
try {
dbSettings = Utility.readHqlFile(dbSettingsFile);
return dbSettings;
} catch (MalletException e) {
return null;
}
}
}
| wyg1990/Mallet | src/main/java/com/intel/mallet/Conf.java | Java | apache-2.0 | 5,783 |
package com.pacoapp.paco.ui;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.android.apps.paco.AccountChooser;
import com.google.android.gms.auth.GooglePlayServicesAvailabilityException;
import com.google.android.gms.auth.UserRecoverableAuthException;
import com.google.android.gms.common.ConnectionResult;
import com.google.android.gms.common.GooglePlayServicesUtil;
import com.pacoapp.paco.R;
import com.pacoapp.paco.UserPreferences;
import com.pacoapp.paco.net.AbstractAuthTokenTask;
import com.pacoapp.paco.net.GetAuthTokenInForeground;
import com.pacoapp.paco.net.NetworkClient;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.accounts.AccountManagerCallback;
import android.accounts.AccountManagerFuture;
import android.accounts.OperationCanceledException;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.Dialog;
import android.content.Context;
import android.content.Intent;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.os.Build;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.Toast;
public class SplashActivity extends Activity implements NetworkClient {
private static Logger Log = LoggerFactory.getLogger(SplashActivity.class);
public static final String EXTRA_ACCOUNTNAME = "extra_accountname";
public static final String EXTRA_CHANGING_EXISTING_ACCOUNT = "extra_changing_existing_account";
public static final int REQUEST_CODE_PICK_ACCOUNT = 1000;
public static final int REQUEST_CODE_RECOVER_FROM_AUTH_ERROR = 1001;
public static final int REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR = 1002;
protected static final int ACCOUNT_CHOOSER_REQUEST_CODE = 55;
private UserPreferences userPrefs;
private boolean changingExistingAccount;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.splash_screen);
Log.debug("SplashActivity onCreate()");
changingExistingAccount = getIntent().getBooleanExtra(EXTRA_CHANGING_EXISTING_ACCOUNT, false);
userPrefs = new UserPreferences(getApplicationContext());
Button loginButton = (Button)findViewById(R.id.loginButton);
loginButton.setOnClickListener(new View.OnClickListener() {
@SuppressLint("NewApi")
@Override
public void onClick(View v) {
authenticateUser();
}
});
}
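  // Overall sign-in flow implemented by the methods below:
  //   1. The login button triggers authenticateUser().
  //   2. With no stored account (or when changing accounts), pickUserAccount()
  //      launches the system account chooser.
  //   3. onActivityResult() stores the chosen account name and calls
  //      authenticateUser() again.
  //   4. GetAuthTokenInForeground (via getTask()) fetches the OAuth token;
  //      recoverable failures are routed through handleException() to the
  //      Google Play services resolution flow.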
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == REQUEST_CODE_PICK_ACCOUNT) {
if (resultCode == RESULT_OK) {
userPrefs.saveSelectedAccount(data.getStringExtra(AccountManager.KEY_ACCOUNT_NAME));
changingExistingAccount = false; // unset so that we don't loop in the picker forever
authenticateUser();
} else if (resultCode == RESULT_CANCELED) {
Toast.makeText(this, R.string.you_must_pick_an_account, Toast.LENGTH_SHORT).show();
}
} else if ((requestCode == REQUEST_CODE_RECOVER_FROM_AUTH_ERROR ||
requestCode == REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR)
&& resultCode == RESULT_OK) {
handleAuthorizeResult(resultCode, data);
return;
}
super.onActivityResult(requestCode, resultCode, data);
}
private void handleAuthorizeResult(int resultCode, Intent data) {
if (data == null) {
show("Unknown error, click the button again");
return;
}
if (resultCode == RESULT_OK) {
Log.info("Retrying");
getTask(this).execute();
return;
}
if (resultCode == RESULT_CANCELED) {
show("User rejected authorization.");
return;
}
show("Unknown error, click the button again");
}
protected void oldonActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (requestCode == ACCOUNT_CHOOSER_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
String accountName = null;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
accountName = data.getStringExtra(AccountManager.KEY_ACCOUNT_NAME);
} else {
accountName = data.getStringExtra(AccountChooser.ACCOUNT_NAME);
}
if (accountName != null) {
userPrefs.saveSelectedAccount(accountName);
getAuthAccessToken(accountName);
// String token = GoogleAuthUtil.getToken(this, accountName, PacoService.AUTH_TOKEN_TYPE_USERINFO_EMAIL);
// finish();
} else {
      finish(); // TODO handle errors
}
} else {
Toast.makeText(this, R.string.you_must_pick_an_account, Toast.LENGTH_SHORT).show();
}
}
private void getAuthAccessToken(final String accountName) {
AccountManager accountManager = AccountManager.get(this);
Account[] accounts = accountManager.getAccountsByType("com.google");
Account account = null;
for (Account currentAccount : accounts) {
if (currentAccount.name.equals(accountName)) {
account = currentAccount;
break;
}
}
String accessToken = getAccessToken();
if (accessToken != null) {
Log.info("Invalidating previous OAuth2 access token: " + accessToken);
accountManager.invalidateAuthToken(account.type, accessToken);
setAccessToken(null);
}
String authTokenType = AbstractAuthTokenTask.AUTH_TOKEN_TYPE_USERINFO_EMAIL;
Log.info("Get access token for " + accountName + " using authTokenType " + authTokenType);
accountManager.getAuthToken(account, authTokenType, null, this,
new AccountManagerCallback<Bundle>() {
@Override
public void run(AccountManagerFuture<Bundle> future) {
try {
String accessToken = future.getResult().getString(AccountManager.KEY_AUTHTOKEN);
Log.info("Got OAuth2 access token: " + accessToken);
setAccessToken(accessToken);
//
// Intent result = new Intent();
// result.putExtra(AccountChooser.ACCOUNT_NAME, accountName);
// SplashActivity.this.setResult(0, result);
SplashActivity.this.finish();
// finish();
} catch (OperationCanceledException e) {
Log.error("The user has denied you access to the API");
} catch (Exception e) {
Log.error(e.getMessage());
Log.error("Exception: ", e);
}
}
}, null);
}
private void setAccessToken(String token) {
userPrefs.setAccessToken(token);
}
private String getAccessToken() {
return userPrefs.getAccessToken();
}
@Override
protected void onResume() {
super.onResume();
//handle case of broken Google Play Services
// TODO remove when we get a build that properly incorporates Google Play Services and resources
// and can build an apk with < 64k methods for Android < 5.0 phones
int resultCode = GooglePlayServicesUtil.isGooglePlayServicesAvailable(getApplicationContext());
if (resultCode != ConnectionResult.SUCCESS) {
try {
// if the class that Paco doesn't provide is not on the system, don't
// use it to show an error dialog. Instead make a toast or dialog.
SplashActivity.this.getClassLoader().loadClass("com.google.android.gms.common.R$string");
Dialog dialog = GooglePlayServicesUtil.getErrorDialog(resultCode,
SplashActivity.this,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
dialog.show();
} catch (ClassNotFoundException e) {
Toast.makeText(getApplicationContext(),
"GooglePlayServices " + getString(R.string.are_not_available_) + " " + getString(R.string.error) + ":\n" + getGooglePlayConnectionErrorString(resultCode),
Toast.LENGTH_LONG).show();
}
} else {
if (changingExistingAccount) {
authenticateUser();
}
}
}
public void authenticateUser() {
if (userPrefs.getSelectedAccount() == null || changingExistingAccount) {
pickUserAccount();
} else {
if (isDeviceOnline()) {
getTask(this).execute();
} else {
Toast.makeText(this, getString(R.string.network_required), Toast.LENGTH_LONG).show();
}
}
}
private AbstractAuthTokenTask getTask(SplashActivity activity) {
return new GetAuthTokenInForeground(activity);
}
@SuppressLint("NewApi")
public void pickUserAccount() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
Account account = null;
if (userPrefs.getSelectedAccount() != null) {
account = getAccountFor(userPrefs.getSelectedAccount());
}
Intent intent = AccountManager.newChooseAccountIntent(account, null,
new String[]{"com.google"},
changingExistingAccount,
null,
AbstractAuthTokenTask.AUTH_TOKEN_TYPE_USERINFO_EMAIL,
null, null);
startActivityForResult(intent, REQUEST_CODE_PICK_ACCOUNT);
} else {
Intent intent = new Intent(SplashActivity.this, AccountChooser.class);
startActivityForResult(intent, REQUEST_CODE_PICK_ACCOUNT);
}
}
private Account getAccountFor(String selectedAccount) {
AccountManager am = AccountManager.get(this);
Account[] accounts = am.getAccountsByType("com.google");
for (Account account : accounts) {
if (account.name.equals(selectedAccount)) {
return account;
}
}
return null;
}
/** Checks whether the device currently has a network connection */
private boolean isDeviceOnline() {
ConnectivityManager connMgr = (ConnectivityManager) getSystemService(Context.CONNECTIVITY_SERVICE);
NetworkInfo networkInfo = connMgr.getActiveNetworkInfo();
if (networkInfo != null && networkInfo.isConnected()) {
return true;
}
return false;
}
public void show(final String message) {
runOnUiThread(new Runnable() {
@Override
public void run() {
        Toast.makeText(SplashActivity.this, message, Toast.LENGTH_LONG).show();
}
});
}
@Override
public void handleException(final Exception e) {
runOnUiThread(new Runnable() {
@Override
public void run() {
if (e instanceof GooglePlayServicesAvailabilityException) {
// The Google Play services APK is old, disabled, or not present.
// Show a dialog created by Google Play services that allows
// the user to update the APK
int statusCode = ((GooglePlayServicesAvailabilityException)e)
.getConnectionStatusCode();
try {
// TODO remove this when we can build Google Play Services in properly
// if the class that Paco doesn't provide is not on the system, don't
// use it to show an error dialog. Instead make a toast or dialog.
SplashActivity.this.getClassLoader().loadClass("com.google.android.gms.common.R$string");
Dialog dialog = GooglePlayServicesUtil.getErrorDialog(statusCode,
SplashActivity.this,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
dialog.show();
} catch (ClassNotFoundException e) {
String gpsError = getGooglePlayConnectionErrorString(statusCode);
Toast.makeText(getApplicationContext(),
getString(R.string.error) + ": " + gpsError,
Toast.LENGTH_LONG).show();
}
} else if (e instanceof UserRecoverableAuthException) {
// Unable to authenticate, such as when the user has not yet granted
// the app access to the account, but the user can fix this.
// Forward the user to an activity in Google Play services.
Intent intent = ((UserRecoverableAuthException)e).getIntent();
startActivityForResult(intent,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
}
}
});
}
public String getGooglePlayConnectionErrorString(int statusCode) {
String gpsError = "unknown";
switch(statusCode) {
case ConnectionResult.API_UNAVAILABLE:
gpsError = "API Unavailable";
break;
case ConnectionResult.CANCELED:
gpsError = "Canceled";
break;
case ConnectionResult.DEVELOPER_ERROR:
gpsError = "Developer Error";
break;
case ConnectionResult.INTERNAL_ERROR:
gpsError = "Internal error";
break;
case ConnectionResult.INTERRUPTED:
gpsError = "Interrupted";
break;
case ConnectionResult.INVALID_ACCOUNT:
gpsError = "Invalid Account";
break;
case ConnectionResult.LICENSE_CHECK_FAILED:
gpsError = "License Check Failed";
break;
case ConnectionResult.NETWORK_ERROR:
gpsError = "Network Error";
break;
case ConnectionResult.RESOLUTION_REQUIRED:
gpsError = "Resolution Required";
break;
case ConnectionResult.SERVICE_DISABLED:
gpsError = "Service Disabled";
break;
case ConnectionResult.SERVICE_INVALID:
gpsError = "Service Invalid";
break;
case ConnectionResult.SERVICE_MISSING:
gpsError = "Service Missing";
break;
case ConnectionResult.SERVICE_VERSION_UPDATE_REQUIRED:
gpsError = "Service version update required";
break;
case ConnectionResult.SIGN_IN_FAILED:
gpsError = "Sign in failed";
break;
case ConnectionResult.SIGN_IN_REQUIRED:
gpsError = "Sign in required";
break;
case ConnectionResult.SUCCESS:
gpsError = "Success";
break;
case ConnectionResult.TIMEOUT:
gpsError = "Timeout";
break;
default:
break;
}
return gpsError;
}
public void showAndFinish(String string) {
show(string);
finish();
}
@Override
public Context getContext() {
return this.getApplicationContext();
}
}
| google/paco | Paco/src/com/pacoapp/paco/ui/SplashActivity.java | Java | apache-2.0 | 14,782 |
/**
*
* Process Editor - Animation Package
*
* (C) 2009, 2010 inubit AG
* (C) 2014 the authors
*
*/
package com.inubit.research.animation;
import java.awt.Point;
import java.util.ArrayList;
import java.util.List;
import net.frapu.code.visualization.Configuration;
import net.frapu.code.visualization.LayoutUtils;
import net.frapu.code.visualization.ProcessEdge;
import net.frapu.code.visualization.ProcessEditor;
import net.frapu.code.visualization.ProcessModel;
import net.frapu.code.visualization.ProcessNode;
import net.frapu.code.visualization.ProcessObject;
import com.inubit.research.layouter.LayoutHelper;
import com.inubit.research.layouter.ProcessLayouter;
import com.inubit.research.layouter.WorkBenchSpecific.WorkbenchHandler;
import com.inubit.research.layouter.adapter.ProcessNodeAdapter;
import com.inubit.research.layouter.interfaces.AbstractModelAdapter;
/**
* @author ff
*
*/
public class LayoutingAnimator implements IAnimationListener {
/**
* Configuration Key values
*/
public static final String CONF_ANIMATION_SPEED = "LayouterAnimationSpeed";
private long start;
private ProcessLayouter f_layouter;
private int f_animationTime = -1;
private Animator animator;
private ProcessEditor f_editor;
private boolean f_layoutEdgesValue;
/**
*
*/
public LayoutingAnimator(ProcessLayouter layouter) {
f_layouter = layouter;
}
public ProcessLayouter getLayouter() {
return f_layouter;
}
/**
* Animates the layout of the model.
     * @param editor the process editor whose model is laid out
     * @param animList optional node animators whose target sizes are honored during layouting (may be null)
     * @param xstart the x coordinate to start the layout at
     * @param ystart the y coordinate to start the layout at
     * @param direction the layout direction
     * @throws Exception if the layouter fails
*/
public void layoutModelWithAnimation(ProcessEditor editor, List<NodeAnimator> animList, int xstart, int ystart, int direction)
throws Exception {
// Animator orgAnimator = editor.getAnimator().getAnimator();
// if (orgAnimator != null) {
// orgAnimator.setRunning(false);
// }
// animator = new Animator(null, 60);
// animator.start();
// animator.setParent(editor);
f_editor = editor;
animator = editor.getAnimator().getAnimator();
ProcessModel model = editor.getModel();
ProcessModel copy = model.clone();
ProcessNode _selNode = findNode(editor.getSelectionHandler().getLastSelectedNode(), copy);
if (_selNode != null) {
ProcessNodeAdapter selectedNode = new ProcessNodeAdapter(_selNode);
f_layouter.setSelectedNode(selectedNode);
} else {
f_layouter.setSelectedNode(null);
}
// Fix all sizes to final size
if (animList != null) {
for (NodeAnimator a : animList) {
if (a instanceof DefaultNodeAnimator) {
DefaultNodeAnimator defA = (DefaultNodeAnimator) a;
// Check if node is contained in copy
if (model.getNodes().contains(defA.getNode())) {
// If found, set target size for layouting
findNode(defA.getNode(), copy).setSize(defA.getNewSize().width, defA.getNewSize().height);
}
}
}
}
Point _offset = determinePartialLayoutingRegion(editor, copy);
AbstractModelAdapter modelI = LayoutUtils.getAdapter(copy);
f_layouter.layoutModel(modelI, xstart, ystart, 0);
WorkbenchHandler.postProcess(f_layouter, copy);
int _animationTime = f_animationTime;
if (_animationTime == -1) {
_animationTime = LayoutHelper.toInt(Configuration.getInstance().getProperty(CONF_ANIMATION_SPEED, "6000"), 6000);
}
//writing back coords to wrappers
ArrayList<NodeAnimator> wrappers = new ArrayList<NodeAnimator>();
for (ProcessNode n : editor.getModel().getNodes()) {
DefaultNodeAnimator w = new DefaultNodeAnimator(n, animator);
w.setAnimationTime(_animationTime);
ProcessNode dup = findNode(n, copy);
if (dup != null) {
Point _pos = applyPartialLayoutingOffsetToNode(_offset, dup);
w.setNewCoords(_pos);
w.setNewSize(dup.getSize());
wrappers.add(w);
}
}
for (ProcessEdge edge : editor.getModel().getEdges()) {
DefaultEdgeAnimator w = new DefaultEdgeAnimator(edge, animator);
w.setAnimationTime(_animationTime);
ProcessEdge _e = (ProcessEdge) copy.getObjectById(edge.getId());
if (copy.getEdges().contains(_e)) {
applyPartialLayoutingOffsetToEdge(_offset, _e);
w.transformTo(_e);
wrappers.add(w);
}
}
// Check if additional animation list @todo Refactor :-)
if (animList != null) {
for (NodeAnimator a : animList) {
if (wrappers.contains(a)) {
//Already contained, modify
NodeAnimator org = wrappers.get(wrappers.indexOf(a));
if (org instanceof DefaultNodeAnimator) {
DefaultNodeAnimator defOrg = (DefaultNodeAnimator) org;
defOrg.setNewSize(((DefaultNodeAnimator) a).getNewSize());
}
}
}
}
if (wrappers.size() > 0) {
wrappers.get(0).addListener(this);
start = System.nanoTime();
}
f_layoutEdgesValue = editor.isLayoutEdges();
editor.setLayoutEdges(false);
animator.setAnimationObjects(wrappers);
}
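    // Example (sketch): a typical caller drives an animated layout roughly like
    // this; the concrete ProcessLayouter and ProcessEditor instances are assumed
    // to come from the surrounding application.
    //
    //   LayoutingAnimator animator = new LayoutingAnimator(layouter);
    //   animator.setCustomAnimationTime(2000); // optional override of the configured speed
    //   animator.layoutModelWithAnimation(editor, null, 0, 0, 0);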
private void applyPartialLayoutingOffsetToEdge(Point _offset, ProcessEdge _e) {
if (_offset.x != Integer.MAX_VALUE) {
List<Point> _rps = _e.getRoutingPoints();
if (_rps.size() > 2) {
_rps.remove(0);
_rps.remove(_rps.size() - 1);
for (Point p : _rps) {
p.x += _offset.x;
p.y += _offset.y;
}
//setting new routing points
_e.clearRoutingPoints();
for (int i = 0; i < _rps.size(); i++) {
_e.addRoutingPoint(i, _rps.get(i));
}
}
}
}
private Point applyPartialLayoutingOffsetToNode(Point _offset, ProcessNode dup) {
Point _pos = dup.getPos();
if (_offset.x != Integer.MAX_VALUE) {
_pos.x += _offset.x;
_pos.y += _offset.y;
}
return _pos;
}
/**
     * Used for partial layouting (if just some nodes are selected).
     * @param editor the editor holding the current selection
     * @param copy the model copy that is reduced to the selected nodes and edges
     * @return the offset of the selected region, or (Integer.MAX_VALUE, Integer.MAX_VALUE) if no partial layouting applies
*/
private Point determinePartialLayoutingRegion(ProcessEditor editor,
ProcessModel copy) {
List<ProcessObject> _selectedNodes = editor.getSelectionHandler().getSelection();
Point _offset = new Point(Integer.MAX_VALUE, Integer.MAX_VALUE);
if (_selectedNodes.size() > 1) {
for (ProcessObject o : _selectedNodes) {
if (o instanceof ProcessNode) {
ProcessNode _n = (ProcessNode) o;
_offset.x = Math.min(_offset.x, _n.getPos().x - _n.getSize().width / 2);
_offset.y = Math.min(_offset.y, _n.getPos().y - _n.getSize().height / 2);
}
}
for (ProcessNode n : new ArrayList<ProcessNode>(copy.getNodes())) {
if (!_selectedNodes.contains(n)) {
copy.removeNode(n);
}
}
for (ProcessEdge e : new ArrayList<ProcessEdge>(copy.getEdges())) {
if (!_selectedNodes.contains(e)) {
copy.removeEdge(e);
}
}
}
return _offset;
}
@Override
public void animationFinished(NodeAnimator node) {
node.removeListener(this);
System.out.println("Animation took: " + (System.nanoTime() - start) / 1000000 + " ms");
f_editor.setLayoutEdges(f_layoutEdgesValue);
// Kill Animator thread
//animator.setRunning(false);
}
private ProcessNode findNode(ProcessNode original, ProcessModel copy) {
if (original != null) {
String _id = original.getProperty(ProcessNode.PROP_ID);
for (ProcessNode n : copy.getNodes()) {
if (n.getProperty(ProcessNode.PROP_ID).equals(_id)) {
return n;
}
}
}
return null;
}
/**
     * Can be used to override the user-set animation time for special occasions.
     * @param time the animation time to use
*/
public void setCustomAnimationTime(int time) {
f_animationTime = time;
}
}
| bptlab/processeditor | src/com/inubit/research/animation/LayoutingAnimator.java | Java | apache-2.0 | 9,066 |
//CHECKSTYLE:FileLength:OFF
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.Deque;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.vfs.FileName;
import org.apache.commons.vfs.FileObject;
import org.pentaho.di.cluster.SlaveServer;
import org.pentaho.di.core.BlockingBatchingRowSet;
import org.pentaho.di.core.BlockingRowSet;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.Counter;
import org.pentaho.di.core.ExecutorInterface;
import org.pentaho.di.core.ExtensionDataInterface;
import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.QueueRowSet;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.RowSet;
import org.pentaho.di.core.SingleRowRowSet;
import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.database.DatabaseTransactionListener;
import org.pentaho.di.core.database.map.DatabaseConnectionMap;
import org.pentaho.di.core.exception.KettleDatabaseException;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.exception.KettleFileException;
import org.pentaho.di.core.exception.KettleTransException;
import org.pentaho.di.core.exception.KettleValueException;
import org.pentaho.di.core.extension.ExtensionPointHandler;
import org.pentaho.di.core.extension.KettleExtensionPoint;
import org.pentaho.di.core.logging.ChannelLogTable;
import org.pentaho.di.core.logging.HasLogChannelInterface;
import org.pentaho.di.core.logging.KettleLogStore;
import org.pentaho.di.core.logging.LogChannel;
import org.pentaho.di.core.logging.LogChannelInterface;
import org.pentaho.di.core.logging.LogLevel;
import org.pentaho.di.core.logging.LogStatus;
import org.pentaho.di.core.logging.LoggingHierarchy;
import org.pentaho.di.core.logging.LoggingMetric;
import org.pentaho.di.core.logging.LoggingObjectInterface;
import org.pentaho.di.core.logging.LoggingObjectType;
import org.pentaho.di.core.logging.LoggingRegistry;
import org.pentaho.di.core.logging.Metrics;
import org.pentaho.di.core.logging.MetricsLogTable;
import org.pentaho.di.core.logging.MetricsRegistry;
import org.pentaho.di.core.logging.PerformanceLogTable;
import org.pentaho.di.core.logging.StepLogTable;
import org.pentaho.di.core.logging.TransLogTable;
import org.pentaho.di.core.metrics.MetricsDuration;
import org.pentaho.di.core.metrics.MetricsSnapshotInterface;
import org.pentaho.di.core.metrics.MetricsUtil;
import org.pentaho.di.core.parameters.DuplicateParamException;
import org.pentaho.di.core.parameters.NamedParams;
import org.pentaho.di.core.parameters.NamedParamsDefault;
import org.pentaho.di.core.parameters.UnknownParamException;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMeta;
import org.pentaho.di.core.util.EnvUtil;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.variables.Variables;
import org.pentaho.di.core.vfs.KettleVFS;
import org.pentaho.di.core.xml.XMLHandler;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.job.DelegationListener;
import org.pentaho.di.job.Job;
import org.pentaho.di.partition.PartitionSchema;
import org.pentaho.di.repository.ObjectId;
import org.pentaho.di.repository.ObjectRevision;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.repository.RepositoryDirectoryInterface;
import org.pentaho.di.resource.ResourceUtil;
import org.pentaho.di.resource.TopLevelResource;
import org.pentaho.di.trans.cluster.TransSplitter;
import org.pentaho.di.trans.performance.StepPerformanceSnapShot;
import org.pentaho.di.trans.step.BaseStep;
import org.pentaho.di.trans.step.BaseStepData.StepExecutionStatus;
import org.pentaho.di.trans.step.RunThread;
import org.pentaho.di.trans.step.StepAdapter;
import org.pentaho.di.trans.step.StepDataInterface;
import org.pentaho.di.trans.step.StepInitThread;
import org.pentaho.di.trans.step.StepInterface;
import org.pentaho.di.trans.step.StepListener;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.step.StepMetaDataCombi;
import org.pentaho.di.trans.step.StepPartitioningMeta;
import org.pentaho.di.trans.steps.mappinginput.MappingInput;
import org.pentaho.di.trans.steps.mappingoutput.MappingOutput;
import org.pentaho.di.www.AddExportServlet;
import org.pentaho.di.www.AddTransServlet;
import org.pentaho.di.www.PrepareExecutionTransServlet;
import org.pentaho.di.www.SlaveServerTransStatus;
import org.pentaho.di.www.SocketRepository;
import org.pentaho.di.www.StartExecutionTransServlet;
import org.pentaho.di.www.WebResult;
import org.pentaho.metastore.api.IMetaStore;
/**
* This class represents the information and operations associated with the concept of a Transformation. It loads,
* instantiates, initializes, runs, and monitors the execution of the transformation contained in the specified
 * TransMeta object.
*
* @author Matt
* @since 07-04-2003
*
*/
public class Trans implements VariableSpace, NamedParams, HasLogChannelInterface, LoggingObjectInterface,
ExecutorInterface, ExtensionDataInterface {
/** The package name, used for internationalization of messages. */
private static Class<?> PKG = Trans.class; // for i18n purposes, needed by Translator2!!
/** The replay date format. */
public static final String REPLAY_DATE_FORMAT = "yyyy/MM/dd HH:mm:ss";
/** The log channel interface. */
protected LogChannelInterface log;
/** The log level. */
protected LogLevel logLevel = LogLevel.BASIC;
/** The container object id. */
protected String containerObjectId;
/** The log commit size. */
protected int logCommitSize = 10;
/** The transformation metadata to execute. */
protected TransMeta transMeta;
/**
* The repository we are referencing.
*/
protected Repository repository;
/**
* The MetaStore to use
*/
protected IMetaStore metaStore;
/**
* The job that's launching this transformation. This gives us access to the whole chain, including the parent
* variables, etc.
*/
private Job parentJob;
/**
* The transformation that is executing this transformation in case of mappings.
*/
private Trans parentTrans;
/** The parent logging object interface (this could be a transformation or a job). */
private LoggingObjectInterface parent;
/** The name of the mapping step that executes this transformation in case this is a mapping. */
private String mappingStepName;
/** Indicates that we want to monitor the running transformation in a GUI. */
private boolean monitored;
/**
* Indicates that we are running in preview mode...
*/
private boolean preview;
/** The date objects for logging information about the transformation such as start and end time, etc. */
private Date startDate, endDate, currentDate, logDate, depDate;
/** The job start and end date. */
private Date jobStartDate, jobEndDate;
/** The batch id. */
private long batchId;
/**
* This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the
* transformation's batch id.
*/
private long passedBatchId;
/** The variable bindings for the transformation. */
private VariableSpace variables = new Variables();
/** A list of all the row sets. */
private List<RowSet> rowsets;
/** A list of all the steps. */
private List<StepMetaDataCombi> steps;
/** The class number. */
public int class_nr;
/**
* The replayDate indicates that this transformation is a replay transformation for a transformation executed on
* replayDate. If replayDate is null, the transformation is not a replay.
*/
private Date replayDate;
/** Constant indicating a dispatch type of 1-to-1. */
public static final int TYPE_DISP_1_1 = 1;
/** Constant indicating a dispatch type of 1-to-N. */
public static final int TYPE_DISP_1_N = 2;
/** Constant indicating a dispatch type of N-to-1. */
public static final int TYPE_DISP_N_1 = 3;
/** Constant indicating a dispatch type of N-to-N. */
public static final int TYPE_DISP_N_N = 4;
/** Constant indicating a dispatch type of N-to-M. */
public static final int TYPE_DISP_N_M = 5;
/** Constant indicating a transformation status of Finished. */
public static final String STRING_FINISHED = "Finished";
/** Constant indicating a transformation status of Finished (with errors). */
public static final String STRING_FINISHED_WITH_ERRORS = "Finished (with errors)";
/** Constant indicating a transformation status of Running. */
public static final String STRING_RUNNING = "Running";
/** Constant indicating a transformation status of Paused. */
public static final String STRING_PAUSED = "Paused";
/** Constant indicating a transformation status of Preparing for execution. */
public static final String STRING_PREPARING = "Preparing executing";
/** Constant indicating a transformation status of Initializing. */
public static final String STRING_INITIALIZING = "Initializing";
/** Constant indicating a transformation status of Waiting. */
public static final String STRING_WAITING = "Waiting";
/** Constant indicating a transformation status of Stopped. */
public static final String STRING_STOPPED = "Stopped";
/** Constant indicating a transformation status of Halting. */
public static final String STRING_HALTING = "Halting";
/** Constant specifying a filename containing XML to inject into a ZIP file created during resource export. */
public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml";
/** Whether safe mode is enabled. */
private boolean safeModeEnabled;
/** The thread name. */
@Deprecated
private String threadName;
/** The transaction ID */
private String transactionId;
/** Whether the transformation is preparing for execution. */
private volatile boolean preparing;
/** Whether the transformation is initializing. */
private boolean initializing;
/** Whether the transformation is running. */
private boolean running;
/** Whether the transformation is finished. */
private final AtomicBoolean finished;
/** Whether the transformation is paused. */
private AtomicBoolean paused;
/** Whether the transformation is stopped. */
private AtomicBoolean stopped;
/** The number of errors that have occurred during execution of the transformation. */
private AtomicInteger errors;
/** Whether the transformation is ready to start. */
private boolean readyToStart;
/** Step performance snapshots. */
private Map<String, List<StepPerformanceSnapShot>> stepPerformanceSnapShots;
/** The step performance snapshot timer. */
private Timer stepPerformanceSnapShotTimer;
/** A list of listeners attached to the transformation. */
private List<TransListener> transListeners;
/** A list of stop-event listeners attached to the transformation. */
private List<TransStoppedListener> transStoppedListeners;
/** In case this transformation starts to delegate work to a local transformation or job */
private List<DelegationListener> delegationListeners;
/** The number of finished steps. */
private int nrOfFinishedSteps;
/** The number of active steps. */
private int nrOfActiveSteps;
/** The named parameters. */
private NamedParams namedParams = new NamedParamsDefault();
/** The socket repository. */
private SocketRepository socketRepository;
/** The transformation log table database connection. */
private Database transLogTableDatabaseConnection;
/** The step performance snapshot sequence number. */
private AtomicInteger stepPerformanceSnapshotSeqNr;
/** The last written step performance sequence number. */
private int lastWrittenStepPerformanceSequenceNr;
/** The last step performance snapshot sequence number added. */
private int lastStepPerformanceSnapshotSeqNrAdded;
/** The active subtransformations. */
private Map<String, Trans> activeSubtransformations;
/** The active subjobs */
private Map<String, Job> activeSubjobs;
/** The step performance snapshot size limit. */
private int stepPerformanceSnapshotSizeLimit;
/** The servlet print writer. */
private PrintWriter servletPrintWriter;
/** The trans finished blocking queue. */
private ArrayBlockingQueue<Object> transFinishedBlockingQueue;
/** The name of the executing server */
private String executingServer;
/** The name of the executing user */
private String executingUser;
private Result previousResult;
protected List<RowMetaAndData> resultRows;
protected List<ResultFile> resultFiles;
/** The command line arguments for the transformation. */
protected String[] arguments;
/**
* A table of named counters.
*/
protected Hashtable<String, Counter> counters;
private HttpServletResponse servletResponse;
private HttpServletRequest servletRequest;
private Map<String, Object> extensionDataMap;
/**
* Instantiates a new transformation.
*/
public Trans() {
finished = new AtomicBoolean( false );
paused = new AtomicBoolean( false );
stopped = new AtomicBoolean( false );
transListeners = Collections.synchronizedList( new ArrayList<TransListener>() );
transStoppedListeners = Collections.synchronizedList( new ArrayList<TransStoppedListener>() );
delegationListeners = new ArrayList<DelegationListener>();
// Get a valid transactionId in case we run database transactional.
transactionId = calculateTransactionId();
threadName = transactionId; // / backward compatibility but deprecated!
errors = new AtomicInteger( 0 );
stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
lastWrittenStepPerformanceSequenceNr = 0;
activeSubtransformations = new HashMap<String, Trans>();
activeSubjobs = new HashMap<String, Job>();
resultRows = new ArrayList<RowMetaAndData>();
resultFiles = new ArrayList<ResultFile>();
counters = new Hashtable<String, Counter>();
extensionDataMap = new HashMap<String, Object>();
}
/**
* Initializes a transformation from transformation meta-data defined in memory.
*
* @param transMeta
* the transformation meta-data to use.
*/
public Trans( TransMeta transMeta ) {
this( transMeta, null );
}
/**
* Initializes a transformation from transformation meta-data defined in memory. Also take into account the parent log
* channel interface (job or transformation) for logging lineage purposes.
*
* @param transMeta
* the transformation meta-data to use.
* @param parent
* the parent job that is executing this transformation
*/
public Trans( TransMeta transMeta, LoggingObjectInterface parent ) {
this();
this.transMeta = transMeta;
setParent( parent );
initializeVariablesFrom( transMeta );
copyParametersFrom( transMeta );
transMeta.activateParameters();
// Get a valid transactionId in case we run database transactional.
transactionId = calculateTransactionId();
threadName = transactionId; // / backward compatibility but deprecated!
}
/**
* Sets the parent logging object.
*
* @param parent
* the new parent
*/
public void setParent( LoggingObjectInterface parent ) {
this.parent = parent;
this.log = new LogChannel( this, parent );
this.logLevel = log.getLogLevel();
this.containerObjectId = log.getContainerObjectId();
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationIsPreloaded" ) );
}
if ( log.isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "Trans.Log.NumberOfStepsToRun", String.valueOf( transMeta
.nrSteps() ), String.valueOf( transMeta.nrTransHops() ) ) );
}
}
/**
* Sets the default log commit size.
*/
private void setDefaultLogCommitSize() {
String propLogCommitSize = this.getVariable( "pentaho.log.commit.size" );
if ( propLogCommitSize != null ) {
// override the logCommit variable
try {
logCommitSize = Integer.parseInt( propLogCommitSize );
} catch ( Exception ignored ) {
logCommitSize = 10; // ignore parsing error and default to 10
}
}
}
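  // The log commit size read above can be tuned by defining the variable
  // "pentaho.log.commit.size" in the (parent) variable space before this
  // transformation is constructed; non-numeric values fall back to 10.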
/**
* Gets the log channel interface for the transformation.
*
* @return the log channel
* @see org.pentaho.di.core.logging.HasLogChannelInterface#getLogChannel()
*/
public LogChannelInterface getLogChannel() {
return log;
}
/**
* Sets the log channel interface for the transformation.
*
* @param log
* the new log channel interface
*/
public void setLog( LogChannelInterface log ) {
this.log = log;
}
/**
* Gets the name of the transformation.
*
* @return the transformation name
*/
public String getName() {
if ( transMeta == null ) {
return null;
}
return transMeta.getName();
}
/**
* Instantiates a new transformation using any of the provided parameters including the variable bindings, a
* repository, a name, a repository directory name, and a filename. This is a multi-purpose method that supports
* loading a transformation from a file (if the filename is provided but not a repository object) or from a repository
* (if the repository object, repository directory name, and transformation name are specified).
*
* @param parent
* the parent variable space and named params
* @param rep
* the repository
* @param name
* the name of the transformation
* @param dirname
   *          the repository directory name
* @param filename
* the filename containing the transformation definition
* @throws KettleException
* if any error occurs during loading, parsing, or creation of the transformation
*/
public <Parent extends VariableSpace & NamedParams> Trans( Parent parent, Repository rep, String name,
String dirname, String filename ) throws KettleException {
this();
try {
if ( rep != null ) {
RepositoryDirectoryInterface repdir = rep.findDirectory( dirname );
if ( repdir != null ) {
this.transMeta = rep.loadTransformation( name, repdir, null, false, null ); // reads last version
} else {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToLoadTransformation", name, dirname ) );
}
} else {
transMeta = new TransMeta( filename, false );
}
this.log = LogChannel.GENERAL;
transMeta.initializeVariablesFrom( parent );
initializeVariablesFrom( parent );
// PDI-3064 do not erase parameters from meta!
// instead of this - copy parameters to actual transformation
this.copyParametersFrom( parent );
this.activateParameters();
this.setDefaultLogCommitSize();
// Get a valid transactionId in case we run database transactional.
transactionId = calculateTransactionId();
threadName = transactionId; // / backward compatibility but deprecated!
} catch ( KettleException e ) {
throw new KettleException(
BaseMessages.getString( PKG, "Trans.Exception.UnableToOpenTransformation", name ), e );
}
}
/**
* Executes the transformation. This method will prepare the transformation for execution and then start all the
* threads associated with the transformation and its steps.
*
* @param arguments
* the arguments
* @throws KettleException
* if the transformation could not be prepared (initialized)
*/
public void execute( String[] arguments ) throws KettleException {
prepareExecution( arguments );
startThreads();
}
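  // Example (sketch, assuming a transformation definition on disk; the file name
  // is illustrative):
  //
  //   TransMeta transMeta = new TransMeta( "/path/to/my_transformation.ktr" );
  //   Trans trans = new Trans( transMeta );
  //   trans.execute( null );         // prepare + start all step threads
  //   trans.waitUntilFinished();     // block until every step has completed
  //   Result result = trans.getResult();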
/**
* Prepares the transformation for execution. This includes setting the arguments and parameters as well as preparing
* and tracking the steps and hops in the transformation.
*
* @param arguments
* the arguments to use for this transformation
* @throws KettleException
* in case the transformation could not be prepared (initialized)
*/
public void prepareExecution( String[] arguments ) throws KettleException {
preparing = true;
startDate = null;
running = false;
log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_START );
log.snap( Metrics.METRIC_TRANSFORMATION_INIT_START );
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationPrepareExecution.id, this );
checkCompatibility();
// Set the arguments on the transformation...
//
if ( arguments != null ) {
setArguments( arguments );
}
activateParameters();
transMeta.activateParameters();
if ( transMeta.getName() == null ) {
if ( transMeta.getFilename() != null ) {
log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForFilename", transMeta
.getFilename() ) );
}
} else {
log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForTransformation", transMeta
.getName() ) );
}
if ( getArguments() != null ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.NumberOfArgumentsDetected", String
.valueOf( getArguments().length ) ) );
}
}
if ( isSafeModeEnabled() ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.SafeModeIsEnabled", transMeta.getName() ) );
}
}
if ( getReplayDate() != null ) {
SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT );
log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ThisIsAReplayTransformation" )
+ df.format( getReplayDate() ) );
} else {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.ThisIsNotAReplayTransformation" ) );
}
}
// setInternalKettleVariables(this); --> Let's not do this, when running
// without file, for example remote, it spoils the fun
// extra check to see if the servlet print writer has some value in case
// folks want to test it locally...
//
if ( servletPrintWriter == null ) {
String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null );
if ( encoding == null ) {
servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) );
} else {
try {
servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out, encoding ) );
} catch ( UnsupportedEncodingException ex ) {
servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) );
}
}
}
// Keep track of all the row sets and allocated steps
//
steps = new ArrayList<StepMetaDataCombi>();
rowsets = new ArrayList<RowSet>();
List<StepMeta> hopsteps = transMeta.getTransHopSteps( false );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.FoundDefferentSteps", String.valueOf( hopsteps
.size() ) ) );
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingRowsets" ) );
}
// First allocate all the rowsets required!
// Note that a mapping doesn't receive ANY input or output rowsets...
//
for ( int i = 0; i < hopsteps.size(); i++ ) {
StepMeta thisStep = hopsteps.get( i );
if ( thisStep.isMapping() ) {
continue; // handled and allocated by the mapping step itself.
}
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString(
PKG, "Trans.Log.AllocateingRowsetsForStep", String.valueOf( i ), thisStep.getName() ) );
}
List<StepMeta> nextSteps = transMeta.findNextSteps( thisStep );
int nrTargets = nextSteps.size();
for ( int n = 0; n < nrTargets; n++ ) {
// What's the next step?
StepMeta nextStep = nextSteps.get( n );
if ( nextStep.isMapping() ) {
continue; // handled and allocated by the mapping step itself.
}
// How many times do we start the source step?
int thisCopies = thisStep.getCopies();
if ( thisCopies < 0 ) {
// This can only happen if a variable is used that didn't resolve to a positive integer value
//
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Log.StepCopiesNotCorrectlyDefined", thisStep.getName() ) );
}
// How many times do we start the target step?
int nextCopies = nextStep.getCopies();
// Are we re-partitioning?
boolean repartitioning;
if ( thisStep.isPartitioned() ) {
repartitioning = !thisStep.getStepPartitioningMeta()
.equals( nextStep.getStepPartitioningMeta() );
} else {
repartitioning = nextStep.isPartitioned();
}
int nrCopies;
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString(
PKG, "Trans.Log.copiesInfo", String.valueOf( thisCopies ), String.valueOf( nextCopies ) ) );
}
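        // Decide how the source step copies are wired to the target step copies:
        //   1 -> 1                     : TYPE_DISP_1_1, a single rowset
        //   1 -> N                     : TYPE_DISP_1_N, one rowset per target copy
        //   N -> 1                     : TYPE_DISP_N_1, one rowset per source copy
        //   N -> N (no repartitioning) : TYPE_DISP_N_N, pairwise rowsets
        //   anything else              : TYPE_DISP_N_M, a full NxM mesh of rowsets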
int dispatchType;
if ( thisCopies == 1 && nextCopies == 1 ) {
dispatchType = TYPE_DISP_1_1;
nrCopies = 1;
} else if ( thisCopies == 1 && nextCopies > 1 ) {
dispatchType = TYPE_DISP_1_N;
nrCopies = nextCopies;
} else if ( thisCopies > 1 && nextCopies == 1 ) {
dispatchType = TYPE_DISP_N_1;
nrCopies = thisCopies;
} else if ( thisCopies == nextCopies && !repartitioning ) {
dispatchType = TYPE_DISP_N_N;
nrCopies = nextCopies;
} else {
// > 1!
dispatchType = TYPE_DISP_N_M;
nrCopies = nextCopies;
} // Allocate a rowset for each destination step
// Allocate the rowsets
//
if ( dispatchType != TYPE_DISP_N_M ) {
for ( int c = 0; c < nrCopies; c++ ) {
RowSet rowSet;
switch ( transMeta.getTransformationType() ) {
case Normal:
// This is a temporary patch until the batching rowset has proven
// to be working in all situations.
// Currently there are stalling problems when dealing with small
// amounts of rows.
//
Boolean batchingRowSet =
ValueMeta.convertStringToBoolean( System.getProperty( Const.KETTLE_BATCHING_ROWSET ) );
if ( batchingRowSet != null && batchingRowSet.booleanValue() ) {
rowSet = new BlockingBatchingRowSet( transMeta.getSizeRowset() );
} else {
rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
}
break;
case SerialSingleThreaded:
rowSet = new SingleRowRowSet();
break;
case SingleThreaded:
rowSet = new QueueRowSet();
break;
default:
throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() );
}
switch ( dispatchType ) {
case TYPE_DISP_1_1:
rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), 0 );
break;
case TYPE_DISP_1_N:
rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), c );
break;
case TYPE_DISP_N_1:
rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), 0 );
break;
case TYPE_DISP_N_N:
rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), c );
break;
default:
break;
}
rowsets.add( rowSet );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet
.toString() ) );
}
}
} else {
// For each N source steps we have M target steps
//
// From each input step we go to all output steps.
// This allows maximum flexibility for re-partitioning,
// distribution...
for ( int s = 0; s < thisCopies; s++ ) {
for ( int t = 0; t < nextCopies; t++ ) {
BlockingRowSet rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
rowSet.setThreadNameFromToCopy( thisStep.getName(), s, nextStep.getName(), t );
rowsets.add( rowSet );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet
.toString() ) );
}
}
}
}
}
log
.logDetailed( BaseMessages
.getString(
PKG,
"Trans.Log.AllocatedRowsets", String.valueOf( rowsets.size() ), String.valueOf( i ), thisStep
.getName() )
+ " " );
}
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingStepsAndStepData" ) );
}
// Allocate the steps & the data...
//
for ( int i = 0; i < hopsteps.size(); i++ ) {
StepMeta stepMeta = hopsteps.get( i );
String stepid = stepMeta.getStepID();
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString(
PKG, "Trans.Log.TransformationIsToAllocateStep", stepMeta.getName(), stepid ) );
}
// How many copies are launched of this step?
int nrCopies = stepMeta.getCopies();
if ( log.isDebug() ) {
log
.logDebug( BaseMessages
.getString( PKG, "Trans.Log.StepHasNumberRowCopies", String.valueOf( nrCopies ) ) );
}
// At least run once...
for ( int c = 0; c < nrCopies; c++ ) {
// Make sure we haven't started it yet!
if ( !hasStepStarted( stepMeta.getName(), c ) ) {
StepMetaDataCombi combi = new StepMetaDataCombi();
combi.stepname = stepMeta.getName();
combi.copy = c;
// The meta-data
combi.stepMeta = stepMeta;
combi.meta = stepMeta.getStepMetaInterface();
// Allocate the step data
StepDataInterface data = combi.meta.getStepData();
combi.data = data;
// Allocate the step
StepInterface step = combi.meta.getStep( stepMeta, data, c, transMeta, this );
// Copy the variables of the transformation to the step...
// don't share. Each copy of the step has its own variables.
//
step.initializeVariablesFrom( this );
step.setUsingThreadPriorityManagment( transMeta.isUsingThreadPriorityManagment() );
// Pass the connected repository & metaStore to the steps runtime
//
step.setRepository( repository );
step.setMetaStore( metaStore );
// If the step is partitioned, set the partitioning ID and some other
// things as well...
if ( stepMeta.isPartitioned() ) {
List<String> partitionIDs = stepMeta.getStepPartitioningMeta().getPartitionSchema().getPartitionIDs();
if ( partitionIDs != null && partitionIDs.size() > 0 ) {
step.setPartitionID( partitionIDs.get( c ) ); // Pass the partition ID
// to the step
}
}
// Save the step too
combi.step = step;
// Pass logging level and metrics gathering down to the step level.
// /
if ( combi.step instanceof LoggingObjectInterface ) {
LogChannelInterface logChannel = combi.step.getLogChannel();
logChannel.setLogLevel( logLevel );
logChannel.setGatheringMetrics( log.isGatheringMetrics() );
}
// Add to the bunch...
steps.add( combi );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocatedANewStep", stepMeta
.getName(), String.valueOf( c ) ) );
}
}
}
}
// Now we need to verify if certain rowsets are not meant to be for error
// handling...
// Loop over the steps and for every step verify the output rowsets
// If a rowset is going to a target step in the steps error handling
// metadata, set it to the errorRowSet.
// The input rowsets are already in place, so the next step just accepts the
// rows.
// Metadata wise we need to do the same trick in TransMeta
//
for ( int s = 0; s < steps.size(); s++ ) {
StepMetaDataCombi combi = steps.get( s );
if ( combi.stepMeta.isDoingErrorHandling() ) {
combi.step.identifyErrorOutput();
}
}
// Now (optionally) write start log record!
// Make sure we synchronize appropriately to avoid duplicate batch IDs.
//
Object syncObject = this;
if ( parentJob != null ) {
syncObject = parentJob; // parallel execution in a job
}
if ( parentTrans != null ) {
syncObject = parentTrans; // multiple sub-transformations
}
synchronized ( syncObject ) {
calculateBatchIdAndDateRange();
beginProcessing();
}
// Set the partition-to-rowset mapping
//
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepMeta stepMeta = sid.stepMeta;
StepInterface baseStep = sid.step;
baseStep.setPartitioned( stepMeta.isPartitioned() );
// Now let's take a look at the source and target relation
//
// If this source step is not partitioned, and the target step is: it
// means we need to re-partition the incoming data.
// If both steps are partitioned on the same method and schema, we don't
// need to re-partition
// If both steps are partitioned on a different method or schema, we need
// to re-partition as well.
// If both steps are not partitioned, we don't need to re-partition
//
boolean isThisPartitioned = stepMeta.isPartitioned();
PartitionSchema thisPartitionSchema = null;
if ( isThisPartitioned ) {
thisPartitionSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema();
}
boolean isNextPartitioned = false;
StepPartitioningMeta nextStepPartitioningMeta = null;
PartitionSchema nextPartitionSchema = null;
List<StepMeta> nextSteps = transMeta.findNextSteps( stepMeta );
int nrNext = nextSteps.size();
for ( int p = 0; p < nrNext; p++ ) {
StepMeta nextStep = nextSteps.get( p );
if ( nextStep.isPartitioned() ) {
isNextPartitioned = true;
nextStepPartitioningMeta = nextStep.getStepPartitioningMeta();
nextPartitionSchema = nextStepPartitioningMeta.getPartitionSchema();
}
}
baseStep.setRepartitioning( StepPartitioningMeta.PARTITIONING_METHOD_NONE );
// If the next step is partitioned differently, set re-partitioning, when
// running locally.
//
if ( ( !isThisPartitioned && isNextPartitioned )
|| ( isThisPartitioned && isNextPartitioned && !thisPartitionSchema.equals( nextPartitionSchema ) ) ) {
baseStep.setRepartitioning( nextStepPartitioningMeta.getMethodType() );
}
      // For partitioning to a set of remote steps (repartitioning from a master
      // to a set of remote output steps)
//
StepPartitioningMeta targetStepPartitioningMeta = baseStep.getStepMeta().getTargetStepPartitioningMeta();
if ( targetStepPartitioningMeta != null ) {
baseStep.setRepartitioning( targetStepPartitioningMeta.getMethodType() );
}
}
preparing = false;
initializing = true;
    // Do a topology sort... Over 150 steps (copies), things might slow down too much.
//
if ( isMonitored() && steps.size() < 150 ) {
doTopologySortOfSteps();
}
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString( PKG, "Trans.Log.InitialisingSteps", String.valueOf( steps.size() ) ) );
}
StepInitThread[] initThreads = new StepInitThread[steps.size()];
Thread[] threads = new Thread[steps.size()];
// Initialize all the threads...
//
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi sid = steps.get( i );
// Do the init code in the background!
// Init all steps at once, but ALL steps need to finish before we can
// continue properly!
//
initThreads[i] = new StepInitThread( sid, log );
// Put it in a separate thread!
//
threads[i] = new Thread( initThreads[i] );
threads[i].setName( "init of " + sid.stepname + "." + sid.copy + " (" + threads[i].getName() + ")" );
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeInitialize.id, initThreads[i] );
threads[i].start();
}
for ( int i = 0; i < threads.length; i++ ) {
try {
threads[i].join();
ExtensionPointHandler
.callExtensionPoint( log, KettleExtensionPoint.StepAfterInitialize.id, initThreads[i] );
} catch ( Exception ex ) {
log.logError( "Error with init thread: " + ex.getMessage(), ex.getMessage() );
log.logError( Const.getStackTracker( ex ) );
}
}
initializing = false;
boolean ok = true;
    // All steps are initialized now: see if there was one that didn't do it
// correctly!
//
for ( int i = 0; i < initThreads.length; i++ ) {
StepMetaDataCombi combi = initThreads[i].getCombi();
if ( !initThreads[i].isOk() ) {
log.logError( BaseMessages
.getString( PKG, "Trans.Log.StepFailedToInit", combi.stepname + "." + combi.copy ) );
combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
ok = false;
} else {
combi.data.setStatus( StepExecutionStatus.STATUS_IDLE );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StepInitialized", combi.stepname
+ "." + combi.copy ) );
}
}
}
if ( !ok ) {
      // Halt the other threads as well, signal end-of-the-line to the outside
// world...
// Also explicitly call dispose() to clean up resources opened during
// init();
//
for ( int i = 0; i < initThreads.length; i++ ) {
StepMetaDataCombi combi = initThreads[i].getCombi();
// Dispose will overwrite the status, but we set it back right after
// this.
combi.step.dispose( combi.meta, combi.data );
if ( initThreads[i].isOk() ) {
combi.data.setStatus( StepExecutionStatus.STATUS_HALTED );
} else {
combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
}
}
// Just for safety, fire the trans finished listeners...
try {
fireTransFinishedListeners();
} catch ( KettleException e ) {
        // a listener produced errors
        log.logError( BaseMessages.getString( PKG, "Trans.FinishListeners.Exception" ) );
        // we will not pass this exception up to the prepareExecution() entry point.
} finally {
// Flag the transformation as finished even if exception was thrown
setFinished( true );
}
// Pass along the log during preview. Otherwise it becomes hard to see
// what went wrong.
//
if ( preview ) {
String logText = KettleLogStore.getAppender().getBuffer( getLogChannelId(), true ).toString();
throw new KettleException(
BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR + logText );
} else {
throw new KettleException(
BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR );
}
}
log.snap( Metrics.METRIC_TRANSFORMATION_INIT_STOP );
KettleEnvironment.setExecutionInformation( this, repository );
readyToStart = true;
}
@SuppressWarnings( "deprecation" )
private void checkCompatibility() {
// If we don't have a previous result and transMeta does have one, someone has been using a deprecated method.
//
if ( transMeta.getPreviousResult() != null && getPreviousResult() == null ) {
setPreviousResult( transMeta.getPreviousResult() );
}
// If we don't have arguments set and TransMeta has, someone has been using a deprecated method.
//
if ( transMeta.getArguments() != null && getArguments() == null ) {
setArguments( transMeta.getArguments() );
}
}
/**
   * Starts the threads prepared by prepareExecution(). Before you start the threads, you can add RowListeners to them.
*
* @throws KettleException
* if there is a communication error with a remote output socket.
*/
public void startThreads() throws KettleException {
// Now prepare to start all the threads...
//
nrOfFinishedSteps = 0;
nrOfActiveSteps = 0;
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStartThreads.id, this );
fireTransStartedListeners();
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi sid = steps.get( i );
sid.step.markStart();
sid.step.initBeforeStart();
// also attach a Step Listener to detect when we're done...
//
StepListener stepListener = new StepListener() {
public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) {
nrOfActiveSteps++;
if ( nrOfActiveSteps == 1 ) {
// Transformation goes from in-active to active...
// PDI-5229 sync added
synchronized ( transListeners ) {
for ( TransListener listener : transListeners ) {
listener.transActive( Trans.this );
}
}
}
}
public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
synchronized ( Trans.this ) {
nrOfFinishedSteps++;
if ( nrOfFinishedSteps >= steps.size() ) {
// Set the finished flag
//
setFinished( true );
// Grab the performance statistics one last time (if enabled)
//
addStepPerformanceSnapShot();
try {
fireTransFinishedListeners();
} catch ( Exception e ) {
step.setErrors( step.getErrors() + 1L );
log.logError( getName()
+ " : " + BaseMessages.getString( PKG, "Trans.Log.UnexpectedErrorAtTransformationEnd" ), e );
}
}
// If a step fails with an error, we want to kill/stop the others
// too...
//
if ( step.getErrors() > 0 ) {
log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationDetectedErrors" ) );
log.logMinimal( BaseMessages.getString(
PKG, "Trans.Log.TransformationIsKillingTheOtherSteps" ) );
killAllNoWait();
}
}
}
};
// Make sure this is called first!
//
if ( sid.step instanceof BaseStep ) {
( (BaseStep) sid.step ).getStepListeners().add( 0, stepListener );
} else {
sid.step.addStepListener( stepListener );
}
}
if ( transMeta.isCapturingStepPerformanceSnapShots() ) {
stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
stepPerformanceSnapShots = new ConcurrentHashMap<String, List<StepPerformanceSnapShot>>();
// Calculate the maximum number of snapshots to be kept in memory
//
String limitString = environmentSubstitute( transMeta.getStepPerformanceCapturingSizeLimit() );
if ( Const.isEmpty( limitString ) ) {
limitString = EnvUtil.getSystemProperty( Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT );
}
stepPerformanceSnapshotSizeLimit = Const.toInt( limitString, 0 );
// Set a timer to collect the performance data from the running threads...
//
stepPerformanceSnapShotTimer = new Timer( "stepPerformanceSnapShot Timer: " + transMeta.getName() );
TimerTask timerTask = new TimerTask() {
public void run() {
if ( !isFinished() ) {
addStepPerformanceSnapShot();
}
}
};
stepPerformanceSnapShotTimer.schedule( timerTask, 100, transMeta.getStepPerformanceCapturingDelay() );
}
// Now start a thread to monitor the running transformation...
//
setFinished( false );
paused.set( false );
stopped.set( false );
transFinishedBlockingQueue = new ArrayBlockingQueue<Object>( 10 );
TransListener transListener = new TransAdapter() {
public void transFinished( Trans trans ) {
try {
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationFinish.id, trans );
} catch ( KettleException e ) {
throw new RuntimeException( "Error calling extension point at end of transformation", e );
}
// First of all, stop the performance snapshot timer if there is
// one...
//
if ( transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null ) {
stepPerformanceSnapShotTimer.cancel();
}
setFinished( true );
running = false; // no longer running
log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP );
// If the user ran with metrics gathering enabled and a metrics logging table is configured, add another
// listener...
//
MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
if ( metricsLogTable.isDefined() ) {
try {
writeMetricsInformation();
} catch ( Exception e ) {
log.logError( "Error writing metrics information", e );
errors.incrementAndGet();
}
}
// Close the unique connections when running database transactionally.
// This will commit or roll back the transaction based on the result of this transformation.
//
if ( transMeta.isUsingUniqueConnections() ) {
trans.closeUniqueDatabaseConnections( getResult() );
}
}
};
// This should always be done first so that the other listeners achieve a clean state to start from (setFinished and
// so on)
//
transListeners.add( 0, transListener );
running = true;
switch ( transMeta.getTransformationType() ) {
case Normal:
// Now start all the threads...
//
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi combi = steps.get( i );
RunThread runThread = new RunThread( combi );
Thread thread = new Thread( runThread );
thread.setName( getName() + " - " + combi.stepname );
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeStart.id, combi );
// Call an extension point at the end of the step
//
combi.step.addStepListener( new StepAdapter() {
@Override
public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
try {
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepFinished.id, combi );
} catch ( KettleException e ) {
throw new RuntimeException( "Unexpected error in calling extension point upon step finish", e );
}
}
} );
thread.start();
}
break;
case SerialSingleThreaded:
new Thread( new Runnable() {
public void run() {
try {
// Always disable thread priority management, it will always slow us
// down...
//
for ( StepMetaDataCombi combi : steps ) {
combi.step.setUsingThreadPriorityManagment( false );
}
//
// This is a single threaded version...
//
// Sort the steps from start to finish...
//
Collections.sort( steps, new Comparator<StepMetaDataCombi>() {
public int compare( StepMetaDataCombi c1, StepMetaDataCombi c2 ) {
boolean c1BeforeC2 = transMeta.findPrevious( c2.stepMeta, c1.stepMeta );
if ( c1BeforeC2 ) {
return -1;
} else {
return 1;
}
}
} );
boolean[] stepDone = new boolean[steps.size()];
int nrDone = 0;
while ( nrDone < steps.size() && !isStopped() ) {
for ( int i = 0; i < steps.size() && !isStopped(); i++ ) {
StepMetaDataCombi combi = steps.get( i );
if ( !stepDone[i] ) {
// if (combi.step.canProcessOneRow() ||
// !combi.step.isRunning()) {
boolean cont = combi.step.processRow( combi.meta, combi.data );
if ( !cont ) {
stepDone[i] = true;
nrDone++;
}
// }
}
}
}
} catch ( Exception e ) {
errors.addAndGet( 1 );
log.logError( "Error executing single threaded", e );
} finally {
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi combi = steps.get( i );
combi.step.dispose( combi.meta, combi.data );
combi.step.markStop();
}
}
}
} ).start();
break;
case SingleThreaded:
// Don't do anything, this needs to be handled by the transformation
// executor!
//
break;
default:
break;
}
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStart.id, this );
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString(
PKG,
"Trans.Log.TransformationHasAllocated", String.valueOf( steps.size() ), String
.valueOf( rowsets.size() ) ) );
}
}
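// Usage sketch (illustrative only, not part of this class's behavior): a typical embedding of a
// transformation prepares the execution first, then starts the threads and waits for completion.
// The file name below is a placeholder chosen for the example.
//
//   TransMeta transMeta = new TransMeta( "/path/to/sample.ktr" );
//   Trans trans = new Trans( transMeta );
//   trans.prepareExecution( null );   // initialize steps; RowListeners/RowProducers can be added now
//   trans.startThreads();             // start the prepared step threads
//   trans.waitUntilFinished();        // block until all RunThreads are done
//   if ( trans.getErrors() > 0 ) {
//     throw new KettleException( "Transformation finished with errors" );
//   }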
/**
 * Attempts to fire all registered listeners, even if some of them fail.
*
* @throws KettleException
* if any errors occur during notification
*/
protected void fireTransFinishedListeners() throws KettleException {
// PDI-5229 sync added
synchronized ( transListeners ) {
if ( transListeners.size() == 0 ) {
return;
}
// prevent an Exception from one listener from blocking the execution of the others
List<KettleException> badGuys = new ArrayList<KettleException>( transListeners.size() );
for ( TransListener transListener : transListeners ) {
try {
transListener.transFinished( this );
} catch ( KettleException e ) {
badGuys.add( e );
}
}
// Signal the waitUntilFinished blocker...
transFinishedBlockingQueue.add( new Object() );
if ( !badGuys.isEmpty() ) {
//FIFO
throw new KettleException( badGuys.get( 0 ) );
}
}
}
/**
* Fires the start-event listeners (if any are registered).
*
* @throws KettleException
* if any errors occur during notification
*/
protected void fireTransStartedListeners() throws KettleException {
// PDI-5229 sync added
synchronized ( transListeners ) {
for ( TransListener transListener : transListeners ) {
transListener.transStarted( this );
}
}
}
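// Listener registration sketch (illustrative only): callers typically register a TransListener via
// addTransListener() before startThreads(), so the transStarted()/transFinished() notifications above
// reach their listener as well. TransAdapter is the convenience base class already used in this file.
//
//   trans.addTransListener( new TransAdapter() {
//     @Override
//     public void transFinished( Trans trans ) {
//       // e.g. release resources or notify a monitoring component here
//     }
//   } );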
/**
* Adds a step performance snapshot.
*/
protected void addStepPerformanceSnapShot() {
if ( stepPerformanceSnapShots == null ) {
return; // Race condition somewhere?
}
boolean pausedAndNotEmpty = isPaused() && !stepPerformanceSnapShots.isEmpty();
boolean stoppedAndNotEmpty = isStopped() && !stepPerformanceSnapShots.isEmpty();
if ( transMeta.isCapturingStepPerformanceSnapShots() && !pausedAndNotEmpty && !stoppedAndNotEmpty ) {
// get the statistics from the steps and keep them...
//
int seqNr = stepPerformanceSnapshotSeqNr.incrementAndGet();
for ( int i = 0; i < steps.size(); i++ ) {
StepMeta stepMeta = steps.get( i ).stepMeta;
StepInterface step = steps.get( i ).step;
StepPerformanceSnapShot snapShot =
new StepPerformanceSnapShot( seqNr, getBatchId(), new Date(), getName(), stepMeta.getName(), step
.getCopy(), step.getLinesRead(), step.getLinesWritten(), step.getLinesInput(), step
.getLinesOutput(), step.getLinesUpdated(), step.getLinesRejected(), step.getErrors() );
List<StepPerformanceSnapShot> snapShotList = stepPerformanceSnapShots.get( step.toString() );
StepPerformanceSnapShot previous;
if ( snapShotList == null ) {
snapShotList = new ArrayList<StepPerformanceSnapShot>();
stepPerformanceSnapShots.put( step.toString(), snapShotList );
previous = null;
} else {
previous = snapShotList.get( snapShotList.size() - 1 ); // the last one...
}
// Make the difference...
//
snapShot.diff( previous, step.rowsetInputSize(), step.rowsetOutputSize() );
synchronized ( stepPerformanceSnapShots ) {
snapShotList.add( snapShot );
if ( stepPerformanceSnapshotSizeLimit > 0 && snapShotList.size() > stepPerformanceSnapshotSizeLimit ) {
snapShotList.remove( 0 );
}
}
}
lastStepPerformanceSnapshotSeqNrAdded = stepPerformanceSnapshotSeqNr.get();
}
}
/**
* This method performs any cleanup operations, typically called after the transformation has finished. Specifically,
* after ALL the slave transformations in a clustered run have finished.
*/
public void cleanup() {
// Close all open server sockets.
// We can only close these after all processing has been confirmed to be finished.
//
if ( steps == null ) {
return;
}
for ( StepMetaDataCombi combi : steps ) {
combi.step.cleanup();
}
}
/**
* Logs a summary message for the specified step.
*
* @param si
* the step interface
*/
public void logSummary( StepInterface si ) {
log
.logBasic(
si.getStepname(),
BaseMessages
.getString(
PKG,
"Trans.Log.FinishedProcessing", String.valueOf( si.getLinesInput() ), String.valueOf( si
.getLinesOutput() ), String.valueOf( si.getLinesRead() ) )
+ BaseMessages.getString(
PKG, "Trans.Log.FinishedProcessing2", String.valueOf( si.getLinesWritten() ), String.valueOf( si
.getLinesUpdated() ), String.valueOf( si.getErrors() ) ) );
}
/**
* Waits until all RunThreads have finished.
*/
public void waitUntilFinished() {
try {
if ( transFinishedBlockingQueue == null ) {
return;
}
boolean wait = true;
while ( wait ) {
wait = transFinishedBlockingQueue.poll( 1, TimeUnit.DAYS ) == null;
}
} catch ( InterruptedException e ) {
throw new RuntimeException( "Waiting for transformation to be finished interrupted!", e );
}
}
/**
* Gets the number of errors that have occurred during execution of the transformation.
*
* @return the number of errors
*/
public int getErrors() {
int nrErrors = errors.get();
if ( steps == null ) {
return nrErrors;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.step.getErrors() != 0L ) {
nrErrors += sid.step.getErrors();
}
}
if ( nrErrors > 0 ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrorsDetected" ) );
}
return nrErrors;
}
/**
* Gets the number of steps in the transformation that are in an end state, such as Finished, Halted, or Stopped.
*
* @return the number of ended steps
*/
public int getEnded() {
int nrEnded = 0;
if ( steps == null ) {
return 0;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepDataInterface data = sid.data;
if ( ( sid.step != null && !sid.step.isRunning() ) || // Should normally not be needed anymore, status is kept in
// data.
data.getStatus() == StepExecutionStatus.STATUS_FINISHED || // Finished processing
data.getStatus() == StepExecutionStatus.STATUS_HALTED || // Not launching because of init error
data.getStatus() == StepExecutionStatus.STATUS_STOPPED // Stopped because of an error
) {
nrEnded++;
}
}
return nrEnded;
}
/**
 * Checks if the transformation is finished.
*
* @return true if the transformation is finished, false otherwise
*/
public boolean isFinished() {
return finished.get();
}
private void setFinished( boolean newValue ) {
finished.set( newValue );
}
public boolean isFinishedOrStopped() {
return isFinished() || isStopped();
}
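// Polling sketch (illustrative alternative to waitUntilFinished()): a caller that wants to do periodic
// work while the transformation runs can poll the finished/stopped flags instead of blocking.
//
//   while ( !trans.isFinishedOrStopped() ) {
//     // refresh a UI, log progress, check a cancel flag, ...
//     Thread.sleep( 500 );
//   }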
/**
 * Attempts to stop all running steps and subtransformations. If all steps have finished, the transformation is
* marked as Finished.
*/
public void killAll() {
if ( steps == null ) {
return;
}
int nrStepsFinished = 0;
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( log.isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + sid.step.getStepname() );
}
// If this is a mapping, this can cause an endless loop
//
while ( sid.step.isRunning() ) {
sid.step.stopAll();
try {
Thread.sleep( 20 );
} catch ( Exception e ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() );
return;
}
}
if ( !sid.step.isRunning() ) {
nrStepsFinished++;
}
}
if ( nrStepsFinished == steps.size() ) {
setFinished( true );
}
}
/**
* Asks all steps to stop but doesn't wait around for it to happen. This is a special method for use with mappings.
*/
private void killAllNoWait() {
if ( steps == null ) {
return;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( log.isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + step.getStepname() );
}
step.stopAll();
try {
Thread.sleep( 20 );
} catch ( Exception e ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() );
return;
}
}
}
/**
* Logs the execution statistics for the transformation for the specified time interval. If the total length of
* execution is supplied as the interval, then the statistics represent the average throughput (lines
* read/written/updated/rejected/etc. per second) for the entire execution.
*
* @param seconds
* the time interval (in seconds)
*/
public void printStats( int seconds ) {
log.logBasic( " " );
if ( steps == null ) {
return;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
long proc = step.getProcessed();
if ( seconds != 0 ) {
if ( step.getErrors() == 0 ) {
log
.logBasic( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step.getCopy(), String
.valueOf( proc ), String.valueOf( ( proc / seconds ) ) ) );
} else {
log
.logError( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessErrorInfo", step.getStepname(), "." + step.getCopy(), String.valueOf( step
.getErrors() ), String.valueOf( proc ), String.valueOf( proc / seconds ) ) );
}
} else {
if ( step.getErrors() == 0 ) {
log
.logBasic( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step.getCopy(), String
.valueOf( proc ), seconds != 0 ? String.valueOf( ( proc / seconds ) ) : "-" ) );
} else {
log
.logError( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessErrorInfo2", step.getStepname(), "." + step.getCopy(), String.valueOf( step
.getErrors() ), String.valueOf( proc ), String.valueOf( seconds ) ) );
}
}
}
}
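// Reporting sketch (illustrative only): printStats() expects the elapsed execution time in seconds, so a
// caller typically measures the wall-clock time around the run and passes the rounded duration.
//
//   long start = System.currentTimeMillis();
//   trans.execute( null );
//   trans.waitUntilFinished();
//   int seconds = (int) ( ( System.currentTimeMillis() - start ) / 1000 );
//   trans.printStats( seconds );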
/**
 * Gets a representative metric of the "processed" lines of the last step.
*
* @return the number of lines processed by the last step
*/
public long getLastProcessed() {
if ( steps == null || steps.size() == 0 ) {
return 0L;
}
StepMetaDataCombi sid = steps.get( steps.size() - 1 );
return sid.step.getProcessed();
}
/**
* Finds the RowSet with the specified name.
*
* @param rowsetname
* the rowsetname
* @return the row set, or null if none found
*/
public RowSet findRowSet( String rowsetname ) {
// Start with the transformation.
for ( int i = 0; i < rowsets.size(); i++ ) {
// log.logDetailed("DIS: looking for RowSet ["+rowsetname+"] in nr "+i+" of "+threads.size()+" threads...");
RowSet rs = rowsets.get( i );
if ( rs.getName().equalsIgnoreCase( rowsetname ) ) {
return rs;
}
}
return null;
}
/**
* Finds the RowSet between two steps (or copies of steps).
*
* @param from
* the name of the "from" step
* @param fromcopy
* the copy number of the "from" step
* @param to
* the name of the "to" step
* @param tocopy
* the copy number of the "to" step
* @return the row set, or null if none found
*/
public RowSet findRowSet( String from, int fromcopy, String to, int tocopy ) {
// Start with the transformation.
for ( int i = 0; i < rowsets.size(); i++ ) {
RowSet rs = rowsets.get( i );
if ( rs.getOriginStepName().equalsIgnoreCase( from )
&& rs.getDestinationStepName().equalsIgnoreCase( to ) && rs.getOriginStepCopy() == fromcopy
&& rs.getDestinationStepCopy() == tocopy ) {
return rs;
}
}
return null;
}
/**
* Checks whether the specified step (or step copy) has started.
*
* @param sname
* the step name
* @param copy
* the copy number
* @return true the specified step (or step copy) has started, false otherwise
*/
public boolean hasStepStarted( String sname, int copy ) {
// log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!");
// log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads");
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
boolean started = ( sid.stepname != null && sid.stepname.equalsIgnoreCase( sname ) ) && sid.copy == copy;
if ( started ) {
return true;
}
}
return false;
}
/**
* Stops all steps from running, and alerts any registered listeners.
*/
public void stopAll() {
if ( steps == null ) {
return;
}
// log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!");
// log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads");
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface rt = sid.step;
rt.setStopped( true );
rt.resumeRunning();
// Cancel queries etc. by force...
StepInterface si = rt;
try {
si.stopRunning( sid.meta, sid.data );
} catch ( Exception e ) {
log.logError( "Something went wrong while trying to stop the transformation: " + e.toString() );
log.logError( Const.getStackTracker( e ) );
}
sid.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
}
// if it is stopped it is not paused
paused.set( false );
stopped.set( true );
// Fire the stopped listener...
//
synchronized ( transStoppedListeners ) {
for ( TransStoppedListener listener : transStoppedListeners ) {
listener.transStopped( this );
}
}
}
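// Stop sketch (illustrative only): stopAll() is normally invoked from another thread (a UI action, a
// shutdown hook, a parent job), after which the caller can still wait for the step threads to wind down.
//
//   Runtime.getRuntime().addShutdownHook( new Thread( new Runnable() {
//     public void run() {
//       trans.stopAll();
//       trans.waitUntilFinished();
//     }
//   } ) );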
/**
* Gets the number of steps in this transformation.
*
* @return the number of steps
*/
public int nrSteps() {
if ( steps == null ) {
return 0;
}
return steps.size();
}
/**
* Gets the number of active (i.e. not finished) steps in this transformation
*
* @return the number of active steps
*/
public int nrActiveSteps() {
if ( steps == null ) {
return 0;
}
int nr = 0;
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
// without also considering a step status of not finished,
// the step execution results grid shows empty while
// the transformation has steps still running.
// if ( sid.step.isRunning() ) nr++;
if ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED ) {
nr++;
}
}
return nr;
}
/**
 * Builds a lookup indicating whether each of the transformation's steps is running.
 *
 * @return a boolean array associated with the step list, indicating whether each step is running.
*/
public boolean[] getTransStepIsRunningLookup() {
if ( steps == null ) {
return null;
}
boolean[] tResult = new boolean[steps.size()];
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
tResult[i] = ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED );
}
return tResult;
}
/**
 * Checks the execution status of each step in the transformation.
 *
 * @return an array associated with the step list, indicating the status of each step.
*/
public StepExecutionStatus[] getTransStepExecutionStatusLookup() {
if ( steps == null ) {
return null;
}
// we need this snapshot for the TransGridDelegate refresh method to handle the
// difference between a timed refresh and continual step status updates
int totalSteps = steps.size();
StepExecutionStatus[] tList = new StepExecutionStatus[totalSteps];
for ( int i = 0; i < totalSteps; i++ ) {
StepMetaDataCombi sid = steps.get( i );
tList[i] = sid.step.getStatus();
}
return tList;
}
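// Monitoring sketch (illustrative only): a grid or console monitor can combine the two lookup methods
// above to render one row per step without touching the step threads directly.
//
//   StepExecutionStatus[] statuses = trans.getTransStepExecutionStatusLookup();
//   boolean[] running = trans.getTransStepIsRunningLookup();
//   for ( int i = 0; statuses != null && i < statuses.length; i++ ) {
//     System.out.println( trans.getSteps().get( i ).stepname + " : " + statuses[i]
//       + " (running=" + running[i] + ")" );
//   }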
/**
* Gets the run thread for the step at the specified index.
*
* @param i
* the index of the desired step
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface getRunThread( int i ) {
if ( steps == null ) {
return null;
}
return steps.get( i ).step;
}
/**
* Gets the run thread for the step with the specified name and copy number.
*
* @param name
* the step name
* @param copy
* the copy number
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface getRunThread( String name, int copy ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( step.getStepname().equalsIgnoreCase( name ) && step.getCopy() == copy ) {
return step;
}
}
return null;
}
/**
* Calculate the batch id and date range for the transformation.
*
* @throws KettleTransException
* if there are any errors during calculation
*/
public void calculateBatchIdAndDateRange() throws KettleTransException {
TransLogTable transLogTable = transMeta.getTransLogTable();
currentDate = new Date();
logDate = new Date();
startDate = Const.MIN_DATE;
endDate = currentDate;
DatabaseMeta logConnection = transLogTable.getDatabaseMeta();
String logTable = environmentSubstitute( transLogTable.getActualTableName() );
String logSchema = environmentSubstitute( transLogTable.getActualSchemaName() );
try {
if ( logConnection != null ) {
String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination( logSchema, logTable );
if ( Const.isEmpty( logTable ) ) {
// It doesn't make sense to start database logging without a table
// to log to.
throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.NoLogTableDefined" ) );
}
if ( Const.isEmpty( transMeta.getName() ) && logConnection != null && logTable != null ) {
throw new KettleException( BaseMessages
.getString( PKG, "Trans.Exception.NoTransnameAvailableForLogging" ) );
}
transLogTableDatabaseConnection = new Database( this, logConnection );
transLogTableDatabaseConnection.shareVariablesWith( this );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningLogConnection", "" + logConnection ) );
}
transLogTableDatabaseConnection.connect();
transLogTableDatabaseConnection.setCommit( logCommitSize );
// See if we have to add a batch id...
// Do this first, before anything else to lock the complete table exclusively
//
if ( transLogTable.isBatchIdUsed() ) {
Long id_batch =
logConnection.getNextBatchId( transLogTableDatabaseConnection, logSchema, logTable, transLogTable
.getKeyField().getFieldName() );
setBatchId( id_batch.longValue() );
}
//
// Get the date range from the logging table: from the last end_date to now. (currentDate)
//
Object[] lastr =
transLogTableDatabaseConnection.getLastLogDate(
logSchemaAndTable, transMeta.getName(), false, LogStatus.END );
if ( lastr != null && lastr.length > 0 ) {
startDate = (Date) lastr[0];
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StartDateFound" ) + startDate );
}
}
//
// OK, we have a date-range.
// However, perhaps we need to look at a table before we make a final judgment?
//
if ( transMeta.getMaxDateConnection() != null
&& transMeta.getMaxDateTable() != null && transMeta.getMaxDateTable().length() > 0
&& transMeta.getMaxDateField() != null && transMeta.getMaxDateField().length() > 0 ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LookingForMaxdateConnection", ""
+ transMeta.getMaxDateConnection() ) );
}
DatabaseMeta maxcon = transMeta.getMaxDateConnection();
if ( maxcon != null ) {
Database maxdb = new Database( this, maxcon );
maxdb.shareVariablesWith( this );
try {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningMaximumDateConnection" ) );
}
maxdb.connect();
maxdb.setCommit( logCommitSize );
//
// Determine the endDate by looking at a field in a table...
//
String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable();
RowMetaAndData r1 = maxdb.getOneRow( sql );
if ( r1 != null ) {
// OK, we have a value, what's the offset?
Date maxvalue = r1.getRowMeta().getDate( r1.getData(), 0 );
if ( maxvalue != null ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection" )
+ r1 );
}
endDate.setTime( (long) ( maxvalue.getTime() + ( transMeta.getMaxDateOffset() * 1000 ) ) );
}
} else {
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString( PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection" ) );
}
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorConnectingToDatabase", "" + transMeta.getMaxDateConnection() ), e );
} finally {
maxdb.disconnect();
}
} else {
throw new KettleTransException( BaseMessages
.getString( PKG, "Trans.Exception.MaximumDateConnectionCouldNotBeFound", ""
+ transMeta.getMaxDateConnection() ) );
}
}
// Determine the last date of all dependent tables...
// Get the maximum in depdate...
if ( transMeta.nrDependencies() > 0 ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.CheckingForMaxDependencyDate" ) );
}
//
// Maybe one of the tables where this transformation is dependent on has changed?
// If so we need to change the start-date!
//
depDate = Const.MIN_DATE;
Date maxdepdate = Const.MIN_DATE;
if ( lastr != null && lastr.length > 0 ) {
Date dep = (Date) lastr[1]; // #1: last depdate
if ( dep != null ) {
maxdepdate = dep;
depDate = dep;
}
}
for ( int i = 0; i < transMeta.nrDependencies(); i++ ) {
TransDependency td = transMeta.getDependency( i );
DatabaseMeta depcon = td.getDatabase();
if ( depcon != null ) {
Database depdb = new Database( this, depcon );
try {
depdb.connect();
depdb.setCommit( logCommitSize );
String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename();
RowMetaAndData r1 = depdb.getOneRow( sql );
if ( r1 != null ) {
// OK, we have a row, get the result!
Date maxvalue = (Date) r1.getData()[0];
if ( maxvalue != null ) {
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString(
PKG,
"Trans.Log.FoundDateFromTable", td.getTablename(), "." + td.getFieldname(), " = "
+ maxvalue.toString() ) );
}
if ( maxvalue.getTime() > maxdepdate.getTime() ) {
maxdepdate = maxvalue;
}
} else {
throw new KettleTransException(
BaseMessages
.getString(
PKG,
"Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td
.getTablename()
+ ".", td.getFieldname() ) );
}
} else {
throw new KettleTransException(
BaseMessages
.getString(
PKG,
"Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td
.getTablename()
+ ".", td.getFieldname() ) );
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorInDatabase", ""
+ td.getDatabase() ), e );
} finally {
depdb.disconnect();
}
} else {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ConnectionCouldNotBeFound", "" + td.getDatabase() ) );
}
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.Maxdepdate" )
+ ( XMLHandler.date2string( maxdepdate ) ) );
}
}
// OK, so we now have the maximum depdate;
// If it is larger, it means we have to read everything back in again.
// Maybe something has changed that we need!
//
if ( maxdepdate.getTime() > depDate.getTime() ) {
depDate = maxdepdate;
startDate = Const.MIN_DATE;
}
} else {
depDate = currentDate;
}
}
// OK, now we have a date-range. See if we need to set a maximum!
if ( transMeta.getMaxDateDifference() > 0.0 && // Do we have a difference specified?
startDate.getTime() > Const.MIN_DATE.getTime() // Is the startdate > Minimum?
) {
// See if the end-date is larger than start_date + DIFF?
Date maxdesired = new Date( startDate.getTime() + ( (long) transMeta.getMaxDateDifference() * 1000 ) );
// If this is the case: lower the end-date. Pick up the next 'region' next time around.
// We do this to limit the workload in a single update session (e.g. for large fact tables)
//
if ( endDate.compareTo( maxdesired ) > 0 ) {
endDate = maxdesired;
}
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorCalculatingDateRange", logTable ), e );
}
// Be careful, we DO NOT close the trans log table database connection!!!
// It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions.
}
/**
* Begin processing. Also handle logging operations related to the start of the transformation
*
* @throws KettleTransException
* the kettle trans exception
*/
public void beginProcessing() throws KettleTransException {
TransLogTable transLogTable = transMeta.getTransLogTable();
int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 );
try {
String logTable = transLogTable.getActualTableName();
SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT );
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationCanBeReplayed" )
+ df.format( currentDate ) );
try {
if ( transLogTableDatabaseConnection != null
&& !Const.isEmpty( logTable ) && !Const.isEmpty( transMeta.getName() ) ) {
transLogTableDatabaseConnection.writeLogRecord( transLogTable, LogStatus.START, this, null );
// Pass in a commit to release transaction locks and to allow a user to actually see the log record.
//
if ( !transLogTableDatabaseConnection.isAutoCommit() ) {
transLogTableDatabaseConnection.commitLog( true, transLogTable );
}
// If we need to do periodic logging, make sure to install a timer for this...
//
if ( intervalInSeconds > 0 ) {
final Timer timer = new Timer( getName() + " - interval logging timer" );
TimerTask timerTask = new TimerTask() {
public void run() {
try {
endProcessing();
} catch ( Exception e ) {
log
.logError(
BaseMessages.getString( PKG, "Trans.Exception.UnableToPerformIntervalLogging" ), e );
// Also stop the show...
//
errors.incrementAndGet();
stopAll();
}
}
};
timer.schedule( timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000 );
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) {
timer.cancel();
}
} );
}
// Add a listener to make sure that the last record is also written when transformation finishes...
//
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) throws KettleException {
try {
endProcessing();
lastWrittenStepPerformanceSequenceNr =
writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.END );
} catch ( KettleException e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
}
}
} );
}
// If we need to write out the step logging information, do so at the end of the transformation too...
//
StepLogTable stepLogTable = transMeta.getStepLogTable();
if ( stepLogTable.isDefined() ) {
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) throws KettleException {
try {
writeStepLogInformation();
} catch ( KettleException e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
}
}
} );
}
// If we need to write the log channel hierarchy and lineage information, add a listener for that too...
//
ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
if ( channelLogTable.isDefined() ) {
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) throws KettleException {
try {
writeLogChannelInformation();
} catch ( KettleException e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
}
}
} );
}
// See if we need to write the step performance records at intervals too...
//
PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
int perfLogInterval = Const.toInt( environmentSubstitute( performanceLogTable.getLogInterval() ), -1 );
if ( performanceLogTable.isDefined() && perfLogInterval > 0 ) {
final Timer timer = new Timer( getName() + " - step performance log interval timer" );
TimerTask timerTask = new TimerTask() {
public void run() {
try {
lastWrittenStepPerformanceSequenceNr =
writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING );
} catch ( Exception e ) {
log.logError( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformIntervalPerformanceLogging" ), e );
// Also stop the show...
//
errors.incrementAndGet();
stopAll();
}
}
};
timer.schedule( timerTask, perfLogInterval * 1000, perfLogInterval * 1000 );
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) {
timer.cancel();
}
} );
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorWritingLogRecordToTable", logTable ), e );
} finally {
// If we use interval logging, we keep the connection open for performance reasons...
//
if ( transLogTableDatabaseConnection != null && ( intervalInSeconds <= 0 ) ) {
transLogTableDatabaseConnection.disconnect();
transLogTableDatabaseConnection = null;
}
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToBeginProcessingTransformation" ), e );
}
}
/**
* Writes log channel information to a channel logging table (if one has been configured).
*
* @throws KettleException
* if any errors occur during logging
*/
protected void writeLogChannelInformation() throws KettleException {
Database db = null;
ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
// PDI-7070: If parent trans or job has the same channel logging info, don't duplicate log entries
Trans t = getParentTrans();
if ( t != null ) {
if ( channelLogTable.equals( t.getTransMeta().getChannelLogTable() ) ) {
return;
}
}
Job j = getParentJob();
if ( j != null ) {
if ( channelLogTable.equals( j.getJobMeta().getChannelLogTable() ) ) {
return;
}
}
// end PDI-7070
try {
db = new Database( this, channelLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
List<LoggingHierarchy> loggingHierarchyList = getLoggingHierarchy();
for ( LoggingHierarchy loggingHierarchy : loggingHierarchyList ) {
db.writeLogRecord( channelLogTable, LogStatus.START, loggingHierarchy, null );
}
// Also time-out the log records in here...
//
db.cleanupLogRecords( channelLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
/**
* Writes step information to a step logging table (if one has been configured).
*
* @throws KettleException
* if any errors occur during logging
*/
protected void writeStepLogInformation() throws KettleException {
Database db = null;
StepLogTable stepLogTable = transMeta.getStepLogTable();
try {
db = new Database( this, stepLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
for ( StepMetaDataCombi combi : steps ) {
db.writeLogRecord( stepLogTable, LogStatus.START, combi, null );
}
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
protected synchronized void writeMetricsInformation() throws KettleException {
//
List<MetricsDuration> metricsList =
MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_REGISTER_EXTENSIONS_START );
if ( !metricsList.isEmpty() ) {
System.out.println( metricsList.get( 0 ) );
}
metricsList =
MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_REGISTRATION_START );
if ( !metricsList.isEmpty() ) {
System.out.println( metricsList.get( 0 ) );
}
long total = 0;
metricsList =
MetricsUtil.getDuration(
log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_TYPE_REGISTRATION_START );
if ( metricsList != null ) {
for ( MetricsDuration duration : metricsList ) {
total += duration.getDuration();
System.out.println( " - " + duration.toString() + " Total=" + total );
}
}
Database db = null;
MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
try {
db = new Database( this, metricsLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
List<String> logChannelIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() );
for ( String logChannelId : logChannelIds ) {
Deque<MetricsSnapshotInterface> snapshotList =
MetricsRegistry.getInstance().getSnapshotLists().get( logChannelId );
if ( snapshotList != null ) {
Iterator<MetricsSnapshotInterface> iterator = snapshotList.iterator();
while ( iterator.hasNext() ) {
MetricsSnapshotInterface snapshot = iterator.next();
db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null );
}
}
Map<String, MetricsSnapshotInterface> snapshotMap =
MetricsRegistry.getInstance().getSnapshotMaps().get( logChannelId );
if ( snapshotMap != null ) {
synchronized ( snapshotMap ) {
Iterator<MetricsSnapshotInterface> iterator = snapshotMap.values().iterator();
while ( iterator.hasNext() ) {
MetricsSnapshotInterface snapshot = iterator.next();
db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null );
}
}
}
}
// Also time-out the log records in here...
//
db.cleanupLogRecords( metricsLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteMetricsInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
/**
* Gets the result of the transformation. The Result object contains such measures as the number of errors, number of
* lines read/written/input/output/updated/rejected, etc.
*
* @return the Result object containing resulting measures from execution of the transformation
*/
public Result getResult() {
if ( steps == null ) {
return null;
}
Result result = new Result();
result.setNrErrors( errors.longValue() );
result.setResult( errors.longValue() == 0 );
TransLogTable transLogTable = transMeta.getTransLogTable();
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
result.setNrErrors( result.getNrErrors() + sid.step.getErrors() );
result.getResultFiles().putAll( step.getResultFiles() );
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_READ ) ) ) {
result.setNrLinesRead( result.getNrLinesRead() + step.getLinesRead() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_INPUT ) ) ) {
result.setNrLinesInput( result.getNrLinesInput() + step.getLinesInput() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_WRITTEN ) ) ) {
result.setNrLinesWritten( result.getNrLinesWritten() + step.getLinesWritten() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_OUTPUT ) ) ) {
result.setNrLinesOutput( result.getNrLinesOutput() + step.getLinesOutput() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_UPDATED ) ) ) {
result.setNrLinesUpdated( result.getNrLinesUpdated() + step.getLinesUpdated() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_REJECTED ) ) ) {
result.setNrLinesRejected( result.getNrLinesRejected() + step.getLinesRejected() );
}
}
result.setRows( resultRows );
if ( !Const.isEmpty( resultFiles ) ) {
result.setResultFiles( new HashMap<String, ResultFile>() );
for ( ResultFile resultFile : resultFiles ) {
result.getResultFiles().put( resultFile.toString(), resultFile );
}
}
result.setStopped( isStopped() );
result.setLogChannelId( log.getLogChannelId() );
return result;
}
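// Result sketch (illustrative only): after waitUntilFinished() the aggregated measures can be read from
// the Result object instead of querying the individual steps.
//
//   Result result = trans.getResult();
//   if ( result != null && result.getResult() ) {
//     System.out.println( "Read=" + result.getNrLinesRead() + ", written=" + result.getNrLinesWritten()
//       + ", errors=" + result.getNrErrors() );
//   }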
/**
* End processing. Also handle any logging operations associated with the end of a transformation
*
* @return true if all end processing is successful, false otherwise
* @throws KettleException
* if any errors occur during processing
*/
private synchronized boolean endProcessing() throws KettleException {
LogStatus status;
if ( isFinished() ) {
if ( isStopped() ) {
status = LogStatus.STOP;
} else {
status = LogStatus.END;
}
} else if ( isPaused() ) {
status = LogStatus.PAUSED;
} else {
status = LogStatus.RUNNING;
}
TransLogTable transLogTable = transMeta.getTransLogTable();
int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 );
logDate = new Date();
// OK, we have some logging to do...
//
DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta();
String logTable = transMeta.getTransLogTable().getActualTableName();
if ( logcon != null ) {
Database ldb = null;
try {
// Let's not reconnect/disconnect all the time for performance reasons!
//
if ( transLogTableDatabaseConnection == null ) {
ldb = new Database( this, logcon );
ldb.shareVariablesWith( this );
ldb.connect();
ldb.setCommit( logCommitSize );
transLogTableDatabaseConnection = ldb;
} else {
ldb = transLogTableDatabaseConnection;
}
// Write to the standard transformation log table...
//
if ( !Const.isEmpty( logTable ) ) {
ldb.writeLogRecord( transLogTable, status, this, null );
}
// Also time-out the log records in here...
//
if ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) {
ldb.cleanupLogRecords( transLogTable );
}
// Commit the operations to prevent locking issues
//
if ( !ldb.isAutoCommit() ) {
ldb.commitLog( true, transMeta.getTransLogTable() );
}
} catch ( KettleDatabaseException e ) {
// PDI-9790 error write to log db is transaction error
log.logError( BaseMessages.getString( PKG, "Database.Error.WriteLogTable", logTable ), e );
errors.incrementAndGet();
//end PDI-9790
} catch ( Exception e ) {
throw new KettleException( BaseMessages
.getString( PKG, "Trans.Exception.ErrorWritingLogRecordToTable", transMeta
.getTransLogTable().getActualTableName() ), e );
} finally {
if ( intervalInSeconds <= 0 || ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) ) {
ldb.disconnect();
transLogTableDatabaseConnection = null; // disconnected
}
}
}
return true;
}
/**
* Write step performance log records.
*
* @param startSequenceNr
 * the start sequence number
* @param status
* the logging status. If this is End, perform cleanup
* @return the new sequence number
* @throws KettleException
* if any errors occur during logging
*/
private int writeStepPerformanceLogRecords( int startSequenceNr, LogStatus status ) throws KettleException {
int lastSeqNr = 0;
Database ldb = null;
PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
if ( !performanceLogTable.isDefined()
|| !transMeta.isCapturingStepPerformanceSnapShots() || stepPerformanceSnapShots == null
|| stepPerformanceSnapShots.isEmpty() ) {
return 0; // nothing to do here!
}
try {
ldb = new Database( this, performanceLogTable.getDatabaseMeta() );
ldb.shareVariablesWith( this );
ldb.connect();
ldb.setCommit( logCommitSize );
// Write to the step performance log table...
//
RowMetaInterface rowMeta = performanceLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta();
ldb.prepareInsert( rowMeta, performanceLogTable.getActualSchemaName(), performanceLogTable
.getActualTableName() );
synchronized ( stepPerformanceSnapShots ) {
Iterator<List<StepPerformanceSnapShot>> iterator = stepPerformanceSnapShots.values().iterator();
while ( iterator.hasNext() ) {
List<StepPerformanceSnapShot> snapshots = iterator.next();
synchronized ( snapshots ) {
Iterator<StepPerformanceSnapShot> snapshotsIterator = snapshots.iterator();
while ( snapshotsIterator.hasNext() ) {
StepPerformanceSnapShot snapshot = snapshotsIterator.next();
if ( snapshot.getSeqNr() >= startSequenceNr
&& snapshot.getSeqNr() <= lastStepPerformanceSnapshotSeqNrAdded ) {
RowMetaAndData row = performanceLogTable.getLogRecord( LogStatus.START, snapshot, null );
ldb.setValuesInsert( row.getRowMeta(), row.getData() );
ldb.insertRow( true );
}
lastSeqNr = snapshot.getSeqNr();
}
}
}
}
ldb.insertFinished( true );
// Finally, see if the log table needs cleaning up...
//
if ( status.equals( LogStatus.END ) ) {
ldb.cleanupLogRecords( performanceLogTable );
}
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorWritingStepPerformanceLogRecordToTable" ), e );
} finally {
if ( ldb != null ) {
ldb.disconnect();
}
}
return lastSeqNr + 1;
}
/**
* Close unique database connections. If there are errors in the Result, perform a rollback
*
* @param result
* the result of the transformation execution
*/
private void closeUniqueDatabaseConnections( Result result ) {
// Don't close any connections if the parent job is using the same transaction
//
if ( parentJob != null
&& transactionId != null && parentJob.getTransactionId() != null
&& transactionId.equals( parentJob.getTransactionId() ) ) {
return;
}
// Don't close any connections if the parent transformation is using the same transaction
//
if ( parentTrans != null
&& parentTrans.getTransMeta().isUsingUniqueConnections() && transactionId != null
&& parentTrans.getTransactionId() != null && transactionId.equals( parentTrans.getTransactionId() ) ) {
return;
}
// First we get all the database connections ...
//
DatabaseConnectionMap map = DatabaseConnectionMap.getInstance();
synchronized ( map ) {
List<Database> databaseList = new ArrayList<Database>( map.getMap().values() );
for ( Database database : databaseList ) {
if ( database.getConnectionGroup().equals( getTransactionId() ) ) {
try {
// This database connection belongs to this transformation.
// Let's roll it back if there is an error...
//
if ( result.getNrErrors() > 0 ) {
try {
database.rollback( true );
log.logBasic( BaseMessages.getString(
PKG, "Trans.Exception.TransactionsRolledBackOnConnection", database.toString() ) );
} catch ( Exception e ) {
throw new KettleDatabaseException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorRollingBackUniqueConnection", database.toString() ), e );
}
} else {
try {
database.commit( true );
log.logBasic( BaseMessages.getString(
PKG, "Trans.Exception.TransactionsCommittedOnConnection", database.toString() ) );
} catch ( Exception e ) {
throw new KettleDatabaseException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorCommittingUniqueConnection", database.toString() ), e );
}
}
} catch ( Exception e ) {
log.logError( BaseMessages.getString(
PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", database.toString() ), e );
result.setNrErrors( result.getNrErrors() + 1 );
} finally {
try {
// This database connection belongs to this transformation.
database.closeConnectionOnly();
} catch ( Exception e ) {
log.logError( BaseMessages.getString(
PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", database.toString() ), e );
result.setNrErrors( result.getNrErrors() + 1 );
} finally {
// Remove the database from the list...
//
map.removeConnection( database.getConnectionGroup(), database.getPartitionId(), database );
}
}
}
}
// Who else needs to be informed of the rollback or commit?
//
List<DatabaseTransactionListener> transactionListeners = map.getTransactionListeners( getTransactionId() );
if ( result.getNrErrors() > 0 ) {
for ( DatabaseTransactionListener listener : transactionListeners ) {
try {
listener.rollback();
} catch ( Exception e ) {
log.logError(
BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerRollback" ), e );
result.setNrErrors( result.getNrErrors() + 1 );
}
}
} else {
for ( DatabaseTransactionListener listener : transactionListeners ) {
try {
listener.commit();
} catch ( Exception e ) {
log.logError(
BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerCommit" ), e );
result.setNrErrors( result.getNrErrors() + 1 );
}
}
}
}
}
/**
* Find the run thread for the step with the specified name.
*
* @param stepname
* the step name
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface findRunThread( String stepname ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( step.getStepname().equalsIgnoreCase( stepname ) ) {
return step;
}
}
return null;
}
/**
* Find the base steps for the step with the specified name.
*
* @param stepname
* the step name
* @return the list of base steps for the specified step
*/
public List<StepInterface> findBaseSteps( String stepname ) {
List<StepInterface> baseSteps = new ArrayList<StepInterface>();
if ( steps == null ) {
return baseSteps;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface stepInterface = sid.step;
if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) {
baseSteps.add( stepInterface );
}
}
return baseSteps;
}
/**
* Find the executing step copy for the step with the specified name and copy number
*
* @param stepname
* the step name
 * @param copyNr
 * the copy number of the step
* @return the executing step found or null if no copy could be found.
*/
public StepInterface findStepInterface( String stepname, int copyNr ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface stepInterface = sid.step;
if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) && sid.copy == copyNr ) {
return stepInterface;
}
}
return null;
}
/**
* Find the available executing step copies for the step with the specified name
*
* @param stepname
* the step name
* @return the list of executing step copies found or null if no steps are available yet (incorrect usage)
*/
public List<StepInterface> findStepInterfaces( String stepname ) {
if ( steps == null ) {
return null;
}
List<StepInterface> list = new ArrayList<StepInterface>();
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface stepInterface = sid.step;
if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) {
list.add( stepInterface );
}
}
return list;
}
/**
* Find the data interface for the step with the specified name.
*
* @param name
* the step name
* @return the step data interface
*/
public StepDataInterface findDataInterface( String name ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface rt = sid.step;
if ( rt.getStepname().equalsIgnoreCase( name ) ) {
return sid.data;
}
}
return null;
}
/**
* Gets the start date/time object for the transformation.
*
* @return Returns the startDate.
*/
public Date getStartDate() {
return startDate;
}
/**
* Gets the end date/time object for the transformation.
*
* @return Returns the endDate.
*/
public Date getEndDate() {
return endDate;
}
/**
* Checks whether the running transformation is being monitored.
*
* @return true the running transformation is being monitored, false otherwise
*/
public boolean isMonitored() {
return monitored;
}
/**
* Sets whether the running transformation should be monitored.
*
* @param monitored
* true if the running transformation should be monitored, false otherwise
*/
public void setMonitored( boolean monitored ) {
this.monitored = monitored;
}
/**
* Gets the meta-data for the transformation.
*
* @return Returns the transformation meta-data
*/
public TransMeta getTransMeta() {
return transMeta;
}
/**
* Sets the meta-data for the transformation.
*
* @param transMeta
* The transformation meta-data to set.
*/
public void setTransMeta( TransMeta transMeta ) {
this.transMeta = transMeta;
}
/**
* Gets the current date/time object.
*
* @return the current date
*/
public Date getCurrentDate() {
return currentDate;
}
/**
* Gets the dependency date for the transformation. A transformation can have a list of dependency fields. If any of
 * these fields have a maximum date higher than the dependency date of the last run, the date range is set to (-oo,
* now). The use-case is the incremental population of Slowly Changing Dimensions (SCD).
*
* @return Returns the dependency date
*/
public Date getDepDate() {
return depDate;
}
/**
* Gets the date the transformation was logged.
*
* @return the log date
*/
public Date getLogDate() {
return logDate;
}
/**
* Gets the rowsets for the transformation.
*
* @return a list of rowsets
*/
public List<RowSet> getRowsets() {
return rowsets;
}
/**
* Gets a list of steps in the transformation.
*
* @return a list of the steps in the transformation
*/
public List<StepMetaDataCombi> getSteps() {
return steps;
}
/**
* Gets a string representation of the transformation.
*
* @return the string representation of the transformation
* @see java.lang.Object#toString()
*/
public String toString() {
if ( transMeta == null || transMeta.getName() == null ) {
return getClass().getSimpleName();
}
// See if there is a parent transformation. If so, print the name of the parent here as well...
//
StringBuffer string = new StringBuffer();
// If we're running as a mapping, we get a reference to the calling (parent) transformation as well...
//
if ( getParentTrans() != null ) {
string.append( '[' ).append( getParentTrans().toString() ).append( ']' ).append( '.' );
}
// When we run a mapping we also set a mapping step name in there...
//
if ( !Const.isEmpty( mappingStepName ) ) {
string.append( '[' ).append( mappingStepName ).append( ']' ).append( '.' );
}
string.append( transMeta.getName() );
return string.toString();
}
/**
* Gets the mapping inputs for each step in the transformation.
*
* @return an array of MappingInputs
*/
public MappingInput[] findMappingInput() {
if ( steps == null ) {
return null;
}
List<MappingInput> list = new ArrayList<MappingInput>();
// Look in threads and find the MappingInput step thread...
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi smdc = steps.get( i );
StepInterface step = smdc.step;
if ( step.getStepID().equalsIgnoreCase( "MappingInput" ) ) {
list.add( (MappingInput) step );
}
}
return list.toArray( new MappingInput[list.size()] );
}
/**
* Gets the mapping outputs for each step in the transformation.
*
* @return an array of MappingOutputs
*/
public MappingOutput[] findMappingOutput() {
List<MappingOutput> list = new ArrayList<MappingOutput>();
if ( steps != null ) {
// Look in threads and find the MappingOutput step thread...
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi smdc = steps.get( i );
StepInterface step = smdc.step;
if ( step.getStepID().equalsIgnoreCase( "MappingOutput" ) ) {
list.add( (MappingOutput) step );
}
}
}
return list.toArray( new MappingOutput[list.size()] );
}
/**
* Find the StepInterface (thread) by looking it up using the name.
*
* @param stepname
* The name of the step to look for
* @param copy
* the copy number of the step to look for
* @return the StepInterface or null if nothing was found.
*/
public StepInterface getStepInterface( String stepname, int copy ) {
if ( steps == null ) {
return null;
}
// Now start all the threads...
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.stepname.equalsIgnoreCase( stepname ) && sid.copy == copy ) {
return sid.step;
}
}
return null;
}
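// RowListener sketch (illustrative only): the step interface returned above is the usual hook for
// attaching a RowListener before startThreads(), e.g. to capture the rows a step writes. The step name
// below is a placeholder; RowAdapter is assumed to be the convenience base class from
// org.pentaho.di.trans.step.
//
//   StepInterface step = trans.getStepInterface( "Output step name", 0 );
//   step.addRowListener( new RowAdapter() {
//     @Override
//     public void rowWrittenEvent( RowMetaInterface rowMeta, Object[] row ) throws KettleStepException {
//       // inspect or collect the row here
//     }
//   } );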
/**
* Gets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run
* again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line
 * numbers into a file (SOURCE_FILE.line, for example). During replay, only the lines that have errors in them are
 * passed to the next steps; the other lines are ignored. This is for the use case: if the document contained errors
 * (bad dates, chars in numbers, etc.), you simply send the document back to the source (the user/department that
* created it probably) and when you get it back, re-run the last transformation.
*
* @return the replay date
*/
public Date getReplayDate() {
return replayDate;
}
/**
* Sets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run
* again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line
 * numbers into a file (SOURCE_FILE.line, for example). During replay, only the lines that have errors in them are
 * passed to the next steps; the other lines are ignored. This is for the use case: if the document contained errors
 * (bad dates, chars in numbers, etc.), you simply send the document back to the source (the user/department that
* created it probably) and when you get it back, re-run the last transformation.
*
* @param replayDate
* the new replay date
*/
public void setReplayDate( Date replayDate ) {
this.replayDate = replayDate;
}
/**
* Turn on safe mode during running: the transformation will run slower but with more checking enabled.
*
* @param safeModeEnabled
* true for safe mode
*/
public void setSafeModeEnabled( boolean safeModeEnabled ) {
this.safeModeEnabled = safeModeEnabled;
}
/**
* Checks whether safe mode is enabled.
*
* @return Returns true if the safe mode is enabled: the transformation will run slower but with more checking enabled
*/
public boolean isSafeModeEnabled() {
return safeModeEnabled;
}
/**
* This adds a row producer to the transformation that just got set up. It is preferable to run this BEFORE execute()
* but after prepareExecution()
*
* @param stepname
* The step to produce rows for
* @param copynr
   *          The copy number of the step to produce rows for (normally 0 unless you have multiple copies running)
* @return the row producer
* @throws KettleException
* in case the thread/step to produce rows for could not be found.
* @see Trans#execute(String[])
* @see Trans#prepareExecution(String[])
*/
public RowProducer addRowProducer( String stepname, int copynr ) throws KettleException {
StepInterface stepInterface = getStepInterface( stepname, copynr );
if ( stepInterface == null ) {
throw new KettleException( "Unable to find thread with name " + stepname + " and copy number " + copynr );
}
// We are going to add an extra RowSet to this stepInterface.
RowSet rowSet;
switch ( transMeta.getTransformationType() ) {
case Normal:
rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
break;
case SerialSingleThreaded:
rowSet = new SingleRowRowSet();
break;
case SingleThreaded:
rowSet = new QueueRowSet();
break;
default:
throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() );
}
// Add this rowset to the list of active rowsets for the selected step
stepInterface.getInputRowSets().add( rowSet );
return new RowProducer( stepInterface, rowSet );
}
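  // Illustrative sketch (not part of the original source): injecting rows through a RowProducer. The step name
  // "Injector" and the row metadata variable are assumptions for the example.
  //
  //   Trans trans = new Trans( transMeta );
  //   trans.prepareExecution( null );
  //   RowProducer rowProducer = trans.addRowProducer( "Injector", 0 );
  //   trans.startThreads();
  //   rowProducer.putRow( injectRowMeta, new Object[] { "some value" } );
  //   rowProducer.finished();
  //   trans.waitUntilFinished();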
/**
* Gets the parent job, or null if there is no parent.
*
* @return the parent job, or null if there is no parent
*/
public Job getParentJob() {
return parentJob;
}
/**
* Sets the parent job for the transformation.
*
* @param parentJob
* The parent job to set
*/
public void setParentJob( Job parentJob ) {
this.logLevel = parentJob.getLogLevel();
this.log.setLogLevel( logLevel );
this.parentJob = parentJob;
transactionId = calculateTransactionId();
}
/**
* Finds the StepDataInterface (currently) associated with the specified step.
*
* @param stepname
* The name of the step to look for
* @param stepcopy
* The copy number (0 based) of the step
* @return The StepDataInterface or null if non found.
*/
public StepDataInterface getStepDataInterface( String stepname, int stepcopy ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.stepname.equals( stepname ) && sid.copy == stepcopy ) {
return sid.data;
}
}
return null;
}
/**
* Checks whether the transformation has any steps that are halted.
*
* @return true if one or more steps are halted, false otherwise
*/
public boolean hasHaltedSteps() {
// not yet 100% sure of this, if there are no steps... or none halted?
if ( steps == null ) {
return false;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.data.getStatus() == StepExecutionStatus.STATUS_HALTED ) {
return true;
}
}
return false;
}
/**
* Gets the job start date.
*
* @return the job start date
*/
public Date getJobStartDate() {
return jobStartDate;
}
/**
* Gets the job end date.
*
* @return the job end date
*/
public Date getJobEndDate() {
return jobEndDate;
}
/**
* Sets the job end date.
*
* @param jobEndDate
* the jobEndDate to set
*/
public void setJobEndDate( Date jobEndDate ) {
this.jobEndDate = jobEndDate;
}
/**
* Sets the job start date.
*
* @param jobStartDate
* the jobStartDate to set
*/
public void setJobStartDate( Date jobStartDate ) {
this.jobStartDate = jobStartDate;
}
/**
* Get the batch ID that is passed from the parent job to the transformation. If nothing is passed, it's the
* transformation's batch ID
*
* @return the parent job's batch ID, or the transformation's batch ID if there is no parent job
*/
public long getPassedBatchId() {
return passedBatchId;
}
/**
* Sets the passed batch ID of the transformation from the batch ID of the parent job.
*
* @param jobBatchId
* the jobBatchId to set
*/
public void setPassedBatchId( long jobBatchId ) {
this.passedBatchId = jobBatchId;
}
/**
* Gets the batch ID of the transformation.
*
* @return the batch ID of the transformation
*/
public long getBatchId() {
return batchId;
}
/**
* Sets the batch ID of the transformation.
*
* @param batchId
* the batch ID to set
*/
public void setBatchId( long batchId ) {
this.batchId = batchId;
}
/**
* Gets the name of the thread that contains the transformation.
*
* @deprecated please use getTransactionId() instead
* @return the thread name
*/
@Deprecated
public String getThreadName() {
return threadName;
}
/**
* Sets the thread name for the transformation.
*
* @deprecated please use setTransactionId() instead
* @param threadName
* the thread name
*/
@Deprecated
public void setThreadName( String threadName ) {
this.threadName = threadName;
}
/**
* Gets the status of the transformation (Halting, Finished, Paused, etc.)
*
* @return the status of the transformation
*/
public String getStatus() {
String message;
if ( running ) {
if ( isStopped() ) {
message = STRING_HALTING;
} else {
if ( isFinished() ) {
message = STRING_FINISHED;
if ( getResult().getNrErrors() > 0 ) {
message += " (with errors)";
}
} else if ( isPaused() ) {
message = STRING_PAUSED;
} else {
message = STRING_RUNNING;
}
}
} else if ( isStopped() ) {
message = STRING_STOPPED;
} else if ( preparing ) {
message = STRING_PREPARING;
} else if ( initializing ) {
message = STRING_INITIALIZING;
} else {
message = STRING_WAITING;
}
return message;
}
/**
* Checks whether the transformation is initializing.
*
* @return true if the transformation is initializing, false otherwise
*/
public boolean isInitializing() {
return initializing;
}
/**
* Sets whether the transformation is initializing.
*
* @param initializing
* true if the transformation is initializing, false otherwise
*/
public void setInitializing( boolean initializing ) {
this.initializing = initializing;
}
/**
* Checks whether the transformation is preparing for execution.
*
* @return true if the transformation is preparing for execution, false otherwise
*/
public boolean isPreparing() {
return preparing;
}
/**
* Sets whether the transformation is preparing for execution.
*
* @param preparing
* true if the transformation is preparing for execution, false otherwise
*/
public void setPreparing( boolean preparing ) {
this.preparing = preparing;
}
/**
* Checks whether the transformation is running.
*
* @return true if the transformation is running, false otherwise
*/
public boolean isRunning() {
return running;
}
/**
* Sets whether the transformation is running.
*
* @param running
* true if the transformation is running, false otherwise
*/
public void setRunning( boolean running ) {
this.running = running;
}
/**
* Execute the transformation in a clustered fashion. The transformation steps are split and collected in a
* TransSplitter object
*
* @param transMeta
* the transformation's meta-data
* @param executionConfiguration
* the execution configuration
* @return the transformation splitter object
* @throws KettleException
* the kettle exception
*/
public static final TransSplitter executeClustered( final TransMeta transMeta,
final TransExecutionConfiguration executionConfiguration ) throws KettleException {
if ( Const.isEmpty( transMeta.getName() ) ) {
      throw new KettleException(
          "The transformation needs a name to uniquely identify it on the remote server." );
}
TransSplitter transSplitter = new TransSplitter( transMeta );
transSplitter.splitOriginalTransformation();
// Pass the clustered run ID to allow for parallel execution of clustered transformations
//
executionConfiguration.getVariables().put(
Const.INTERNAL_VARIABLE_CLUSTER_RUN_ID, transSplitter.getClusteredRunId() );
executeClustered( transSplitter, executionConfiguration );
return transSplitter;
}
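  // Illustrative sketch (not part of the original source): a typical clustered run combines this method with the
  // monitoring and result-gathering helpers defined further down in this class. The log, transMeta,
  // executionConfiguration and parentJob variables are assumptions for the example.
  //
  //   TransSplitter transSplitter = Trans.executeClustered( transMeta, executionConfiguration );
  //   long errors = Trans.monitorClusteredTransformation( log, transSplitter, parentJob );
  //   Result result = Trans.getClusteredTransformationResult( log, transSplitter, parentJob );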
/**
* Executes an existing TransSplitter, with the transformation already split.
*
* @param transSplitter
* the trans splitter
* @param executionConfiguration
* the execution configuration
* @throws KettleException
* the kettle exception
* @see org.pentaho.di.ui.spoon.delegates.SpoonTransformationDelegate
*/
public static final void executeClustered( final TransSplitter transSplitter,
final TransExecutionConfiguration executionConfiguration ) throws KettleException {
try {
// Send the transformations to the servers...
//
// First the master and the slaves...
//
TransMeta master = transSplitter.getMaster();
final SlaveServer[] slaves = transSplitter.getSlaveTargets();
final Thread[] threads = new Thread[slaves.length];
final Throwable[] errors = new Throwable[slaves.length];
// Keep track of the various Carte object IDs
//
final Map<TransMeta, String> carteObjectMap = transSplitter.getCarteObjectMap();
//
// Send them all on their way...
//
SlaveServer masterServer = null;
List<StepMeta> masterSteps = master.getTransHopSteps( false );
if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
{
masterServer = transSplitter.getMasterServer();
if ( executionConfiguration.isClusterPosting() ) {
TransConfiguration transConfiguration = new TransConfiguration( master, executionConfiguration );
Map<String, String> variables = transConfiguration.getTransExecutionConfiguration().getVariables();
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) );
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "Y" );
// Parameters override the variables but they need to pass over the configuration too...
//
Map<String, String> params = transConfiguration.getTransExecutionConfiguration().getParams();
TransMeta ot = transSplitter.getOriginalTransformation();
for ( String param : ot.listParameters() ) {
String value =
Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot
.getVariable( param ) ) );
params.put( param, value );
}
String masterReply =
masterServer.sendXML( transConfiguration.getXML(), AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
WebResult webResult = WebResult.fromXMLString( masterReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "An error occurred sending the master transformation: "
+ webResult.getMessage() );
}
carteObjectMap.put( master, webResult.getId() );
}
}
// Then the slaves...
// These are started in a background thread.
//
for ( int i = 0; i < slaves.length; i++ ) {
final int index = i;
final TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
if ( executionConfiguration.isClusterPosting() ) {
Runnable runnable = new Runnable() {
public void run() {
try {
// Create a copy for local use... We get race-conditions otherwise...
//
TransExecutionConfiguration slaveTransExecutionConfiguration =
(TransExecutionConfiguration) executionConfiguration.clone();
TransConfiguration transConfiguration =
new TransConfiguration( slaveTrans, slaveTransExecutionConfiguration );
Map<String, String> variables = slaveTransExecutionConfiguration.getVariables();
variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, Integer.toString( index ) );
variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, slaves[index].getName() );
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) );
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "N" );
// Parameters override the variables but they need to pass over the configuration too...
//
Map<String, String> params = slaveTransExecutionConfiguration.getParams();
TransMeta ot = transSplitter.getOriginalTransformation();
for ( String param : ot.listParameters() ) {
String value =
Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot
.getVariable( param ) ) );
params.put( param, value );
}
String slaveReply =
slaves[index].sendXML( transConfiguration.getXML(), AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
WebResult webResult = WebResult.fromXMLString( slaveReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "An error occurred sending a slave transformation: "
+ webResult.getMessage() );
}
carteObjectMap.put( slaveTrans, webResult.getId() );
} catch ( Throwable t ) {
errors[index] = t;
}
}
};
threads[i] = new Thread( runnable );
}
}
// Start the slaves
for ( int i = 0; i < threads.length; i++ ) {
if ( threads[i] != null ) {
threads[i].start();
}
}
// Wait until the slaves report back...
// Sending the XML over is the heaviest part
// Later we can do the others as well...
//
for ( int i = 0; i < threads.length; i++ ) {
if ( threads[i] != null ) {
threads[i].join();
if ( errors[i] != null ) {
throw new KettleException( errors[i] );
}
}
}
if ( executionConfiguration.isClusterPosting() ) {
if ( executionConfiguration.isClusterPreparing() ) {
// Prepare the master...
if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
{
String carteObjectId = carteObjectMap.get( master );
String masterReply =
masterServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( master.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( masterReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while preparing the execution of the master transformation: "
+ webResult.getMessage() );
}
}
// Prepare the slaves
// WG: Should these be threaded like the above initialization?
for ( int i = 0; i < slaves.length; i++ ) {
TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
String carteObjectId = carteObjectMap.get( slaveTrans );
String slaveReply =
slaves[i].execService( PrepareExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( slaveTrans.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( slaveReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while preparing the execution of a slave transformation: "
+ webResult.getMessage() );
}
}
}
if ( executionConfiguration.isClusterStarting() ) {
// Start the master...
if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
{
String carteObjectId = carteObjectMap.get( master );
String masterReply =
masterServer.execService( StartExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( master.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( masterReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while starting the execution of the master transformation: "
+ webResult.getMessage() );
}
}
// Start the slaves
// WG: Should these be threaded like the above initialization?
for ( int i = 0; i < slaves.length; i++ ) {
TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
String carteObjectId = carteObjectMap.get( slaveTrans );
String slaveReply =
slaves[i].execService( StartExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( slaveTrans.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( slaveReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while starting the execution of a slave transformation: "
+ webResult.getMessage() );
}
}
}
}
} catch ( KettleException ke ) {
throw ke;
} catch ( Exception e ) {
throw new KettleException( "There was an error during transformation split", e );
}
}
/**
* Monitors a clustered transformation every second,
* after all the transformations in a cluster schema are running.<br>
* Now we should verify that they are all running as they should.<br>
* If a transformation has an error, we should kill them all.<br>
* This should happen in a separate thread to prevent blocking of the UI.<br>
* <br>
* When the master and slave transformations have all finished, we should also run<br>
* a cleanup on those transformations to release sockets, etc.<br>
* <br>
*
* @param log
* the log interface channel
* @param transSplitter
* the transformation splitter object
* @param parentJob
* the parent job when executed in a job, otherwise just set to null
* @return the number of errors encountered
*/
public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter,
Job parentJob ) {
    return monitorClusteredTransformation( log, transSplitter, parentJob, 1 ); // monitor every second
}
/**
   * Monitors a clustered transformation at the given polling interval,
* after all the transformations in a cluster schema are running.<br>
* Now we should verify that they are all running as they should.<br>
* If a transformation has an error, we should kill them all.<br>
* This should happen in a separate thread to prevent blocking of the UI.<br>
* <br>
* When the master and slave transformations have all finished, we should also run<br>
* a cleanup on those transformations to release sockets, etc.<br>
* <br>
*
* @param log
* the subject to use for logging
* @param transSplitter
* the transformation splitter object
* @param parentJob
* the parent job when executed in a job, otherwise just set to null
* @param sleepTimeSeconds
* the sleep time in seconds in between slave transformation status polling
* @return the number of errors encountered
*/
public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter,
Job parentJob, int sleepTimeSeconds ) {
long errors = 0L;
//
// See if the remote transformations have finished.
// We could just look at the master, but I doubt that that is enough in all
// situations.
//
SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask
// these guys
TransMeta[] slaves = transSplitter.getSlaves();
Map<TransMeta, String> carteObjectMap = transSplitter.getCarteObjectMap();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Error getting the master server", e );
masterServer = null;
errors++;
}
TransMeta masterTransMeta = transSplitter.getMaster();
boolean allFinished = false;
while ( !allFinished && errors == 0 && ( parentJob == null || !parentJob.isStopped() ) ) {
allFinished = true;
errors = 0L;
// Slaves first...
//
for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) {
try {
String carteObjectId = carteObjectMap.get( slaves[s] );
SlaveServerTransStatus transStatus =
slaveServers[s].getTransStatus( slaves[s].getName(), carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' has finished." );
}
}
errors += transStatus.getNrStepErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to check slave transformation : " + e.toString() );
}
}
// Check the master too
if ( allFinished && errors == 0 && masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) {
try {
String carteObjectId = carteObjectMap.get( masterTransMeta );
SlaveServerTransStatus transStatus =
masterServer.getTransStatus( masterTransMeta.getName(), carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( "Master transformation is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( "Master transformation has finished." );
}
}
Result result = transStatus.getResult( transSplitter.getOriginalTransformation() );
errors += result.getNrErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to check master transformation : " + e.toString() );
}
}
if ( ( parentJob != null && parentJob.isStopped() ) || errors != 0 ) {
//
// Stop all slaves and the master on the slave servers
//
for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) {
try {
String carteObjectId = carteObjectMap.get( slaves[s] );
WebResult webResult = slaveServers[s].stopTransformation( slaves[s].getName(), carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( "Unable to stop slave transformation '"
+ slaves[s].getName() + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to stop transformation : " + e.toString() );
}
}
try {
String carteObjectId = carteObjectMap.get( masterTransMeta );
WebResult webResult = masterServer.stopTransformation( masterTransMeta.getName(), carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( "Unable to stop master transformation '"
+ masterServer.getName() + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to stop the master : " + e.toString() );
}
}
//
// Keep waiting until all transformations have finished
// If needed, we stop them again and again until they yield.
//
if ( !allFinished ) {
// Not finished or error: wait a bit longer
if ( log.isDetailed() ) {
log.logDetailed( "Clustered transformation is still running, waiting a few seconds..." );
}
try {
Thread.sleep( sleepTimeSeconds * 2000 );
} catch ( Exception e ) {
// Ignore errors
} // Check all slaves every x seconds.
}
}
log.logBasic( "All transformations in the cluster have finished." );
errors += cleanupCluster( log, transSplitter );
return errors;
}
/**
* Cleanup the cluster, including the master and all slaves, and return the number of errors that occurred.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @return the number of errors that occurred in the clustered transformation
*/
public static int cleanupCluster( LogChannelInterface log, TransSplitter transSplitter ) {
SlaveServer[] slaveServers = transSplitter.getSlaveTargets();
TransMeta[] slaves = transSplitter.getSlaves();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Unable to obtain the master server from the cluster", e );
return 1;
}
TransMeta masterTransMeta = transSplitter.getMaster();
int errors = 0;
// All transformations have finished, with or without error.
// Now run a cleanup on all the transformation on the master and the slaves.
//
// Slaves first...
//
for ( int s = 0; s < slaveServers.length; s++ ) {
try {
cleanupSlaveServer( transSplitter, slaveServers[s], slaves[s] );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to clean up slave transformation", e );
}
}
// Clean up the master too
//
if ( masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) {
try {
cleanupSlaveServer( transSplitter, masterServer, masterTransMeta );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to clean up master transformation", e );
}
// Also de-allocate all ports used for this clustered transformation on the master.
//
try {
// Deallocate all ports belonging to this clustered run, not anything else
//
masterServer.deAllocateServerSockets( transSplitter.getOriginalTransformation().getName(), transSplitter
.getClusteredRunId() );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to clean up port sockets for transformation'"
+ transSplitter.getOriginalTransformation().getName() + "'", e );
}
}
return errors;
}
/**
* Cleanup the slave server as part of a clustered transformation.
*
* @param transSplitter
* the TransSplitter object
* @param slaveServer
* the slave server
* @param slaveTransMeta
* the slave transformation meta-data
* @throws KettleException
* if any errors occur during cleanup
*/
public static void cleanupSlaveServer( TransSplitter transSplitter, SlaveServer slaveServer,
TransMeta slaveTransMeta ) throws KettleException {
String transName = slaveTransMeta.getName();
try {
String carteObjectId = transSplitter.getCarteObjectMap().get( slaveTransMeta );
WebResult webResult = slaveServer.cleanupTransformation( transName, carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
throw new KettleException( "Unable to run clean-up on slave server '"
+ slaveServer + "' for transformation '" + transName + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
throw new KettleException( "Unexpected error contacting slave server '"
        + slaveServer + "' to clean up transformation '" + transName + "'", e );
}
}
/**
* Gets the clustered transformation result.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @param parentJob
* the parent job
* @return the clustered transformation result
*/
public static final Result getClusteredTransformationResult( LogChannelInterface log,
TransSplitter transSplitter, Job parentJob ) {
return getClusteredTransformationResult( log, transSplitter, parentJob, false );
}
/**
* Gets the clustered transformation result.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @param parentJob
* the parent job
* @param loggingRemoteWork
* log remote execution logs locally
* @return the clustered transformation result
*/
public static final Result getClusteredTransformationResult( LogChannelInterface log,
TransSplitter transSplitter, Job parentJob, boolean loggingRemoteWork ) {
Result result = new Result();
//
// See if the remote transformations have finished.
// We could just look at the master, but I doubt that that is enough in all situations.
//
SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask these guys
TransMeta[] slaves = transSplitter.getSlaves();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Error getting the master server", e );
masterServer = null;
result.setNrErrors( result.getNrErrors() + 1 );
}
TransMeta master = transSplitter.getMaster();
// Slaves first...
//
for ( int s = 0; s < slaveServers.length; s++ ) {
try {
// Get the detailed status of the slave transformation...
//
SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus( slaves[s].getName(), "", 0 );
Result transResult = transStatus.getResult( slaves[s] );
result.add( transResult );
if ( loggingRemoteWork ) {
log.logBasic( "-- Slave : " + slaveServers[s].getName() );
log.logBasic( transStatus.getLoggingString() );
}
} catch ( Exception e ) {
result.setNrErrors( result.getNrErrors() + 1 );
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to get result of slave transformation : " + e.toString() );
}
}
// Clean up the master too
//
if ( master != null && master.nrSteps() > 0 ) {
try {
// Get the detailed status of the slave transformation...
//
SlaveServerTransStatus transStatus = masterServer.getTransStatus( master.getName(), "", 0 );
Result transResult = transStatus.getResult( master );
result.add( transResult );
if ( loggingRemoteWork ) {
log.logBasic( "-- Master : " + masterServer.getName() );
log.logBasic( transStatus.getLoggingString() );
}
} catch ( Exception e ) {
result.setNrErrors( result.getNrErrors() + 1 );
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to get result of master transformation : " + e.toString() );
}
}
return result;
}
/**
* Send the transformation for execution to a Carte slave server.
*
* @param transMeta
* the transformation meta-data
* @param executionConfiguration
* the transformation execution configuration
* @param repository
* the repository
* @return The Carte object ID on the server.
* @throws KettleException
* if any errors occur during the dispatch to the slave server
*/
public static String sendToSlaveServer( TransMeta transMeta, TransExecutionConfiguration executionConfiguration,
Repository repository, IMetaStore metaStore ) throws KettleException {
String carteObjectId;
SlaveServer slaveServer = executionConfiguration.getRemoteServer();
if ( slaveServer == null ) {
throw new KettleException( "No slave server specified" );
}
if ( Const.isEmpty( transMeta.getName() ) ) {
      throw new KettleException(
          "The transformation needs a name to uniquely identify it on the remote server." );
}
try {
// Inject certain internal variables to make it more intuitive.
//
Map<String, String> vars = new HashMap<String, String>();
for ( String var : Const.INTERNAL_TRANS_VARIABLES ) {
vars.put( var, transMeta.getVariable( var ) );
}
for ( String var : Const.INTERNAL_JOB_VARIABLES ) {
vars.put( var, transMeta.getVariable( var ) );
}
executionConfiguration.getVariables().putAll( vars );
slaveServer.injectVariables( executionConfiguration.getVariables() );
slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() );
if ( executionConfiguration.isPassingExport() ) {
// First export the job...
//
FileObject tempFile =
KettleVFS.createTempFile( "transExport", ".zip", System.getProperty( "java.io.tmpdir" ), transMeta );
TopLevelResource topLevelResource =
ResourceUtil.serializeResourceExportInterface(
tempFile.getName().toString(), transMeta, transMeta, repository, metaStore, executionConfiguration
.getXML(), CONFIGURATION_IN_EXPORT_FILENAME );
// Send the zip file over to the slave server...
//
String result =
slaveServer.sendExport(
topLevelResource.getArchiveName(), AddExportServlet.TYPE_TRANS, topLevelResource
.getBaseResourceName() );
WebResult webResult = WebResult.fromXMLString( result );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"There was an error passing the exported transformation to the remote server: "
+ Const.CR + webResult.getMessage() );
}
carteObjectId = webResult.getId();
} else {
// Now send it off to the remote server...
//
String xml = new TransConfiguration( transMeta, executionConfiguration ).getXML();
String reply = slaveServer.sendXML( xml, AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
WebResult webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "There was an error posting the transformation on the remote server: "
+ Const.CR + webResult.getMessage() );
}
carteObjectId = webResult.getId();
}
// Prepare the transformation
//
String reply =
slaveServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( transMeta.getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId );
WebResult webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
        throw new KettleException(
          "There was an error preparing the transformation for execution on the remote server: "
+ Const.CR + webResult.getMessage() );
}
// Start the transformation
//
reply =
slaveServer.execService( StartExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( transMeta.getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId );
webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "There was an error starting the transformation on the remote server: "
+ Const.CR + webResult.getMessage() );
}
return carteObjectId;
} catch ( KettleException ke ) {
throw ke;
} catch ( Exception e ) {
throw new KettleException( e );
}
}
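  // Illustrative sketch (not part of the original source): sending a transformation to a Carte slave server and then
  // monitoring it remotely. The slave server is assumed to be configured on the execution configuration, and the
  // repository/metaStore variables are assumptions for the example.
  //
  //   String carteObjectId = Trans.sendToSlaveServer( transMeta, executionConfiguration, repository, metaStore );
  //   SlaveServer remoteServer = executionConfiguration.getRemoteServer();
  //   Trans.monitorRemoteTransformation( log, carteObjectId, transMeta.getName(), remoteServer );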
/**
* Checks whether the transformation is ready to start (i.e. execution preparation was successful)
*
* @return true if the transformation was prepared for execution successfully, false otherwise
* @see org.pentaho.di.trans.Trans#prepareExecution(String[])
*/
public boolean isReadyToStart() {
return readyToStart;
}
/**
* Sets the internal kettle variables.
*
* @param var
* the new internal kettle variables
*/
public void setInternalKettleVariables( VariableSpace var ) {
    if ( transMeta != null && !Const.isEmpty( transMeta.getFilename() ) ) // we have a filename that's defined.
{
try {
FileObject fileObject = KettleVFS.getFileObject( transMeta.getFilename(), var );
FileName fileName = fileObject.getName();
// The filename of the transformation
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() );
// The directory of the transformation
FileName fileDir = fileName.getParent();
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() );
} catch ( KettleFileException e ) {
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
}
} else {
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
}
// The name of the transformation
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( transMeta.getName(), "" ) );
// TODO PUT THIS INSIDE OF THE "IF"
// The name of the directory in the repository
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, transMeta
.getRepositoryDirectory() != null ? transMeta.getRepositoryDirectory().getPath() : "" );
// Here we don't clear the definition of the job specific parameters, as they may come in handy.
// A transformation can be called from a job and may inherit the job internal variables
    // but the other way around is not possible.
}
/**
* Copies variables from a given variable space to this transformation.
*
* @param space
* the variable space
* @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace)
*/
public void copyVariablesFrom( VariableSpace space ) {
variables.copyVariablesFrom( space );
}
/**
* Substitutes any variable values into the given string, and returns the resolved string.
*
* @param aString
* the string to resolve against environment variables
   * @return the string after variables have been resolved/substituted
* @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String)
*/
public String environmentSubstitute( String aString ) {
return variables.environmentSubstitute( aString );
}
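  // Illustrative sketch (not part of the original source): variable substitution on a single string. The variable
  // name OUTPUT_DIR is an assumption for the example.
  //
  //   trans.setVariable( "OUTPUT_DIR", "/tmp/out" );
  //   String path = trans.environmentSubstitute( "${OUTPUT_DIR}/result.csv" ); // yields "/tmp/out/result.csv"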
/**
* Substitutes any variable values into each of the given strings, and returns an array containing the resolved
* string(s).
*
* @param aString
* an array of strings to resolve against environment variables
   * @return the array of strings after variables have been resolved/substituted
* @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[])
*/
public String[] environmentSubstitute( String[] aString ) {
return variables.environmentSubstitute( aString );
}
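  /**
   * Substitutes any field values from the given row into the given string, and returns the resolved string.
   *
   * @param aString
   *          the string to resolve against the field values of the supplied row
   * @param rowMeta
   *          the metadata of the row
   * @param rowData
   *          the row data
   * @return the string after field values have been substituted
   * @throws KettleValueException
   *           in case there is a problem substituting a field value
   * @see org.pentaho.di.core.variables.VariableSpace#fieldSubstitute(java.lang.String,
   *      org.pentaho.di.core.row.RowMetaInterface, java.lang.Object[])
   */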
public String fieldSubstitute( String aString, RowMetaInterface rowMeta, Object[] rowData ) throws KettleValueException {
return variables.fieldSubstitute( aString, rowMeta, rowData );
}
/**
* Gets the parent variable space.
*
* @return the parent variable space
* @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace()
*/
public VariableSpace getParentVariableSpace() {
return variables.getParentVariableSpace();
}
/**
* Sets the parent variable space.
*
* @param parent
* the new parent variable space
* @see org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace(
* org.pentaho.di.core.variables.VariableSpace)
*/
public void setParentVariableSpace( VariableSpace parent ) {
variables.setParentVariableSpace( parent );
}
/**
* Gets the value of the specified variable, or returns a default value if no such variable exists.
*
* @param variableName
* the variable name
* @param defaultValue
* the default value
* @return the value of the specified variable, or returns a default value if no such variable exists
* @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String)
*/
public String getVariable( String variableName, String defaultValue ) {
return variables.getVariable( variableName, defaultValue );
}
/**
* Gets the value of the specified variable, or returns a default value if no such variable exists.
*
* @param variableName
* the variable name
* @return the value of the specified variable, or returns a default value if no such variable exists
* @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String)
*/
public String getVariable( String variableName ) {
return variables.getVariable( variableName );
}
/**
* Returns a boolean representation of the specified variable after performing any necessary substitution. Truth
* values include case-insensitive versions of "Y", "YES", "TRUE" or "1".
*
* @param variableName
* the variable name
* @param defaultValue
* the default value
* @return a boolean representation of the specified variable after performing any necessary substitution
* @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean)
*/
public boolean getBooleanValueOfVariable( String variableName, boolean defaultValue ) {
if ( !Const.isEmpty( variableName ) ) {
String value = environmentSubstitute( variableName );
if ( !Const.isEmpty( value ) ) {
return ValueMeta.convertStringToBoolean( value );
}
}
return defaultValue;
}
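  // Illustrative sketch (not part of the original source): reading a boolean flag from a variable, falling back to a
  // default when the variable is not set. The variable name STRICT_MODE is an assumption for the example; note that
  // the argument is substituted first, so the ${...} form is passed in.
  //
  //   boolean strictMode = trans.getBooleanValueOfVariable( "${STRICT_MODE}", false );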
/**
* Sets the values of the transformation's variables to the values from the parent variables.
*
* @param parent
* the parent
* @see org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom(
* org.pentaho.di.core.variables.VariableSpace)
*/
public void initializeVariablesFrom( VariableSpace parent ) {
variables.initializeVariablesFrom( parent );
}
/**
* Gets a list of variable names for the transformation.
*
* @return a list of variable names
* @see org.pentaho.di.core.variables.VariableSpace#listVariables()
*/
public String[] listVariables() {
return variables.listVariables();
}
/**
* Sets the value of the specified variable to the specified value.
*
* @param variableName
* the variable name
* @param variableValue
* the variable value
* @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String)
*/
public void setVariable( String variableName, String variableValue ) {
variables.setVariable( variableName, variableValue );
}
/**
* Shares a variable space from another variable space. This means that the object should take over the space used as
* argument.
*
* @param space
* the variable space
* @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace)
*/
public void shareVariablesWith( VariableSpace space ) {
variables = space;
}
/**
   * Injects variables using the given Map. The properties object is stored and applied at the time the VariableSpace
   * is initialized (or immediately upon calling this method if the space is already initialized). After injecting,
   * the reference to the properties object should be released.
*
* @param prop
* the property map
* @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map)
*/
public void injectVariables( Map<String, String> prop ) {
variables.injectVariables( prop );
}
/**
* Pauses the transformation (pause all steps).
*/
public void pauseRunning() {
paused.set( true );
for ( StepMetaDataCombi combi : steps ) {
combi.step.pauseRunning();
}
}
/**
* Resumes running the transformation after a pause (resume all steps).
*/
public void resumeRunning() {
for ( StepMetaDataCombi combi : steps ) {
combi.step.resumeRunning();
}
paused.set( false );
}
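  // Illustrative sketch (not part of the original source): pausing and later resuming a running transformation, for
  // example from a UI or monitoring thread.
  //
  //   trans.pauseRunning();
  //   // ... inspect intermediate state ...
  //   trans.resumeRunning();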
/**
* Checks whether the transformation is being previewed.
*
* @return true if the transformation is being previewed, false otherwise
*/
public boolean isPreview() {
return preview;
}
/**
* Sets whether the transformation is being previewed.
*
* @param preview
* true if the transformation is being previewed, false otherwise
*/
public void setPreview( boolean preview ) {
this.preview = preview;
}
/**
* Gets the repository object for the transformation.
*
* @return the repository
*/
public Repository getRepository() {
if ( repository == null ) {
// Does the transmeta have a repo?
// This is a valid case, when a non-repo trans is attempting to retrieve
// a transformation in the repository.
if ( transMeta != null ) {
return transMeta.getRepository();
}
}
return repository;
}
/**
* Sets the repository object for the transformation.
*
* @param repository
* the repository object to set
*/
public void setRepository( Repository repository ) {
this.repository = repository;
if ( transMeta != null ) {
transMeta.setRepository( repository );
}
}
/**
* Gets a named list (map) of step performance snapshots.
*
* @return a named list (map) of step performance snapshots
*/
public Map<String, List<StepPerformanceSnapShot>> getStepPerformanceSnapShots() {
return stepPerformanceSnapShots;
}
/**
* Sets the named list (map) of step performance snapshots.
*
* @param stepPerformanceSnapShots
* a named list (map) of step performance snapshots to set
*/
public void setStepPerformanceSnapShots( Map<String, List<StepPerformanceSnapShot>> stepPerformanceSnapShots ) {
this.stepPerformanceSnapShots = stepPerformanceSnapShots;
}
/**
* Gets a list of the transformation listeners.
* Please do not attempt to modify this list externally.
* Returned list is mutable only for backward compatibility purposes.
*
* @return the transListeners
*/
public List<TransListener> getTransListeners() {
return transListeners;
}
/**
* Sets the list of transformation listeners.
*
* @param transListeners
* the transListeners to set
*/
public void setTransListeners( List<TransListener> transListeners ) {
this.transListeners = Collections.synchronizedList( transListeners );
}
/**
* Adds a transformation listener.
*
* @param transListener
* the trans listener
*/
public void addTransListener( TransListener transListener ) {
// PDI-5229 sync added
synchronized ( transListeners ) {
transListeners.add( transListener );
}
}
/**
* Sets the list of stop-event listeners for the transformation.
*
* @param transStoppedListeners
* the list of stop-event listeners to set
*/
public void setTransStoppedListeners( List<TransStoppedListener> transStoppedListeners ) {
this.transStoppedListeners = Collections.synchronizedList( transStoppedListeners );
}
/**
   * Gets the list of stop-event listeners for the transformation. This list is not concurrency-safe.
   * Please note that the returned list is mutable only for backward compatibility reasons.
*
* @return the list of stop-event listeners
*/
public List<TransStoppedListener> getTransStoppedListeners() {
return transStoppedListeners;
}
/**
* Adds a stop-event listener to the transformation.
*
* @param transStoppedListener
* the stop-event listener to add
*/
public void addTransStoppedListener( TransStoppedListener transStoppedListener ) {
transStoppedListeners.add( transStoppedListener );
}
/**
* Checks if the transformation is paused.
*
* @return true if the transformation is paused, false otherwise
*/
public boolean isPaused() {
return paused.get();
}
/**
* Checks if the transformation is stopped.
*
* @return true if the transformation is stopped, false otherwise
*/
public boolean isStopped() {
return stopped.get();
}
/**
* Monitors a remote transformation every 5 seconds.
*
* @param log
* the log channel interface
* @param carteObjectId
* the Carte object ID
* @param transName
* the transformation name
* @param remoteSlaveServer
* the remote slave server
*/
public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName,
SlaveServer remoteSlaveServer ) {
monitorRemoteTransformation( log, carteObjectId, transName, remoteSlaveServer, 5 );
}
/**
* Monitors a remote transformation at the specified interval.
*
* @param log
* the log channel interface
* @param carteObjectId
* the Carte object ID
* @param transName
* the transformation name
* @param remoteSlaveServer
* the remote slave server
* @param sleepTimeSeconds
* the sleep time (in seconds)
*/
public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName,
SlaveServer remoteSlaveServer, int sleepTimeSeconds ) {
long errors = 0;
boolean allFinished = false;
while ( !allFinished && errors == 0 ) {
allFinished = true;
errors = 0L;
// Check the remote server
if ( allFinished && errors == 0 ) {
try {
SlaveServerTransStatus transStatus = remoteSlaveServer.getTransStatus( transName, carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( transName, "Remote transformation is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( transName, "Remote transformation has finished." );
}
}
Result result = transStatus.getResult();
errors += result.getNrErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( transName, "Unable to contact remote slave server '"
+ remoteSlaveServer.getName() + "' to check transformation status : " + e.toString() );
}
}
//
// Keep waiting until all transformations have finished
// If needed, we stop them again and again until they yield.
//
if ( !allFinished ) {
// Not finished or error: wait a bit longer
if ( log.isDetailed() ) {
log.logDetailed( transName, "The remote transformation is still running, waiting a few seconds..." );
}
try {
Thread.sleep( sleepTimeSeconds * 1000 );
} catch ( Exception e ) {
// Ignore errors
} // Check all slaves every x seconds.
}
}
log.logMinimal( transName, "The remote transformation has finished." );
// Clean up the remote transformation
//
try {
WebResult webResult = remoteSlaveServer.cleanupTransformation( transName, carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( transName, "Unable to run clean-up on remote transformation '"
+ transName + "' : " + webResult.getMessage() );
errors += 1;
}
} catch ( Exception e ) {
errors += 1;
log.logError( transName, "Unable to contact slave server '"
+ remoteSlaveServer.getName() + "' to clean up transformation : " + e.toString() );
}
}
/**
* Adds a parameter definition to this transformation.
*
* @param key
* the name of the parameter
* @param defValue
* the default value for the parameter
* @param description
* the description of the parameter
* @throws DuplicateParamException
* the duplicate param exception
* @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String,
* java.lang.String)
*/
public void addParameterDefinition( String key, String defValue, String description ) throws DuplicateParamException {
namedParams.addParameterDefinition( key, defValue, description );
}
/**
* Gets the default value of the specified parameter.
*
* @param key
* the name of the parameter
* @return the default value of the parameter
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String)
*/
public String getParameterDefault( String key ) throws UnknownParamException {
return namedParams.getParameterDefault( key );
}
/**
* Gets the description of the specified parameter.
*
* @param key
* the name of the parameter
* @return the parameter description
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String)
*/
public String getParameterDescription( String key ) throws UnknownParamException {
return namedParams.getParameterDescription( key );
}
/**
* Gets the value of the specified parameter.
*
* @param key
* the name of the parameter
* @return the parameter value
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String)
*/
public String getParameterValue( String key ) throws UnknownParamException {
return namedParams.getParameterValue( key );
}
/**
* Gets a list of the parameters for the transformation.
*
* @return an array of strings containing the names of all parameters for the transformation
* @see org.pentaho.di.core.parameters.NamedParams#listParameters()
*/
public String[] listParameters() {
return namedParams.listParameters();
}
/**
* Sets the value for the specified parameter.
*
* @param key
* the name of the parameter
* @param value
   *          the value to set for the parameter
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String)
*/
public void setParameterValue( String key, String value ) throws UnknownParamException {
namedParams.setParameterValue( key, value );
}
/**
* Remove all parameters.
*
* @see org.pentaho.di.core.parameters.NamedParams#eraseParameters()
*/
public void eraseParameters() {
namedParams.eraseParameters();
}
/**
* Clear the values of all parameters.
*
* @see org.pentaho.di.core.parameters.NamedParams#clearParameters()
*/
public void clearParameters() {
namedParams.clearParameters();
}
/**
* Activates all parameters by setting their values. If no values already exist, the method will attempt to set the
* parameter to the default value. If no default value exists, the method will set the value of the parameter to the
* empty string ("").
*
* @see org.pentaho.di.core.parameters.NamedParams#activateParameters()
*/
public void activateParameters() {
String[] keys = listParameters();
for ( String key : keys ) {
String value;
try {
value = getParameterValue( key );
} catch ( UnknownParamException e ) {
value = "";
}
String defValue;
try {
defValue = getParameterDefault( key );
} catch ( UnknownParamException e ) {
defValue = "";
}
if ( Const.isEmpty( value ) ) {
setVariable( key, Const.NVL( defValue, "" ) );
} else {
setVariable( key, Const.NVL( value, "" ) );
}
}
}
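  // Illustrative sketch (not part of the original source): setting parameter values and activating them before
  // execution. The parameter INPUT_FILE must already be defined on the transformation; the name is an assumption.
  //
  //   trans.setParameterValue( "INPUT_FILE", "/tmp/input.csv" );
  //   trans.activateParameters();
  //   trans.execute( null );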
/**
* Copy parameters from a NamedParams object.
*
* @param params
* the NamedParams object from which to copy the parameters
* @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams)
*/
public void copyParametersFrom( NamedParams params ) {
namedParams.copyParametersFrom( params );
}
/**
* Gets the parent transformation, which is null if no parent transformation exists.
*
* @return a reference to the parent transformation's Trans object, or null if no parent transformation exists
*/
public Trans getParentTrans() {
return parentTrans;
}
/**
* Sets the parent transformation.
*
* @param parentTrans
* the parentTrans to set
*/
public void setParentTrans( Trans parentTrans ) {
this.logLevel = parentTrans.getLogLevel();
this.log.setLogLevel( logLevel );
this.parentTrans = parentTrans;
transactionId = calculateTransactionId();
}
/**
* Gets the mapping step name.
*
* @return the name of the mapping step that created this transformation
*/
public String getMappingStepName() {
return mappingStepName;
}
/**
* Sets the mapping step name.
*
* @param mappingStepName
* the name of the mapping step that created this transformation
*/
public void setMappingStepName( String mappingStepName ) {
this.mappingStepName = mappingStepName;
}
/**
* Sets the socket repository.
*
* @param socketRepository
* the new socket repository
*/
public void setSocketRepository( SocketRepository socketRepository ) {
this.socketRepository = socketRepository;
}
/**
* Gets the socket repository.
*
* @return the socket repository
*/
public SocketRepository getSocketRepository() {
return socketRepository;
}
/**
* Gets the object name.
*
* @return the object name
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectName()
*/
public String getObjectName() {
return getName();
}
/**
* Gets the object copy. For Trans, this always returns null
*
* @return null
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectCopy()
*/
public String getObjectCopy() {
return null;
}
/**
* Gets the filename of the transformation, or null if no filename exists
*
* @return the filename
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getFilename()
*/
public String getFilename() {
if ( transMeta == null ) {
return null;
}
return transMeta.getFilename();
}
/**
* Gets the log channel ID.
*
* @return the log channel ID
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId()
*/
public String getLogChannelId() {
return log.getLogChannelId();
}
/**
* Gets the object ID.
*
* @return the object ID
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectId()
*/
public ObjectId getObjectId() {
if ( transMeta == null ) {
return null;
}
return transMeta.getObjectId();
}
/**
* Gets the object revision.
*
* @return the object revision
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectRevision()
*/
public ObjectRevision getObjectRevision() {
if ( transMeta == null ) {
return null;
}
return transMeta.getObjectRevision();
}
/**
* Gets the object type. For Trans, this always returns LoggingObjectType.TRANS
*
* @return the object type
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType()
*/
public LoggingObjectType getObjectType() {
return LoggingObjectType.TRANS;
}
/**
* Gets the parent logging object interface.
*
* @return the parent
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getParent()
*/
public LoggingObjectInterface getParent() {
return parent;
}
/**
* Gets the repository directory.
*
* @return the repository directory
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getRepositoryDirectory()
*/
public RepositoryDirectoryInterface getRepositoryDirectory() {
if ( transMeta == null ) {
return null;
}
return transMeta.getRepositoryDirectory();
}
/**
* Gets the log level.
*
* @return the log level
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogLevel()
*/
public LogLevel getLogLevel() {
return logLevel;
}
/**
* Sets the log level.
*
* @param logLevel
* the new log level
*/
public void setLogLevel( LogLevel logLevel ) {
this.logLevel = logLevel;
log.setLogLevel( logLevel );
}
/**
* Gets the logging hierarchy.
*
* @return the logging hierarchy
*/
public List<LoggingHierarchy> getLoggingHierarchy() {
List<LoggingHierarchy> hierarchy = new ArrayList<LoggingHierarchy>();
List<String> childIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() );
for ( String childId : childIds ) {
LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject( childId );
if ( loggingObject != null ) {
hierarchy.add( new LoggingHierarchy( getLogChannelId(), batchId, loggingObject ) );
}
}
return hierarchy;
}
/**
* Gets the active sub-transformations.
*
* @return a map (by name) of the active sub-transformations
*/
public Map<String, Trans> getActiveSubtransformations() {
return activeSubtransformations;
}
/**
* Gets the active sub-jobs.
*
* @return a map (by name) of the active sub-jobs
*/
public Map<String, Job> getActiveSubjobs() {
return activeSubjobs;
}
/**
* Gets the container object ID.
*
* @return the Carte object ID
*/
public String getContainerObjectId() {
return containerObjectId;
}
/**
* Sets the container object ID.
*
* @param containerObjectId
* the Carte object ID to set
*/
public void setContainerObjectId( String containerObjectId ) {
this.containerObjectId = containerObjectId;
}
/**
* Gets the registration date. For Trans, this always returns null
*
* @return null
*/
public Date getRegistrationDate() {
return null;
}
/**
* Sets the servlet print writer.
*
* @param servletPrintWriter
* the new servlet print writer
*/
public void setServletPrintWriter( PrintWriter servletPrintWriter ) {
this.servletPrintWriter = servletPrintWriter;
}
/**
* Gets the servlet print writer.
*
* @return the servlet print writer
*/
public PrintWriter getServletPrintWriter() {
return servletPrintWriter;
}
/**
* Gets the name of the executing server.
*
* @return the executingServer
*/
public String getExecutingServer() {
return executingServer;
}
/**
* Sets the name of the executing server.
*
* @param executingServer
* the executingServer to set
*/
public void setExecutingServer( String executingServer ) {
this.executingServer = executingServer;
}
/**
* Gets the name of the executing user.
*
* @return the executingUser
*/
public String getExecutingUser() {
return executingUser;
}
/**
* Sets the name of the executing user.
*
* @param executingUser
* the executingUser to set
*/
public void setExecutingUser( String executingUser ) {
this.executingUser = executingUser;
}
@Override
public boolean isGatheringMetrics() {
return log != null && log.isGatheringMetrics();
}
@Override
public void setGatheringMetrics( boolean gatheringMetrics ) {
if ( log != null ) {
log.setGatheringMetrics( gatheringMetrics );
}
}
@Override
public boolean isForcingSeparateLogging() {
return log != null && log.isForcingSeparateLogging();
}
@Override
public void setForcingSeparateLogging( boolean forcingSeparateLogging ) {
if ( log != null ) {
log.setForcingSeparateLogging( forcingSeparateLogging );
}
}
public List<ResultFile> getResultFiles() {
return resultFiles;
}
public void setResultFiles( List<ResultFile> resultFiles ) {
this.resultFiles = resultFiles;
}
public List<RowMetaAndData> getResultRows() {
return resultRows;
}
public void setResultRows( List<RowMetaAndData> resultRows ) {
this.resultRows = resultRows;
}
public Result getPreviousResult() {
return previousResult;
}
public void setPreviousResult( Result previousResult ) {
this.previousResult = previousResult;
}
public Hashtable<String, Counter> getCounters() {
return counters;
}
public void setCounters( Hashtable<String, Counter> counters ) {
this.counters = counters;
}
public String[] getArguments() {
return arguments;
}
public void setArguments( String[] arguments ) {
this.arguments = arguments;
}
/**
* Clear the error in the transformation, clear all the rows from all the row sets, to make sure the transformation
* can continue with other data. This is intended for use when running single threaded.
*/
public void clearError() {
stopped.set( false );
errors.set( 0 );
setFinished( false );
for ( StepMetaDataCombi combi : steps ) {
StepInterface step = combi.step;
for ( RowSet rowSet : step.getInputRowSets() ) {
rowSet.clear();
}
step.setStopped( false );
}
}
/**
* Gets the transaction ID for the transformation.
*
* @return the transactionId
*/
public String getTransactionId() {
return transactionId;
}
/**
* Sets the transaction ID for the transformation.
*
* @param transactionId
* the transactionId to set
*/
public void setTransactionId( String transactionId ) {
this.transactionId = transactionId;
}
/**
* Calculates the transaction ID for the transformation.
*
* @return the calculated transaction ID for the transformation.
*/
public String calculateTransactionId() {
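    // With unique connections enabled, reuse the parent job or parent transformation transaction ID (or request a
    // new one) so the work shares the same database transaction; otherwise fall back to the current thread name.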
if ( getTransMeta() != null && getTransMeta().isUsingUniqueConnections() ) {
if ( parentJob != null && parentJob.getTransactionId() != null ) {
return parentJob.getTransactionId();
} else if ( parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() ) {
return parentTrans.getTransactionId();
} else {
return DatabaseConnectionMap.getInstance().getNextTransactionId();
}
} else {
return Thread.currentThread().getName();
}
}
public IMetaStore getMetaStore() {
return metaStore;
}
public void setMetaStore( IMetaStore metaStore ) {
this.metaStore = metaStore;
if ( transMeta != null ) {
transMeta.setMetaStore( metaStore );
}
}
  /**
   * Sets the character encoding of the HttpServletResponse from the KETTLE_DEFAULT_SERVLET_ENCODING system property.
   * The encoding is only applied when the property is set to a non-blank value; any failure to apply it is written
   * to the log. Throws IllegalArgumentException if the response is null.
   *
   * @param response
   *          the HttpServletResponse to set the encoding on, must not be null
   */
public void setServletReponse( HttpServletResponse response ) {
if ( response == null ) {
throw new IllegalArgumentException( "Response is not valid: " + response );
}
String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null );
    // Only apply the encoding when it is not null, empty, or whitespace-only.
if ( !StringUtils.isBlank( encoding ) ) {
try {
response.setCharacterEncoding( encoding.trim() );
response.setContentType( "text/html; charset=" + encoding );
} catch ( Exception ex ) {
LogChannel.GENERAL.logError( "Unable to encode data with encoding : '" + encoding + "'", ex );
}
}
this.servletResponse = response;
}
public HttpServletResponse getServletResponse() {
return servletResponse;
}
public void setServletRequest( HttpServletRequest request ) {
this.servletRequest = request;
}
public HttpServletRequest getServletRequest() {
return servletRequest;
}
public List<DelegationListener> getDelegationListeners() {
return delegationListeners;
}
public void setDelegationListeners( List<DelegationListener> delegationListeners ) {
this.delegationListeners = delegationListeners;
}
public void addDelegationListener( DelegationListener delegationListener ) {
delegationListeners.add( delegationListener );
}
public synchronized void doTopologySortOfSteps() {
// The bubble sort algorithm in contrast to the QuickSort or MergeSort
// algorithms
// does indeed cover all possibilities.
// Sorting larger transformations with hundreds of steps might be too slow
// though.
// We should consider caching TransMeta.findPrevious() results in that case.
//
transMeta.clearCaches();
//
// Cocktail sort (bi-directional bubble sort)
//
// Original sort was taking 3ms for 30 steps
// cocktail sort takes about 8ms for the same 30, but it works :)
//
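    // Ordering rule used below: copies of the same step are ordered by copy number; otherwise a step must come
    // after any step that transMeta.findPrevious() reports as preceding it.
    //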
int stepsMinSize = 0;
int stepsSize = steps.size();
// Noticed a problem with an immediate shrinking iteration window
    // trapping steps that need to be sorted.
// This threshold buys us some time to get the sorting close before
// starting to decrease the window size.
//
// TODO: this could become much smarter by tracking row movement
    // and reacting to that each outer iteration versus
// using a threshold.
//
// After this many iterations enable trimming inner iteration
// window on no change being detected.
//
int windowShrinkThreshold = (int) Math.round( stepsSize * 0.75 );
// give ourselves some room to sort big lists. the window threshold should
// stop us before reaching this anyway.
//
int totalIterations = stepsSize * 2;
boolean isBefore = false;
boolean forwardChange = false;
boolean backwardChange = false;
boolean lastForwardChange = true;
boolean keepSortingForward = true;
StepMetaDataCombi one = null;
StepMetaDataCombi two = null;
for ( int x = 0; x < totalIterations; x++ ) {
// Go forward through the list
//
if ( keepSortingForward ) {
for ( int y = stepsMinSize; y < stepsSize - 1; y++ ) {
one = steps.get( y );
two = steps.get( y + 1 );
if ( one.stepMeta.equals( two.stepMeta ) ) {
isBefore = one.copy > two.copy;
} else {
isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta );
}
if ( isBefore ) {
// two was found to be positioned BEFORE one so we need to
// switch them...
//
steps.set( y, two );
steps.set( y + 1, one );
forwardChange = true;
}
}
}
// Go backward through the list
//
for ( int z = stepsSize - 1; z > stepsMinSize; z-- ) {
one = steps.get( z );
two = steps.get( z - 1 );
if ( one.stepMeta.equals( two.stepMeta ) ) {
isBefore = one.copy > two.copy;
} else {
isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta );
}
if ( !isBefore ) {
// two was found NOT to be positioned BEFORE one so we need to
// switch them...
//
steps.set( z, two );
steps.set( z - 1, one );
backwardChange = true;
}
}
// Shrink stepsSize(max) if there was no forward change
//
if ( x > windowShrinkThreshold && !forwardChange ) {
// should we keep going? check the window size
//
stepsSize--;
if ( stepsSize <= stepsMinSize ) {
break;
}
}
// shrink stepsMinSize(min) if there was no backward change
//
if ( x > windowShrinkThreshold && !backwardChange ) {
// should we keep going? check the window size
//
stepsMinSize++;
if ( stepsMinSize >= stepsSize ) {
break;
}
}
// End of both forward and backward traversal.
// Time to see if we should keep going.
//
if ( !forwardChange && !backwardChange ) {
break;
}
//
// if we are past the first iteration and there has been no change twice,
// quit doing it!
//
if ( keepSortingForward && x > 0 && !lastForwardChange && !forwardChange ) {
keepSortingForward = false;
}
lastForwardChange = forwardChange;
forwardChange = false;
backwardChange = false;
} // finished sorting
}
@Override
public Map<String, Object> getExtensionDataMap() {
return extensionDataMap;
}
}
| MikhailHubanau/pentaho-kettle | engine/src/org/pentaho/di/trans/Trans.java | Java | apache-2.0 | 191,825 |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from keystone.openstack.common import log as logging
from keystone.openstack.common import rpc
from keystone.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
def main():
CONF(sys.argv[1:], project='oslo')
logging.setup("oslo")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
| derekchiang/keystone | keystone/openstack/common/rpc/zmq_receiver.py | Python | apache-2.0 | 1,154 |
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.arbiter.dropout;
import lombok.AllArgsConstructor;
import org.deeplearning4j.arbiter.optimize.api.ParameterSpace;
import org.deeplearning4j.arbiter.optimize.parameter.FixedValue;
import org.deeplearning4j.nn.conf.dropout.GaussianNoise;
import org.deeplearning4j.nn.conf.dropout.IDropout;
import java.util.Collections;
import java.util.List;
import java.util.Map;
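/**
 * {@link ParameterSpace} for {@link GaussianNoise} dropout: the noise standard deviation can itself be a
 * {@link ParameterSpace} or a fixed value.
 */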
@AllArgsConstructor
public class GaussianNoiseSpace implements ParameterSpace<IDropout> {
private ParameterSpace<Double> stddev;
public GaussianNoiseSpace(double stddev){
this(new FixedValue<>(stddev));
}
@Override
public IDropout getValue(double[] parameterValues) {
return new GaussianNoise(stddev.getValue(parameterValues));
}
@Override
public int numParameters() {
return stddev.numParameters();
}
@Override
public List<ParameterSpace> collectLeaves() {
return Collections.<ParameterSpace>singletonList(stddev);
}
@Override
public Map<String, ParameterSpace> getNestedSpaces() {
return Collections.<String,ParameterSpace>singletonMap("stddev", stddev);
}
@Override
public boolean isLeaf() {
return false;
}
@Override
public void setIndices(int... indices) {
stddev.setIndices(indices);
}
}
| deeplearning4j/deeplearning4j | arbiter/arbiter-deeplearning4j/src/main/java/org/deeplearning4j/arbiter/dropout/GaussianNoiseSpace.java | Java | apache-2.0 | 2,098 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.commons.compress.utils;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.ServiceConfigurationError;
import java.util.ServiceLoader;
/**
* Iterates all services for a given class through the standard
* {@link ServiceLoader} mechanism.
*
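 * <p>
 * Usage sketch (with a hypothetical {@code MyService} interface):
 * </p>
 *
 * <pre>{@code
 * Iterator<MyService> services = new ServiceLoaderIterator<MyService>(MyService.class);
 * while (services.hasNext()) {
 *     MyService service = services.next();
 *     // use the service instance
 * }
 * }</pre>
 *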
* @param <E>
* The service to load
* @since 1.13
*/
public class ServiceLoaderIterator<E> implements Iterator<E> {
private E nextServiceLoader;
private final Class<E> service;
private final Iterator<E> serviceLoaderIterator;
public ServiceLoaderIterator(final Class<E> service) {
this(service, ClassLoader.getSystemClassLoader());
}
public ServiceLoaderIterator(final Class<E> service, final ClassLoader classLoader) {
this.service = service;
this.serviceLoaderIterator = ServiceLoader.load(service, classLoader).iterator();
}
@Override
public boolean hasNext() {
while (nextServiceLoader == null) {
try {
if (!serviceLoaderIterator.hasNext()) {
return false;
}
nextServiceLoader = serviceLoaderIterator.next();
} catch (final ServiceConfigurationError e) {
if (e.getCause() instanceof SecurityException) {
// Ignore security exceptions
// TODO Log?
continue;
}
throw e;
}
}
return true;
}
@Override
public E next() {
if (!hasNext()) {
throw new NoSuchElementException("No more elements for service " + service.getName());
}
final E tempNext = nextServiceLoader;
nextServiceLoader = null;
return tempNext;
}
@Override
public void remove() {
throw new UnsupportedOperationException("service=" + service.getName());
}
}
| apache/commons-compress | src/main/java/org/apache/commons/compress/utils/ServiceLoaderIterator.java | Java | apache-2.0 | 2,727 |
package answers.chapter3;
import java.util.stream.IntStream;
public class ForEach06 {
public ForEach06() {
        // Written with a plain for loop
printEvens();
        // Generate a stream with the IntStream#range method and use it
printEvensStream1();
        // Add the filter method
printEvensStream2();
        // Use a method reference
printEvensStream3();
}
private void printEvens() {
for (int i = 0; i < 20; i++) {
if (i % 2 == 0) {
System.out.println(i);
}
}
}
private void printEvensStream1() {
IntStream.range(0, 20)
.forEach(i -> {
if (i % 2 == 0) {
System.out.println(i);
}
});
}
private void printEvensStream2() {
IntStream.range(0, 20)
.filter(i -> i % 2 == 0)
.forEach(i -> System.out.println(i));
}
private void printEvensStream3() {
IntStream.range(0, 20)
.filter(i -> i % 2 == 0)
.forEach(System.out::println);
}
public static void main(String... args) {
new ForEach06();
}
}
| gobjapan/LambdaOkeiko | src/answers/chapter3/ForEach06.java | Java | apache-2.0 | 1,304 |
# frozen_string_literal: true
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require_relative 'spec_helper'
module Selenium
module WebDriver
describe Element do
it 'should click' do
driver.navigate.to url_for('formPage.html')
expect { driver.find_element(id: 'imageButton').click }.not_to raise_error
reset_driver!(1) if %i[safari safari_preview].include? GlobalTestEnv.browser
end
# Safari returns "click intercepted" error instead of "element click intercepted"
it 'should raise if different element receives click', except: {browser: %i[safari safari_preview]} do
driver.navigate.to url_for('click_tests/overlapping_elements.html')
expect { driver.find_element(id: 'contents').click }.to raise_error(Error::ElementClickInterceptedError)
end
# Safari returns "click intercepted" error instead of "element click intercepted"
it 'should raise if element is partially covered', except: {browser: %i[safari safari_preview]} do
driver.navigate.to url_for('click_tests/overlapping_elements.html')
expect { driver.find_element(id: 'other_contents').click }.to raise_error(Error::ElementClickInterceptedError)
end
it 'should submit' do
driver.navigate.to url_for('formPage.html')
wait_for_element(id: 'submitButton')
expect { driver.find_element(id: 'submitButton').submit }.not_to raise_error
reset_driver!
end
it 'should send string keys' do
driver.navigate.to url_for('formPage.html')
wait_for_element(id: 'working')
expect { driver.find_element(id: 'working').send_keys('foo', 'bar') }.not_to raise_error
end
it 'should send key presses' do
driver.navigate.to url_for('javascriptPage.html')
key_reporter = driver.find_element(id: 'keyReporter')
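        # Typing "Tet", moving the caret one position left, then typing "s" should produce "Test"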
key_reporter.send_keys('Tet', :arrow_left, 's')
expect(key_reporter.attribute('value')).to eq('Test')
end
# https://github.com/mozilla/geckodriver/issues/245
it 'should send key presses chords', except: {browser: %i[firefox firefox_nightly safari safari_preview]} do
driver.navigate.to url_for('javascriptPage.html')
key_reporter = driver.find_element(id: 'keyReporter')
key_reporter.send_keys([:shift, 'h'], 'ello')
expect(key_reporter.attribute('value')).to eq('Hello')
end
it 'should handle file uploads' do
driver.navigate.to url_for('formPage.html')
element = driver.find_element(id: 'upload')
expect(element.attribute('value')).to be_empty
path = WebDriver::Platform.windows? ? WebDriver::Platform.windows_path(__FILE__) : __FILE__
element.send_keys path
expect(element.attribute('value')).to include(File.basename(path))
end
describe 'properties and attributes' do
before { driver.navigate.to url_for('formPage.html') }
context 'string type' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'type' }
it '#dom_attribute returns attribute value' do
expect(element.dom_attribute(prop_or_attr)).to eq 'checkbox'
end
it '#property returns property value' do
expect(element.property(prop_or_attr)).to eq 'checkbox'
end
it '#attribute returns value' do
expect(element.attribute(prop_or_attr)).to eq 'checkbox'
end
end
context 'numeric type' do
let(:element) { driver.find_element(id: 'withText') }
let(:prop_or_attr) { 'rows' }
it '#dom_attribute String' do
expect(element.dom_attribute(prop_or_attr)).to eq '5'
end
it '#property returns Number' do
expect(element.property(prop_or_attr)).to eq 5
end
it '#attribute returns String' do
expect(element.attribute(prop_or_attr)).to eq '5'
end
end
context 'boolean type of true' do
let(:element) { driver.find_element(id: 'checkedchecky') }
let(:prop_or_attr) { 'checked' }
it '#dom_attribute returns String', except: {browser: :safari} do
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property returns true' do
expect(element.property(prop_or_attr)).to eq true
end
it '#attribute returns String' do
expect(element.attribute(prop_or_attr)).to eq 'true'
end
it '#dom_attribute does not update after click', except: {browser: :safari} do
element.click
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property updates to false after click' do
element.click
expect(element.property(prop_or_attr)).to eq false
end
it '#attribute updates to nil after click' do
element.click
expect(element.attribute(prop_or_attr)).to eq nil
end
end
context 'boolean type of false' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'checked' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns false' do
expect(element.property(prop_or_attr)).to eq false
end
it '#attribute returns nil' do
expect(element.attribute(prop_or_attr)).to be_nil
end
it '#dom_attribute does not update after click' do
element.click
expect(element.dom_attribute(prop_or_attr)).to eq nil
end
it '#property updates to true after click' do
element.click
expect(element.property(prop_or_attr)).to eq true
end
it '#attribute updates to String after click' do
element.click
expect(element.attribute(prop_or_attr)).to eq 'true'
end
end
context 'property exists but attribute does not' do
let(:element) { driver.find_element(id: 'withText') }
let(:prop_or_attr) { 'value' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns default property' do
expect(element.property(prop_or_attr)).to eq 'Example text'
end
it '#attribute returns default property' do
expect(element.attribute(prop_or_attr)).to eq 'Example text'
end
it '#property returns updated property' do
element.clear
expect(element.property(prop_or_attr)).to be_empty
end
it '#attribute returns updated property' do
element.clear
expect(element.attribute(prop_or_attr)).to be_empty
end
end
context 'attribute exists but property does not' do
let(:element) { driver.find_element(id: 'vsearchGadget') }
let(:prop_or_attr) { 'accesskey' }
it '#dom_attribute returns attribute' do
expect(element.dom_attribute(prop_or_attr)).to eq '4'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns attribute' do
expect(element.attribute(prop_or_attr)).to eq '4'
end
end
context 'neither attribute nor property exists' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'nonexistent' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns nil' do
expect(element.attribute(prop_or_attr)).to be_nil
end
end
context 'style' do
before { driver.navigate.to url_for('clickEventPage.html') }
let(:element) { driver.find_element(id: 'result') }
let(:prop_or_attr) { 'style' }
it '#dom_attribute attribute with no formatting' do
expect(element.dom_attribute(prop_or_attr)).to eq 'width:300;height:60'
end
# TODO: This might not be correct behavior
it '#property returns object',
except: [{browser: :firefox,
reason: 'https://github.com/mozilla/geckodriver/issues/1846'},
{browser: :safari}] do
expect(element.property(prop_or_attr)).to eq %w[width height]
end
it '#attribute returns attribute with formatting' do
expect(element.attribute(prop_or_attr)).to eq 'width: 300px; height: 60px;'
end
end
context 'incorrect casing' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'nAme' }
it '#dom_attribute returns correctly cased attribute' do
expect(element.dom_attribute(prop_or_attr)).to eq 'checky'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns correctly cased attribute' do
expect(element.attribute(prop_or_attr)).to eq 'checky'
end
end
context 'property attribute case difference with attribute casing' do
let(:element) { driver.find_element(name: 'readonly') }
let(:prop_or_attr) { 'readonly' }
it '#dom_attribute returns a String', except: {browser: :safari} do
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns a String' do
expect(element.attribute(prop_or_attr)).to eq 'true'
end
end
context 'property attribute case difference with property casing' do
let(:element) { driver.find_element(name: 'readonly') }
let(:prop_or_attr) { 'readOnly' }
it '#dom_attribute returns a String',
except: [{browser: :firefox,
reason: 'https://github.com/mozilla/geckodriver/issues/1850'},
{browser: :safari}] do
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property returns property as true' do
expect(element.property(prop_or_attr)).to eq true
end
it '#attribute returns property as String' do
expect(element.attribute(prop_or_attr)).to eq 'true'
end
end
context 'property attribute name difference with attribute naming' do
let(:element) { driver.find_element(id: 'wallace') }
let(:prop_or_attr) { 'class' }
it '#dom_attribute returns attribute value' do
expect(element.dom_attribute(prop_or_attr)).to eq 'gromit'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns attribute value' do
expect(element.attribute(prop_or_attr)).to eq 'gromit'
end
end
context 'property attribute name difference with property naming' do
let(:element) { driver.find_element(id: 'wallace') }
let(:prop_or_attr) { 'className' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns property value' do
expect(element.property(prop_or_attr)).to eq 'gromit'
end
it '#attribute returns property value' do
expect(element.attribute(prop_or_attr)).to eq 'gromit'
end
end
context 'property attribute value difference' do
let(:element) { driver.find_element(tag_name: 'form') }
let(:prop_or_attr) { 'action' }
it '#dom_attribute returns attribute value' do
expect(element.dom_attribute(prop_or_attr)).to eq 'resultPage.html'
end
it '#property returns property value' do
expect(element.property(prop_or_attr)).to match(%r{http://(.+)/resultPage\.html})
end
it '#attribute returns property value' do
expect(element.attribute(prop_or_attr)).to match(%r{http://(.+)/resultPage\.html})
end
end
end
it 'returns ARIA role', only: {browser: %i[chrome edge]} do
driver.navigate.to "data:text/html," \
"<div role='heading' aria-level='1'>Level 1 Header</div>" \
"<h1>Level 1 Header</h1>" \
"<h2 role='alert'>Level 2 Header</h2>"
expect(driver.find_element(tag_name: 'div').aria_role).to eq('heading')
expect(driver.find_element(tag_name: 'h1').aria_role).to eq('heading')
expect(driver.find_element(tag_name: 'h2').aria_role).to eq('alert')
end
it 'returns accessible name', only: {browser: %i[chrome edge]} do
driver.navigate.to "data:text/html,<h1>Level 1 Header</h1>"
expect(driver.find_element(tag_name: 'h1').accessible_name).to eq('Level 1 Header')
end
it 'should clear' do
driver.navigate.to url_for('formPage.html')
expect { driver.find_element(id: 'withText').clear }.not_to raise_error
end
it 'should get and set selected' do
driver.navigate.to url_for('formPage.html')
cheese = driver.find_element(id: 'cheese')
peas = driver.find_element(id: 'peas')
cheese.click
expect(cheese).to be_selected
expect(peas).not_to be_selected
peas.click
expect(peas).to be_selected
expect(cheese).not_to be_selected
end
it 'should get enabled' do
driver.navigate.to url_for('formPage.html')
expect(driver.find_element(id: 'notWorking')).not_to be_enabled
end
it 'should get text' do
driver.navigate.to url_for('xhtmlTest.html')
expect(driver.find_element(class: 'header').text).to eq('XHTML Might Be The Future')
end
it 'should get displayed' do
driver.navigate.to url_for('xhtmlTest.html')
expect(driver.find_element(class: 'header')).to be_displayed
end
context 'size and location' do
it 'should get current location' do
driver.navigate.to url_for('xhtmlTest.html')
loc = driver.find_element(class: 'header').location
expect(loc.x).to be >= 1
expect(loc.y).to be >= 1
end
it 'should get location once scrolled into view' do
driver.navigate.to url_for('javascriptPage.html')
loc = driver.find_element(id: 'keyUp').location_once_scrolled_into_view
expect(loc.x).to be >= 1
expect(loc.y).to be >= 0 # can be 0 if scrolled to the top
end
it 'should get size' do
driver.navigate.to url_for('xhtmlTest.html')
size = driver.find_element(class: 'header').size
expect(size.width).to be_positive
expect(size.height).to be_positive
end
it 'should get rect' do
driver.navigate.to url_for('xhtmlTest.html')
rect = driver.find_element(class: 'header').rect
expect(rect.x).to be_positive
expect(rect.y).to be_positive
expect(rect.width).to be_positive
expect(rect.height).to be_positive
end
end
# IE - https://github.com/SeleniumHQ/selenium/pull/4043
it 'should drag and drop', except: {browser: :ie} do
driver.navigate.to url_for('dragAndDropTest.html')
img1 = driver.find_element(id: 'test1')
img2 = driver.find_element(id: 'test2')
driver.action.drag_and_drop_by(img1, 100, 100)
.drag_and_drop(img2, img1)
.perform
expect(img1.location).to eq(img2.location)
end
it 'should get css property' do
driver.navigate.to url_for('javascriptPage.html')
element = driver.find_element(id: 'green-parent')
style1 = element.css_value('background-color')
style2 = element.style('background-color') # backwards compatibility
acceptable = ['rgb(0, 128, 0)', '#008000', 'rgba(0,128,0,1)', 'rgba(0, 128, 0, 1)']
expect(acceptable).to include(style1, style2)
end
it 'should know when two elements are equal' do
driver.navigate.to url_for('simpleTest.html')
body = driver.find_element(tag_name: 'body')
xbody = driver.find_element(xpath: '//body')
jsbody = driver.execute_script('return document.getElementsByTagName("body")[0]')
expect(body).to eq(xbody)
expect(body).to eq(jsbody)
expect(body).to eql(xbody)
expect(body).to eql(jsbody)
end
it 'should know when element arrays are equal' do
driver.navigate.to url_for('simpleTest.html')
tags = driver.find_elements(tag_name: 'div')
jstags = driver.execute_script('return document.getElementsByTagName("div")')
expect(tags).to eq(jstags)
end
it 'should know when two elements are not equal' do
driver.navigate.to url_for('simpleTest.html')
elements = driver.find_elements(tag_name: 'p')
p1 = elements.fetch(0)
p2 = elements.fetch(1)
expect(p1).not_to eq(p2)
expect(p1).not_to eql(p2)
end
it 'should return the same #hash for equal elements when found by Driver#find_element' do
driver.navigate.to url_for('simpleTest.html')
body = driver.find_element(tag_name: 'body')
xbody = driver.find_element(xpath: '//body')
expect(body.hash).to eq(xbody.hash)
end
it 'should return the same #hash for equal elements when found by Driver#find_elements' do
driver.navigate.to url_for('simpleTest.html')
body = driver.find_elements(tag_name: 'body').fetch(0)
xbody = driver.find_elements(xpath: '//body').fetch(0)
expect(body.hash).to eq(xbody.hash)
end
end
end # WebDriver
end # Selenium
| SeleniumHQ/selenium | rb/spec/integration/selenium/webdriver/element_spec.rb | Ruby | apache-2.0 | 19,292 |
"""The Hunter Douglas PowerView integration."""
import asyncio
from datetime import timedelta
import logging
from aiopvapi.helpers.aiorequest import AioRequest
from aiopvapi.helpers.constants import ATTR_ID
from aiopvapi.helpers.tools import base64_to_unicode
from aiopvapi.rooms import Rooms
from aiopvapi.scenes import Scenes
from aiopvapi.shades import Shades
from aiopvapi.userdata import UserData
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
COORDINATOR,
DEVICE_FIRMWARE,
DEVICE_INFO,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_REVISION,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE_BUILD,
FIRMWARE_IN_USERDATA,
FIRMWARE_SUB_REVISION,
HUB_EXCEPTIONS,
HUB_NAME,
LEGACY_DEVICE_BUILD,
LEGACY_DEVICE_MODEL,
LEGACY_DEVICE_REVISION,
LEGACY_DEVICE_SUB_REVISION,
MAC_ADDRESS_IN_USERDATA,
MAINPROCESSOR_IN_USERDATA_FIRMWARE,
MODEL_IN_MAINPROCESSOR,
PV_API,
PV_ROOM_DATA,
PV_SCENE_DATA,
PV_SHADE_DATA,
PV_SHADES,
REVISION_IN_MAINPROCESSOR,
ROOM_DATA,
SCENE_DATA,
SERIAL_NUMBER_IN_USERDATA,
SHADE_DATA,
USER_DATA,
)
PARALLEL_UPDATES = 1
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
PLATFORMS = ["cover", "scene", "sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, hass_config: dict):
"""Set up the Hunter Douglas PowerView component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Hunter Douglas PowerView from a config entry."""
config = entry.data
hub_address = config.get(CONF_HOST)
websession = async_get_clientsession(hass)
pv_request = AioRequest(hub_address, loop=hass.loop, websession=websession)
try:
async with async_timeout.timeout(10):
device_info = await async_get_device_info(pv_request)
async with async_timeout.timeout(10):
rooms = Rooms(pv_request)
room_data = _async_map_data_by_id((await rooms.get_resources())[ROOM_DATA])
async with async_timeout.timeout(10):
scenes = Scenes(pv_request)
scene_data = _async_map_data_by_id(
(await scenes.get_resources())[SCENE_DATA]
)
async with async_timeout.timeout(10):
shades = Shades(pv_request)
shade_data = _async_map_data_by_id(
(await shades.get_resources())[SHADE_DATA]
)
except HUB_EXCEPTIONS as err:
_LOGGER.error("Connection error to PowerView hub: %s", hub_address)
raise ConfigEntryNotReady from err
if not device_info:
_LOGGER.error("Unable to initialize PowerView hub: %s", hub_address)
raise ConfigEntryNotReady
async def async_update_data():
"""Fetch data from shade endpoint."""
async with async_timeout.timeout(10):
shade_entries = await shades.get_resources()
if not shade_entries:
raise UpdateFailed("Failed to fetch new shade data.")
return _async_map_data_by_id(shade_entries[SHADE_DATA])
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="powerview hub",
update_method=async_update_data,
update_interval=timedelta(seconds=60),
)
hass.data[DOMAIN][entry.entry_id] = {
PV_API: pv_request,
PV_ROOM_DATA: room_data,
PV_SCENE_DATA: scene_data,
PV_SHADES: shades,
PV_SHADE_DATA: shade_data,
COORDINATOR: coordinator,
DEVICE_INFO: device_info,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_get_device_info(pv_request):
"""Determine device info."""
userdata = UserData(pv_request)
resources = await userdata.get_resources()
userdata_data = resources[USER_DATA]
if FIRMWARE_IN_USERDATA in userdata_data:
main_processor_info = userdata_data[FIRMWARE_IN_USERDATA][
MAINPROCESSOR_IN_USERDATA_FIRMWARE
]
else:
# Legacy devices
main_processor_info = {
REVISION_IN_MAINPROCESSOR: LEGACY_DEVICE_REVISION,
FIRMWARE_SUB_REVISION: LEGACY_DEVICE_SUB_REVISION,
FIRMWARE_BUILD: LEGACY_DEVICE_BUILD,
MODEL_IN_MAINPROCESSOR: LEGACY_DEVICE_MODEL,
}
return {
DEVICE_NAME: base64_to_unicode(userdata_data[HUB_NAME]),
DEVICE_MAC_ADDRESS: userdata_data[MAC_ADDRESS_IN_USERDATA],
DEVICE_SERIAL_NUMBER: userdata_data[SERIAL_NUMBER_IN_USERDATA],
DEVICE_REVISION: main_processor_info[REVISION_IN_MAINPROCESSOR],
DEVICE_FIRMWARE: main_processor_info,
DEVICE_MODEL: main_processor_info[MODEL_IN_MAINPROCESSOR],
}
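# _async_map_data_by_id example: [{"id": 1, ...}, {"id": 2, ...}] -> {1: {"id": 1, ...}, 2: {"id": 2, ...}}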
@callback
def _async_map_data_by_id(data):
"""Return a dict with the key being the id for a list of entries."""
return {entry[ATTR_ID]: entry for entry in data}
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| turbokongen/home-assistant | homeassistant/components/hunterdouglas_powerview/__init__.py | Python | apache-2.0 | 5,873 |
var AddressInSeattleView = function (answerService) {
this.initialize = function () {
// Define a div wrapper for the view (used to attach events)
this.$el = $('<div/>');
var queryAddress = function(evt) {
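      // Geocode the entered address and load the Seattle city-limits GeoJSON in parallel,
      // then test each geocoder result against the city boundary with Wherewolf.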
var geocodeDeferred = $.Deferred();
var geocoder = new google.maps.Geocoder();
geocoder.geocode({ address: $("#employer-address").val() },function(results, status) {
if(results.length == 0) {
geocodeDeferred.reject("Error geocoding");
} else {
geocodeDeferred.resolve(results);
}
});
var loadCityLimitsDeferred = $.Deferred();
$.ajax({
dataType: "json",
url: "data/city-limits.json",
success: function(cityLimits) {
loadCityLimitsDeferred.resolve(cityLimits);
},
error: function(response, status, errorThrown) {
loadCityLimitsDeferred.reject("Error loading city limits");
}
});
var onGeocodeAndLoad = function(results, cityLimits) {
var ww = Wherewolf();
ww.add("Seattle", cityLimits);
var lngLat, inSeattle;
//For each geocoder result
for (var i = 0; i < results.length; i++) {
lngLat = {
lng: results[0].geometry.location.lng(),
lat: results[0].geometry.location.lat()
};
inSeattle = ww.find(lngLat,{
layer:"Seattle",
wholeFeature: true
});
//If it's a match, stop
if (inSeattle) {
answerService.saveAnswer("work-seattle","yes");
var resultDiv = $(this.$el.find(".result")).html("In Seattle");
var continueButton = $(this.$el.find("a.btn"));
continueButton.attr("href","#question/number-employees");
continueButton.removeClass("hidden");
return;
}
}
answerService.saveAnswer("work-seattle","no");
var resultDiv = $(this.$el.find(".result")).html("Not In Seattle");
var continueButton = $(this.$el.find("a.btn"));
continueButton.attr("href","#results");
continueButton.removeClass("hidden");
}
var onFailedGeocodeOrLoad = function(err1, err2) {
$(this.$el.find(".result")).html("Unable to Determine");
};
$.when(geocodeDeferred, loadCityLimitsDeferred).done(onGeocodeAndLoad.bind(this)).fail( onFailedGeocodeOrLoad.bind(this));
};
this.$el.on("click",".query", queryAddress.bind(this));
this.render();
};
this.render = function() {
this.$el.html(this.template());
return this;
};
this.initialize();
}
| working-wa/whats-my-wage-app | www/js/AddressInSeattleView.js | JavaScript | apache-2.0 | 2,780 |
using System.Collections.Generic;
using System.Runtime.CompilerServices;
namespace System.Html {
[IgnoreNamespace, Imported(ObeysTypeSystem = true)]
public partial class DOMRectList {
internal DOMRectList() {
}
[IndexerName("__Item"), IntrinsicProperty]
public DOMRect this[int index] {
get {
return default(DOMRect);
}
}
[EnumerateAsArray, InlineCode("new {$System.ArrayEnumerator}({this})")]
public IEnumerator<DOMRect> GetEnumerator() {
return null;
}
public DOMRect Item(int index) {
return default(DOMRect);
}
[IntrinsicProperty]
public int Length {
get {
return 0;
}
}
}
}
| n9/SaltarelleWeb | Web/Generated/Html/DOMRectList.cs | C# | apache-2.0 | 642 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import itertools
import json
import re
from itertools import imap
from operator import itemgetter
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util
from desktop.conf import DEFAULT_USER
from hadoop import cluster
from TCLIService import TCLIService
from TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TTypeId,\
TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation,\
TCloseSessionReq, TGetSchemasReq, TGetLogReq, TCancelOperationReq,\
TCloseOperationReq, TFetchResultsResp, TRowSet, TProtocolVersion
from beeswax import conf as beeswax_conf
from beeswax import hive_site
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import Session, HiveServerQueryHandle, HiveServerQueryHistory
from beeswax.server.dbms import Table, NoSuchObjectException, DataTable,\
QueryServerException
LOG = logging.getLogger(__name__)
IMPALA_RESULTSET_CACHE_SIZE = 'impala.resultset.cache.size'
DEFAULT_USER = DEFAULT_USER.get()
class HiveServerTable(Table):
"""
We get the table details from a DESCRIBE FORMATTED.
"""
def __init__(self, table_results, table_schema, desc_results, desc_schema):
if beeswax_conf.THRIFT_VERSION.get() >= 7:
if not table_results.columns:
raise NoSuchObjectException()
self.table = table_results.columns
else: # Deprecated. To remove in Hue 4.
if not table_results.rows:
raise NoSuchObjectException()
self.table = table_results.rows and table_results.rows[0] or ''
self.table_schema = table_schema
self.desc_results = desc_results
self.desc_schema = desc_schema
self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
@property
def name(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_NAME')
@property
def is_view(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_TYPE') == 'VIEW'
@property
def partition_keys(self):
try:
return [PartitionKeyCompatible(row['col_name'], row['data_type'], row['comment']) for row in self._get_partition_column()]
except:
LOG.exception('failed to get partition keys')
return []
@property
def path_location(self):
try:
rows = self.describe
rows = [row for row in rows if row['col_name'].startswith('Location:')]
if rows:
return rows[0]['data_type']
except:
LOG.exception('failed to get path location')
return None
@property
def cols(self):
rows = self.describe
col_row_index = 2
try:
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return rows[col_row_index:][:end_cols_index] + self._get_partition_column()
except ValueError: # DESCRIBE on columns and nested columns does not contain add'l rows beyond cols
return rows[col_row_index:]
except:
# Impala does not have it
return rows
def _get_partition_column(self):
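    # Parse the "# Partition Information" section of the DESCRIBE FORMATTED output:
    # partition columns start three rows after that header and end at the first blank row.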
rows = self.describe
try:
col_row_index = map(itemgetter('col_name'), rows).index('# Partition Information') + 3
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return rows[col_row_index:][:end_cols_index]
except:
# Impala does not have it
return []
@property
def comment(self):
return HiveServerTRow(self.table, self.table_schema).col('REMARKS')
@property
def properties(self):
rows = self.describe
col_row_index = 2
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return [{
'col_name': prop['col_name'].strip() if prop['col_name'] else prop['col_name'],
'data_type': prop['data_type'].strip() if prop['data_type'] else prop['data_type'],
'comment': prop['comment'].strip() if prop['comment'] else prop['comment']
} for prop in rows[col_row_index + end_cols_index + 1:]
]
@property
def stats(self):
rows = self.properties
col_row_index = map(itemgetter('col_name'), rows).index('Table Parameters:') + 1
end_cols_index = map(itemgetter('data_type'), rows[col_row_index:]).index(None)
return rows[col_row_index:][:end_cols_index]
@property
def has_complex(self):
has_complex = False
complex_types = ["struct", "array", "map", "uniontype"]
patterns = [re.compile(typ) for typ in complex_types]
for column in self.cols:
if isinstance(column, dict) and 'data_type' in column:
column_type = column['data_type']
else: # Col object
column_type = column.type
if column_type and any(p.match(column_type.lower()) for p in patterns):
has_complex = True
break
return has_complex
class HiveServerTRowSet2:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return not self.row_set.columns or not HiveServerTColumnValue2(self.row_set.columns[0]).val
def cols(self, col_names):
cols_rows = []
rs = HiveServerTRow2(self.row_set.columns, self.schema)
cols = [rs.full_col(name) for name in col_names]
for cols_row in itertools.izip(*cols):
cols_rows.append(dict(itertools.izip(col_names, cols_row)))
return cols_rows
def __iter__(self):
return self
def next(self):
if self.row_set.columns:
return HiveServerTRow2(self.row_set.columns, self.schema)
else:
raise StopIteration
class HiveServerTRow2:
def __init__(self, cols, schema):
self.cols = cols
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val[0] # Return only first element
def full_col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val # Return the full column and its values
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
try:
return [HiveServerTColumnValue2(field).val.pop(0) for field in self.cols]
except IndexError:
raise StopIteration
class HiveServerTColumnValue2:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
# Could directly get index from schema but would need to cache the schema
if self.column_value.stringVal:
return self._get_val(self.column_value.stringVal)
elif self.column_value.i16Val is not None:
return self._get_val(self.column_value.i16Val)
elif self.column_value.i32Val is not None:
return self._get_val(self.column_value.i32Val)
elif self.column_value.i64Val is not None:
return self._get_val(self.column_value.i64Val)
elif self.column_value.doubleVal is not None:
return self._get_val(self.column_value.doubleVal)
elif self.column_value.boolVal is not None:
return self._get_val(self.column_value.boolVal)
elif self.column_value.byteVal is not None:
return self._get_val(self.column_value.byteVal)
elif self.column_value.binaryVal is not None:
return self._get_val(self.column_value.binaryVal)
@classmethod
def _get_val(cls, column):
column.values = cls.set_nulls(column.values, column.nulls)
    column.nulls = '' # Clear the null mask so the column is not re-marked with nulls on the next call
return column.values
@classmethod
def mark_nulls(cls, values, bytestring):
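    # Expand the HS2 null bitmask: each byte carries the null flags for eight consecutive
    # values, least-significant bit first; a non-zero bit marks the corresponding value as NULL.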
mask = bytearray(bytestring)
for n in mask:
yield n & 0x01
yield n & 0x02
yield n & 0x04
yield n & 0x08
yield n & 0x10
yield n & 0x20
yield n & 0x40
yield n & 0x80
@classmethod
def set_nulls(cls, values, bytestring):
if bytestring == '' or re.match('^(\x00)+$', bytestring): # HS2 has just \x00 or '', Impala can have \x00\x00...
return values
else:
_values = [None if is_null else value for value, is_null in itertools.izip(values, cls.mark_nulls(values, bytestring))]
if len(values) != len(_values): # HS2 can have just \x00\x01 instead of \x00\x01\x00...
_values.extend(values[len(_values):])
return _values
class HiveServerDataTable(DataTable):
def __init__(self, results, schema, operation_handle, query_server):
self.schema = schema and schema.schema
self.row_set = HiveServerTRowSet(results.results, schema)
self.operation_handle = operation_handle
if query_server['server_name'] == 'impala':
self.has_more = results.hasMoreRows
else:
self.has_more = not self.row_set.is_empty() # Should be results.hasMoreRows but always True in HS2
self.startRowOffset = self.row_set.startRowOffset # Always 0 in HS2
@property
def ready(self):
return True
def cols(self):
if self.schema:
return [HiveServerTColumnDesc(col) for col in self.schema.columns]
else:
return []
def rows(self):
for row in self.row_set:
yield row.fields()
class HiveServerTTableSchema:
def __init__(self, columns, schema):
self.columns = columns
self.schema = schema
def cols(self):
try:
return HiveServerTRowSet(self.columns, self.schema).cols(('col_name', 'data_type', 'comment'))
except:
# Impala API is different
cols = HiveServerTRowSet(self.columns, self.schema).cols(('name', 'type', 'comment'))
for col in cols:
col['col_name'] = col.pop('name')
col['data_type'] = col.pop('type')
return cols
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnDesc(self.columns[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
if beeswax_conf.THRIFT_VERSION.get() >= 7:
HiveServerTRow = HiveServerTRow2
HiveServerTRowSet = HiveServerTRowSet2
else:
# Deprecated. To remove in Hue 4.
class HiveServerTRow:
def __init__(self, row, schema):
self.row = row
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue(self.row.colVals[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
return [HiveServerTColumnValue(field).val for field in self.row.colVals]
class HiveServerTRowSet:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return len(self.rows) == 0
def cols(self, col_names):
cols_rows = []
for row in self.rows:
row = HiveServerTRow(row, self.schema)
cols = {}
for col_name in col_names:
cols[col_name] = row.col(col_name)
cols_rows.append(cols)
return cols_rows
def __iter__(self):
return self
def next(self):
if self.rows:
return HiveServerTRow(self.rows.pop(0), self.schema)
else:
raise StopIteration
class HiveServerTColumnValue:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
if self.column_value.boolVal is not None:
return self.column_value.boolVal.value
elif self.column_value.byteVal is not None:
return self.column_value.byteVal.value
elif self.column_value.i16Val is not None:
return self.column_value.i16Val.value
elif self.column_value.i32Val is not None:
return self.column_value.i32Val.value
elif self.column_value.i64Val is not None:
return self.column_value.i64Val.value
elif self.column_value.doubleVal is not None:
return self.column_value.doubleVal.value
elif self.column_value.stringVal is not None:
return self.column_value.stringVal.value
class HiveServerTColumnDesc:
def __init__(self, column):
self.column = column
@property
def name(self):
return self.column.columnName
@property
def comment(self):
return self.column.comment
@property
def type(self):
return self.get_type(self.column.typeDesc)
@classmethod
def get_type(self, typeDesc):
for ttype in typeDesc.types:
if ttype.primitiveEntry is not None:
return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
elif ttype.mapEntry is not None:
return ttype.mapEntry
elif ttype.unionEntry is not None:
return ttype.unionEntry
elif ttype.arrayEntry is not None:
return ttype.arrayEntry
elif ttype.structEntry is not None:
return ttype.structEntry
elif ttype.userDefinedTypeEntry is not None:
return ttype.userDefinedTypeEntry
class HiveServerClient:
HS2_MECHANISMS = {
'KERBEROS': 'GSSAPI',
'NONE': 'PLAIN',
'NOSASL': 'NOSASL',
'LDAP': 'PLAIN',
'PAM': 'PLAIN'
}
def __init__(self, query_server, user):
self.query_server = query_server
self.user = user
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password = self.get_security()
LOG.info('use_sasl=%s, mechanism=%s, kerberos_principal_short_name=%s, impersonation_enabled=%s, auth_username=%s' % (
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username))
self.use_sasl = use_sasl
self.kerberos_principal_short_name = kerberos_principal_short_name
self.impersonation_enabled = impersonation_enabled
if self.query_server['server_name'] == 'impala':
from impala import conf as impala_conf
ssl_enabled = impala_conf.SSL.ENABLED.get()
ca_certs = impala_conf.SSL.CACERTS.get()
keyfile = impala_conf.SSL.KEY.get()
certfile = impala_conf.SSL.CERT.get()
validate = impala_conf.SSL.VALIDATE.get()
timeout = impala_conf.SERVER_CONN_TIMEOUT.get()
else:
ssl_enabled = hiveserver2_use_ssl()
ca_certs = beeswax_conf.SSL.CACERTS.get()
keyfile = beeswax_conf.SSL.KEY.get()
certfile = beeswax_conf.SSL.CERT.get()
validate = beeswax_conf.SSL.VALIDATE.get()
timeout = beeswax_conf.SERVER_CONN_TIMEOUT.get()
if auth_username:
username = auth_username
password = auth_password
else:
username = user.username
password = None
self._client = thrift_util.get_client(TCLIService.Client,
query_server['server_host'],
query_server['server_port'],
service_name=query_server['server_name'],
kerberos_principal=kerberos_principal_short_name,
use_sasl=use_sasl,
mechanism=mechanism,
username=username,
password=password,
timeout_seconds=timeout,
use_ssl=ssl_enabled,
ca_certs=ca_certs,
keyfile=keyfile,
certfile=certfile,
validate=validate,
transport_mode=query_server.get('transport_mode', 'socket'),
http_url=query_server.get('http_url', '')
)
def get_security(self):
principal = self.query_server['principal']
impersonation_enabled = False
auth_username = self.query_server['auth_username'] # Pass-through LDAP/PAM authentication
auth_password = self.query_server['auth_password']
if principal:
kerberos_principal_short_name = principal.split('/', 1)[0]
else:
kerberos_principal_short_name = None
if self.query_server['server_name'] == 'impala':
      if auth_password: # Force LDAP/PAM auth if auth_password is provided
use_sasl = True
mechanism = HiveServerClient.HS2_MECHANISMS['NONE']
else:
cluster_conf = cluster.get_cluster_conf_for_job_submission()
use_sasl = cluster_conf is not None and cluster_conf.SECURITY_ENABLED.get()
mechanism = HiveServerClient.HS2_MECHANISMS['KERBEROS']
impersonation_enabled = self.query_server['impersonation_enabled']
else:
hive_mechanism = hive_site.get_hiveserver2_authentication()
if hive_mechanism not in HiveServerClient.HS2_MECHANISMS:
raise Exception(_('%s server authentication not supported. Valid are %s.') % (hive_mechanism, HiveServerClient.HS2_MECHANISMS.keys()))
use_sasl = hive_mechanism in ('KERBEROS', 'NONE', 'LDAP', 'PAM')
mechanism = HiveServerClient.HS2_MECHANISMS[hive_mechanism]
impersonation_enabled = hive_site.hiveserver2_impersonation_enabled()
return use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password
def open_session(self, user):
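    # Note: TProtocolVersion values are zero-based, so the 1-based THRIFT_VERSION setting is shifted down by one below.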
kwargs = {
'client_protocol': beeswax_conf.THRIFT_VERSION.get() - 1,
      'username': user.username, # With SASL or LDAP the effective username comes from the authentication mechanism, since the session depends on it.
'configuration': {},
}
if self.impersonation_enabled:
kwargs.update({'username': DEFAULT_USER})
if self.query_server['server_name'] == 'impala': # Only when Impala accepts it
kwargs['configuration'].update({'impala.doas.user': user.username})
if self.query_server['server_name'] == 'beeswax': # All the time
kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
if self.query_server['server_name'] == 'sparksql': # All the time
kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
req = TOpenSessionReq(**kwargs)
res = self._client.OpenSession(req)
if res.status is not None and res.status.statusCode not in (TStatusCode.SUCCESS_STATUS,):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
sessionId = res.sessionHandle.sessionId
LOG.info('Opening session %s' % sessionId)
encoded_status, encoded_guid = HiveServerQueryHandle(secret=sessionId.secret, guid=sessionId.guid).get()
properties = json.dumps(res.configuration)
return Session.objects.create(owner=user,
application=self.query_server['server_name'],
status_code=res.status.statusCode,
secret=encoded_status,
guid=encoded_guid,
server_protocol_version=res.serverProtocolVersion,
properties=properties)
def call(self, fn, req, status=TStatusCode.SUCCESS_STATUS):
session = Session.objects.get_session(self.user, self.query_server['server_name'])
if session is None:
session = self.open_session(self.user)
if hasattr(req, 'sessionHandle') and req.sessionHandle is None:
req.sessionHandle = session.get_handle()
res = fn(req)
# Not supported currently in HS2 and Impala: TStatusCode.INVALID_HANDLE_STATUS
if res.status.statusCode == TStatusCode.ERROR_STATUS and \
re.search('Invalid SessionHandle|Invalid session|Client session expired', res.status.errorMessage or '', re.I):
LOG.info('Retrying with a new session because for %s of %s' % (self.user, res))
session = self.open_session(self.user)
req.sessionHandle = session.get_handle()
      # Look the function up again by name on the client and retry with the new session handle
res = getattr(self._client, fn.attr)(req)
if status is not None and res.status.statusCode not in (
TStatusCode.SUCCESS_STATUS, TStatusCode.SUCCESS_WITH_INFO_STATUS, TStatusCode.STILL_EXECUTING_STATUS):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
else:
return res
def close_session(self, sessionHandle):
req = TCloseSessionReq(sessionHandle=sessionHandle)
return self._client.CloseSession(req)
def get_databases(self):
# GetCatalogs() is not implemented in HS2
req = TGetSchemasReq()
res = self.call(self._client.GetSchemas, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
col = 'TABLE_SCHEM'
return HiveServerTRowSet(results.results, schema.schema).cols((col,))
def get_database(self, database):
if self.query_server['server_name'] == 'impala':
raise NotImplementedError(_("Impala has not implemented the 'DESCRIBE DATABASE' command: %(issue_ref)s") % {
'issue_ref': "https://issues.cloudera.org/browse/IMPALA-2196"
})
query = 'DESCRIBE DATABASE EXTENDED `%s`' % (database)
(desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(operation_handle)
cols = ('db_name', 'comment', 'location')
if len(HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)) != 1:
raise ValueError(_("%(query)s returned more than 1 row") % {'query': query})
return HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)[0] # Should only contain one row
def get_tables_meta(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
cols = ('TABLE_NAME', 'TABLE_TYPE', 'REMARKS')
return HiveServerTRowSet(results.results, schema.schema).cols(cols)
def get_tables(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
return HiveServerTRowSet(results.results, schema.schema).cols(('TABLE_NAME',))
def get_table(self, database, table_name, partition_spec=None):
req = TGetTablesReq(schemaName=database, tableName=table_name)
res = self.call(self._client.GetTables, req)
table_results, table_schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
if partition_spec:
query = 'DESCRIBE FORMATTED `%s`.`%s` PARTITION(%s)' % (database, table_name, partition_spec)
else:
query = 'DESCRIBE FORMATTED `%s`.`%s`' % (database, table_name)
(desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(operation_handle)
return HiveServerTable(table_results.results, table_schema.schema, desc_results.results, desc_schema.schema)
def execute_query(self, query, max_rows=1000):
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement=query.query['query'], max_rows=max_rows, configuration=configuration)
def execute_query_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_FIRST):
(results, schema), operation_handle = self.execute_statement(statement=statement, max_rows=max_rows, configuration=configuration, orientation=orientation)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def execute_async_query(self, query, statement=0):
if statement == 0:
# Impala just has settings currently
if self.query_server['server_name'] == 'beeswax':
for resource in query.get_configuration_statements():
self.execute_statement(resource.strip())
configuration = {}
if self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] > 0:
configuration[IMPALA_RESULTSET_CACHE_SIZE] = str(self.query_server['querycache_rows'])
# The query can override the default configuration
configuration.update(self._get_query_configuration(query))
query_statement = query.get_query_statement(statement)
return self.execute_async_statement(statement=query_statement, confOverlay=configuration)
def execute_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_NEXT):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
configuration['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=configuration)
res = self.call(self._client.ExecuteStatement, req)
return self.fetch_result(res.operationHandle, max_rows=max_rows, orientation=orientation), res.operationHandle
def execute_async_statement(self, statement, confOverlay):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
confOverlay['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=confOverlay, runAsync=True)
res = self.call(self._client.ExecuteStatement, req)
return HiveServerQueryHandle(secret=res.operationHandle.operationId.secret,
guid=res.operationHandle.operationId.guid,
operation_type=res.operationHandle.operationType,
has_result_set=res.operationHandle.hasResultSet,
modified_row_count=res.operationHandle.modifiedRowCount)
def fetch_data(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
    # Fetch until the result is empty due to a HS2 bug, instead of looking at hasMoreRows
results, schema = self.fetch_result(operation_handle, orientation, max_rows)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def cancel_operation(self, operation_handle):
req = TCancelOperationReq(operationHandle=operation_handle)
return self.call(self._client.CancelOperation, req)
def close_operation(self, operation_handle):
req = TCloseOperationReq(operationHandle=operation_handle)
return self.call(self._client.CloseOperation, req)
  def get_columns(self, database, table):
    req = TGetColumnsReq(schemaName=database, tableName=table)
    res = self.call(self._client.GetColumns, req)
    # Keep the original response so its operation handle can still be closed after fetching
    results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
    self.close_operation(res.operationHandle)
    return results, schema
def fetch_result(self, operation_handle, orientation=TFetchOrientation.FETCH_FIRST, max_rows=1000):
if operation_handle.hasResultSet:
fetch_req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows)
res = self.call(self._client.FetchResults, fetch_req)
else:
res = TFetchResultsResp(results=TRowSet(startRowOffset=0, rows=[], columns=[]))
    if operation_handle.hasResultSet and TFetchOrientation.FETCH_FIRST: # FETCH_FIRST is a truthy constant, so metadata is fetched whenever a result set exists
meta_req = TGetResultSetMetadataReq(operationHandle=operation_handle)
schema = self.call(self._client.GetResultSetMetadata, meta_req)
else:
schema = None
return res, schema
def fetch_log(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows, fetchType=1)
res = self.call(self._client.FetchResults, req)
if beeswax_conf.THRIFT_VERSION.get() >= 7:
lines = res.results.columns[0].stringVal.values
else:
lines = imap(lambda r: r.colVals[0].stringVal.value, res.results.rows)
return '\n'.join(lines)
def get_operation_status(self, operation_handle):
req = TGetOperationStatusReq(operationHandle=operation_handle)
return self.call(self._client.GetOperationStatus, req)
def explain(self, query):
query_statement = query.get_query_statement(0)
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement='EXPLAIN %s' % query_statement, configuration=configuration, orientation=TFetchOrientation.FETCH_NEXT)
def get_log(self, operation_handle):
try:
req = TGetLogReq(operationHandle=operation_handle)
res = self.call(self._client.GetLog, req)
return res.log
except:
LOG.exception('server does not support GetLog')
return 'Server does not support GetLog()'
def get_partitions(self, database, table_name, partition_spec=None, max_parts=None, reverse_sort=True):
table = self.get_table(database, table_name)
if max_parts is None or max_parts <= 0:
max_rows = 10000
else:
max_rows = 1000 if max_parts <= 250 else max_parts
query = 'SHOW PARTITIONS `%s`.`%s`' % (database, table_name)
if partition_spec:
query += ' PARTITION(%s)' % partition_spec
partition_table = self.execute_query_statement(query, max_rows=max_rows)
partitions = [PartitionValueCompatible(partition, table) for partition in partition_table.rows()]
if reverse_sort:
partitions.reverse()
return partitions[:max_parts]
def _get_query_configuration(self, query):
return dict([(setting['key'], setting['value']) for setting in query.settings])
class HiveServerTableCompatible(HiveServerTable):
"""Same API as Beeswax"""
def __init__(self, hive_table):
self.table = hive_table.table
self.table_schema = hive_table.table_schema
self.desc_results = hive_table.desc_results
self.desc_schema = hive_table.desc_schema
self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
@property
def cols(self):
return [
type('Col', (object,), {
'name': col.get('col_name', '').strip() if col.get('col_name') else '',
'type': col.get('data_type', '').strip() if col.get('data_type') else '',
'comment': col.get('comment', '').strip() if col.get('comment') else ''
}) for col in HiveServerTable.cols.fget(self)
]
class ResultCompatible:
def __init__(self, data_table):
self.data_table = data_table
self.rows = data_table.rows
self.has_more = data_table.has_more
self.start_row = data_table.startRowOffset
self.ready = True
@property
def columns(self):
return self.cols()
def cols(self):
return [col.name for col in self.data_table.cols()]
class PartitionKeyCompatible:
def __init__(self, name, type, comment):
self.name = name
self.type = type
self.comment = comment
def __eq__(self, other):
return isinstance(other, PartitionKeyCompatible) and \
self.name == other.name and \
self.type == other.type and \
self.comment == other.comment
def __repr__(self):
return 'PartitionKey(name:%s, type:%s, comment:%s)' % (self.name, self.type, self.comment)
class PartitionValueCompatible:
def __init__(self, partition_row, table, properties=None):
if properties is None:
properties = {}
# Parses: ['datehour=2013022516'] or ['month=2011-07/dt=2011-07-01/hr=12']
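    # For example, 'month=2011-07/dt=2011-07-01/hr=12' yields:
    #   partition_spec = "month='2011-07',dt='2011-07-01',hr='12'"
    #   values         = ['2011-07', '2011-07-01', '12']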
partition = partition_row[0]
parts = partition.split('/')
self.partition_spec = ','.join(["%s='%s'" % (pv[0], pv[1]) for pv in [part.split('=') for part in parts]])
self.values = [pv[1] for pv in [part.split('=') for part in parts]]
self.sd = type('Sd', (object,), properties,)
class ExplainCompatible:
def __init__(self, data_table):
self.textual = '\n'.join([line[0] for line in data_table.rows()])
class ResultMetaCompatible:
def __init__(self):
self.in_tablename = True
class HiveServerClientCompatible(object):
"""Same API as Beeswax"""
def __init__(self, client):
self._client = client
self.user = client.user
self.query_server = client.query_server
def query(self, query, statement=0):
return self._client.execute_async_query(query, statement)
def get_state(self, handle):
operationHandle = handle.get_rpc_handle()
res = self._client.get_operation_status(operationHandle)
return HiveServerQueryHistory.STATE_MAP[res.operationState]
def get_operation_status(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.get_operation_status(operationHandle)
def use(self, query):
data = self._client.execute_query(query)
self._client.close_operation(data.operation_handle)
return data
def explain(self, query):
data_table = self._client.explain(query)
data = ExplainCompatible(data_table)
self._client.close_operation(data_table.operation_handle)
return data
def fetch(self, handle, start_over=False, max_rows=None):
operationHandle = handle.get_rpc_handle()
if max_rows is None:
max_rows = 1000
if start_over and not (self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] == 0): # Backward compatibility for impala
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
data_table = self._client.fetch_data(operationHandle, orientation=orientation, max_rows=max_rows)
return ResultCompatible(data_table)
def cancel_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.cancel_operation(operationHandle)
def close(self, handle):
return self.close_operation(handle)
def close_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.close_operation(operationHandle)
def close_session(self, session):
operationHandle = session.get_handle()
return self._client.close_session(operationHandle)
def dump_config(self):
return 'Does not exist in HS2'
def get_log(self, handle, start_over=True):
operationHandle = handle.get_rpc_handle()
if beeswax_conf.USE_GET_LOG_API.get() or self.query_server['server_name'] == 'impala':
return self._client.get_log(operationHandle)
else:
if start_over:
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
return self._client.fetch_log(operationHandle, orientation=orientation, max_rows=-1)
def get_databases(self):
col = 'TABLE_SCHEM'
return [table[col] for table in self._client.get_databases()]
def get_database(self, database):
return self._client.get_database(database)
def get_tables_meta(self, database, table_names):
tables = self._client.get_tables_meta(database, table_names)
massaged_tables = []
for table in tables:
massaged_tables.append({
'name': table['TABLE_NAME'],
'comment': table['REMARKS'],
'type': table['TABLE_TYPE'].capitalize()}
)
return massaged_tables
def get_tables(self, database, table_names):
tables = [table['TABLE_NAME'] for table in self._client.get_tables(database, table_names)]
tables.sort()
return tables
def get_table(self, database, table_name, partition_spec=None):
table = self._client.get_table(database, table_name, partition_spec)
return HiveServerTableCompatible(table)
def get_columns(self, database, table):
return self._client.get_columns(database, table)
def get_default_configuration(self, *args, **kwargs):
return {}
def get_results_metadata(self, handle):
# We just need to mock
return ResultMetaCompatible()
def create_database(self, name, description): raise NotImplementedError()
def alter_table(self, dbname, tbl_name, new_tbl): raise NotImplementedError()
def open_session(self, user):
return self._client.open_session(user)
def add_partition(self, new_part): raise NotImplementedError()
def get_partition(self, *args, **kwargs): raise NotImplementedError()
def get_partitions(self, database, table_name, partition_spec, max_parts, reverse_sort=True):
return self._client.get_partitions(database, table_name, partition_spec, max_parts, reverse_sort)
def alter_partition(self, db_name, tbl_name, new_part): raise NotImplementedError()
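# ---------------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of the upstream module):
# typical use of the Beeswax-compatible wrapper defined above. The
# HiveServerClient constructor arguments shown here are assumptions about the
# surrounding project and may differ.
#
#   client = HiveServerClientCompatible(HiveServerClient(query_server, user))
#   databases = client.get_databases()
#   handle = client.query(hql_query)                   # asynchronous execution
#   results = client.fetch(handle, start_over=True, max_rows=100)
#   client.close(handle)
# ---------------------------------------------------------------------------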
| ahmed-mahran/hue | apps/beeswax/src/beeswax/server/hive_server2_lib.py | Python | apache-2.0 | 38,037 |
"use strict";
var CONST = require('../persistence/sqlconst');
var util = require("util");
var DEFAULT_LIMIT = 50;
var MAX_THRESHOLD = 500;
var knexModule = require("knex");
/**
*
 * @param knexConfig a knex configuration object used to create the connection
* @constructor
*/
function Blog(knexConfig) {
this.knex = knexModule(knexConfig);
}
//DDL Functions
Blog.prototype.dropPostTable = function () {
console.info("Dropping table if exist");
return this.knex.schema.dropTableIfExists(CONST.POST.TABLE);
};
/**
 * Creates the post table.
*/
Blog.prototype.createPostTable = function () {
console.info("Creating %s table if exist", CONST.POST.TABLE);
return this.knex.schema.createTable(CONST.POST.TABLE, function (table) {
table.increments(CONST.POST.PK);
table.string(CONST.POST.GUID).unique();
table.string(CONST.POST.TITLE).unique()
.notNullable();
table.binary(CONST.POST.CONTENT)
.notNullable();
table.datetime(CONST.POST.PUB_DATE).index(CONST.POST.IDX_PUBDATE)
.notNullable();
});
};
Blog.prototype.cleanUp = function () {
console.log("Cleaning up Knex");
this.knex.destroy();
};
Blog.prototype.savePost = function (post) {
var record = {
"title": post.title,
"content": post.content,
"guid": post.guid,
"publication_date": post.publicationDate
};
return this.knex.insert(record).into(CONST.POST.TABLE);
};
Blog.prototype.deletePost = function (postId) {
console.info("Deleting post :%d", postId);
return this.knex(CONST.POST.TABLE).where(CONST.POST.PK, postId).del();
};
//Limit Helper functions
function checkLowerBoundLimit(limit) {
if (util.isNullOrUndefined(limit) || limit === 0) {
return DEFAULT_LIMIT;
} else {
return limit;
}
}
function checkUpperBoundLimit(value) {
if (!util.isNullOrUndefined(value) && value >= MAX_THRESHOLD) {
return MAX_THRESHOLD;
}
else {
return value;
}
}
Blog.prototype._determineDefaultLimit = function (limit) {
var result = checkLowerBoundLimit(limit);
result = checkUpperBoundLimit(result);
return result;
};
// Query functions
function selectAllColumns(knex) {
    return knex
        .select(CONST.POST.PK, CONST.POST.TITLE, CONST.POST.CONTENT, CONST.POST.PUB_DATE, CONST.POST.GUID)
        .from(CONST.POST.TABLE);
}
Blog.prototype.findPostById = function (postId) {
return selectAllColumns(this.knex).
where(CONST.POST.PK, postId);
};
Blog.prototype.getAllPosts = function (limit) {
    return this.knex.select(CONST.POST.PK, CONST.POST.TITLE, CONST.POST.GUID, CONST.POST.PUB_DATE)
        .from(CONST.POST.TABLE)
        .limit(this._determineDefaultLimit(limit));
};
Blog.prototype.findPostByTitle = function (title) {
return selectAllColumns(this.knex).
where(CONST.POST.TITLE, title);
};
module.exports = Blog;
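// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, not part of the original module):
// expected usage with a knex configuration object. The sqlite3 settings are
// assumptions for demonstration only.
//
//   var Blog = require('./blog');
//   var blog = new Blog({ client: 'sqlite3', connection: { filename: './blog.db' }, useNullAsDefault: true });
//   blog.createPostTable()
//       .then(function () {
//           return blog.savePost({ title: 'Hello', content: 'First post', guid: 'abc-123', publicationDate: new Date() });
//       })
//       .then(function () {
//           return blog.getAllPosts(10);
//       })
//       .then(function (posts) {
//           console.log(posts);
//           blog.cleanUp();
//       });
// ---------------------------------------------------------------------------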
| mohan82/myblog-api | persistence/blog.js | JavaScript | apache-2.0 | 2,864 |
<?php
final class DiffusionRepositoryEditEngine
extends PhabricatorEditEngine {
const ENGINECONST = 'diffusion.repository';
private $versionControlSystem;
public function setVersionControlSystem($version_control_system) {
$this->versionControlSystem = $version_control_system;
return $this;
}
public function getVersionControlSystem() {
return $this->versionControlSystem;
}
public function isEngineConfigurable() {
return false;
}
public function isDefaultQuickCreateEngine() {
return true;
}
public function getQuickCreateOrderVector() {
return id(new PhutilSortVector())->addInt(300);
}
public function getEngineName() {
return pht('Repositories');
}
public function getSummaryHeader() {
return pht('Edit Repositories');
}
public function getSummaryText() {
return pht('Creates and edits repositories.');
}
public function getEngineApplicationClass() {
return 'PhabricatorDiffusionApplication';
}
protected function newEditableObject() {
$viewer = $this->getViewer();
$repository = PhabricatorRepository::initializeNewRepository($viewer);
$repository->setDetail('newly-initialized', true);
$vcs = $this->getVersionControlSystem();
if ($vcs) {
$repository->setVersionControlSystem($vcs);
}
// Pick a random open service to allocate this repository on, if any exist.
// If there are no services, we aren't in cluster mode and will allocate
// locally. If there are services but none permit allocations, we fail.
// Eventually we can make this more flexible, but this rule is a reasonable
// starting point as we begin to deploy cluster services.
$services = id(new AlmanacServiceQuery())
->setViewer(PhabricatorUser::getOmnipotentUser())
->withServiceTypes(
array(
AlmanacClusterRepositoryServiceType::SERVICETYPE,
))
->needProperties(true)
->execute();
if ($services) {
// Filter out services which do not permit new allocations.
foreach ($services as $key => $possible_service) {
if ($possible_service->getAlmanacPropertyValue('closed')) {
unset($services[$key]);
}
}
if (!empty($services)) {
shuffle($services);
$service = head($services);
$repository->setAlmanacServicePHID($service->getPHID());
}
}
return $repository;
}
protected function newObjectQuery() {
return new PhabricatorRepositoryQuery();
}
protected function getObjectCreateTitleText($object) {
return pht('Create Repository');
}
protected function getObjectCreateButtonText($object) {
return pht('Create Repository');
}
protected function getObjectEditTitleText($object) {
return pht('Edit Repository: %s', $object->getName());
}
protected function getObjectEditShortText($object) {
return $object->getDisplayName();
}
protected function getObjectCreateShortText() {
return pht('Create Repository');
}
protected function getObjectName() {
return pht('Repository');
}
protected function getObjectViewURI($object) {
return $object->getPathURI('manage/');
}
protected function getCreateNewObjectPolicy() {
return $this->getApplication()->getPolicy(
DiffusionCreateRepositoriesCapability::CAPABILITY);
}
protected function newPages($object) {
$panels = DiffusionRepositoryManagementPanel::getAllPanels();
$pages = array();
$uris = array();
foreach ($panels as $panel_key => $panel) {
$panel->setRepository($object);
$uris[$panel_key] = $panel->getPanelURI();
$page = $panel->newEditEnginePage();
if (!$page) {
continue;
}
$pages[] = $page;
}
$basics_key = DiffusionRepositoryBasicsManagementPanel::PANELKEY;
$basics_uri = $uris[$basics_key];
$more_pages = array(
id(new PhabricatorEditPage())
->setKey('encoding')
->setLabel(pht('Text Encoding'))
->setViewURI($basics_uri)
->setFieldKeys(
array(
'encoding',
)),
id(new PhabricatorEditPage())
->setKey('extensions')
->setLabel(pht('Extensions'))
->setIsDefault(true),
);
foreach ($more_pages as $page) {
$pages[] = $page;
}
return $pages;
}
protected function willConfigureFields($object, array $fields) {
// Change the default field order so related fields are adjacent.
$after = array(
'policy.edit' => array('policy.push'),
);
$result = array();
foreach ($fields as $key => $value) {
$result[$key] = $value;
if (!isset($after[$key])) {
continue;
}
foreach ($after[$key] as $next_key) {
if (!isset($fields[$next_key])) {
continue;
}
unset($result[$next_key]);
$result[$next_key] = $fields[$next_key];
unset($fields[$next_key]);
}
}
return $result;
}
protected function buildCustomEditFields($object) {
$viewer = $this->getViewer();
$policies = id(new PhabricatorPolicyQuery())
->setViewer($viewer)
->setObject($object)
->execute();
$fetch_value = $object->getFetchRules();
$track_value = $object->getTrackOnlyRules();
$permanent_value = $object->getPermanentRefRules();
$automation_instructions = pht(
"Configure **Repository Automation** to allow Phabricator to ".
"write to this repository.".
"\n\n".
"IMPORTANT: This feature is new, experimental, and not supported. ".
"Use it at your own risk.");
$staging_instructions = pht(
"To make it easier to run integration tests and builds on code ".
"under review, you can configure a **Staging Area**. When `arc` ".
"creates a diff, it will push a copy of the changes to the ".
"configured staging area with a corresponding tag.".
"\n\n".
"IMPORTANT: This feature is new, experimental, and not supported. ".
"Use it at your own risk.");
$subpath_instructions = pht(
'If you want to import only part of a repository, like `trunk/`, '.
'you can set a path in **Import Only**. Phabricator will ignore '.
'commits which do not affect this path.');
$filesize_warning = null;
if ($object->isGit()) {
$git_binary = PhutilBinaryAnalyzer::getForBinary('git');
$git_version = $git_binary->getBinaryVersion();
$filesize_version = '1.8.4';
if (version_compare($git_version, $filesize_version, '<')) {
$filesize_warning = pht(
'(WARNING) {icon exclamation-triangle} The version of "git" ("%s") '.
'installed on this server does not support '.
'"--batch-check=<format>", a feature required to enforce filesize '.
'limits. Upgrade to "git" %s or newer to use this feature.',
$git_version,
$filesize_version);
}
}
$track_instructions = pht(
'WARNING: The "Track Only" feature is deprecated. Use "Fetch Refs" '.
'and "Permanent Refs" instead. This feature will be removed in a '.
'future version of Phabricator.');
return array(
id(new PhabricatorSelectEditField())
->setKey('vcs')
->setLabel(pht('Version Control System'))
->setTransactionType(
PhabricatorRepositoryVCSTransaction::TRANSACTIONTYPE)
->setIsFormField(false)
->setIsCopyable(true)
->setOptions(PhabricatorRepositoryType::getAllRepositoryTypes())
->setDescription(pht('Underlying repository version control system.'))
->setConduitDescription(
pht(
'Choose which version control system to use when creating a '.
'repository.'))
->setConduitTypeDescription(pht('Version control system selection.'))
->setValue($object->getVersionControlSystem()),
id(new PhabricatorTextEditField())
->setKey('name')
->setLabel(pht('Name'))
->setIsRequired(true)
->setTransactionType(
PhabricatorRepositoryNameTransaction::TRANSACTIONTYPE)
->setDescription(pht('The repository name.'))
->setConduitDescription(pht('Rename the repository.'))
->setConduitTypeDescription(pht('New repository name.'))
->setValue($object->getName()),
id(new PhabricatorTextEditField())
->setKey('callsign')
->setLabel(pht('Callsign'))
->setTransactionType(
PhabricatorRepositoryCallsignTransaction::TRANSACTIONTYPE)
->setDescription(pht('The repository callsign.'))
->setConduitDescription(pht('Change the repository callsign.'))
->setConduitTypeDescription(pht('New repository callsign.'))
->setValue($object->getCallsign()),
id(new PhabricatorTextEditField())
->setKey('shortName')
->setLabel(pht('Short Name'))
->setTransactionType(
PhabricatorRepositorySlugTransaction::TRANSACTIONTYPE)
->setDescription(pht('Short, unique repository name.'))
->setConduitDescription(pht('Change the repository short name.'))
->setConduitTypeDescription(pht('New short name for the repository.'))
->setValue($object->getRepositorySlug()),
id(new PhabricatorRemarkupEditField())
->setKey('description')
->setLabel(pht('Description'))
->setTransactionType(
PhabricatorRepositoryDescriptionTransaction::TRANSACTIONTYPE)
->setDescription(pht('Repository description.'))
->setConduitDescription(pht('Change the repository description.'))
->setConduitTypeDescription(pht('New repository description.'))
->setValue($object->getDetail('description')),
id(new PhabricatorTextEditField())
->setKey('encoding')
->setLabel(pht('Text Encoding'))
->setIsCopyable(true)
->setTransactionType(
PhabricatorRepositoryEncodingTransaction::TRANSACTIONTYPE)
->setDescription(pht('Default text encoding.'))
->setConduitDescription(pht('Change the default text encoding.'))
->setConduitTypeDescription(pht('New text encoding.'))
->setValue($object->getDetail('encoding')),
id(new PhabricatorBoolEditField())
->setKey('allowDangerousChanges')
->setLabel(pht('Allow Dangerous Changes'))
->setIsCopyable(true)
->setIsFormField(false)
->setOptions(
pht('Prevent Dangerous Changes'),
pht('Allow Dangerous Changes'))
->setTransactionType(
PhabricatorRepositoryDangerousTransaction::TRANSACTIONTYPE)
->setDescription(pht('Permit dangerous changes to be made.'))
->setConduitDescription(pht('Allow or prevent dangerous changes.'))
->setConduitTypeDescription(pht('New protection setting.'))
->setValue($object->shouldAllowDangerousChanges()),
id(new PhabricatorBoolEditField())
->setKey('allowEnormousChanges')
->setLabel(pht('Allow Enormous Changes'))
->setIsCopyable(true)
->setIsFormField(false)
->setOptions(
pht('Prevent Enormous Changes'),
pht('Allow Enormous Changes'))
->setTransactionType(
PhabricatorRepositoryEnormousTransaction::TRANSACTIONTYPE)
->setDescription(pht('Permit enormous changes to be made.'))
->setConduitDescription(pht('Allow or prevent enormous changes.'))
->setConduitTypeDescription(pht('New protection setting.'))
->setValue($object->shouldAllowEnormousChanges()),
id(new PhabricatorSelectEditField())
->setKey('status')
->setLabel(pht('Status'))
->setTransactionType(
PhabricatorRepositoryActivateTransaction::TRANSACTIONTYPE)
->setIsFormField(false)
->setOptions(PhabricatorRepository::getStatusNameMap())
->setDescription(pht('Active or inactive status.'))
->setConduitDescription(pht('Active or deactivate the repository.'))
->setConduitTypeDescription(pht('New repository status.'))
->setValue($object->getStatus()),
id(new PhabricatorTextEditField())
->setKey('defaultBranch')
->setLabel(pht('Default Branch'))
->setTransactionType(
PhabricatorRepositoryDefaultBranchTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Default branch name.'))
->setConduitDescription(pht('Set the default branch name.'))
->setConduitTypeDescription(pht('New default branch name.'))
->setValue($object->getDetail('default-branch')),
id(new PhabricatorTextAreaEditField())
->setIsStringList(true)
->setKey('fetchRefs')
->setLabel(pht('Fetch Refs'))
->setTransactionType(
PhabricatorRepositoryFetchRefsTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Fetch only these refs.'))
->setConduitDescription(pht('Set the fetched refs.'))
->setConduitTypeDescription(pht('New fetched refs.'))
->setValue($fetch_value),
id(new PhabricatorTextAreaEditField())
->setIsStringList(true)
->setKey('permanentRefs')
->setLabel(pht('Permanent Refs'))
->setTransactionType(
PhabricatorRepositoryPermanentRefsTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Only these refs are considered permanent.'))
->setConduitDescription(pht('Set the permanent refs.'))
->setConduitTypeDescription(pht('New permanent ref rules.'))
->setValue($permanent_value),
id(new PhabricatorTextAreaEditField())
->setIsStringList(true)
->setKey('trackOnly')
->setLabel(pht('Track Only'))
->setTransactionType(
PhabricatorRepositoryTrackOnlyTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setControlInstructions($track_instructions)
->setDescription(pht('Track only these branches.'))
->setConduitDescription(pht('Set the tracked branches.'))
->setConduitTypeDescription(pht('New tracked branches.'))
->setValue($track_value),
id(new PhabricatorTextEditField())
->setKey('importOnly')
->setLabel(pht('Import Only'))
->setTransactionType(
PhabricatorRepositorySVNSubpathTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Subpath to selectively import.'))
->setConduitDescription(pht('Set the subpath to import.'))
->setConduitTypeDescription(pht('New subpath to import.'))
->setValue($object->getDetail('svn-subpath'))
->setControlInstructions($subpath_instructions),
id(new PhabricatorTextEditField())
->setKey('stagingAreaURI')
->setLabel(pht('Staging Area URI'))
->setTransactionType(
PhabricatorRepositoryStagingURITransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Staging area URI.'))
->setConduitDescription(pht('Set the staging area URI.'))
->setConduitTypeDescription(pht('New staging area URI.'))
->setValue($object->getStagingURI())
->setControlInstructions($staging_instructions),
id(new PhabricatorDatasourceEditField())
->setKey('automationBlueprintPHIDs')
->setLabel(pht('Use Blueprints'))
->setTransactionType(
PhabricatorRepositoryBlueprintsTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDatasource(new DrydockBlueprintDatasource())
->setDescription(pht('Automation blueprints.'))
->setConduitDescription(pht('Change automation blueprints.'))
->setConduitTypeDescription(pht('New blueprint PHIDs.'))
->setValue($object->getAutomationBlueprintPHIDs())
->setControlInstructions($automation_instructions),
id(new PhabricatorStringListEditField())
->setKey('symbolLanguages')
->setLabel(pht('Languages'))
->setTransactionType(
PhabricatorRepositorySymbolLanguagesTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(
pht('Languages which define symbols in this repository.'))
->setConduitDescription(
pht('Change symbol languages for this repository.'))
->setConduitTypeDescription(
pht('New symbol languages.'))
->setValue($object->getSymbolLanguages()),
id(new PhabricatorDatasourceEditField())
->setKey('symbolRepositoryPHIDs')
->setLabel(pht('Uses Symbols From'))
->setTransactionType(
PhabricatorRepositorySymbolSourcesTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDatasource(new DiffusionRepositoryDatasource())
->setDescription(pht('Repositories to link symbols from.'))
->setConduitDescription(pht('Change symbol source repositories.'))
->setConduitTypeDescription(pht('New symbol repositories.'))
->setValue($object->getSymbolSources()),
id(new PhabricatorBoolEditField())
->setKey('publish')
->setLabel(pht('Publish/Notify'))
->setTransactionType(
PhabricatorRepositoryNotifyTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setOptions(
pht('Disable Notifications, Feed, and Herald'),
pht('Enable Notifications, Feed, and Herald'))
->setDescription(pht('Configure how changes are published.'))
->setConduitDescription(pht('Change publishing options.'))
->setConduitTypeDescription(pht('New notification setting.'))
->setValue(!$object->isPublishingDisabled()),
id(new PhabricatorPolicyEditField())
->setKey('policy.push')
->setLabel(pht('Push Policy'))
->setAliases(array('push'))
->setIsCopyable(true)
->setCapability(DiffusionPushCapability::CAPABILITY)
->setPolicies($policies)
->setTransactionType(
PhabricatorRepositoryPushPolicyTransaction::TRANSACTIONTYPE)
->setDescription(
pht('Controls who can push changes to the repository.'))
->setConduitDescription(
pht('Change the push policy of the repository.'))
->setConduitTypeDescription(pht('New policy PHID or constant.'))
->setValue($object->getPolicy(DiffusionPushCapability::CAPABILITY)),
id(new PhabricatorTextEditField())
->setKey('filesizeLimit')
->setLabel(pht('Filesize Limit'))
->setTransactionType(
PhabricatorRepositoryFilesizeLimitTransaction::TRANSACTIONTYPE)
->setDescription(pht('Maximum permitted file size.'))
->setConduitDescription(pht('Change the filesize limit.'))
->setConduitTypeDescription(pht('New repository filesize limit.'))
->setControlInstructions($filesize_warning)
->setValue($object->getFilesizeLimit()),
id(new PhabricatorTextEditField())
->setKey('copyTimeLimit')
->setLabel(pht('Clone/Fetch Timeout'))
->setTransactionType(
PhabricatorRepositoryCopyTimeLimitTransaction::TRANSACTIONTYPE)
->setDescription(
pht('Maximum permitted duration of internal clone/fetch.'))
->setConduitDescription(pht('Change the copy time limit.'))
->setConduitTypeDescription(pht('New repository copy time limit.'))
->setValue($object->getCopyTimeLimit()),
id(new PhabricatorTextEditField())
->setKey('touchLimit')
->setLabel(pht('Touched Paths Limit'))
->setTransactionType(
PhabricatorRepositoryTouchLimitTransaction::TRANSACTIONTYPE)
->setDescription(pht('Maximum permitted paths touched per commit.'))
->setConduitDescription(pht('Change the touch limit.'))
->setConduitTypeDescription(pht('New repository touch limit.'))
->setValue($object->getTouchLimit()),
);
}
}
| wikimedia/phabricator | src/applications/diffusion/editor/DiffusionRepositoryEditEngine.php | PHP | apache-2.0 | 19,968 |
/*
* Created on 21.07.2015
*/
package com.github.dockerjava.core.command;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.dockerjava.api.model.Frame;
import com.github.dockerjava.core.async.ResultCallbackTemplate;
/**
*
* @author Marcus Linke
*
* @deprecated use {@link com.github.dockerjava.api.async.ResultCallback.Adapter}
*/
@Deprecated
public class AttachContainerResultCallback extends ResultCallbackTemplate<AttachContainerResultCallback, Frame> {
private static final Logger LOGGER = LoggerFactory.getLogger(AttachContainerResultCallback.class);
@Override
public void onNext(Frame item) {
LOGGER.debug(item.toString());
}
}
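// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, not part of the original class):
// how this deprecated callback is typically wired up. The exact builder
// methods depend on the docker-java version in use.
//
//   dockerClient.attachContainerCmd(containerId)
//       .withStdOut(true)
//       .withStdErr(true)
//       .withFollowStream(true)
//       .exec(new AttachContainerResultCallback())
//       .awaitCompletion();
// ---------------------------------------------------------------------------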
| tejksat/docker-java | docker-java-core/src/main/java/com/github/dockerjava/core/command/AttachContainerResultCallback.java | Java | apache-2.0 | 695 |
package com.orhanobut.wasp;
import android.graphics.Bitmap;
import android.os.Looper;
import android.text.TextUtils;
import android.view.ViewGroup;
import android.widget.ImageView;
import com.orhanobut.wasp.utils.StringUtils;
/**
 * This class is responsible for loading images. It automatically handles canceling and
 * loading images for recycled views as well.
*
* @author Orhan Obut
*/
final class InternalImageHandler implements ImageHandler {
/**
* It is used to determine which url is current for the ImageView
*/
private static final int KEY_TAG = 0x7f070006;
/**
* Stores the cached images
*/
private final ImageCache imageCache;
/**
* It is used to create network request for the bitmap
*/
private final ImageNetworkHandler imageNetworkHandler;
InternalImageHandler(ImageCache cache, ImageNetworkHandler handler) {
this.imageCache = cache;
this.imageNetworkHandler = handler;
}
@Override
public void load(ImageCreator imageCreator) {
checkMain();
loadImage(imageCreator);
}
private void loadImage(final ImageCreator imageCreator) {
final String url = imageCreator.getUrl();
final ImageView imageView = imageCreator.getImageView();
// clear the target
initImageView(imageCreator);
// if there is any old request. cancel it
String tag = (String) imageView.getTag(KEY_TAG);
if (tag != null) {
imageNetworkHandler.cancelRequest(tag);
}
// update the current url
imageView.setTag(KEY_TAG, url);
int width = imageView.getWidth();
int height = imageView.getHeight();
boolean wrapWidth = false;
boolean wrapHeight = false;
if (imageView.getLayoutParams() != null) {
ViewGroup.LayoutParams params = imageView.getLayoutParams();
wrapWidth = params.width == ViewGroup.LayoutParams.WRAP_CONTENT;
wrapHeight = params.height == ViewGroup.LayoutParams.WRAP_CONTENT;
}
// if the view's bounds aren't known yet, and this is not a wrap-content/wrap-content
// view, hold off on loading the image.
boolean isFullyWrapContent = wrapWidth && wrapHeight;
if (width == 0 && height == 0 && !isFullyWrapContent) {
Logger.d("ImageHandler : width == 0 && height == 0 && !isFullyWrapContent");
// return;
}
// Calculate the max image width / height to use while ignoring WRAP_CONTENT dimens.
int maxWidth = wrapWidth ? 0 : width;
int maxHeight = wrapHeight ? 0 : height;
// check if it is already in cache
final String cacheKey = StringUtils.getCacheKey(url, maxWidth, maxHeight);
final Bitmap bitmap = imageCache.getBitmap(cacheKey);
if (bitmap != null) {
imageView.setImageBitmap(bitmap);
Logger.d("CACHE IMAGE : " + url);
return;
}
// make a new request
imageNetworkHandler.requestImage(imageCreator, maxWidth, maxHeight, new InternalCallback<Container>() {
@Override
public void onSuccess(final Container container) {
Bitmap bitmap = container.bitmap;
if (bitmap == null) {
return;
}
container.waspImageCreator.logSuccess(bitmap);
// cache the image
imageCache.putBitmap(container.cacheKey, container.bitmap);
ImageView imageView = container.waspImageCreator.getImageView();
// if it is the current url, set the image
String tag = (String) imageView.getTag(KEY_TAG);
if (TextUtils.equals(tag, container.waspImageCreator.getUrl())) {
imageView.setImageBitmap(container.bitmap);
imageView.setTag(KEY_TAG, null);
}
}
@Override
public void onError(WaspError error) {
int errorImage = imageCreator.getErrorImage();
if (errorImage != 0) {
imageView.setImageResource(errorImage);
}
error.log();
}
});
imageCreator.logRequest();
}
// clear the target by setting null or default placeholder
private void initImageView(ImageCreator waspImageCreator) {
int defaultImage = waspImageCreator.getDefaultImage();
ImageView imageView = waspImageCreator.getImageView();
if (defaultImage != 0) {
imageView.setImageResource(defaultImage);
return;
}
imageView.setImageBitmap(null);
}
@Override
public void clearCache() {
if (imageCache == null) {
return;
}
imageCache.clearCache();
}
// the call should be done in main thread
private void checkMain() {
if (Looper.myLooper() != Looper.getMainLooper()) {
throw new IllegalStateException("Wasp.Image.load() must be invoked from the main thread.");
}
}
/**
* Simple cache adapter interface.
*/
interface ImageCache {
Bitmap getBitmap(String url);
void putBitmap(String url, Bitmap bitmap);
void clearCache();
}
interface ImageNetworkHandler {
void requestImage(ImageCreator waspImageCreator, int maxWidth, int maxHeight, InternalCallback<Container> waspCallback);
void cancelRequest(String tag);
}
static class Container {
String cacheKey;
Bitmap bitmap;
ImageCreator waspImageCreator;
}
}
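// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, not part of the original class): a
// minimal ImageCache backed by android.util.LruCache, sized to roughly 1/8th
// of the available heap. The class name and sizing policy are assumptions.
//
//   class LruImageCache implements InternalImageHandler.ImageCache {
//     private final LruCache<String, Bitmap> cache = new LruCache<String, Bitmap>(
//         (int) (Runtime.getRuntime().maxMemory() / 1024 / 8)) {
//       @Override protected int sizeOf(String key, Bitmap value) {
//         return value.getByteCount() / 1024;   // measure entries in kilobytes
//       }
//     };
//     @Override public Bitmap getBitmap(String url) { return cache.get(url); }
//     @Override public void putBitmap(String url, Bitmap bitmap) { cache.put(url, bitmap); }
//     @Override public void clearCache() { cache.evictAll(); }
//   }
// ---------------------------------------------------------------------------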
| imdatcandan/wasp | wasp/src/main/java/com/orhanobut/wasp/InternalImageHandler.java | Java | apache-2.0 | 5,127 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/thrift-config.h>
#include <cstring>
#include <sstream>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#include <sys/types.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#include <netinet/tcp.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <thrift/concurrency/Monitor.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TTransportException.h>
#include <thrift/transport/PlatformSocket.h>
#ifndef SOCKOPT_CAST_T
# ifndef _WIN32
# define SOCKOPT_CAST_T void
# else
# define SOCKOPT_CAST_T char
# endif // _WIN32
#endif
template<class T>
inline const SOCKOPT_CAST_T* const_cast_sockopt(const T* v) {
return reinterpret_cast<const SOCKOPT_CAST_T*>(v);
}
template<class T>
inline SOCKOPT_CAST_T* cast_sockopt(T* v) {
return reinterpret_cast<SOCKOPT_CAST_T*>(v);
}
namespace apache { namespace thrift { namespace transport {
using namespace std;
// Global var to track total socket sys calls
uint32_t g_socket_syscalls = 0;
/**
* TSocket implementation.
*
*/
TSocket::TSocket(string host, int port) :
host_(host),
port_(port),
path_(""),
socket_(THRIFT_INVALID_SOCKET),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
}
TSocket::TSocket(string path) :
host_(""),
port_(0),
path_(path),
socket_(THRIFT_INVALID_SOCKET),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}
TSocket::TSocket() :
host_(""),
port_(0),
path_(""),
socket_(THRIFT_INVALID_SOCKET),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}
TSocket::TSocket(THRIFT_SOCKET socket) :
host_(""),
port_(0),
path_(""),
socket_(socket),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
#ifdef SO_NOSIGPIPE
{
int one = 1;
setsockopt(socket_, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one));
}
#endif
}
TSocket::~TSocket() {
close();
}
bool TSocket::isOpen() {
return (socket_ != THRIFT_INVALID_SOCKET);
}
bool TSocket::peek() {
if (!isOpen()) {
return false;
}
uint8_t buf;
int r = static_cast<int>(recv(socket_, cast_sockopt(&buf), 1, MSG_PEEK));
if (r == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
#if defined __FreeBSD__ || defined __MACH__
/* shigin:
* freebsd returns -1 and THRIFT_ECONNRESET if socket was closed by
* the other side
*/
if (errno_copy == THRIFT_ECONNRESET)
{
close();
return false;
}
#endif
GlobalOutput.perror("TSocket::peek() recv() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::UNKNOWN, "recv()", errno_copy);
}
return (r > 0);
}
void TSocket::openConnection(struct addrinfo *res) {
if (isOpen()) {
return;
}
if (! path_.empty()) {
socket_ = socket(PF_UNIX, SOCK_STREAM, IPPROTO_IP);
} else {
socket_ = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
}
if (socket_ == THRIFT_INVALID_SOCKET) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() socket() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "socket()", errno_copy);
}
// Send timeout
if (sendTimeout_ > 0) {
setSendTimeout(sendTimeout_);
}
// Recv timeout
if (recvTimeout_ > 0) {
setRecvTimeout(recvTimeout_);
}
if(keepAlive_) {
setKeepAlive(keepAlive_);
}
// Linger
setLinger(lingerOn_, lingerVal_);
// No delay
setNoDelay(noDelay_);
#ifdef SO_NOSIGPIPE
{
int one = 1;
setsockopt(socket_, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one));
}
#endif
// Uses a low min RTO if asked to.
#ifdef TCP_LOW_MIN_RTO
if (getUseLowMinRto()) {
int one = 1;
setsockopt(socket_, IPPROTO_TCP, TCP_LOW_MIN_RTO, &one, sizeof(one));
}
#endif
// Set the socket to be non blocking for connect if a timeout exists
int flags = THRIFT_FCNTL(socket_, THRIFT_F_GETFL, 0);
if (connTimeout_ > 0) {
if (-1 == THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() THRIFT_FCNTL() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_FCNTL() failed", errno_copy);
}
} else {
if (-1 == THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags & ~THRIFT_O_NONBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() THRIFT_FCNTL " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_FCNTL() failed", errno_copy);
}
}
// Connect the socket
int ret;
if (! path_.empty()) {
#ifndef _WIN32
size_t len = path_.size() + 1;
if (len > sizeof(sockaddr_un::sun_path)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() Unix Domain socket path too long", errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, " Unix Domain socket path too long");
}
struct sockaddr_un address;
address.sun_family = AF_UNIX;
memcpy(address.sun_path, path_.c_str(), len);
socklen_t structlen = static_cast<socklen_t>(sizeof(address));
ret = connect(socket_, (struct sockaddr *) &address, structlen);
#else
GlobalOutput.perror("TSocket::open() Unix Domain socket path not supported on windows", -99);
throw TTransportException(TTransportException::NOT_OPEN, " Unix Domain socket path not supported");
#endif
} else {
ret = connect(socket_, res->ai_addr, static_cast<int>(res->ai_addrlen));
}
// success case
if (ret == 0) {
goto done;
}
if ((THRIFT_GET_SOCKET_ERROR != THRIFT_EINPROGRESS) && (THRIFT_GET_SOCKET_ERROR != THRIFT_EWOULDBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() connect() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "connect() failed", errno_copy);
}
struct THRIFT_POLLFD fds[1];
std::memset(fds, 0 , sizeof(fds));
fds[0].fd = socket_;
fds[0].events = THRIFT_POLLOUT;
ret = THRIFT_POLL(fds, 1, connTimeout_);
if (ret > 0) {
// Ensure the socket is connected and that there are no errors set
int val;
socklen_t lon;
lon = sizeof(int);
int ret2 = getsockopt(socket_, SOL_SOCKET, SO_ERROR, cast_sockopt(&val), &lon);
if (ret2 == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() getsockopt() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "getsockopt()", errno_copy);
}
// no errors on socket, go to town
if (val == 0) {
goto done;
}
GlobalOutput.perror("TSocket::open() error on socket (after THRIFT_POLL) " + getSocketInfo(), val);
throw TTransportException(TTransportException::NOT_OPEN, "socket open() error", val);
} else if (ret == 0) {
// socket timed out
string errStr = "TSocket::open() timed out " + getSocketInfo();
GlobalOutput(errStr.c_str());
throw TTransportException(TTransportException::NOT_OPEN, "open() timed out");
} else {
// error on THRIFT_POLL()
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() THRIFT_POLL() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_POLL() failed", errno_copy);
}
done:
// Set socket back to normal mode (blocking)
THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags);
if (path_.empty()) {
setCachedAddress(res->ai_addr, static_cast<socklen_t>(res->ai_addrlen));
}
}
void TSocket::open() {
if (isOpen()) {
return;
}
if (! path_.empty()) {
unix_open();
} else {
local_open();
}
}
void TSocket::unix_open(){
if (! path_.empty()) {
    // A Unix Domain Socket does not need an addrinfo struct, so we pass NULL
openConnection(NULL);
}
}
void TSocket::local_open(){
#ifdef _WIN32
TWinsockSingleton::create();
#endif // _WIN32
if (isOpen()) {
return;
}
// Validate port number
if (port_ < 0 || port_ > 0xFFFF) {
throw TTransportException(TTransportException::NOT_OPEN, "Specified port is invalid");
}
struct addrinfo hints, *res, *res0;
res = NULL;
res0 = NULL;
int error;
char port[sizeof("65535")];
std::memset(&hints, 0, sizeof(hints));
hints.ai_family = PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;
sprintf(port, "%d", port_);
error = getaddrinfo(host_.c_str(), port, &hints, &res0);
#ifdef _WIN32
if (error == WSANO_DATA) {
hints.ai_flags &= ~AI_ADDRCONFIG;
error = getaddrinfo(host_.c_str(), port, &hints, &res0);
}
#endif
if (error) {
string errStr = "TSocket::open() getaddrinfo() " + getSocketInfo() + string(THRIFT_GAI_STRERROR(error));
GlobalOutput(errStr.c_str());
close();
throw TTransportException(TTransportException::NOT_OPEN, "Could not resolve host for client socket.");
}
// Cycle through all the returned addresses until one
// connects or push the exception up.
for (res = res0; res; res = res->ai_next) {
try {
openConnection(res);
break;
} catch (TTransportException&) {
if (res->ai_next) {
close();
} else {
close();
freeaddrinfo(res0); // cleanup on failure
throw;
}
}
}
// Free address structure memory
freeaddrinfo(res0);
}
void TSocket::close() {
if (socket_ != THRIFT_INVALID_SOCKET) {
shutdown(socket_, THRIFT_SHUT_RDWR);
::THRIFT_CLOSESOCKET(socket_);
}
socket_ = THRIFT_INVALID_SOCKET;
}
void TSocket::setSocketFD(THRIFT_SOCKET socket) {
if (socket_ != THRIFT_INVALID_SOCKET) {
close();
}
socket_ = socket;
}
uint32_t TSocket::read(uint8_t* buf, uint32_t len) {
if (socket_ == THRIFT_INVALID_SOCKET) {
throw TTransportException(TTransportException::NOT_OPEN, "Called read on non-open socket");
}
int32_t retries = 0;
// THRIFT_EAGAIN can be signalled both when a timeout has occurred and when
// the system is out of resources (an awesome undocumented feature).
// The following is an approximation of the time interval under which
// THRIFT_EAGAIN is taken to indicate an out of resources error.
uint32_t eagainThresholdMicros = 0;
if (recvTimeout_) {
// if a readTimeout is specified along with a max number of recv retries, then
// the threshold will ensure that the read timeout is not exceeded even in the
// case of resource errors
eagainThresholdMicros = (recvTimeout_*1000)/ ((maxRecvRetries_>0) ? maxRecvRetries_ : 2);
}
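  // For example, recvTimeout_ = 500 ms with maxRecvRetries_ = 5 gives a
  // threshold of (500 * 1000) / 5 = 100000 microseconds per THRIFT_EAGAIN retry.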
try_again:
// Read from the socket
struct timeval begin;
if (recvTimeout_ > 0) {
THRIFT_GETTIMEOFDAY(&begin, NULL);
} else {
// if there is no read timeout we don't need the TOD to determine whether
// an THRIFT_EAGAIN is due to a timeout or an out-of-resource condition.
begin.tv_sec = begin.tv_usec = 0;
}
int got = static_cast<int>(recv(socket_, cast_sockopt(buf), len, 0));
int errno_copy = THRIFT_GET_SOCKET_ERROR; //THRIFT_GETTIMEOFDAY can change THRIFT_GET_SOCKET_ERROR
++g_socket_syscalls;
// Check for error on read
if (got < 0) {
if (errno_copy == THRIFT_EAGAIN) {
// if no timeout we can assume that resource exhaustion has occurred.
if (recvTimeout_ == 0) {
throw TTransportException(TTransportException::TIMED_OUT,
"THRIFT_EAGAIN (unavailable resources)");
}
// check if this is the lack of resources or timeout case
struct timeval end;
THRIFT_GETTIMEOFDAY(&end, NULL);
uint32_t readElapsedMicros = static_cast<uint32_t>(
((end.tv_sec - begin.tv_sec) * 1000 * 1000)
+ (((uint64_t)(end.tv_usec - begin.tv_usec))));
if (!eagainThresholdMicros || (readElapsedMicros < eagainThresholdMicros)) {
if (retries++ < maxRecvRetries_) {
THRIFT_SLEEP_USEC(50);
goto try_again;
} else {
throw TTransportException(TTransportException::TIMED_OUT,
"THRIFT_EAGAIN (unavailable resources)");
}
} else {
// infer that timeout has been hit
throw TTransportException(TTransportException::TIMED_OUT,
"THRIFT_EAGAIN (timed out)");
}
}
// If interrupted, try again
if (errno_copy == THRIFT_EINTR && retries++ < maxRecvRetries_) {
goto try_again;
}
#if defined __FreeBSD__ || defined __MACH__
if (errno_copy == THRIFT_ECONNRESET) {
/* shigin: freebsd doesn't follow POSIX semantic of recv and fails with
* THRIFT_ECONNRESET if peer performed shutdown
* edhall: eliminated close() since we do that in the destructor.
*/
return 0;
}
#endif
#ifdef _WIN32
if(errno_copy == WSAECONNRESET) {
return 0; // EOF
}
#endif
    // At this point it is not a try-again case, but a real problem
GlobalOutput.perror("TSocket::read() recv() " + getSocketInfo(), errno_copy);
// If we disconnect with no linger time
if (errno_copy == THRIFT_ECONNRESET) {
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_ECONNRESET");
}
    // This socket isn't open
if (errno_copy == THRIFT_ENOTCONN) {
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_ENOTCONN");
}
// Timed out!
if (errno_copy == THRIFT_ETIMEDOUT) {
throw TTransportException(TTransportException::TIMED_OUT, "THRIFT_ETIMEDOUT");
}
    // Some other error
throw TTransportException(TTransportException::UNKNOWN, "Unknown", errno_copy);
}
// The remote host has closed the socket
if (got == 0) {
// edhall: we used to call close() here, but our caller may want to deal
// with the socket fd and we'll close() in our destructor in any case.
return 0;
}
// Pack data into string
return got;
}
void TSocket::write(const uint8_t* buf, uint32_t len) {
uint32_t sent = 0;
while (sent < len) {
uint32_t b = write_partial(buf + sent, len - sent);
if (b == 0) {
// This should only happen if the timeout set with SO_SNDTIMEO expired.
// Raise an exception.
throw TTransportException(TTransportException::TIMED_OUT,
"send timeout expired");
}
sent += b;
}
}
uint32_t TSocket::write_partial(const uint8_t* buf, uint32_t len) {
if (socket_ == THRIFT_INVALID_SOCKET) {
throw TTransportException(TTransportException::NOT_OPEN, "Called write on non-open socket");
}
uint32_t sent = 0;
int flags = 0;
#ifdef MSG_NOSIGNAL
// Note the use of MSG_NOSIGNAL to suppress SIGPIPE errors, instead we
// check for the THRIFT_EPIPE return condition and close the socket in that case
flags |= MSG_NOSIGNAL;
#endif // ifdef MSG_NOSIGNAL
int b = static_cast<int>(send(socket_, const_cast_sockopt(buf + sent), len - sent, flags));
++g_socket_syscalls;
if (b < 0) {
if (THRIFT_GET_SOCKET_ERROR == THRIFT_EWOULDBLOCK || THRIFT_GET_SOCKET_ERROR == THRIFT_EAGAIN) {
return 0;
}
// Fail on a send error
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::write_partial() send() " + getSocketInfo(), errno_copy);
if (errno_copy == THRIFT_EPIPE || errno_copy == THRIFT_ECONNRESET || errno_copy == THRIFT_ENOTCONN) {
close();
throw TTransportException(TTransportException::NOT_OPEN, "write() send()", errno_copy);
}
throw TTransportException(TTransportException::UNKNOWN, "write() send()", errno_copy);
}
// Fail on blocked send
if (b == 0) {
throw TTransportException(TTransportException::NOT_OPEN, "Socket send returned 0.");
}
return b;
}
std::string TSocket::getHost() {
return host_;
}
int TSocket::getPort() {
return port_;
}
void TSocket::setHost(string host) {
host_ = host;
}
void TSocket::setPort(int port) {
port_ = port;
}
void TSocket::setLinger(bool on, int linger) {
lingerOn_ = on;
lingerVal_ = linger;
if (socket_ == THRIFT_INVALID_SOCKET) {
return;
}
#ifndef _WIN32
struct linger l = {(lingerOn_ ? 1 : 0), lingerVal_};
#else
struct linger l = {(lingerOn_ ? 1 : 0), static_cast<u_short>(lingerVal_)};
#endif
int ret = setsockopt(socket_, SOL_SOCKET, SO_LINGER, cast_sockopt(&l), sizeof(l));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setLinger() setsockopt() " + getSocketInfo(), errno_copy);
}
}
void TSocket::setNoDelay(bool noDelay) {
noDelay_ = noDelay;
if (socket_ == THRIFT_INVALID_SOCKET || !path_.empty()) {
return;
}
// Set socket to NODELAY
int v = noDelay_ ? 1 : 0;
int ret = setsockopt(socket_, IPPROTO_TCP, TCP_NODELAY, cast_sockopt(&v), sizeof(v));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setNoDelay() setsockopt() " + getSocketInfo(), errno_copy);
}
}
void TSocket::setConnTimeout(int ms) {
connTimeout_ = ms;
}
void setGenericTimeout(THRIFT_SOCKET s, int timeout_ms, int optname)
{
if (timeout_ms < 0) {
char errBuf[512];
sprintf(errBuf, "TSocket::setGenericTimeout with negative input: %d\n", timeout_ms);
GlobalOutput(errBuf);
return;
}
if (s == THRIFT_INVALID_SOCKET) {
return;
}
#ifdef _WIN32
DWORD platform_time = static_cast<DWORD>(timeout_ms);
#else
struct timeval platform_time = {
(int)(timeout_ms/1000),
(int)((timeout_ms%1000)*1000)};
#endif
int ret = setsockopt(s, SOL_SOCKET, optname, cast_sockopt(&platform_time), sizeof(platform_time));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setGenericTimeout() setsockopt() ", errno_copy);
}
}
void TSocket::setRecvTimeout(int ms) {
setGenericTimeout(socket_, ms, SO_RCVTIMEO);
recvTimeout_ = ms;
}
void TSocket::setSendTimeout(int ms) {
setGenericTimeout(socket_, ms, SO_SNDTIMEO);
sendTimeout_ = ms;
}
void TSocket::setKeepAlive(bool keepAlive) {
keepAlive_ = keepAlive;
  if (socket_ == THRIFT_INVALID_SOCKET) {
return;
}
int value = keepAlive_;
int ret = setsockopt(socket_, SOL_SOCKET, SO_KEEPALIVE, const_cast_sockopt(&value), sizeof(value));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setKeepAlive() setsockopt() " + getSocketInfo(), errno_copy);
}
}
void TSocket::setMaxRecvRetries(int maxRecvRetries) {
maxRecvRetries_ = maxRecvRetries;
}
string TSocket::getSocketInfo() {
std::ostringstream oss;
if (host_.empty() || port_ == 0) {
oss << "<Host: " << getPeerAddress();
oss << " Port: " << getPeerPort() << ">";
} else {
oss << "<Host: " << host_ << " Port: " << port_ << ">";
}
return oss.str();
}
std::string TSocket::getPeerHost() {
if (peerHost_.empty() && path_.empty()) {
struct sockaddr_storage addr;
struct sockaddr* addrPtr;
socklen_t addrLen;
if (socket_ == THRIFT_INVALID_SOCKET) {
return host_;
}
addrPtr = getCachedAddress(&addrLen);
if (addrPtr == NULL) {
addrLen = sizeof(addr);
if (getpeername(socket_, (sockaddr*) &addr, &addrLen) != 0) {
return peerHost_;
}
addrPtr = (sockaddr*)&addr;
setCachedAddress(addrPtr, addrLen);
}
char clienthost[NI_MAXHOST];
char clientservice[NI_MAXSERV];
getnameinfo((sockaddr*) addrPtr, addrLen,
clienthost, sizeof(clienthost),
clientservice, sizeof(clientservice), 0);
peerHost_ = clienthost;
}
return peerHost_;
}
std::string TSocket::getPeerAddress() {
if (peerAddress_.empty() && path_.empty()) {
struct sockaddr_storage addr;
struct sockaddr* addrPtr;
socklen_t addrLen;
if (socket_ == THRIFT_INVALID_SOCKET) {
return peerAddress_;
}
addrPtr = getCachedAddress(&addrLen);
if (addrPtr == NULL) {
addrLen = sizeof(addr);
if (getpeername(socket_, (sockaddr*) &addr, &addrLen) != 0) {
return peerAddress_;
}
addrPtr = (sockaddr*)&addr;
setCachedAddress(addrPtr, addrLen);
}
char clienthost[NI_MAXHOST];
char clientservice[NI_MAXSERV];
getnameinfo(addrPtr, addrLen,
clienthost, sizeof(clienthost),
clientservice, sizeof(clientservice),
NI_NUMERICHOST|NI_NUMERICSERV);
peerAddress_ = clienthost;
peerPort_ = std::atoi(clientservice);
}
return peerAddress_;
}
int TSocket::getPeerPort() {
getPeerAddress();
return peerPort_;
}
void TSocket::setCachedAddress(const sockaddr* addr, socklen_t len) {
if (!path_.empty()) {
return;
}
switch (addr->sa_family) {
case AF_INET:
if (len == sizeof(sockaddr_in)) {
memcpy((void*)&cachedPeerAddr_.ipv4, (void*)addr, len);
}
break;
case AF_INET6:
if (len == sizeof(sockaddr_in6)) {
memcpy((void*)&cachedPeerAddr_.ipv6, (void*)addr, len);
}
break;
}
}
sockaddr* TSocket::getCachedAddress(socklen_t* len) const {
switch (cachedPeerAddr_.ipv4.sin_family) {
case AF_INET:
*len = sizeof(sockaddr_in);
return (sockaddr*) &cachedPeerAddr_.ipv4;
case AF_INET6:
*len = sizeof(sockaddr_in6);
return (sockaddr*) &cachedPeerAddr_.ipv6;
default:
return NULL;
}
}
bool TSocket::useLowMinRto_ = false;
void TSocket::setUseLowMinRto(bool useLowMinRto) {
useLowMinRto_ = useLowMinRto;
}
bool TSocket::getUseLowMinRto() {
return useLowMinRto_;
}
const std::string TSocket::getOrigin() {
std::ostringstream oss;
oss << getPeerHost() << ":" << getPeerPort();
return oss.str();
}
}}} // apache::thrift::transport
| rewardStyle/apache.thrift | lib/cpp/src/thrift/transport/TSocket.cpp | C++ | apache-2.0 | 23,335 |
/*-
*
* * Copyright 2016 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.deeplearning4j.nn.conf.graph;
import java.util.Arrays;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.inputs.InvalidInputTypeException;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.api.ndarray.INDArray;
/** A MergeVertex is used to combine the activations of two or more layers/GraphVertex by means of concatenation/merging.<br>
* Exactly how this is done depends on the type of input.<br>
* For 2d (feed forward layer) inputs: MergeVertex([numExamples,layerSize1],[numExamples,layerSize2]) -> [numExamples,layerSize1 + layerSize2]<br>
* For 3d (time series) inputs: MergeVertex([numExamples,layerSize1,timeSeriesLength],[numExamples,layerSize2,timeSeriesLength])
* -> [numExamples,layerSize1 + layerSize2,timeSeriesLength]<br>
* For 4d (convolutional) inputs: MergeVertex([numExamples,depth1,width,height],[numExamples,depth2,width,height])
* -> [numExamples,depth1 + depth2,width,height]<br>
* @author Alex Black
*/
public class MergeVertex extends GraphVertex {
@Override
public MergeVertex clone() {
return new MergeVertex();
}
@Override
public boolean equals(Object o) {
return o instanceof MergeVertex;
}
@Override
public int hashCode() {
return 433682566;
}
@Override
public int numParams(boolean backprop) {
return 0;
}
@Override
public org.deeplearning4j.nn.graph.vertex.GraphVertex instantiate(ComputationGraph graph, String name, int idx,
INDArray paramsView, boolean initializeParams) {
return new org.deeplearning4j.nn.graph.vertex.impl.MergeVertex(graph, name, idx);
}
@Override
public InputType getOutputType(int layerIndex, InputType... vertexInputs) throws InvalidInputTypeException {
if (vertexInputs.length == 1)
return vertexInputs[0];
InputType first = vertexInputs[0];
if (first.getType() == InputType.Type.CNNFlat) {
//TODO
//Merging flattened CNN format data could be messy?
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot currently merge CNN data in flattened format. Got: "
                                    + Arrays.toString(vertexInputs));
} else if (first.getType() != InputType.Type.CNN) {
//FF or RNN data inputs
int size = 0;
InputType.Type type = null;
for (int i = 0; i < vertexInputs.length; i++) {
if (vertexInputs[i].getType() != first.getType()) {
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot merge activations of different types:"
+ " first type = " + first.getType() + ", input type " + (i + 1)
+ " = " + vertexInputs[i].getType());
}
int thisSize;
switch (vertexInputs[i].getType()) {
case FF:
thisSize = ((InputType.InputTypeFeedForward) vertexInputs[i]).getSize();
type = InputType.Type.FF;
break;
case RNN:
thisSize = ((InputType.InputTypeRecurrent) vertexInputs[i]).getSize();
type = InputType.Type.RNN;
break;
default:
throw new IllegalStateException("Unknown input type: " + vertexInputs[i]); //Should never happen
}
if (thisSize <= 0) {//Size is not defined
size = -1;
} else {
size += thisSize;
}
}
if (size > 0) {
//Size is specified
if (type == InputType.Type.FF)
return InputType.feedForward(size);
else
return InputType.recurrent(size);
} else {
//size is unknown
if (type == InputType.Type.FF)
return InputType.feedForward(-1);
else
return InputType.recurrent(-1);
}
} else {
//CNN inputs... also check that the depth, width and heights match:
InputType.InputTypeConvolutional firstConv = (InputType.InputTypeConvolutional) first;
int fd = firstConv.getDepth();
int fw = firstConv.getWidth();
int fh = firstConv.getHeight();
int depthSum = fd;
for (int i = 1; i < vertexInputs.length; i++) {
if (vertexInputs[i].getType() != InputType.Type.CNN) {
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot process activations of different types:"
+ " first type = " + InputType.Type.CNN + ", input type " + (i + 1)
+ " = " + vertexInputs[i].getType());
}
InputType.InputTypeConvolutional otherConv = (InputType.InputTypeConvolutional) vertexInputs[i];
int od = otherConv.getDepth();
int ow = otherConv.getWidth();
int oh = otherConv.getHeight();
if (fw != ow || fh != oh) {
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot merge CNN activations of different width/heights:"
+ "first [depth,width,height] = [" + fd + "," + fw + "," + fh
+ "], input " + i + " = [" + od + "," + ow + "," + oh + "]");
}
depthSum += od;
}
return InputType.convolutional(fh, fw, depthSum);
}
}
}
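/*
 * A minimal usage sketch, assuming the standard DL4J graph-builder API
 * (NeuralNetConfiguration.Builder, DenseLayer, OutputLayer); the layer names and sizes
 * below are illustrative. Two dense layers of size 5 and 7 feed the MergeVertex, so the
 * merged feed-forward activations have size 5 + 7 = 12, matching getOutputType().
 *
 * ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
 *         .graphBuilder()
 *         .addInputs("in1", "in2")
 *         .addLayer("dense1", new DenseLayer.Builder().nIn(10).nOut(5).build(), "in1")
 *         .addLayer("dense2", new DenseLayer.Builder().nIn(10).nOut(7).build(), "in2")
 *         .addVertex("merge", new MergeVertex(), "dense1", "dense2")
 *         .addLayer("out", new OutputLayer.Builder().nIn(12).nOut(3).build(), "merge")
 *         .setOutputs("out")
 *         .build();
 */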
| dmmiller612/deeplearning4j | deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/graph/MergeVertex.java | Java | apache-2.0 | 6,727 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/codedeploy/model/ListApplicationRevisionsRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::CodeDeploy::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
ListApplicationRevisionsRequest::ListApplicationRevisionsRequest() :
m_applicationNameHasBeenSet(false),
m_sortBy(ApplicationRevisionSortBy::NOT_SET),
m_sortByHasBeenSet(false),
m_sortOrder(SortOrder::NOT_SET),
m_sortOrderHasBeenSet(false),
m_s3BucketHasBeenSet(false),
m_s3KeyPrefixHasBeenSet(false),
m_deployed(ListStateFilterAction::NOT_SET),
m_deployedHasBeenSet(false),
m_nextTokenHasBeenSet(false)
{
}
Aws::String ListApplicationRevisionsRequest::SerializePayload() const
{
JsonValue payload;
if(m_applicationNameHasBeenSet)
{
payload.WithString("applicationName", m_applicationName);
}
if(m_sortByHasBeenSet)
{
payload.WithString("sortBy", ApplicationRevisionSortByMapper::GetNameForApplicationRevisionSortBy(m_sortBy));
}
if(m_sortOrderHasBeenSet)
{
payload.WithString("sortOrder", SortOrderMapper::GetNameForSortOrder(m_sortOrder));
}
if(m_s3BucketHasBeenSet)
{
payload.WithString("s3Bucket", m_s3Bucket);
}
if(m_s3KeyPrefixHasBeenSet)
{
payload.WithString("s3KeyPrefix", m_s3KeyPrefix);
}
if(m_deployedHasBeenSet)
{
payload.WithString("deployed", ListStateFilterActionMapper::GetNameForListStateFilterAction(m_deployed));
}
if(m_nextTokenHasBeenSet)
{
payload.WithString("nextToken", m_nextToken);
}
return payload.WriteReadable();
}
Aws::Http::HeaderValueCollection ListApplicationRevisionsRequest::GetRequestSpecificHeaders() const
{
Aws::Http::HeaderValueCollection headers;
headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "CodeDeploy_20141006.ListApplicationRevisions"));
return headers;
}
| chiaming0914/awe-cpp-sdk | aws-cpp-sdk-codedeploy/source/model/ListApplicationRevisionsRequest.cpp | C++ | apache-2.0 | 2,461 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.builder.endpoint.dsl;
import java.util.Map;
import javax.annotation.Generated;
import org.apache.camel.ExchangePattern;
import org.apache.camel.builder.EndpointConsumerBuilder;
import org.apache.camel.builder.EndpointProducerBuilder;
import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
import org.apache.camel.spi.ExceptionHandler;
/**
 * The rabbitmq component allows you to produce and consume messages from RabbitMQ
* instances.
*
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Generated("org.apache.camel.maven.packaging.EndpointDslMojo")
public interface RabbitMQEndpointBuilderFactory {
/**
* Builder for endpoint consumers for the RabbitMQ component.
*/
public interface RabbitMQEndpointConsumerBuilder
extends
EndpointConsumerBuilder {
default AdvancedRabbitMQEndpointConsumerBuilder advanced() {
return (AdvancedRabbitMQEndpointConsumerBuilder) this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the setting of the addresses option. The
         * addresses value is a string which looks like server1:12345,
         * server2:12345.
*
* The option is a: <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder addresses(Object[] addresses) {
setProperty("addresses", addresses);
return this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the setting of the addresses option. The
         * addresses value is a string which looks like server1:12345,
         * server2:12345.
*
* The option will be converted to a
* <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder addresses(String addresses) {
setProperty("addresses", addresses);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder autoDelete(boolean autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder autoDelete(String autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
* type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder connectionFactory(
Object connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option will be converted to a
* <code>com.rabbitmq.client.ConnectionFactory</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder connectionFactory(
String connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* The name of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterExchange(
String deadLetterExchange) {
setProperty("deadLetterExchange", deadLetterExchange);
return this;
}
/**
* The type of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterExchangeType(
String deadLetterExchangeType) {
setProperty("deadLetterExchangeType", deadLetterExchangeType);
return this;
}
/**
* The name of the dead letter queue.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterQueue(
String deadLetterQueue) {
setProperty("deadLetterQueue", deadLetterQueue);
return this;
}
/**
* The routing key for the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterRoutingKey(
String deadLetterRoutingKey) {
setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
return this;
}
/**
         * If the option is true, camel declares the exchange and queue name
         * and binds them together. If the option is false, camel won't declare
         * the exchange and queue name on the server.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder declare(boolean declare) {
setProperty("declare", declare);
return this;
}
/**
         * If the option is true, camel declares the exchange and queue name
         * and binds them together. If the option is false, camel won't declare
         * the exchange and queue name on the server.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder declare(String declare) {
setProperty("declare", declare);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder durable(boolean durable) {
setProperty("durable", durable);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder durable(String durable) {
setProperty("durable", durable);
return this;
}
/**
* The exchange type such as direct or topic.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder exchangeType(String exchangeType) {
setProperty("exchangeType", exchangeType);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder exclusive(boolean exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder exclusive(String exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* The hostname of the running rabbitmq instance or cluster.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder hostname(String hostname) {
setProperty("hostname", hostname);
return this;
}
/**
         * Passive queues depend on the queue already being available at
* RabbitMQ.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder passive(boolean passive) {
setProperty("passive", passive);
return this;
}
/**
         * Passive queues depend on the queue already being available at
* RabbitMQ.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder passive(String passive) {
setProperty("passive", passive);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option is a: <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder portNumber(int portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option will be converted to a <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder portNumber(String portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* The queue to receive messages from.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder queue(String queue) {
setProperty("queue", queue);
return this;
}
/**
* The routing key to use when binding a consumer queue to the exchange.
* For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder routingKey(String routingKey) {
setProperty("routingKey", routingKey);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipExchangeDeclare(
boolean skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipExchangeDeclare(
String skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueBind(
boolean skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueBind(
String skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueDeclare(
boolean skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueDeclare(
String skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* The vhost for the channel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder vhost(String vhost) {
setProperty("vhost", vhost);
return this;
}
/**
* If messages should be auto acknowledged.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder autoAck(boolean autoAck) {
setProperty("autoAck", autoAck);
return this;
}
/**
* If messages should be auto acknowledged.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder autoAck(String autoAck) {
setProperty("autoAck", autoAck);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
         * which means any exceptions that occur while the consumer is trying
         * to pick up incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. By default the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions, that will be logged at WARN or ERROR level and
* ignored.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder bridgeErrorHandler(
boolean bridgeErrorHandler) {
setProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
         * which means any exceptions that occur while the consumer is trying
         * to pick up incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. By default the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions, that will be logged at WARN or ERROR level and
* ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder bridgeErrorHandler(
String bridgeErrorHandler) {
setProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
         * Number of concurrent consumers when consuming from the broker (e.g.
         * similar to the same option for the JMS component).
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder concurrentConsumers(
int concurrentConsumers) {
setProperty("concurrentConsumers", concurrentConsumers);
return this;
}
/**
         * Number of concurrent consumers when consuming from the broker (e.g.
         * similar to the same option for the JMS component).
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder concurrentConsumers(
String concurrentConsumers) {
setProperty("concurrentConsumers", concurrentConsumers);
return this;
}
/**
* Request exclusive access to the queue (meaning only this consumer can
* access the queue). This is useful when you want a long-lived shared
* queue to be temporarily accessible by just one consumer.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder exclusiveConsumer(
boolean exclusiveConsumer) {
setProperty("exclusiveConsumer", exclusiveConsumer);
return this;
}
/**
* Request exclusive access to the queue (meaning only this consumer can
* access the queue). This is useful when you want a long-lived shared
* queue to be temporarily accessible by just one consumer.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder exclusiveConsumer(
String exclusiveConsumer) {
setProperty("exclusiveConsumer", exclusiveConsumer);
return this;
}
/**
* The maximum number of messages that the server will deliver, 0 if
* unlimited. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchCount(int prefetchCount) {
setProperty("prefetchCount", prefetchCount);
return this;
}
/**
* The maximum number of messages that the server will deliver, 0 if
* unlimited. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchCount(
String prefetchCount) {
setProperty("prefetchCount", prefetchCount);
return this;
}
/**
* Enables the quality of service on the RabbitMQConsumer side. You need
* to specify the option of prefetchSize, prefetchCount, prefetchGlobal
* at the same time.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchEnabled(
boolean prefetchEnabled) {
setProperty("prefetchEnabled", prefetchEnabled);
return this;
}
/**
* Enables the quality of service on the RabbitMQConsumer side. You need
* to specify the option of prefetchSize, prefetchCount, prefetchGlobal
* at the same time.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchEnabled(
String prefetchEnabled) {
setProperty("prefetchEnabled", prefetchEnabled);
return this;
}
/**
* If the settings should be applied to the entire channel rather than
         * each consumer. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchGlobal(
boolean prefetchGlobal) {
setProperty("prefetchGlobal", prefetchGlobal);
return this;
}
/**
* If the settings should be applied to the entire channel rather than
         * each consumer. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchGlobal(
String prefetchGlobal) {
setProperty("prefetchGlobal", prefetchGlobal);
return this;
}
/**
* The maximum amount of content (measured in octets) that the server
* will deliver, 0 if unlimited. You need to specify the option of
* prefetchSize, prefetchCount, prefetchGlobal at the same time.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchSize(int prefetchSize) {
setProperty("prefetchSize", prefetchSize);
return this;
}
/**
* The maximum amount of content (measured in octets) that the server
* will deliver, 0 if unlimited. You need to specify the option of
* prefetchSize, prefetchCount, prefetchGlobal at the same time.
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchSize(String prefetchSize) {
setProperty("prefetchSize", prefetchSize);
return this;
}
/**
* Password for authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder password(String password) {
setProperty("password", password);
return this;
}
/**
         * Enables SSL on connection, accepted values are true, TLS and SSLv3.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder sslProtocol(String sslProtocol) {
setProperty("sslProtocol", sslProtocol);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option is a: <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder trustManager(Object trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option will be converted to a
* <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder trustManager(String trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Username in case of authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder username(String username) {
setProperty("username", username);
return this;
}
}
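    /*
     * A minimal consumer sketch, assuming Camel's EndpointRouteBuilder and a rabbitmq(...)
     * factory method from this endpoint DSL; the exchange, queue and host values are
     * illustrative. It consumes from a durable "orders" queue bound to the "cheese"
     * exchange and logs each message:
     *
     * from(rabbitmq("cheese")
     *         .hostname("localhost").portNumber(5672)
     *         .queue("orders").durable(true).autoDelete(false)
     *         .autoAck(false).concurrentConsumers(2))
     *     .to("log:received");
     */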
/**
* Advanced builder for endpoint consumers for the RabbitMQ component.
*/
public interface AdvancedRabbitMQEndpointConsumerBuilder
extends
EndpointConsumerBuilder {
default RabbitMQEndpointConsumerBuilder basic() {
return (RabbitMQEndpointConsumerBuilder) this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exceptionHandler(
ExceptionHandler exceptionHandler) {
setProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exceptionHandler(
String exceptionHandler) {
setProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exchangePattern(
ExchangePattern exchangePattern) {
setProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exchangePattern(
String exchangePattern) {
setProperty("exchangePattern", exchangePattern);
return this;
}
/**
* The consumer uses a Thread Pool Executor with a fixed number of
* threads. This setting allows you to set that number of threads.
*
* The option is a: <code>int</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder threadPoolSize(
int threadPoolSize) {
setProperty("threadPoolSize", threadPoolSize);
return this;
}
/**
* The consumer uses a Thread Pool Executor with a fixed number of
* threads. This setting allows you to set that number of threads.
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder threadPoolSize(
String threadPoolSize) {
setProperty("threadPoolSize", threadPoolSize);
return this;
}
/**
         * Specify arguments for configuring the different RabbitMQ concepts; a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
* http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder args(
Map<String, Object> args) {
setProperty("args", args);
return this;
}
/**
         * Specify arguments for configuring the different RabbitMQ concepts; a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
* http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder args(String args) {
setProperty("args", args);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder automaticRecoveryEnabled(
Boolean automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder automaticRecoveryEnabled(
String automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder basicPropertyBinding(
boolean basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder basicPropertyBinding(
String basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder clientProperties(
Map<String, Object> clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder clientProperties(
String clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection timeout.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder connectionTimeout(
int connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder connectionTimeout(
String connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder networkRecoveryInterval(
Integer networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder networkRecoveryInterval(
String networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedChannelMax(
int requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedChannelMax(
String requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedFrameMax(
int requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedFrameMax(
String requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedHeartbeat(
int requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedHeartbeat(
String requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeout(
long requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeout(
String requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeoutCheckerInterval(
long requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeoutCheckerInterval(
String requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder synchronous(
boolean synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder synchronous(
String synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder topologyRecoveryEnabled(
Boolean topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder topologyRecoveryEnabled(
String topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
         * When true and an inOut Exchange fails on the consumer side, the
         * caused Exception is sent back in the response.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder transferException(
boolean transferException) {
setProperty("transferException", transferException);
return this;
}
/**
         * When true and an inOut Exchange fails on the consumer side, the
         * caused Exception is sent back in the response.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder transferException(
String transferException) {
setProperty("transferException", transferException);
return this;
}
}
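    /*
     * A sketch of the advanced consumer options, under the same assumptions as above; the
     * queue argument follows the arg.queue. prefix convention described on args(...), and
     * the TTL value is illustrative:
     *
     * Map<String, Object> args = new HashMap<>();
     * args.put("arg.queue.x-message-ttl", 60000);
     *
     * from(rabbitmq("cheese").queue("orders")
     *         .advanced()
     *         .args(args)
     *         .connectionTimeout(30000)
     *         .networkRecoveryInterval(5000))
     *     .to("log:received");
     */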
/**
* Builder for endpoint producers for the RabbitMQ component.
*/
public interface RabbitMQEndpointProducerBuilder
extends
EndpointProducerBuilder {
default AdvancedRabbitMQEndpointProducerBuilder advanced() {
return (AdvancedRabbitMQEndpointProducerBuilder) this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the setting of the addresses option. The
         * addresses value is a string which looks like server1:12345,
         * server2:12345.
*
* The option is a: <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder addresses(Object[] addresses) {
setProperty("addresses", addresses);
return this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the setting of the addresses option. The
         * addresses value is a string which looks like server1:12345,
         * server2:12345.
*
* The option will be converted to a
* <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder addresses(String addresses) {
setProperty("addresses", addresses);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder autoDelete(boolean autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder autoDelete(String autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
* type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder connectionFactory(
Object connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option will be converted to a
* <code>com.rabbitmq.client.ConnectionFactory</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder connectionFactory(
String connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* The name of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterExchange(
String deadLetterExchange) {
setProperty("deadLetterExchange", deadLetterExchange);
return this;
}
/**
* The type of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterExchangeType(
String deadLetterExchangeType) {
setProperty("deadLetterExchangeType", deadLetterExchangeType);
return this;
}
/**
* The name of the dead letter queue.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterQueue(
String deadLetterQueue) {
setProperty("deadLetterQueue", deadLetterQueue);
return this;
}
/**
* The routing key for the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterRoutingKey(
String deadLetterRoutingKey) {
setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
return this;
}
/**
         * If the option is true, camel declares the exchange and queue name
         * and binds them together. If the option is false, camel won't declare
         * the exchange and queue name on the server.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder declare(boolean declare) {
setProperty("declare", declare);
return this;
}
/**
         * If the option is true, camel declares the exchange and queue name
         * and binds them together. If the option is false, camel won't declare
         * the exchange and queue name on the server.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder declare(String declare) {
setProperty("declare", declare);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder durable(boolean durable) {
setProperty("durable", durable);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder durable(String durable) {
setProperty("durable", durable);
return this;
}
/**
* The exchange type such as direct or topic.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder exchangeType(String exchangeType) {
setProperty("exchangeType", exchangeType);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder exclusive(boolean exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder exclusive(String exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* The hostname of the running rabbitmq instance or cluster.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder hostname(String hostname) {
setProperty("hostname", hostname);
return this;
}
/**
         * Passive queues depend on the queue already being available at
* RabbitMQ.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder passive(boolean passive) {
setProperty("passive", passive);
return this;
}
/**
         * Passive queues depend on the queue already being available at
* RabbitMQ.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder passive(String passive) {
setProperty("passive", passive);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option is a: <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder portNumber(int portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option will be converted to a <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder portNumber(String portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* The queue to receive messages from.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder queue(String queue) {
setProperty("queue", queue);
return this;
}
/**
* The routing key to use when binding a consumer queue to the exchange.
* For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder routingKey(String routingKey) {
setProperty("routingKey", routingKey);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipExchangeDeclare(
boolean skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipExchangeDeclare(
String skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueBind(
boolean skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueBind(
String skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueDeclare(
boolean skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueDeclare(
String skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* The vhost for the channel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder vhost(String vhost) {
setProperty("vhost", vhost);
return this;
}
/**
         * Allow passing null values to headers.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder allowNullHeaders(
boolean allowNullHeaders) {
setProperty("allowNullHeaders", allowNullHeaders);
return this;
}
/**
         * Allow passing null values to headers.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder allowNullHeaders(
String allowNullHeaders) {
setProperty("allowNullHeaders", allowNullHeaders);
return this;
}
/**
* If the bridgeEndpoint is true, the producer will ignore the message
* header of rabbitmq.EXCHANGE_NAME and rabbitmq.ROUTING_KEY.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder bridgeEndpoint(
boolean bridgeEndpoint) {
setProperty("bridgeEndpoint", bridgeEndpoint);
return this;
}
/**
* If the bridgeEndpoint is true, the producer will ignore the message
* header of rabbitmq.EXCHANGE_NAME and rabbitmq.ROUTING_KEY.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder bridgeEndpoint(
String bridgeEndpoint) {
setProperty("bridgeEndpoint", bridgeEndpoint);
return this;
}
/**
         * Maximum number of open channels in the pool.
*
* The option is a: <code>int</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxSize(
int channelPoolMaxSize) {
setProperty("channelPoolMaxSize", channelPoolMaxSize);
return this;
}
/**
         * Maximum number of open channels in the pool.
*
* The option will be converted to a <code>int</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxSize(
String channelPoolMaxSize) {
setProperty("channelPoolMaxSize", channelPoolMaxSize);
return this;
}
/**
* Set the maximum number of milliseconds to wait for a channel from the
* pool.
*
* The option is a: <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxWait(
long channelPoolMaxWait) {
setProperty("channelPoolMaxWait", channelPoolMaxWait);
return this;
}
/**
* Set the maximum number of milliseconds to wait for a channel from the
* pool.
*
* The option will be converted to a <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxWait(
String channelPoolMaxWait) {
setProperty("channelPoolMaxWait", channelPoolMaxWait);
return this;
}
/**
* When true, an exception will be thrown when the message cannot be
* delivered (basic.return) and the message is marked as mandatory.
* PublisherAcknowledgement will also be activated in this case. See
* also publisher acknowledgements - When will messages be confirmed.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder guaranteedDeliveries(
boolean guaranteedDeliveries) {
setProperty("guaranteedDeliveries", guaranteedDeliveries);
return this;
}
/**
* When true, an exception will be thrown when the message cannot be
* delivered (basic.return) and the message is marked as mandatory.
* PublisherAcknowledgement will also be activated in this case. See
* also publisher acknowledgements - When will messages be confirmed.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder guaranteedDeliveries(
String guaranteedDeliveries) {
setProperty("guaranteedDeliveries", guaranteedDeliveries);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue consumer immediately. If this flag is set, the
* server will return an undeliverable message with a Return method. If
* this flag is zero, the server will queue the message, but with no
         * guarantee that it will ever be consumed. If the header
         * rabbitmq.IMMEDIATE is present, it will override this option.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder immediate(boolean immediate) {
setProperty("immediate", immediate);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue consumer immediately. If this flag is set, the
* server will return an undeliverable message with a Return method. If
* this flag is zero, the server will queue the message, but with no
         * guarantee that it will ever be consumed. If the header
         * rabbitmq.IMMEDIATE is present, it will override this option.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder immediate(String immediate) {
setProperty("immediate", immediate);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder lazyStartProducer(
boolean lazyStartProducer) {
setProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder lazyStartProducer(
String lazyStartProducer) {
setProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue. If this flag is set, the server will return an
* unroutable message with a Return method. If this flag is zero, the
         * server silently drops the message. If the header
         * rabbitmq.MANDATORY is present, it will override this option.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder mandatory(boolean mandatory) {
setProperty("mandatory", mandatory);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue. If this flag is set, the server will return an
* unroutable message with a Return method. If this flag is zero, the
         * server silently drops the message. If the header
         * rabbitmq.MANDATORY is present, it will override this option.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder mandatory(String mandatory) {
setProperty("mandatory", mandatory);
return this;
}
/**
* When true, the message will be published with publisher
* acknowledgements turned on.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgements(
boolean publisherAcknowledgements) {
setProperty("publisherAcknowledgements", publisherAcknowledgements);
return this;
}
/**
* When true, the message will be published with publisher
* acknowledgements turned on.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgements(
String publisherAcknowledgements) {
setProperty("publisherAcknowledgements", publisherAcknowledgements);
return this;
}
/**
* The amount of time in milliseconds to wait for a basic.ack response
* from RabbitMQ server.
*
* The option is a: <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgementsTimeout(
long publisherAcknowledgementsTimeout) {
setProperty("publisherAcknowledgementsTimeout", publisherAcknowledgementsTimeout);
return this;
}
/**
* The amount of time in milliseconds to wait for a basic.ack response
* from RabbitMQ server.
*
* The option will be converted to a <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgementsTimeout(
String publisherAcknowledgementsTimeout) {
setProperty("publisherAcknowledgementsTimeout", publisherAcknowledgementsTimeout);
return this;
}
/**
* Password for authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder password(String password) {
setProperty("password", password);
return this;
}
/**
         * Enables SSL on connection, accepted values are true, TLS and SSLv3.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder sslProtocol(String sslProtocol) {
setProperty("sslProtocol", sslProtocol);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option is a: <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder trustManager(Object trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option will be converted to a
* <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder trustManager(String trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Username in case of authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder username(String username) {
setProperty("username", username);
return this;
}
}
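    // A minimal producer-side usage sketch for the options above, kept as a
    // comment so the generated interface itself is unchanged. Assumptions: it
    // runs inside a route builder that mixes in this factory (so rabbitMQ(...)
    // is in scope), the broker is on localhost, and "direct:start"/"orders"
    // are illustrative names only.
    //
    //   from("direct:start")
    //       .to(rabbitMQ("orders")
    //               .hostname("localhost").portNumber(5672)
    //               .routingKey("order.created")
    //               .mandatory(true)
    //               .publisherAcknowledgements(true));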
/**
* Advanced builder for endpoint producers for the RabbitMQ component.
*/
public interface AdvancedRabbitMQEndpointProducerBuilder
extends
EndpointProducerBuilder {
default RabbitMQEndpointProducerBuilder basic() {
return (RabbitMQEndpointProducerBuilder) this;
}
/**
* Specify arguments for configuring the different RabbitMQ concepts, a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
         * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder args(
Map<String, Object> args) {
setProperty("args", args);
return this;
}
/**
* Specify arguments for configuring the different RabbitMQ concepts, a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
         * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder args(String args) {
setProperty("args", args);
return this;
}
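        // Illustrative use of the prefix scheme documented above, as a comment
        // only. Assumptions: a plain HashMap is acceptable, "builder" stands
        // for any AdvancedRabbitMQEndpointProducerBuilder instance, and the
        // argument values are examples rather than recommendations.
        //
        //   Map<String, Object> args = new HashMap<>();
        //   args.put("arg.queue.x-message-ttl", 60000);
        //   args.put("arg.exchange.alternate-exchange", "my.alternate");
        //   builder.args(args);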
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder automaticRecoveryEnabled(
Boolean automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder automaticRecoveryEnabled(
String automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder basicPropertyBinding(
boolean basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder basicPropertyBinding(
String basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder clientProperties(
Map<String, Object> clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder clientProperties(
String clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection timeout.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder connectionTimeout(
int connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder connectionTimeout(
String connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder networkRecoveryInterval(
Integer networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder networkRecoveryInterval(
String networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedChannelMax(
int requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedChannelMax(
String requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedFrameMax(
int requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedFrameMax(
String requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedHeartbeat(
int requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedHeartbeat(
String requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeout(
long requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeout(
String requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeoutCheckerInterval(
long requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeoutCheckerInterval(
String requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder synchronous(
boolean synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder synchronous(
String synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder topologyRecoveryEnabled(
Boolean topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder topologyRecoveryEnabled(
String topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, the
         * caused Exception is sent back in the response.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder transferException(
boolean transferException) {
setProperty("transferException", transferException);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, the
         * caused Exception is sent back in the response.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder transferException(
String transferException) {
setProperty("transferException", transferException);
return this;
}
}
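    // Connection-tuning sketch for the advanced options above, as a comment
    // only. Assumptions: it uses the advanced() accessor declared on
    // RabbitMQEndpointBuilder below, and the numeric values are illustrative,
    // not recommendations.
    //
    //   rabbitMQ("orders").advanced()
    //       .connectionTimeout(30000)
    //       .networkRecoveryInterval(5000)
    //       .requestTimeout(20000);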
/**
* Builder for endpoint for the RabbitMQ component.
*/
public interface RabbitMQEndpointBuilder
extends
RabbitMQEndpointConsumerBuilder, RabbitMQEndpointProducerBuilder {
default AdvancedRabbitMQEndpointBuilder advanced() {
return (AdvancedRabbitMQEndpointBuilder) this;
}
/**
     * If this option is set, camel-rabbitmq will try to create a connection
* based on the setting of option addresses. The addresses value is a
* string which looks like server1:12345, server2:12345.
*
* The option is a: <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder addresses(Object[] addresses) {
setProperty("addresses", addresses);
return this;
}
/**
     * If this option is set, camel-rabbitmq will try to create a connection
* based on the setting of option addresses. The addresses value is a
* string which looks like server1:12345, server2:12345.
*
* The option will be converted to a
* <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder addresses(String addresses) {
setProperty("addresses", addresses);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder autoDelete(boolean autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder autoDelete(String autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
* type.
*
* Group: common
*/
default RabbitMQEndpointBuilder connectionFactory(
Object connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option will be converted to a
* <code>com.rabbitmq.client.ConnectionFactory</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder connectionFactory(
String connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* The name of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterExchange(
String deadLetterExchange) {
setProperty("deadLetterExchange", deadLetterExchange);
return this;
}
/**
* The type of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterExchangeType(
String deadLetterExchangeType) {
setProperty("deadLetterExchangeType", deadLetterExchangeType);
return this;
}
/**
* The name of the dead letter queue.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterQueue(String deadLetterQueue) {
setProperty("deadLetterQueue", deadLetterQueue);
return this;
}
/**
* The routing key for the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterRoutingKey(
String deadLetterRoutingKey) {
setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
return this;
}
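        // Dead-letter wiring sketch combining the four options above, as a
        // comment only; the exchange, queue and routing-key names are
        // illustrative.
        //
        //   rabbitMQ("orders")
        //       .deadLetterExchange("orders.dlx")
        //       .deadLetterExchangeType("direct")
        //       .deadLetterQueue("orders.dlq")
        //       .deadLetterRoutingKey("order.dead");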
/**
     * If the option is true, Camel declares the exchange and queue name and
     * binds them together. If the option is false, Camel won't declare the
     * exchange and queue name on the server.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder declare(boolean declare) {
setProperty("declare", declare);
return this;
}
/**
     * If the option is true, Camel declares the exchange and queue name and
     * binds them together. If the option is false, Camel won't declare the
     * exchange and queue name on the server.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder declare(String declare) {
setProperty("declare", declare);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder durable(boolean durable) {
setProperty("durable", durable);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder durable(String durable) {
setProperty("durable", durable);
return this;
}
/**
* The exchange type such as direct or topic.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder exchangeType(String exchangeType) {
setProperty("exchangeType", exchangeType);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder exclusive(boolean exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder exclusive(String exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* The hostname of the running rabbitmq instance or cluster.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder hostname(String hostname) {
setProperty("hostname", hostname);
return this;
}
/**
     * Passive queues depend on the queue already being available at
     * RabbitMQ.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder passive(boolean passive) {
setProperty("passive", passive);
return this;
}
/**
     * Passive queues depend on the queue already being available at
     * RabbitMQ.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder passive(String passive) {
setProperty("passive", passive);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option is a: <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder portNumber(int portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option will be converted to a <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder portNumber(String portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* The queue to receive messages from.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder queue(String queue) {
setProperty("queue", queue);
return this;
}
/**
* The routing key to use when binding a consumer queue to the exchange.
* For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder routingKey(String routingKey) {
setProperty("routingKey", routingKey);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipExchangeDeclare(
boolean skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipExchangeDeclare(
String skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueBind(boolean skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueBind(String skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueDeclare(
boolean skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueDeclare(String skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* The vhost for the channel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder vhost(String vhost) {
setProperty("vhost", vhost);
return this;
}
/**
* Password for authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder password(String password) {
setProperty("password", password);
return this;
}
/**
     * Enables SSL on connection, accepted values are true, TLS and SSLv3.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder sslProtocol(String sslProtocol) {
setProperty("sslProtocol", sslProtocol);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option is a: <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder trustManager(Object trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option will be converted to a
* <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder trustManager(String trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Username in case of authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder username(String username) {
setProperty("username", username);
return this;
}
}
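    // A consumer-side usage sketch combining several of the common options
    // above, as a comment only. Assumptions: a route builder that mixes in
    // this factory, a cluster reachable via the listed addresses, and
    // illustrative exchange/queue names.
    //
    //   from(rabbitMQ("orders")
    //           .addresses("server1:5672,server2:5672")
    //           .queue("orderQueue")
    //           .exchangeType("topic")
    //           .routingKey("order.#")
    //           .durable(true).autoDelete(false))
    //       .to("log:orders");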
/**
* Advanced builder for endpoint for the RabbitMQ component.
*/
public interface AdvancedRabbitMQEndpointBuilder
extends
AdvancedRabbitMQEndpointConsumerBuilder, AdvancedRabbitMQEndpointProducerBuilder {
default RabbitMQEndpointBuilder basic() {
return (RabbitMQEndpointBuilder) this;
}
/**
* Specify arguments for configuring the different RabbitMQ concepts, a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
     * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder args(Map<String, Object> args) {
setProperty("args", args);
return this;
}
/**
* Specify arguments for configuring the different RabbitMQ concepts, a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
     * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder args(String args) {
setProperty("args", args);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder automaticRecoveryEnabled(
Boolean automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder automaticRecoveryEnabled(
String automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder basicPropertyBinding(
boolean basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder basicPropertyBinding(
String basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder clientProperties(
Map<String, Object> clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder clientProperties(
String clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection timeout.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder connectionTimeout(
int connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder connectionTimeout(
String connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder networkRecoveryInterval(
Integer networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder networkRecoveryInterval(
String networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedChannelMax(
int requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedChannelMax(
String requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedFrameMax(
int requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedFrameMax(
String requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedHeartbeat(
int requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedHeartbeat(
String requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeout(
long requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeout(
String requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeoutCheckerInterval(
long requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeoutCheckerInterval(
String requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder synchronous(boolean synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder synchronous(String synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder topologyRecoveryEnabled(
Boolean topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder topologyRecoveryEnabled(
String topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
     * When true and an inOut Exchange failed on the consumer side, the
     * caused Exception is sent back in the response.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder transferException(
boolean transferException) {
setProperty("transferException", transferException);
return this;
}
/**
     * When true and an inOut Exchange failed on the consumer side, the
     * caused Exception is sent back in the response.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder transferException(
String transferException) {
setProperty("transferException", transferException);
return this;
}
}
/**
* RabbitMQ (camel-rabbitmq)
     * The rabbitmq component allows you to produce and consume messages from
* RabbitMQ instances.
*
* Category: messaging
* Available as of version: 2.12
* Maven coordinates: org.apache.camel:camel-rabbitmq
*
* Syntax: <code>rabbitmq:exchangeName</code>
*
* Path parameter: exchangeName (required)
     * The exchange name determines which exchange produced messages will be sent
* to. In the case of consumers, the exchange name determines which exchange
* the queue will bind to.
*/
default RabbitMQEndpointBuilder rabbitMQ(String path) {
class RabbitMQEndpointBuilderImpl extends AbstractEndpointBuilder implements RabbitMQEndpointBuilder, AdvancedRabbitMQEndpointBuilder {
public RabbitMQEndpointBuilderImpl(String path) {
super("rabbitmq", path);
}
}
return new RabbitMQEndpointBuilderImpl(path);
}
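    // Note (comment only; "orders" is an illustrative exchange name): a call
    // such as rabbitMQ("orders") corresponds to the URI "rabbitmq:orders"
    // described in the syntax above, with the path parameter supplying the
    // exchange name.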
}
| Fabryprog/camel | core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/RabbitMQEndpointBuilderFactory.java | Java | apache-2.0 | 115,871 |
/*
* Copyright (c) 2014-2015 University of Ulm
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package components.execution;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import java.util.Set;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* Created by daniel on 24.07.15.
*/
@Singleton public class Init {
@Inject public Init(ExecutionService executionService, Set<Runnable> runnables,
Set<Schedulable> schedulables) {
checkNotNull(executionService);
checkNotNull(runnables);
checkNotNull(schedulables);
for (Runnable runnable : runnables) {
executionService.execute(runnable);
}
for (Schedulable schedulable : schedulables) {
executionService.schedule(schedulable);
}
}
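    /*
     * A minimal sketch of how the injected sets could be populated elsewhere
     * in the application (assumptions: Guice multibindings are used, and
     * MyTask/MyJob are hypothetical Runnable and Schedulable implementations):
     *
     *   Multibinder<Runnable> runnables =
     *       Multibinder.newSetBinder(binder(), Runnable.class);
     *   runnables.addBinding().to(MyTask.class);
     *
     *   Multibinder<Schedulable> schedulables =
     *       Multibinder.newSetBinder(binder(), Schedulable.class);
     *   schedulables.addBinding().to(MyJob.class);
     */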
}
| cha87de/colosseum | app/components/execution/Init.java | Java | apache-2.0 | 1,451 |
/**
* copyright
* Inubit AG
* Schoeneberger Ufer 89
* 10785 Berlin
* Germany
*/
package net.frapu.code.visualization.twf;
import java.awt.Color;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.Point;
import java.awt.Shape;
import java.awt.geom.Rectangle2D;
import java.util.HashSet;
import java.util.Set;
import net.frapu.code.visualization.ProcessModel;
import net.frapu.code.visualization.ProcessNode;
import net.frapu.code.visualization.ProcessUtils;
/**
* @author ff
*
*/
public class ToolErrorConnector extends ProcessNode {
private static final int DIST_X = 4;
private static final int DIST_Y = 3;
private static final int BUTTON_WIDTH = 25;
public static final int AREA_HEIGHT = 20;
public static String PROP_PARENT_ID = "#ParentToolID";
private Tool f_parent;
private String PROP_NUMBER = "#ConnectorNumber";
/**
* for serialization
*/
public ToolErrorConnector() {
f_parent = null;
setNumber(0);
}
public Tool getParent() {
return f_parent;
}
/**
* @param tool
*/
public ToolErrorConnector(Tool tool, int number) {
f_parent = tool;
setProperty(PROP_PARENT_ID, f_parent.getId());
setNumber(number);
}
@Override
public void addContext(ProcessModel context) {
super.addContext(context);
if(f_parent == null) {
f_parent = (Tool) context.getNodeById(getProperty(PROP_PARENT_ID));
if(f_parent != null)//can happen with legacy models
f_parent.setErrorConnector(this,getNumber());
}
}
@Override
protected Shape getOutlineShape() {
Rectangle2D outline = new Rectangle2D.Float(getPos().x - (getSize().width / 2),
getPos().y - (getSize().height / 2), getSize().width, getSize().height);
return outline;
}
@Override
protected void paintInternal(Graphics g) {
updatePosAndSize();
Graphics2D g2 = (Graphics2D) g;
g2.setStroke(ProcessUtils.defaultStroke);
g2.setColor(Color.WHITE);
g2.fillRect(getPos().x-getSize().width/2, getPos().y-getSize().height/2, getSize().width, getSize().height);
g2.setColor(Color.BLACK);
g2.drawRect(getPos().x-getSize().width/2, getPos().y-getSize().height/2, getSize().width, getSize().height);
}
/**
* @param left
*/
public void setNumber(int number) {
setProperty(PROP_NUMBER , ""+number);
}
public int getNumber() {
try {
return Integer.parseInt(getProperty(PROP_NUMBER));
} catch (NumberFormatException e) {
e.printStackTrace();
return 0;
}
}
/**
*
*/
private void updatePosAndSize() {
if(f_parent != null) {
Point _tlPos = new Point(f_parent.getPos().x-f_parent.getSize().width/2,
f_parent.getPos().y+f_parent.getSize().height/2-AREA_HEIGHT);
_tlPos.x += ((getNumber()+0.5)*BUTTON_WIDTH) + (getNumber()+1)*DIST_X;
_tlPos.y += AREA_HEIGHT/2;
setPos(_tlPos);
setSize(BUTTON_WIDTH, AREA_HEIGHT-2*DIST_Y);
}
}
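    /*
     * Worked example of the layout math above (comment only; assumes a parent
     * tool centered at (100, 100) with size 120x80): the error area's top-left
     * corner is (100-60, 100+40-20) = (40, 120). Connector number 0 is then
     * centered at x = 40 + 0.5*25 + 1*4 = 56 (integer truncation of 56.5) and
     * y = 120 + 20/2 = 130, with size 25 x 14.
     */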
@Override
public Set<Point> getDefaultConnectionPoints() {
HashSet<Point> cp = new HashSet<Point>();
cp.add(new Point(0, (getSize().height/2)));
return cp;
}
}
| bptlab/processeditor | src/net/frapu/code/visualization/twf/ToolErrorConnector.java | Java | apache-2.0 | 3,182 |
/a/lib/tsc.js --w
//// [/user/username/projects/myproject/a.ts]
export interface Point {
name: string;
c: Coords;
}
export interface Coords {
x2: number;
y: number;
}
//// [/user/username/projects/myproject/b.ts]
import { Point } from "./a";
export interface PointWrapper extends Point {
}
//// [/user/username/projects/myproject/c.ts]
import { PointWrapper } from "./b";
export function getPoint(): PointWrapper {
return {
name: "test",
c: {
x: 1,
y: 2
}
}
};
//// [/user/username/projects/myproject/d.ts]
import { getPoint } from "./c";
getPoint().c.x;
//// [/user/username/projects/myproject/e.ts]
import "./d";
//// [/user/username/projects/myproject/tsconfig.json]
{}
//// [/a/lib/lib.d.ts]
/// <reference no-default-lib="true"/>
interface Boolean {}
interface Function {}
interface CallableFunction {}
interface NewableFunction {}
interface IArguments {}
interface Number { toExponential: any; }
interface Object {}
interface RegExp {}
interface String { charAt: any; }
interface Array<T> { length: number; [n: number]: T; }
//// [/user/username/projects/myproject/a.js]
"use strict";
exports.__esModule = true;
//// [/user/username/projects/myproject/b.js]
"use strict";
exports.__esModule = true;
//// [/user/username/projects/myproject/c.js]
"use strict";
exports.__esModule = true;
exports.getPoint = void 0;
function getPoint() {
return {
name: "test",
c: {
x: 1,
y: 2
}
};
}
exports.getPoint = getPoint;
;
//// [/user/username/projects/myproject/d.js]
"use strict";
exports.__esModule = true;
var c_1 = require("./c");
c_1.getPoint().c.x;
//// [/user/username/projects/myproject/e.js]
"use strict";
exports.__esModule = true;
require("./d");
Output::
>> Screen clear
[[90m12:00:29 AM[0m] Starting compilation in watch mode...
[96mc.ts[0m:[93m6[0m:[93m13[0m - [91merror[0m[90m TS2322: [0mType '{ x: number; y: number; }' is not assignable to type 'Coords'.
Object literal may only specify known properties, and 'x' does not exist in type 'Coords'.
[7m6[0m x: 1,
[7m [0m [91m ~~~~[0m
[96ma.ts[0m:[93m3[0m:[93m5[0m
[7m3[0m c: Coords;
[7m [0m [96m ~[0m
The expected type comes from property 'c' which is declared here on type 'PointWrapper'
[96md.ts[0m:[93m2[0m:[93m14[0m - [91merror[0m[90m TS2339: [0mProperty 'x' does not exist on type 'Coords'.
[7m2[0m getPoint().c.x;
[7m [0m [91m ~[0m
[[90m12:00:40 AM[0m] Found 2 errors. Watching for file changes.
Program root files: ["/user/username/projects/myproject/a.ts","/user/username/projects/myproject/b.ts","/user/username/projects/myproject/c.ts","/user/username/projects/myproject/d.ts","/user/username/projects/myproject/e.ts"]
Program options: {"watch":true,"configFilePath":"/user/username/projects/myproject/tsconfig.json"}
Program files::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
Semantic diagnostics in builder refreshed for::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
WatchedFiles::
/user/username/projects/myproject/tsconfig.json:
{"fileName":"/user/username/projects/myproject/tsconfig.json","pollingInterval":250}
/user/username/projects/myproject/a.ts:
{"fileName":"/user/username/projects/myproject/a.ts","pollingInterval":250}
/user/username/projects/myproject/b.ts:
{"fileName":"/user/username/projects/myproject/b.ts","pollingInterval":250}
/user/username/projects/myproject/c.ts:
{"fileName":"/user/username/projects/myproject/c.ts","pollingInterval":250}
/user/username/projects/myproject/d.ts:
{"fileName":"/user/username/projects/myproject/d.ts","pollingInterval":250}
/user/username/projects/myproject/e.ts:
{"fileName":"/user/username/projects/myproject/e.ts","pollingInterval":250}
/a/lib/lib.d.ts:
{"fileName":"/a/lib/lib.d.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/myproject/node_modules/@types:
{"directoryName":"/user/username/projects/myproject/node_modules/@types","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
/user/username/projects/myproject:
{"directoryName":"/user/username/projects/myproject","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
Change:: Rename property x2 to x of interface Coords
//// [/user/username/projects/myproject/a.ts]
export interface Point {
name: string;
c: Coords;
}
export interface Coords {
x: number;
y: number;
}
//// [/user/username/projects/myproject/a.js] file written with same contents
//// [/user/username/projects/myproject/b.js] file written with same contents
Output::
>> Screen clear
[[90m12:00:44 AM[0m] File change detected. Starting incremental compilation...
[[90m12:00:51 AM[0m] Found 0 errors. Watching for file changes.
Program root files: ["/user/username/projects/myproject/a.ts","/user/username/projects/myproject/b.ts","/user/username/projects/myproject/c.ts","/user/username/projects/myproject/d.ts","/user/username/projects/myproject/e.ts"]
Program options: {"watch":true,"configFilePath":"/user/username/projects/myproject/tsconfig.json"}
Program files::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
Semantic diagnostics in builder refreshed for::
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
WatchedFiles::
/user/username/projects/myproject/tsconfig.json:
{"fileName":"/user/username/projects/myproject/tsconfig.json","pollingInterval":250}
/user/username/projects/myproject/a.ts:
{"fileName":"/user/username/projects/myproject/a.ts","pollingInterval":250}
/user/username/projects/myproject/b.ts:
{"fileName":"/user/username/projects/myproject/b.ts","pollingInterval":250}
/user/username/projects/myproject/c.ts:
{"fileName":"/user/username/projects/myproject/c.ts","pollingInterval":250}
/user/username/projects/myproject/d.ts:
{"fileName":"/user/username/projects/myproject/d.ts","pollingInterval":250}
/user/username/projects/myproject/e.ts:
{"fileName":"/user/username/projects/myproject/e.ts","pollingInterval":250}
/a/lib/lib.d.ts:
{"fileName":"/a/lib/lib.d.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/myproject/node_modules/@types:
{"directoryName":"/user/username/projects/myproject/node_modules/@types","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
/user/username/projects/myproject:
{"directoryName":"/user/username/projects/myproject","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
| nojvek/TypeScript | tests/baselines/reference/tscWatch/emitAndErrorUpdates/default/file-not-exporting-a-deep-multilevel-import-that-changes.js | JavaScript | apache-2.0 | 7,454 |
package com.amqtech.opensource.appintroexample.util;
/**
* Created by andrew on 11/17/16.
*/
import android.app.Fragment;
import android.app.FragmentManager;
import android.app.FragmentTransaction;
import android.os.Build;
import android.os.Bundle;
import android.os.Parcelable;
import androidx.fragment.app.FragmentPagerAdapter;
import androidx.viewpager.widget.PagerAdapter;
import android.view.View;
import android.view.ViewGroup;
import java.util.ArrayList;
/**
* Implementation of {@link PagerAdapter} that
* uses a {@link Fragment} to manage each page. This class also handles
* saving and restoring of fragment's state.
* <p>
* <p>This version of the pager is more useful when there are a large number
* of pages, working more like a list view. When pages are not visible to
* the user, their entire fragment may be destroyed, only keeping the saved
* state of that fragment. This allows the pager to hold on to much less
* memory associated with each visited page as compared to
* {@link FragmentPagerAdapter} at the cost of potentially more overhead when
* switching between pages.
* <p>
* <p>When using FragmentPagerAdapter the host ViewPager must have a
* valid ID set.</p>
* <p>
* <p>Subclasses only need to implement {@link #getItem(int)}
* and {@link #getCount()} to have a working adapter.
* <p>
* <p>Here is an example implementation of a pager containing fragments of
* lists:
* <p>
* {@sample frameworks/support/samples/Support13Demos/src/com/example/android/supportv13/app/FragmentStatePagerSupport.java
* complete}
* <p>
* <p>The <code>R.layout.fragment_pager</code> resource of the top-level fragment is:
* <p>
* {@sample frameworks/support/samples/Support13Demos/res/layout/fragment_pager.xml
* complete}
* <p>
* <p>The <code>R.layout.fragment_pager_list</code> resource containing each
* individual fragment's layout is:
* <p>
* {@sample frameworks/support/samples/Support13Demos/res/layout/fragment_pager_list.xml
* complete}
*/
public abstract class FragmentStatePagerAdapter extends PagerAdapter {
private static final String TAG = "FragmentStatePagerAdapter";
private static final boolean DEBUG = false;
private final FragmentManager mFragmentManager;
private FragmentTransaction mCurTransaction = null;
private ArrayList<Fragment.SavedState> mSavedState = new ArrayList<Fragment.SavedState>();
private ArrayList<Fragment> mFragments = new ArrayList<Fragment>();
private Fragment mCurrentPrimaryItem = null;
public FragmentStatePagerAdapter(FragmentManager fm) {
mFragmentManager = fm;
}
/**
* Return the Fragment associated with a specified position.
*/
public abstract Fragment getItem(int position);
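    /*
     * Minimal subclass sketch (comment only; IntroAdapter and the slides list
     * are hypothetical):
     *
     *   public class IntroAdapter extends FragmentStatePagerAdapter {
     *       private final List<Fragment> slides;
     *       public IntroAdapter(FragmentManager fm, List<Fragment> slides) {
     *           super(fm);
     *           this.slides = slides;
     *       }
     *       @Override public Fragment getItem(int position) { return slides.get(position); }
     *       @Override public int getCount() { return slides.size(); }
     *   }
     */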
@Override
public void startUpdate(ViewGroup container) {
if (container.getId() == View.NO_ID) {
throw new IllegalStateException("ViewPager with adapter " + this
+ " requires a view id");
}
}
@Override
public Object instantiateItem(ViewGroup container, int position) {
// If we already have this item instantiated, there is nothing
// to do. This can happen when we are restoring the entire pager
// from its saved state, where the fragment manager has already
// taken care of restoring the fragments we previously had instantiated.
if (mFragments.size() > position) {
Fragment f = mFragments.get(position);
if (f != null) {
return f;
}
}
if (mCurTransaction == null) {
mCurTransaction = mFragmentManager.beginTransaction();
}
Fragment fragment = getItem(position);
if (mSavedState.size() > position) {
Fragment.SavedState fss = mSavedState.get(position);
if (fss != null) {
fragment.setInitialSavedState(fss);
}
}
while (mFragments.size() <= position) {
mFragments.add(null);
}
fragment.setMenuVisibility(false);
setFragmentUserVisibleHint(fragment);
mFragments.set(position, fragment);
mCurTransaction.add(container.getId(), fragment);
return fragment;
}
public void setFragmentUserVisibleHint(Fragment fragment) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1) {
fragment.setUserVisibleHint(false);
}
}
@Override
public void destroyItem(ViewGroup container, int position, Object object) {
Fragment fragment = (Fragment) object;
if (mCurTransaction == null) {
mCurTransaction = mFragmentManager.beginTransaction();
}
while (mSavedState.size() <= position) {
mSavedState.add(null);
}
mSavedState.set(position, fragment.isAdded()
? mFragmentManager.saveFragmentInstanceState(fragment) : null);
mFragments.set(position, null);
mCurTransaction.remove(fragment);
}
@Override
public void setPrimaryItem(ViewGroup container, int position, Object object) {
Fragment fragment = (Fragment) object;
if (fragment != mCurrentPrimaryItem) {
if (mCurrentPrimaryItem != null) {
mCurrentPrimaryItem.setMenuVisibility(false);
setFragmentUserVisibleHint(mCurrentPrimaryItem);
}
if (fragment != null) {
fragment.setMenuVisibility(true);
setFragmentUserVisibleHint(fragment);
}
mCurrentPrimaryItem = fragment;
}
}
@Override
public void finishUpdate(ViewGroup container) {
if (mCurTransaction != null) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
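                // commitNowAllowingStateLoss() on android.app.FragmentTransaction
                // requires API 24 (N); on older API levels this adapter leaves the
                // transaction uncommitted here.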
mCurTransaction.commitNowAllowingStateLoss();
}
mCurTransaction = null;
}
}
@Override
public boolean isViewFromObject(View view, Object object) {
return ((Fragment) object).getView() == view;
}
@Override
public Parcelable saveState() {
Bundle state = null;
if (mSavedState.size() > 0) {
state = new Bundle();
Fragment.SavedState[] fss = new Fragment.SavedState[mSavedState.size()];
mSavedState.toArray(fss);
state.putParcelableArray("states", fss);
}
for (int i = 0; i < mFragments.size(); i++) {
Fragment f = mFragments.get(i);
if (f != null && f.isAdded()) {
if (state == null) {
state = new Bundle();
}
String key = "f" + i;
mFragmentManager.putFragment(state, key, f);
}
}
return state;
}
@Override
public void restoreState(Parcelable state, ClassLoader loader) {
if (state != null) {
Bundle bundle = (Bundle) state;
bundle.setClassLoader(loader);
Parcelable[] fss = bundle.getParcelableArray("states");
mSavedState.clear();
mFragments.clear();
if (fss != null) {
for (int i = 0; i < fss.length; i++) {
mSavedState.add((Fragment.SavedState) fss[i]);
}
}
Iterable<String> keys = bundle.keySet();
for (String key : keys) {
if (key.startsWith("f")) {
int index = Integer.parseInt(key.substring(1));
Fragment f = mFragmentManager.getFragment(bundle, key);
if (f != null) {
while (mFragments.size() <= index) {
mFragments.add(null);
}
f.setMenuVisibility(false);
mFragments.set(index, f);
}
}
}
}
}
}
| PaoloRotolo/AppIntro | example/src/main/java/com/amqtech/opensource/appintroexample/util/FragmentStatePagerAdapter.java | Java | apache-2.0 | 7,938 |
/*
Derby - Class org.apache.derby.iapi.sql.compile.OptimizerPlan
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.derby.iapi.sql.compile;
import org.apache.derby.catalog.AliasInfo;
import org.apache.derby.shared.common.error.StandardException;
import org.apache.derby.shared.common.reference.SQLState;
import org.apache.derby.iapi.sql.StatementUtil;
import org.apache.derby.iapi.sql.compile.CompilerContext;
import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
import org.apache.derby.iapi.sql.dictionary.AliasDescriptor;
import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
import org.apache.derby.iapi.sql.dictionary.UniqueTupleDescriptor;
import org.apache.derby.iapi.util.IdUtil;
/**
* <p>
* High level description of a plan for consideration by the Optimizer.
* This is used to specify a complete plan via optimizer overrides. A
* plan is a tree whose interior nodes are join operators and whose
* leaves are row sources (conglomerates or tableFunctions).
* </p>
*/
public abstract class OptimizerPlan
{
////////////////////////////////////////////////////////////////////////
//
// CONSTANTS
//
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
//
// FACTORY METHODS
//
////////////////////////////////////////////////////////////////////////
/**
* <p>
* Make a RowSource corresponding to the given tuple descriptor.
* </p>
*/
public static RowSource makeRowSource( UniqueTupleDescriptor utd, DataDictionary dd )
throws StandardException
{
if ( utd == null ) { return null; }
else if ( utd instanceof ConglomerateDescriptor )
{
return new ConglomerateRS( (ConglomerateDescriptor) utd, dd );
}
else if ( utd instanceof AliasDescriptor )
{
return new TableFunctionRS( (AliasDescriptor) utd );
}
else { return null; }
}
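    /*
     * Illustrative sketch (not part of the original source): assembling and binding a
     * left-deep plan over two conglomerates. The schema/index names and the
     * "nestedLoop" JoinStrategy variable are hypothetical.
     *
     *   OptimizerPlan left  = new OptimizerPlan.ConglomerateRS( "APP", "T1_IDX" );
     *   OptimizerPlan right = new OptimizerPlan.ConglomerateRS( "APP", "T2_IDX" );
     *   OptimizerPlan plan  = new OptimizerPlan.Join( nestedLoop, left, right );
     *
     *   plan.bind( dataDictionary, lcc, cc );   // resolves the schema and conglomerate names
     */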
////////////////////////////////////////////////////////////////////////
//
// ABSTRACT BEHAVIOR
//
////////////////////////////////////////////////////////////////////////
/**
* <p>
* Bind the conglomerate and table function names in this plan.
* </p>
*
* @param dataDictionary DataDictionary to bind against.
*/
public abstract void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException;
/**
* <p>
     * Return true if the schema and RowSource names have been resolved.
* </p>
*/
public abstract boolean isBound();
/**
* <p>
* Count the number of leaf nodes under (and including) this node.
* </p>
*/
public abstract int countLeafNodes();
/**
* <p>
* Get the leftmost leaf node in this plan.
* </p>
*/
public abstract OptimizerPlan leftmostLeaf();
/**
* <p>
* Return true if this plan is a (left) leading prefix of the other plan.
* </p>
*/
public abstract boolean isLeftPrefixOf( OptimizerPlan that );
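    /*
     * Illustrative note (not part of the original source): with hypothetical plans
     *
     *   p1 = ( T1 # T2 )
     *   p2 = ( ( T1 # T2 ) * T3 )
     *
     * p1.isLeftPrefixOf( p2 ) is true, because p1 matches the leftmost join subtree of p2.
     * Here "#" and "*" stand for whatever operator symbols the JoinStrategy implementations report.
     */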
////////////////////////////////////////////////////////////////////////
//
// INNER CLASSES
//
////////////////////////////////////////////////////////////////////////
public static final class Join extends OptimizerPlan
{
final JoinStrategy strategy;
final OptimizerPlan leftChild;
final OptimizerPlan rightChild;
private boolean _isBound;
private int _leafNodeCount = 0;
public Join
(
JoinStrategy strategy,
OptimizerPlan leftChild,
OptimizerPlan rightChild
)
{
this.strategy = strategy;
this.leftChild = leftChild;
this.rightChild = rightChild;
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
// only left-deep trees allowed at this time
if ( !( rightChild instanceof RowSource ) )
{
throw StandardException.newException( SQLState.LANG_NOT_LEFT_DEEP );
}
leftChild.bind( dataDictionary, lcc, cc );
rightChild.bind( dataDictionary, lcc, cc );
_isBound = true;
}
public boolean isBound() { return _isBound; }
public int countLeafNodes()
{
if ( _leafNodeCount <= 0 ) { _leafNodeCount = leftChild.countLeafNodes() + rightChild.countLeafNodes(); }
return _leafNodeCount;
}
public OptimizerPlan leftmostLeaf() { return leftChild.leftmostLeaf(); }
public boolean isLeftPrefixOf( OptimizerPlan other )
{
if ( !(other instanceof Join) ) { return false; }
Join that = (Join) other;
int thisLeafCount = this.countLeafNodes();
int thatLeafCount = that.countLeafNodes();
if ( thisLeafCount > thatLeafCount ) { return false; }
else if ( thisLeafCount < thatLeafCount ) { return isLeftPrefixOf( that.leftChild ); }
else { return this.equals( that ); }
}
public String toString()
{
return
"( " +
leftChild.toString() +
" " + strategy.getOperatorSymbol() + " " +
rightChild.toString() +
" )";
}
public boolean equals( Object other )
{
if ( other == null ) { return false; }
if ( !(other instanceof Join) ) { return false; }
Join that = (Join) other;
if ( !this.strategy.getOperatorSymbol().equals( that.strategy.getOperatorSymbol() ) ) { return false; }
return this.leftChild.equals( that.leftChild) && this.rightChild.equals( that.rightChild );
}
}
/** Generic plan for row sources we don't understand */
public static class DeadEnd extends OptimizerPlan
{
private String _name;
public DeadEnd( String name )
{
_name = name;
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{}
public boolean isBound() { return true; }
public int countLeafNodes() { return 1; }
public OptimizerPlan leftmostLeaf() { return this; }
public boolean isLeftPrefixOf( OptimizerPlan that )
{
return this.equals( that.leftmostLeaf() );
}
public String toString() { return _name; }
}
public abstract static class RowSource<D extends UniqueTupleDescriptor> extends OptimizerPlan
{
protected String _schemaName;
protected String _rowSourceName;
protected SchemaDescriptor _schema;
protected D _descriptor;
public RowSource( String schemaName, String rowSourceName )
{
_schemaName = schemaName;
_rowSourceName = rowSourceName;
}
protected RowSource() {}
/** Get the UniqueTupleDescriptor bound to this RowSource */
public D getDescriptor() { return _descriptor; }
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
// bind the schema name
if ( _schema == null )
{
_schema = StatementUtil.getSchemaDescriptor( _schemaName, true, dataDictionary, lcc, cc );
_schemaName = _schema.getSchemaName();
}
}
public boolean isBound() { return (_descriptor != null); }
public int countLeafNodes() { return 1; }
public OptimizerPlan leftmostLeaf() { return this; }
public boolean isLeftPrefixOf( OptimizerPlan that )
{
return this.equals( that.leftmostLeaf() );
}
public String toString()
{
return IdUtil.mkQualifiedName( _schemaName, _rowSourceName );
}
public boolean equals( Object other )
{
if ( other == null ) { return false; }
if ( other.getClass() != this.getClass() ) { return false; }
RowSource that = (RowSource) other;
if ( !( this.isBound() && that.isBound() ) ) { return false; }
return this._schemaName.equals( that._schemaName ) && this._rowSourceName.equals( that._rowSourceName );
}
}
public static final class ConglomerateRS extends RowSource<ConglomerateDescriptor>
{
public ConglomerateRS( String schemaName, String rowSourceName ) { super( schemaName, rowSourceName ); }
public ConglomerateRS( ConglomerateDescriptor cd, DataDictionary dataDictionary )
throws StandardException
{
_descriptor = cd;
_schema = dataDictionary.getSchemaDescriptor( cd.getSchemaID(), null );
_schemaName = _schema.getSchemaName();
_rowSourceName = cd.getConglomerateName();
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
super.bind( dataDictionary, lcc, cc );
if ( _descriptor == null )
{
_descriptor = dataDictionary.getConglomerateDescriptor( _rowSourceName, _schema, false );
}
if ( _descriptor == null )
{
throw StandardException.newException
( SQLState.LANG_INDEX_NOT_FOUND, _schemaName + "." + _rowSourceName );
}
}
}
public static final class TableFunctionRS extends RowSource<AliasDescriptor>
{
public TableFunctionRS( String schemaName, String rowSourceName ) { super( schemaName, rowSourceName ); }
public TableFunctionRS( AliasDescriptor ad )
{
_descriptor = ad;
_schemaName = ad.getSchemaName();
_rowSourceName = ad.getName();
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
super.bind( dataDictionary, lcc, cc );
if ( _descriptor == null )
{
_descriptor = dataDictionary.getAliasDescriptor
( _schema.getUUID().toString(), _rowSourceName, AliasInfo.ALIAS_NAME_SPACE_FUNCTION_AS_CHAR );
}
if ( _descriptor == null )
{
throw StandardException.newException
(
SQLState.LANG_OBJECT_NOT_FOUND,
AliasDescriptor.getAliasType( AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR ),
_schemaName + "." + _rowSourceName
);
}
}
public String toString() { return super.toString() + "()"; }
}
}
| apache/derby | java/org.apache.derby.engine/org/apache/derby/iapi/sql/compile/OptimizerPlan.java | Java | apache-2.0 | 12,716 |
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import json
class NullResource(object):
""" Implments the lock interface for spawn. """
def __init__(self, *args, **kwargs):
self.owned = False
def remove(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace):
pass
def acquire(self, info):
pass
class LockFile(object):
""" Manages locking and unlocking an open file handle
can also be used as a context manager
"""
def __init__(self, fd, lock_operation=fcntl.LOCK_EX,
unlock_operation=fcntl.LOCK_UN):
self.fd = fd
self.file_name = None
if type(fd) != int:
self.fd = self.open(fd)
self.file_name = fd
self.lock_operation = lock_operation
self.unlock_operation = unlock_operation
def __enter__(self):
self.lock(self.lock_operation)
return self
def __exit__(self, exc_type, exc_value, trace):
self.unlock(self.unlock_operation)
return False
def lock(self, operation=fcntl.LOCK_EX):
fcntl.flock(self.fd, operation)
def unlock(self, operation=fcntl.LOCK_UN):
fcntl.flock(self.fd, operation)
def write(self, data):
os.lseek(self.fd, 0, os.SEEK_SET)
os.ftruncate(self.fd, 0)
os.write(self.fd, data)
os.fsync(self.fd)
def read(self):
size = os.lseek(self.fd, 0, os.SEEK_END)
os.lseek(self.fd, 0, os.SEEK_SET)
return os.read(self.fd, size)
def close(self):
try:
os.close(self.fd)
        except (TypeError, OSError):
pass
self.fd = None
def unlink(self):
self.close()
try:
os.unlink(self.file_name)
except OSError, e:
pass
def _createdir(self, file_name):
try:
dir = os.path.dirname(file_name)
os.makedirs(dir)
except OSError, e:
# ignore if already exists
if e.errno != errno.EEXIST:
raise
def open(self, file_name):
for i in range(0, 2):
try:
# Attempt to create the file
return os.open(file_name, os.O_RDWR | os.O_CREAT)
except OSError, e:
# No such file or directory
if e.errno == errno.ENOENT:
# create the dir and try again
self._createdir(file_name)
continue
# Unknown error
raise
raise RuntimeError("failed to create '%s'" % file_name)
class JsonLockFile(LockFile):
""" Manages a lock file that contains json """
def update(self, info):
data = self.read()
data.update(info)
self.write(data)
def get(self, key, default=None):
try:
data = self.read()
return data[key]
except KeyError:
return default
def write(self, data):
super(JsonLockFile, self).write(json.dumps(data))
def read(self):
try:
return json.loads(super(JsonLockFile, self).read())
except ValueError, e:
return {}
class ResourceFile(JsonLockFile):
""" Manages ownership of a resource file,
can also be used as a context manager
"""
def __init__(self, file_name):
self.file_name = file_name
self.owned = False
self.fd = None
def __enter__(self):
self.fd = self.open(self.file_name)
super(ResourceFile, self).lock()
return self
def __exit__(self, exc_type, exc_value, trace):
super(ResourceFile, self).unlock()
self.close()
return False
def used(self):
""" Returns true if the resource file is in use by someone """
info = self.read()
# If pid is alive, the volume is owned by someone else
if 'pid' in info and self.alive(info['pid']):
return info
return False
def alive(self, pid):
try:
os.kill(pid, 0)
return True
except OSError, e:
return False
def acquire(self, info):
""" Acquire ownership of the file by writing our pid information """
self.update(info)
if 'pid' in info:
# We own the resource
self.owned = True
def remove(self):
if self.owned:
self.unlink()
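# Illustrative sketch (not part of the original module): claiming a resource file only
# when no live process owns it; the path and the pid payload are hypothetical.
#
#   with ResourceFile('/var/run/lunr/volume-01') as resource:
#       if not resource.used():
#           resource.acquire({'pid': os.getpid()})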
| rackerlabs/lunr | lunr/common/lock.py | Python | apache-2.0 | 5,070 |
// Copyright (c) 2017, TIG All rights reserved.
// Use of this source code is governed by a Apache License 2.0 that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"os"
"runtime"
"strings"
"github.com/tiglabs/containerfs/datanode"
"github.com/tiglabs/containerfs/logger"
"github.com/tiglabs/containerfs/utils"
)
func init() {
var loglevel string
var volMgrHosts string
flag.StringVar(&datanode.DtAddr.Host, "host", "127.0.0.1:8801", "ContainerFS DataNode Host")
flag.StringVar(&datanode.DtAddr.Tier, "tier", "sas", "ContainerFS DataNode Storage Medium")
flag.StringVar(&datanode.DtAddr.Path, "datapath", "", "ContainerFS DataNode Data Path")
flag.StringVar(&datanode.DtAddr.Log, "logpath", "/export/Logs/containerfs/logs/", "ContainerFS Log Path")
flag.StringVar(&loglevel, "loglevel", "error", "ContainerFS Log Level")
flag.StringVar(&volMgrHosts, "volmgr", "10.8.64.216,10.8.64.217,10.8.64.218", "ContainerFS VolMgr hosts")
flag.Parse()
if len(os.Args) >= 2 && (os.Args[1] == "version") {
fmt.Println(utils.Version())
os.Exit(0)
}
tmp := strings.Split(volMgrHosts, ",")
datanode.VolMgrHosts = make([]string, 3)
datanode.VolMgrHosts[0] = tmp[0] + ":7703"
datanode.VolMgrHosts[1] = tmp[1] + ":7713"
datanode.VolMgrHosts[2] = tmp[2] + ":7723"
datanode.DtAddr.Flag = datanode.DtAddr.Path + "/.registryflag"
logger.SetConsole(true)
	logger.SetRollingFile(datanode.DtAddr.Log, "datanode.log", 10, 100, logger.MB) // roll the log file every 100 MB
switch loglevel {
case "error":
logger.SetLevel(logger.ERROR)
case "debug":
logger.SetLevel(logger.DEBUG)
case "info":
logger.SetLevel(logger.INFO)
default:
logger.SetLevel(logger.ERROR)
}
_, err := os.Stat(datanode.DtAddr.Path)
if err != nil {
logger.Error("data node statup failed : datanode.DtAddr.Path not exist !")
os.Exit(1)
}
datanode.RegistryToVolMgr()
}
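// Illustrative sketch (not part of the original source): a typical launch using the
// flags registered in init(); every address and path below is hypothetical.
//
//   ./datanode -host 10.0.0.5:8801 -tier sas -datapath /export/data1 \
//       -logpath /export/Logs/containerfs/logs/ -loglevel info \
//       -volmgr 10.0.0.1,10.0.0.2,10.0.0.3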
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
datanode.StartDataService()
}
| ipdcode/containerfs | cmd/datanode/main.go | GO | apache-2.0 | 1,965 |