Dataset schema:

  code        string  (lengths 3 to 1.05M)
  repo_name   string  (lengths 4 to 116)
  path        string  (lengths 4 to 991)
  language    string  (9 distinct values)
  license     string  (15 distinct values)
  size        int32   (values 3 to 1.05M)
// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.

/*---
info: >
    The initial value of Boolean.prototype is the Boolean
    prototype object
es5id: 15.6.3.1_A1
description: Checking Boolean.prototype property
---*/

//CHECK#1
if (typeof Boolean.prototype !== "object") {
  $ERROR('#1: typeof Boolean.prototype === "object"');
}

//CHECK#2
if (Boolean.prototype != false) {
  $ERROR('#2: Boolean.prototype == false');
}

delete Boolean.prototype.toString;

if (Boolean.prototype.toString() !== "[object Boolean]") {
  $ERROR('#3: The [[Class]] property of the Boolean prototype object is set to "Boolean"');
}
m0ppers/arangodb
3rdParty/V8/V8-5.0.71.39/test/test262/data/test/built-ins/Boolean/prototype/S15.6.3.1_A1.js
JavaScript
apache-2.0
694
/*
 * Copyright (C) 2007 Júlio Vilmar Gesser.
 *
 * This file is part of Java 1.5 parser and Abstract Syntax Tree.
 *
 * Java 1.5 parser and Abstract Syntax Tree is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Java 1.5 parser and Abstract Syntax Tree is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Java 1.5 parser and Abstract Syntax Tree. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * Created on 05/10/2006
 */
package japa.parser.ast.expr;

import japa.parser.ast.visitor.GenericVisitor;
import japa.parser.ast.visitor.VoidVisitor;

import java.util.List;

/**
 * @author Julio Vilmar Gesser
 */
public final class ArrayInitializerExpr extends Expression {

    private List<Expression> values;

    public ArrayInitializerExpr() {
    }

    public ArrayInitializerExpr(List<Expression> values) {
        this.values = values;
    }

    public ArrayInitializerExpr(int beginLine, int beginColumn, int endLine, int endColumn, List<Expression> values) {
        super(beginLine, beginColumn, endLine, endColumn);
        this.values = values;
    }

    @Override
    public <R, A> R accept(GenericVisitor<R, A> v, A arg) {
        return v.visit(this, arg);
    }

    @Override
    public <A> void accept(VoidVisitor<A> v, A arg) {
        v.visit(this, arg);
    }

    public List<Expression> getValues() {
        return values;
    }

    public void setValues(List<Expression> values) {
        this.values = values;
    }
}
irblsensitivity/irblsensitivity
techniques/BRTracer/src/japa/parser/ast/expr/ArrayInitializerExpr.java
Java
apache-2.0
1,995
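The sample above is the javaparser AST node for array initializers such as {1, 2, 3}. A minimal construction sketch, assuming the sibling node class IntegerLiteralExpr from the same japa.parser.ast.expr package (the demo class itself is hypothetical):

import japa.parser.ast.expr.ArrayInitializerExpr;
import japa.parser.ast.expr.Expression;
import japa.parser.ast.expr.IntegerLiteralExpr; // assumed sibling AST node

import java.util.ArrayList;
import java.util.List;

public class ArrayInitializerDemo { // hypothetical demo class
    public static void main(String[] args) {
        // Build the AST node that models the source fragment {1, 2, 3}.
        List<Expression> values = new ArrayList<Expression>();
        values.add(new IntegerLiteralExpr("1"));
        values.add(new IntegerLiteralExpr("2"));
        values.add(new IntegerLiteralExpr("3"));

        ArrayInitializerExpr init = new ArrayInitializerExpr(values);
        System.out.println(init.getValues().size()); // prints 3
    }
}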
package fr.free.nrw.commons.explore;

import android.content.Context;
import android.util.AttributeSet;
import android.view.MotionEvent;

import androidx.viewpager.widget.ViewPager;

/**
 * ParentViewPager: a custom ViewPager whose scrolling can be enabled and disabled.
 */
public class ParentViewPager extends ViewPager {

    /**
     * Boolean variable that stores the current state of pager scroll, i.e. enabled or disabled.
     */
    private boolean canScroll = true;

    /**
     * Default constructors.
     */
    public ParentViewPager(Context context) {
        super(context);
    }

    public ParentViewPager(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    /**
     * Setter method for canScroll.
     */
    public void setCanScroll(boolean canScroll) {
        this.canScroll = canScroll;
    }

    /**
     * Getter method for canScroll.
     */
    public boolean isCanScroll() {
        return canScroll;
    }

    /**
     * Method that prevents scrolling if canScroll is set to false.
     */
    @Override
    public boolean onTouchEvent(MotionEvent ev) {
        return canScroll && super.onTouchEvent(ev);
    }

    /**
     * A facilitator method that allows the parent to intercept touch events before its children,
     * thus making it possible to prevent the parent from swiping while a child owns the gesture.
     */
    @Override
    public boolean onInterceptTouchEvent(MotionEvent ev) {
        return canScroll && super.onInterceptTouchEvent(ev);
    }
}
commons-app/apps-android-commons
app/src/main/java/fr/free/nrw/commons/explore/ParentViewPager.java
Java
apache-2.0
1,500
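A short usage sketch for the pager above; the view id and the enclosing Activity code are hypothetical:

// Inside an Activity or Fragment; R.id.parent_view_pager is a hypothetical id.
ParentViewPager pager = findViewById(R.id.parent_view_pager);
pager.setCanScroll(false); // child views now own horizontal gestures
// ... once the child gesture finishes:
pager.setCanScroll(true);  // restore normal page swiping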
package liquibase.diff.compare.core;

import liquibase.CatalogAndSchema;
import liquibase.database.Database;
import liquibase.diff.compare.CompareControl;
import liquibase.diff.compare.DatabaseObjectComparator;
import liquibase.util.StringUtil;

/**
 * Base DatabaseObjectComparator holding the logic shared by the catalog and schema comparators.
 */
public abstract class CommonCatalogSchemaComparator implements DatabaseObjectComparator {

    protected boolean equalsSchemas(Database accordingTo, String schemaName1, String schemaName2) {
        if (CatalogAndSchema.CatalogAndSchemaCase.ORIGINAL_CASE.equals(accordingTo.getSchemaAndCatalogCase())) {
            return StringUtil.trimToEmpty(schemaName1).equals(StringUtil.trimToEmpty(schemaName2));
        } else {
            return StringUtil.trimToEmpty(schemaName1).equalsIgnoreCase(StringUtil.trimToEmpty(schemaName2));
        }
    }

    protected String getComparisonSchemaOrCatalog(Database accordingTo, CompareControl.SchemaComparison comparison) {
        if (accordingTo.supportsSchemas()) {
            return comparison.getComparisonSchema().getSchemaName();
        } else if (accordingTo.supportsCatalogs()) {
            return comparison.getComparisonSchema().getCatalogName();
        }
        return null;
    }

    protected String getReferenceSchemaOrCatalog(Database accordingTo, CompareControl.SchemaComparison comparison) {
        if (accordingTo.supportsSchemas()) {
            return comparison.getReferenceSchema().getSchemaName();
        } else if (accordingTo.supportsCatalogs()) {
            return comparison.getReferenceSchema().getCatalogName();
        }
        return null;
    }
}
liquibase/liquibase
liquibase-core/src/main/java/liquibase/diff/compare/core/CommonCatalogSchemaComparator.java
Java
apache-2.0
1,666
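The rule implemented by equalsSchemas above can be restated without the Liquibase types: trim both names, treat null as empty, and compare case-insensitively unless the database is configured to preserve original case. A self-contained sketch of that rule (not the Liquibase API):

public class SchemaNameRule {
    // Mirrors equalsSchemas: trim, treat null as empty, then compare with or
    // without case sensitivity depending on the database configuration.
    static boolean equalsSchemas(boolean preservesOriginalCase, String s1, String s2) {
        String a = (s1 == null) ? "" : s1.trim();
        String b = (s2 == null) ? "" : s2.trim();
        return preservesOriginalCase ? a.equals(b) : a.equalsIgnoreCase(b);
    }

    public static void main(String[] args) {
        System.out.println(equalsSchemas(false, " PUBLIC ", "public")); // true
        System.out.println(equalsSchemas(true, "PUBLIC", "public"));    // false
    }
}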
/*
Copyright 2016 VMware, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "noMatch.h"
#include "frontends/p4/coreLibrary.h"

namespace P4 {

const IR::Node* DoHandleNoMatch::postorder(IR::SelectExpression* expression) {
    for (auto c : expression->selectCases) {
        if (c->keyset->is<IR::DefaultExpression>())
            return expression;
    }
    CHECK_NULL(noMatch);
    auto sc = new IR::SelectCase(
        new IR::DefaultExpression(), new IR::PathExpression(noMatch->getName()));
    expression->selectCases.push_back(sc);
    return expression;
}

const IR::Node* DoHandleNoMatch::preorder(IR::P4Parser* parser) {
    P4CoreLibrary& lib = P4CoreLibrary::instance;
    cstring name = nameGen->newName("noMatch");
    LOG2("Inserting " << name << " state");
    auto args = new IR::Vector<IR::Argument>();
    args->push_back(new IR::Argument(new IR::BoolLiteral(false)));
    args->push_back(new IR::Argument(new IR::Member(
        new IR::TypeNameExpression(IR::Type_Error::error), lib.noMatch.Id())));
    auto verify = new IR::MethodCallExpression(
        new IR::PathExpression(IR::ID(IR::ParserState::verify)), args);
    noMatch = new IR::ParserState(
        IR::ID(name),
        { new IR::MethodCallStatement(verify) },
        new IR::PathExpression(IR::ID(IR::ParserState::reject)));
    parser->states.push_back(noMatch);
    return parser;
}

}  // namespace P4
hanw/p4c
midend/noMatch.cpp
C++
apache-2.0
1,905
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2014 Red Hat, Inc., and individual contributors
 * as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
'use strict';

(function() {

  var module = angular.module('pnc.common.restclient');

  /**
   * @ngdoc service
   * @name pnc.common.restclient:Build
   * @description
   *
   */
  module.factory('Build', [
    'BuildRecordDAO',
    'RunningBuildRecordDAO',
    '$q',
    function(BuildRecordDAO, RunningBuildRecordDAO, $q) {
      return {
        get: function(spec) {
          var deferred = $q.defer();

          function overrideRejection(response) {
            return $q.when(response);
          }

          /*
           * In order to return the BuildRecord regardless of whether it is in
           * progress or completed, we must attempt to fetch both the
           * RunningBuild and the BuildRecord for the given ID in parallel
           * (unless something went wrong, one of these requests should succeed
           * and one fail). As such we have to catch the rejection for the
           * request that failed and return a resolved promise. We can then
           * check which request succeeded in the success callback and resolve
           * the promise returned to the user with it.
           */
          $q.all([
            BuildRecordDAO.get(spec).$promise.catch(overrideRejection),
            RunningBuildRecordDAO.get(spec).$promise.catch(overrideRejection)
          ]).then(
            function(results) {
              // Success - return whichever record we successfully pulled down.
              if (results[0].id) {
                deferred.resolve(results[0]);
              } else if (results[1].id) {
                deferred.resolve(results[1]);
              } else {
                deferred.reject(results);
              }
            },
            function(results) {
              // Error
              deferred.reject(results);
            }
          );

          return deferred.promise;
        }
      };
    }
  ]);

})();
emmettu/pnc
ui/app/common/restclient/services/Build.js
JavaScript
apache-2.0
2,590
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.android.apis.graphics;

import android.app.Activity;
import android.opengl.GLSurfaceView;
import android.os.Bundle;

public class TriangleActivity extends Activity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        mGLView = new GLSurfaceView(this);
        mGLView.setEGLConfigChooser(false);
        mGLView.setRenderer(new StaticTriangleRenderer(this));
        setContentView(mGLView);
    }

    @Override
    protected void onPause() {
        super.onPause();
        mGLView.onPause();
    }

    @Override
    protected void onResume() {
        super.onResume();
        mGLView.onResume();
    }

    private GLSurfaceView mGLView;
}
huang-qiao/ApiDemos-ASProject
app/src/main/java/com/example/android/apis/graphics/TriangleActivity.java
Java
apache-2.0
1,361
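TriangleActivity delegates drawing to StaticTriangleRenderer, which is not included in this sample. For reference, a minimal GLSurfaceView.Renderer that just clears the screen has this shape (a sketch, not the actual StaticTriangleRenderer):

import android.opengl.GLSurfaceView;

import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;

// Minimal renderer sketch: clears the screen each frame.
public class ClearRenderer implements GLSurfaceView.Renderer {
    @Override
    public void onSurfaceCreated(GL10 gl, EGLConfig config) {
        gl.glClearColor(0f, 0f, 0f, 1f); // opaque black clear color
    }

    @Override
    public void onSurfaceChanged(GL10 gl, int width, int height) {
        gl.glViewport(0, 0, width, height); // match the viewport to the surface
    }

    @Override
    public void onDrawFrame(GL10 gl) {
        gl.glClear(GL10.GL_COLOR_BUFFER_BIT);
    }
}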
/*
 * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package java.util;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.StreamCorruptedException;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;

import jdk.internal.util.ArraysSupport;

/**
 * The {@code Vector} class implements a growable array of
 * objects. Like an array, it contains components that can be
 * accessed using an integer index. However, the size of a
 * {@code Vector} can grow or shrink as needed to accommodate
 * adding and removing items after the {@code Vector} has been created.
 *
 * <p>Each vector tries to optimize storage management by maintaining a
 * {@code capacity} and a {@code capacityIncrement}. The
 * {@code capacity} is always at least as large as the vector
 * size; it is usually larger because as components are added to the
 * vector, the vector's storage increases in chunks the size of
 * {@code capacityIncrement}. An application can increase the
 * capacity of a vector before inserting a large number of
 * components; this reduces the amount of incremental reallocation.
 *
 * <p id="fail-fast">
 * The iterators returned by this class's {@link #iterator() iterator} and
 * {@link #listIterator(int) listIterator} methods are <em>fail-fast</em>:
 * if the vector is structurally modified at any time after the iterator is
 * created, in any way except through the iterator's own
 * {@link ListIterator#remove() remove} or
 * {@link ListIterator#add(Object) add} methods, the iterator will throw a
 * {@link ConcurrentModificationException}. Thus, in the face of
 * concurrent modification, the iterator fails quickly and cleanly, rather
 * than risking arbitrary, non-deterministic behavior at an undetermined
 * time in the future. The {@link Enumeration Enumerations} returned by
 * the {@link #elements() elements} method are <em>not</em> fail-fast; if the
 * Vector is structurally modified at any time after the enumeration is
 * created then the results of enumerating are undefined.
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification. Fail-fast iterators
 * throw {@code ConcurrentModificationException} on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
 *
 * <p>As of the Java 2 platform v1.2, this class was retrofitted to
 * implement the {@link List} interface, making it a member of the
 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
 * Java Collections Framework</a>. Unlike the new collection
 * implementations, {@code Vector} is synchronized. If a thread-safe
 * implementation is not needed, it is recommended to use {@link
 * ArrayList} in place of {@code Vector}.
 *
 * @param <E> Type of component elements
 *
 * @author  Lee Boynton
 * @author  Jonathan Payne
 * @see Collection
 * @see LinkedList
 * @since   1.0
 */
public class Vector<E>
    extends AbstractList<E>
    implements List<E>, RandomAccess, Cloneable, java.io.Serializable
{
    /**
     * The array buffer into which the components of the vector are
     * stored. The capacity of the vector is the length of this array buffer,
     * and is at least large enough to contain all the vector's elements.
     *
     * <p>Any array elements following the last element in the Vector are null.
     *
     * @serial
     */
    @SuppressWarnings("serial") // Conditionally serializable
    protected Object[] elementData;

    /**
     * The number of valid components in this {@code Vector} object.
     * Components {@code elementData[0]} through
     * {@code elementData[elementCount-1]} are the actual items.
     *
     * @serial
     */
    protected int elementCount;

    /**
     * The amount by which the capacity of the vector is automatically
     * incremented when its size becomes greater than its capacity. If
     * the capacity increment is less than or equal to zero, the capacity
     * of the vector is doubled each time it needs to grow.
     *
     * @serial
     */
    protected int capacityIncrement;

    /** use serialVersionUID from JDK 1.0.2 for interoperability */
    @java.io.Serial
    private static final long serialVersionUID = -2767605614048989439L;

    /**
     * Constructs an empty vector with the specified initial capacity and
     * capacity increment.
     *
     * @param initialCapacity the initial capacity of the vector
     * @param capacityIncrement the amount by which the capacity is
     *                          increased when the vector overflows
     * @throws IllegalArgumentException if the specified initial capacity
     *         is negative
     */
    public Vector(int initialCapacity, int capacityIncrement) {
        super();
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal Capacity: "+
                                               initialCapacity);
        this.elementData = new Object[initialCapacity];
        this.capacityIncrement = capacityIncrement;
    }

    /**
     * Constructs an empty vector with the specified initial capacity and
     * with its capacity increment equal to zero.
     *
     * @param initialCapacity the initial capacity of the vector
     * @throws IllegalArgumentException if the specified initial capacity
     *         is negative
     */
    public Vector(int initialCapacity) {
        this(initialCapacity, 0);
    }

    /**
     * Constructs an empty vector so that its internal data array
     * has size {@code 10} and its standard capacity increment is
     * zero.
     */
    public Vector() {
        this(10);
    }

    /**
     * Constructs a vector containing the elements of the specified
     * collection, in the order they are returned by the collection's
     * iterator.
     *
     * @param c the collection whose elements are to be placed into this
     *          vector
     * @throws NullPointerException if the specified collection is null
     * @since 1.2
     */
    public Vector(Collection<? extends E> c) {
        Object[] a = c.toArray();
        elementCount = a.length;
        if (c.getClass() == ArrayList.class) {
            elementData = a;
        } else {
            elementData = Arrays.copyOf(a, elementCount, Object[].class);
        }
    }

    /**
     * Copies the components of this vector into the specified array.
     * The item at index {@code k} in this vector is copied into
     * component {@code k} of {@code anArray}.
     *
     * @param anArray the array into which the components get copied
     * @throws NullPointerException if the given array is null
     * @throws IndexOutOfBoundsException if the specified array is not
     *         large enough to hold all the components of this vector
     * @throws ArrayStoreException if a component of this vector is not of
     *         a runtime type that can be stored in the specified array
     * @see #toArray(Object[])
     */
    public synchronized void copyInto(Object[] anArray) {
        System.arraycopy(elementData, 0, anArray, 0, elementCount);
    }

    /**
     * Trims the capacity of this vector to be the vector's current
     * size. If the capacity of this vector is larger than its current
     * size, then the capacity is changed to equal the size by replacing
     * its internal data array, kept in the field {@code elementData},
     * with a smaller one. An application can use this operation to
     * minimize the storage of a vector.
     */
    public synchronized void trimToSize() {
        modCount++;
        int oldCapacity = elementData.length;
        if (elementCount < oldCapacity) {
            elementData = Arrays.copyOf(elementData, elementCount);
        }
    }

    /**
     * Increases the capacity of this vector, if necessary, to ensure
     * that it can hold at least the number of components specified by
     * the minimum capacity argument.
     *
     * <p>If the current capacity of this vector is less than
     * {@code minCapacity}, then its capacity is increased by replacing its
     * internal data array, kept in the field {@code elementData}, with a
     * larger one.  The size of the new data array will be the old size plus
     * {@code capacityIncrement}, unless the value of
     * {@code capacityIncrement} is less than or equal to zero, in which case
     * the new capacity will be twice the old capacity; but if this new size
     * is still smaller than {@code minCapacity}, then the new capacity will
     * be {@code minCapacity}.
     *
     * @param minCapacity the desired minimum capacity
     */
    public synchronized void ensureCapacity(int minCapacity) {
        if (minCapacity > 0) {
            modCount++;
            if (minCapacity > elementData.length)
                grow(minCapacity);
        }
    }

    /**
     * Increases the capacity to ensure that it can hold at least the
     * number of elements specified by the minimum capacity argument.
     *
     * @param minCapacity the desired minimum capacity
     * @throws OutOfMemoryError if minCapacity is less than zero
     */
    private Object[] grow(int minCapacity) {
        int oldCapacity = elementData.length;
        int newCapacity = ArraysSupport.newLength(oldCapacity,
                minCapacity - oldCapacity, /* minimum growth */
                capacityIncrement > 0 ? capacityIncrement : oldCapacity
                                           /* preferred growth */);
        return elementData = Arrays.copyOf(elementData, newCapacity);
    }

    private Object[] grow() {
        return grow(elementCount + 1);
    }

    /**
     * Sets the size of this vector. If the new size is greater than the
     * current size, new {@code null} items are added to the end of
     * the vector. If the new size is less than the current size, all
     * components at index {@code newSize} and greater are discarded.
     *
     * @param newSize the new size of this vector
     * @throws ArrayIndexOutOfBoundsException if the new size is negative
     */
    public synchronized void setSize(int newSize) {
        modCount++;
        if (newSize > elementData.length)
            grow(newSize);
        final Object[] es = elementData;
        for (int to = elementCount, i = newSize; i < to; i++)
            es[i] = null;
        elementCount = newSize;
    }

    /**
     * Returns the current capacity of this vector.
     *
     * @return the current capacity (the length of its internal
     *         data array, kept in the field {@code elementData}
     *         of this vector)
     */
    public synchronized int capacity() {
        return elementData.length;
    }

    /**
     * Returns the number of components in this vector.
     *
     * @return the number of components in this vector
     */
    public synchronized int size() {
        return elementCount;
    }

    /**
     * Tests if this vector has no components.
     *
     * @return {@code true} if and only if this vector has
     *         no components, that is, its size is zero;
     *         {@code false} otherwise.
     */
    public synchronized boolean isEmpty() {
        return elementCount == 0;
    }

    /**
     * Returns an enumeration of the components of this vector. The
     * returned {@code Enumeration} object will generate all items in
     * this vector. The first item generated is the item at index {@code 0},
     * then the item at index {@code 1}, and so on. If the vector is
     * structurally modified while enumerating over the elements then the
     * results of enumerating are undefined.
     *
     * @return an enumeration of the components of this vector
     * @see Iterator
     */
    public Enumeration<E> elements() {
        return new Enumeration<E>() {
            int count = 0;

            public boolean hasMoreElements() {
                return count < elementCount;
            }

            public E nextElement() {
                synchronized (Vector.this) {
                    if (count < elementCount) {
                        return elementData(count++);
                    }
                }
                throw new NoSuchElementException("Vector Enumeration");
            }
        };
    }

    /**
     * Returns {@code true} if this vector contains the specified element.
     * More formally, returns {@code true} if and only if this vector
     * contains at least one element {@code e} such that
     * {@code Objects.equals(o, e)}.
     *
     * @param o element whose presence in this vector is to be tested
     * @return {@code true} if this vector contains the specified element
     */
    public boolean contains(Object o) {
        return indexOf(o, 0) >= 0;
    }

    /**
     * Returns the index of the first occurrence of the specified element
     * in this vector, or -1 if this vector does not contain the element.
     * More formally, returns the lowest index {@code i} such that
     * {@code Objects.equals(o, get(i))},
     * or -1 if there is no such index.
     *
     * @param o element to search for
     * @return the index of the first occurrence of the specified element in
     *         this vector, or -1 if this vector does not contain the element
     */
    public int indexOf(Object o) {
        return indexOf(o, 0);
    }

    /**
     * Returns the index of the first occurrence of the specified element in
     * this vector, searching forwards from {@code index}, or returns -1 if
     * the element is not found.
     * More formally, returns the lowest index {@code i} such that
     * {@code (i >= index && Objects.equals(o, get(i)))},
     * or -1 if there is no such index.
     *
     * @param o element to search for
     * @param index index to start searching from
     * @return the index of the first occurrence of the element in
     *         this vector at position {@code index} or later in the vector;
     *         {@code -1} if the element is not found.
     * @throws IndexOutOfBoundsException if the specified index is negative
     * @see Object#equals(Object)
     */
    public synchronized int indexOf(Object o, int index) {
        if (o == null) {
            for (int i = index ; i < elementCount ; i++)
                if (elementData[i]==null)
                    return i;
        } else {
            for (int i = index ; i < elementCount ; i++)
                if (o.equals(elementData[i]))
                    return i;
        }
        return -1;
    }

    /**
     * Returns the index of the last occurrence of the specified element
     * in this vector, or -1 if this vector does not contain the element.
     * More formally, returns the highest index {@code i} such that
     * {@code Objects.equals(o, get(i))},
     * or -1 if there is no such index.
     *
     * @param o element to search for
     * @return the index of the last occurrence of the specified element in
     *         this vector, or -1 if this vector does not contain the element
     */
    public synchronized int lastIndexOf(Object o) {
        return lastIndexOf(o, elementCount-1);
    }

    /**
     * Returns the index of the last occurrence of the specified element in
     * this vector, searching backwards from {@code index}, or returns -1 if
     * the element is not found.
     * More formally, returns the highest index {@code i} such that
     * {@code (i <= index && Objects.equals(o, get(i)))},
     * or -1 if there is no such index.
     *
     * @param o element to search for
     * @param index index to start searching backwards from
     * @return the index of the last occurrence of the element at position
     *         less than or equal to {@code index} in this vector;
     *         -1 if the element is not found.
     * @throws IndexOutOfBoundsException if the specified index is greater
     *         than or equal to the current size of this vector
     */
    public synchronized int lastIndexOf(Object o, int index) {
        if (index >= elementCount)
            throw new IndexOutOfBoundsException(index + " >= "+ elementCount);

        if (o == null) {
            for (int i = index; i >= 0; i--)
                if (elementData[i]==null)
                    return i;
        } else {
            for (int i = index; i >= 0; i--)
                if (o.equals(elementData[i]))
                    return i;
        }
        return -1;
    }

    /**
     * Returns the component at the specified index.
     *
     * <p>This method is identical in functionality to the {@link #get(int)}
     * method (which is part of the {@link List} interface).
     *
     * @param index an index into this vector
     * @return the component at the specified index
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index >= size()})
     */
    public synchronized E elementAt(int index) {
        if (index >= elementCount) {
            throw new ArrayIndexOutOfBoundsException(index + " >= " + elementCount);
        }

        return elementData(index);
    }

    /**
     * Returns the first component (the item at index {@code 0}) of
     * this vector.
     *
     * @return the first component of this vector
     * @throws NoSuchElementException if this vector has no components
     */
    public synchronized E firstElement() {
        if (elementCount == 0) {
            throw new NoSuchElementException();
        }
        return elementData(0);
    }

    /**
     * Returns the last component of the vector.
     *
     * @return the last component of the vector, i.e., the component at index
     *         {@code size() - 1}
     * @throws NoSuchElementException if this vector is empty
     */
    public synchronized E lastElement() {
        if (elementCount == 0) {
            throw new NoSuchElementException();
        }
        return elementData(elementCount - 1);
    }

    /**
     * Sets the component at the specified {@code index} of this
     * vector to be the specified object. The previous component at that
     * position is discarded.
     *
     * <p>The index must be a value greater than or equal to {@code 0}
     * and less than the current size of the vector.
     *
     * <p>This method is identical in functionality to the
     * {@link #set(int, Object) set(int, E)}
     * method (which is part of the {@link List} interface). Note that the
     * {@code set} method reverses the order of the parameters, to more closely
     * match array usage. Note also that the {@code set} method returns the
     * old value that was stored at the specified position.
     *
     * @param obj what the component is to be set to
     * @param index the specified index
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index >= size()})
     */
    public synchronized void setElementAt(E obj, int index) {
        if (index >= elementCount) {
            throw new ArrayIndexOutOfBoundsException(index + " >= " +
                                                     elementCount);
        }
        elementData[index] = obj;
    }

    /**
     * Deletes the component at the specified index. Each component in
     * this vector with an index greater or equal to the specified
     * {@code index} is shifted downward to have an index one
     * smaller than the value it had previously. The size of this vector
     * is decreased by {@code 1}.
     *
     * <p>The index must be a value greater than or equal to {@code 0}
     * and less than the current size of the vector.
     *
     * <p>This method is identical in functionality to the {@link #remove(int)}
     * method (which is part of the {@link List} interface). Note that the
     * {@code remove} method returns the old value that was stored at the
     * specified position.
     *
     * @param index the index of the object to remove
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index >= size()})
     */
    public synchronized void removeElementAt(int index) {
        if (index >= elementCount) {
            throw new ArrayIndexOutOfBoundsException(index + " >= " +
                                                     elementCount);
        }
        else if (index < 0) {
            throw new ArrayIndexOutOfBoundsException(index);
        }
        int j = elementCount - index - 1;
        if (j > 0) {
            System.arraycopy(elementData, index + 1, elementData, index, j);
        }
        modCount++;
        elementCount--;
        elementData[elementCount] = null; /* to let gc do its work */
    }

    /**
     * Inserts the specified object as a component in this vector at the
     * specified {@code index}. Each component in this vector with
     * an index greater or equal to the specified {@code index} is
     * shifted upward to have an index one greater than the value it had
     * previously.
     *
     * <p>The index must be a value greater than or equal to {@code 0}
     * and less than or equal to the current size of the vector. (If the
     * index is equal to the current size of the vector, the new element
     * is appended to the Vector.)
     *
     * <p>This method is identical in functionality to the
     * {@link #add(int, Object) add(int, E)}
     * method (which is part of the {@link List} interface). Note that the
     * {@code add} method reverses the order of the parameters, to more closely
     * match array usage.
     *
     * @param obj the component to insert
     * @param index where to insert the new component
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index > size()})
     */
    public synchronized void insertElementAt(E obj, int index) {
        if (index > elementCount) {
            throw new ArrayIndexOutOfBoundsException(index
                                                     + " > " + elementCount);
        }
        modCount++;
        final int s = elementCount;
        Object[] elementData = this.elementData;
        if (s == elementData.length)
            elementData = grow();
        System.arraycopy(elementData, index,
                         elementData, index + 1,
                         s - index);
        elementData[index] = obj;
        elementCount = s + 1;
    }

    /**
     * Adds the specified component to the end of this vector,
     * increasing its size by one. The capacity of this vector is
     * increased if its size becomes greater than its capacity.
     *
     * <p>This method is identical in functionality to the
     * {@link #add(Object) add(E)}
     * method (which is part of the {@link List} interface).
     *
     * @param obj the component to be added
     */
    public synchronized void addElement(E obj) {
        modCount++;
        add(obj, elementData, elementCount);
    }

    /**
     * Removes the first (lowest-indexed) occurrence of the argument
     * from this vector. If the object is found in this vector, each
     * component in the vector with an index greater or equal to the
     * object's index is shifted downward to have an index one smaller
     * than the value it had previously.
     *
     * <p>This method is identical in functionality to the
     * {@link #remove(Object)} method (which is part of the
     * {@link List} interface).
     *
     * @param obj the component to be removed
     * @return {@code true} if the argument was a component of this
     *         vector; {@code false} otherwise.
     */
    public synchronized boolean removeElement(Object obj) {
        modCount++;
        int i = indexOf(obj);
        if (i >= 0) {
            removeElementAt(i);
            return true;
        }
        return false;
    }

    /**
     * Removes all components from this vector and sets its size to zero.
     *
     * <p>This method is identical in functionality to the {@link #clear}
     * method (which is part of the {@link List} interface).
     */
    public synchronized void removeAllElements() {
        final Object[] es = elementData;
        for (int to = elementCount, i = elementCount = 0; i < to; i++)
            es[i] = null;
        modCount++;
    }

    /**
     * Returns a clone of this vector. The copy will contain a
     * reference to a clone of the internal data array, not a reference
     * to the original internal data array of this {@code Vector} object.
     *
     * @return a clone of this vector
     */
    public synchronized Object clone() {
        try {
            @SuppressWarnings("unchecked")
            Vector<E> v = (Vector<E>) super.clone();
            v.elementData = Arrays.copyOf(elementData, elementCount);
            v.modCount = 0;
            return v;
        } catch (CloneNotSupportedException e) {
            // this shouldn't happen, since we are Cloneable
            throw new InternalError(e);
        }
    }

    /**
     * Returns an array containing all of the elements in this Vector
     * in the correct order.
     *
     * @since 1.2
     */
    public synchronized Object[] toArray() {
        return Arrays.copyOf(elementData, elementCount);
    }

    /**
     * Returns an array containing all of the elements in this Vector in the
     * correct order; the runtime type of the returned array is that of the
     * specified array. If the Vector fits in the specified array, it is
     * returned therein. Otherwise, a new array is allocated with the runtime
     * type of the specified array and the size of this Vector.
     *
     * <p>If the Vector fits in the specified array with room to spare
     * (i.e., the array has more elements than the Vector),
     * the element in the array immediately following the end of the
     * Vector is set to null. (This is useful in determining the length
     * of the Vector <em>only</em> if the caller knows that the Vector
     * does not contain any null elements.)
     *
     * @param <T> type of array elements. The same type as {@code <E>} or a
     * supertype of {@code <E>}.
     * @param a the array into which the elements of the Vector are to
     *          be stored, if it is big enough; otherwise, a new array of the
     *          same runtime type is allocated for this purpose.
     * @return an array containing the elements of the Vector
     * @throws ArrayStoreException if the runtime type of a, {@code <T>}, is not
     * a supertype of the runtime type, {@code <E>}, of every element in this
     * Vector
     * @throws NullPointerException if the given array is null
     * @since 1.2
     */
    @SuppressWarnings("unchecked")
    public synchronized <T> T[] toArray(T[] a) {
        if (a.length < elementCount)
            return (T[]) Arrays.copyOf(elementData, elementCount, a.getClass());

        System.arraycopy(elementData, 0, a, 0, elementCount);

        if (a.length > elementCount)
            a[elementCount] = null;

        return a;
    }

    // Positional Access Operations

    @SuppressWarnings("unchecked")
    E elementData(int index) {
        return (E) elementData[index];
    }

    @SuppressWarnings("unchecked")
    static <E> E elementAt(Object[] es, int index) {
        return (E) es[index];
    }

    /**
     * Returns the element at the specified position in this Vector.
     *
     * @param index index of the element to return
     * @return object at the specified index
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index >= size()})
     * @since 1.2
     */
    public synchronized E get(int index) {
        if (index >= elementCount)
            throw new ArrayIndexOutOfBoundsException(index);

        return elementData(index);
    }

    /**
     * Replaces the element at the specified position in this Vector with the
     * specified element.
     *
     * @param index index of the element to replace
     * @param element element to be stored at the specified position
     * @return the element previously at the specified position
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index >= size()})
     * @since 1.2
     */
    public synchronized E set(int index, E element) {
        if (index >= elementCount)
            throw new ArrayIndexOutOfBoundsException(index);

        E oldValue = elementData(index);
        elementData[index] = element;
        return oldValue;
    }

    /**
     * This helper method split out from add(E) to keep method
     * bytecode size under 35 (the -XX:MaxInlineSize default value),
     * which helps when add(E) is called in a C1-compiled loop.
     */
    private void add(E e, Object[] elementData, int s) {
        if (s == elementData.length)
            elementData = grow();
        elementData[s] = e;
        elementCount = s + 1;
    }

    /**
     * Appends the specified element to the end of this Vector.
     *
     * @param e element to be appended to this Vector
     * @return {@code true} (as specified by {@link Collection#add})
     * @since 1.2
     */
    public synchronized boolean add(E e) {
        modCount++;
        add(e, elementData, elementCount);
        return true;
    }

    /**
     * Removes the first occurrence of the specified element in this Vector
     * If the Vector does not contain the element, it is unchanged. More
     * formally, removes the element with the lowest index i such that
     * {@code Objects.equals(o, get(i))} (if such
     * an element exists).
     *
     * @param o element to be removed from this Vector, if present
     * @return true if the Vector contained the specified element
     * @since 1.2
     */
    public boolean remove(Object o) {
        return removeElement(o);
    }

    /**
     * Inserts the specified element at the specified position in this Vector.
     * Shifts the element currently at that position (if any) and any
     * subsequent elements to the right (adds one to their indices).
     *
     * @param index index at which the specified element is to be inserted
     * @param element element to be inserted
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index > size()})
     * @since 1.2
     */
    public void add(int index, E element) {
        insertElementAt(element, index);
    }

    /**
     * Removes the element at the specified position in this Vector.
     * Shifts any subsequent elements to the left (subtracts one from their
     * indices). Returns the element that was removed from the Vector.
     *
     * @param index the index of the element to be removed
     * @return element that was removed
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index >= size()})
     * @since 1.2
     */
    public synchronized E remove(int index) {
        modCount++;
        if (index >= elementCount)
            throw new ArrayIndexOutOfBoundsException(index);
        E oldValue = elementData(index);

        int numMoved = elementCount - index - 1;
        if (numMoved > 0)
            System.arraycopy(elementData, index+1, elementData, index,
                             numMoved);
        elementData[--elementCount] = null; // Let gc do its work

        return oldValue;
    }

    /**
     * Removes all of the elements from this Vector. The Vector will
     * be empty after this call returns (unless it throws an exception).
     *
     * @since 1.2
     */
    public void clear() {
        removeAllElements();
    }

    // Bulk Operations

    /**
     * Returns true if this Vector contains all of the elements in the
     * specified Collection.
     *
     * @param c a collection whose elements will be tested for containment
     *          in this Vector
     * @return true if this Vector contains all of the elements in the
     *         specified collection
     * @throws NullPointerException if the specified collection is null
     */
    public synchronized boolean containsAll(Collection<?> c) {
        return super.containsAll(c);
    }

    /**
     * Appends all of the elements in the specified Collection to the end of
     * this Vector, in the order that they are returned by the specified
     * Collection's Iterator. The behavior of this operation is undefined if
     * the specified Collection is modified while the operation is in progress.
     * (This implies that the behavior of this call is undefined if the
     * specified Collection is this Vector, and this Vector is nonempty.)
     *
     * @param c elements to be inserted into this Vector
     * @return {@code true} if this Vector changed as a result of the call
     * @throws NullPointerException if the specified collection is null
     * @since 1.2
     */
    public boolean addAll(Collection<? extends E> c) {
        Object[] a = c.toArray();
        modCount++;
        int numNew = a.length;
        if (numNew == 0)
            return false;
        synchronized (this) {
            Object[] elementData = this.elementData;
            final int s = elementCount;
            if (numNew > elementData.length - s)
                elementData = grow(s + numNew);
            System.arraycopy(a, 0, elementData, s, numNew);
            elementCount = s + numNew;
            return true;
        }
    }

    /**
     * Removes from this Vector all of its elements that are contained in the
     * specified Collection.
     *
     * @param c a collection of elements to be removed from the Vector
     * @return true if this Vector changed as a result of the call
     * @throws ClassCastException if the types of one or more elements
     *         in this vector are incompatible with the specified
     *         collection
     *         (<a href="Collection.html#optional-restrictions">optional</a>)
     * @throws NullPointerException if this vector contains one or more null
     *         elements and the specified collection does not support null
     *         elements
     *         (<a href="Collection.html#optional-restrictions">optional</a>),
     *         or if the specified collection is null
     * @since 1.2
     */
    public boolean removeAll(Collection<?> c) {
        Objects.requireNonNull(c);
        return bulkRemove(e -> c.contains(e));
    }

    /**
     * Retains only the elements in this Vector that are contained in the
     * specified Collection. In other words, removes from this Vector all
     * of its elements that are not contained in the specified Collection.
     *
     * @param c a collection of elements to be retained in this Vector
     *          (all other elements are removed)
     * @return true if this Vector changed as a result of the call
     * @throws ClassCastException if the types of one or more elements
     *         in this vector are incompatible with the specified
     *         collection
     *         (<a href="Collection.html#optional-restrictions">optional</a>)
     * @throws NullPointerException if this vector contains one or more null
     *         elements and the specified collection does not support null
     *         elements
     *         (<a href="Collection.html#optional-restrictions">optional</a>),
     *         or if the specified collection is null
     * @since 1.2
     */
    public boolean retainAll(Collection<?> c) {
        Objects.requireNonNull(c);
        return bulkRemove(e -> !c.contains(e));
    }

    /**
     * @throws NullPointerException {@inheritDoc}
     */
    @Override
    public boolean removeIf(Predicate<? super E> filter) {
        Objects.requireNonNull(filter);
        return bulkRemove(filter);
    }

    // A tiny bit set implementation

    private static long[] nBits(int n) {
        return new long[((n - 1) >> 6) + 1];
    }
    private static void setBit(long[] bits, int i) {
        bits[i >> 6] |= 1L << i;
    }
    private static boolean isClear(long[] bits, int i) {
        return (bits[i >> 6] & (1L << i)) == 0;
    }

    private synchronized boolean bulkRemove(Predicate<? super E> filter) {
        int expectedModCount = modCount;
        final Object[] es = elementData;
        final int end = elementCount;
        int i;
        // Optimize for initial run of survivors
        for (i = 0; i < end && !filter.test(elementAt(es, i)); i++)
            ;
        // Tolerate predicates that reentrantly access the collection for
        // read (but writers still get CME), so traverse once to find
        // elements to delete, a second pass to physically expunge.
        if (i < end) {
            final int beg = i;
            final long[] deathRow = nBits(end - beg);
            deathRow[0] = 1L;   // set bit 0
            for (i = beg + 1; i < end; i++)
                if (filter.test(elementAt(es, i)))
                    setBit(deathRow, i - beg);
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            modCount++;
            int w = beg;
            for (i = beg; i < end; i++)
                if (isClear(deathRow, i - beg))
                    es[w++] = es[i];
            for (i = elementCount = w; i < end; i++)
                es[i] = null;
            return true;
        } else {
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            return false;
        }
    }

    /**
     * Inserts all of the elements in the specified Collection into this
     * Vector at the specified position. Shifts the element currently at
     * that position (if any) and any subsequent elements to the right
     * (increases their indices). The new elements will appear in the Vector
     * in the order that they are returned by the specified Collection's
     * iterator.
     *
     * @param index index at which to insert the first element from the
     *              specified collection
     * @param c elements to be inserted into this Vector
     * @return {@code true} if this Vector changed as a result of the call
     * @throws ArrayIndexOutOfBoundsException if the index is out of range
     *         ({@code index < 0 || index > size()})
     * @throws NullPointerException if the specified collection is null
     * @since 1.2
     */
    public synchronized boolean addAll(int index, Collection<? extends E> c) {
        if (index < 0 || index > elementCount)
            throw new ArrayIndexOutOfBoundsException(index);

        Object[] a = c.toArray();
        modCount++;
        int numNew = a.length;
        if (numNew == 0)
            return false;
        Object[] elementData = this.elementData;
        final int s = elementCount;
        if (numNew > elementData.length - s)
            elementData = grow(s + numNew);

        int numMoved = s - index;
        if (numMoved > 0)
            System.arraycopy(elementData, index,
                             elementData, index + numNew,
                             numMoved);
        System.arraycopy(a, 0, elementData, index, numNew);
        elementCount = s + numNew;
        return true;
    }

    /**
     * Compares the specified Object with this Vector for equality. Returns
     * true if and only if the specified Object is also a List, both Lists
     * have the same size, and all corresponding pairs of elements in the two
     * Lists are <em>equal</em>. (Two elements {@code e1} and
     * {@code e2} are <em>equal</em> if {@code Objects.equals(e1, e2)}.)
     * In other words, two Lists are defined to be
     * equal if they contain the same elements in the same order.
     *
     * @param o the Object to be compared for equality with this Vector
     * @return true if the specified Object is equal to this Vector
     */
    public synchronized boolean equals(Object o) {
        return super.equals(o);
    }

    /**
     * Returns the hash code value for this Vector.
     */
    public synchronized int hashCode() {
        return super.hashCode();
    }

    /**
     * Returns a string representation of this Vector, containing
     * the String representation of each element.
     */
    public synchronized String toString() {
        return super.toString();
    }

    /**
     * Returns a view of the portion of this List between fromIndex,
     * inclusive, and toIndex, exclusive. (If fromIndex and toIndex are
     * equal, the returned List is empty.) The returned List is backed by this
     * List, so changes in the returned List are reflected in this List, and
     * vice-versa. The returned List supports all of the optional List
     * operations supported by this List.
     *
     * <p>This method eliminates the need for explicit range operations (of
     * the sort that commonly exist for arrays). Any operation that expects
     * a List can be used as a range operation by operating on a subList view
     * instead of a whole List. For example, the following idiom
     * removes a range of elements from a List:
     * <pre>
     *      list.subList(from, to).clear();
     * </pre>
     * Similar idioms may be constructed for indexOf and lastIndexOf,
     * and all of the algorithms in the Collections class can be applied to
     * a subList.
     *
     * <p>The semantics of the List returned by this method become undefined if
     * the backing list (i.e., this List) is <i>structurally modified</i> in
     * any way other than via the returned List. (Structural modifications are
     * those that change the size of the List, or otherwise perturb it in such
     * a fashion that iterations in progress may yield incorrect results.)
     *
     * @param fromIndex low endpoint (inclusive) of the subList
     * @param toIndex high endpoint (exclusive) of the subList
     * @return a view of the specified range within this List
     * @throws IndexOutOfBoundsException if an endpoint index value is out of range
     *         {@code (fromIndex < 0 || toIndex > size)}
     * @throws IllegalArgumentException if the endpoint indices are out of order
     *         {@code (fromIndex > toIndex)}
     */
    public synchronized List<E> subList(int fromIndex, int toIndex) {
        return Collections.synchronizedList(super.subList(fromIndex, toIndex),
                                            this);
    }

    /**
     * Removes from this list all of the elements whose index is between
     * {@code fromIndex}, inclusive, and {@code toIndex}, exclusive.
     * Shifts any succeeding elements to the left (reduces their index).
     * This call shortens the list by {@code (toIndex - fromIndex)} elements.
     * (If {@code toIndex==fromIndex}, this operation has no effect.)
     */
    protected synchronized void removeRange(int fromIndex, int toIndex) {
        modCount++;
        shiftTailOverGap(elementData, fromIndex, toIndex);
    }

    /** Erases the gap from lo to hi, by sliding down following elements. */
    private void shiftTailOverGap(Object[] es, int lo, int hi) {
        System.arraycopy(es, hi, es, lo, elementCount - hi);
        for (int to = elementCount, i = (elementCount -= hi - lo); i < to; i++)
            es[i] = null;
    }

    /**
     * Loads a {@code Vector} instance from a stream
     * (that is, deserializes it).
     * This method performs checks to ensure the consistency
     * of the fields.
     *
     * @param in the stream
     * @throws java.io.IOException if an I/O error occurs
     * @throws ClassNotFoundException if the stream contains data
     *         of a non-existing class
     */
    @java.io.Serial
    private void readObject(ObjectInputStream in)
            throws IOException, ClassNotFoundException {
        ObjectInputStream.GetField gfields = in.readFields();
        int count = gfields.get("elementCount", 0);
        Object[] data = (Object[])gfields.get("elementData", null);
        if (count < 0 || data == null || count > data.length) {
            throw new StreamCorruptedException("Inconsistent vector internals");
        }
        elementCount = count;
        elementData = data.clone();
    }

    /**
     * Saves the state of the {@code Vector} instance to a stream
     * (that is, serializes it).
     * This method performs synchronization to ensure the consistency
     * of the serialized data.
     *
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     */
    @java.io.Serial
    private void writeObject(java.io.ObjectOutputStream s)
            throws java.io.IOException {
        final java.io.ObjectOutputStream.PutField fields = s.putFields();
        final Object[] data;
        synchronized (this) {
            fields.put("capacityIncrement", capacityIncrement);
            fields.put("elementCount", elementCount);
            data = elementData.clone();
        }
        fields.put("elementData", data);
        s.writeFields();
    }

    /**
     * Returns a list iterator over the elements in this list (in proper
     * sequence), starting at the specified position in the list.
     * The specified index indicates the first element that would be
     * returned by an initial call to {@link ListIterator#next next}.
     * An initial call to {@link ListIterator#previous previous} would
     * return the element with the specified index minus one.
     *
     * <p>The returned list iterator is <a href="#fail-fast"><i>fail-fast</i></a>.
     *
     * @throws IndexOutOfBoundsException {@inheritDoc}
     */
    public synchronized ListIterator<E> listIterator(int index) {
        if (index < 0 || index > elementCount)
            throw new IndexOutOfBoundsException("Index: "+index);
        return new ListItr(index);
    }

    /**
     * Returns a list iterator over the elements in this list (in proper
     * sequence).
     *
     * <p>The returned list iterator is <a href="#fail-fast"><i>fail-fast</i></a>.
     *
     * @see #listIterator(int)
     */
    public synchronized ListIterator<E> listIterator() {
        return new ListItr(0);
    }

    /**
     * Returns an iterator over the elements in this list in proper sequence.
     *
     * <p>The returned iterator is <a href="#fail-fast"><i>fail-fast</i></a>.
     *
     * @return an iterator over the elements in this list in proper sequence
     */
    public synchronized Iterator<E> iterator() {
        return new Itr();
    }

    /**
     * An optimized version of AbstractList.Itr
     */
    private class Itr implements Iterator<E> {
        int cursor;       // index of next element to return
        int lastRet = -1; // index of last element returned; -1 if no such
        int expectedModCount = modCount;

        public boolean hasNext() {
            // Racy but within spec, since modifications are checked
            // within or after synchronization in next/previous
            return cursor != elementCount;
        }

        public E next() {
            synchronized (Vector.this) {
                checkForComodification();
                int i = cursor;
                if (i >= elementCount)
                    throw new NoSuchElementException();
                cursor = i + 1;
                return elementData(lastRet = i);
            }
        }

        public void remove() {
            if (lastRet == -1)
                throw new IllegalStateException();
            synchronized (Vector.this) {
                checkForComodification();
                Vector.this.remove(lastRet);
                expectedModCount = modCount;
            }
            cursor = lastRet;
            lastRet = -1;
        }

        @Override
        public void forEachRemaining(Consumer<? super E> action) {
            Objects.requireNonNull(action);
            synchronized (Vector.this) {
                final int size = elementCount;
                int i = cursor;
                if (i >= size) {
                    return;
                }
                final Object[] es = elementData;
                if (i >= es.length)
                    throw new ConcurrentModificationException();
                while (i < size && modCount == expectedModCount)
                    action.accept(elementAt(es, i++));
                // update once at end of iteration to reduce heap write traffic
                cursor = i;
                lastRet = i - 1;
                checkForComodification();
            }
        }

        final void checkForComodification() {
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
        }
    }

    /**
     * An optimized version of AbstractList.ListItr
     */
    final class ListItr extends Itr implements ListIterator<E> {
        ListItr(int index) {
            super();
            cursor = index;
        }

        public boolean hasPrevious() {
            return cursor != 0;
        }

        public int nextIndex() {
            return cursor;
        }

        public int previousIndex() {
            return cursor - 1;
        }

        public E previous() {
            synchronized (Vector.this) {
                checkForComodification();
                int i = cursor - 1;
                if (i < 0)
                    throw new NoSuchElementException();
                cursor = i;
                return elementData(lastRet = i);
            }
        }

        public void set(E e) {
            if (lastRet == -1)
                throw new IllegalStateException();
            synchronized (Vector.this) {
                checkForComodification();
                Vector.this.set(lastRet, e);
            }
        }

        public void add(E e) {
            int i = cursor;
            synchronized (Vector.this) {
                checkForComodification();
                Vector.this.add(i, e);
                expectedModCount = modCount;
            }
            cursor = i + 1;
            lastRet = -1;
        }
    }

    /**
     * @throws NullPointerException {@inheritDoc}
     */
    @Override
    public synchronized void forEach(Consumer<? super E> action) {
        Objects.requireNonNull(action);
        final int expectedModCount = modCount;
        final Object[] es = elementData;
        final int size = elementCount;
        for (int i = 0; modCount == expectedModCount && i < size; i++)
            action.accept(elementAt(es, i));
        if (modCount != expectedModCount)
            throw new ConcurrentModificationException();
    }

    /**
     * @throws NullPointerException {@inheritDoc}
     */
    @Override
    public synchronized void replaceAll(UnaryOperator<E> operator) {
        Objects.requireNonNull(operator);
        final int expectedModCount = modCount;
        final Object[] es = elementData;
        final int size = elementCount;
        for (int i = 0; modCount == expectedModCount && i < size; i++)
            es[i] = operator.apply(elementAt(es, i));
        if (modCount != expectedModCount)
            throw new ConcurrentModificationException();
        // TODO(8203662): remove increment of modCount from ...
        modCount++;
    }

    @SuppressWarnings("unchecked")
    @Override
    public synchronized void sort(Comparator<? super E> c) {
        final int expectedModCount = modCount;
        Arrays.sort((E[]) elementData, 0, elementCount, c);
        if (modCount != expectedModCount)
            throw new ConcurrentModificationException();
        modCount++;
    }

    /**
     * Creates a <em><a href="Spliterator.html#binding">late-binding</a></em>
     * and <em>fail-fast</em> {@link Spliterator} over the elements in this
     * list.
     *
     * <p>The {@code Spliterator} reports {@link Spliterator#SIZED},
     * {@link Spliterator#SUBSIZED}, and {@link Spliterator#ORDERED}.
     * Overriding implementations should document the reporting of additional
     * characteristic values.
     *
     * @return a {@code Spliterator} over the elements in this list
     * @since 1.8
     */
    @Override
    public Spliterator<E> spliterator() {
        return new VectorSpliterator(null, 0, -1, 0);
    }

    /** Similar to ArrayList Spliterator */
    final class VectorSpliterator implements Spliterator<E> {
        private Object[] array;
        private int index; // current index, modified on advance/split
        private int fence; // -1 until used; then one past last index
        private int expectedModCount; // initialized when fence set

        /** Creates new spliterator covering the given range. */
        VectorSpliterator(Object[] array, int origin, int fence,
                          int expectedModCount) {
            this.array = array;
            this.index = origin;
            this.fence = fence;
            this.expectedModCount = expectedModCount;
        }

        private int getFence() { // initialize on first use
            int hi;
            if ((hi = fence) < 0) {
                synchronized (Vector.this) {
                    array = elementData;
                    expectedModCount = modCount;
                    hi = fence = elementCount;
                }
            }
            return hi;
        }

        public Spliterator<E> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid) ? null :
                new VectorSpliterator(array, lo, index = mid, expectedModCount);
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<? super E> action) {
            Objects.requireNonNull(action);
            int i;
            if (getFence() > (i = index)) {
                index = i + 1;
                action.accept((E)array[i]);
                if (modCount != expectedModCount)
                    throw new ConcurrentModificationException();
                return true;
            }
            return false;
        }

        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<? super E> action) {
            Objects.requireNonNull(action);
            final int hi = getFence();
            final Object[] a = array;
            int i;
            for (i = index, index = hi; i < hi; i++)
                action.accept((E) a[i]);
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
        }

        public long estimateSize() {
            return getFence() - index;
        }

        public int characteristics() {
            return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED;
        }
    }

    void checkInvariants() {
        // assert elementCount >= 0;
        // assert elementCount == elementData.length || elementData[elementCount] == null;
    }
}
mirkosertic/Bytecoder
classlib/java.base/src/main/resources/META-INF/modules/java.base/classes/java/util/Vector.java
Java
apache-2.0
56,351
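The Itr/ListItr machinery above implements fail-fast iteration: every structural mutation of the Vector bumps modCount, and checkForComodification() compares it against the snapshot (expectedModCount) taken when the iterator was created. A minimal, self-contained sketch of the observable behavior follows; it is not part of the file above, and the class name is invented for illustration.

import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Vector;

public class VectorFailFastDemo {
    public static void main(String[] args) {
        Vector<String> v = new Vector<>();
        v.add("a");
        v.add("b");

        Iterator<String> it = v.iterator();
        it.next();   // fine: modCount still matches expectedModCount
        v.add("c");  // structural modification bumps modCount
        try {
            it.next();   // checkForComodification() now fails
        } catch (ConcurrentModificationException e) {
            System.out.println("fail-fast triggered: " + e);
        }

        // it.remove() is the supported way to mutate during iteration:
        it = v.iterator();
        it.next();
        it.remove();            // resynchronizes expectedModCount with modCount
        System.out.println(v);  // [b, c]
    }
}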
'''
This module sets up a scheme for validating that arbitrary Python
objects are correctly typed. It is totally decoupled from Django,
composable, easily wrapped, and easily extended.

A validator takes two parameters--var_name and val--and returns an
error if val is not the correct type. The var_name parameter is used
to format error messages. Validators return None when there are no
errors.

Example primitive validators are check_string, check_int, and
check_bool.

Compound validators are created by check_list and check_dict. Note
that those functions aren't directly called for validation; instead,
those functions are called to return other functions that adhere to
the validator contract. This is similar to how Python decorators are
often parameterized.

The contract for check_list and check_dict is that they get passed in
other validators to apply to their items. This allows you to build up
validators for arbitrarily complex validators. See ValidatorTestCase
for example usage.

A simple example of composition is this:

   check_list(check_string)('my_list', ['a', 'b', 'c']) == None

To extend this concept, it's simply a matter of writing your own
validator for any particular type of object.
'''
from __future__ import absolute_import

import six

def check_string(var_name, val):
    if not isinstance(val, six.string_types):
        return '%s is not a string' % (var_name,)
    return None

def check_int(var_name, val):
    if not isinstance(val, int):
        return '%s is not an integer' % (var_name,)
    return None

def check_bool(var_name, val):
    if not isinstance(val, bool):
        return '%s is not a boolean' % (var_name,)
    return None

def check_none_or(sub_validator):
    def f(var_name, val):
        if val is None:
            return None
        else:
            return sub_validator(var_name, val)
    return f

def check_list(sub_validator, length=None):
    def f(var_name, val):
        if not isinstance(val, list):
            return '%s is not a list' % (var_name,)
        if length is not None and length != len(val):
            return '%s should have exactly %d items' % (var_name, length)
        if sub_validator:
            for i, item in enumerate(val):
                vname = '%s[%d]' % (var_name, i)
                error = sub_validator(vname, item)
                if error:
                    return error
        return None
    return f

def check_dict(required_keys):
    # required_keys is a list of tuples of
    # key_name/validator
    def f(var_name, val):
        if not isinstance(val, dict):
            return '%s is not a dict' % (var_name,)
        for k, sub_validator in required_keys:
            if k not in val:
                return '%s key is missing from %s' % (k, var_name)
            vname = '%s["%s"]' % (var_name, k)
            error = sub_validator(vname, val[k])
            if error:
                return error
        return None
    return f

def check_variable_type(allowed_type_funcs):
    """
    Use this validator if an argument is of a variable type (e.g. processing
    properties that might be strings or booleans).

    `allowed_type_funcs`: the check_* validator functions for the possible data
    types for this variable.
    """
    def enumerated_type_check(var_name, val):
        for func in allowed_type_funcs:
            if not func(var_name, val):
                return None
        return '%s is not an allowed_type' % (var_name,)
    return enumerated_type_check

def equals(expected_val):
    def f(var_name, val):
        if val != expected_val:
            return '%s != %r (%r is wrong)' % (var_name, expected_val, val)
        return None
    return f
dwrpayne/zulip
zerver/lib/validator.py
Python
apache-2.0
3,704
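The docstring above describes a combinator contract: a validator is a (var_name, val) -> error-or-None function, and check_list/check_dict build compound validators out of simpler ones. To keep all added examples in this mixed-language corpus in a single language, here is a rough Java transliteration of that contract. It is illustrative only; Validators, Validator, CHECK_STRING, and checkList are invented names, not zulip code.

import java.util.List;
import java.util.function.BiFunction;

public class Validators {
    // A validator is just a (varName, value) -> errorOrNull function.
    interface Validator extends BiFunction<String, Object, String> {}

    static final Validator CHECK_STRING = (name, val) ->
            (val instanceof String) ? null : name + " is not a string";

    // Analogue of check_list(sub_validator): builds a list validator
    // from an item validator.
    static Validator checkList(Validator sub) {
        return (name, val) -> {
            if (!(val instanceof List)) {
                return name + " is not a list";
            }
            List<?> list = (List<?>) val;
            for (int i = 0; i < list.size(); i++) {
                String err = sub.apply(name + "[" + i + "]", list.get(i));
                if (err != null) {
                    return err;
                }
            }
            return null;
        };
    }

    public static void main(String[] args) {
        // Mirrors the docstring example: check_list(check_string)('my_list', [...])
        System.out.println(checkList(CHECK_STRING)
                .apply("my_list", List.of("a", "b", "c")));  // null (no error)
        System.out.println(checkList(CHECK_STRING)
                .apply("my_list", List.of("a", 1)));         // my_list[1] is not a string
    }
}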
/*
Copyright 2011-2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.google.security.zynamics.binnavi.debug.connection.packets.replies;

import com.google.security.zynamics.binnavi.debug.models.targetinformation.RegisterValues;

/**
 * Represents the reply that is sent by the debug client whenever a regular breakpoint was hit in
 * the target process.
 */
public final class BreakpointHitReply extends AnyBreakpointHitReply {
  /**
   * Creates a new breakpoint hit reply object.
   *
   * @param packetId Packet ID of the reply.
   * @param errorCode Error code of the reply. If this error code is 0, the requested operation was
   *        successful.
   * @param tid Thread ID of the thread that hit the breakpoint.
   * @param registerValues Values of all registers when the breakpoint was hit. In case of an
   *        error, this argument is null.
   */
  public BreakpointHitReply(final int packetId, final int errorCode, final long tid,
      final RegisterValues registerValues) {
    super(packetId, errorCode, tid, registerValues);
  }
}
ispras/binnavi
src/main/java/com/google/security/zynamics/binnavi/debug/connection/packets/replies/BreakpointHitReply.java
Java
apache-2.0
1,580
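For illustration only, a sketch of how a packet parser might construct the reply above. The packet values here are invented; per the constructor javadoc, registerValues may be null when errorCode is non-zero.

import com.google.security.zynamics.binnavi.debug.connection.packets.replies.BreakpointHitReply;

public class ReplyConstructionDemo {
    public static void main(String[] args) {
        // Error path: errorCode != 0, so no register snapshot is available.
        BreakpointHitReply errorReply = new BreakpointHitReply(
                /* packetId */ 42, /* errorCode */ 5, /* tid */ 0x1A4L,
                /* registerValues */ null);
        System.out.println(errorReply);
    }
}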
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. using Azure.Core; using Azure.Core.Pipeline; using System; using System.Threading; using System.Threading.Tasks; namespace Azure.Security.KeyVault.Keys.Cryptography { internal class RemoteCryptographyClient : ICryptographyProvider { private readonly Uri _keyId; protected RemoteCryptographyClient() { } internal RemoteCryptographyClient(Uri keyId, TokenCredential credential, CryptographyClientOptions options) { Argument.AssertNotNull(keyId, nameof(keyId)); Argument.AssertNotNull(credential, nameof(credential)); _keyId = keyId; options ??= new CryptographyClientOptions(); string apiVersion = options.GetVersionString(); HttpPipeline pipeline = HttpPipelineBuilder.Build(options, new ChallengeBasedAuthenticationPolicy(credential)); Pipeline = new KeyVaultPipeline(keyId, apiVersion, pipeline, new ClientDiagnostics(options)); } internal RemoteCryptographyClient(KeyVaultPipeline pipeline) { Pipeline = pipeline; } internal KeyVaultPipeline Pipeline { get; } public bool SupportsOperation(KeyOperation operation) => true; public virtual async Task<Response<EncryptResult>> EncryptAsync(EncryptionAlgorithm algorithm, byte[] plaintext, CancellationToken cancellationToken = default) { var parameters = new KeyEncryptParameters() { Algorithm = algorithm.ToString(), Value = plaintext, }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Encrypt)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return await Pipeline.SendRequestAsync(RequestMethod.Post, parameters, () => new EncryptResult { Algorithm = algorithm }, cancellationToken, "/encrypt").ConfigureAwait(false); } catch (Exception e) { scope.Failed(e); throw; } } public virtual Response<EncryptResult> Encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, CancellationToken cancellationToken = default) { var parameters = new KeyEncryptParameters() { Algorithm = algorithm.ToString(), Value = plaintext, }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Encrypt)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new EncryptResult { Algorithm = algorithm }, cancellationToken, "/encrypt"); } catch (Exception e) { scope.Failed(e); throw; } } public virtual async Task<Response<DecryptResult>> DecryptAsync(EncryptionAlgorithm algorithm, byte[] ciphertext, CancellationToken cancellationToken = default) { var parameters = new KeyEncryptParameters() { Algorithm = algorithm.ToString(), Value = ciphertext, }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Decrypt)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return await Pipeline.SendRequestAsync(RequestMethod.Post, parameters, () => new DecryptResult { Algorithm = algorithm }, cancellationToken, "/decrypt").ConfigureAwait(false); } catch (Exception e) { scope.Failed(e); throw; } } public virtual Response<DecryptResult> Decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, CancellationToken cancellationToken = default) { var parameters = new KeyEncryptParameters() { Algorithm = algorithm.ToString(), Value = ciphertext, }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Decrypt)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new DecryptResult { Algorithm 
= algorithm }, cancellationToken, "/decrypt"); } catch (Exception e) { scope.Failed(e); throw; } } public virtual async Task<Response<WrapResult>> WrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] key, CancellationToken cancellationToken = default) { var parameters = new KeyWrapParameters() { Algorithm = algorithm.ToString(), Key = key }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(WrapKey)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return await Pipeline.SendRequestAsync(RequestMethod.Post, parameters, () => new WrapResult { Algorithm = algorithm }, cancellationToken, "/wrapKey").ConfigureAwait(false); } catch (Exception e) { scope.Failed(e); throw; } } public virtual Response<WrapResult> WrapKey(KeyWrapAlgorithm algorithm, byte[] key, CancellationToken cancellationToken = default) { var parameters = new KeyWrapParameters() { Algorithm = algorithm.ToString(), Key = key }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(WrapKey)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new WrapResult { Algorithm = algorithm }, cancellationToken, "/wrapKey"); } catch (Exception e) { scope.Failed(e); throw; } } public virtual async Task<Response<UnwrapResult>> UnwrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] encryptedKey, CancellationToken cancellationToken = default) { var parameters = new KeyWrapParameters() { Algorithm = algorithm.ToString(), Key = encryptedKey }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(UnwrapKey)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return await Pipeline.SendRequestAsync(RequestMethod.Post, parameters, () => new UnwrapResult { Algorithm = algorithm }, cancellationToken, "/unwrapKey").ConfigureAwait(false); } catch (Exception e) { scope.Failed(e); throw; } } public virtual Response<UnwrapResult> UnwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, CancellationToken cancellationToken = default) { var parameters = new KeyWrapParameters() { Algorithm = algorithm.ToString(), Key = encryptedKey }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(UnwrapKey)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new UnwrapResult { Algorithm = algorithm }, cancellationToken, "/unwrapKey"); } catch (Exception e) { scope.Failed(e); throw; } } public virtual async Task<Response<SignResult>> SignAsync(SignatureAlgorithm algorithm, byte[] digest, CancellationToken cancellationToken = default) { var parameters = new KeySignParameters { Algorithm = algorithm.ToString(), Digest = digest }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Sign)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return await Pipeline.SendRequestAsync(RequestMethod.Post, parameters, () => new SignResult { Algorithm = algorithm }, cancellationToken, "/sign").ConfigureAwait(false); } catch (Exception e) { scope.Failed(e); throw; } } public virtual Response<SignResult> Sign(SignatureAlgorithm algorithm, byte[] digest, CancellationToken cancellationToken = default) { var parameters = new KeySignParameters { Algorithm = algorithm.ToString(), Digest = digest }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Sign)}"); 
scope.AddAttribute("key", _keyId); scope.Start(); try { return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new SignResult { Algorithm = algorithm }, cancellationToken, "/sign"); } catch (Exception e) { scope.Failed(e); throw; } } public virtual async Task<Response<VerifyResult>> VerifyAsync(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, CancellationToken cancellationToken = default) { var parameters = new KeyVerifyParameters { Algorithm = algorithm.ToString(), Digest = digest, Signature = signature }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Verify)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return await Pipeline.SendRequestAsync(RequestMethod.Post, parameters, () => new VerifyResult { Algorithm = algorithm, KeyId = _keyId.ToString() }, cancellationToken, "/verify").ConfigureAwait(false); } catch (Exception e) { scope.Failed(e); throw; } } public virtual Response<VerifyResult> Verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, CancellationToken cancellationToken = default) { var parameters = new KeyVerifyParameters { Algorithm = algorithm.ToString(), Digest = digest, Signature = signature }; using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(Verify)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new VerifyResult { Algorithm = algorithm, KeyId = _keyId.ToString() }, cancellationToken, "/verify"); } catch (Exception e) { scope.Failed(e); throw; } } internal virtual async Task<Response<KeyVaultKey>> GetKeyAsync(CancellationToken cancellationToken = default) { using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(GetKey)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return await Pipeline.SendRequestAsync(RequestMethod.Get, () => new KeyVaultKey(), cancellationToken).ConfigureAwait(false); } catch (Exception e) { scope.Failed(e); throw; } } internal virtual Response<KeyVaultKey> GetKey(CancellationToken cancellationToken = default) { using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(GetKey)}"); scope.AddAttribute("key", _keyId); scope.Start(); try { return Pipeline.SendRequest(RequestMethod.Get, () => new KeyVaultKey(), cancellationToken); } catch (Exception e) { scope.Failed(e); throw; } } bool ICryptographyProvider.ShouldRemote => false; async Task<EncryptResult> ICryptographyProvider.EncryptAsync(EncryptionAlgorithm algorithm, byte[] plaintext, CancellationToken cancellationToken) { return await EncryptAsync(algorithm, plaintext, cancellationToken).ConfigureAwait(false); } EncryptResult ICryptographyProvider.Encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, CancellationToken cancellationToken) { return Encrypt(algorithm, plaintext, cancellationToken); } async Task<DecryptResult> ICryptographyProvider.DecryptAsync(EncryptionAlgorithm algorithm, byte[] ciphertext, CancellationToken cancellationToken) { return await DecryptAsync(algorithm, ciphertext, cancellationToken).ConfigureAwait(false); } DecryptResult ICryptographyProvider.Decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, CancellationToken cancellationToken) { return Decrypt(algorithm, ciphertext, cancellationToken); } async Task<WrapResult> ICryptographyProvider.WrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] key, CancellationToken cancellationToken) { return await 
WrapKeyAsync(algorithm, key, cancellationToken).ConfigureAwait(false); } WrapResult ICryptographyProvider.WrapKey(KeyWrapAlgorithm algorithm, byte[] key, CancellationToken cancellationToken) { return WrapKey(algorithm, key, cancellationToken); } async Task<UnwrapResult> ICryptographyProvider.UnwrapKeyAsync(KeyWrapAlgorithm algorithm, byte[] encryptedKey, CancellationToken cancellationToken) { return await UnwrapKeyAsync(algorithm, encryptedKey, cancellationToken).ConfigureAwait(false); } UnwrapResult ICryptographyProvider.UnwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, CancellationToken cancellationToken) { return UnwrapKey(algorithm, encryptedKey, cancellationToken); } async Task<SignResult> ICryptographyProvider.SignAsync(SignatureAlgorithm algorithm, byte[] digest, CancellationToken cancellationToken) { return await SignAsync(algorithm, digest, cancellationToken).ConfigureAwait(false); } SignResult ICryptographyProvider.Sign(SignatureAlgorithm algorithm, byte[] digest, CancellationToken cancellationToken) { return Sign(algorithm, digest, cancellationToken); } async Task<VerifyResult> ICryptographyProvider.VerifyAsync(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, CancellationToken cancellationToken) { return await VerifyAsync(algorithm, digest, signature, cancellationToken).ConfigureAwait(false); } VerifyResult ICryptographyProvider.Verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, CancellationToken cancellationToken) { return Verify(algorithm, digest, signature, cancellationToken); } } }
stankovski/azure-sdk-for-net
sdk/keyvault/Azure.Security.KeyVault.Keys/src/Cryptography/RemoteCryptographyClient.cs
C#
apache-2.0
16,295
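Every sync/async pair in the C# client above follows the same shape: open a DiagnosticScope named after the operation, tag it with the key, run the pipeline request, mark the scope failed on exception, and rethrow. A minimal Java sketch of that pattern follows (names like DiagnosticScope and withScope are stand-ins, not Azure SDK code), using try-with-resources where the C# uses a using declaration.

import java.util.concurrent.Callable;

public class ScopedCall {
    /** Stand-in for a diagnostic scope; AutoCloseable so it ends via try-with-resources. */
    static class DiagnosticScope implements AutoCloseable {
        private final String name;
        DiagnosticScope(String name) { this.name = name; }
        void start() { System.out.println("start " + name); }
        void failed(Exception e) { System.out.println(name + " failed: " + e.getMessage()); }
        @Override public void close() { System.out.println("end " + name); }
    }

    static <T> T withScope(String opName, Callable<T> call) throws Exception {
        try (DiagnosticScope scope = new DiagnosticScope(opName)) {
            scope.start();
            try {
                return call.call();
            } catch (Exception e) {
                scope.failed(e);  // record the failure on the scope, then propagate
                throw e;
            }
        }
    }

    public static void main(String[] args) throws Exception {
        String result = withScope("RemoteCryptographyClient.Encrypt",
                () -> "ciphertext-bytes");  // the real call would hit the Key Vault pipeline
        System.out.println(result);
    }
}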
##########################################################################
# Copyright 2016 ThoughtWorks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################

module ApiV3
  module Config
    class TimerRepresenter < ApiV3::BaseRepresenter
      alias_method :timer, :represented

      error_representer({"timerSpec" => "spec"})

      property :timer_spec, as: :spec
      property :onlyOnChanges, as: :only_on_changes
    end
  end
end
ollie314/gocd
server/webapp/WEB-INF/rails.new/app/presenters/api_v3/config/timer_representer.rb
Ruby
apache-2.0
1,014
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* global define, module, require, exports */ (function (root, factory) { if (typeof define === 'function' && define.amd) { define(['jquery', 'Slick', 'nf.ErrorHandler', 'nf.Common', 'nf.Client', 'nf.CanvasUtils', 'nf.ng.Bridge', 'nf.Dialog', 'nf.Shell'], function ($, Slick, nfErrorHandler, nfCommon, nfClient, nfCanvasUtils, nfNgBridge, nfDialog, nfShell) { return (nf.PolicyManagement = factory($, Slick, nfErrorHandler, nfCommon, nfClient, nfCanvasUtils, nfNgBridge, nfDialog, nfShell)); }); } else if (typeof exports === 'object' && typeof module === 'object') { module.exports = (nf.PolicyManagement = factory(require('jquery'), require('Slick'), require('nf.ErrorHandler'), require('nf.Common'), require('nf.Client'), require('nf.CanvasUtils'), require('nf.ng.Bridge'), require('nf.Dialog'), require('nf.Shell'))); } else { nf.PolicyManagement = factory(root.$, root.Slick, root.nf.ErrorHandler, root.nf.Common, root.nf.Client, root.nf.CanvasUtils, root.nf.ng.Bridge, root.nf.Dialog, root.nf.Shell); } }(this, function ($, Slick, nfErrorHandler, nfCommon, nfClient, nfCanvasUtils, nfNgBridge, nfDialog, nfShell) { 'use strict'; var config = { urls: { api: '../nifi-api', searchTenants: '../nifi-api/tenants/search-results' } }; var initialized = false; var initAddTenantToPolicyDialog = function () { $('#new-policy-user-button').on('click', function () { $('#search-users-dialog').modal('show'); $('#search-users-field').focus(); }); $('#delete-policy-button').on('click', function () { promptToDeletePolicy(); }); $('#search-users-dialog').modal({ scrollableContentStyle: 'scrollable', headerText: 'Add Users/Groups', buttons: [{ buttonText: 'Add', color: { base: '#728E9B', hover: '#004849', text: '#ffffff' }, handler: { click: function () { // add to table and update policy var policyGrid = $('#policy-table').data('gridInstance'); var policyData = policyGrid.getData(); // begin the update policyData.beginUpdate(); // add all users/groups $.each(getTenantsToAdd($('#allowed-users')), function (_, user) { // remove the user policyData.addItem(user); }); $.each(getTenantsToAdd($('#allowed-groups')), function (_, group) { // remove the user policyData.addItem(group); }); // end the update policyData.endUpdate(); // update the policy updatePolicy(); // close the dialog $('#search-users-dialog').modal('hide'); } } }, { buttonText: 'Cancel', color: { base: '#E3E8EB', hover: '#C7D2D7', text: '#004849' }, handler: { click: function () { // close the dialog $('#search-users-dialog').modal('hide'); } } }], handler: { close: function () { // reset the search fields $('#search-users-field').userSearchAutocomplete('reset').val(''); // clear the selected users/groups $('#allowed-users, #allowed-groups').empty(); } } }); // listen for removal requests 
$(document).on('click', 'div.remove-allowed-entity', function () { $(this).closest('li').remove(); }); // configure the user auto complete $.widget('nf.userSearchAutocomplete', $.ui.autocomplete, { reset: function () { this.term = null; }, _create: function() { this._super(); this.widget().menu('option', 'items', '> :not(.search-no-matches)' ); }, _normalize: function (searchResults) { var items = []; items.push(searchResults); return items; }, _renderMenu: function (ul, items) { // results are normalized into a single element array var searchResults = items[0]; var allowedGroups = getAllAllowedGroups(); var allowedUsers = getAllAllowedUsers(); var nfUserSearchAutocomplete = this; $.each(searchResults.userGroups, function (_, tenant) { // see if this match is not already selected if ($.inArray(tenant.id, allowedGroups) === -1) { nfUserSearchAutocomplete._renderGroup(ul, $.extend({ type: 'group' }, tenant)); } }); $.each(searchResults.users, function (_, tenant) { // see if this match is not already selected if ($.inArray(tenant.id, allowedUsers) === -1) { nfUserSearchAutocomplete._renderUser(ul, $.extend({ type: 'user' }, tenant)); } }); // ensure there were some results if (ul.children().length === 0) { ul.append('<li class="unset search-no-matches">No users matched the search terms</li>'); } }, _resizeMenu: function () { var ul = this.menu.element; ul.width($('#search-users-field').outerWidth() - 2); }, _renderUser: function (ul, match) { var userContent = $('<a></a>').text(match.component.identity); return $('<li></li>').data('ui-autocomplete-item', match).append(userContent).appendTo(ul); }, _renderGroup: function (ul, match) { var groupLabel = $('<span></span>').text(match.component.identity); var groupContent = $('<a></a>').append('<div class="fa fa-users" style="margin-right: 5px;"></div>').append(groupLabel); return $('<li></li>').data('ui-autocomplete-item', match).append(groupContent).appendTo(ul); } }); // configure the autocomplete field $('#search-users-field').userSearchAutocomplete({ minLength: 0, appendTo: '#search-users-results', position: { my: 'left top', at: 'left bottom', offset: '0 1' }, source: function (request, response) { // create the search request $.ajax({ type: 'GET', data: { q: request.term }, dataType: 'json', url: config.urls.searchTenants }).done(function (searchResponse) { response(searchResponse); }); }, select: function (event, ui) { addAllowedTenant(ui.item); // reset the search field $(this).val(''); // stop event propagation return false; } }); }; /** * Gets all allowed groups including those already in the policy and those selected while searching (not yet saved). * * @returns {Array} */ var getAllAllowedGroups = function () { var policyGrid = $('#policy-table').data('gridInstance'); var policyData = policyGrid.getData(); var userGroups = []; // consider existing groups in the policy table var items = policyData.getItems(); $.each(items, function (_, item) { if (item.type === 'group') { userGroups.push(item.id); } }); // also consider groups already selected in the search users dialog $.each(getTenantsToAdd($('#allowed-groups')), function (_, group) { userGroups.push(group.id); }); return userGroups; }; /** * Gets the user groups that will be added upon applying the changes. 
* * @param {jQuery} container * @returns {Array} */ var getTenantsToAdd = function (container) { var tenants = []; // also consider groups already selected in the search users dialog container.children('li').each(function (_, allowedTenant) { var tenant = $(allowedTenant).data('tenant'); if (nfCommon.isDefinedAndNotNull(tenant)) { tenants.push(tenant); } }); return tenants; }; /** * Gets all allowed users including those already in the policy and those selected while searching (not yet saved). * * @returns {Array} */ var getAllAllowedUsers = function () { var policyGrid = $('#policy-table').data('gridInstance'); var policyData = policyGrid.getData(); var users = []; // consider existing users in the policy table var items = policyData.getItems(); $.each(items, function (_, item) { if (item.type === 'user') { users.push(item.id); } }); // also consider users already selected in the search users dialog $.each(getTenantsToAdd($('#allowed-users')), function (_, user) { users.push(user.id); }); return users; }; /** * Added the specified tenant to the listing of users/groups which will be added when applied. * * @param allowedTenant user/group to add */ var addAllowedTenant = function (allowedTenant) { var allowedTenants = allowedTenant.type === 'user' ? $('#allowed-users') : $('#allowed-groups'); // append the user var tenant = $('<span></span>').addClass('allowed-entity ellipsis').text(allowedTenant.component.identity).ellipsis(); var tenantAction = $('<div></div>').addClass('remove-allowed-entity fa fa-trash'); $('<li></li>').data('tenant', allowedTenant).append(tenant).append(tenantAction).appendTo(allowedTenants); }; /** * Determines whether the specified global policy type supports read/write options. * * @param policyType global policy type * @returns {boolean} whether the policy supports read/write options */ var globalPolicySupportsReadWrite = function (policyType) { return policyType === 'controller' || policyType === 'counters' || policyType === 'policies' || policyType === 'tenants'; }; /** * Determines whether the specified global policy type only supports write. * * @param policyType global policy type * @returns {boolean} whether the policy only supports write */ var globalPolicySupportsWrite = function (policyType) { return policyType === 'proxy' || policyType === 'restricted-components'; }; /** * Initializes the policy table. 
*/ var initPolicyTable = function () { $('#override-policy-dialog').modal({ headerText: 'Override Policy', buttons: [{ buttonText: 'Override', color: { base: '#728E9B', hover: '#004849', text: '#ffffff' }, handler: { click: function () { // create the policy, copying if appropriate createPolicy($('#copy-policy-radio-button').is(':checked')); $(this).modal('hide'); } } }, { buttonText: 'Cancel', color: { base: '#E3E8EB', hover: '#C7D2D7', text: '#004849' }, handler: { click: function () { $(this).modal('hide'); } } }], handler: { close: function () { // reset the radio button $('#copy-policy-radio-button').prop('checked', true); } } }); // create/add a policy $('#create-policy-link, #add-local-admin-link').on('click', function () { createPolicy(false); }); // override a policy $('#override-policy-link').on('click', function () { $('#override-policy-dialog').modal('show'); }); // policy type listing $('#policy-type-list').combo({ options: [ nfCommon.getPolicyTypeListing('flow'), nfCommon.getPolicyTypeListing('controller'), nfCommon.getPolicyTypeListing('provenance'), nfCommon.getPolicyTypeListing('restricted-components'), nfCommon.getPolicyTypeListing('policies'), nfCommon.getPolicyTypeListing('tenants'), nfCommon.getPolicyTypeListing('site-to-site'), nfCommon.getPolicyTypeListing('system'), nfCommon.getPolicyTypeListing('proxy'), nfCommon.getPolicyTypeListing('counters')], select: function (option) { if (initialized) { // record the policy type $('#selected-policy-type').text(option.value); // if the option is for a specific component if (globalPolicySupportsReadWrite(option.value)) { // update the policy target and let it relaod the policy $('#controller-policy-target').combo('setSelectedOption', { 'value': 'read' }).show(); } else { $('#controller-policy-target').hide(); // record the action if (globalPolicySupportsWrite(option.value)) { $('#selected-policy-action').text('write'); } else { $('#selected-policy-action').text('read'); } // reload the policy loadPolicy(); } } } }); // controller policy target $('#controller-policy-target').combo({ options: [{ text: 'view', value: 'read' }, { text: 'modify', value: 'write' }], select: function (option) { if (initialized) { // record the policy action $('#selected-policy-action').text(option.value); // reload the policy loadPolicy(); } } }); // component policy target $('#component-policy-target').combo({ options: [{ text: 'view the component', value: 'read-component', description: 'Allows users to view component configuration details' }, { text: 'modify the component', value: 'write-component', description: 'Allows users to modify component configuration details' }, { text: 'view the data', value: 'read-data', description: 'Allows users to view metadata and content for this component through provenance data and flowfile queues in outbound connections' }, { text: 'modify the data', value: 'write-data', description: 'Allows users to empty flowfile queues in outbound connections and submit replays' }, { text: 'receive data via site-to-site', value: 'write-receive-data', description: 'Allows this port to receive data from these NiFi instances', disabled: true }, { text: 'send data via site-to-site', value: 'write-send-data', description: 'Allows this port to send data to these NiFi instances', disabled: true }, { text: 'view the policies', value: 'read-policies', description: 'Allows users to view the list of users who can view/modify this component' }, { text: 'modify the policies', value: 'write-policies', description: 'Allows users to modify 
the list of users who can view/modify this component' }], select: function (option) { if (initialized) { var resource = $('#selected-policy-component-type').text(); if (option.value === 'read-component') { $('#selected-policy-action').text('read'); } else if (option.value === 'write-component') { $('#selected-policy-action').text('write'); } else if (option.value === 'read-data') { $('#selected-policy-action').text('read'); resource = ('data/' + resource); } else if (option.value === 'write-data') { $('#selected-policy-action').text('write'); resource = ('data/' + resource); } else if (option.value === 'read-policies') { $('#selected-policy-action').text('read'); resource = ('policies/' + resource); } else if (option.value === 'write-policies') { $('#selected-policy-action').text('write'); resource = ('policies/' + resource); } else if (option.value === 'write-receive-data') { $('#selected-policy-action').text('write'); resource = 'data-transfer/input-ports'; } else if (option.value === 'write-send-data') { $('#selected-policy-action').text('write'); resource = 'data-transfer/output-ports'; } // set the resource $('#selected-policy-type').text(resource); // reload the policy loadPolicy(); } } }); // function for formatting the user identity var identityFormatter = function (row, cell, value, columnDef, dataContext) { var markup = ''; if (dataContext.type === 'group') { markup += '<div class="fa fa-users" style="margin-right: 5px;"></div>'; } markup += dataContext.component.identity; return markup; }; // function for formatting the actions column var actionFormatter = function (row, cell, value, columnDef, dataContext) { var markup = ''; // see if the user has permissions for the current policy var currentEntity = $('#policy-table').data('policy'); var isPolicyEditable = $('#delete-policy-button').is(':disabled') === false; if (currentEntity.permissions.canWrite === true && isPolicyEditable) { markup += '<div title="Remove" class="pointer delete-user fa fa-trash"></div>'; } return markup; }; // initialize the templates table var usersColumns = [ { id: 'identity', name: 'User', sortable: true, resizable: true, formatter: identityFormatter }, { id: 'actions', name: '&nbsp;', sortable: false, resizable: false, formatter: actionFormatter, width: 100, maxWidth: 100 } ]; var usersOptions = { forceFitColumns: true, enableTextSelectionOnCells: true, enableCellNavigation: true, enableColumnReorder: false, autoEdit: false }; // initialize the dataview var policyData = new Slick.Data.DataView({ inlineFilters: false }); policyData.setItems([]); // initialize the sort sort({ columnId: 'identity', sortAsc: true }, policyData); // initialize the grid var policyGrid = new Slick.Grid('#policy-table', policyData, usersColumns, usersOptions); policyGrid.setSelectionModel(new Slick.RowSelectionModel()); policyGrid.registerPlugin(new Slick.AutoTooltips()); policyGrid.setSortColumn('identity', true); policyGrid.onSort.subscribe(function (e, args) { sort({ columnId: args.sortCol.id, sortAsc: args.sortAsc }, policyData); }); // configure a click listener policyGrid.onClick.subscribe(function (e, args) { var target = $(e.target); // get the node at this row var item = policyData.getItem(args.row); // determine the desired action if (policyGrid.getColumns()[args.cell].id === 'actions') { if (target.hasClass('delete-user')) { promptToRemoveUserFromPolicy(item); } } }); // wire up the dataview to the grid policyData.onRowCountChanged.subscribe(function (e, args) { policyGrid.updateRowCount(); policyGrid.render(); // 
update the total number of displayed policy users $('#displayed-policy-users').text(args.current); }); policyData.onRowsChanged.subscribe(function (e, args) { policyGrid.invalidateRows(args.rows); policyGrid.render(); }); // hold onto an instance of the grid $('#policy-table').data('gridInstance', policyGrid); // initialize the number of displayed items $('#displayed-policy-users').text('0'); }; /** * Sorts the specified data using the specified sort details. * * @param {object} sortDetails * @param {object} data */ var sort = function (sortDetails, data) { // defines a function for sorting var comparer = function (a, b) { if(a.permissions.canRead && b.permissions.canRead) { var aString = nfCommon.isDefinedAndNotNull(a.component[sortDetails.columnId]) ? a.component[sortDetails.columnId] : ''; var bString = nfCommon.isDefinedAndNotNull(b.component[sortDetails.columnId]) ? b.component[sortDetails.columnId] : ''; return aString === bString ? 0 : aString > bString ? 1 : -1; } else { if (!a.permissions.canRead && !b.permissions.canRead){ return 0; } if(a.permissions.canRead){ return 1; } else { return -1; } } }; // perform the sort data.sort(comparer, sortDetails.sortAsc); }; /** * Prompts for the removal of the specified user. * * @param item */ var promptToRemoveUserFromPolicy = function (item) { nfDialog.showYesNoDialog({ headerText: 'Update Policy', dialogContent: 'Remove \'' + nfCommon.escapeHtml(item.component.identity) + '\' from this policy?', yesHandler: function () { removeUserFromPolicy(item); } }); }; /** * Removes the specified item from the current policy. * * @param item */ var removeUserFromPolicy = function (item) { var policyGrid = $('#policy-table').data('gridInstance'); var policyData = policyGrid.getData(); // begin the update policyData.beginUpdate(); // remove the user policyData.deleteItem(item.id); // end the update policyData.endUpdate(); // save the configuration updatePolicy(); }; /** * Prompts for the deletion of the selected policy. */ var promptToDeletePolicy = function () { nfDialog.showYesNoDialog({ headerText: 'Delete Policy', dialogContent: 'By deleting this policy, the permissions for this component will revert to the inherited policy if applicable.', yesText: 'Delete', noText: 'Cancel', yesHandler: function () { deletePolicy(); } }); }; /** * Deletes the current policy. */ var deletePolicy = function () { var currentEntity = $('#policy-table').data('policy'); if (nfCommon.isDefinedAndNotNull(currentEntity)) { $.ajax({ type: 'DELETE', url: currentEntity.uri + '?' + $.param(nfClient.getRevision(currentEntity)), dataType: 'json' }).done(function () { loadPolicy(); }).fail(function (xhr, status, error) { nfErrorHandler.handleAjaxError(xhr, status, error); resetPolicy(); loadPolicy(); }); } else { nfDialog.showOkDialog({ headerText: 'Delete Policy', dialogContent: 'No policy selected' }); } }; /** * Gets the currently selected resource. */ var getSelectedResourceAndAction = function () { var componentId = $('#selected-policy-component-id').text(); var resource = $('#selected-policy-type').text(); if (componentId !== '') { resource += ('/' + componentId); } return { 'action': $('#selected-policy-action').text(), 'resource': '/' + resource }; }; /** * Populates the table with the specified users and groups. 
* * @param users * @param userGroups */ var populateTable = function (users, userGroups) { var policyGrid = $('#policy-table').data('gridInstance'); var policyData = policyGrid.getData(); // begin the update policyData.beginUpdate(); var policyUsers = []; // add each user $.each(users, function (_, user) { policyUsers.push($.extend({ type: 'user' }, user)); }); // add each group $.each(userGroups, function (_, group) { policyUsers.push($.extend({ type: 'group' }, group)); }); // set the rows policyData.setItems(policyUsers); // end the update policyData.endUpdate(); // re-sort and clear selection after updating policyData.reSort(); policyGrid.invalidate(); policyGrid.getSelectionModel().setSelectedRows([]); }; /** * Converts the specified resource into human readable form. * * @param resource */ var getResourceMessage = function (resource) { if (resource === '/policies') { return $('<span>Showing effective policy inherited from all policies.</span>'); } else if (resource === '/controller') { return $('<span>Showing effective policy inherited from the controller.</span>'); } else { // extract the group id var processGroupId = nfCommon.substringAfterLast(resource, '/'); var processGroupName = processGroupId; // attempt to resolve the group name var breadcrumbs = nfNgBridge.injector.get('breadcrumbsCtrl').getBreadcrumbs(); $.each(breadcrumbs, function (_, breadcrumbEntity) { if (breadcrumbEntity.id === processGroupId) { processGroupName = breadcrumbEntity.label; return false; } }); // build the mark up return $('<span>Showing effective policy inherited from Process Group </span>') .append( $('<span class="link ellipsis" style="max-width: 200px; vertical-align: top;"></span>') .text(processGroupName) .attr('title', processGroupName) .on('click', function () { // close the shell $('#shell-close-button').click(); // load the correct group and unselect everything if necessary nfCanvasUtils.getComponentByType('ProcessGroup').enterGroup(processGroupId).done(function () { nfCanvasUtils.getSelection().classed('selected', false); // inform Angular app that values have changed nfNgBridge.digest(); }); }) ).append('<span>.</span>'); } }; /** * Populates the specified policy. * * @param policyEntity */ var populatePolicy = function (policyEntity) { var policy = policyEntity.component; // get the currently selected policy var resourceAndAction = getSelectedResourceAndAction(); // reset of the policy message resetPolicyMessage(); // store the current policy version $('#policy-table').data('policy', policyEntity); // see if the policy is for this resource if (resourceAndAction.resource === policy.resource) { // allow remove when policy is not inherited $('#delete-policy-button').prop('disabled', policyEntity.permissions.canWrite === false); // allow modification if allowed $('#new-policy-user-button').prop('disabled', policyEntity.permissions.canWrite === false); } else { $('#policy-message').append(getResourceMessage(policy.resource)); // policy is inherited, we do not know if the user has permissions to modify the desired policy... show button and let server decide $('#override-policy-message').show(); // do not support policy deletion/modification $('#delete-policy-button').prop('disabled', true); $('#new-policy-user-button').prop('disabled', true); } // populate the table populateTable(policy.users, policy.userGroups); }; /** * Loads the configuration for the specified process group. 
*/ var loadPolicy = function () { var resourceAndAction = getSelectedResourceAndAction(); var policyDeferred; if (resourceAndAction.resource.startsWith('/policies')) { $('#admin-policy-message').show(); policyDeferred = $.Deferred(function (deferred) { $.ajax({ type: 'GET', url: '../nifi-api/policies/' + resourceAndAction.action + resourceAndAction.resource, dataType: 'json' }).done(function (policyEntity) { // update the refresh timestamp $('#policy-last-refreshed').text(policyEntity.generated); // ensure appropriate actions for the loaded policy if (policyEntity.permissions.canRead === true) { var policy = policyEntity.component; // if the return policy is for the desired policy (not inherited, show it) if (resourceAndAction.resource === policy.resource) { // populate the policy details populatePolicy(policyEntity); } else { // reset the policy resetPolicy(); // show an appropriate message $('#policy-message').text('No component specific administrators.'); // we don't know if the user has permissions to the desired policy... show create button and allow the server to decide $('#add-local-admin-message').show(); } } else { // reset the policy resetPolicy(); // show an appropriate message $('#policy-message').text('No component specific administrators.'); // we don't know if the user has permissions to the desired policy... show create button and allow the server to decide $('#add-local-admin-message').show(); } deferred.resolve(); }).fail(function (xhr, status, error) { if (xhr.status === 404) { // reset the policy resetPolicy(); // show an appropriate message $('#policy-message').text('No component specific administrators.'); // we don't know if the user has permissions to the desired policy... show create button and allow the server to decide $('#add-local-admin-message').show(); deferred.resolve(); } else if (xhr.status === 403) { // reset the policy resetPolicy(); // show an appropriate message $('#policy-message').text('Not authorized to access the policy for the specified resource.'); deferred.resolve(); } else { // reset the policy resetPolicy(); deferred.reject(); nfErrorHandler.handleAjaxError(xhr, status, error); } }); }).promise(); } else { $('#admin-policy-message').hide(); policyDeferred = $.Deferred(function (deferred) { $.ajax({ type: 'GET', url: '../nifi-api/policies/' + resourceAndAction.action + resourceAndAction.resource, dataType: 'json' }).done(function (policyEntity) { // return OK so we either have access to the policy or we don't have access to an inherited policy // update the refresh timestamp $('#policy-last-refreshed').text(policyEntity.generated); // ensure appropriate actions for the loaded policy if (policyEntity.permissions.canRead === true) { // populate the policy details populatePolicy(policyEntity); } else { // reset the policy resetPolicy(); // since we cannot read, the policy may be inherited or not... we cannot tell $('#policy-message').text('Not authorized to view the policy.'); // allow option to override because we don't know if it's supported or not $('#override-policy-message').show(); } deferred.resolve(); }).fail(function (xhr, status, error) { if (xhr.status === 404) { // reset the policy resetPolicy(); // show an appropriate message $('#policy-message').text('No policy for the specified resource.'); // we don't know if the user has permissions to the desired policy... 
show create button and allow the server to decide $('#new-policy-message').show(); deferred.resolve(); } else if (xhr.status === 403) { // reset the policy resetPolicy(); // show an appropriate message $('#policy-message').text('Not authorized to access the policy for the specified resource.'); deferred.resolve(); } else { resetPolicy(); deferred.reject(); nfErrorHandler.handleAjaxError(xhr, status, error); } }); }).promise(); } return policyDeferred; }; /** * Creates a new policy for the current selection. * * @param copyInheritedPolicy Whether or not to copy the inherited policy */ var createPolicy = function (copyInheritedPolicy) { var resourceAndAction = getSelectedResourceAndAction(); var users = []; var userGroups = []; if (copyInheritedPolicy === true) { var policyGrid = $('#policy-table').data('gridInstance'); var policyData = policyGrid.getData(); var items = policyData.getItems(); $.each(items, function (_, item) { var itemCopy = $.extend({}, item); if (itemCopy.type === 'user') { users.push(itemCopy); } else { userGroups.push(itemCopy); } // remove the type as it was added client side to render differently and is not part of the actual schema delete itemCopy.type; }); } var entity = { 'revision': nfClient.getRevision({ 'revision': { 'version': 0 } }), 'component': { 'action': resourceAndAction.action, 'resource': resourceAndAction.resource, 'users': users, 'userGroups': userGroups } }; $.ajax({ type: 'POST', url: '../nifi-api/policies', data: JSON.stringify(entity), dataType: 'json', contentType: 'application/json' }).done(function (policyEntity) { // ensure appropriate actions for the loaded policy if (policyEntity.permissions.canRead === true) { // populate the policy details populatePolicy(policyEntity); } else { // the request succeeded but we don't have access to the policy... reset/reload the policy resetPolicy(); loadPolicy(); } }).fail(nfErrorHandler.handleAjaxError); }; /** * Updates the policy for the current selection. */ var updatePolicy = function () { var policyGrid = $('#policy-table').data('gridInstance'); var policyData = policyGrid.getData(); var users = []; var userGroups = []; var items = policyData.getItems(); $.each(items, function (_, item) { var itemCopy = $.extend({}, item); if (itemCopy.type === 'user') { users.push(itemCopy); } else { userGroups.push(itemCopy); } // remove the type as it was added client side to render differently and is not part of the actual schema delete itemCopy.type; }); var currentEntity = $('#policy-table').data('policy'); if (nfCommon.isDefinedAndNotNull(currentEntity)) { var entity = { 'revision': nfClient.getRevision(currentEntity), 'component': { 'id': currentEntity.id, 'users': users, 'userGroups': userGroups } }; $.ajax({ type: 'PUT', url: currentEntity.uri, data: JSON.stringify(entity), dataType: 'json', contentType: 'application/json' }).done(function (policyEntity) { // ensure appropriate actions for the loaded policy if (policyEntity.permissions.canRead === true) { // populate the policy details populatePolicy(policyEntity); } else { // the request succeeded but we don't have access to the policy... reset/reload the policy resetPolicy(); loadPolicy(); } }).fail(function (xhr, status, error) { nfErrorHandler.handleAjaxError(xhr, status, error); resetPolicy(); loadPolicy(); }).always(function () { nfCanvasUtils.reload({ 'transition': true }); }); } else { nfDialog.showOkDialog({ headerText: 'Update Policy', dialogContent: 'No policy selected' }); } }; /** * Shows the process group configuration. 
*/ var showPolicy = function () { // show the configuration dialog nfShell.showContent('#policy-management').always(function () { reset(); }); // adjust the table size nfPolicyManagement.resetTableSize(); }; /** * Reset the policy message. */ var resetPolicyMessage = function () { $('#policy-message').text('').empty(); $('#new-policy-message').hide(); $('#override-policy-message').hide(); $('#add-local-admin-message').hide(); }; /** * Reset the policy. */ var resetPolicy = function () { resetPolicyMessage(); // reset button state $('#delete-policy-button').prop('disabled', true); $('#new-policy-user-button').prop('disabled', true); // reset the current policy $('#policy-table').removeData('policy'); // populate the table with no users populateTable([], []); } /** * Resets the policy management dialog. */ var reset = function () { resetPolicy(); // clear the selected policy details $('#selected-policy-type').text(''); $('#selected-policy-action').text(''); $('#selected-policy-component-id').text(''); $('#selected-policy-component-type').text(''); // clear the selected component details $('div.policy-selected-component-container').hide(); }; var nfPolicyManagement = { /** * Initializes the settings page. */ init: function () { initAddTenantToPolicyDialog(); initPolicyTable(); $('#policy-refresh-button').on('click', function () { loadPolicy(); }); // reset the policy to initialize resetPolicy(); // mark as initialized initialized = true; }, /** * Update the size of the grid based on its container's current size. */ resetTableSize: function () { var policyTable = $('#policy-table'); if (policyTable.is(':visible')) { var policyGrid = policyTable.data('gridInstance'); if (nfCommon.isDefinedAndNotNull(policyGrid)) { policyGrid.resizeCanvas(); } } }, /** * Shows the controller service policy. * * @param d */ showControllerServicePolicy: function (d) { // reset the policy message resetPolicyMessage(); // update the policy controls visibility $('#component-policy-controls').show(); $('#global-policy-controls').hide(); // update the visibility if (d.permissions.canRead === true) { $('#policy-selected-controller-service-container div.policy-selected-component-name').text(d.component.name); } else { $('#policy-selected-controller-service-container div.policy-selected-component-name').text(d.id); } $('#policy-selected-controller-service-container').show(); // populate the initial resource $('#selected-policy-component-id').text(d.id); $('#selected-policy-component-type').text('controller-services'); $('#component-policy-target') .combo('setOptionEnabled', { value: 'write-receive-data' }, false) .combo('setOptionEnabled', { value: 'write-send-data' }, false) .combo('setOptionEnabled', { value: 'read-data' }, false) .combo('setOptionEnabled', { value: 'write-data' }, false) .combo('setSelectedOption', { value: 'read-component' }); return loadPolicy().always(showPolicy); }, /** * Shows the reporting task policy. 
* * @param d */ showReportingTaskPolicy: function (d) { // reset the policy message resetPolicyMessage(); // update the policy controls visibility $('#component-policy-controls').show(); $('#global-policy-controls').hide(); // update the visibility if (d.permissions.canRead === true) { $('#policy-selected-reporting-task-container div.policy-selected-component-name').text(d.component.name); } else { $('#policy-selected-reporting-task-container div.policy-selected-component-name').text(d.id); } $('#policy-selected-reporting-task-container').show(); // populate the initial resource $('#selected-policy-component-id').text(d.id); $('#selected-policy-component-type').text('reporting-tasks'); $('#component-policy-target') .combo('setOptionEnabled', { value: 'write-receive-data' }, false) .combo('setOptionEnabled', { value: 'write-send-data' }, false) .combo('setOptionEnabled', { value: 'read-data' }, false) .combo('setOptionEnabled', { value: 'write-data' }, false) .combo('setSelectedOption', { value: 'read-component' }); return loadPolicy().always(showPolicy); }, /** * Shows the template policy. * * @param d */ showTemplatePolicy: function (d) { // reset the policy message resetPolicyMessage(); // update the policy controls visibility $('#component-policy-controls').show(); $('#global-policy-controls').hide(); // update the visibility if (d.permissions.canRead === true) { $('#policy-selected-template-container div.policy-selected-component-name').text(d.template.name); } else { $('#policy-selected-template-container div.policy-selected-component-name').text(d.id); } $('#policy-selected-template-container').show(); // populate the initial resource $('#selected-policy-component-id').text(d.id); $('#selected-policy-component-type').text('templates'); $('#component-policy-target') .combo('setOptionEnabled', { value: 'write-receive-data' }, false) .combo('setOptionEnabled', { value: 'write-send-data' }, false) .combo('setOptionEnabled', { value: 'read-data' }, false) .combo('setOptionEnabled', { value: 'write-data' }, false) .combo('setSelectedOption', { value: 'read-component' }); return loadPolicy().always(showPolicy); }, /** * Shows the component policy dialog. 
*/ showComponentPolicy: function (selection) { // reset the policy message resetPolicyMessage(); // update the policy controls visibility $('#component-policy-controls').show(); $('#global-policy-controls').hide(); // update the visibility $('#policy-selected-component-container').show(); var resource; if (selection.empty()) { $('#selected-policy-component-id').text(nfCanvasUtils.getGroupId()); resource = 'process-groups'; // disable site to site option $('#component-policy-target') .combo('setOptionEnabled', { value: 'write-receive-data' }, false) .combo('setOptionEnabled', { value: 'write-send-data' }, false) .combo('setOptionEnabled', { value: 'read-data' }, true) .combo('setOptionEnabled', { value: 'write-data' }, true); } else { var d = selection.datum(); $('#selected-policy-component-id').text(d.id); if (nfCanvasUtils.isProcessor(selection)) { resource = 'processors'; } else if (nfCanvasUtils.isProcessGroup(selection)) { resource = 'process-groups'; } else if (nfCanvasUtils.isInputPort(selection)) { resource = 'input-ports'; } else if (nfCanvasUtils.isOutputPort(selection)) { resource = 'output-ports'; } else if (nfCanvasUtils.isRemoteProcessGroup(selection)) { resource = 'remote-process-groups'; } else if (nfCanvasUtils.isLabel(selection)) { resource = 'labels'; } else if (nfCanvasUtils.isFunnel(selection)) { resource = 'funnels'; } // enable site to site option $('#component-policy-target') .combo('setOptionEnabled', { value: 'write-receive-data' }, nfCanvasUtils.isInputPort(selection) && nfCanvasUtils.getParentGroupId() === null) .combo('setOptionEnabled', { value: 'write-send-data' }, nfCanvasUtils.isOutputPort(selection) && nfCanvasUtils.getParentGroupId() === null) .combo('setOptionEnabled', { value: 'read-data' }, !nfCanvasUtils.isLabel(selection)) .combo('setOptionEnabled', { value: 'write-data' }, !nfCanvasUtils.isLabel(selection)); } // populate the initial resource $('#selected-policy-component-type').text(resource); $('#component-policy-target').combo('setSelectedOption', { value: 'read-component' }); return loadPolicy().always(showPolicy); }, /** * Shows the global policies dialog. */ showGlobalPolicies: function () { // reset the policy message resetPolicyMessage(); // update the policy controls visibility $('#component-policy-controls').hide(); $('#global-policy-controls').show(); // reload the current policies var policyType = $('#policy-type-list').combo('getSelectedOption').value; $('#selected-policy-type').text(policyType); if (globalPolicySupportsReadWrite(policyType)) { $('#selected-policy-action').text($('#controller-policy-target').combo('getSelectedOption').value); } else if (globalPolicySupportsWrite(policyType)) { $('#selected-policy-action').text('write'); } else { $('#selected-policy-action').text('read'); } return loadPolicy().always(showPolicy); } }; return nfPolicyManagement; }));
tequalsme/nifi
nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-ui/src/main/webapp/js/nf/canvas/nf-policy-management.js
JavaScript
apache-2.0
55,421
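The component-policy-target combo in the JS above maps the selected option onto an (action, resource) pair, which getSelectedResourceAndAction then turns into a request against ../nifi-api/policies/{action}/{resource}. A compact Java sketch of that mapping follows; the class, record, and method names are invented for illustration, and the authoritative logic is the JavaScript above.

public class PolicyTarget {
    record Selection(String action, String resource) {}

    static Selection resolve(String option, String componentType, String componentId) {
        String base = componentType + "/" + componentId;
        return switch (option) {
            case "read-component"  -> new Selection("read",  base);
            case "write-component" -> new Selection("write", base);
            case "read-data"       -> new Selection("read",  "data/" + base);
            case "write-data"      -> new Selection("write", "data/" + base);
            case "read-policies"   -> new Selection("read",  "policies/" + base);
            case "write-policies"  -> new Selection("write", "policies/" + base);
            default -> throw new IllegalArgumentException("unknown option: " + option);
        };
    }

    public static void main(String[] args) {
        Selection s = resolve("read-data", "processors", "abc-123");
        // The UI then fetches ../nifi-api/policies/{action}/{resource}:
        System.out.println("/policies/" + s.action() + "/" + s.resource());
    }
}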
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.pivot.beans;

/**
 * Thrown when an error is encountered during binding.
 */
public class BindException extends RuntimeException {
    private static final long serialVersionUID = 7245531555497832713L;

    public BindException() {
        super();
    }

    public BindException(String message) {
        super(message);
    }

    public BindException(Throwable cause) {
        super(cause);
    }

    public BindException(String message, Throwable cause) {
        super(message, cause);
    }
}
ggeorg/chillverse
src/org/apache/pivot/beans/BindException.java
Java
apache-2.0
1,323
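Illustrative sketch (not part of the Apache Pivot sources above): one plausible way BindException is used is to wrap a lower-level reflection failure while binding a property. The BeanLoader class and its method below are hypothetical; only BindException's constructors come from the file above.

import org.apache.pivot.beans.BindException;

// Hypothetical caller: wraps a reflection failure in BindException so that
// all binding errors surface as a single unchecked exception type.
public class BeanLoader {
    public static Object getProperty(Object bean, String propertyName) {
        try {
            String getter = "get" + Character.toUpperCase(propertyName.charAt(0))
                    + propertyName.substring(1);
            return bean.getClass().getMethod(getter).invoke(bean);
        } catch (ReflectiveOperationException exception) {
            // Keep the original failure available via getCause().
            throw new BindException("Unable to bind property: " + propertyName, exception);
        }
    }
}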
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.lambda.model.transform; import static com.amazonaws.util.StringUtils.UTF8; import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.OutputStreamWriter; import java.io.StringWriter; import java.io.Writer; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.List; import java.util.regex.Pattern; import com.amazonaws.AmazonClientException; import com.amazonaws.Request; import com.amazonaws.DefaultRequest; import com.amazonaws.http.HttpMethodName; import com.amazonaws.services.lambda.model.*; import com.amazonaws.transform.Marshaller; import com.amazonaws.util.BinaryUtils; import com.amazonaws.util.StringUtils; import com.amazonaws.util.StringInputStream; import com.amazonaws.util.json.*; /** * Create Event Source Mapping Request Marshaller */ public class CreateEventSourceMappingRequestMarshaller implements Marshaller<Request<CreateEventSourceMappingRequest>, CreateEventSourceMappingRequest> { private static final String RESOURCE_PATH_TEMPLATE; private static final Map<String, String> STATIC_QUERY_PARAMS; private static final Map<String, String> DYNAMIC_QUERY_PARAMS; static { String path = "/2015-03-31/event-source-mappings/"; Map<String, String> staticMap = new HashMap<String, String>(); Map<String, String> dynamicMap = new HashMap<String, String>(); int index = path.indexOf("?"); if (index != -1) { String queryString = path.substring(index + 1); path = path.substring(0, index); for (String s : queryString.split("[;&]")) { index = s.indexOf("="); if (index != -1) { String name = s.substring(0, index); String value = s.substring(index + 1); if (value.startsWith("{") && value.endsWith("}")) { dynamicMap.put(value.substring(1, value.length() - 1), name); } else { staticMap.put(name, value); } } } } RESOURCE_PATH_TEMPLATE = path; STATIC_QUERY_PARAMS = Collections.unmodifiableMap(staticMap); DYNAMIC_QUERY_PARAMS = Collections.unmodifiableMap(dynamicMap); } public Request<CreateEventSourceMappingRequest> marshall(CreateEventSourceMappingRequest createEventSourceMappingRequest) { if (createEventSourceMappingRequest == null) { throw new AmazonClientException("Invalid argument passed to marshall(...)"); } Request<CreateEventSourceMappingRequest> request = new DefaultRequest<CreateEventSourceMappingRequest>(createEventSourceMappingRequest, "AWSLambda"); String target = "AWSLambda.CreateEventSourceMapping"; request.addHeader("X-Amz-Target", target); request.setHttpMethod(HttpMethodName.POST); String uriResourcePath = RESOURCE_PATH_TEMPLATE; request.setResourcePath(uriResourcePath.replaceAll("//", "/")); for (Map.Entry<String, String> entry : STATIC_QUERY_PARAMS.entrySet()) { request.addParameter(entry.getKey(), entry.getValue()); } try { StringWriter stringWriter = new StringWriter(); JSONWriter jsonWriter = new JSONWriter(stringWriter); jsonWriter.object(); if 
(createEventSourceMappingRequest.getEventSourceArn() != null) { jsonWriter.key("EventSourceArn").value(createEventSourceMappingRequest.getEventSourceArn()); } if (createEventSourceMappingRequest.getFunctionName() != null) { jsonWriter.key("FunctionName").value(createEventSourceMappingRequest.getFunctionName()); } if (createEventSourceMappingRequest.isEnabled() != null) { jsonWriter.key("Enabled").value(createEventSourceMappingRequest.isEnabled()); } if (createEventSourceMappingRequest.getBatchSize() != null) { jsonWriter.key("BatchSize").value(createEventSourceMappingRequest.getBatchSize()); } if (createEventSourceMappingRequest.getStartingPosition() != null) { jsonWriter.key("StartingPosition").value(createEventSourceMappingRequest.getStartingPosition()); } jsonWriter.endObject(); String snippet = stringWriter.toString(); byte[] content = snippet.getBytes(UTF8); request.setContent(new StringInputStream(snippet)); request.addHeader("Content-Length", Integer.toString(content.length)); request.addHeader("Content-Type", "application/x-amz-json-1.1"); } catch(Throwable t) { throw new AmazonClientException("Unable to marshall request to JSON: " + t.getMessage(), t); } return request; } }
mhurne/aws-sdk-java
aws-java-sdk-lambda/src/main/java/com/amazonaws/services/lambda/model/transform/CreateEventSourceMappingRequestMarshaller.java
Java
apache-2.0
5,568
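Illustrative sketch (not from the AWS SDK sources above): how the marshaller above might be driven. The setters are assumed to mirror the getters the marshaller reads, which is the usual pattern in the AWS SDK for Java; the ARN and function name are placeholders.

import com.amazonaws.Request;
import com.amazonaws.services.lambda.model.CreateEventSourceMappingRequest;
import com.amazonaws.services.lambda.model.transform.CreateEventSourceMappingRequestMarshaller;

public class MarshallerSketch {
    public static void main(String[] args) {
        // Populate the model object; the marshaller serializes exactly these fields.
        CreateEventSourceMappingRequest request = new CreateEventSourceMappingRequest();
        request.setEventSourceArn("arn:aws:kinesis:us-east-1:123456789012:stream/example");
        request.setFunctionName("example-function");
        request.setBatchSize(100);
        request.setEnabled(true);
        request.setStartingPosition("TRIM_HORIZON");

        // Marshall into a signable HTTP request (POST /2015-03-31/event-source-mappings/).
        Request<CreateEventSourceMappingRequest> httpRequest =
                new CreateEventSourceMappingRequestMarshaller().marshall(request);
        System.out.println(httpRequest.getHttpMethod() + " " + httpRequest.getResourcePath());
    }
}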
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.carbondata.scan.model;

import java.io.Serializable;

import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;

/**
 * Query plan dimension which holds the information about the query plan dimension.
 * This is done to avoid heavy object serialization.
 */
public class QueryDimension extends QueryColumn implements Serializable {

  /**
   * serialVersionUID
   */
  private static final long serialVersionUID = -8492704093776645651L;

  /**
   * actual dimension column
   */
  private transient CarbonDimension dimension;

  public QueryDimension(String columnName) {
    super(columnName);
  }

  /**
   * @return the dimension
   */
  public CarbonDimension getDimension() {
    return dimension;
  }

  /**
   * @param dimension the dimension to set
   */
  public void setDimension(CarbonDimension dimension) {
    this.dimension = dimension;
  }
}
foryou2030/incubator-carbondata
core/src/main/java/org/apache/carbondata/scan/model/QueryDimension.java
Java
apache-2.0
1,715
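Illustrative sketch (not part of the CarbonData sources above), using only the constructor and setter shown: because the CarbonDimension field is transient, only the column name survives a serialize/deserialize round trip, and the schema object must be re-attached afterwards.

import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
import org.apache.carbondata.scan.model.QueryDimension;

public class QueryDimensionSketch {
    // Builds the plan-side wrapper for a dimension column. After
    // deserialization the caller must invoke setDimension(...) again,
    // since the CarbonDimension reference is not serialized.
    public static QueryDimension wrap(String columnName, CarbonDimension dimension) {
        QueryDimension queryDimension = new QueryDimension(columnName);
        queryDimension.setDimension(dimension);
        return queryDimension;
    }
}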
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/ce/model/GetReservationCoverageResult.h> #include <aws/core/utils/json/JsonSerializer.h> #include <aws/core/AmazonWebServiceResult.h> #include <aws/core/utils/StringUtils.h> #include <aws/core/utils/UnreferencedParam.h> #include <utility> using namespace Aws::CostExplorer::Model; using namespace Aws::Utils::Json; using namespace Aws::Utils; using namespace Aws; GetReservationCoverageResult::GetReservationCoverageResult() { } GetReservationCoverageResult::GetReservationCoverageResult(const Aws::AmazonWebServiceResult<JsonValue>& result) { *this = result; } GetReservationCoverageResult& GetReservationCoverageResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result) { JsonView jsonValue = result.GetPayload().View(); if(jsonValue.ValueExists("CoveragesByTime")) { Array<JsonView> coveragesByTimeJsonList = jsonValue.GetArray("CoveragesByTime"); for(unsigned coveragesByTimeIndex = 0; coveragesByTimeIndex < coveragesByTimeJsonList.GetLength(); ++coveragesByTimeIndex) { m_coveragesByTime.push_back(coveragesByTimeJsonList[coveragesByTimeIndex].AsObject()); } } if(jsonValue.ValueExists("Total")) { m_total = jsonValue.GetObject("Total"); } if(jsonValue.ValueExists("NextPageToken")) { m_nextPageToken = jsonValue.GetString("NextPageToken"); } return *this; }
jt70471/aws-sdk-cpp
aws-cpp-sdk-ce/source/model/GetReservationCoverageResult.cpp
C++
apache-2.0
1,482
package mina; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class MyLog { private static final Logger log = LoggerFactory.getLogger(MyLog.class); public static void log_cmd(byte[] data){ //int cmd_type = CommandParser.getCommandType(data); //System.out.println("cmd_type:" + cmd_type); } public static void log_data(String sPrefix, byte[] data){ int iLen = data.length; log.debug(sPrefix + "length:" + iLen); StringBuilder sDebug = new StringBuilder(); String sHex; for(int i = 0; i < iLen; i++){ sHex = String.format("0x%02x", data[i]&0xff); sDebug.append(sHex); //sDebug.append(Integer.toHexString(data[i]&0xff)); sDebug.append(" "); } log.debug(sDebug.toString()); log.debug(" "); } public static void log_output(byte[] data){ //log_data("<<<<<<output<<<<<", data); } public static void log_input(byte[] data){ //log_cmd(data); //log_data(">>>>>>input>>>>>>", data); } }
sunjob/sensor
src/mina/MyLog.java
Java
apache-2.0
948
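Illustrative sketch (not part of the repository above) showing the one active entry point of MyLog; it assumes an SLF4J binding with DEBUG enabled for mina.MyLog is on the classpath, since everything is emitted at debug level.

import mina.MyLog;

public class MyLogSketch {
    public static void main(String[] args) {
        byte[] frame = { 0x01, (byte) 0xAB, 0x7F };
        // Logs "frame length:3" followed by each byte as zero-padded,
        // 0x-prefixed hex: 0x01 0xab 0x7f
        MyLog.log_data("frame ", frame);
    }
}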
/****************************************************************************** * $Id: ogrpgeogeometry.cpp 33631 2016-03-04 06:28:09Z goatbar $ * * Project: OpenGIS Simple Features Reference Implementation * Purpose: Implements decoder of shapebin geometry for PGeo * Author: Frank Warmerdam, warmerdam@pobox.com * Paul Ramsey, pramsey at cleverelephant.ca * ****************************************************************************** * Copyright (c) 2005, Frank Warmerdam <warmerdam@pobox.com> * Copyright (c) 2011, Paul Ramsey <pramsey at cleverelephant.ca> * Copyright (c) 2011-2014, Even Rouault <even dot rouault at mines-paris dot org> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. ****************************************************************************/ #include "ogrpgeogeometry.h" #include "ogr_p.h" #include "cpl_string.h" #include <limits> CPL_CVSID("$Id: ogrpgeogeometry.cpp 33631 2016-03-04 06:28:09Z goatbar $"); #define SHPP_TRISTRIP 0 #define SHPP_TRIFAN 1 #define SHPP_OUTERRING 2 #define SHPP_INNERRING 3 #define SHPP_FIRSTRING 4 #define SHPP_RING 5 #define SHPP_TRIANGLES 6 /* Multipatch 9.0 specific */ /************************************************************************/ /* OGRCreateFromMultiPatchPart() */ /************************************************************************/ void OGRCreateFromMultiPatchPart(OGRMultiPolygon *poMP, OGRPolygon*& poLastPoly, int nPartType, int nPartPoints, double* padfX, double* padfY, double* padfZ) { nPartType &= 0xf; if( nPartType == SHPP_TRISTRIP ) { if( poLastPoly != NULL ) { poMP->addGeometryDirectly( poLastPoly ); poLastPoly = NULL; } for( int iBaseVert = 0; iBaseVert < nPartPoints-2; iBaseVert++ ) { OGRPolygon *poPoly = new OGRPolygon(); OGRLinearRing *poRing = new OGRLinearRing(); int iSrcVert = iBaseVert; poRing->setPoint( 0, padfX[iSrcVert], padfY[iSrcVert], padfZ[iSrcVert] ); poRing->setPoint( 1, padfX[iSrcVert+1], padfY[iSrcVert+1], padfZ[iSrcVert+1] ); poRing->setPoint( 2, padfX[iSrcVert+2], padfY[iSrcVert+2], padfZ[iSrcVert+2] ); poRing->setPoint( 3, padfX[iSrcVert], padfY[iSrcVert], padfZ[iSrcVert] ); poPoly->addRingDirectly( poRing ); poMP->addGeometryDirectly( poPoly ); } } else if( nPartType == SHPP_TRIFAN ) { if( poLastPoly != NULL ) { poMP->addGeometryDirectly( poLastPoly ); poLastPoly = NULL; } for( int iBaseVert = 0; iBaseVert < nPartPoints-2; iBaseVert++ ) { OGRPolygon *poPoly = new OGRPolygon(); OGRLinearRing *poRing = new OGRLinearRing(); int iSrcVert = iBaseVert; poRing->setPoint( 0, padfX[0], padfY[0], padfZ[0] ); 
poRing->setPoint( 1, padfX[iSrcVert+1], padfY[iSrcVert+1], padfZ[iSrcVert+1] ); poRing->setPoint( 2, padfX[iSrcVert+2], padfY[iSrcVert+2], padfZ[iSrcVert+2] ); poRing->setPoint( 3, padfX[0], padfY[0], padfZ[0] ); poPoly->addRingDirectly( poRing ); poMP->addGeometryDirectly( poPoly ); } } else if( nPartType == SHPP_OUTERRING || nPartType == SHPP_INNERRING || nPartType == SHPP_FIRSTRING || nPartType == SHPP_RING ) { if( poLastPoly != NULL && (nPartType == SHPP_OUTERRING || nPartType == SHPP_FIRSTRING) ) { poMP->addGeometryDirectly( poLastPoly ); poLastPoly = NULL; } if( poLastPoly == NULL ) poLastPoly = new OGRPolygon(); OGRLinearRing *poRing = new OGRLinearRing; poRing->setPoints( nPartPoints, padfX, padfY, padfZ ); poRing->closeRings(); poLastPoly->addRingDirectly( poRing ); } else if ( nPartType == SHPP_TRIANGLES ) { if( poLastPoly != NULL ) { poMP->addGeometryDirectly( poLastPoly ); poLastPoly = NULL; } for( int iBaseVert = 0; iBaseVert < nPartPoints-2; iBaseVert+=3 ) { OGRPolygon *poPoly = new OGRPolygon(); OGRLinearRing *poRing = new OGRLinearRing(); int iSrcVert = iBaseVert; poRing->setPoint( 0, padfX[iSrcVert], padfY[iSrcVert], padfZ[iSrcVert] ); poRing->setPoint( 1, padfX[iSrcVert+1], padfY[iSrcVert+1], padfZ[iSrcVert+1] ); poRing->setPoint( 2, padfX[iSrcVert+2], padfY[iSrcVert+2], padfZ[iSrcVert+2] ); poRing->setPoint( 3, padfX[iSrcVert], padfY[iSrcVert], padfZ[iSrcVert] ); poPoly->addRingDirectly( poRing ); poMP->addGeometryDirectly( poPoly ); } } else CPLDebug( "OGR", "Unrecognized parttype %d, ignored.", nPartType ); } /************************************************************************/ /* OGRCreateFromMultiPatch() */ /* */ /* Translate a multipatch representation to an OGR geometry */ /* Mostly copied from shape2ogr.cpp */ /************************************************************************/ static OGRGeometry* OGRCreateFromMultiPatch(int nParts, GInt32* panPartStart, GInt32* panPartType, int nPoints, double* padfX, double* padfY, double* padfZ) { OGRMultiPolygon *poMP = new OGRMultiPolygon(); int iPart; OGRPolygon *poLastPoly = NULL; for( iPart = 0; iPart < nParts; iPart++ ) { int nPartPoints, nPartStart; // Figure out details about this part's vertex list. if( panPartStart == NULL ) { nPartPoints = nPoints; nPartStart = 0; } else { if( iPart == nParts - 1 ) nPartPoints = nPoints - panPartStart[iPart]; else nPartPoints = panPartStart[iPart+1] - panPartStart[iPart]; nPartStart = panPartStart[iPart]; } OGRCreateFromMultiPatchPart(poMP, poLastPoly, panPartType[iPart], nPartPoints, padfX + nPartStart, padfY + nPartStart, padfZ + nPartStart); } if( poLastPoly != NULL ) { poMP->addGeometryDirectly( poLastPoly ); poLastPoly = NULL; } return poMP; } /************************************************************************/ /* OGRWriteToShapeBin() */ /* */ /* Translate OGR geometry to a shapefile binary representation */ /************************************************************************/ OGRErr OGRWriteToShapeBin( OGRGeometry *poGeom, GByte **ppabyShape, int *pnBytes ) { int nShpSize = 4; /* All types start with integer type number */ int nShpZSize = 0; /* Z gets tacked onto the end */ GUInt32 nPoints = 0; GUInt32 nParts = 0; /* -------------------------------------------------------------------- */ /* Null or Empty input maps to SHPT_NULL. */ /* -------------------------------------------------------------------- */ if ( ! 
poGeom || poGeom->IsEmpty() ) { *ppabyShape = (GByte*)VSI_MALLOC_VERBOSE(nShpSize); if( *ppabyShape == NULL ) return OGRERR_FAILURE; GUInt32 zero = SHPT_NULL; memcpy(*ppabyShape, &zero, nShpSize); *pnBytes = nShpSize; return OGRERR_NONE; } OGRwkbGeometryType nOGRType = wkbFlatten(poGeom->getGeometryType()); const bool b3d = wkbHasZ(poGeom->getGeometryType()); const bool bHasM = wkbHasM(poGeom->getGeometryType()); const int nCoordDims = poGeom->CoordinateDimension(); /* -------------------------------------------------------------------- */ /* Calculate the shape buffer size */ /* -------------------------------------------------------------------- */ if ( nOGRType == wkbPoint ) { nShpSize += 8 * nCoordDims; } else if ( nOGRType == wkbLineString ) { OGRLineString *poLine = (OGRLineString*)poGeom; nPoints = poLine->getNumPoints(); nParts = 1; nShpSize += 16 * nCoordDims; /* xy(z)(m) box */ nShpSize += 4; /* nparts */ nShpSize += 4; /* npoints */ nShpSize += 4; /* parts[1] */ nShpSize += 8 * nCoordDims * nPoints; /* points */ nShpZSize = 16 + 8 * nPoints; } else if ( nOGRType == wkbPolygon ) { poGeom->closeRings(); OGRPolygon *poPoly = (OGRPolygon*)poGeom; nParts = poPoly->getNumInteriorRings() + 1; for ( GUInt32 i = 0; i < nParts; i++ ) { OGRLinearRing *poRing; if ( i == 0 ) poRing = poPoly->getExteriorRing(); else poRing = poPoly->getInteriorRing(i-1); nPoints += poRing->getNumPoints(); } nShpSize += 16 * nCoordDims; /* xy(z)(m) box */ nShpSize += 4; /* nparts */ nShpSize += 4; /* npoints */ nShpSize += 4 * nParts; /* parts[nparts] */ nShpSize += 8 * nCoordDims * nPoints; /* points */ nShpZSize = 16 + 8 * nPoints; } else if ( nOGRType == wkbMultiPoint ) { OGRMultiPoint *poMPoint = (OGRMultiPoint*)poGeom; for ( int i = 0; i < poMPoint->getNumGeometries(); i++ ) { OGRPoint *poPoint = (OGRPoint*)(poMPoint->getGeometryRef(i)); if ( poPoint->IsEmpty() ) continue; nPoints++; } nShpSize += 16 * nCoordDims; /* xy(z)(m) box */ nShpSize += 4; /* npoints */ nShpSize += 8 * nCoordDims * nPoints; /* points */ nShpZSize = 16 + 8 * nPoints; } else if ( nOGRType == wkbMultiLineString ) { OGRMultiLineString *poMLine = (OGRMultiLineString*)poGeom; for ( int i = 0; i < poMLine->getNumGeometries(); i++ ) { OGRLineString *poLine = (OGRLineString*)(poMLine->getGeometryRef(i)); /* Skip empties */ if ( poLine->IsEmpty() ) continue; nParts++; nPoints += poLine->getNumPoints(); } nShpSize += 16 * nCoordDims; /* xy(z)(m) box */ nShpSize += 4; /* nparts */ nShpSize += 4; /* npoints */ nShpSize += 4 * nParts; /* parts[nparts] */ nShpSize += 8 * nCoordDims * nPoints ; /* points */ nShpZSize = 16 + 8 * nPoints; } else if ( nOGRType == wkbMultiPolygon ) { poGeom->closeRings(); OGRMultiPolygon *poMPoly = (OGRMultiPolygon*)poGeom; for( int j = 0; j < poMPoly->getNumGeometries(); j++ ) { OGRPolygon *poPoly = (OGRPolygon*)(poMPoly->getGeometryRef(j)); int nRings = poPoly->getNumInteriorRings() + 1; /* Skip empties */ if ( poPoly->IsEmpty() ) continue; nParts += nRings; for ( int i = 0; i < nRings; i++ ) { OGRLinearRing *poRing; if ( i == 0 ) poRing = poPoly->getExteriorRing(); else poRing = poPoly->getInteriorRing(i-1); nPoints += poRing->getNumPoints(); } } nShpSize += 16 * nCoordDims; /* xy(z)(m) box */ nShpSize += 4; /* nparts */ nShpSize += 4; /* npoints */ nShpSize += 4 * nParts; /* parts[nparts] */ nShpSize += 8 * nCoordDims * nPoints ; /* points */ nShpZSize = 16 + 8 * nPoints; } else { return OGRERR_UNSUPPORTED_OPERATION; } /* Allocate our shape buffer */ *ppabyShape = (GByte*)VSI_MALLOC_VERBOSE(nShpSize); if ( ! 
*ppabyShape ) return OGRERR_FAILURE; /* Fill in the output size. */ *pnBytes = nShpSize; /* Set up write pointers */ unsigned char *pabyPtr = *ppabyShape; unsigned char *pabyPtrZ = NULL; unsigned char *pabyPtrM = NULL; if( bHasM ) pabyPtrM = pabyPtr + nShpSize - nShpZSize; if ( b3d ) { if( bHasM ) pabyPtrZ = pabyPtrM - nShpZSize; else pabyPtrZ = pabyPtr + nShpSize - nShpZSize; } /* -------------------------------------------------------------------- */ /* Write in the Shape type number now */ /* -------------------------------------------------------------------- */ GUInt32 nGType = SHPT_NULL; switch(nOGRType) { case wkbPoint: { nGType = (b3d && bHasM) ? SHPT_POINTZM : (b3d) ? SHPT_POINTZ : (bHasM) ? SHPT_POINTM : SHPT_POINT; break; } case wkbMultiPoint: { nGType = (b3d && bHasM) ? SHPT_MULTIPOINTZM : (b3d) ? SHPT_MULTIPOINTZ : (bHasM) ? SHPT_MULTIPOINTM : SHPT_MULTIPOINT; break; } case wkbLineString: case wkbMultiLineString: { nGType = (b3d && bHasM) ? SHPT_ARCZM : (b3d) ? SHPT_ARCZ : (bHasM) ? SHPT_ARCM : SHPT_ARC; break; } case wkbPolygon: case wkbMultiPolygon: { nGType = (b3d && bHasM) ? SHPT_POLYGONZM : (b3d) ? SHPT_POLYGONZ : (bHasM) ? SHPT_POLYGONM : SHPT_POLYGON; break; } default: { return OGRERR_UNSUPPORTED_OPERATION; } } /* Write in the type number and advance the pointer */ nGType = CPL_LSBWORD32( nGType ); memcpy( pabyPtr, &nGType, 4 ); pabyPtr += 4; /* -------------------------------------------------------------------- */ /* POINT and POINTZ */ /* -------------------------------------------------------------------- */ if ( nOGRType == wkbPoint ) { OGRPoint *poPoint = (OGRPoint*)poGeom; double x = poPoint->getX(); double y = poPoint->getY(); /* Copy in the raw data. */ memcpy( pabyPtr, &x, 8 ); memcpy( pabyPtr+8, &y, 8 ); if( b3d ) { double z = poPoint->getZ(); memcpy( pabyPtr+8+8, &z, 8 ); } if( bHasM ) { double m = poPoint->getM(); memcpy( pabyPtr+8+((b3d) ? 16 : 8), &m, 8 ); } /* Swap if needed. Shape doubles always LSB */ if( OGR_SWAP( wkbNDR ) ) { CPL_SWAPDOUBLE( pabyPtr ); CPL_SWAPDOUBLE( pabyPtr+8 ); if( b3d ) CPL_SWAPDOUBLE( pabyPtr+8+8 ); if( bHasM ) CPL_SWAPDOUBLE( pabyPtr+8+((b3d) ? 16 : 8) ); } return OGRERR_NONE; } /* -------------------------------------------------------------------- */ /* All the non-POINT types require an envelope next */ /* -------------------------------------------------------------------- */ OGREnvelope3D envelope; poGeom->getEnvelope(&envelope); memcpy( pabyPtr, &(envelope.MinX), 8 ); memcpy( pabyPtr+8, &(envelope.MinY), 8 ); memcpy( pabyPtr+8+8, &(envelope.MaxX), 8 ); memcpy( pabyPtr+8+8+8, &(envelope.MaxY), 8 ); /* Swap box if needed. 
Shape doubles are always LSB */ if( OGR_SWAP( wkbNDR ) ) { for ( int i = 0; i < 4; i++ ) CPL_SWAPDOUBLE( pabyPtr + 8*i ); } pabyPtr += 32; /* Write in the Z bounds at the end of the XY buffer */ if ( b3d ) { memcpy( pabyPtrZ, &(envelope.MinZ), 8 ); memcpy( pabyPtrZ+8, &(envelope.MaxZ), 8 ); /* Swap Z bounds if necessary */ if( OGR_SWAP( wkbNDR ) ) { for ( int i = 0; i < 2; i++ ) CPL_SWAPDOUBLE( pabyPtrZ + 8*i ); } pabyPtrZ += 16; } /* Reserve space for the M bounds at the end of the XY buffer */ GByte* pabyPtrMBounds = NULL; double dfMinM = std::numeric_limits<double>::max(); double dfMaxM = -dfMinM; if ( bHasM ) { pabyPtrMBounds = pabyPtrM; pabyPtrM += 16; } /* -------------------------------------------------------------------- */ /* LINESTRING and LINESTRINGZ */ /* -------------------------------------------------------------------- */ if ( nOGRType == wkbLineString ) { const OGRLineString *poLine = (OGRLineString*)poGeom; /* Write in the nparts (1) */ GUInt32 nPartsLsb = CPL_LSBWORD32( nParts ); memcpy( pabyPtr, &nPartsLsb, 4 ); pabyPtr += 4; /* Write in the npoints */ GUInt32 nPointsLsb = CPL_LSBWORD32( nPoints ); memcpy( pabyPtr, &nPointsLsb, 4 ); pabyPtr += 4; /* Write in the part index (0) */ GUInt32 nPartIndex = 0; memcpy( pabyPtr, &nPartIndex, 4 ); pabyPtr += 4; /* Write in the point data */ poLine->getPoints((OGRRawPoint*)pabyPtr, (double*)pabyPtrZ); if( bHasM ) { for( GUInt32 k = 0; k < nPoints; k++ ) { double dfM = poLine->getM(k); memcpy( pabyPtrM + 8*k, &dfM, 8); if( dfM < dfMinM ) dfMinM = dfM; if( dfM > dfMaxM ) dfMaxM = dfM; } } /* Swap if necessary */ if( OGR_SWAP( wkbNDR ) ) { for( GUInt32 k = 0; k < nPoints; k++ ) { CPL_SWAPDOUBLE( pabyPtr + 16*k ); CPL_SWAPDOUBLE( pabyPtr + 16*k + 8 ); if( b3d ) CPL_SWAPDOUBLE( pabyPtrZ + 8*k ); if( bHasM ) CPL_SWAPDOUBLE( pabyPtrM + 8*k ); } } } /* -------------------------------------------------------------------- */ /* POLYGON and POLYGONZ */ /* -------------------------------------------------------------------- */ else if ( nOGRType == wkbPolygon ) { OGRPolygon *poPoly = (OGRPolygon*)poGeom; /* Write in the part count */ GUInt32 nPartsLsb = CPL_LSBWORD32( nParts ); memcpy( pabyPtr, &nPartsLsb, 4 ); pabyPtr += 4; /* Write in the total point count */ GUInt32 nPointsLsb = CPL_LSBWORD32( nPoints ); memcpy( pabyPtr, &nPointsLsb, 4 ); pabyPtr += 4; /* -------------------------------------------------------------------- */ /* Now we have to visit each ring and write an index number into */ /* the parts list, and the coordinates into the points list. */ /* to do it in one pass, we will use three write pointers. */ /* pabyPtr writes the part indexes */ /* pabyPoints writes the xy coordinates */ /* pabyPtrZ writes the z coordinates */ /* -------------------------------------------------------------------- */ /* Just past the partindex[nparts] array */ unsigned char* pabyPoints = pabyPtr + 4*nParts; int nPointIndexCount = 0; for( GUInt32 i = 0; i < nParts; i++ ) { /* Check our Ring and condition it */ OGRLinearRing *poRing; if ( i == 0 ) { poRing = poPoly->getExteriorRing(); /* Outer ring must be clockwise */ if ( ! poRing->isClockwise() ) poRing->reverseWindingOrder(); } else { poRing = poPoly->getInteriorRing(i-1); /* Inner rings should be anti-clockwise */ if ( poRing->isClockwise() ) poRing->reverseWindingOrder(); } int nRingNumPoints = poRing->getNumPoints(); /* Cannot write un-closed rings to shape */ if( nRingNumPoints <= 2 || ! 
poRing->get_IsClosed() ) return OGRERR_FAILURE; /* Write in the part index */ GUInt32 nPartIndex = CPL_LSBWORD32( nPointIndexCount ); memcpy( pabyPtr, &nPartIndex, 4 ); /* Write in the point data */ poRing->getPoints((OGRRawPoint*)pabyPoints, (double*)pabyPtrZ); if( bHasM ) { for( int k = 0; k < nRingNumPoints; k++ ) { double dfM = poRing->getM(k); memcpy( pabyPtrM + 8*k, &dfM, 8); if( dfM < dfMinM ) dfMinM = dfM; if( dfM > dfMaxM ) dfMaxM = dfM; } } /* Swap if necessary */ if( OGR_SWAP( wkbNDR ) ) { for( int k = 0; k < nRingNumPoints; k++ ) { CPL_SWAPDOUBLE( pabyPoints + 16*k ); CPL_SWAPDOUBLE( pabyPoints + 16*k + 8 ); if( b3d ) CPL_SWAPDOUBLE( pabyPtrZ + 8*k ); if( bHasM ) CPL_SWAPDOUBLE( pabyPtrM + 8*k ); } } nPointIndexCount += nRingNumPoints; /* Advance the write pointers */ pabyPtr += 4; pabyPoints += 16 * nRingNumPoints; if ( b3d ) pabyPtrZ += 8 * nRingNumPoints; if ( bHasM ) pabyPtrM += 8 * nRingNumPoints; } } /* -------------------------------------------------------------------- */ /* MULTIPOINT and MULTIPOINTZ */ /* -------------------------------------------------------------------- */ else if ( nOGRType == wkbMultiPoint ) { OGRMultiPoint *poMPoint = (OGRMultiPoint*)poGeom; /* Write in the total point count */ GUInt32 nPointsLsb = CPL_LSBWORD32( nPoints ); memcpy( pabyPtr, &nPointsLsb, 4 ); pabyPtr += 4; /* -------------------------------------------------------------------- */ /* Now we have to visit each point write it into the points list */ /* We will use two write pointers. */ /* pabyPtr writes the xy coordinates */ /* pabyPtrZ writes the z coordinates */ /* -------------------------------------------------------------------- */ for( GUInt32 i = 0; i < nPoints; i++ ) { const OGRPoint *poPt = (OGRPoint*)(poMPoint->getGeometryRef(i)); /* Skip empties */ if ( poPt->IsEmpty() ) continue; /* Write the coordinates */ double x = poPt->getX(); double y = poPt->getY(); memcpy(pabyPtr, &x, 8); memcpy(pabyPtr+8, &y, 8); if ( b3d ) { double z = poPt->getZ(); memcpy(pabyPtrZ, &z, 8); } if ( bHasM ) { double dfM = poPt->getM(); memcpy(pabyPtrM, &dfM, 8); if( dfM < dfMinM ) dfMinM = dfM; if( dfM > dfMaxM ) dfMaxM = dfM; } /* Swap if necessary */ if( OGR_SWAP( wkbNDR ) ) { CPL_SWAPDOUBLE( pabyPtr ); CPL_SWAPDOUBLE( pabyPtr + 8 ); if( b3d ) CPL_SWAPDOUBLE( pabyPtrZ ); if( bHasM ) CPL_SWAPDOUBLE( pabyPtrM ); } /* Advance the write pointers */ pabyPtr += 16; if ( b3d ) pabyPtrZ += 8; if ( bHasM ) pabyPtrM += 8; } } /* -------------------------------------------------------------------- */ /* MULTILINESTRING and MULTILINESTRINGZ */ /* -------------------------------------------------------------------- */ else if ( nOGRType == wkbMultiLineString ) { OGRMultiLineString *poMLine = (OGRMultiLineString*)poGeom; /* Write in the part count */ GUInt32 nPartsLsb = CPL_LSBWORD32( nParts ); memcpy( pabyPtr, &nPartsLsb, 4 ); pabyPtr += 4; /* Write in the total point count */ GUInt32 nPointsLsb = CPL_LSBWORD32( nPoints ); memcpy( pabyPtr, &nPointsLsb, 4 ); pabyPtr += 4; /* Just past the partindex[nparts] array */ unsigned char* pabyPoints = pabyPtr + 4*nParts; int nPointIndexCount = 0; for( GUInt32 i = 0; i < nParts; i++ ) { const OGRLineString *poLine = (OGRLineString*)(poMLine->getGeometryRef(i)); /* Skip empties */ if ( poLine->IsEmpty() ) continue; int nLineNumPoints = poLine->getNumPoints(); /* Write in the part index */ GUInt32 nPartIndex = CPL_LSBWORD32( nPointIndexCount ); memcpy( pabyPtr, &nPartIndex, 4 ); /* Write in the point data */ poLine->getPoints((OGRRawPoint*)pabyPoints, 
(double*)pabyPtrZ); if( bHasM ) { for( int k = 0; k < nLineNumPoints; k++ ) { double dfM = poLine->getM(k); memcpy( pabyPtrM + 8*k, &dfM, 8); if( dfM < dfMinM ) dfMinM = dfM; if( dfM > dfMaxM ) dfMaxM = dfM; } } /* Swap if necessary */ if( OGR_SWAP( wkbNDR ) ) { for( int k = 0; k < nLineNumPoints; k++ ) { CPL_SWAPDOUBLE( pabyPoints + 16*k ); CPL_SWAPDOUBLE( pabyPoints + 16*k + 8 ); if( b3d ) CPL_SWAPDOUBLE( pabyPtrZ + 8*k ); if( bHasM ) CPL_SWAPDOUBLE( pabyPtrM + 8*k ); } } nPointIndexCount += nLineNumPoints; /* Advance the write pointers */ pabyPtr += 4; pabyPoints += 16 * nLineNumPoints; if ( b3d ) pabyPtrZ += 8 * nLineNumPoints; if ( bHasM ) pabyPtrM += 8 * nLineNumPoints; } } /* -------------------------------------------------------------------- */ /* MULTIPOLYGON and MULTIPOLYGONZ */ /* -------------------------------------------------------------------- */ else /* if ( nOGRType == wkbMultiPolygon ) */ { OGRMultiPolygon *poMPoly = (OGRMultiPolygon*)poGeom; /* Write in the part count */ GUInt32 nPartsLsb = CPL_LSBWORD32( nParts ); memcpy( pabyPtr, &nPartsLsb, 4 ); pabyPtr += 4; /* Write in the total point count */ GUInt32 nPointsLsb = CPL_LSBWORD32( nPoints ); memcpy( pabyPtr, &nPointsLsb, 4 ); pabyPtr += 4; /* -------------------------------------------------------------------- */ /* Now we have to visit each ring and write an index number into */ /* the parts list, and the coordinates into the points list. */ /* to do it in one pass, we will use three write pointers. */ /* pabyPtr writes the part indexes */ /* pabyPoints writes the xy coordinates */ /* pabyPtrZ writes the z coordinates */ /* -------------------------------------------------------------------- */ /* Just past the partindex[nparts] array */ unsigned char* pabyPoints = pabyPtr + 4*nParts; int nPointIndexCount = 0; for( int i = 0; i < poMPoly->getNumGeometries(); i++ ) { OGRPolygon *poPoly = (OGRPolygon*)(poMPoly->getGeometryRef(i)); /* Skip empties */ if ( poPoly->IsEmpty() ) continue; int nRings = 1 + poPoly->getNumInteriorRings(); for( int j = 0; j < nRings; j++ ) { /* Check our Ring and condition it */ OGRLinearRing *poRing; if ( j == 0 ) { poRing = poPoly->getExteriorRing(); /* Outer ring must be clockwise */ if ( ! poRing->isClockwise() ) poRing->reverseWindingOrder(); } else { poRing = poPoly->getInteriorRing(j-1); /* Inner rings should be anti-clockwise */ if ( poRing->isClockwise() ) poRing->reverseWindingOrder(); } int nRingNumPoints = poRing->getNumPoints(); /* Cannot write closed rings to shape */ if( nRingNumPoints <= 2 || ! 
poRing->get_IsClosed() ) return OGRERR_FAILURE; /* Write in the part index */ GUInt32 nPartIndex = CPL_LSBWORD32( nPointIndexCount ); memcpy( pabyPtr, &nPartIndex, 4 ); /* Write in the point data */ poRing->getPoints((OGRRawPoint*)pabyPoints, (double*)pabyPtrZ); if( bHasM ) { for( int k = 0; k < nRingNumPoints; k++ ) { double dfM = poRing->getM(k); memcpy( pabyPtrM + 8*k, &dfM, 8); if( dfM < dfMinM ) dfMinM = dfM; if( dfM > dfMaxM ) dfMaxM = dfM; } } /* Swap if necessary */ if( OGR_SWAP( wkbNDR ) ) { for( int k = 0; k < nRingNumPoints; k++ ) { CPL_SWAPDOUBLE( pabyPoints + 16*k ); CPL_SWAPDOUBLE( pabyPoints + 16*k + 8 ); if( b3d ) CPL_SWAPDOUBLE( pabyPtrZ + 8*k ); if( bHasM ) CPL_SWAPDOUBLE( pabyPtrM + 8*k ); } } nPointIndexCount += nRingNumPoints; /* Advance the write pointers */ pabyPtr += 4; pabyPoints += 16 * nRingNumPoints; if ( b3d ) pabyPtrZ += 8 * nRingNumPoints; if ( bHasM ) pabyPtrM += 8 * nRingNumPoints; } } } if ( bHasM ) { if( dfMinM > dfMaxM ) { dfMinM = 0.0; dfMaxM = 0.0; } memcpy( pabyPtrMBounds, &(dfMinM), 8 ); memcpy( pabyPtrMBounds+8, &(dfMaxM), 8 ); /* Swap M bounds if necessary */ if( OGR_SWAP( wkbNDR ) ) { for ( int i = 0; i < 2; i++ ) CPL_SWAPDOUBLE( pabyPtrMBounds + 8*i ); } } return OGRERR_NONE; } /************************************************************************/ /* OGRWriteMultiPatchToShapeBin() */ /************************************************************************/ OGRErr OGRWriteMultiPatchToShapeBin( OGRGeometry *poGeom, GByte **ppabyShape, int *pnBytes ) { if( wkbFlatten(poGeom->getGeometryType()) != wkbMultiPolygon ) return OGRERR_UNSUPPORTED_OPERATION; poGeom->closeRings(); OGRMultiPolygon *poMPoly = (OGRMultiPolygon*)poGeom; int nParts = 0; int* panPartStart = NULL; int* panPartType = NULL; int nPoints = 0; OGRRawPoint* poPoints = NULL; double* padfZ = NULL; int nBeginLastPart = 0; for( int j = 0; j < poMPoly->getNumGeometries(); j++ ) { OGRPolygon *poPoly = (OGRPolygon*)(poMPoly->getGeometryRef(j)); int nRings = poPoly->getNumInteriorRings() + 1; /* Skip empties */ if ( poPoly->IsEmpty() ) continue; OGRLinearRing *poRing = poPoly->getExteriorRing(); if( nRings == 1 && poRing->getNumPoints() == 4 ) { if( nParts > 0 && poPoints != NULL && ((panPartType[nParts-1] == SHPP_TRIANGLES && nPoints - panPartStart[nParts-1] == 3) || panPartType[nParts-1] == SHPP_TRIFAN) && poRing->getX(0) == poPoints[nBeginLastPart].x && poRing->getY(0) == poPoints[nBeginLastPart].y && poRing->getZ(0) == padfZ[nBeginLastPart] && poRing->getX(1) == poPoints[nPoints-1].x && poRing->getY(1) == poPoints[nPoints-1].y && poRing->getZ(1) == padfZ[nPoints-1] ) { panPartType[nParts-1] = SHPP_TRIFAN; poPoints = (OGRRawPoint*)CPLRealloc(poPoints, (nPoints + 1) * sizeof(OGRRawPoint)); padfZ = (double*)CPLRealloc(padfZ, (nPoints + 1) * sizeof(double)); poPoints[nPoints].x = poRing->getX(2); poPoints[nPoints].y = poRing->getY(2); padfZ[nPoints] = poRing->getZ(2); nPoints ++; } else if( nParts > 0 && poPoints != NULL && ((panPartType[nParts-1] == SHPP_TRIANGLES && nPoints - panPartStart[nParts-1] == 3) || panPartType[nParts-1] == SHPP_TRISTRIP) && poRing->getX(0) == poPoints[nPoints-2].x && poRing->getY(0) == poPoints[nPoints-2].y && poRing->getZ(0) == padfZ[nPoints-2] && poRing->getX(1) == poPoints[nPoints-1].x && poRing->getY(1) == poPoints[nPoints-1].y && poRing->getZ(1) == padfZ[nPoints-1] ) { panPartType[nParts-1] = SHPP_TRISTRIP; poPoints = (OGRRawPoint*)CPLRealloc(poPoints, (nPoints + 1) * sizeof(OGRRawPoint)); padfZ = (double*)CPLRealloc(padfZ, (nPoints + 1) * sizeof(double)); 
poPoints[nPoints].x = poRing->getX(2); poPoints[nPoints].y = poRing->getY(2); padfZ[nPoints] = poRing->getZ(2); nPoints ++; } else { if( nParts == 0 || panPartType[nParts-1] != SHPP_TRIANGLES ) { nBeginLastPart = nPoints; panPartStart = (int*)CPLRealloc(panPartStart, (nParts + 1) * sizeof(int)); panPartType = (int*)CPLRealloc(panPartType, (nParts + 1) * sizeof(int)); panPartStart[nParts] = nPoints; panPartType[nParts] = SHPP_TRIANGLES; nParts ++; } poPoints = (OGRRawPoint*)CPLRealloc(poPoints, (nPoints + 3) * sizeof(OGRRawPoint)); padfZ = (double*)CPLRealloc(padfZ, (nPoints + 3) * sizeof(double)); for(int i=0;i<3;i++) { poPoints[nPoints+i].x = poRing->getX(i); poPoints[nPoints+i].y = poRing->getY(i); padfZ[nPoints+i] = poRing->getZ(i); } nPoints += 3; } } else { panPartStart = (int*)CPLRealloc(panPartStart, (nParts + nRings) * sizeof(int)); panPartType = (int*)CPLRealloc(panPartType, (nParts + nRings) * sizeof(int)); for ( int i = 0; i < nRings; i++ ) { panPartStart[nParts + i] = nPoints; if ( i == 0 ) { poRing = poPoly->getExteriorRing(); panPartType[nParts + i] = SHPP_OUTERRING; } else { poRing = poPoly->getInteriorRing(i-1); panPartType[nParts + i] = SHPP_INNERRING; } poPoints = (OGRRawPoint*)CPLRealloc(poPoints, (nPoints + poRing->getNumPoints()) * sizeof(OGRRawPoint)); padfZ = (double*)CPLRealloc(padfZ, (nPoints + poRing->getNumPoints()) * sizeof(double)); for( int k = 0; k < poRing->getNumPoints(); k++ ) { poPoints[nPoints+k].x = poRing->getX(k); poPoints[nPoints+k].y = poRing->getY(k); padfZ[nPoints+k] = poRing->getZ(k); } nPoints += poRing->getNumPoints(); } nParts += nRings; } } int nShpSize = 4; /* All types start with integer type number */ nShpSize += 16 * 2; /* xy bbox */ nShpSize += 4; /* nparts */ nShpSize += 4; /* npoints */ nShpSize += 4 * nParts; /* panPartStart[nparts] */ nShpSize += 4 * nParts; /* panPartType[nparts] */ nShpSize += 8 * 2 * nPoints; /* xy points */ nShpSize += 16; /* z bbox */ nShpSize += 8 * nPoints; /* z points */ *pnBytes = nShpSize; *ppabyShape = (GByte*) CPLMalloc(nShpSize); GByte* pabyPtr = *ppabyShape; /* Write in the type number and advance the pointer */ GUInt32 nGType = CPL_LSBWORD32( SHPT_MULTIPATCH ); memcpy( pabyPtr, &nGType, 4 ); pabyPtr += 4; OGREnvelope3D envelope; poGeom->getEnvelope(&envelope); memcpy( pabyPtr, &(envelope.MinX), 8 ); memcpy( pabyPtr+8, &(envelope.MinY), 8 ); memcpy( pabyPtr+8+8, &(envelope.MaxX), 8 ); memcpy( pabyPtr+8+8+8, &(envelope.MaxY), 8 ); int i; /* Swap box if needed. Shape doubles are always LSB */ if( OGR_SWAP( wkbNDR ) ) { for ( i = 0; i < 4; i++ ) CPL_SWAPDOUBLE( pabyPtr + 8*i ); } pabyPtr += 32; /* Write in the part count */ GUInt32 nPartsLsb = CPL_LSBWORD32( nParts ); memcpy( pabyPtr, &nPartsLsb, 4 ); pabyPtr += 4; /* Write in the total point count */ GUInt32 nPointsLsb = CPL_LSBWORD32( nPoints ); memcpy( pabyPtr, &nPointsLsb, 4 ); pabyPtr += 4; for( i = 0; i < nParts; i ++ ) { int nPartStart = CPL_LSBWORD32(panPartStart[i]); memcpy( pabyPtr, &nPartStart, 4 ); pabyPtr += 4; } for( i = 0; i < nParts; i ++ ) { int nPartType = CPL_LSBWORD32(panPartType[i]); memcpy( pabyPtr, &nPartType, 4 ); pabyPtr += 4; } if( poPoints != NULL ) memcpy(pabyPtr, poPoints, 2 * 8 * nPoints); /* Swap box if needed. 
Shape doubles are always LSB */ if( OGR_SWAP( wkbNDR ) ) { for ( i = 0; i < 2 * nPoints; i++ ) CPL_SWAPDOUBLE( pabyPtr + 8*i ); } pabyPtr += 2 * 8 * nPoints; memcpy( pabyPtr, &(envelope.MinZ), 8 ); memcpy( pabyPtr+8, &(envelope.MaxZ), 8 ); if( OGR_SWAP( wkbNDR ) ) { for ( i = 0; i < 2; i++ ) CPL_SWAPDOUBLE( pabyPtr + 8*i ); } pabyPtr += 16; if( padfZ != NULL ) memcpy(pabyPtr, padfZ, 8 * nPoints); /* Swap box if needed. Shape doubles are always LSB */ if( OGR_SWAP( wkbNDR ) ) { for ( i = 0; i < nPoints; i++ ) CPL_SWAPDOUBLE( pabyPtr + 8*i ); } //pabyPtr += 8 * nPoints; CPLFree(panPartStart); CPLFree(panPartType); CPLFree(poPoints); CPLFree(padfZ); return OGRERR_NONE; } /************************************************************************/ /* OGRCreateFromShapeBin() */ /* */ /* Translate shapefile binary representation to an OGR */ /* geometry. */ /************************************************************************/ OGRErr OGRCreateFromShapeBin( GByte *pabyShape, OGRGeometry **ppoGeom, int nBytes ) { *ppoGeom = NULL; if( nBytes < 4 ) { CPLError(CE_Failure, CPLE_AppDefined, "Shape buffer size (%d) too small", nBytes); return OGRERR_FAILURE; } /* -------------------------------------------------------------------- */ /* Detect zlib compressed shapes and uncompress buffer if necessary */ /* NOTE: this seems to be an undocumented feature, even in the */ /* extended_shapefile_format.pdf found in the FileGDB API documentation*/ /* -------------------------------------------------------------------- */ if( nBytes >= 14 && pabyShape[12] == 0x78 && pabyShape[13] == 0xDA /* zlib marker */) { GInt32 nUncompressedSize, nCompressedSize; memcpy( &nUncompressedSize, pabyShape + 4, 4 ); memcpy( &nCompressedSize, pabyShape + 8, 4 ); CPL_LSBPTR32( &nUncompressedSize ); CPL_LSBPTR32( &nCompressedSize ); if (nCompressedSize + 12 == nBytes && nUncompressedSize > 0) { GByte* pabyUncompressedBuffer = (GByte*)VSI_MALLOC_VERBOSE(nUncompressedSize); if (pabyUncompressedBuffer == NULL) { return OGRERR_FAILURE; } size_t nRealUncompressedSize = 0; if( CPLZLibInflate( pabyShape + 12, nCompressedSize, pabyUncompressedBuffer, nUncompressedSize, &nRealUncompressedSize ) == NULL ) { CPLError(CE_Failure, CPLE_AppDefined, "CPLZLibInflate() failed"); VSIFree(pabyUncompressedBuffer); return OGRERR_FAILURE; } OGRErr eErr = OGRCreateFromShapeBin(pabyUncompressedBuffer, ppoGeom, static_cast<int>(nRealUncompressedSize)); VSIFree(pabyUncompressedBuffer); return eErr; } } int nSHPType = pabyShape[0]; /* -------------------------------------------------------------------- */ /* Return a NULL geometry when SHPT_NULL is encountered. */ /* Watch out, null return does not mean "bad data" it means */ /* "no geometry here". 
Watch the OGRErr for the error status */ /* -------------------------------------------------------------------- */ if ( nSHPType == SHPT_NULL ) { *ppoGeom = NULL; return OGRERR_NONE; } // CPLDebug( "PGeo", // "Shape type read from PGeo data is nSHPType = %d", // nSHPType ); const bool bIsExtended = ( nSHPType >= SHPT_GENERALPOLYLINE && nSHPType <= SHPT_GENERALMULTIPATCH ); const bool bHasZ = ( nSHPType == SHPT_POINTZ || nSHPType == SHPT_POINTZM || nSHPType == SHPT_MULTIPOINTZ || nSHPType == SHPT_MULTIPOINTZM || nSHPType == SHPT_POLYGONZ || nSHPType == SHPT_POLYGONZM || nSHPType == SHPT_ARCZ || nSHPType == SHPT_ARCZM || nSHPType == SHPT_MULTIPATCH || nSHPType == SHPT_MULTIPATCHM || (bIsExtended && (pabyShape[3] & 0x80) != 0 ) ); const bool bHasM = ( nSHPType == SHPT_POINTM || nSHPType == SHPT_POINTZM || nSHPType == SHPT_MULTIPOINTM || nSHPType == SHPT_MULTIPOINTZM || nSHPType == SHPT_POLYGONM || nSHPType == SHPT_POLYGONZM || nSHPType == SHPT_ARCM || nSHPType == SHPT_ARCZM || nSHPType == SHPT_MULTIPATCHM || (bIsExtended && (pabyShape[3] & 0x40) != 0 ) ); /* -------------------------------------------------------------------- */ /* TODO: These types include additional attributes including */ /* non-linear segments and such. They should be handled. */ /* This is documented in the extended_shapefile_format.pdf */ /* from the FileGDB API */ /* -------------------------------------------------------------------- */ switch( nSHPType ) { case SHPT_GENERALPOLYLINE: nSHPType = SHPT_ARC; break; case SHPT_GENERALPOLYGON: nSHPType = SHPT_POLYGON; break; case SHPT_GENERALPOINT: nSHPType = SHPT_POINT; break; case SHPT_GENERALMULTIPOINT: nSHPType = SHPT_MULTIPOINT; break; case SHPT_GENERALMULTIPATCH: nSHPType = SHPT_MULTIPATCH; } /* ==================================================================== */ /* Extract vertices for a Polygon or Arc. */ /* ==================================================================== */ if( nSHPType == SHPT_ARC || nSHPType == SHPT_ARCZ || nSHPType == SHPT_ARCM || nSHPType == SHPT_ARCZM || nSHPType == SHPT_POLYGON || nSHPType == SHPT_POLYGONZ || nSHPType == SHPT_POLYGONM || nSHPType == SHPT_POLYGONZM || nSHPType == SHPT_MULTIPATCH || nSHPType == SHPT_MULTIPATCHM) { GInt32 nPoints, nParts; int i, nOffset; GInt32 *panPartStart; GInt32 *panPartType = NULL; if (nBytes < 44) { CPLError(CE_Failure, CPLE_AppDefined, "Corrupted Shape : nBytes=%d, nSHPType=%d", nBytes, nSHPType); return OGRERR_FAILURE; } /* -------------------------------------------------------------------- */ /* Extract part/point count, and build vertex and part arrays */ /* to proper size. 
*/ /* -------------------------------------------------------------------- */ memcpy( &nPoints, pabyShape + 40, 4 ); memcpy( &nParts, pabyShape + 36, 4 ); CPL_LSBPTR32( &nPoints ); CPL_LSBPTR32( &nParts ); if (nPoints < 0 || nParts < 0 || nPoints > 50 * 1000 * 1000 || nParts > 10 * 1000 * 1000) { CPLError(CE_Failure, CPLE_AppDefined, "Corrupted Shape : nPoints=%d, nParts=%d.", nPoints, nParts); return OGRERR_FAILURE; } int bIsMultiPatch = ( nSHPType == SHPT_MULTIPATCH || nSHPType == SHPT_MULTIPATCHM ); /* With the previous checks on nPoints and nParts, */ /* we should not overflow here and after */ /* since 50 M * (16 + 8 + 8) = 1 600 MB */ int nRequiredSize = 44 + 4 * nParts + 16 * nPoints; if ( bHasZ ) { nRequiredSize += 16 + 8 * nPoints; } if ( bHasM ) { nRequiredSize += 16 + 8 * nPoints; } if( bIsMultiPatch ) { nRequiredSize += 4 * nParts; } if (nRequiredSize > nBytes) { CPLError(CE_Failure, CPLE_AppDefined, "Corrupted Shape : nPoints=%d, nParts=%d, nBytes=%d, nSHPType=%d, nRequiredSize=%d", nPoints, nParts, nBytes, nSHPType, nRequiredSize); return OGRERR_FAILURE; } panPartStart = (GInt32 *) VSI_CALLOC_VERBOSE(nParts,sizeof(GInt32)); if (panPartStart == NULL) { return OGRERR_FAILURE; } /* -------------------------------------------------------------------- */ /* Copy out the part array from the record. */ /* -------------------------------------------------------------------- */ memcpy( panPartStart, pabyShape + 44, 4 * nParts ); for( i = 0; i < nParts; i++ ) { CPL_LSBPTR32( panPartStart + i ); /* We check that the offset is inside the vertex array */ if (panPartStart[i] < 0 || panPartStart[i] >= nPoints) { CPLError(CE_Failure, CPLE_AppDefined, "Corrupted Shape : panPartStart[%d] = %d, nPoints = %d", i, panPartStart[i], nPoints); CPLFree(panPartStart); return OGRERR_FAILURE; } if (i > 0 && panPartStart[i] <= panPartStart[i-1]) { CPLError(CE_Failure, CPLE_AppDefined, "Corrupted Shape : panPartStart[%d] = %d, panPartStart[%d] = %d", i, panPartStart[i], i - 1, panPartStart[i - 1]); CPLFree(panPartStart); return OGRERR_FAILURE; } } nOffset = 44 + 4*nParts; /* -------------------------------------------------------------------- */ /* If this is a multipatch, we will also have parts types. */ /* -------------------------------------------------------------------- */ if( bIsMultiPatch ) { panPartType = (GInt32 *) VSI_CALLOC_VERBOSE(nParts,sizeof(GInt32)); if (panPartType == NULL) { CPLFree(panPartStart); return OGRERR_FAILURE; } memcpy( panPartType, pabyShape + nOffset, 4*nParts ); for( i = 0; i < nParts; i++ ) { CPL_LSBPTR32( panPartType + i ); } nOffset += 4*nParts; } /* -------------------------------------------------------------------- */ /* Copy out the vertices from the record. */ /* -------------------------------------------------------------------- */ double *padfX = (double *) VSI_MALLOC_VERBOSE(sizeof(double)*nPoints); double *padfY = (double *) VSI_MALLOC_VERBOSE(sizeof(double)*nPoints); double *padfZ = (double *) VSI_CALLOC_VERBOSE(sizeof(double),nPoints); double *padfM = (double *) (bHasM ? 
VSI_CALLOC_VERBOSE(sizeof(double),nPoints) : NULL); if (padfX == NULL || padfY == NULL || padfZ == NULL || (bHasM && padfM == NULL)) { CPLFree( panPartStart ); CPLFree( panPartType ); CPLFree( padfX ); CPLFree( padfY ); CPLFree( padfZ ); CPLFree( padfM ); return OGRERR_FAILURE; } for( i = 0; i < nPoints; i++ ) { memcpy(padfX + i, pabyShape + nOffset + i * 16, 8 ); memcpy(padfY + i, pabyShape + nOffset + i * 16 + 8, 8 ); CPL_LSBPTR64( padfX + i ); CPL_LSBPTR64( padfY + i ); } nOffset += 16*nPoints; /* -------------------------------------------------------------------- */ /* If we have a Z coordinate, collect that now. */ /* -------------------------------------------------------------------- */ if( bHasZ ) { for( i = 0; i < nPoints; i++ ) { memcpy( padfZ + i, pabyShape + nOffset + 16 + i*8, 8 ); CPL_LSBPTR64( padfZ + i ); } nOffset += 16 + 8*nPoints; } /* -------------------------------------------------------------------- */ /* If we have a M coordinate, collect that now. */ /* -------------------------------------------------------------------- */ if( bHasM ) { for( i = 0; i < nPoints; i++ ) { memcpy( padfM + i, pabyShape + nOffset + 16 + i*8, 8 ); CPL_LSBPTR64( padfM + i ); } //nOffset += 16 + 8*nPoints; } /* -------------------------------------------------------------------- */ /* Build corresponding OGR objects. */ /* -------------------------------------------------------------------- */ if( nSHPType == SHPT_ARC || nSHPType == SHPT_ARCZ || nSHPType == SHPT_ARCM || nSHPType == SHPT_ARCZM ) { /* -------------------------------------------------------------------- */ /* Arc - As LineString */ /* -------------------------------------------------------------------- */ if( nParts == 1 ) { OGRLineString *poLine = new OGRLineString(); *ppoGeom = poLine; poLine->setPoints( nPoints, padfX, padfY, padfZ, padfM ); } /* -------------------------------------------------------------------- */ /* Arc - As MultiLineString */ /* -------------------------------------------------------------------- */ else { OGRMultiLineString *poMulti = new OGRMultiLineString; *ppoGeom = poMulti; for( i = 0; i < nParts; i++ ) { OGRLineString *poLine = new OGRLineString; int nVerticesInThisPart; if( i == nParts-1 ) nVerticesInThisPart = nPoints - panPartStart[i]; else nVerticesInThisPart = panPartStart[i+1] - panPartStart[i]; poLine->setPoints( nVerticesInThisPart, padfX + panPartStart[i], padfY + panPartStart[i], padfZ + panPartStart[i], (padfM != NULL) ? padfM + panPartStart[i] : NULL ); poMulti->addGeometryDirectly( poLine ); } } } /* ARC */ /* -------------------------------------------------------------------- */ /* Polygon */ /* -------------------------------------------------------------------- */ else if( nSHPType == SHPT_POLYGON || nSHPType == SHPT_POLYGONZ || nSHPType == SHPT_POLYGONM || nSHPType == SHPT_POLYGONZM ) { if (nParts != 0) { if (nParts == 1) { OGRPolygon *poOGRPoly = new OGRPolygon; *ppoGeom = poOGRPoly; OGRLinearRing *poRing = new OGRLinearRing; int nVerticesInThisPart = nPoints - panPartStart[0]; poRing->setPoints( nVerticesInThisPart, padfX + panPartStart[0], padfY + panPartStart[0], padfZ + panPartStart[0], (padfM != NULL) ? 
padfM + panPartStart[0] : NULL ); poOGRPoly->addRingDirectly( poRing ); } else { OGRGeometry *poOGR = NULL; OGRPolygon** tabPolygons = new OGRPolygon*[nParts]; for( i = 0; i < nParts; i++ ) { tabPolygons[i] = new OGRPolygon(); OGRLinearRing *poRing = new OGRLinearRing; int nVerticesInThisPart; if( i == nParts-1 ) nVerticesInThisPart = nPoints - panPartStart[i]; else nVerticesInThisPart = panPartStart[i+1] - panPartStart[i]; poRing->setPoints( nVerticesInThisPart, padfX + panPartStart[i], padfY + panPartStart[i], padfZ + panPartStart[i], (padfM != NULL) ? padfM + panPartStart[i] : NULL ); tabPolygons[i]->addRingDirectly(poRing); } int isValidGeometry; const char* papszOptions[] = { "METHOD=ONLY_CCW", NULL }; poOGR = OGRGeometryFactory::organizePolygons( (OGRGeometry**)tabPolygons, nParts, &isValidGeometry, papszOptions ); if (!isValidGeometry) { CPLError(CE_Warning, CPLE_AppDefined, "Geometry of polygon cannot be translated to Simple Geometry. " "All polygons will be contained in a multipolygon.\n"); } *ppoGeom = poOGR; delete[] tabPolygons; } } } /* polygon */ /* -------------------------------------------------------------------- */ /* Multipatch */ /* -------------------------------------------------------------------- */ else if( bIsMultiPatch ) { *ppoGeom = OGRCreateFromMultiPatch( nParts, panPartStart, panPartType, nPoints, padfX, padfY, padfZ ); } CPLFree( panPartStart ); CPLFree( panPartType ); CPLFree( padfX ); CPLFree( padfY ); CPLFree( padfZ ); CPLFree( padfM ); if (*ppoGeom != NULL) { if( !bHasZ ) (*ppoGeom)->set3D(FALSE); } return OGRERR_NONE; } /* ==================================================================== */ /* Extract vertices for a MultiPoint. */ /* ==================================================================== */ else if( nSHPType == SHPT_MULTIPOINT || nSHPType == SHPT_MULTIPOINTM || nSHPType == SHPT_MULTIPOINTZ || nSHPType == SHPT_MULTIPOINTZM ) { GInt32 nPoints; GInt32 nOffsetZ; GInt32 nOffsetM = 0; int i; memcpy( &nPoints, pabyShape + 36, 4 ); CPL_LSBPTR32( &nPoints ); if (nPoints < 0 || nPoints > 50 * 1000 * 1000 ) { CPLError(CE_Failure, CPLE_AppDefined, "Corrupted Shape : nPoints=%d.", nPoints); return OGRERR_FAILURE; } nOffsetZ = 40 + 2*8*nPoints + 2*8; if( bHasM ) nOffsetM = (bHasZ) ? nOffsetZ + 2*8 * 8*nPoints : nOffsetZ; OGRMultiPoint *poMultiPt = new OGRMultiPoint; *ppoGeom = poMultiPt; for( i = 0; i < nPoints; i++ ) { double x, y; OGRPoint *poPt = new OGRPoint; /* Copy X */ memcpy(&x, pabyShape + 40 + i*16, 8); CPL_LSBPTR64(&x); poPt->setX(x); /* Copy Y */ memcpy(&y, pabyShape + 40 + i*16 + 8, 8); CPL_LSBPTR64(&y); poPt->setY(y); /* Copy Z */ if ( bHasZ ) { double z; memcpy(&z, pabyShape + nOffsetZ + i*8, 8); CPL_LSBPTR64(&z); poPt->setZ(z); } /* Copy M */ if ( bHasM ) { double m; memcpy(&m, pabyShape + nOffsetM + i*8, 8); CPL_LSBPTR64(&m); poPt->setM(m); } poMultiPt->addGeometryDirectly( poPt ); } poMultiPt->set3D( bHasZ ); poMultiPt->setMeasured( bHasM ); return OGRERR_NONE; } /* ==================================================================== */ /* Extract vertices for a point. */ /* ==================================================================== */ else if( nSHPType == SHPT_POINT || nSHPType == SHPT_POINTM || nSHPType == SHPT_POINTZ || nSHPType == SHPT_POINTZM ) { /* int nOffset; */ double dfX, dfY, dfZ = 0, dfM = 0; if (nBytes < 4 + 8 + 8 + ((bHasZ) ? 8 : 0) + ((bHasM) ? 
8 : 0)) { CPLError(CE_Failure, CPLE_AppDefined, "Corrupted Shape : nBytes=%d, nSHPType=%d", nBytes, nSHPType); return OGRERR_FAILURE; } memcpy( &dfX, pabyShape + 4, 8 ); memcpy( &dfY, pabyShape + 4 + 8, 8 ); CPL_LSBPTR64( &dfX ); CPL_LSBPTR64( &dfY ); /* nOffset = 20 + 8; */ if( bHasZ ) { memcpy( &dfZ, pabyShape + 4 + 16, 8 ); CPL_LSBPTR64( &dfZ ); } if( bHasM ) { memcpy( &dfM, pabyShape + 4 + 16 + ((bHasZ) ? 8 : 0), 8 ); CPL_LSBPTR64( &dfM ); } if( bHasZ && bHasM ) *ppoGeom = new OGRPoint( dfX, dfY, dfZ, dfM ); else if( bHasZ ) *ppoGeom = new OGRPoint( dfX, dfY, dfZ ); else if( bHasM ) { OGRPoint* poPoint = new OGRPoint( dfX, dfY ); poPoint->setM(dfM); *ppoGeom = poPoint; } else *ppoGeom = new OGRPoint( dfX, dfY ); return OGRERR_NONE; } CPLError(CE_Failure, CPLE_AppDefined, "Unsupported geometry type: %d", nSHPType ); return OGRERR_FAILURE; }
jwoyame/node-gdal
deps/libgdal/gdal/ogr/ogrpgeogeometry.cpp
C++
apache-2.0
64,965
var estabelecimentoViewCtrl = angular.module('estabelecimentoViewCtrl', ['ngResource']);

estabelecimentoViewCtrl.controller('estabelecimentoViewCtrl', ['$scope', '$http',
    function ($scope, $http) {

        $scope.estabelecimentos = [];

        // Loads the establishments from the REST endpoint. Uses the standard
        // promise API instead of the deprecated $http .success() callback.
        $scope.carregarPontosCriticos = function () {
            $http.get("../webresources/com.andepuc.estabelecimentos").then(function (response) {
                $scope.estabelecimentos = response.data;
            });
        };
    }]);
ScHaFeR/AndePUCRS-WebService
andePuc/target/andePuc-1.0/js/controllers/estabelecimentoViewController.js
JavaScript
apache-2.0
497
/* * Copyright 2000-2015 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.psi.impl.source.tree.injected; import com.intellij.lang.injection.MultiHostInjector; import com.intellij.openapi.editor.ex.DocumentEx; import com.intellij.openapi.project.Project; import com.intellij.psi.FileViewProvider; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import com.intellij.psi.PsiManager; import com.intellij.psi.util.CachedValueProvider; import com.intellij.psi.util.ParameterizedCachedValueProvider; import com.intellij.psi.util.PsiModificationTracker; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; class InjectedPsiCachedValueProvider implements ParameterizedCachedValueProvider<InjectionResult, PsiElement> { @Override public CachedValueProvider.Result<InjectionResult> compute(PsiElement element) { PsiFile hostPsiFile = element.getContainingFile(); if (hostPsiFile == null) return null; FileViewProvider viewProvider = hostPsiFile.getViewProvider(); if (viewProvider instanceof InjectedFileViewProvider) return null; // no injection inside injection final DocumentEx hostDocument = (DocumentEx)viewProvider.getDocument(); if (hostDocument == null) return null; PsiManager psiManager = viewProvider.getManager(); final Project project = psiManager.getProject(); InjectedLanguageManagerImpl injectedManager = InjectedLanguageManagerImpl.getInstanceImpl(project); InjectionResult result = doCompute(element, injectedManager, project, hostPsiFile); return CachedValueProvider.Result.create(result, PsiModificationTracker.MODIFICATION_COUNT, hostDocument); } @Nullable static InjectionResult doCompute(@NotNull final PsiElement element, @NotNull InjectedLanguageManagerImpl injectedManager, @NotNull Project project, @NotNull PsiFile hostPsiFile) { MyInjProcessor processor = new MyInjProcessor(project, hostPsiFile); injectedManager.processInPlaceInjectorsFor(element, processor); InjectionRegistrarImpl registrar = processor.hostRegistrar; return registrar == null ? null : registrar.getInjectedResult(); } private static class MyInjProcessor implements InjectedLanguageManagerImpl.InjProcessor { private InjectionRegistrarImpl hostRegistrar; private final Project myProject; private final PsiFile myHostPsiFile; private MyInjProcessor(@NotNull Project project, @NotNull PsiFile hostPsiFile) { myProject = project; myHostPsiFile = hostPsiFile; } @Override public boolean process(@NotNull PsiElement element, @NotNull MultiHostInjector injector) { if (hostRegistrar == null) { hostRegistrar = new InjectionRegistrarImpl(myProject, myHostPsiFile, element); } injector.getLanguagesToInject(hostRegistrar, element); return hostRegistrar.getInjectedResult() == null; } } }
goodwinnk/intellij-community
platform/lang-impl/src/com/intellij/psi/impl/source/tree/injected/InjectedPsiCachedValueProvider.java
Java
apache-2.0
3,537
/*
Copyright 2011-2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.google.security.zynamics.reil.algorithms.mono.valuetracking.elements;

import java.math.BigInteger;
import java.util.Set;

import com.google.common.collect.Sets;
import com.google.security.zynamics.zylib.disassembly.IAddress;
import com.google.security.zynamics.zylib.general.Convert;

public class Symbol implements IValueElement {
  private final String m_value;
  private final IAddress m_address;

  public Symbol(final IAddress address, final String value) {
    if (Convert.isDecString(value)) {
      throw new IllegalStateException();
    }

    m_address = address;
    m_value = value;
  }

  @Override
  public Symbol clone() {
    return new Symbol(m_address, m_value);
  }

  @Override
  public boolean equals(final Object rhs) {
    return (rhs instanceof Symbol) && ((Symbol) rhs).m_value.equals(m_value)
        && ((Symbol) rhs).m_address.equals(m_address);
  }

  @Override
  public BigInteger evaluate() {
    // TODO Auto-generated method stub
    throw new IllegalStateException("Not yet implemented");
  }

  @Override
  public IValueElement getSimplified() {
    return this;
  }

  @Override
  public Set<String> getVariables() {
    return Sets.newHashSet(m_value);
  }

  @Override
  public int hashCode() {
    return m_address.hashCode() * m_value.hashCode();
  }

  @Override
  public String toString() {
    return m_value + "/" + m_address.toHexString();
  }
}
chubbymaggie/binnavi
src/main/java/com/google/security/zynamics/reil/algorithms/mono/valuetracking/elements/Symbol.java
Java
apache-2.0
1,988
#!/usr/bin/python
"""
Program for creating HTML plots
"""

import os
import sys
import json
import time

from readevtlog import *

def imaging_iters(logs):
    start_time = 40.0
    start_msg = "kernel init"
    end_msg = "imaging cleanup"
    got_start = False
    for k in sorted(logs):
        tt = logs[k].time
        for e in tt:
            if e.msg == start_msg:
                start = e.t1
                got_start = True
            if got_start and e.msg == end_msg:
                print e.t2-start, ",",
        print ""

data_commands = {
    "imaging_iters" : imaging_iters,
}

# Get parameters
cmd = sys.argv[1]
nm = sys.argv[2]

# Open input files
logs = read_timelines(nm)

# Write table
data_commands[cmd](logs)
SKA-ScienceDataProcessor/RC
MS6/visualize/csv_generator.py
Python
apache-2.0
744
/*
 * Copyright 2014 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License, version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package io.netty.handler.codec.http2;

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.internal.UnstableApi;

/**
 * A listener of HTTP/2 frames.
 */
@UnstableApi
public interface Http2FrameListener {
    /**
     * Handles an inbound {@code DATA} frame.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param streamId the subject stream for the frame.
     * @param data payload buffer for the frame. This buffer will be released by the codec.
     * @param padding the number of padding bytes found at the end of the frame.
     * @param endOfStream Indicates whether this is the last frame to be sent from the remote endpoint for this stream.
     * @return the number of bytes that have been processed by the application. The returned bytes are used by the
     * inbound flow controller to determine the appropriate time to expand the inbound flow control window (i.e. send
     * {@code WINDOW_UPDATE}). Returning a value equal to the length of {@code data} + {@code padding} will effectively
     * opt-out of application-level flow control for this frame. Returning a value less than the length of {@code data}
     * + {@code padding} will defer the returning of the processed bytes, which the application must later return via
     * {@link Http2LocalFlowController#consumeBytes(Http2Stream, int)}. The returned value must
     * be >= {@code 0} and <= {@code data.readableBytes()} + {@code padding}.
     */
    int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding,
                   boolean endOfStream) throws Http2Exception;

    /**
     * Handles an inbound {@code HEADERS} frame.
     * <p>
     * Only one of the following methods will be called for each {@code HEADERS} frame sequence.
     * One will be called when the {@code END_HEADERS} flag has been received.
     * <ul>
     * <li>{@link #onHeadersRead(ChannelHandlerContext, int, Http2Headers, int, boolean)}</li>
     * <li>{@link #onHeadersRead(ChannelHandlerContext, int, Http2Headers, int, short, boolean, int, boolean)}</li>
     * <li>{@link #onPushPromiseRead(ChannelHandlerContext, int, int, Http2Headers, int)}</li>
     * </ul>
     * <p>
     * To say it another way; the {@link Http2Headers} will contain all of the headers
     * for the current message exchange step (additional queuing is not necessary).
     *
     * @param ctx the context from the handler where the frame was read.
     * @param streamId the subject stream for the frame.
     * @param headers the received headers.
     * @param padding the number of padding bytes found at the end of the frame.
     * @param endOfStream Indicates whether this is the last frame to be sent from the remote endpoint
     *            for this stream.
     */
    void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding,
            boolean endOfStream) throws Http2Exception;

    /**
     * Handles an inbound {@code HEADERS} frame with priority information specified.
     * Only called if {@code END_HEADERS} encountered.
     * <p>
     * Only one of the following methods will be called for each {@code HEADERS} frame sequence.
     * One will be called when the {@code END_HEADERS} flag has been received.
     * <ul>
     * <li>{@link #onHeadersRead(ChannelHandlerContext, int, Http2Headers, int, boolean)}</li>
     * <li>{@link #onHeadersRead(ChannelHandlerContext, int, Http2Headers, int, short, boolean, int, boolean)}</li>
     * <li>{@link #onPushPromiseRead(ChannelHandlerContext, int, int, Http2Headers, int)}</li>
     * </ul>
     * <p>
     * To say it another way; the {@link Http2Headers} will contain all of the headers
     * for the current message exchange step (additional queuing is not necessary).
     *
     * @param ctx the context from the handler where the frame was read.
     * @param streamId the subject stream for the frame.
     * @param headers the received headers.
     * @param streamDependency the stream on which this stream depends, or 0 if dependent on the
     *            connection.
     * @param weight the new weight for the stream.
     * @param exclusive whether or not the stream should be the exclusive dependent of its parent.
     * @param padding the number of padding bytes found at the end of the frame.
     * @param endOfStream Indicates whether this is the last frame to be sent from the remote endpoint
     *            for this stream.
     */
    void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers,
            int streamDependency, short weight, boolean exclusive, int padding, boolean endOfStream)
            throws Http2Exception;

    /**
     * Handles an inbound {@code PRIORITY} frame.
     * <p>
     * Note that is it possible to have this method called and no stream object exist for either
     * {@code streamId}, {@code streamDependency}, or both. This is because the {@code PRIORITY} frame can be
     * sent/received when streams are in the {@code CLOSED} state.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param streamId the subject stream for the frame.
     * @param streamDependency the stream on which this stream depends, or 0 if dependent on the
     *            connection.
     * @param weight the new weight for the stream.
     * @param exclusive whether or not the stream should be the exclusive dependent of its parent.
     */
    void onPriorityRead(ChannelHandlerContext ctx, int streamId, int streamDependency,
            short weight, boolean exclusive) throws Http2Exception;

    /**
     * Handles an inbound {@code RST_STREAM} frame.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param streamId the stream that is terminating.
     * @param errorCode the error code identifying the type of failure.
     */
    void onRstStreamRead(ChannelHandlerContext ctx, int streamId, long errorCode) throws Http2Exception;

    /**
     * Handles an inbound {@code SETTINGS} acknowledgment frame.
     * @param ctx the context from the handler where the frame was read.
     */
    void onSettingsAckRead(ChannelHandlerContext ctx) throws Http2Exception;

    /**
     * Handles an inbound {@code SETTINGS} frame.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param settings the settings received from the remote endpoint.
     */
    void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) throws Http2Exception;

    /**
     * Handles an inbound {@code PING} frame.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param data the payload of the frame. If this buffer needs to be retained by the listener
     *            they must make a copy.
     */
    void onPingRead(ChannelHandlerContext ctx, ByteBuf data) throws Http2Exception;

    /**
     * Handles an inbound {@code PING} acknowledgment.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param data the payload of the frame. If this buffer needs to be retained by the listener
     *            they must make a copy.
     */
    void onPingAckRead(ChannelHandlerContext ctx, ByteBuf data) throws Http2Exception;

    /**
     * Handles an inbound {@code PUSH_PROMISE} frame. Only called if {@code END_HEADERS} encountered.
     * <p>
     * Promised requests MUST be authoritative, cacheable, and safe.
     * See <a href="https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-8.2">[RFC http2], Section 8.2</a>.
     * <p>
     * Only one of the following methods will be called for each {@code HEADERS} frame sequence.
     * One will be called when the {@code END_HEADERS} flag has been received.
     * <ul>
     * <li>{@link #onHeadersRead(ChannelHandlerContext, int, Http2Headers, int, boolean)}</li>
     * <li>{@link #onHeadersRead(ChannelHandlerContext, int, Http2Headers, int, short, boolean, int, boolean)}</li>
     * <li>{@link #onPushPromiseRead(ChannelHandlerContext, int, int, Http2Headers, int)}</li>
     * </ul>
     * <p>
     * To say it another way; the {@link Http2Headers} will contain all of the headers
     * for the current message exchange step (additional queuing is not necessary).
     *
     * @param ctx the context from the handler where the frame was read.
     * @param streamId the stream the frame was sent on.
     * @param promisedStreamId the ID of the promised stream.
     * @param headers the received headers.
     * @param padding the number of padding bytes found at the end of the frame.
     */
    void onPushPromiseRead(ChannelHandlerContext ctx, int streamId, int promisedStreamId,
            Http2Headers headers, int padding) throws Http2Exception;

    /**
     * Handles an inbound {@code GO_AWAY} frame.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param lastStreamId the last known stream of the remote endpoint.
     * @param errorCode the error code, if abnormal closure.
     * @param debugData application-defined debug data. If this buffer needs to be retained by the
     *            listener they must make a copy.
     */
    void onGoAwayRead(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData)
            throws Http2Exception;

    /**
     * Handles an inbound {@code WINDOW_UPDATE} frame.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param streamId the stream the frame was sent on.
     * @param windowSizeIncrement the increased number of bytes of the remote endpoint's flow
     *            control window.
     */
    void onWindowUpdateRead(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement)
            throws Http2Exception;

    /**
     * Handler for a frame not defined by the HTTP/2 spec.
     *
     * @param ctx the context from the handler where the frame was read.
     * @param frameType the frame type from the HTTP/2 header.
     * @param streamId the stream the frame was sent on.
     * @param flags the flags in the frame header.
     * @param payload the payload of the frame.
     */
    void onUnknownFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags,
            ByteBuf payload) throws Http2Exception;
}
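// Illustrative sketch (not part of this file): the Javadoc above mostly concerns
// onDataRead's flow-control contract. A minimal listener that opts out of
// application-level flow control acknowledges every byte it is handed, e.g. by
// extending the no-op Http2EventAdapter from this package:
//
//   class LoggingFrameListener extends Http2EventAdapter {
//       @Override
//       public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data,
//                             int padding, boolean endOfStream) {
//           System.out.println("DATA on stream " + streamId + ": " + data.readableBytes() + " bytes");
//           return data.readableBytes() + padding; // consume everything immediately
//       }
//   }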
yrcourage/netty
codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameListener.java
Java
apache-2.0
11,025
/*
 * Copyright 2000-2017 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.jetbrains.python.psi.impl;

import com.intellij.lang.ASTNode;
import com.intellij.openapi.util.Ref;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiPolyVariantReference;
import com.intellij.psi.PsiReference;
import com.intellij.psi.util.QualifiedName;
import com.jetbrains.python.PyElementTypes;
import com.jetbrains.python.PyNames;
import com.jetbrains.python.PyTokenTypes;
import com.jetbrains.python.PythonDialectsTokenSetProvider;
import com.jetbrains.python.codeInsight.typing.PyTypingTypeProvider;
import com.jetbrains.python.psi.*;
import com.jetbrains.python.psi.impl.references.PyOperatorReference;
import com.jetbrains.python.psi.resolve.PyResolveContext;
import com.jetbrains.python.psi.types.*;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import java.util.ArrayList;
import java.util.List;

/**
 * @author yole
 */
public class PyPrefixExpressionImpl extends PyElementImpl implements PyPrefixExpression {
  public PyPrefixExpressionImpl(ASTNode astNode) {
    super(astNode);
  }

  @Override
  public PyExpression getOperand() {
    return (PyExpression)childToPsi(PythonDialectsTokenSetProvider.INSTANCE.getExpressionTokens(), 0);
  }

  @Nullable
  public PsiElement getPsiOperator() {
    final ASTNode node = getNode();
    final ASTNode child = node.findChildByType(PyElementTypes.UNARY_OPS);
    return child != null ? child.getPsi() : null;
  }

  @NotNull
  @Override
  public PyElementType getOperator() {
    final PsiElement op = getPsiOperator();
    assert op != null;
    return (PyElementType)op.getNode().getElementType();
  }

  @Override
  protected void acceptPyVisitor(PyElementVisitor pyVisitor) {
    pyVisitor.visitPyPrefixExpression(this);
  }

  @Override
  public PsiReference getReference() {
    return getReference(PyResolveContext.noImplicits());
  }

  @NotNull
  @Override
  public PsiPolyVariantReference getReference(@NotNull PyResolveContext context) {
    return new PyOperatorReference(this, context);
  }

  @Override
  public PyType getType(@NotNull TypeEvalContext context, @NotNull TypeEvalContext.Key key) {
    if (getOperator() == PyTokenTypes.NOT_KEYWORD) {
      return PyBuiltinCache.getInstance(this).getBoolType();
    }
    final boolean isAwait = getOperator() == PyTokenTypes.AWAIT_KEYWORD;
    if (isAwait) {
      final PyExpression operand = getOperand();
      if (operand != null) {
        final PyType operandType = context.getType(operand);
        final PyType type = getGeneratorReturnType(operandType, context);
        if (type != null) {
          return type;
        }
      }
    }
    final PsiReference ref = getReference(PyResolveContext.noImplicits().withTypeEvalContext(context));
    final PsiElement resolved = ref.resolve();
    if (resolved instanceof PyCallable) {
      // TODO: Make PyPrefixExpression a PyCallSiteExpression, use getCallType() here and analyze it in PyTypeChecker.analyzeCallSite()
      final PyType returnType = ((PyCallable)resolved).getReturnType(context, key);
      return isAwait ? getGeneratorReturnType(returnType, context) : returnType;
    }
    return null;
  }

  @Override
  public PyExpression getQualifier() {
    return getOperand();
  }

  @Nullable
  @Override
  public QualifiedName asQualifiedName() {
    return PyPsiUtils.asQualifiedName(this);
  }

  @Override
  public boolean isQualified() {
    return getQualifier() != null;
  }

  @Override
  public String getReferencedName() {
    PyElementType t = getOperator();
    if (t == PyTokenTypes.PLUS) {
      return PyNames.POS;
    }
    else if (t == PyTokenTypes.MINUS) {
      return PyNames.NEG;
    }
    return getOperator().getSpecialMethodName();
  }

  @Override
  public ASTNode getNameElement() {
    final PsiElement op = getPsiOperator();
    return op != null ? op.getNode() : null;
  }

  @Nullable
  private static PyType getGeneratorReturnType(@Nullable PyType type, @NotNull TypeEvalContext context) {
    if (type instanceof PyClassLikeType && type instanceof PyCollectionType) {
      if (type instanceof PyClassType && PyNames.AWAITABLE.equals(((PyClassType)type).getPyClass().getName())) {
        return ((PyCollectionType)type).getIteratedItemType();
      }
      else {
        return Ref.deref(PyTypingTypeProvider.coroutineOrGeneratorElementType(type, context));
      }
    }
    else if (type instanceof PyUnionType) {
      final List<PyType> memberReturnTypes = new ArrayList<>();
      final PyUnionType unionType = (PyUnionType)type;
      for (PyType member : unionType.getMembers()) {
        memberReturnTypes.add(getGeneratorReturnType(member, context));
      }
      return PyUnionType.union(memberReturnTypes);
    }
    return null;
  }
}
apixandru/intellij-community
python/src/com/jetbrains/python/psi/impl/PyPrefixExpressionImpl.java
Java
apache-2.0
5,338
//// [typeOfThisInStaticMembers13.ts]
class C {
    static readonly c: "foo" = "foo"
    static bar = class Inner {
        static [this.c] = 123;
        [this.c] = 123;
    }
}

//// [typeOfThisInStaticMembers13.js]
var C = /** @class */ (function () {
    function C() {
    }
    var _a, _b, _c, _d;
    _a = C;
    Object.defineProperty(C, "c", {
        enumerable: true,
        configurable: true,
        writable: true,
        value: "foo"
    });
    Object.defineProperty(C, "bar", {
        enumerable: true,
        configurable: true,
        writable: true,
        value: (_b = /** @class */ (function () {
                function Inner() {
                    Object.defineProperty(this, _d, {
                        enumerable: true,
                        configurable: true,
                        writable: true,
                        value: 123
                    });
                }
                return Inner;
            }()),
            _c = _a.c,
            _d = _a.c,
            Object.defineProperty(_b, _c, {
                enumerable: true,
                configurable: true,
                writable: true,
                value: 123
            }),
            _b)
    });
    return C;
}());
Microsoft/TypeScript
tests/baselines/reference/typeOfThisInStaticMembers13(target=es5).js
JavaScript
apache-2.0
1,288
package com.mapswithme.maps.widget;

import android.app.Activity;
import android.support.annotation.StringRes;
import android.support.v7.widget.Toolbar;
import android.view.View;

import com.mapswithme.maps.R;
import com.mapswithme.util.UiUtils;
import com.mapswithme.util.Utils;

public class ToolbarController
{
  protected final Activity mActivity;
  protected final Toolbar mToolbar;

  public ToolbarController(View root, Activity activity)
  {
    mActivity = activity;
    mToolbar = (Toolbar) root.findViewById(R.id.toolbar);
    UiUtils.showHomeUpButton(mToolbar);
    mToolbar.setNavigationOnClickListener(new View.OnClickListener()
    {
      @Override
      public void onClick(View v)
      {
        onUpClick();
      }
    });
  }

  public void onUpClick()
  {
    Utils.navigateToParent(mActivity);
  }

  public ToolbarController setTitle(CharSequence title)
  {
    mToolbar.setTitle(title);
    return this;
  }

  public ToolbarController setTitle(@StringRes int title)
  {
    mToolbar.setTitle(title);
    return this;
  }

  public Toolbar getToolbar()
  {
    return mToolbar;
  }
}
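// Illustrative usage sketch (hypothetical fragment; the root view and string
// resource below are assumptions): both setTitle() overloads return the
// controller itself, so construction and configuration chain naturally:
//
//   new ToolbarController(rootView, getActivity())
//       .setTitle(R.string.settings);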
programming086/omim
android/src/com/mapswithme/maps/widget/ToolbarController.java
Java
apache-2.0
1,109
/**
 *  Copyright 2005-2015 Red Hat, Inc.
 *
 *  Red Hat licenses this file to you under the Apache License, version
 *  2.0 (the "License"); you may not use this file except in compliance
 *  with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 *  implied.  See the License for the specific language governing
 *  permissions and limitations under the License.
 */
package io.fabric8.insight.elasticsearch.discovery;

import org.elasticsearch.common.collect.Lists;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.AbstractPlugin;

import java.util.Collection;

public class FabricDiscoveryPlugin extends AbstractPlugin {

    private final Settings settings;

    public FabricDiscoveryPlugin(Settings settings) {
        this.settings = settings;
    }

    @Override
    public String name() {
        return "fabric8-discovery";
    }

    @Override
    public String description() {
        return "Discovery module using Fabric8";
    }
}
chirino/fabric8
insight/insight-elasticsearch-discovery/src/main/java/io/fabric8/insight/elasticsearch/discovery/FabricDiscoveryPlugin.java
Java
apache-2.0
1,300
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/ec2/model/InstanceHealthStatus.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>

using namespace Aws::Utils;

namespace Aws
{
  namespace EC2
  {
    namespace Model
    {
      namespace InstanceHealthStatusMapper
      {
        static const int healthy_HASH = HashingUtils::HashString("healthy");
        static const int unhealthy_HASH = HashingUtils::HashString("unhealthy");

        InstanceHealthStatus GetInstanceHealthStatusForName(const Aws::String& name)
        {
          int hashCode = HashingUtils::HashString(name.c_str());
          if (hashCode == healthy_HASH)
          {
            return InstanceHealthStatus::healthy;
          }
          else if (hashCode == unhealthy_HASH)
          {
            return InstanceHealthStatus::unhealthy;
          }
          EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
          if (overflowContainer)
          {
            overflowContainer->StoreOverflow(hashCode, name);
            return static_cast<InstanceHealthStatus>(hashCode);
          }
          return InstanceHealthStatus::NOT_SET;
        }

        Aws::String GetNameForInstanceHealthStatus(InstanceHealthStatus enumValue)
        {
          switch (enumValue)
          {
          case InstanceHealthStatus::healthy:
            return "healthy";
          case InstanceHealthStatus::unhealthy:
            return "unhealthy";
          default:
            EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
            if (overflowContainer)
            {
              return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
            }
            return {};
          }
        }

      } // namespace InstanceHealthStatusMapper
    } // namespace Model
  } // namespace EC2
} // namespace Aws
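// Illustrative round-trip sketch (assumes the SDK headers above are available):
//
//   using namespace Aws::EC2::Model;
//   InstanceHealthStatus s =
//       InstanceHealthStatusMapper::GetInstanceHealthStatusForName("healthy");
//   Aws::String n = InstanceHealthStatusMapper::GetNameForInstanceHealthStatus(s);
//   // n == "healthy"; an unrecognized name is stashed in the overflow
//   // container when one is registered, and maps to NOT_SET otherwise.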
awslabs/aws-sdk-cpp
aws-cpp-sdk-ec2/source/model/InstanceHealthStatus.cpp
C++
apache-2.0
2,041
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lsp

import (
    "context"
    "strings"

    "golang.org/x/tools/internal/lsp/protocol"
    "golang.org/x/tools/internal/lsp/source"
    "golang.org/x/tools/internal/lsp/telemetry"
    "golang.org/x/tools/internal/span"
    "golang.org/x/tools/internal/telemetry/log"
)

func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI) {
    ctx = telemetry.File.With(ctx, uri)

    f, err := view.GetFile(ctx, uri)
    if err != nil {
        log.Error(ctx, "no file", err, telemetry.File)
        return
    }
    // For non-Go files, don't return any diagnostics.
    gof, ok := f.(source.GoFile)
    if !ok {
        return
    }
    reports, err := source.Diagnostics(ctx, view, gof, s.disabledAnalyses)
    if err != nil {
        log.Error(ctx, "failed to compute diagnostics", err, telemetry.File)
        return
    }

    s.undeliveredMu.Lock()
    defer s.undeliveredMu.Unlock()

    for uri, diagnostics := range reports {
        if err := s.publishDiagnostics(ctx, uri, diagnostics); err != nil {
            if s.undelivered == nil {
                s.undelivered = make(map[span.URI][]source.Diagnostic)
            }
            log.Error(ctx, "failed to deliver diagnostic (will retry)", err, telemetry.File)
            s.undelivered[uri] = diagnostics
            continue
        }
        // In case we had old, undelivered diagnostics.
        delete(s.undelivered, uri)
    }
    // Anytime we compute diagnostics, make sure to also send along any
    // undelivered ones (only for remaining URIs).
    for uri, diagnostics := range s.undelivered {
        if err := s.publishDiagnostics(ctx, uri, diagnostics); err != nil {
            log.Error(ctx, "failed to deliver diagnostic for (will not retry)", err, telemetry.File)
        }
        // If we fail to deliver the same diagnostics twice, just give up.
        delete(s.undelivered, uri)
    }
}

func (s *Server) publishDiagnostics(ctx context.Context, uri span.URI, diagnostics []source.Diagnostic) error {
    protocolDiagnostics, err := toProtocolDiagnostics(ctx, diagnostics)
    if err != nil {
        return err
    }
    s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
        Diagnostics: protocolDiagnostics,
        URI:         protocol.NewURI(uri),
    })
    return nil
}

func toProtocolDiagnostics(ctx context.Context, diagnostics []source.Diagnostic) ([]protocol.Diagnostic, error) {
    reports := []protocol.Diagnostic{}
    for _, diag := range diagnostics {
        diagnostic, err := toProtocolDiagnostic(ctx, diag)
        if err != nil {
            return nil, err
        }
        reports = append(reports, diagnostic)
    }
    return reports, nil
}

func toProtocolDiagnostic(ctx context.Context, diag source.Diagnostic) (protocol.Diagnostic, error) {
    var severity protocol.DiagnosticSeverity
    switch diag.Severity {
    case source.SeverityError:
        severity = protocol.SeverityError
    case source.SeverityWarning:
        severity = protocol.SeverityWarning
    }
    return protocol.Diagnostic{
        Message:  strings.TrimSpace(diag.Message), // go list returns errors prefixed by newline
        Range:    diag.Range,
        Severity: severity,
        Source:   diag.Source,
    }, nil
}
Miciah/origin
vendor/golang.org/x/tools/internal/lsp/diagnostics.go
Go
apache-2.0
3,070
// create table view
var tableview = Titanium.UI.createTableView();

Ti.App.fireEvent("show_indicator");

// create table view event listener
tableview.addEventListener('click', function(e)
{
    // event data
    var index = e.index;
    var section = e.section;
    var row = e.row;
    var rowdata = e.rowData;
    Titanium.UI.createAlertDialog({title:'Table View',message:'row ' + row + ' index ' + index + ' section ' + section + ' row data ' + rowdata}).show();
});

var navActInd = Titanium.UI.createActivityIndicator();
navActInd.show();
if (Titanium.Platform.name == 'iPhone OS')
{
    Titanium.UI.currentWindow.setRightNavButton(navActInd);
}

// add table view to the window
Titanium.UI.currentWindow.add(tableview);

Titanium.Yahoo.yql('select * from flickr.photos.search where text="Cat" limit 10', function(e)
{
    var images = [];
    var data = e.data;
    for (var c = 0; c < data.photo.length; c++)
    {
        var photo = data.photo[c];

        // form the flickr url
        var url = 'http://farm' + photo.farm + '.static.flickr.com/' + photo.server + '/' + photo.id + '_' + photo.secret + '_m.jpg';
        Ti.API.info("flickr url = " + url);

        var row = Ti.UI.createTableViewRow({height:60});
        var title = Ti.UI.createLabel({
            left:70,
            right:10,
            textAlign:'left',
            height:50,
            text:photo.title ? photo.title : "Untitled",
            font:{fontWeight:'bold',fontSize:18}
        });

        var image;
        if (Titanium.Platform.name == 'android')
        {
            // iphone moved to a single image property - android needs to do the same
            image = Ti.UI.createImageView({
                url: url,
                height:50,
                width:50,
                left:10,
                defaultImage:'../modules/ui/images/photoDefault.png'
            });
        }
        else
        {
            image = Ti.UI.createImageView({
                image: url,
                height:50,
                width:50,
                left:10,
                defaultImage:'../modules/ui/images/photoDefault.png'
            });
        }
        row.add(image);
        row.add(title);
        images[c] = row;
    }
    tableview.setData(images);
    navActInd.hide();
    Ti.App.fireEvent("hide_indicator");
});
arnaudsj/titanium_mobile
demos/SmokeTest/Resources/examples/yql_flickr.js
JavaScript
apache-2.0
1,970
package gwt.material.design.client.ui;

/*
 * #%L
 * GwtMaterial
 * %%
 * Copyright (C) 2015 GwtMaterialDesign
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import com.google.gwt.dom.client.Document;
import com.google.gwt.dom.client.Element;
import com.google.gwt.dom.client.Style;
import gwt.material.design.client.base.mixin.ColorsMixin;
import gwt.material.design.client.base.mixin.CssNameMixin;
import gwt.material.design.client.base.mixin.ToggleStyleMixin;
import gwt.material.design.client.constants.IconPosition;
import gwt.material.design.client.constants.IconSize;
import gwt.material.design.client.base.AbstractButton;
import gwt.material.design.client.base.HasIcon;
import gwt.material.design.client.base.HasSeparator;
import gwt.material.design.client.constants.IconType;

//@formatter:off

/**
 * We have included 740 Material Design Icons courtesy of Google.
 * You can download them directly from the Material Design specs.
 *
 * <h3>UiBinder Usage:</h3>
 * <pre>
 *{@code <m:MaterialIcon waves="LIGHT" iconType="POLYMER"/>
 * <m:MaterialIcon waves="LIGHT" iconType="POLYMER" textColor="blue" type="CIRCLE"/>
 * <m:MaterialIcon waves="LIGHT" iconType="POLYMER" backgroundColor="blue" textColor="white" type="CIRCLE" tooltip="Tooltip" tooltipLocation="BOTTOM"/>}
 * </pre>
 *
 * @author kevzlou7979
 * @author Ben Dol
 * @see <a href="http://www.google.com/design/icons/">Search Google Icons</a>
 * @see <a href="http://gwt-material-demo.herokuapp.com/#icons">Material Icons Documentation</a>
 */
//@formatter:on
public class MaterialIcon extends AbstractButton implements HasSeparator, HasIcon {

    private final CssNameMixin<MaterialIcon, IconPosition> posMixin = new CssNameMixin<>(this);
    private final CssNameMixin<MaterialIcon, IconSize> sizeMixin = new CssNameMixin<>(this);
    private final ToggleStyleMixin<MaterialIcon> prefixMixin = new ToggleStyleMixin<>(this, "prefix");
    private final ColorsMixin<MaterialIcon> colorsMixin = new ColorsMixin<>(this);

    /**
     * Creates an empty icon.
     */
    public MaterialIcon() {
        super();
        addStyleName("material-icons");
    }

    /**
     * Sets a simple icon with a given type.
     */
    public MaterialIcon(IconType iconType) {
        this();
        setIconType(iconType);
    }

    /**
     * Sets an icon with textColor and backgroundColor.
     */
    public MaterialIcon(IconType iconType, String textColor, String bgColor) {
        this();
        setIconType(iconType);
        setTextColor(textColor);
        setBackgroundColor(bgColor);
    }

    public void setInnerText(String innerText) {
        getElement().setInnerText(innerText);
    }

    @Override
    protected Element createElement() {
        return Document.get().createElement("i");
    }

    @Override
    public MaterialIcon getIcon() {
        return this;
    }

    @Override
    public void setIconType(IconType icon) {
        getElement().setInnerText(icon.getCssName());
    }

    @Override
    public void setIconPosition(IconPosition position) {
        posMixin.setCssName(position);
    }

    @Override
    public void setIconSize(IconSize size) {
        sizeMixin.setCssName(size);
    }

    @Override
    public void setIconColor(String iconColor) {
        colorsMixin.setTextColor(iconColor);
    }

    @Override
    public void setIconFontSize(double size, Style.Unit unit) {
        getElement().getStyle().setFontSize(size, unit);
    }

    @Override
    public void setIconPrefix(boolean prefix) {
        prefixMixin.setOn(prefix);
    }

    @Override
    public boolean isIconPrefix() {
        return prefixMixin.isOn();
    }
}
gilberto-torrezan/gwt-material
gwt-material/src/main/java/gwt/material/design/client/ui/MaterialIcon.java
Java
apache-2.0
4,174
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log as logging
from oslo_service import threadgroup

from blazar.db import api as db_api

LOG = logging.getLogger(__name__)


class BaseMonitor(object):
    """Base class for monitoring classes."""

    def __init__(self, monitor_plugins):
        self.monitor_plugins = monitor_plugins
        self.tg = threadgroup.ThreadGroup()
        self.healing_timers = []

    def start_monitoring(self):
        """Start monitoring."""
        self.start_periodic_healing()

    def stop_monitoring(self):
        """Stop monitoring."""
        self.stop_periodic_healing()

    def start_periodic_healing(self):
        """Start periodic healing process."""
        for plugin in self.monitor_plugins:
            healing_interval_mins = plugin.get_healing_interval()
            if healing_interval_mins > 0:
                self.healing_timers.append(
                    self.tg.add_timer(healing_interval_mins * 60,
                                      self.call_monitor_plugin,
                                      None,
                                      plugin.heal))

    def stop_periodic_healing(self):
        """Stop periodic healing process."""
        for timer in self.healing_timers:
            self.tg.timer_done(timer)

    def call_monitor_plugin(self, callback, *args, **kwargs):
        """Call a callback and update lease/reservation flags."""
        # This method has to handle any exception internally. It shouldn't
        # raise an exception because the timer threads in the BaseMonitor class
        # terminates its execution once the thread has received any exception.
        try:
            # The callback() has to return a dictionary of
            # {reservation id: flags to update}.
            # e.g. {'dummyid': {'missing_resources': True}}
            reservation_flags = callback(*args, **kwargs)

            if reservation_flags:
                self._update_flags(reservation_flags)
        except Exception as e:
            LOG.exception('Caught an exception while executing a callback. '
                          '%s', str(e))

    def _update_flags(self, reservation_flags):
        """Update lease/reservation flags."""
        lease_ids = set([])

        for reservation_id, flags in reservation_flags.items():
            db_api.reservation_update(reservation_id, flags)
            LOG.debug('Reservation %s was updated: %s',
                      reservation_id, flags)

            reservation = db_api.reservation_get(reservation_id)
            lease_ids.add(reservation['lease_id'])

        for lease_id in lease_ids:
            LOG.debug('Lease %s was updated: {"degraded": True}', lease_id)
            db_api.lease_update(lease_id, {'degraded': True})
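# Illustrative sketch (hypothetical plugin, not part of this module): a monitor
# plugin only needs get_healing_interval() and heal() for the periodic healing
# loop above, and heal() returns {reservation_id: flags} exactly as documented
# in call_monitor_plugin():
#
#     class DummyMonitorPlugin(object):
#         def get_healing_interval(self):
#             return 60  # minutes; 0 disables periodic healing
#
#         def heal(self):
#             return {'dummyid': {'missing_resources': True}}
#
#     monitor = BaseMonitor([DummyMonitorPlugin()])
#     monitor.start_monitoring()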
stackforge/blazar
blazar/monitor/base.py
Python
apache-2.0
3,283
/**
 * Copyright 2014
 * SMEdit https://github.com/StarMade/SMEdit
 * SMTools https://github.com/StarMade/SMTools
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 **/
package jo.sm.plugins.all.props;

import jo.sm.ui.act.plugin.Description;

/**
 * @author Jo Jaquinta for SMEdit Classic - version 1.0
 **/
@Description(displayName = "Properties", shortDescription = "Properties affecting the whole application.")
public class PropsParameters {

    @Description(displayName = "Invert X", shortDescription = "Invert X Axis on mouse")
    private boolean mInvertXAxis;
    @Description(displayName = "Invert Y", shortDescription = "Invert Y Axis on mouse")
    private boolean mInvertYAxis;

    public PropsParameters() {
    }

    public boolean isInvertXAxis() {
        return mInvertXAxis;
    }

    public void setInvertXAxis(boolean invertXAxis) {
        mInvertXAxis = invertXAxis;
    }

    public boolean isInvertYAxis() {
        return mInvertYAxis;
    }

    public void setInvertYAxis(boolean invertYAxis) {
        mInvertYAxis = invertYAxis;
    }
}
skunkiferous/SMEdit
jo_plugin/src/main/java/jo/sm/plugins/all/props/PropsParameters.java
Java
apache-2.0
1,578
//
// AuthenticationMethods.cs
//
// Author:
//     Jim Borden <jim.borden@couchbase.com>
//
// Copyright (c) 2015 Couchbase, Inc All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
using System;
using System.Collections.Generic;

using Couchbase.Lite.Auth;

namespace Couchbase.Lite.Listener
{
    /// <summary>
    /// Methods dealing with authentication for the client
    /// </summary>
    internal static class AuthenticationMethods
    {

        #region Public Methods

        /// <summary>
        /// Verifies and registers a facebook token for use in replication authentication
        /// </summary>
        /// <returns>The response state for further HTTP processing</returns>
        /// <param name="context">The context of the Couchbase Lite HTTP request</param>
        public static ICouchbaseResponseState RegisterFacebookToken(ICouchbaseListenerContext context)
        {
            var response = context.CreateResponse();
            var body = context.BodyAs<Dictionary<string, object>>();
            string email = body.GetCast<string>("email");
            string remoteUrl = body.GetCast<string>("remote_url");
            string accessToken = body.GetCast<string>("access_token");
            if (email != null && remoteUrl != null && accessToken != null) {
                Uri siteUrl;
                if (!Uri.TryCreate(remoteUrl, UriKind.Absolute, out siteUrl)) {
                    response.InternalStatus = StatusCode.BadParam;
                    response.JsonBody = new Body(new Dictionary<string, object> {
                        { "error", "invalid remote_url" }
                    });
                } else if (!FacebookAuthorizer.RegisterAccessToken(accessToken, email, siteUrl)) {
                    response.InternalStatus = StatusCode.BadParam;
                    response.JsonBody = new Body(new Dictionary<string, object> {
                        { "error", "invalid access_token" }
                    });
                } else {
                    response.JsonBody = new Body(new Dictionary<string, object> {
                        { "ok", "registered" },
                        { "email", email }
                    });
                }
            } else {
                response.InternalStatus = StatusCode.BadParam;
                response.JsonBody = new Body(new Dictionary<string, object> {
                    { "error", "required fields: access_token, email, remote_url" }
                });
            }

            return response.AsDefaultState();
        }

        /// <summary>
        /// Verifies and registers a persona token for use in replication authentication
        /// </summary>
        /// <returns>The response state for further HTTP processing</returns>
        /// <param name="context">The context of the Couchbase Lite HTTP request</param>
        public static ICouchbaseResponseState RegisterPersonaToken(ICouchbaseListenerContext context)
        {
            var response = context.CreateResponse();
            var body = context.BodyAs<Dictionary<string, object>>();
            string email = PersonaAuthorizer.RegisterAssertion(body.GetCast<string>("assertion"));
            if (email != null) {
                response.JsonBody = new Body(new Dictionary<string, object> {
                    { "ok", "registered" },
                    { "email", email }
                });
            } else {
                response.InternalStatus = StatusCode.BadParam;
                response.JsonBody = new Body(new Dictionary<string, object> {
                    { "error", "invalid assertion" }
                });
            }

            return response.AsDefaultState();
        }

        #endregion
    }
}
brettharrisonzya/couchbase-lite-net
src/ListenerComponent/Couchbase.Lite.Listener.Shared/PeerToPeer/AuthenticationMethods.cs
C#
apache-2.0
4,263
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.lens.cube.parse;

import static org.apache.hadoop.hive.ql.parse.HiveParser.Identifier;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_TABLE_OR_COL;

import java.util.Iterator;

import org.apache.lens.cube.error.LensCubeErrorCode;
import org.apache.lens.cube.metadata.CubeMeasure;
import org.apache.lens.cube.parse.CandidateTablePruneCause.CandidateTablePruneCode;
import org.apache.lens.server.api.error.LensException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.HiveParser;

import org.antlr.runtime.CommonToken;

import lombok.extern.slf4j.Slf4j;

/**
 * <p> Replace select and having columns with default aggregate functions on them, if default aggregate is defined and
 * if there isn't already an aggregate function specified on the columns. </p> <p/> <p> Expressions which already
 * contain aggregate sub-expressions will not be changed. </p> <p/> <p> At this point it's assumed that aliases have
 * been added to all columns. </p>
 */
@Slf4j
class AggregateResolver implements ContextRewriter {

  public AggregateResolver(Configuration conf) {
  }

  @Override
  public void rewriteContext(CubeQueryContext cubeql) throws LensException {
    if (cubeql.getCube() == null) {
      return;
    }

    boolean nonDefaultAggregates = false;
    boolean aggregateResolverDisabled = cubeql.getConf().getBoolean(CubeQueryConfUtil.DISABLE_AGGREGATE_RESOLVER,
      CubeQueryConfUtil.DEFAULT_DISABLE_AGGREGATE_RESOLVER);
    // Check if the query contains measures
    // 1. not inside default aggregate expressions
    // 2. With no default aggregate defined
    // 3. there are distinct selection of measures
    // If yes, only the raw (non aggregated) fact can answer this query.
    // In that case remove aggregate facts from the candidate fact list
    if (hasMeasuresInDistinctClause(cubeql, cubeql.getSelectAST(), false)
      || hasMeasuresInDistinctClause(cubeql, cubeql.getHavingAST(), false)
      || hasMeasuresNotInDefaultAggregates(cubeql, cubeql.getSelectAST(), null, aggregateResolverDisabled)
      || hasMeasuresNotInDefaultAggregates(cubeql, cubeql.getHavingAST(), null, aggregateResolverDisabled)
      || hasMeasures(cubeql, cubeql.getWhereAST())
      || hasMeasures(cubeql, cubeql.getGroupByAST())
      || hasMeasures(cubeql, cubeql.getOrderByAST())) {
      Iterator<CandidateFact> factItr = cubeql.getCandidateFacts().iterator();
      while (factItr.hasNext()) {
        CandidateFact candidate = factItr.next();
        if (candidate.fact.isAggregated()) {
          cubeql.addFactPruningMsgs(candidate.fact, CandidateTablePruneCause.missingDefaultAggregate());
          factItr.remove();
        }
      }
      nonDefaultAggregates = true;
      log.info("Query has non default aggregates, no aggregate resolution will be done");
    }

    cubeql.pruneCandidateFactSet(CandidateTablePruneCode.MISSING_DEFAULT_AGGREGATE);

    if (nonDefaultAggregates || aggregateResolverDisabled) {
      return;
    }

    resolveClause(cubeql, cubeql.getSelectAST());

    resolveClause(cubeql, cubeql.getHavingAST());

    Configuration distConf = cubeql.getConf();
    boolean isDimOnlyDistinctEnabled = distConf.getBoolean(CubeQueryConfUtil.ENABLE_ATTRFIELDS_ADD_DISTINCT,
      CubeQueryConfUtil.DEFAULT_ATTR_FIELDS_ADD_DISTINCT);
    if (isDimOnlyDistinctEnabled) {
      // Check if any measure/aggregate columns and distinct clause used in
      // select tree. If not, update selectAST token "SELECT" to "SELECT DISTINCT"
      if (!hasMeasures(cubeql, cubeql.getSelectAST()) && !isDistinctClauseUsed(cubeql.getSelectAST())
        && !HQLParser.hasAggregate(cubeql.getSelectAST())) {
        cubeql.getSelectAST().getToken().setType(HiveParser.TOK_SELECTDI);
      }
    }
  }

  // We need to traverse the clause looking for eligible measures which can be
  // wrapped inside aggregates
  // We have to skip any columns that are already inside an aggregate UDAF
  private String resolveClause(CubeQueryContext cubeql, ASTNode clause) throws LensException {
    if (clause == null) {
      return null;
    }

    for (int i = 0; i < clause.getChildCount(); i++) {
      transform(cubeql, clause, (ASTNode) clause.getChild(i), i);
    }

    return HQLParser.getString(clause);
  }

  private void transform(CubeQueryContext cubeql, ASTNode parent, ASTNode node, int nodePos) throws LensException {
    if (node == null) {
      return;
    }
    int nodeType = node.getToken().getType();

    if (!(HQLParser.isAggregateAST(node))) {
      if (nodeType == HiveParser.TOK_TABLE_OR_COL || nodeType == HiveParser.DOT) {
        // Leaf node
        ASTNode wrapped = wrapAggregate(cubeql, node);
        if (wrapped != node) {
          if (parent != null) {
            parent.setChild(nodePos, wrapped);
            // Check if this node has an alias
            ASTNode sibling = HQLParser.findNodeByPath(parent, Identifier);
            String expr;
            if (sibling != null) {
              expr = HQLParser.getString(parent);
            } else {
              expr = HQLParser.getString(wrapped);
            }
            cubeql.addAggregateExpr(expr.trim());
          }
        }
      } else {
        // Dig deeper in non-leaf nodes
        for (int i = 0; i < node.getChildCount(); i++) {
          transform(cubeql, node, (ASTNode) node.getChild(i), i);
        }
      }
    }
  }

  // Wrap an aggregate function around the node if its a measure, leave it
  // unchanged otherwise
  private ASTNode wrapAggregate(CubeQueryContext cubeql, ASTNode node) throws LensException {

    String tabname = null;
    String colname;

    if (node.getToken().getType() == HiveParser.TOK_TABLE_OR_COL) {
      colname = ((ASTNode) node.getChild(0)).getText();
    } else {
      // node in 'alias.column' format
      ASTNode tabident = HQLParser.findNodeByPath(node, TOK_TABLE_OR_COL, Identifier);
      ASTNode colIdent = (ASTNode) node.getChild(1);

      colname = colIdent.getText();
      tabname = tabident.getText();
    }

    String msrname = StringUtils.isBlank(tabname) ? colname : tabname + "." + colname;

    if (cubeql.isCubeMeasure(msrname)) {
      if (cubeql.getQueriedExprs().contains(colname)) {
        String alias = cubeql.getAliasForTableName(cubeql.getCube().getName());
        for (ASTNode exprNode : cubeql.getExprCtx().getExpressionContext(colname, alias).getAllASTNodes()) {
          transform(cubeql, null, exprNode, 0);
        }
        return node;
      } else {
        CubeMeasure measure = cubeql.getCube().getMeasureByName(colname);
        String aggregateFn = measure.getAggregate();

        if (StringUtils.isBlank(aggregateFn)) {
          throw new LensException(LensCubeErrorCode.NO_DEFAULT_AGGREGATE.getLensErrorInfo(), colname);
        }
        ASTNode fnroot = new ASTNode(new CommonToken(HiveParser.TOK_FUNCTION));
        fnroot.setParent(node.getParent());

        ASTNode fnIdentNode = new ASTNode(new CommonToken(HiveParser.Identifier, aggregateFn));
        fnIdentNode.setParent(fnroot);
        fnroot.addChild(fnIdentNode);

        node.setParent(fnroot);
        fnroot.addChild(node);

        return fnroot;
      }
    } else {
      return node;
    }
  }

  private boolean hasMeasuresNotInDefaultAggregates(CubeQueryContext cubeql, ASTNode node, String function,
    boolean aggregateResolverDisabled) {
    if (node == null) {
      return false;
    }

    if (HQLParser.isAggregateAST(node)) {
      if (node.getChild(0).getType() == HiveParser.Identifier) {
        function = BaseSemanticAnalyzer.unescapeIdentifier(node.getChild(0).getText());
      }
    } else if (cubeql.isCubeMeasure(node)) {
      // Exit for the recursion

      String colname;
      if (node.getToken().getType() == HiveParser.TOK_TABLE_OR_COL) {
        colname = ((ASTNode) node.getChild(0)).getText();
      } else {
        // node in 'alias.column' format
        ASTNode colIdent = (ASTNode) node.getChild(1);
        colname = colIdent.getText();
      }
      colname = colname.toLowerCase();
      if (cubeql.getQueriedExprs().contains(colname)) {
        String cubeAlias = cubeql.getAliasForTableName(cubeql.getCube().getName());
        for (ASTNode exprNode : cubeql.getExprCtx().getExpressionContext(colname, cubeAlias).getAllASTNodes()) {
          if (hasMeasuresNotInDefaultAggregates(cubeql, exprNode, function, aggregateResolverDisabled)) {
            return true;
          }
        }
        return false;
      } else {
        CubeMeasure measure = cubeql.getCube().getMeasureByName(colname);
        if (function != null && !function.isEmpty()) {
          // Get the cube measure object and check if the passed function is the
          // default one set for this measure
          return !function.equalsIgnoreCase(measure.getAggregate());
        } else if (!aggregateResolverDisabled && measure.getAggregate() != null) {
          // not inside any aggregate, but default aggregate exists
          return false;
        }
        return true;
      }
    }

    for (int i = 0; i < node.getChildCount(); i++) {
      if (hasMeasuresNotInDefaultAggregates(cubeql, (ASTNode) node.getChild(i), function, aggregateResolverDisabled)) {
        // Return on the first measure not inside its default aggregate
        return true;
      }
    }
    return false;
  }

  /*
   * Check if distinct keyword used in node
   */
  private boolean isDistinctClauseUsed(ASTNode node) {
    if (node == null) {
      return false;
    }
    if (node.getToken() != null) {
      if (node.getToken().getType() == HiveParser.TOK_FUNCTIONDI
        || node.getToken().getType() == HiveParser.TOK_SELECTDI) {
        return true;
      }
    }
    for (int i = 0; i < node.getChildCount(); i++) {
      if (isDistinctClauseUsed((ASTNode) node.getChild(i))) {
        return true;
      }
    }
    return false;
  }

  private boolean hasMeasuresInDistinctClause(CubeQueryContext cubeql, ASTNode node, boolean hasDistinct) {
    if (node == null) {
      return false;
    }

    int exprTokenType = node.getToken().getType();
    boolean isDistinct = hasDistinct;
    if (exprTokenType == HiveParser.TOK_FUNCTIONDI || exprTokenType == HiveParser.TOK_SELECTDI) {
      isDistinct = true;
    } else if (cubeql.isCubeMeasure(node) && isDistinct) {
      // Exit for the recursion
      return true;
    }

    for (int i = 0; i < node.getChildCount(); i++) {
      if (hasMeasuresInDistinctClause(cubeql, (ASTNode) node.getChild(i), isDistinct)) {
        // Return on the first measure in distinct clause
        return true;
      }
    }
    return false;
  }

  private boolean hasMeasures(CubeQueryContext cubeql, ASTNode node) {
    if (node == null) {
      return false;
    }

    if (cubeql.isCubeMeasure(node)) {
      return true;
    }

    for (int i = 0; i < node.getChildCount(); i++) {
      if (hasMeasures(cubeql, (ASTNode) node.getChild(i))) {
        return true;
      }
    }
    return false;
  }

  static void updateAggregates(ASTNode root, CubeQueryContext cubeql) {
    if (root == null) {
      return;
    }

    if (HQLParser.isAggregateAST(root)) {
      cubeql.addAggregateExpr(HQLParser.getString(root).trim());
    } else {
      for (int i = 0; i < root.getChildCount(); i++) {
        ASTNode child = (ASTNode) root.getChild(i);
        updateAggregates(child, cubeql);
      }
    }
  }
}
adeelmahmood/lens
lens-cube/src/main/java/org/apache/lens/cube/parse/AggregateResolver.java
Java
apache-2.0
12,371
# ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ For machine generated datasets. """ import numpy as np from neon import NervanaObject class Task(NervanaObject): """ Base class from which ticker tasks inherit. """ def fetch_io(self, time_steps): """ Generate inputs, outputs numpy tensor pair of size appropriate for this minibatch """ columns = time_steps * self.be.bsz inputs = np.zeros((self.nin, columns)) outputs = np.zeros((self.nout, columns)) return inputs, outputs def fill_buffers(self, time_steps, inputs, outputs, in_tensor, out_tensor, mask): """ Do some logistical stuff to get our numpy arrays safely to device. This can almost certainly be cleaned up. """ # Put inputs and outputs, which are too small, into properly shaped arrays columns = time_steps * self.be.bsz inC = np.zeros((self.nin, self.max_columns)) outC = np.zeros((self.nout, self.max_columns)) inC[:, :columns] = inputs outC[:, :columns] = outputs # Copy those arrays to device in_tensor.set(inC) out_tensor.set(outC) # Set a mask over the unused part of the buffer mask[:, :columns] = 1 mask[:, columns:] = 0 class CopyTask(Task): """ The copy task from the Neural Turing Machines paper: http://arxiv.org/abs/1410.5401 This version of the task is batched. All sequences in the same mini-batch are the same length, but every new minibatch has a randomly chosen minibatch length. When a given minibatch has length < seq_len_max, we mask the outputs for time steps after time_steps_max. The generated data is laid out in the same way as other RNN data in neon. """ def __init__(self, seq_len_max, vec_size): """ Set up the attributes that Ticker needs to see. Args: seq_len_max (int): longest allowable sequence length vec_size (int): width of the bit-vector to be copied (was 8 in paper) """ self.seq_len_max = seq_len_max self.vec_size = vec_size self.nout = self.vec_size # output has the same dimension as the underlying bit vector self.nin = self.vec_size + 2 # input has more dims (for the start and stop channels) self.time_steps_func = lambda l: 2 * l + 2 self.time_steps_max = 2 * self.seq_len_max + 2 self.time_steps_max = self.time_steps_func(self.seq_len_max) self.max_columns = self.time_steps_max * self.be.bsz def synthesize(self, in_tensor, out_tensor, mask): """ Create a new minibatch of ticker copy task data. 
Args: in_tensor: device buffer holding inputs out_tensor: device buffer holding outputs mask: device buffer for the output mask """ # All sequences in a minibatch are the same length for convenience seq_len = np.random.randint(1, self.seq_len_max + 1) time_steps = self.time_steps_func(seq_len) # Generate intermediate buffers of the right size inputs, outputs = super(CopyTask, self).fetch_io(time_steps) # Set the start bit inputs[-2, :self.be.bsz] = 1 # Generate the sequence to be copied seq = np.random.randint(2, size=(self.vec_size, seq_len * self.be.bsz)) # Set the stop bit stop_loc = self.be.bsz * (seq_len + 1) inputs[-1, stop_loc:stop_loc + self.be.bsz] = 1 # Place the actual sequence to copy in inputs inputs[:self.vec_size, self.be.bsz:stop_loc] = seq # Now place that same sequence in a different place in outputs outputs[:, self.be.bsz * (seq_len + 2):] = seq # Fill the device minibatch buffers super(CopyTask, self).fill_buffers(time_steps, inputs, outputs, in_tensor, out_tensor, mask) class RepeatCopyTask(Task): """ The repeat copy task from the Neural Turing Machines paper: http://arxiv.org/abs/1410.5401 See comments on CopyTask class for more details. """ def __init__(self, seq_len_max, repeat_count_max, vec_size): """ Set up the attributes that Ticker needs to see. Args: seq_len_max (int): longest allowable sequence length repeat_count_max (int): max number of repeats vec_size (int): width of the bit-vector to be copied (was 8 in paper) """ self.seq_len_max = seq_len_max self.repeat_count_max = seq_len_max self.vec_size = vec_size self.nout = self.vec_size + 1 # we output the sequence and a stop bit in a stop channel self.nin = self.vec_size + 2 # input has more dims (for the start and stop channels) # seq is seen once as input, repeat_count times as output, with a # start bit, stop bit, and output stop bit self.time_steps_func = lambda l, r: l * (r + 1) + 3 self.time_steps_max = self.time_steps_func(self.seq_len_max, self.repeat_count_max) self.max_columns = self.time_steps_max * self.be.bsz def synthesize(self, in_tensor, out_tensor, mask): """ Create a new minibatch of ticker repeat copy task data. Args: in_tensor: device buffer holding inputs out_tensor: device buffer holding outputs mask: device buffer for the output mask """ # All sequences in a minibatch are the same length for convenience seq_len = np.random.randint(1, self.seq_len_max + 1) repeat_count = np.random.randint(1, self.repeat_count_max + 1) time_steps = self.time_steps_func(seq_len, repeat_count) # Get the minibatch specific numpy buffers inputs, outputs = super(RepeatCopyTask, self).fetch_io(time_steps) # Set the start bit inputs[-2, :self.be.bsz] = 1 # Generate the sequence to be copied seq = np.random.randint(2, size=(self.vec_size, seq_len * self.be.bsz)) # Set the repeat count # TODO: should we normalize repeat count? 
stop_loc = self.be.bsz * (seq_len + 1) inputs[-1, stop_loc:stop_loc + self.be.bsz] = repeat_count # Place the actual sequence to copy in inputs inputs[:self.vec_size, self.be.bsz:stop_loc] = seq # Now place that same sequence repeat_copy times in outputs for i in range(repeat_count): start = self.be.bsz * ((i + 1) * seq_len + 2) stop = start + seq_len * self.be.bsz outputs[:-1, start:stop] = seq # Place the output finish bit outputs[-1, -self.be.bsz:] = 1 # Fill the device minibatch buffers super(RepeatCopyTask, self).fill_buffers(time_steps, inputs, outputs, in_tensor, out_tensor, mask) class PrioritySortTask(Task): """ The priority sort task from the Neural Turing Machines paper: http://arxiv.org/abs/1410.5401 See comments on CopyTask class for more details. """ def __init__(self, seq_len_max, vec_size): """ Set up the attributes that Ticker needs to see. Args: seq_len_max (int): longest allowable sequence length vec_size (int): width of the bit-vector to be copied (was 8 in paper) """ self.seq_len_max = seq_len_max self.vec_size = vec_size self.nout = self.vec_size # we output the sorted sequence, with no stop bit self.nin = self.vec_size + 3 # extra channels for start, stop, and priority # seq is seen once as input with start and stop bits # then we output seq in sorted order self.time_steps_func = lambda l: 2 * l + 2 self.time_steps_max = self.time_steps_func(self.seq_len_max) self.max_columns = self.time_steps_max * self.be.bsz def synthesize(self, in_tensor, out_tensor, mask): """ Create a new minibatch of ticker priority sort task data. Args: in_tensor: device buffer holding inputs out_tensor: device buffer holding outputs mask: device buffer for the output mask """ # All sequences in a minibatch are the same length for convenience seq_len = np.random.randint(1, self.seq_len_max + 1) time_steps = self.time_steps_func(seq_len) # Get the minibatch specific numpy buffers inputs, outputs = super(PrioritySortTask, self).fetch_io(time_steps) # Set the start bit inputs[-3, :self.be.bsz] = 1 # Generate the sequence to be copied seq = np.random.randint(2, size=(self.nin, seq_len * self.be.bsz)).astype(float) # Zero out the start, stop, and priority channels seq[-3:, :] = 0 # Generate the scalar priorities and put them in seq priorities = np.random.uniform(-1, 1, size=(seq_len * self.be.bsz,)) seq[-1, :] = priorities # Set the stop bit stop_loc = self.be.bsz * (seq_len + 1) inputs[-2, stop_loc:stop_loc + self.be.bsz] = 1 # Place the actual sequence to copy in inputs inputs[:, self.be.bsz:stop_loc] = seq # sort the sequences for i in range(self.be.bsz): # for every sequence in the batch # x <- every column in the sequence x = seq[:, i::self.be.bsz] # sort that set of columns by elt in the last row (the priority) x = x[:, x[-1, :].argsort()] # put those columns back into minibatch in the right places seq[:, i::self.be.bsz] = x outputs[:, self.be.bsz * (seq_len + 2):] = seq[:self.nout, :] # Fill the device minibatch buffers super(PrioritySortTask, self).fill_buffers(time_steps, inputs, outputs, in_tensor, out_tensor, mask) class Ticker(NervanaObject): """ This class defines methods for generating and iterating over ticker datasets. """ def reset(self): """ Reset has no meaning in the context of ticker data. """ pass def __init__(self, task): """ Construct a ticker dataset object. Args: Task is an object representing the task to be trained on It contains information about input and output size, sequence length, etc. 
It also implements a synthesize function, which is used to generate the next minibatch of data. """ self.task = task # These attributes don't make much sense in the context of tickers # but I suspect it will be hard to get rid of them self.batch_index = 0 self.nbatches = 100 self.ndata = self.nbatches * self.be.bsz # Alias these because other code relies on datasets having nin and nout self.nout = task.nout self.nin = task.nin # Configuration elsewhere relies on the existence of this self.shape = (self.nin, self.task.time_steps_max) # Initialize the inputs, the outputs, and the mask self.dev_X = self.be.iobuf((self.nin, self.task.time_steps_max)) self.dev_y = self.be.iobuf((self.nout, self.task.time_steps_max)) self.mask = self.be.iobuf((self.nout, self.task.time_steps_max)) def __iter__(self): """ Generator that can be used to iterate over this dataset. Yields: tuple : the next minibatch of data. The second element of the tuple is itself a tuple (t,m) with: t: the actual target as generated by the task object m: the output mask to account for the difference between the seq_length for this minibatch and the max seq_len, which is also the number of columns in X,t, and m """ self.batch_index = 0 while self.batch_index < self.nbatches: # The task object writes minibatch data into buffers we pass it self.task.synthesize(self.dev_X, self.dev_y, self.mask) self.batch_index += 1 yield self.dev_X, (self.dev_y, self.mask)
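
# Example usage (illustrative sketch, not part of the original module). It
# assumes a neon backend created via gen_backend, and that CopyTask's
# constructor takes (seq_len_max, vec_size) in the same style as the other
# task constructors above:
#
#   from neon.backends import gen_backend
#
#   be = gen_backend(backend='cpu', batch_size=16)
#   task = CopyTask(seq_len_max=8, vec_size=8)
#   ticker = Ticker(task)
#   for X, (y, mask) in ticker:
#       pass  # X, y, mask are device buffers of shape (features, steps * bsz)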
coufon/neon-distributed
neon/data/ticker.py
Python
apache-2.0
13,214
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package datafu.pig.random; import java.io.IOException; import java.util.UUID; import org.apache.pig.EvalFunc; import org.apache.pig.builtin.Nondeterministic; import org.apache.pig.data.*; import org.apache.pig.impl.logicalLayer.schema.Schema; /** * Generates a random UUID using java.util.UUID */ @Nondeterministic public class RandomUUID extends EvalFunc<String> { public String exec(Tuple input) throws IOException { return UUID.randomUUID().toString(); } @Override public Schema outputSchema(Schema input) { return new Schema(new Schema.FieldSchema("uuid", DataType.CHARARRAY)); } }
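
// Example usage from a Pig script (illustrative sketch; the relation and
// field names below are hypothetical, only the UDF class above is real):
//
//   DEFINE RandomUUID datafu.pig.random.RandomUUID();
//
//   data = LOAD 'input' AS (val:chararray);
//   with_ids = FOREACH data GENERATE RandomUUID() AS uuid, val;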
apache/incubator-datafu
datafu-pig/src/main/java/datafu/pig/random/RandomUUID.java
Java
apache-2.0
1,445
package mem import ( "bytes" "encoding/json" "errors" "fmt" "strings" "sync" "sync/atomic" "time" "github.com/portworx/kvdb" "github.com/portworx/kvdb/common" "github.com/sirupsen/logrus" ) const ( // Name is the name of this kvdb implementation. Name = "kv-mem" // KvSnap is an option passed to designate this kvdb as a snap. KvSnap = "KvSnap" // KvUseInterface is an option passed that configures the mem to store // the values as interfaces instead of bytes. It will not create a // copy of the interface that is passed in. USE WITH CAUTION KvUseInterface = "KvUseInterface" bootstrapKey = "bootstrap" ) var ( // ErrSnap is returned if an operation is not supported on a snap. ErrSnap = errors.New("operation not supported on snap") // ErrSnapWithInterfaceNotSupported is returned when a snap kv-mem is // created with KvUseInterface flag on ErrSnapWithInterfaceNotSupported = errors.New("snap kvdb not supported with interfaces") // ErrIllegalSelect is returned when an incorrect select function // implementation is detected. ErrIllegalSelect = errors.New("Illegal Select implementation") ) func init() { if err := kvdb.Register(Name, New, Version); err != nil { panic(err.Error()) } } type memKV struct { common.BaseKvdb // m is the key value database m map[string]*memKVPair // updates is the list of latest few updates dist WatchDistributor // mutex protects m, w, wt mutex sync.Mutex // index current kvdb index index uint64 domain string // locks is the map of currently held locks locks map[string]chan int // noByte will store all the values as interface noByte bool kvdb.Controller } type memKVPair struct { kvdb.KVPair // ivalue is the value for this kv pair stored as an interface ivalue interface{} } func (mkvp *memKVPair) copy() *kvdb.KVPair { copyKvp := mkvp.KVPair if mkvp.Value == nil && mkvp.ivalue != nil { copyKvp.Value, _ = common.ToBytes(mkvp.ivalue) } return &copyKvp } type snapMem struct { *memKV } // watchUpdate refers to an update to this kvdb type watchUpdate struct { // key is the key that was updated key string // kvp is the key-value that was updated kvp memKVPair // err is any error on update err error } // WatchUpdateQueue is a producer consumer queue. type WatchUpdateQueue interface { // Enqueue will enqueue an update. It is non-blocking. 
Enqueue(update *watchUpdate) // Dequeue will either return an element from front of the queue or // will block until element becomes available Dequeue() *watchUpdate } // WatchDistributor distributes updates to the watchers type WatchDistributor interface { // Add creates a new watch queue to send updates Add() WatchUpdateQueue // Remove removes an existing watch queue Remove(WatchUpdateQueue) // NewUpdate is invoked to distribute a new update NewUpdate(w *watchUpdate) } // distributor implements WatchDistributor interface type distributor struct { sync.Mutex // updates is the list of latest few updates updates []*watchUpdate // watchers watch for updates watchers []WatchUpdateQueue } // NewWatchDistributor returns a new instance of // the WatchDistrubtor interface func NewWatchDistributor() WatchDistributor { return &distributor{} } func (d *distributor) Add() WatchUpdateQueue { d.Lock() defer d.Unlock() q := NewWatchUpdateQueue() for _, u := range d.updates { q.Enqueue(u) } d.watchers = append(d.watchers, q) return q } func (d *distributor) Remove(r WatchUpdateQueue) { d.Lock() defer d.Unlock() for i, q := range d.watchers { if q == r { copy(d.watchers[i:], d.watchers[i+1:]) d.watchers[len(d.watchers)-1] = nil d.watchers = d.watchers[:len(d.watchers)-1] } } } func (d *distributor) NewUpdate(u *watchUpdate) { d.Lock() defer d.Unlock() // collect update d.updates = append(d.updates, u) if len(d.updates) > 100 { d.updates = d.updates[100:] } // send update to watchers for _, q := range d.watchers { q.Enqueue(u) } } // watchQueue implements WatchUpdateQueue interface for watchUpdates type watchQueue struct { // updates is the list of updates updates []*watchUpdate // m is the mutex to protect updates m *sync.Mutex // cv is used to coordinate the producer-consumer threads cv *sync.Cond } // NewWatchUpdateQueue returns an instance of WatchUpdateQueue func NewWatchUpdateQueue() WatchUpdateQueue { mtx := &sync.Mutex{} return &watchQueue{ m: mtx, cv: sync.NewCond(mtx), updates: make([]*watchUpdate, 0)} } func (w *watchQueue) Dequeue() *watchUpdate { w.m.Lock() for { if len(w.updates) > 0 { update := w.updates[0] w.updates = w.updates[1:] w.m.Unlock() return update } w.cv.Wait() } } // Enqueue enqueues and never blocks func (w *watchQueue) Enqueue(update *watchUpdate) { w.m.Lock() w.updates = append(w.updates, update) w.cv.Signal() w.m.Unlock() } type watchData struct { cb kvdb.WatchCB opaque interface{} waitIndex uint64 } // New constructs a new kvdb.Kvdb. 
func New( domain string, machines []string, options map[string]string, fatalErrorCb kvdb.FatalErrorCB, ) (kvdb.Kvdb, error) { if domain != "" && !strings.HasSuffix(domain, "/") { domain = domain + "/" } mem := &memKV{ BaseKvdb: common.BaseKvdb{FatalCb: fatalErrorCb}, m: make(map[string]*memKVPair), dist: NewWatchDistributor(), domain: domain, Controller: kvdb.ControllerNotSupported, locks: make(map[string]chan int), } var noByte bool if _, noByte = options[KvUseInterface]; noByte { mem.noByte = true } if _, ok := options[KvSnap]; ok && !noByte { return &snapMem{memKV: mem}, nil } else if ok && noByte { return nil, ErrSnapWithInterfaceNotSupported } return mem, nil } // Version returns the supported version of the mem implementation func Version(url string, kvdbOptions map[string]string) (string, error) { return kvdb.MemVersion1, nil } func (kv *memKV) String() string { return Name } func (kv *memKV) Capabilities() int { return kvdb.KVCapabilityOrderedUpdates } func (kv *memKV) get(key string) (*memKVPair, error) { key = kv.domain + key v, ok := kv.m[key] if !ok { return nil, kvdb.ErrNotFound } return v, nil } func (kv *memKV) exists(key string) (*memKVPair, error) { return kv.get(key) } func (kv *memKV) Get(key string) (*kvdb.KVPair, error) { kv.mutex.Lock() defer kv.mutex.Unlock() v, err := kv.get(key) if err != nil { return nil, err } return v.copy(), nil } func (kv *memKV) Snapshot(prefixes []string, consistent bool) (kvdb.Kvdb, uint64, error) { kv.mutex.Lock() defer kv.mutex.Unlock() _, err := kv.put(bootstrapKey, time.Now().UnixNano(), 0) if err != nil { return nil, 0, fmt.Errorf("Failed to create snap bootstrap key: %v", err) } data := make(map[string]*memKVPair) for key, value := range kv.m { if strings.Contains(key, "/_") { continue } found := false for _, prefix := range prefixes { prefix = kv.domain + prefix if strings.HasPrefix(key, prefix) { found = true break } } if !found { continue } snap := &memKVPair{} snap.KVPair = value.KVPair cpy := value.copy() snap.Value = make([]byte, len(cpy.Value)) copy(snap.Value, cpy.Value) data[key] = snap } highestKvPair, _ := kv.delete(bootstrapKey) // Snapshot only data, watches are not copied. 
return &snapMem{ &memKV{ m: data, domain: kv.domain, }, }, highestKvPair.ModifiedIndex, nil } func (kv *memKV) put( key string, value interface{}, ttl uint64, ) (*kvdb.KVPair, error) { var ( kvp *memKVPair b []byte err error ival interface{} ) suffix := key key = kv.domain + suffix index := atomic.AddUint64(&kv.index, 1) // Either set bytes or interface value if !kv.noByte { b, err = common.ToBytes(value) if err != nil { return nil, err } } else { ival = value } if old, ok := kv.m[key]; ok { old.Value = b old.ivalue = ival old.Action = kvdb.KVSet old.ModifiedIndex = index old.KVDBIndex = index kvp = old } else { kvp = &memKVPair{ KVPair: kvdb.KVPair{ Key: key, Value: b, TTL: int64(ttl), KVDBIndex: index, ModifiedIndex: index, CreatedIndex: index, Action: kvdb.KVCreate, }, ivalue: ival, } kv.m[key] = kvp } kv.normalize(&kvp.KVPair) kv.dist.NewUpdate(&watchUpdate{key, *kvp, nil}) if ttl != 0 { time.AfterFunc(time.Second*time.Duration(ttl), func() { // TODO: handle error kv.mutex.Lock() defer kv.mutex.Unlock() _, _ = kv.delete(suffix) }) } return kvp.copy(), nil } func (kv *memKV) Put( key string, value interface{}, ttl uint64, ) (*kvdb.KVPair, error) { kv.mutex.Lock() defer kv.mutex.Unlock() return kv.put(key, value, ttl) } func (kv *memKV) GetVal(key string, v interface{}) (*kvdb.KVPair, error) { kv.mutex.Lock() defer kv.mutex.Unlock() kvp, err := kv.get(key) if err != nil { return nil, err } cpy := kvp.copy() err = json.Unmarshal(cpy.Value, v) return cpy, err } func (kv *memKV) Create( key string, value interface{}, ttl uint64, ) (*kvdb.KVPair, error) { kv.mutex.Lock() defer kv.mutex.Unlock() result, err := kv.exists(key) if err != nil { return kv.put(key, value, ttl) } return &result.KVPair, kvdb.ErrExist } func (kv *memKV) Update( key string, value interface{}, ttl uint64, ) (*kvdb.KVPair, error) { kv.mutex.Lock() defer kv.mutex.Unlock() if _, err := kv.exists(key); err != nil { return nil, kvdb.ErrNotFound } return kv.put(key, value, ttl) } func (kv *memKV) Enumerate(prefix string) (kvdb.KVPairs, error) { kv.mutex.Lock() defer kv.mutex.Unlock() return kv.enumerate(prefix) } // enumerate returns a list of values and creates a copy if specified func (kv *memKV) enumerate(prefix string) (kvdb.KVPairs, error) { var kvp = make(kvdb.KVPairs, 0, 100) prefix = kv.domain + prefix for k, v := range kv.m { if strings.HasPrefix(k, prefix) && !strings.Contains(k, "/_") { kvpLocal := v.copy() kvpLocal.Key = k kv.normalize(kvpLocal) kvp = append(kvp, kvpLocal) } } return kvp, nil } func (kv *memKV) delete(key string) (*kvdb.KVPair, error) { kvp, err := kv.get(key) if err != nil { return nil, err } kvp.KVDBIndex = atomic.AddUint64(&kv.index, 1) kvp.ModifiedIndex = kvp.KVDBIndex kvp.Action = kvdb.KVDelete delete(kv.m, kv.domain+key) kv.dist.NewUpdate(&watchUpdate{kv.domain + key, *kvp, nil}) return &kvp.KVPair, nil } func (kv *memKV) Delete(key string) (*kvdb.KVPair, error) { kv.mutex.Lock() defer kv.mutex.Unlock() return kv.delete(key) } func (kv *memKV) DeleteTree(prefix string) error { kv.mutex.Lock() defer kv.mutex.Unlock() if len(prefix) > 0 && !strings.HasSuffix(prefix, kvdb.DefaultSeparator) { prefix += kvdb.DefaultSeparator } kvp, err := kv.enumerate(prefix) if err != nil { return err } for _, v := range kvp { // TODO: multiple errors if _, iErr := kv.delete(v.Key); iErr != nil { err = iErr } } return err } func (kv *memKV) Keys(prefix, sep string) ([]string, error) { if "" == sep { sep = "/" } prefix = kv.domain + prefix lenPrefix := len(prefix) lenSep := len(sep) if prefix[lenPrefix-lenSep:] 
!= sep {
		prefix += sep
		lenPrefix += lenSep
	}

	seen := make(map[string]bool)
	kv.mutex.Lock()
	defer kv.mutex.Unlock()
	for k := range kv.m {
		if strings.HasPrefix(k, prefix) && !strings.Contains(k, "/_") {
			key := k[lenPrefix:]
			if idx := strings.Index(key, sep); idx > 0 {
				key = key[:idx]
			}
			seen[key] = true
		}
	}
	retList := make([]string, len(seen))
	i := 0
	for k := range seen {
		retList[i] = k
		i++
	}
	return retList, nil
}

func (kv *memKV) CompareAndSet(
	kvp *kvdb.KVPair,
	flags kvdb.KVFlags,
	prevValue []byte,
) (*kvdb.KVPair, error) {
	kv.mutex.Lock()
	defer kv.mutex.Unlock()

	result, err := kv.exists(kvp.Key)
	if err != nil {
		return nil, err
	}
	if prevValue != nil {
		cpy := result.copy()
		if !bytes.Equal(cpy.Value, prevValue) {
			return nil, kvdb.ErrValueMismatch
		}
	}
	if flags == kvdb.KVModifiedIndex {
		if kvp.ModifiedIndex != result.ModifiedIndex {
			return nil, kvdb.ErrValueMismatch
		}
	}
	return kv.put(kvp.Key, kvp.Value, 0)
}

func (kv *memKV) CompareAndDelete(
	kvp *kvdb.KVPair,
	flags kvdb.KVFlags,
) (*kvdb.KVPair, error) {
	kv.mutex.Lock()
	defer kv.mutex.Unlock()

	result, err := kv.exists(kvp.Key)
	if err != nil {
		return nil, err
	}
	if flags&kvdb.KVModifiedIndex > 0 && result.ModifiedIndex != kvp.ModifiedIndex {
		return nil, kvdb.ErrModified
	} else {
		cpy := result.copy()
		if !bytes.Equal(cpy.Value, kvp.Value) {
			return nil, kvdb.ErrNotFound
		}
	}
	return kv.delete(kvp.Key)
}

func (kv *memKV) WatchKey(
	key string,
	waitIndex uint64,
	opaque interface{},
	cb kvdb.WatchCB,
) error {
	kv.mutex.Lock()
	defer kv.mutex.Unlock()
	key = kv.domain + key
	go kv.watchCb(kv.dist.Add(), key,
		&watchData{cb: cb, waitIndex: waitIndex, opaque: opaque},
		false)
	return nil
}

func (kv *memKV) WatchTree(
	prefix string,
	waitIndex uint64,
	opaque interface{},
	cb kvdb.WatchCB,
) error {
	kv.mutex.Lock()
	defer kv.mutex.Unlock()
	prefix = kv.domain + prefix
	go kv.watchCb(kv.dist.Add(), prefix,
		&watchData{cb: cb, waitIndex: waitIndex, opaque: opaque},
		true)
	return nil
}

func (kv *memKV) Lock(key string) (*kvdb.KVPair, error) {
	return kv.LockWithID(key, "locked")
}

func (kv *memKV) LockWithID(
	key string,
	lockerID string,
) (*kvdb.KVPair, error) {
	return kv.LockWithTimeout(key, lockerID, kvdb.DefaultLockTryDuration,
		kv.GetLockTimeout())
}

func (kv *memKV) LockWithTimeout(
	key string,
	lockerID string,
	lockTryDuration time.Duration,
	lockHoldDuration time.Duration,
) (*kvdb.KVPair, error) {
	key = kv.domain + key
	duration := time.Second
	// The TTL passed to Create is expressed in seconds, so convert the
	// 3-second hold duration instead of passing raw nanoseconds.
	lockTTLSec := uint64((duration * 3) / time.Second)
	result, err := kv.Create(key, lockerID, lockTTLSec)
	startTime := time.Now()
	for count := 0; err != nil; count++ {
		time.Sleep(duration)
		result, err = kv.Create(key, lockerID, lockTTLSec)
		if err != nil && count > 0 && count%15 == 0 {
			var currLockerID string
			// GetVal unmarshals into its argument, so it needs a pointer.
			if _, errGet := kv.GetVal(key, &currLockerID); errGet == nil {
				logrus.Infof("Lock %v locked for %v seconds, tag: %v",
					key, count, currLockerID)
			}
		}
		if err != nil && time.Since(startTime) > lockTryDuration {
			return nil, err
		}
	}
	if err != nil {
		return nil, err
	}

	lockChan := make(chan int)
	kv.mutex.Lock()
	kv.locks[key] = lockChan
	kv.mutex.Unlock()
	if lockHoldDuration > 0 {
		go func() {
			timeout := time.After(lockHoldDuration)
			for {
				select {
				case <-timeout:
					kv.LockTimedout(key)
				case <-lockChan:
					return
				}
			}
		}()
	}

	return result, err
}

func (kv *memKV) Unlock(kvp *kvdb.KVPair) error {
	kv.mutex.Lock()
	lockChan, ok := kv.locks[kvp.Key]
	if ok {
		delete(kv.locks, kvp.Key)
	}
	kv.mutex.Unlock()
	if lockChan != nil {
		close(lockChan)
	}
	_, err := kv.CompareAndDelete(kvp, kvdb.KVFlags(0))
	return err
}

func (kv *memKV) EnumerateWithSelect(
	prefix string,
	enumerateSelect
kvdb.EnumerateSelect, copySelect kvdb.CopySelect, ) ([]interface{}, error) { if enumerateSelect == nil || copySelect == nil { return nil, ErrIllegalSelect } kv.mutex.Lock() defer kv.mutex.Unlock() var kvi []interface{} prefix = kv.domain + prefix for k, v := range kv.m { if strings.HasPrefix(k, prefix) && !strings.Contains(k, "/_") { if enumerateSelect(v.ivalue) { cpy := copySelect(v.ivalue) if cpy == nil { return nil, ErrIllegalSelect } kvi = append(kvi, cpy) } } } return kvi, nil } func (kv *memKV) EnumerateKVPWithSelect( prefix string, enumerateSelect kvdb.EnumerateKVPSelect, copySelect kvdb.CopyKVPSelect, ) (kvdb.KVPairs, error) { if enumerateSelect == nil || copySelect == nil { return nil, ErrIllegalSelect } kv.mutex.Lock() defer kv.mutex.Unlock() var kvi kvdb.KVPairs prefix = kv.domain + prefix for k, v := range kv.m { if strings.HasPrefix(k, prefix) && !strings.Contains(k, "/_") { if enumerateSelect(&v.KVPair, v.ivalue) { cpy := copySelect(&v.KVPair, v.ivalue) if cpy == nil { return nil, ErrIllegalSelect } kvi = append(kvi, cpy) } } } return kvi, nil } func (kv *memKV) GetWithCopy( key string, copySelect kvdb.CopySelect, ) (interface{}, error) { if copySelect == nil { return nil, ErrIllegalSelect } kv.mutex.Lock() defer kv.mutex.Unlock() kvp, err := kv.get(key) if err != nil { return nil, err } return copySelect(kvp.ivalue), nil } func (kv *memKV) TxNew() (kvdb.Tx, error) { return nil, kvdb.ErrNotSupported } func (kv *memKV) normalize(kvp *kvdb.KVPair) { kvp.Key = strings.TrimPrefix(kvp.Key, kv.domain) } func copyWatchKeys(w map[string]*watchData) []string { keys := make([]string, len(w)) i := 0 for key := range w { keys[i] = key i++ } return keys } func (kv *memKV) watchCb( q WatchUpdateQueue, prefix string, v *watchData, treeWatch bool, ) { for { update := q.Dequeue() if ((treeWatch && strings.HasPrefix(update.key, prefix)) || (!treeWatch && update.key == prefix)) && (v.waitIndex == 0 || v.waitIndex < update.kvp.ModifiedIndex) { kvpCopy := update.kvp.copy() err := v.cb(update.key, v.opaque, kvpCopy, update.err) if err != nil { _ = v.cb("", v.opaque, nil, kvdb.ErrWatchStopped) kv.dist.Remove(q) return } } } } func (kv *memKV) SnapPut(snapKvp *kvdb.KVPair) (*kvdb.KVPair, error) { return nil, kvdb.ErrNotSupported } func (kv *snapMem) SnapPut(snapKvp *kvdb.KVPair) (*kvdb.KVPair, error) { var kvp *memKVPair key := kv.domain + snapKvp.Key kv.mutex.Lock() defer kv.mutex.Unlock() if old, ok := kv.m[key]; ok { old.Value = snapKvp.Value old.Action = kvdb.KVSet old.ModifiedIndex = snapKvp.ModifiedIndex old.KVDBIndex = snapKvp.KVDBIndex kvp = old } else { kvp = &memKVPair{ KVPair: kvdb.KVPair{ Key: key, Value: snapKvp.Value, TTL: 0, KVDBIndex: snapKvp.KVDBIndex, ModifiedIndex: snapKvp.ModifiedIndex, CreatedIndex: snapKvp.CreatedIndex, Action: kvdb.KVCreate, }, } kv.m[key] = kvp } kv.normalize(&kvp.KVPair) return &kvp.KVPair, nil } func (kv *snapMem) Put( key string, value interface{}, ttl uint64, ) (*kvdb.KVPair, error) { return nil, ErrSnap } func (kv *snapMem) Create( key string, value interface{}, ttl uint64, ) (*kvdb.KVPair, error) { return nil, ErrSnap } func (kv *snapMem) Update( key string, value interface{}, ttl uint64, ) (*kvdb.KVPair, error) { return nil, ErrSnap } func (kv *snapMem) Delete(snapKey string) (*kvdb.KVPair, error) { key := kv.domain + snapKey kv.mutex.Lock() defer kv.mutex.Unlock() kvp, ok := kv.m[key] if !ok { return nil, kvdb.ErrNotFound } kvPair := kvp.KVPair delete(kv.m, key) return &kvPair, nil } func (kv *snapMem) DeleteTree(prefix string) error { return ErrSnap 
} func (kv *snapMem) CompareAndSet( kvp *kvdb.KVPair, flags kvdb.KVFlags, prevValue []byte, ) (*kvdb.KVPair, error) { return nil, ErrSnap } func (kv *snapMem) CompareAndDelete( kvp *kvdb.KVPair, flags kvdb.KVFlags, ) (*kvdb.KVPair, error) { return nil, ErrSnap } func (kv *snapMem) WatchKey( key string, waitIndex uint64, opaque interface{}, watchCB kvdb.WatchCB, ) error { return ErrSnap } func (kv *snapMem) WatchTree( prefix string, waitIndex uint64, opaque interface{}, watchCB kvdb.WatchCB, ) error { return ErrSnap } func (kv *memKV) AddUser(username string, password string) error { return kvdb.ErrNotSupported } func (kv *memKV) RemoveUser(username string) error { return kvdb.ErrNotSupported } func (kv *memKV) GrantUserAccess( username string, permType kvdb.PermissionType, subtree string, ) error { return kvdb.ErrNotSupported } func (kv *memKV) RevokeUsersAccess( username string, permType kvdb.PermissionType, subtree string, ) error { return kvdb.ErrNotSupported } func (kv *memKV) Serialize() ([]byte, error) { kvps, err := kv.Enumerate("") if err != nil { return nil, err } return kv.SerializeAll(kvps) } func (kv *memKV) Deserialize(b []byte) (kvdb.KVPairs, error) { return kv.DeserializeAll(b) }
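
// Example usage (illustrative sketch; the domain, key and value below are
// hypothetical — only New, Put, Get and Delete from this file are exercised,
// and error handling is elided):
//
//	kv, _ := New("pwx/", nil, nil, nil)
//	_, _ = kv.Put("key1", []byte("value1"), 0)
//	kvp, _ := kv.Get("key1") // kvp.Value == []byte("value1")
//	_, _ = kv.Delete("key1")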
libopenstorage/stork
vendor/github.com/portworx/kvdb/mem/kv_mem.go
Go
apache-2.0
20,068
/* * Copyright 2000-2017 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.util.ref; import com.intellij.util.containers.ContainerUtil; import org.jetbrains.annotations.TestOnly; import java.beans.Introspector; import java.lang.ref.ReferenceQueue; import java.lang.ref.SoftReference; import java.lang.ref.WeakReference; import java.util.ArrayList; public class GCUtil { /** * Try to force VM to collect all the garbage along with soft- and weak-references. * Method doesn't guarantee to succeed, and should not be used in the production code. */ @TestOnly public static void tryForceGC() { tryGcSoftlyReachableObjects(); WeakReference<Object> weakReference = new WeakReference<Object>(new Object()); do { System.gc(); } while (weakReference.get() != null); } /** * Try to force VM to collect soft references if possible. * Method doesn't guarantee to succeed, and should not be used in the production code. * Commits / hours optimized method code: 5 / 3 */ @TestOnly public static void tryGcSoftlyReachableObjects() { //long started = System.nanoTime(); ReferenceQueue<Object> q = new ReferenceQueue<Object>(); SoftReference<Object> ref = new SoftReference<Object>(new Object(), q); ArrayList<SoftReference<?>> list = ContainerUtil.newArrayListWithCapacity(100 + useReference(ref)); System.gc(); final long freeMemory = Runtime.getRuntime().freeMemory(); int i = 0; while (q.poll() == null) { // full gc is caused by allocation of large enough array below, SoftReference will be cleared after two full gc int bytes = Math.min((int)(freeMemory * 0.05), Integer.MAX_VALUE / 2); list.add(new SoftReference<Object>(new byte[bytes])); i++; if (i > 1000) { //noinspection UseOfSystemOutOrSystemErr System.out.println("GCUtil.tryGcSoftlyReachableObjects: giving up"); break; } } // use ref is important as to loop to finish with several iterations: long runs of the method (~80 run of PsiModificationTrackerTest) // discovered 'ref' being collected and loop iterated 100 times taking a lot of time list.ensureCapacity(list.size() + useReference(ref)); // do not leave a chance for our created SoftReference's content to lie around until next full GC's for(SoftReference createdReference:list) createdReference.clear(); //System.out.println("Done gc'ing refs:" + ((System.nanoTime() - started) / 1000000)); } private static int useReference(SoftReference<Object> ref) { Object o = ref.get(); return o == null ? 0 : Math.abs(o.hashCode()) % 10; } /** * Using java beans (e.g. Groovy does it) results in all referenced class infos being cached in ThreadGroupContext. A valid fix * would be to hold BeanInfo objects on soft references, but that should be done in JDK. So let's clear this cache manually for now, * in clients that are known to create bean infos. */ public static void clearBeanInfoCache() { Introspector.flushCaches(); } }
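
// Example usage in a test (illustrative sketch; assumes a JUnit-style
// assertion is available):
//
//   WeakReference<Object> ref = new WeakReference<>(new Object());
//   GCUtil.tryForceGC();
//   assertNull("weak reference should have been collected", ref.get());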
goodwinnk/intellij-community
platform/util/src/com/intellij/util/ref/GCUtil.java
Java
apache-2.0
3,604
/**
 *
 * Copyright 2003-2007 Jive Software.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jivesoftware.smackx.bookmarks;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.jivesoftware.smack.util.ParserUtils;
import org.jivesoftware.smack.util.XmlStringBuilder;
import org.jivesoftware.smack.xml.XmlPullParser;
import org.jivesoftware.smack.xml.XmlPullParserException;
import org.jivesoftware.smackx.iqprivate.packet.PrivateData;
import org.jivesoftware.smackx.iqprivate.provider.PrivateDataProvider;
import org.jxmpp.jid.EntityBareJid;
import org.jxmpp.jid.parts.Resourcepart;

/**
 * Bookmarks is used for storing and retrieving URLs and conference rooms.
 * Bookmark Storage (XEP-0048) defines a protocol for the storage of bookmarks to conference rooms and other entities
 * in a Jabber user's account.
 * See the following code sample for saving Bookmarks:
 * <pre>
 * XMPPConnection con = new XMPPTCPConnection("jabber.org");
 * con.login("john", "doe");
 * Bookmarks bookmarks = new Bookmarks();
 *
 * // Bookmark a URL
 * BookmarkedURL url = new BookmarkedURL();
 * url.setName("Google");
 * url.setURL("http://www.jivesoftware.com");
 * bookmarks.addURL(url);
 *
 * // Bookmark a Conference room.
 * BookmarkedConference conference = new BookmarkedConference();
 * conference.setName("My Favorite Room");
 * conference.setAutoJoin(true);
 * conference.setJID("dev@conference.jivesoftware.com");
 * bookmarks.addConference(conference);
 *
 * // Save Bookmarks using PrivateDataManager.
 * PrivateDataManager manager = new PrivateDataManager(con);
 * manager.setPrivateData(bookmarks);
 *
 * LastActivity activity = LastActivity.getLastActivity(con, "xray@jabber.org");
 * </pre>
 *
 * @author Derek DeMoro
 */
public class Bookmarks implements PrivateData {

    public static final String NAMESPACE = "storage:bookmarks";
    public static final String ELEMENT = "storage";

    private final List<BookmarkedURL> bookmarkedURLS;
    private final List<BookmarkedConference> bookmarkedConferences;

    /**
     * Required Empty Constructor to use Bookmarks.
     */
    public Bookmarks() {
        bookmarkedURLS = new ArrayList<>();
        bookmarkedConferences = new ArrayList<>();
    }

    /**
     * Adds a BookmarkedURL.
     *
     * @param bookmarkedURL the bookmarked URL.
     */
    public void addBookmarkedURL(BookmarkedURL bookmarkedURL) {
        bookmarkedURLS.add(bookmarkedURL);
    }

    /**
     * Removes a bookmarked URL.
     *
     * @param bookmarkedURL the bookmarked URL to remove.
     */
    public void removeBookmarkedURL(BookmarkedURL bookmarkedURL) {
        bookmarkedURLS.remove(bookmarkedURL);
    }

    /**
     * Removes all BookmarkedURLs from user's bookmarks.
     */
    public void clearBookmarkedURLS() {
        bookmarkedURLS.clear();
    }

    /**
     * Add a BookmarkedConference to bookmarks.
     *
     * @param bookmarkedConference the conference to add.
     */
    public void addBookmarkedConference(BookmarkedConference bookmarkedConference) {
        bookmarkedConferences.add(bookmarkedConference);
    }

    /**
     * Removes a BookmarkedConference.
     *
     * @param bookmarkedConference the BookmarkedConference to remove.
*/ public void removeBookmarkedConference(BookmarkedConference bookmarkedConference) { bookmarkedConferences.remove(bookmarkedConference); } /** * Removes all BookmarkedConferences from Bookmarks. */ public void clearBookmarkedConferences() { bookmarkedConferences.clear(); } /** * Returns a Collection of all Bookmarked URLs for this user. * * @return a collection of all Bookmarked URLs. */ public List<BookmarkedURL> getBookmarkedURLS() { return bookmarkedURLS; } /** * Returns a Collection of all Bookmarked Conference for this user. * * @return a collection of all Bookmarked Conferences. */ public List<BookmarkedConference> getBookmarkedConferences() { return bookmarkedConferences; } /** * Returns the root element name. * * @return the element name. */ @Override public String getElementName() { return ELEMENT; } /** * Returns the root element XML namespace. * * @return the namespace. */ @Override public String getNamespace() { return NAMESPACE; } /** * Returns the XML representation of the PrivateData. * * @return the private data as XML. */ @Override public XmlStringBuilder toXML() { XmlStringBuilder buf = new XmlStringBuilder(); buf.halfOpenElement(ELEMENT).xmlnsAttribute(NAMESPACE).rightAngleBracket(); for (BookmarkedURL urlStorage : getBookmarkedURLS()) { if (urlStorage.isShared()) { continue; } buf.halfOpenElement("url").attribute("name", urlStorage.getName()).attribute("url", urlStorage.getURL()); buf.condAttribute(urlStorage.isRss(), "rss", "true"); buf.closeEmptyElement(); } // Add Conference additions for (BookmarkedConference conference : getBookmarkedConferences()) { if (conference.isShared()) { continue; } buf.halfOpenElement("conference"); buf.attribute("name", conference.getName()); buf.attribute("autojoin", Boolean.toString(conference.isAutoJoin())); buf.attribute("jid", conference.getJid()); buf.rightAngleBracket(); buf.optElement("nick", conference.getNickname()); buf.optElement("password", conference.getPassword()); buf.closeElement("conference"); } buf.closeElement(ELEMENT); return buf; } /** * The IQ Provider for BookmarkStorage. * * @author Derek DeMoro */ public static class Provider implements PrivateDataProvider { /** * Empty Constructor for PrivateDataProvider. 
*/ public Provider() { super(); } @Override public PrivateData parsePrivateData(XmlPullParser parser) throws XmlPullParserException, IOException { Bookmarks storage = new Bookmarks(); boolean done = false; while (!done) { XmlPullParser.Event eventType = parser.next(); if (eventType == XmlPullParser.Event.START_ELEMENT && "url".equals(parser.getName())) { final BookmarkedURL urlStorage = getURLStorage(parser); if (urlStorage != null) { storage.addBookmarkedURL(urlStorage); } } else if (eventType == XmlPullParser.Event.START_ELEMENT && "conference".equals(parser.getName())) { final BookmarkedConference conference = getConferenceStorage(parser); storage.addBookmarkedConference(conference); } else if (eventType == XmlPullParser.Event.END_ELEMENT && "storage".equals(parser.getName())) { done = true; } } return storage; } } private static BookmarkedURL getURLStorage(XmlPullParser parser) throws IOException, XmlPullParserException { String name = parser.getAttributeValue("", "name"); String url = parser.getAttributeValue("", "url"); String rssString = parser.getAttributeValue("", "rss"); boolean rss = rssString != null && "true".equals(rssString); BookmarkedURL urlStore = new BookmarkedURL(url, name, rss); boolean done = false; while (!done) { XmlPullParser.Event eventType = parser.next(); if (eventType == XmlPullParser.Event.START_ELEMENT && "shared_bookmark".equals(parser.getName())) { urlStore.setShared(true); } else if (eventType == XmlPullParser.Event.END_ELEMENT && "url".equals(parser.getName())) { done = true; } } return urlStore; } private static BookmarkedConference getConferenceStorage(XmlPullParser parser) throws XmlPullParserException, IOException { String name = parser.getAttributeValue("", "name"); boolean autojoin = ParserUtils.getBooleanAttribute(parser, "autojoin", false); EntityBareJid jid = ParserUtils.getBareJidAttribute(parser); BookmarkedConference conf = new BookmarkedConference(jid); conf.setName(name); conf.setAutoJoin(autojoin); // Check for nickname boolean done = false; while (!done) { XmlPullParser.Event eventType = parser.next(); if (eventType == XmlPullParser.Event.START_ELEMENT && "nick".equals(parser.getName())) { String nickString = parser.nextText(); conf.setNickname(Resourcepart.from(nickString)); } else if (eventType == XmlPullParser.Event.START_ELEMENT && "password".equals(parser.getName())) { conf.setPassword(parser.nextText()); } else if (eventType == XmlPullParser.Event.START_ELEMENT && "shared_bookmark".equals(parser.getName())) { conf.setShared(true); } else if (eventType == XmlPullParser.Event.END_ELEMENT && "conference".equals(parser.getName())) { done = true; } } return conf; } }
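
// Example: registering this provider so stored bookmarks can be parsed
// (illustrative sketch; `connection` is a hypothetical authenticated
// XMPPConnection, and the PrivateDataManager calls are from Smack's
// smack-extensions API):
//
//   PrivateDataManager.addPrivateDataProvider(
//           Bookmarks.ELEMENT, Bookmarks.NAMESPACE, new Bookmarks.Provider());
//   PrivateDataManager manager = PrivateDataManager.getInstanceFor(connection);
//   Bookmarks bookmarks = (Bookmarks) manager.getPrivateData(
//           Bookmarks.ELEMENT, Bookmarks.NAMESPACE);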
igniterealtime/Smack
smack-extensions/src/main/java/org/jivesoftware/smackx/bookmarks/Bookmarks.java
Java
apache-2.0
10,216
/* * Copyright (C) FuseSource, Inc. * http://fusesource.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @XmlSchema(namespace = "http://fuse.fusesource.org/schema/bai", xmlns = { @XmlNs(namespaceURI = Namespaces.DEFAULT_NAMESPACE, prefix = "c"), @XmlNs(namespaceURI = AuditConstants.AUDIT_NAMESPACE, prefix = AuditConstants.EXPRESSION_NAMESPACE_PREFIX) }, elementFormDefault = javax.xml.bind.annotation.XmlNsForm.QUALIFIED) package org.fusesource.bai.config; import org.apache.camel.builder.xml.Namespaces; import org.fusesource.bai.AuditConstants; import javax.xml.bind.annotation.XmlNs; import javax.xml.bind.annotation.XmlSchema;
janstey/fuse
bai/bai-core/src/main/java/org/fusesource/bai/config/package-info.java
Java
apache-2.0
1,208
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2011.06.04 at 07:58:30 PM BST
//

package elsevier.jaxb.math.mathml;

import java.math.BigInteger;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;


/**
 * <p>Java class for mglyph.type complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="mglyph.type">
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;attGroup ref="{http://www.w3.org/1998/Math/MathML}mglyph.attlist"/>
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "mglyph.type")
@XmlRootElement(name = "mglyph")
public class Mglyph {

    @XmlAttribute
    protected String alt;
    @XmlAttribute
    protected String fontfamily;
    @XmlAttribute
    @XmlSchemaType(name = "positiveInteger")
    protected BigInteger index;

    /**
     * Gets the value of the alt property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getAlt() {
        return alt;
    }

    /**
     * Sets the value of the alt property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setAlt(String value) {
        this.alt = value;
    }

    /**
     * Gets the value of the fontfamily property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getFontfamily() {
        return fontfamily;
    }

    /**
     * Sets the value of the fontfamily property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setFontfamily(String value) {
        this.fontfamily = value;
    }

    /**
     * Gets the value of the index property.
     *
     * @return
     *     possible object is
     *     {@link BigInteger }
     *
     */
    public BigInteger getIndex() {
        return index;
    }

    /**
     * Sets the value of the index property.
     *
     * @param value
     *     allowed object is
     *     {@link BigInteger }
     *
     */
    public void setIndex(BigInteger value) {
        this.index = value;
    }

}
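
// Example: emitting an <mglyph/> element with standard JAXB (illustrative
// sketch; the attribute values are hypothetical):
//
//   Mglyph m = new Mglyph();
//   m.setAlt("integral sign");
//   m.setFontfamily("STIX");
//   m.setIndex(BigInteger.valueOf(42));
//   JAXBContext ctx = JAXBContext.newInstance(Mglyph.class);
//   ctx.createMarshaller().marshal(m, System.out);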
alexgarciac/biotea
src/elsevier/jaxb/math/mathml/Mglyph.java
Java
apache-2.0
2,890
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.util.indexing; import com.intellij.openapi.application.ReadAction; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.fileTypes.FileType; import com.intellij.openapi.fileTypes.ex.FileTypeManagerEx; import com.intellij.openapi.project.Project; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.openapi.vfs.VirtualFileWithId; import com.intellij.openapi.vfs.newvfs.impl.VirtualFileSystemEntry; import com.intellij.psi.search.FileTypeIndex; import com.intellij.util.containers.ContainerUtil; import com.intellij.util.indexing.projectFilter.ProjectIndexableFilesFilterHolder; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; final class UnindexedFilesFinder { private static final Logger LOG = Logger.getInstance(UnindexedFilesFinder.class); private final Project myProject; private final boolean myDoTraceForFilesToBeIndexed = FileBasedIndexImpl.LOG.isTraceEnabled(); private final FileBasedIndexImpl myFileBasedIndex; private final UpdatableIndex<FileType, Void, FileContent> myFileTypeIndex; private final Collection<FileBasedIndexInfrastructureExtension.FileIndexingStatusProcessor> myStateProcessors; private final @NotNull ProjectIndexableFilesFilterHolder myIndexableFilesFilterHolder; private final boolean myShouldProcessUpToDateFiles; UnindexedFilesFinder(@NotNull Project project, @NotNull FileBasedIndexImpl fileBasedIndex) { myProject = project; myFileBasedIndex = fileBasedIndex; myFileTypeIndex = fileBasedIndex.getIndex(FileTypeIndex.NAME); myStateProcessors = FileBasedIndexInfrastructureExtension .EP_NAME .extensions() .map(ex -> ex.createFileIndexingStatusProcessor(project)) .filter(Objects::nonNull) .collect(Collectors.toList()); myShouldProcessUpToDateFiles = ContainerUtil.find(myStateProcessors, p -> p.shouldProcessUpToDateFiles()) != null; myIndexableFilesFilterHolder = fileBasedIndex.getIndexableFilesFilterHolder(); } @Nullable("null if the file is not subject for indexing (a directory, invalid, etc.)") public UnindexedFileStatus getFileStatus(@NotNull VirtualFile file) { return ReadAction.compute(() -> { if (myProject.isDisposed() || !file.isValid() || !(file instanceof VirtualFileWithId)) { return null; } AtomicBoolean indexesWereProvidedByInfrastructureExtension = new AtomicBoolean(); AtomicLong timeProcessingUpToDateFiles = new AtomicLong(); AtomicLong timeUpdatingContentLessIndexes = new AtomicLong(); AtomicLong timeIndexingWithoutContent = new AtomicLong(); IndexedFileImpl indexedFile = new IndexedFileImpl(file, myProject); int inputId = FileBasedIndex.getFileId(file); boolean fileWereJustAdded = myIndexableFilesFilterHolder.addFileId(inputId, myProject); if (file instanceof VirtualFileSystemEntry && ((VirtualFileSystemEntry)file).isFileIndexed()) { boolean wasInvalidated = false; if (fileWereJustAdded) { List<ID<?, ?>> ids = IndexingStamp.getNontrivialFileIndexedStates(inputId); for (FileBasedIndexInfrastructureExtension.FileIndexingStatusProcessor processor : myStateProcessors) { for (ID<?, ?> id : ids) { if (myFileBasedIndex.needsFileContentLoading(id)) { long nowTime = System.nanoTime(); try { if 
(!processor.processUpToDateFile(indexedFile, inputId, id)) { wasInvalidated = true; } } finally { timeProcessingUpToDateFiles.addAndGet(System.nanoTime() - nowTime); } } } } } if (!wasInvalidated) { IndexingStamp.flushCache(inputId); return new UnindexedFileStatus(false, false, timeProcessingUpToDateFiles.get(), timeUpdatingContentLessIndexes.get(), timeIndexingWithoutContent.get()); } } AtomicBoolean shouldIndex = new AtomicBoolean(); FileTypeManagerEx.getInstanceEx().freezeFileTypeTemporarilyIn(file, () -> { boolean isDirectory = file.isDirectory(); FileIndexingState fileTypeIndexState = null; if (!isDirectory && !myFileBasedIndex.isTooLarge(file)) { if ((fileTypeIndexState = myFileTypeIndex.getIndexingStateForFile(inputId, indexedFile)) == FileIndexingState.OUT_DATED) { myFileBasedIndex.dropNontrivialIndexedStates(inputId); shouldIndex.set(true); } else { final List<ID<?, ?>> affectedIndexCandidates = myFileBasedIndex.getAffectedIndexCandidates(indexedFile); //noinspection ForLoopReplaceableByForEach for (int i = 0, size = affectedIndexCandidates.size(); i < size; ++i) { final ID<?, ?> indexId = affectedIndexCandidates.get(i); try { if (myFileBasedIndex.needsFileContentLoading(indexId)) { FileIndexingState fileIndexingState = myFileBasedIndex.shouldIndexFile(indexedFile, indexId); boolean indexInfrastructureExtensionInvalidated = false; if (fileIndexingState == FileIndexingState.UP_TO_DATE) { if (myShouldProcessUpToDateFiles) { for (FileBasedIndexInfrastructureExtension.FileIndexingStatusProcessor p : myStateProcessors) { long nowTime = System.nanoTime(); try { if (!p.processUpToDateFile(indexedFile, inputId, indexId)) { indexInfrastructureExtensionInvalidated = true; } } finally { timeProcessingUpToDateFiles.addAndGet(System.nanoTime() - nowTime); } } } } if (indexInfrastructureExtensionInvalidated) { fileIndexingState = myFileBasedIndex.shouldIndexFile(indexedFile, indexId); } if (fileIndexingState.updateRequired()) { if (myDoTraceForFilesToBeIndexed) { LOG.trace("Scheduling indexing of " + file + " by request of index " + indexId); } long nowTime = System.nanoTime(); boolean wasIndexedByInfrastructure; try { wasIndexedByInfrastructure = tryIndexWithoutContentViaInfrastructureExtension(indexedFile, inputId, indexId); } finally { timeIndexingWithoutContent.addAndGet(System.nanoTime() - nowTime); } if (wasIndexedByInfrastructure) { indexesWereProvidedByInfrastructureExtension.set(true); } else { shouldIndex.set(true); // NOTE! Do not break the loop here. We must process ALL IDs and pass them to the FileIndexingStatusProcessor // so that it can invalidate all "indexing states" (by means of clearing IndexingStamp) // for all indexes that became invalid. See IDEA-252846 for more details. 
} } } } catch (RuntimeException e) { final Throwable cause = e.getCause(); if (cause instanceof IOException || cause instanceof StorageException) { LOG.info(e); myFileBasedIndex.requestRebuild(indexId); } else { throw e; } } } } } long nowTime = System.nanoTime(); try { for (ID<?, ?> indexId : myFileBasedIndex.getContentLessIndexes(isDirectory)) { if (FileTypeIndex.NAME.equals(indexId) && fileTypeIndexState != null && !fileTypeIndexState.updateRequired()) { continue; } if (myFileBasedIndex.shouldIndexFile(indexedFile, indexId).updateRequired()) { myFileBasedIndex.updateSingleIndex(indexId, file, inputId, new IndexedFileWrapper(indexedFile)); } } } finally { timeUpdatingContentLessIndexes.addAndGet(System.nanoTime() - nowTime); } IndexingStamp.flushCache(inputId); if (!shouldIndex.get()) { IndexingFlag.setFileIndexed(file); } }); return new UnindexedFileStatus(shouldIndex.get(), indexesWereProvidedByInfrastructureExtension.get(), timeProcessingUpToDateFiles.get(), timeUpdatingContentLessIndexes.get(), timeIndexingWithoutContent.get()); }); } private boolean tryIndexWithoutContentViaInfrastructureExtension(IndexedFile fileContent, int inputId, ID<?, ?> indexId) { for (FileBasedIndexInfrastructureExtension.FileIndexingStatusProcessor processor : myStateProcessors) { if (processor.tryIndexFileWithoutContent(fileContent, inputId, indexId)) { FileBasedIndexImpl.setIndexedState(myFileBasedIndex.getIndex(indexId), fileContent, inputId, true); return true; } } return false; } }
ingokegel/intellij-community
platform/lang-impl/src/com/intellij/util/indexing/UnindexedFilesFinder.java
Java
apache-2.0
10,024
'use strict'; let angular = require('angular'); module.exports = angular.module('spinnaker.serverGroup.configure.gce.instanceArchetypeCtrl', []) .controller('gceInstanceArchetypeCtrl', function($scope, instanceTypeService, modalWizardService) { var wizard = modalWizardService.getWizard(); $scope.$watch('command.viewState.instanceProfile', function() { if (!$scope.command.viewState.instanceProfile || $scope.command.viewState.instanceProfile === 'custom') { wizard.excludePage('instance-type'); } else { wizard.includePage('instance-type'); wizard.markClean('instance-profile'); wizard.markComplete('instance-profile'); } }); $scope.$watch('command.viewState.instanceType', function(newVal) { if (newVal) { wizard.markClean('instance-profile'); wizard.markComplete('instance-profile'); } }); }).name;
zanthrash/deck-1
app/scripts/modules/google/serverGroup/configure/wizard/ServerGroupInstanceArchetype.controller.js
JavaScript
apache-2.0
911
<?php
/**
 * Utility functions for the DrEdit PHP application.
 *
 * @author Burcu Dogan <jbd@google.com>
 *
 * Copyright 2013 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Returns the current user in the session or NULL.
 */
function get_user() {
  if (isset($_SESSION["user"])) {
    return json_decode($_SESSION["user"]);
  }
  return NULL;
}

/**
 * Sets the current user.
 */
function set_user($tokens) {
  $_SESSION["user"] = json_encode(array(
    'tokens' => $tokens
  ));
}

/**
 * Deletes the user in the session.
 */
function delete_user() {
  $_SESSION["user"] = NULL;
}

/**
 * Checks whether or not there is an authenticated
 * user in the session. If not, responds with an error message.
 */
function checkUserAuthentication($app) {
  $user = get_user();
  if (!$user) {
    renderErrJson($app, 401, 'User is not authenticated.');
  }
}

/**
 * Checks whether or not all given params are represented in the
 * request's query parameters. If not, responds with an error message.
 */
function checkRequiredQueryParams($app, $params = array()) {
  foreach ($params as &$param) {
    if (!$app->request()->get($param)) {
      renderErrJson($app, 400, 'Required parameter missing.');
    }
  }
}

/**
 * Renders the given object as JSON.
 */
function renderJson($app, $obj) {
  echo json_encode($obj);
}

/**
 * Renders the given message as JSON and responds with the
 * given HTTP status code.
 */
function renderErrJson($app, $statusCode, $message) {
  echo json_encode(array(
    'message' => $message
  ));
  $app->halt($statusCode);
}

/**
 * Renders the given Exception object as JSON.
 */
function renderEx($app, $ex) {
  echo json_encode($ex);
}
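
/*
 * Example usage (illustrative sketch; assumes a PHP session has been started
 * and $tokens is a hypothetical OAuth token array):
 *
 *   session_start();
 *   set_user($tokens);
 *   $user = get_user();   // object with a 'tokens' property, or NULL
 *   delete_user();
 */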
murat8505/dredit_text_editor_Google-Drive
php/utils.php
PHP
apache-2.0
2,194
(function () {
    "use strict";

    var fs = require("fs"),
        fse = require("fs-extra"),
        path = require("path"),
        readline = require("readline");

    var _domainManager;

    function init(domainManager) {
        _domainManager = domainManager;

        if (!_domainManager.hasDomain("importNode")) {
            _domainManager.registerDomain("importNode", {major: 0, minor: 1});
        }

        // Get shared project from share
        function getSharedProject(callback) {
            var sharedPath = path.join(process.cwd(), "share");
            fs.readdir(sharedPath, function(error, files) {
                callback(null, files);
            });
        }

        function getSharedFile(projectName, callback) {
            var fileName;
            var sharedPath = path.join(process.cwd(), "share", projectName);

            // Get target name from makefile
            var makePath = path.join(sharedPath, "makefile");
            if (fs.existsSync(makePath)) {
                var lineReader = readline.createInterface({
                    input: fs.createReadStream(makePath)
                });

                lineReader.on("line", function(line) {
                    if (line.startsWith("TARGET")) {
                        var file = line.split("=")[1].trim();
                        fileName = file.split(".")[0];
                    }
                });

                lineReader.on("close", function() {
                    // FIXME: We only check whether the wasm and js files exist.
                    // We need a way to find the correct result file.
                    var wasmPath = path.join(sharedPath, fileName + ".wasm");
                    var loaderPath = path.join(sharedPath, fileName + ".js");
                    var fileList = [];
                    if (fs.existsSync(wasmPath) && fs.existsSync(loaderPath)) {
                        fileList.push(fileName + ".wasm");
                        fileList.push(fileName + ".js");
                        callback(null, fileList);
                    } else {
                        callback("Not found wasm");
                    }
                });
            } else {
                callback("Not found makefile");
            }
        }

        function copySharedFile(projectName, fileList, targetId, callback) {
            var sharedPath = path.join(process.cwd(), "share", projectName);
            var destPath = path.join(process.cwd(), "projects", targetId);

            // Copy files to the target project
            fileList.forEach(function(file) {
                var sourcePath = path.join(sharedPath, file);
                if (fs.existsSync(sourcePath)) {
                    var destFilePath = path.join(destPath, file);
                    try {
                        fse.copySync(sourcePath, destFilePath);
                    } catch (error) {
                        return callback("Fail to copy files");
                    }
                }
            });

            callback(null);
        }

        function copyFile(projectId, src, name, dest, callback) {
            const sourcePath = path.join(process.cwd(), 'projects', projectId, src, name);
            const destPath = path.join(process.cwd(), 'projects', projectId, dest, name);

            fse.copy(sourcePath, destPath, (err) => {
                if (err) {
                    return callback(err);
                }
                callback();
            });
        }

        function moveFile(projectId, src, name, dest, callback) {
            const sourcePath = path.join(process.cwd(), 'projects', projectId, src, name);
            const destPath = path.join(process.cwd(), 'projects', projectId, dest, name);

            fse.move(sourcePath, destPath, (err) => {
                if (err) {
                    return callback(err);
                }
                callback();
            });
        }

        _domainManager.registerCommand(
            "importNode",
            "getSharedProject",
            getSharedProject,
            true,
            "Get Shared Project",
            null,
            [
                {name: "data", type: "array"}
            ]
        );

        _domainManager.registerCommand(
            "importNode",
            "getSharedFile",
            getSharedFile,
            true,
            "Get Shared File",
            [
                {name: "projectName", type: "string"}
            ],
            [
                {name: "result", type: "array"}
            ]
        );

        _domainManager.registerCommand(
            "importNode",
            "copySharedFile",
            copySharedFile,
            true,
            "Copy Shared File",
            [
                {name: "projectName", type: "string"},
                {name: "fileList", type: "array"},
                {name: "targetId", type: "string"}
            ],
            []
        );

        _domainManager.registerCommand(
            "importNode",
            "COPY",
            copyFile,
            true,
            "Copy File",
            [
                {name: "projectId", type: "string"},
                {name: "src", type: "string"},
                {name: "name", type: "string"},
                {name: "dest", type: "string"}
            ],
            []
        );

        _domainManager.registerCommand(
            "importNode",
            "CUT",
            moveFile,
            true,
            "Move File",
            [
{name: "projectId", type: "string"}, {name: "src", type: "string"}, {name: "name", type: "string"}, {name: "dest", type: "string"} ], [] ); } exports.init = init; }());
hyundukkim/WATT
libs/brackets-server/embedded-ext/importfile/node/ImportDomain.js
JavaScript
apache-2.0
5,812
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.beam.sdk.util; import static com.google.common.base.Preconditions.checkState; import com.google.auth.Credentials; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.protobuf.ByteString; import com.google.protobuf.Timestamp; import com.google.pubsub.v1.AcknowledgeRequest; import com.google.pubsub.v1.DeleteSubscriptionRequest; import com.google.pubsub.v1.DeleteTopicRequest; import com.google.pubsub.v1.GetSubscriptionRequest; import com.google.pubsub.v1.ListSubscriptionsRequest; import com.google.pubsub.v1.ListSubscriptionsResponse; import com.google.pubsub.v1.ListTopicsRequest; import com.google.pubsub.v1.ListTopicsResponse; import com.google.pubsub.v1.ModifyAckDeadlineRequest; import com.google.pubsub.v1.PublishRequest; import com.google.pubsub.v1.PublishResponse; import com.google.pubsub.v1.PublisherGrpc; import com.google.pubsub.v1.PublisherGrpc.PublisherBlockingStub; import com.google.pubsub.v1.PubsubMessage; import com.google.pubsub.v1.PullRequest; import com.google.pubsub.v1.PullResponse; import com.google.pubsub.v1.ReceivedMessage; import com.google.pubsub.v1.SubscriberGrpc; import com.google.pubsub.v1.SubscriberGrpc.SubscriberBlockingStub; import com.google.pubsub.v1.Subscription; import com.google.pubsub.v1.Topic; import io.grpc.Channel; import io.grpc.ClientInterceptors; import io.grpc.ManagedChannel; import io.grpc.auth.ClientAuthInterceptor; import io.grpc.netty.GrpcSslContexts; import io.grpc.netty.NegotiationType; import io.grpc.netty.NettyChannelBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import javax.annotation.Nullable; import org.apache.beam.sdk.options.GcpOptions; import org.apache.beam.sdk.options.PubsubOptions; /** * A helper class for talking to Pubsub via grpc. * * <p>CAUTION: Currently uses the application default credentials and does not respect any * credentials-related arguments in {@link GcpOptions}. 
*/ public class PubsubGrpcClient extends PubsubClient { private static final String PUBSUB_ADDRESS = "pubsub.googleapis.com"; private static final int PUBSUB_PORT = 443; private static final int LIST_BATCH_SIZE = 1000; private static final int DEFAULT_TIMEOUT_S = 15; private static class PubsubGrpcClientFactory implements PubsubClientFactory { @Override public PubsubClient newClient( @Nullable String timestampLabel, @Nullable String idLabel, PubsubOptions options) throws IOException { ManagedChannel channel = NettyChannelBuilder .forAddress(PUBSUB_ADDRESS, PUBSUB_PORT) .negotiationType(NegotiationType.TLS) .sslContext(GrpcSslContexts.forClient().ciphers(null).build()) .build(); return new PubsubGrpcClient(timestampLabel, idLabel, DEFAULT_TIMEOUT_S, channel, options.getGcpCredential()); } @Override public String getKind() { return "Grpc"; } } /** * Factory for creating Pubsub clients using gRCP transport. */ public static final PubsubClientFactory FACTORY = new PubsubGrpcClientFactory(); /** * Timeout for grpc calls (in s). */ private final int timeoutSec; /** * Underlying netty channel, or {@literal null} if closed. */ @Nullable private ManagedChannel publisherChannel; /** * Credentials determined from options and environment. */ private final Credentials credentials; /** * Label to use for custom timestamps, or {@literal null} if should use Pubsub publish time * instead. */ @Nullable private final String timestampLabel; /** * Label to use for custom ids, or {@literal null} if should use Pubsub provided ids. */ @Nullable private final String idLabel; /** * Cached stubs, or null if not cached. */ @Nullable private PublisherGrpc.PublisherBlockingStub cachedPublisherStub; private SubscriberGrpc.SubscriberBlockingStub cachedSubscriberStub; @VisibleForTesting PubsubGrpcClient( @Nullable String timestampLabel, @Nullable String idLabel, int timeoutSec, ManagedChannel publisherChannel, Credentials credentials) { this.timestampLabel = timestampLabel; this.idLabel = idLabel; this.timeoutSec = timeoutSec; this.publisherChannel = publisherChannel; this.credentials = credentials; } /** * Gracefully close the underlying netty channel. */ @Override public void close() { if (publisherChannel == null) { // Already closed. return; } // Can gc the underlying stubs. cachedPublisherStub = null; cachedSubscriberStub = null; // Mark the client as having been closed before going further // in case we have an exception from the channel. ManagedChannel publisherChannel = this.publisherChannel; this.publisherChannel = null; // Gracefully shutdown the channel. publisherChannel.shutdown(); try { publisherChannel.awaitTermination(timeoutSec, TimeUnit.SECONDS); } catch (InterruptedException e) { // Ignore. Thread.currentThread().interrupt(); } } /** * Return channel with interceptor for returning credentials. */ private Channel newChannel() throws IOException { checkState(publisherChannel != null, "PubsubGrpcClient has been closed"); ClientAuthInterceptor interceptor = new ClientAuthInterceptor(credentials, Executors.newSingleThreadExecutor()); return ClientInterceptors.intercept(publisherChannel, interceptor); } /** * Return a stub for making a publish request with a timeout. */ private PublisherBlockingStub publisherStub() throws IOException { if (cachedPublisherStub == null) { cachedPublisherStub = PublisherGrpc.newBlockingStub(newChannel()); } return cachedPublisherStub.withDeadlineAfter(timeoutSec, TimeUnit.SECONDS); } /** * Return a stub for making a subscribe request with a timeout. 
   */
  private SubscriberBlockingStub subscriberStub() throws IOException {
    if (cachedSubscriberStub == null) {
      cachedSubscriberStub = SubscriberGrpc.newBlockingStub(newChannel());
    }
    return cachedSubscriberStub.withDeadlineAfter(timeoutSec, TimeUnit.SECONDS);
  }

  @Override
  public int publish(TopicPath topic, List<OutgoingMessage> outgoingMessages)
      throws IOException {
    PublishRequest.Builder request = PublishRequest.newBuilder()
                                                   .setTopic(topic.getPath());
    for (OutgoingMessage outgoingMessage : outgoingMessages) {
      PubsubMessage.Builder message =
          PubsubMessage.newBuilder()
                       .setData(ByteString.copyFrom(outgoingMessage.elementBytes));

      if (timestampLabel != null) {
        message.getMutableAttributes()
               .put(timestampLabel, String.valueOf(outgoingMessage.timestampMsSinceEpoch));
      }

      if (idLabel != null && !Strings.isNullOrEmpty(outgoingMessage.recordId)) {
        message.getMutableAttributes().put(idLabel, outgoingMessage.recordId);
      }

      request.addMessages(message);
    }

    PublishResponse response = publisherStub().publish(request.build());
    return response.getMessageIdsCount();
  }

  @Override
  public List<IncomingMessage> pull(
      long requestTimeMsSinceEpoch,
      SubscriptionPath subscription,
      int batchSize,
      boolean returnImmediately) throws IOException {
    PullRequest request = PullRequest.newBuilder()
                                     .setSubscription(subscription.getPath())
                                     .setReturnImmediately(returnImmediately)
                                     .setMaxMessages(batchSize)
                                     .build();
    PullResponse response = subscriberStub().pull(request);
    if (response.getReceivedMessagesCount() == 0) {
      return ImmutableList.of();
    }
    List<IncomingMessage> incomingMessages = new ArrayList<>(response.getReceivedMessagesCount());
    for (ReceivedMessage message : response.getReceivedMessagesList()) {
      PubsubMessage pubsubMessage = message.getMessage();
      @Nullable Map<String, String> attributes = pubsubMessage.getAttributes();

      // Payload.
      byte[] elementBytes = pubsubMessage.getData().toByteArray();

      // Timestamp.
      String pubsubTimestampString = null;
      Timestamp timestampProto = pubsubMessage.getPublishTime();
      if (timestampProto != null) {
        // Render the publish time as ms-since-epoch, which is what extractTimestamp expects.
        pubsubTimestampString = String.valueOf(timestampProto.getSeconds() * 1000L
                                               + timestampProto.getNanos() / 1000000L);
      }
      long timestampMsSinceEpoch =
          extractTimestamp(timestampLabel, pubsubTimestampString, attributes);

      // Ack id.
      String ackId = message.getAckId();
      checkState(!Strings.isNullOrEmpty(ackId));

      // Record id, if any.
      @Nullable String recordId = null;
      if (idLabel != null && attributes != null) {
        recordId = attributes.get(idLabel);
      }
      if (Strings.isNullOrEmpty(recordId)) {
        // Fall back to the Pubsub provided message id.
        recordId = pubsubMessage.getMessageId();
      }

      incomingMessages.add(new IncomingMessage(elementBytes, timestampMsSinceEpoch,
                                               requestTimeMsSinceEpoch, ackId, recordId));
    }
    return incomingMessages;
  }

  @Override
  public void acknowledge(SubscriptionPath subscription, List<String> ackIds)
      throws IOException {
    AcknowledgeRequest request = AcknowledgeRequest.newBuilder()
                                                   .setSubscription(subscription.getPath())
                                                   .addAllAckIds(ackIds)
                                                   .build();
    subscriberStub().acknowledge(request); // ignore Empty result.
  }

  @Override
  public void modifyAckDeadline(
      SubscriptionPath subscription, List<String> ackIds, int deadlineSeconds)
      throws IOException {
    ModifyAckDeadlineRequest request =
        ModifyAckDeadlineRequest.newBuilder()
                                .setSubscription(subscription.getPath())
                                .addAllAckIds(ackIds)
                                .setAckDeadlineSeconds(deadlineSeconds)
                                .build();
    subscriberStub().modifyAckDeadline(request); // ignore Empty result.
} @Override public void createTopic(TopicPath topic) throws IOException { Topic request = Topic.newBuilder() .setName(topic.getPath()) .build(); publisherStub().createTopic(request); // ignore Topic result. } @Override public void deleteTopic(TopicPath topic) throws IOException { DeleteTopicRequest request = DeleteTopicRequest.newBuilder() .setTopic(topic.getPath()) .build(); publisherStub().deleteTopic(request); // ignore Empty result. } @Override public List<TopicPath> listTopics(ProjectPath project) throws IOException { ListTopicsRequest.Builder request = ListTopicsRequest.newBuilder() .setProject(project.getPath()) .setPageSize(LIST_BATCH_SIZE); ListTopicsResponse response = publisherStub().listTopics(request.build()); if (response.getTopicsCount() == 0) { return ImmutableList.of(); } List<TopicPath> topics = new ArrayList<>(response.getTopicsCount()); while (true) { for (Topic topic : response.getTopicsList()) { topics.add(topicPathFromPath(topic.getName())); } if (response.getNextPageToken().isEmpty()) { break; } request.setPageToken(response.getNextPageToken()); response = publisherStub().listTopics(request.build()); } return topics; } @Override public void createSubscription( TopicPath topic, SubscriptionPath subscription, int ackDeadlineSeconds) throws IOException { Subscription request = Subscription.newBuilder() .setTopic(topic.getPath()) .setName(subscription.getPath()) .setAckDeadlineSeconds(ackDeadlineSeconds) .build(); subscriberStub().createSubscription(request); // ignore Subscription result. } @Override public void deleteSubscription(SubscriptionPath subscription) throws IOException { DeleteSubscriptionRequest request = DeleteSubscriptionRequest.newBuilder() .setSubscription(subscription.getPath()) .build(); subscriberStub().deleteSubscription(request); // ignore Empty result. } @Override public List<SubscriptionPath> listSubscriptions(ProjectPath project, TopicPath topic) throws IOException { ListSubscriptionsRequest.Builder request = ListSubscriptionsRequest.newBuilder() .setProject(project.getPath()) .setPageSize(LIST_BATCH_SIZE); ListSubscriptionsResponse response = subscriberStub().listSubscriptions(request.build()); if (response.getSubscriptionsCount() == 0) { return ImmutableList.of(); } List<SubscriptionPath> subscriptions = new ArrayList<>(response.getSubscriptionsCount()); while (true) { for (Subscription subscription : response.getSubscriptionsList()) { if (subscription.getTopic().equals(topic.getPath())) { subscriptions.add(subscriptionPathFromPath(subscription.getName())); } } if (response.getNextPageToken().isEmpty()) { break; } request.setPageToken(response.getNextPageToken()); response = subscriberStub().listSubscriptions(request.build()); } return subscriptions; } @Override public int ackDeadlineSeconds(SubscriptionPath subscription) throws IOException { GetSubscriptionRequest request = GetSubscriptionRequest.newBuilder() .setSubscription(subscription.getPath()) .build(); Subscription response = subscriberStub().getSubscription(request); return response.getAckDeadlineSeconds(); } @Override public boolean isEOF() { return false; } }
yafengguo/Apache-beam
sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubGrpcClient.java
Java
apache-2.0
15,532
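A minimal usage sketch for the PubsubGrpcClient above (not from the Beam sources): publishing one message through the factory. The topicPathFromName helper and the OutgoingMessage constructor order are assumptions inferred from the fields the client reads.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.options.PubsubOptions;
import org.apache.beam.sdk.util.PubsubClient;
import org.apache.beam.sdk.util.PubsubClient.OutgoingMessage;
import org.apache.beam.sdk.util.PubsubClient.TopicPath;
import org.apache.beam.sdk.util.PubsubGrpcClient;

public class PublishSketch {
  public static void main(String[] args) throws Exception {
    PubsubOptions options = PipelineOptionsFactory.as(PubsubOptions.class);
    // Null labels: use the Pubsub publish time and Pubsub-assigned message ids.
    PubsubClient client = PubsubGrpcClient.FACTORY.newClient(null, null, options);
    try {
      // topicPathFromName is assumed to exist on PubsubClient alongside topicPathFromPath.
      TopicPath topic = PubsubClient.topicPathFromName("my-project", "my-topic");
      OutgoingMessage message = new OutgoingMessage(
          "hello".getBytes(StandardCharsets.UTF_8),  // elementBytes
          System.currentTimeMillis(),                // timestampMsSinceEpoch
          null);                                     // recordId (assumed constructor order)
      client.publish(topic, Collections.singletonList(message));
    } finally {
      client.close();
    }
  }
}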
package com.planet_ink.coffee_mud.MOBS; import com.planet_ink.coffee_mud.core.interfaces.*; import com.planet_ink.coffee_mud.core.*; import com.planet_ink.coffee_mud.core.collections.*; import com.planet_ink.coffee_mud.Abilities.interfaces.*; import com.planet_ink.coffee_mud.Areas.interfaces.*; import com.planet_ink.coffee_mud.Behaviors.interfaces.*; import com.planet_ink.coffee_mud.CharClasses.interfaces.*; import com.planet_ink.coffee_mud.Commands.interfaces.*; import com.planet_ink.coffee_mud.Common.interfaces.*; import com.planet_ink.coffee_mud.Exits.interfaces.*; import com.planet_ink.coffee_mud.Items.interfaces.*; import com.planet_ink.coffee_mud.Libraries.interfaces.*; import com.planet_ink.coffee_mud.Locales.interfaces.*; import com.planet_ink.coffee_mud.MOBS.interfaces.*; import com.planet_ink.coffee_mud.Races.interfaces.*; import java.util.*; /* Copyright 2013-2015 Bo Zimmerman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ public class Squirrel extends StdMOB { @Override public String ID(){return "Squirrel";} public Squirrel() { super(); final Random randomizer = new Random(System.currentTimeMillis()); username="a squirrel"; setDescription("It\\`s small, cute, and quick with a big expressive tail."); setDisplayText("A squirrel darts around."); CMLib.factions().setAlignment(this,Faction.Align.NEUTRAL); setMoney(0); basePhyStats.setWeight(4450 + Math.abs(randomizer.nextInt() % 5)); setWimpHitPoint(2); basePhyStats().setDamage(2); baseCharStats().setStat(CharStats.STAT_INTELLIGENCE,1); baseCharStats().setMyRace(CMClass.getRace("Squirrel")); baseCharStats().getMyRace().startRacing(this,false); basePhyStats().setAbility(0); basePhyStats().setLevel(1); basePhyStats().setArmor(90); baseState.setHitPoints(CMLib.dice().roll(basePhyStats().level(),11,basePhyStats().level())); recoverMaxState(); resetToMaxState(); recoverPhyStats(); recoverCharStats(); } }
Tycheo/coffeemud
com/planet_ink/coffee_mud/MOBS/Squirrel.java
Java
apache-2.0
2,521
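A hypothetical spawn sketch for the MOB above, assuming CoffeeMud's usual factory and lifecycle calls (CMClass.getMOB and MOB.bringToLife); the room variable is supplied by the caller.

// Hypothetical: drop a fresh squirrel into an existing Room.
MOB squirrel = CMClass.getMOB("Squirrel");
if (squirrel != null) {
    squirrel.setStartRoom(room);       // 'room' is a Room chosen by the caller
    squirrel.bringToLife(room, true);  // assumed reset-and-place lifecycle call
}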
/*******************************************************************************
 * Copyright 2013
 * Ubiquitous Knowledge Processing (UKP) Lab and FG Language Technology
 * Technische Universität Darmstadt
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package de.tudarmstadt.ukp.clarin.webanno.brat.controller;

/**
 * Thrown when an arc annotation does not start and end in the same sentence.
 *
 * @author Seid Muhie Yimam
 */
public class ArcCrossedMultipleSentenceException extends BratAnnotationException {
    private static final long serialVersionUID = 1280015349963924638L;

    public ArcCrossedMultipleSentenceException(String message) {
        super(message);
    }
}
debovis/webanno
webanno-brat/src/main/java/de/tudarmstadt/ukp/clarin/webanno/brat/controller/ArcCrossedMultipleSentenceException.java
Java
apache-2.0
1,274
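A minimal sketch of the guard this exception is meant for; the sentence-index helpers and variables are hypothetical.

// Hypothetical validation before creating an arc annotation.
int originSentence = getSentenceNumber(originFs);  // hypothetical helper
int targetSentence = getSentenceNumber(targetFs);
if (originSentence != targetSentence) {
    throw new ArcCrossedMultipleSentenceException(
            "Arc annotations must start and end in the same sentence.");
}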
/*
 * Copyright 2000-2016 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.openapi.roots.ui.configuration.libraryEditor;

import com.intellij.notification.Notification;
import com.intellij.notification.NotificationType;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationBundle;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.progress.Task;
import com.intellij.openapi.progress.util.ProgressIndicatorBase;
import com.intellij.openapi.ui.Messages;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.sun.jna.platform.mac.XAttrUtil;
import org.jetbrains.annotations.NotNull;

import java.io.IOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

/**
 * Files downloaded from the Internet are marked as 'quarantined' by OS X.
 * For such files, opening URLs of the form file://path#fragment via the
 * <a href="https://developer.apple.com/library/mac/documentation/Carbon/Conceptual/LaunchServicesConcepts/LSCIntro/LSCIntro.html">
 * Launch Services API
 * </a>
 * (used internally by {@link java.awt.Desktop#browse(URI)}) won't work as expected: the fragment is ignored when the file is opened.
 * This class clears the quarantine status from a folder containing Javadoc, once the user confirms.
*/ public class JavadocQuarantineStatusCleaner { private static final Logger LOG = Logger.getInstance(JavadocQuarantineStatusCleaner.class); private static final String QUARANTINE_ATTRIBUTE = "com.apple.quarantine"; public static void cleanIfNeeded(@NotNull VirtualFile javadocFolder) { Application application = ApplicationManager.getApplication(); assert !application.isDispatchThread(); if (!SystemInfo.isMac || !javadocFolder.isInLocalFileSystem() || !javadocFolder.isDirectory()) return; String folderPath = VfsUtilCore.virtualToIoFile(javadocFolder).getAbsolutePath(); // UserDefinedFileAttributeView isn't supported by JDK for HFS+ extended attributes on OS X, so we resort to JNA if (XAttrUtil.getXAttr(folderPath, QUARANTINE_ATTRIBUTE) == null) return; application.invokeLater(() -> { int result = Messages.showYesNoDialog(ApplicationBundle.message("quarantine.dialog.message"), ApplicationBundle.message("quarantine.dialog.title"), null); if (result == Messages.YES) { cleanQuarantineStatusInBackground(folderPath); } }, ModalityState.any()); } private static void cleanQuarantineStatusInBackground(@NotNull String folderPath) { ProgressIndicatorBase progressIndicator = new ProgressIndicatorBase(); String message = ApplicationBundle.message("quarantine.clean.progress", folderPath); ProgressManager.getInstance().runProcessWithProgressAsynchronously(new Task.Backgroundable(null, message) { @Override public void run(@NotNull ProgressIndicator indicator) { try(Stream<Path> s = Files.walk(Paths.get(folderPath))) { s.forEach(p -> { ProgressManager.checkCanceled(); XAttrUtil.removeXAttr(p.toFile().getAbsolutePath(), QUARANTINE_ATTRIBUTE); }); } catch (IOException e) { throw new RuntimeException(e); } } @Override public void onError(@NotNull Exception error) { LOG.warn(error); new Notification(ApplicationBundle.message("quarantine.error.group"), ApplicationBundle.message("quarantine.error.title"), ApplicationBundle.message("quarantine.error.message"), NotificationType.WARNING).notify(null); } }, progressIndicator); } }
michaelgallacher/intellij-community
java/idea-ui/src/com/intellij/openapi/roots/ui/configuration/libraryEditor/JavadocQuarantineStatusCleaner.java
Java
apache-2.0
4,651
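A minimal sketch of the underlying extended-attribute check, using the same JNA calls as the class above; the folder path is hypothetical.

// Hypothetical stand-alone check mirroring what the cleaner does per file.
String folder = "/Users/me/javadoc";  // hypothetical path
String quarantine = XAttrUtil.getXAttr(folder, "com.apple.quarantine");
if (quarantine != null) {
    // A non-null value means OS X flagged the download; removing the attribute clears it.
    XAttrUtil.removeXAttr(folder, "com.apple.quarantine");
}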
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Autogenerated by Thrift Compiler (0.9.2) * * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING * @generated */ #include "workspace_model_constants.h" namespace apache { namespace airavata { namespace model { namespace workspace { const workspace_modelConstants g_workspace_model_constants; workspace_modelConstants::workspace_modelConstants() { } }}}} // namespace
hasinitg/airavata
airavata-api/airavata-client-sdks/airavata-cpp-sdk/src/main/resources/lib/airavata/workspace_model_constants.cpp
C++
apache-2.0
1,201
using System; using Magnum.StateMachine; using MassTransit.Saga; using Serilog; namespace MassTransit.Persistence.MongoDb.Tests.Sagas { public class AuctionSaga : SagaStateMachine<AuctionSaga>, ISaga { public static readonly ILogger Logger = Log.Logger.ForContext<AuctionSaga>(); static AuctionSaga() { Define( () => { Correlate(Bid).By((saga, message) => saga.CorrelationId == message.AuctionId); Initially( When(Create).Then( (saga, message) => { saga.OpeningBid = message.OpeningBid; saga.OwnerEmail = message.OwnerEmail; saga.Title = message.Title; }).TransitionTo(Open)); During(Open, When(Bid).Call((saga, message) => saga.Handle(message))); }); } public AuctionSaga(Guid correlationId) { this.CorrelationId = correlationId; } public decimal? CurrentBid { get; set; } public string HighBidder { get; set; } public Guid HighBidId { get; set; } public decimal OpeningBid { get; set; } public string OwnerEmail { get; set; } public string Title { get; set; } public static State Initial { get; set; } public static State Completed { get; set; } public static State Open { get; set; } public static State Closed { get; set; } public static Event<CreateAuction> Create { get; set; } public static Event<PlaceBid> Bid { get; set; } public Guid CorrelationId { get; set; } public IServiceBus Bus { get; set; } private void Handle(PlaceBid bid) { if (!this.CurrentBid.HasValue || bid.MaximumBid > this.CurrentBid) { if (this.HighBidder != null) { this.Bus.Publish(new Outbid(this.HighBidId)); } this.CurrentBid = bid.MaximumBid; this.HighBidder = bid.BidderEmail; this.HighBidId = bid.BidId; } else { // already outbid this.Bus.Publish(new Outbid(bid.BidId)); } } } }
cwooldridge/MassTransit.Persistence.MongoDb
MassTransit.Persistence.MongoDb.Tests/Sagas/AuctionSaga.cs
C#
apache-2.0
2,517
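A hedged sketch of driving the saga above from the outside. The message property setters (CorrelationId on CreateAuction in particular) are assumptions based on the fields the saga reads, and bus configuration is elided.

// Hypothetical driver; 'bus' is an IServiceBus configured elsewhere.
public static void RunAuction(IServiceBus bus)
{
    var auctionId = Guid.NewGuid();

    bus.Publish(new CreateAuction
    {
        CorrelationId = auctionId,       // assumed correlation property
        Title = "Antique clock",
        OwnerEmail = "owner@example.com",
        OpeningBid = 100m,
    });

    bus.Publish(new PlaceBid
    {
        AuctionId = auctionId,
        BidId = Guid.NewGuid(),
        MaximumBid = 150m,
        BidderEmail = "bidder@example.com",
    });
}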
// Copyright 2020 Red Hat, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "github.com/coreos/go-semver/semver" "github.com/coreos/ignition/v2/config/shared/errors" "github.com/coreos/vcontext/path" "github.com/coreos/vcontext/report" ) func (v Ignition) Semver() (*semver.Version, error) { return semver.NewVersion(v.Version) } func (ic IgnitionConfig) Validate(c path.ContextPath) (r report.Report) { for i, res := range ic.Merge { r.AddOnError(c.Append("merge", i), res.validateRequiredSource()) } return } func (v Ignition) Validate(c path.ContextPath) (r report.Report) { c = c.Append("version") tv, err := v.Semver() if err != nil { r.AddOnError(c, errors.ErrInvalidVersion) return } if MaxVersion != *tv { r.AddOnError(c, errors.ErrUnknownVersion) } return }
coreos/ignition
config/v3_4_experimental/types/ignition.go
GO
apache-2.0
1,335
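A small sketch of the version-parsing path above; only Semver() is exercised, and the literal version string is an assumption.

package main

import (
	"fmt"

	types "github.com/coreos/ignition/v2/config/v3_4_experimental/types"
)

func main() {
	// Hypothetical config stub: only the Version field matters to Semver().
	ign := types.Ignition{Version: "3.4.0-experimental"}
	if v, err := ign.Semver(); err == nil {
		fmt.Printf("major=%d minor=%d\n", v.Major, v.Minor)
	} else {
		fmt.Println("invalid version:", err)
	}
}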
"""Helpers that help with state related things.""" import asyncio from collections import defaultdict import datetime as dt import logging from types import ModuleType, TracebackType from typing import Dict, Iterable, List, Optional, Type, Union from homeassistant.components.sun import STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON from homeassistant.const import ( STATE_CLOSED, STATE_HOME, STATE_LOCKED, STATE_NOT_HOME, STATE_OFF, STATE_ON, STATE_OPEN, STATE_UNKNOWN, STATE_UNLOCKED, ) from homeassistant.core import Context, State from homeassistant.loader import IntegrationNotFound, async_get_integration, bind_hass import homeassistant.util.dt as dt_util from .typing import HomeAssistantType _LOGGER = logging.getLogger(__name__) class AsyncTrackStates: """ Record the time when the with-block is entered. Add all states that have changed since the start time to the return list when with-block is exited. Must be run within the event loop. """ def __init__(self, hass: HomeAssistantType) -> None: """Initialize a TrackStates block.""" self.hass = hass self.states: List[State] = [] # pylint: disable=attribute-defined-outside-init def __enter__(self) -> List[State]: """Record time from which to track changes.""" self.now = dt_util.utcnow() return self.states def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: """Add changes states to changes list.""" self.states.extend(get_changed_since(self.hass.states.async_all(), self.now)) def get_changed_since( states: Iterable[State], utc_point_in_time: dt.datetime ) -> List[State]: """Return list of states that have been changed since utc_point_in_time.""" return [state for state in states if state.last_updated >= utc_point_in_time] @bind_hass async def async_reproduce_state( hass: HomeAssistantType, states: Union[State, Iterable[State]], blocking: bool = False, context: Optional[Context] = None, ) -> None: """Reproduce a list of states on multiple domains.""" if isinstance(states, State): states = [states] to_call: Dict[str, List[State]] = defaultdict(list) for state in states: to_call[state.domain].append(state) async def worker(domain: str, states_by_domain: List[State]) -> None: try: integration = await async_get_integration(hass, domain) except IntegrationNotFound: _LOGGER.warning( "Trying to reproduce state for unknown integration: %s", domain ) return try: platform: Optional[ModuleType] = integration.get_platform("reproduce_state") except ImportError: _LOGGER.warning("Integration %s does not support reproduce state", domain) return await platform.async_reproduce_states( # type: ignore hass, states_by_domain, context=context ) if to_call: # run all domains in parallel await asyncio.gather( *(worker(domain, data) for domain, data in to_call.items()) ) def state_as_number(state: State) -> float: """ Try to coerce our state to a number. Raises ValueError if this is not possible. """ if state.state in ( STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON, STATE_OPEN, STATE_HOME, ): return 1 if state.state in ( STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN, STATE_BELOW_HORIZON, STATE_CLOSED, STATE_NOT_HOME, ): return 0 return float(state.state)
leppa/home-assistant
homeassistant/helpers/state.py
Python
apache-2.0
3,813
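A quick sketch of state_as_number on synthetic State objects; the entity ids are hypothetical.

from homeassistant.core import State
from homeassistant.helpers.state import state_as_number

# Hypothetical states: mapped to 1, 0, and a plain float respectively.
assert state_as_number(State("lock.front_door", "locked")) == 1
assert state_as_number(State("light.kitchen", "off")) == 0
assert state_as_number(State("sensor.temp", "21.5")) == 21.5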
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package predicates import ( "fmt" "math/rand" "strconv" "sync" "time" "github.com/golang/glog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/util/workqueue" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" "k8s.io/metrics/pkg/client/clientset_generated/clientset" ) // predicatePrecomputations: Helper types/variables... type PredicateMetadataModifier func(pm *predicateMetadata) var predicatePrecomputeRegisterLock sync.Mutex var predicatePrecomputations map[string]PredicateMetadataModifier = make(map[string]PredicateMetadataModifier) func RegisterPredicatePrecomputation(predicateName string, precomp PredicateMetadataModifier) { predicatePrecomputeRegisterLock.Lock() defer predicatePrecomputeRegisterLock.Unlock() predicatePrecomputations[predicateName] = precomp } // Other types for predicate functions... type NodeInfo interface { GetNodeInfo(nodeID string) (*v1.Node, error) } type PersistentVolumeInfo interface { GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) } // CachedPersistentVolumeInfo implements PersistentVolumeInfo type CachedPersistentVolumeInfo struct { corelisters.PersistentVolumeLister } func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) { return c.Get(pvID) } type PersistentVolumeClaimInfo interface { GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) } // CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo type CachedPersistentVolumeClaimInfo struct { corelisters.PersistentVolumeClaimLister } // GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) { return c.PersistentVolumeClaims(namespace).Get(name) } type CachedNodeInfo struct { corelisters.NodeLister } // GetNodeInfo returns cached data for the node 'id'. 
func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) { node, err := c.Get(id) if apierrors.IsNotFound(err) { return nil, fmt.Errorf("node '%v' not found", id) } if err != nil { return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err) } return node, nil } // Note that predicateMetadata and matchingPodAntiAffinityTerm need to be declared in the same file // due to the way declarations are processed in predicate declaration unit tests. type matchingPodAntiAffinityTerm struct { term *v1.PodAffinityTerm node *v1.Node } type predicateMetadata struct { pod *v1.Pod podBestEffort bool podRequest *schedulercache.Resource podPorts map[int]bool matchingAntiAffinityTerms []matchingPodAntiAffinityTerm serviceAffinityMatchingPodList []*v1.Pod serviceAffinityMatchingPodServices []*v1.Service } func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool { // fast path if there is no conflict checking targets. if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil && volume.ISCSI == nil { return false } for _, existingVolume := range pod.Spec.Volumes { // Same GCE disk mounted by multiple pods conflicts unless all pods mount it read-only. if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil { disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) { return true } } if volume.AWSElasticBlockStore != nil && existingVolume.AWSElasticBlockStore != nil { if volume.AWSElasticBlockStore.VolumeID == existingVolume.AWSElasticBlockStore.VolumeID { return true } } if volume.ISCSI != nil && existingVolume.ISCSI != nil { iqn := volume.ISCSI.IQN eiqn := existingVolume.ISCSI.IQN // two ISCSI volumes are same, if they share the same iqn. As iscsi volumes are of type // RWO or ROX, we could permit only one RW mount. Same iscsi volume mounted by multiple Pods // conflict unless all other pods mount as read only. if iqn == eiqn && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) { return true } } if volume.RBD != nil && existingVolume.RBD != nil { mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage // two RBDs images are the same if they share the same Ceph monitor, are in the same RADOS Pool, and have the same image name // only one read-write mount is permitted for the same RBD image. // same RBD image mounted by multiple Pods conflicts unless all Pods mount the image read-only if haveSame(mon, emon) && pool == epool && image == eimage && !(volume.RBD.ReadOnly && existingVolume.RBD.ReadOnly) { return true } } } return false } // NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that // are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume // can't be scheduled there. // This is GCE, Amazon EBS, and Ceph RBD specific for now: // - GCE PD allows multiple mounts as long as they're all read-only // - AWS EBS forbids any two pods mounting the same volume ID // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image. // - ISCSI forbids if any two pods share at least same IQN, LUN and Target // TODO: migrate this into some per-volume specific code? 
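// For example, two pods that both mount GCE PD "my-disk" read-write conflict with each
// other, while two read-only mounts of that same disk can share a node.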
func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { for _, v := range pod.Spec.Volumes { for _, ev := range nodeInfo.Pods() { if isVolumeConflict(v, ev) { return false, []algorithm.PredicateFailureReason{ErrDiskConflict}, nil } } } return true, nil, nil } type MaxPDVolumeCountChecker struct { filter VolumeFilter maxVolumes int pvInfo PersistentVolumeInfo pvcInfo PersistentVolumeClaimInfo } // VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps type VolumeFilter struct { // Filter normal volumes FilterVolume func(vol *v1.Volume) (id string, relevant bool) FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool) } // NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the // number of volumes which match a filter that it requests, and those that are already present. The // maximum number is configurable to accommodate different systems. // // The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over // the maximum. func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate { c := &MaxPDVolumeCountChecker{ filter: filter, maxVolumes: maxVolumes, pvInfo: pvInfo, pvcInfo: pvcInfo, } return c.predicate } func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error { for i := range volumes { vol := &volumes[i] if id, ok := c.filter.FilterVolume(vol); ok { filteredVolumes[id] = true } else if vol.PersistentVolumeClaim != nil { pvcName := vol.PersistentVolumeClaim.ClaimName if pvcName == "" { return fmt.Errorf("PersistentVolumeClaim had no name") } pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) if err != nil { // if the PVC is not found, log the error and count the PV towards the PV limit // generate a random volume ID since its required for de-dup utilruntime.HandleError(fmt.Errorf("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)) source := rand.NewSource(time.Now().UnixNano()) generatedID := "missingPVC" + strconv.Itoa(rand.New(source).Intn(1000000)) filteredVolumes[generatedID] = true return nil } if pvc == nil { return fmt.Errorf("PersistentVolumeClaim not found: %q", pvcName) } pvName := pvc.Spec.VolumeName if pvName == "" { return fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) } pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) if err != nil { // if the PV is not found, log the error // and count the PV towards the PV limit // generate a random volume ID since it is required for de-dup utilruntime.HandleError(fmt.Errorf("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)) source := rand.NewSource(time.Now().UnixNano()) generatedID := "missingPV" + strconv.Itoa(rand.New(source).Intn(1000000)) filteredVolumes[generatedID] = true return nil } if pv == nil { return fmt.Errorf("PersistentVolume not found: %q", pvName) } if id, ok := c.filter.FilterPersistentVolume(pv); ok { filteredVolumes[id] = true } } } return nil } func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta interface{}, 
nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { return true, nil, nil } newVolumes := make(map[string]bool) if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil { return false, nil, err } // quick return if len(newVolumes) == 0 { return true, nil, nil } // count unique volumes existingVolumes := make(map[string]bool) for _, existingPod := range nodeInfo.Pods() { if err := c.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil { return false, nil, err } } numExistingVolumes := len(existingVolumes) // filter out already-mounted volumes for k := range existingVolumes { if _, ok := newVolumes[k]; ok { delete(newVolumes, k) } } numNewVolumes := len(newVolumes) if numExistingVolumes+numNewVolumes > c.maxVolumes { // violates MaxEBSVolumeCount or MaxGCEPDVolumeCount return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil } return true, nil, nil } // EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes var EBSVolumeFilter VolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.AWSElasticBlockStore != nil { return vol.AWSElasticBlockStore.VolumeID, true } return "", false }, FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.AWSElasticBlockStore != nil { return pv.Spec.AWSElasticBlockStore.VolumeID, true } return "", false }, } // GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes var GCEPDVolumeFilter VolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.GCEPersistentDisk != nil { return vol.GCEPersistentDisk.PDName, true } return "", false }, FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.GCEPersistentDisk != nil { return pv.Spec.GCEPersistentDisk.PDName, true } return "", false }, } // AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.AzureDisk != nil { return vol.AzureDisk.DiskName, true } return "", false }, FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.AzureDisk != nil { return pv.Spec.AzureDisk.DiskName, true } return "", false }, } type VolumeZoneChecker struct { pvInfo PersistentVolumeInfo pvcInfo PersistentVolumeClaimInfo } // VolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given // that some volumes may have zone scheduling constraints. The requirement is that any // volume zone-labels must match the equivalent zone-labels on the node. It is OK for // the node to have more zone-label constraints (for example, a hypothetical replicated // volume might allow region-wide access) // // Currently this is only supported with PersistentVolumeClaims, and looks to the labels // only on the bound PersistentVolume. // // Working with volumes declared inline in the pod specification (i.e. not // using a PersistentVolume) is likely to be harder, as it would require // determining the zone of a volume during scheduling, and that is likely to // require calling out to the cloud provider. It seems that we are moving away // from inline volume declarations anyway. 
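// For example, a bound PersistentVolume labeled
// failure-domain.beta.kubernetes.io/zone=us-central1-a only fits nodes carrying that
// same zone label value.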
func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate { c := &VolumeZoneChecker{ pvInfo: pvInfo, pvcInfo: pvcInfo, } return c.predicate } func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { return true, nil, nil } node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } nodeConstraints := make(map[string]string) for k, v := range node.ObjectMeta.Labels { if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion { continue } nodeConstraints[k] = v } if len(nodeConstraints) == 0 { // The node has no zone constraints, so we're OK to schedule. // In practice, when using zones, all nodes must be labeled with zone labels. // We want to fast-path this case though. return true, nil, nil } namespace := pod.Namespace manifest := &(pod.Spec) for i := range manifest.Volumes { volume := &manifest.Volumes[i] if volume.PersistentVolumeClaim != nil { pvcName := volume.PersistentVolumeClaim.ClaimName if pvcName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim had no name") } pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) if err != nil { return false, nil, err } if pvc == nil { return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName) } pvName := pvc.Spec.VolumeName if pvName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) } pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) if err != nil { return false, nil, err } if pv == nil { return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName) } for k, v := range pv.ObjectMeta.Labels { if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion { continue } nodeV, _ := nodeConstraints[k] if v != nodeV { glog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k) return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil } } } } return true, nil, nil } // Returns a *schedulercache.Resource that covers the largest width in each // resource dimension. Because init-containers run sequentially, we collect the // max in each dimension iteratively. In contrast, we sum the resource vectors // for regular containers since they run simultaneously. 
// // Example: // // Pod: // InitContainers // IC1: // CPU: 2 // Memory: 1G // IC2: // CPU: 2 // Memory: 3G // Containers // C1: // CPU: 2 // Memory: 1G // C2: // CPU: 1 // Memory: 1G // // Result: CPU: 3, Memory: 3G func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { result := schedulercache.Resource{} for _, container := range pod.Spec.Containers { for rName, rQuantity := range container.Resources.Requests { switch rName { case v1.ResourceMemory: result.Memory += rQuantity.Value() case v1.ResourceCPU: result.MilliCPU += rQuantity.MilliValue() case v1.ResourceNvidiaGPU: result.NvidiaGPU += rQuantity.Value() case v1.ResourceStorageOverlay: result.StorageOverlay += rQuantity.Value() default: if v1helper.IsOpaqueIntResourceName(rName) { result.AddOpaque(rName, rQuantity.Value()) } } } } // Account for storage requested by emptydir volumes // If the storage medium is memory, should exclude the size for _, vol := range pod.Spec.Volumes { if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory { result.StorageScratch += vol.EmptyDir.SizeLimit.Value() } } // take max_resource(sum_pod, any_init_container) for _, container := range pod.Spec.InitContainers { for rName, rQuantity := range container.Resources.Requests { switch rName { case v1.ResourceMemory: if mem := rQuantity.Value(); mem > result.Memory { result.Memory = mem } case v1.ResourceCPU: if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU { result.MilliCPU = cpu } case v1.ResourceNvidiaGPU: if gpu := rQuantity.Value(); gpu > result.NvidiaGPU { result.NvidiaGPU = gpu } case v1.ResourceStorageOverlay: if overlay := rQuantity.Value(); overlay > result.StorageOverlay { result.StorageOverlay = overlay } default: if v1helper.IsOpaqueIntResourceName(rName) { value := rQuantity.Value() if value > result.OpaqueIntResources[rName] { result.SetOpaque(rName, value) } } } } } return &result } func podName(pod *v1.Pod) string { return pod.Namespace + "/" + pod.Name } // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod. // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the // predicate failure reasons if the node has insufficient resources to run the pod. func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } var predicateFails []algorithm.PredicateFailureReason allowedPodNumber := nodeInfo.AllowedPodNumber() if len(nodeInfo.Pods())+1 > allowedPodNumber { predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber))) } var podRequest *schedulercache.Resource if predicateMeta, ok := meta.(*predicateMetadata); ok { podRequest = predicateMeta.podRequest } else { // We couldn't parse metadata - fallback to computing it. 
podRequest = GetResourceRequest(pod) } if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.OpaqueIntResources) == 0 { return len(predicateFails) == 0, predicateFails, nil } allocatable := nodeInfo.AllocatableResource() if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU { predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU)) } if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory { predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory)) } if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU { predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)) } scratchSpaceRequest := podRequest.StorageScratch if allocatable.StorageOverlay == 0 { scratchSpaceRequest += podRequest.StorageOverlay //scratchSpaceRequest += nodeInfo.RequestedResource().StorageOverlay nodeScratchRequest := nodeInfo.RequestedResource().StorageOverlay + nodeInfo.RequestedResource().StorageScratch if allocatable.StorageScratch < scratchSpaceRequest+nodeScratchRequest { predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageScratch, scratchSpaceRequest, nodeScratchRequest, allocatable.StorageScratch)) } } else if allocatable.StorageScratch < scratchSpaceRequest+nodeInfo.RequestedResource().StorageScratch { predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageScratch, scratchSpaceRequest, nodeInfo.RequestedResource().StorageScratch, allocatable.StorageScratch)) } if allocatable.StorageOverlay > 0 && allocatable.StorageOverlay < podRequest.StorageOverlay+nodeInfo.RequestedResource().StorageOverlay { predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageOverlay, podRequest.StorageOverlay, nodeInfo.RequestedResource().StorageOverlay, allocatable.StorageOverlay)) } for rName, rQuant := range podRequest.OpaqueIntResources { if allocatable.OpaqueIntResources[rName] < rQuant+nodeInfo.RequestedResource().OpaqueIntResources[rName] { predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.OpaqueIntResources[rName], nodeInfo.RequestedResource().OpaqueIntResources[rName], allocatable.OpaqueIntResources[rName])) } } if glog.V(10) { if len(predicateFails) == 0 { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. glog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber) } } return len(predicateFails) == 0, predicateFails, nil } // nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms, // terms are ORed, and an empty list of terms will match nothing. 
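// For example, with terms [{zone In [az1, az2]}, {disk In [ssd]}], a node labeled
// zone=az1 matches (the first term suffices), as does a node labeled disk=ssd.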
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool { for _, req := range nodeSelectorTerms { nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(req.MatchExpressions) if err != nil { glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions) return false } if nodeSelector.Matches(labels.Set(node.Labels)) { return true } } return false } // The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector. func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool { // Check if node.Labels match pod.Spec.NodeSelector. if len(pod.Spec.NodeSelector) > 0 { selector := labels.SelectorFromSet(pod.Spec.NodeSelector) if !selector.Matches(labels.Set(node.Labels)) { return false } } // 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes) // 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes // 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity // 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes // 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity // 6. non-nil empty NodeSelectorRequirement is not allowed nodeAffinityMatches := true affinity := pod.Spec.Affinity if affinity != nil && affinity.NodeAffinity != nil { nodeAffinity := affinity.NodeAffinity // if no required NodeAffinity requirements, will do no-op, means select all nodes. // TODO: Replace next line with subsequent commented-out line when implement RequiredDuringSchedulingRequiredDuringExecution. if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { // if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution == nil && nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { return true } // Match node selector for requiredDuringSchedulingRequiredDuringExecution. // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. // if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { // nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms // glog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms) // nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms) // } // Match node selector for requiredDuringSchedulingIgnoredDuringExecution. if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms) nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms) } } return nodeAffinityMatches } // PodMatchNodeSelector checks if a pod node selector matches the node label. func PodMatchNodeSelector(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } if podMatchesNodeLabels(pod, node) { return true, nil, nil } return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil } // PodFitsHost checks if a pod spec node name matches the current node. 
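// For example, a pod with spec.nodeName "node-1" fits only the node named "node-1";
// a pod with an empty nodeName fits any node.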
func PodFitsHost(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if len(pod.Spec.NodeName) == 0 { return true, nil, nil } node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } if pod.Spec.NodeName == node.Name { return true, nil, nil } return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil } type NodeLabelChecker struct { labels []string presence bool } func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate { labelChecker := &NodeLabelChecker{ labels: labels, presence: presence, } return labelChecker.CheckNodeLabelPresence } // CheckNodeLabelPresence checks whether all of the specified labels exists on a node or not, regardless of their value // If "presence" is false, then returns false if any of the requested labels matches any of the node's labels, // otherwise returns true. // If "presence" is true, then returns false if any of the requested labels does not match any of the node's labels, // otherwise returns true. // // Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels // In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected // // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful // A node may have a label with "retiring" as key and the date as the value // and it may be desirable to avoid scheduling new pods on this node func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } var exists bool nodeLabels := labels.Set(node.Labels) for _, label := range n.labels { exists = nodeLabels.Has(label) if (exists && !n.presence) || (!exists && n.presence) { return false, []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil } } return true, nil, nil } type ServiceAffinity struct { podLister algorithm.PodLister serviceLister algorithm.ServiceLister nodeInfo NodeInfo labels []string } // serviceAffinityPrecomputation should be run once by the scheduler before looping through the Predicate. It is a helper function that // only should be referenced by NewServiceAffinityPredicate. func (s *ServiceAffinity) serviceAffinityPrecomputation(pm *predicateMetadata) { if pm.pod == nil { glog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.") return } var errSvc, errList error // Store services which match the pod. pm.serviceAffinityMatchingPodServices, errSvc = s.serviceLister.GetPodServices(pm.pod) selector := CreateSelectorFromLabels(pm.pod.Labels) // consider only the pods that belong to the same namespace allMatches, errList := s.podLister.List(selector) // In the future maybe we will return them as part of the function. 
if errSvc != nil || errList != nil { glog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList) } pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace) } func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataModifier) { affinity := &ServiceAffinity{ podLister: podLister, serviceLister: serviceLister, nodeInfo: nodeInfo, labels: labels, } return affinity.checkServiceAffinity, affinity.serviceAffinityPrecomputation } // checkServiceAffinity is a predicate which matches nodes in such a way to force that // ServiceAffinity.labels are homogenous for pods that are scheduled to a node. // (i.e. it returns true IFF this pod can be added to this node such that all other pods in // the same service are running on nodes with // the exact same ServiceAffinity.label values). // // For example: // If the first pod of a service was scheduled to a node with label "region=foo", // all the other subsequent pods belong to the same service will be schedule on // nodes with the same "region=foo" label. // // Details: // // If (the svc affinity labels are not a subset of pod's label selectors ) // The pod has all information necessary to check affinity, the pod's label selector is sufficient to calculate // the match. // Otherwise: // Create an "implicit selector" which guarantees pods will land on nodes with similar values // for the affinity labels. // // To do this, we "reverse engineer" a selector by introspecting existing pods running under the same service+namespace. // These backfilled labels in the selector "L" are defined like so: // - L is a label that the ServiceAffinity object needs as a matching constraints. // - L is not defined in the pod itself already. // - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value. // // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed... // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction. func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var services []*v1.Service var pods []*v1.Pod if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) { services = pm.serviceAffinityMatchingPodServices pods = pm.serviceAffinityMatchingPodList } else { // Make the predicate resilient in case metadata is missing. pm = &predicateMetadata{pod: pod} s.serviceAffinityPrecomputation(pm) pods, services = pm.serviceAffinityMatchingPodList, pm.serviceAffinityMatchingPodServices } node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } // check if the pod being scheduled has the affinity labels specified in its NodeSelector affinityLabels := FindLabelsInSet(s.labels, labels.Set(pod.Spec.NodeSelector)) // Step 1: If we don't have all constraints, introspect nodes to find the missing constraints. 
if len(s.labels) > len(affinityLabels) { if len(services) > 0 { if len(pods) > 0 { nodeWithAffinityLabels, err := s.nodeInfo.GetNodeInfo(pods[0].Spec.NodeName) if err != nil { return false, nil, err } AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Labels)) } } } // Step 2: Finally complete the affinity predicate based on whatever set of predicates we were able to find. if CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) { return true, nil, nil } return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil } // PodFitsHostPorts checks if a node has free ports for the requested pod ports. func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var wantPorts map[int]bool if predicateMeta, ok := meta.(*predicateMetadata); ok { wantPorts = predicateMeta.podPorts } else { // We couldn't parse metadata - fallback to computing it. wantPorts = schedutil.GetUsedPorts(pod) } if len(wantPorts) == 0 { return true, nil, nil } existingPorts := nodeInfo.UsedPorts() for wport := range wantPorts { if wport != 0 && existingPorts[wport] { return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil } } return true, nil, nil } // search two arrays and return true if they have at least one common element; return false otherwise func haveSame(a1, a2 []string) bool { for _, val1 := range a1 { for _, val2 := range a2 { if val1 == val2 { return true } } } return false } // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates // that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need func GeneralPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } fit, reasons, err = EssentialPredicates(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } return len(predicateFails) == 0, predicateFails, nil } // noncriticalPredicates are the predicates that only non-critical pods need func noncriticalPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := PodFitsResources(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } return len(predicateFails) == 0, predicateFails, nil } // EssentialPredicates are the predicates that all pods, including critical pods, need func EssentialPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := PodFitsHost(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) 
} // TODO: PodFitsHostPorts is essential for now, but kubelet should ideally // preempt pods to free up host ports too fit, reasons, err = PodFitsHostPorts(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } fit, reasons, err = PodMatchNodeSelector(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } return len(predicateFails) == 0, predicateFails, nil } type PodAffinityChecker struct { info NodeInfo podLister algorithm.PodLister } func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate { checker := &PodAffinityChecker{ info: info, podLister: podLister, } return checker.InterPodAffinityMatches } // InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration. // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the // predicate failure reasons if the pod cannot be scheduled on the specified node. func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } if !c.satisfiesExistingPodsAntiAffinity(pod, meta, node) { return false, []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, nil } // Now check if <pod> requirements will be satisfied on this node. affinity := pod.Spec.Affinity if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return true, nil, nil } if !c.satisfiesPodsAffinityAntiAffinity(pod, node, affinity) { return false, []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, nil } if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied", podName(pod), node.Name) } return true, nil, nil } // anyPodMatchesPodAffinityTerm checks if any of given pods can match the specific podAffinityTerm. // First return value indicates whether a matching pod exists on a node that matches the topology key, // while the second return value indicates whether a matching pod exists anywhere. // TODO: Do we really need any pod matching, or all pods matching? I think the latter. 
func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods []*v1.Pod, node *v1.Node, term *v1.PodAffinityTerm) (bool, bool, error) {
	if len(term.TopologyKey) == 0 {
		return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
	}
	matchingPodExists := false
	namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, term)
	selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
	if err != nil {
		return false, false, err
	}
	for _, existingPod := range allPods {
		match := priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector)
		if match {
			matchingPodExists = true
			existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
			if err != nil {
				return false, matchingPodExists, err
			}
			if priorityutil.NodesHaveSameTopologyKey(node, existingPodNode, term.TopologyKey) {
				return true, matchingPodExists, nil
			}
		}
	}
	return false, matchingPodExists, nil
}

func getPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) {
	if podAffinity != nil {
		if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
			terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
		}
		// TODO: Uncomment this block when implementing RequiredDuringSchedulingRequiredDuringExecution.
		//if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
		//	terms = append(terms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
		//}
	}
	return terms
}

func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
	if podAntiAffinity != nil {
		if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
			terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
		}
		// TODO: Uncomment this block when implementing RequiredDuringSchedulingRequiredDuringExecution.
		//if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
		//	terms = append(terms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
		//}
	}
	return terms
}

func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) {
	allNodeNames := make([]string, 0, len(nodeInfoMap))
	for name := range nodeInfoMap {
		allNodeNames = append(allNodeNames, name)
	}

	var lock sync.Mutex
	var result []matchingPodAntiAffinityTerm
	var firstError error
	appendResult := func(toAppend []matchingPodAntiAffinityTerm) {
		lock.Lock()
		defer lock.Unlock()
		result = append(result, toAppend...)
	}
	catchError := func(err error) {
		lock.Lock()
		defer lock.Unlock()
		if firstError == nil {
			firstError = err
		}
	}

	processNode := func(i int) {
		nodeInfo := nodeInfoMap[allNodeNames[i]]
		node := nodeInfo.Node()
		if node == nil {
			catchError(fmt.Errorf("node not found"))
			return
		}
		var nodeResult []matchingPodAntiAffinityTerm
		for _, existingPod := range nodeInfo.PodsWithAffinity() {
			affinity := existingPod.Spec.Affinity
			if affinity == nil {
				continue
			}
			for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
				term := term // pin the range variable: storing &term directly would make every appended entry alias the same loop variable
				namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
				selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
				if err != nil {
					catchError(err)
					return
				}
				if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
					nodeResult = append(nodeResult, matchingPodAntiAffinityTerm{term: &term, node: node})
				}
			}
		}
		if len(nodeResult) > 0 {
			appendResult(nodeResult)
		}
	}
	workqueue.Parallelize(16, len(allNodeNames), processNode)
	return result, firstError
}

func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) ([]matchingPodAntiAffinityTerm, error) {
	var result []matchingPodAntiAffinityTerm
	for _, existingPod := range allPods {
		affinity := existingPod.Spec.Affinity
		if affinity != nil && affinity.PodAntiAffinity != nil {
			existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
			if err != nil {
				return nil, err
			}
			for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
				term := term // pin the range variable before taking its address (same aliasing pitfall as above)
				namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
				selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
				if err != nil {
					return nil, err
				}
				if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
					result = append(result, matchingPodAntiAffinityTerm{term: &term, node: existingPodNode})
				}
			}
		}
	}
	return result, nil
}

// Checks if scheduling the pod onto this node would break any anti-affinity
// rules indicated by the existing pods.
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta interface{}, node *v1.Node) bool {
	var matchingTerms []matchingPodAntiAffinityTerm
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		matchingTerms = predicateMeta.matchingAntiAffinityTerms
	} else {
		allPods, err := c.podLister.List(labels.Everything())
		if err != nil {
			glog.Errorf("Failed to get all pods, %+v", err)
			return false
		}
		if matchingTerms, err = c.getMatchingAntiAffinityTerms(pod, allPods); err != nil {
			glog.Errorf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
			return false
		}
	}
	for _, term := range matchingTerms {
		if len(term.term.TopologyKey) == 0 {
			glog.Error("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
			return false
		}
		if priorityutil.NodesHaveSameTopologyKey(node, term.node, term.term.TopologyKey) {
			glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm %v", podName(pod), node.Name, term.term)
			return false
		}
	}
	if glog.V(10) {
		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity rules satisfied.", podName(pod), node.Name)
	}
	return true
}

// Checks if scheduling the pod onto this node would break any rules of this pod.
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node *v1.Node, affinity *v1.Affinity) bool {
	allPods, err := c.podLister.List(labels.Everything())
	if err != nil {
		return false
	}

	// Check all affinity terms.
	for _, term := range getPodAffinityTerms(affinity.PodAffinity) {
		termMatches, matchingPodExists, err := c.anyPodMatchesPodAffinityTerm(pod, allPods, node, &term)
		if err != nil {
			glog.Errorf("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err)
			return false
		}
		if !termMatches {
			// If the requirement matches a pod's own labels and namespace, and there are
			// no other such pods, then disregard the requirement. This is necessary to
			// not block forever because the first pod of the collection can't be scheduled.
			if matchingPodExists {
				glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err)
				return false
			}
			namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term)
			selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
			if err != nil {
				glog.Errorf("Cannot parse selector on term %v for pod %v. Details %v", term, podName(pod), err)
				return false
			}
			match := priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector)
			if !match {
				glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err)
				return false
			}
		}
	}

	// Check all anti-affinity terms.
	for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
		termMatches, _, err := c.anyPodMatchesPodAffinityTerm(pod, allPods, node, &term)
		if err != nil || termMatches {
			glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm %v, err: %v", podName(pod), node.Name, term, err)
			return false
		}
	}

	if glog.V(10) {
		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.", podName(pod), node.Name)
	}
	return true
}

// PodToleratesNodeTaints checks whether a pod's tolerations can tolerate the node's taints
func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	taints, err := nodeInfo.Taints()
	if err != nil {
		return false, nil, err
	}

	if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
		// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
	}) {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
}

// isPodBestEffort checks if pod is scheduled with best-effort QoS
func isPodBestEffort(pod *v1.Pod) bool {
	return v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort
}

// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
// reporting memory pressure condition.
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var podBestEffort bool
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		podBestEffort = predicateMeta.podBestEffort
	} else {
		// We couldn't parse metadata - fallback to computing it.
		podBestEffort = isPodBestEffort(pod)
	}

	// pod is not a BestEffort pod
	if !podBestEffort {
		return true, nil, nil
	}

	// check if node is under memory pressure
	if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
	}
	return true, nil, nil
}

// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// check if node is under disk pressure
	if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
	}
	return true, nil, nil
}

type VolumeNodeChecker struct {
	pvInfo  PersistentVolumeInfo
	pvcInfo PersistentVolumeClaimInfo
	client  clientset.Interface
}

// VolumeNodeChecker evaluates if a pod can fit due to the volumes it requests, given
// that some volumes have node topology constraints, particularly when using Local PVs.
// The requirement is that any pod that uses a PVC that is bound to a PV with topology constraints
// must be scheduled to a node that satisfies the PV's topology labels.
func NewVolumeNodePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, client clientset.Interface) algorithm.FitPredicate {
	c := &VolumeNodeChecker{
		pvInfo:  pvInfo,
		pvcInfo: pvcInfo,
		client:  client,
	}
	return c.predicate
}

func (c *VolumeNodeChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) {
		return true, nil, nil
	}

	// If a pod doesn't have any volume attached to it, the predicate will always be true.
	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
	if len(pod.Spec.Volumes) == 0 {
		return true, nil, nil
	}

	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	glog.V(2).Infof("Checking for prebound volumes with node affinity")
	namespace := pod.Namespace
	manifest := &(pod.Spec)
	for i := range manifest.Volumes {
		volume := &manifest.Volumes[i]
		if volume.PersistentVolumeClaim == nil {
			continue
		}
		pvcName := volume.PersistentVolumeClaim.ClaimName
		if pvcName == "" {
			return false, nil, fmt.Errorf("PersistentVolumeClaim had no name")
		}
		pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
		if err != nil {
			return false, nil, err
		}
		if pvc == nil {
			return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName)
		}
		pvName := pvc.Spec.VolumeName
		if pvName == "" {
			return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName)
		}

		pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
		if err != nil {
			return false, nil, err
		}
		if pv == nil {
			return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName)
		}
		err = volumeutil.CheckNodeAffinity(pv, node.Labels)
		if err != nil {
			glog.V(2).Infof("Won't schedule pod %q onto node %q due to volume %q node mismatch: %v", pod.Name, node.Name, pvName, err.Error())
			return false, []algorithm.PredicateFailureReason{ErrVolumeNodeConflict}, nil
		}
		glog.V(4).Infof("VolumeNode predicate allows node %q for pod %q due to volume %q", node.Name, pod.Name, pvName)
	}
	return true, nil, nil
}
abdasgupta/kubernetes
plugin/pkg/scheduler/algorithm/predicates/predicates.go
Go
apache-2.0
53,588
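At its core, the PodFitsHostPorts predicate in the scheduler file above is a set-intersection test between the host ports a pod wants and the ports already in use on a node. The following is a minimal Java sketch of that same check; the class and method names are illustrative, not part of any scheduler codebase.

import java.util.Set;

// Illustrative sketch of the PodFitsHostPorts logic above: a pod fits only if
// none of its requested host ports is already taken on the node.
final class HostPortCheck {
    /** Returns true when no wanted port (other than 0, the "any port" wildcard) is already used. */
    static boolean podFitsHostPorts(Set<Integer> wantedPorts, Set<Integer> usedPorts) {
        if (wantedPorts.isEmpty()) {
            return true; // fast path, mirrors the len(wantPorts) == 0 check
        }
        for (int port : wantedPorts) {
            if (port != 0 && usedPorts.contains(port)) {
                return false; // conflict: this host port is already exposed on the node
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(podFitsHostPorts(Set.of(8080, 0), Set.of(80, 443))); // true
        System.out.println(podFitsHostPorts(Set.of(8080), Set.of(8080)));       // false
    }
}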
package mil.nga.giat.geowave.cli.geoserver; import java.io.File; import java.util.ArrayList; import java.util.List; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import mil.nga.giat.geowave.core.cli.annotations.GeowaveOperation; import mil.nga.giat.geowave.core.cli.api.Command; import mil.nga.giat.geowave.core.cli.api.OperationParams; import mil.nga.giat.geowave.core.cli.operations.config.options.ConfigOptions; import com.beust.jcommander.Parameter; import com.beust.jcommander.ParameterException; import com.beust.jcommander.Parameters; @GeowaveOperation(name = "rmstyle", parentOperation = GeoServerSection.class) @Parameters(commandDescription = "Remove GeoServer Style") public class GeoServerRemoveStyleCommand implements Command { private GeoServerRestClient geoserverClient = null; @Parameter(description = "<style name>") private List<String> parameters = new ArrayList<String>(); private String styleName = null; @Override public boolean prepare( OperationParams params ) { if (geoserverClient == null) { // Get the local config for GeoServer File propFile = (File) params.getContext().get( ConfigOptions.PROPERTIES_FILE_CONTEXT); GeoServerConfig config = new GeoServerConfig( propFile); // Create the rest client geoserverClient = new GeoServerRestClient( config); } // Successfully prepared return true; } @Override public void execute( OperationParams params ) throws Exception { if (parameters.size() != 1) { throw new ParameterException( "Requires argument: <style name>"); } styleName = parameters.get(0); Response deleteStyleResponse = geoserverClient.deleteStyle(styleName); if (deleteStyleResponse.getStatus() == Status.OK.getStatusCode()) { System.out.println("Delete style '" + styleName + "' on GeoServer: OK"); } else { System.err.println("Error deleting style '" + styleName + "' on GeoServer; code = " + deleteStyleResponse.getStatus()); } } }
Becca42/geowave
extensions/cli/geoserver/src/main/java/mil/nga/giat/geowave/cli/geoserver/GeoServerRemoveStyleCommand.java
Java
apache-2.0
2,009
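The rmstyle command above relies on JCommander to bind positional arguments into its parameters list. A minimal standalone sketch of that binding follows; it assumes only the jcommander dependency, and the RmStyleArgs class name is illustrative.

import java.util.ArrayList;
import java.util.List;

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;

// Shows how an unnamed @Parameter collects positional command-line arguments
// into a List<String>, as in the rmstyle command above.
public class RmStyleArgs {
    @Parameter(description = "<style name>")
    private List<String> parameters = new ArrayList<String>();

    public static void main(String[] args) {
        RmStyleArgs parsed = new RmStyleArgs();
        JCommander jc = new JCommander(parsed);
        jc.parse(args);                        // e.g. args = {"myStyle"}
        System.out.println(parsed.parameters); // [myStyle]
    }
}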
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.appcompat.widget; import static android.view.View.MeasureSpec.AT_MOST; import static android.view.View.MeasureSpec.EXACTLY; import static android.view.View.MeasureSpec.getMode; import static androidx.annotation.RestrictTo.Scope.LIBRARY; import android.content.Context; import android.graphics.Rect; import android.util.AttributeSet; import android.util.DisplayMetrics; import android.util.TypedValue; import android.widget.FrameLayout; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.annotation.RestrictTo; import androidx.core.view.ViewCompat; /** * @hide */ @RestrictTo(LIBRARY) public class ContentFrameLayout extends FrameLayout { public interface OnAttachListener { void onDetachedFromWindow(); void onAttachedFromWindow(); } private TypedValue mMinWidthMajor; private TypedValue mMinWidthMinor; private TypedValue mFixedWidthMajor; private TypedValue mFixedWidthMinor; private TypedValue mFixedHeightMajor; private TypedValue mFixedHeightMinor; private final Rect mDecorPadding; private OnAttachListener mAttachListener; public ContentFrameLayout(@NonNull Context context) { this(context, null); } public ContentFrameLayout(@NonNull Context context, @Nullable AttributeSet attrs) { this(context, attrs, 0); } public ContentFrameLayout( @NonNull Context context, @Nullable AttributeSet attrs, int defStyleAttr) { super(context, attrs, defStyleAttr); mDecorPadding = new Rect(); } /** * @hide */ @RestrictTo(LIBRARY) public void dispatchFitSystemWindows(Rect insets) { fitSystemWindows(insets); } public void setAttachListener(OnAttachListener attachListener) { mAttachListener = attachListener; } /** * Notify this view of the window decor view's padding. We use these values when working out * our size for the window size attributes. * * @hide */ @RestrictTo(LIBRARY) public void setDecorPadding(int left, int top, int right, int bottom) { mDecorPadding.set(left, top, right, bottom); if (ViewCompat.isLaidOut(this)) { requestLayout(); } } @Override protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) { final DisplayMetrics metrics = getContext().getResources().getDisplayMetrics(); final boolean isPortrait = metrics.widthPixels < metrics.heightPixels; final int widthMode = getMode(widthMeasureSpec); final int heightMode = getMode(heightMeasureSpec); boolean fixedWidth = false; if (widthMode == AT_MOST) { final TypedValue tvw = isPortrait ? 
mFixedWidthMinor : mFixedWidthMajor; if (tvw != null && tvw.type != TypedValue.TYPE_NULL) { int w = 0; if (tvw.type == TypedValue.TYPE_DIMENSION) { w = (int) tvw.getDimension(metrics); } else if (tvw.type == TypedValue.TYPE_FRACTION) { w = (int) tvw.getFraction(metrics.widthPixels, metrics.widthPixels); } if (w > 0) { w -= (mDecorPadding.left + mDecorPadding.right); final int widthSize = MeasureSpec.getSize(widthMeasureSpec); widthMeasureSpec = MeasureSpec.makeMeasureSpec( Math.min(w, widthSize), EXACTLY); fixedWidth = true; } } } if (heightMode == AT_MOST) { final TypedValue tvh = isPortrait ? mFixedHeightMajor : mFixedHeightMinor; if (tvh != null && tvh.type != TypedValue.TYPE_NULL) { int h = 0; if (tvh.type == TypedValue.TYPE_DIMENSION) { h = (int) tvh.getDimension(metrics); } else if (tvh.type == TypedValue.TYPE_FRACTION) { h = (int) tvh.getFraction(metrics.heightPixels, metrics.heightPixels); } if (h > 0) { h -= (mDecorPadding.top + mDecorPadding.bottom); final int heightSize = MeasureSpec.getSize(heightMeasureSpec); heightMeasureSpec = MeasureSpec.makeMeasureSpec( Math.min(h, heightSize), EXACTLY); } } } super.onMeasure(widthMeasureSpec, heightMeasureSpec); int width = getMeasuredWidth(); boolean measure = false; widthMeasureSpec = MeasureSpec.makeMeasureSpec(width, EXACTLY); if (!fixedWidth && widthMode == AT_MOST) { final TypedValue tv = isPortrait ? mMinWidthMinor : mMinWidthMajor; if (tv != null && tv.type != TypedValue.TYPE_NULL) { int min = 0; if (tv.type == TypedValue.TYPE_DIMENSION) { min = (int) tv.getDimension(metrics); } else if (tv.type == TypedValue.TYPE_FRACTION) { min = (int) tv.getFraction(metrics.widthPixels, metrics.widthPixels); } if (min > 0) { min -= (mDecorPadding.left + mDecorPadding.right); } if (width < min) { widthMeasureSpec = MeasureSpec.makeMeasureSpec(min, EXACTLY); measure = true; } } } if (measure) { super.onMeasure(widthMeasureSpec, heightMeasureSpec); } } public TypedValue getMinWidthMajor() { if (mMinWidthMajor == null) mMinWidthMajor = new TypedValue(); return mMinWidthMajor; } public TypedValue getMinWidthMinor() { if (mMinWidthMinor == null) mMinWidthMinor = new TypedValue(); return mMinWidthMinor; } public TypedValue getFixedWidthMajor() { if (mFixedWidthMajor == null) mFixedWidthMajor = new TypedValue(); return mFixedWidthMajor; } public TypedValue getFixedWidthMinor() { if (mFixedWidthMinor == null) mFixedWidthMinor = new TypedValue(); return mFixedWidthMinor; } public TypedValue getFixedHeightMajor() { if (mFixedHeightMajor == null) mFixedHeightMajor = new TypedValue(); return mFixedHeightMajor; } public TypedValue getFixedHeightMinor() { if (mFixedHeightMinor == null) mFixedHeightMinor = new TypedValue(); return mFixedHeightMinor; } @Override protected void onAttachedToWindow() { super.onAttachedToWindow(); if (mAttachListener != null) { mAttachListener.onAttachedFromWindow(); } } @Override protected void onDetachedFromWindow() { super.onDetachedFromWindow(); if (mAttachListener != null) { mAttachListener.onDetachedFromWindow(); } } }
AndroidX/androidx
appcompat/appcompat/src/main/java/androidx/appcompat/widget/ContentFrameLayout.java
Java
apache-2.0
7,578
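The interesting part of ContentFrameLayout.onMeasure above is how a fixed window dimension is resolved: the TypedValue may hold either an absolute dimension or a fraction of the screen, and the decor padding is then subtracted before clamping. The sketch below restates that arithmetic in plain Java, detached from the Android types; all names here are illustrative.

// Mirrors the width-resolution arithmetic in ContentFrameLayout.onMeasure:
// a window size attribute is either absolute pixels (TYPE_DIMENSION) or a
// fraction of the screen width (TYPE_FRACTION); decor padding is subtracted
// before clamping. Pure-Java sketch, not an Android API.
final class WindowWidthMath {
    static int resolveFixedWidth(boolean isFraction, float value, int screenWidthPx,
                                 int decorLeft, int decorRight, int measuredWidthPx) {
        int w = isFraction
                ? (int) (value * screenWidthPx) // fraction of the screen
                : (int) value;                  // absolute pixels
        if (w <= 0) {
            return measuredWidthPx; // attribute unset: keep the measured size
        }
        w -= (decorLeft + decorRight);       // window decor eats into the content area
        return Math.min(w, measuredWidthPx); // never exceed what the measure spec allows
    }

    public static void main(String[] args) {
        // 90% of a 1080px screen with 24px decor on each side, capped at 1080px.
        System.out.println(resolveFixedWidth(true, 0.9f, 1080, 24, 24, 1080)); // 924
    }
}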
/* Copyright (c) 2007 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Text; using System.Xml; using Google.GData.Client; using Google.GData.Extensions; using Google.GData.Extensions.Apps; namespace Google.GData.Apps { /// <summary> /// Feed API customization class for defining user account feed. /// </summary> public class UserFeed : AbstractFeed { /// <summary> /// Constructor /// </summary> /// <param name="uriBase">The uri for the user account feed.</param> /// <param name="iService">The user account service.</param> public UserFeed(Uri uriBase, IService iService) : base(uriBase, iService) { GAppsExtensions.AddProvisioningExtensions(this); } /// <summary> /// Overridden. Returns a new <code>UserEntry</code>. /// </summary> /// <returns>the new <code>UserEntry</code></returns> public override AtomEntry CreateFeedEntry() { return new UserEntry(); } } }
michael-jia-sage/libgoogle
src/gapps/userfeed.cs
C#
apache-2.0
1,602
/*
 * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */
package com.gemstone.gemfire.admin.internal;

import com.gemstone.gemfire.admin.*;
import com.gemstone.gemfire.distributed.internal.DM;
import com.gemstone.gemfire.distributed.internal.DistributionManager;
import com.gemstone.gemfire.internal.admin.GemFireVM;
import com.gemstone.gemfire.internal.admin.remote.RemoteApplicationVM;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;

/**
 * Implements the administrative interface to a cache server.
 *
 * @author David Whitlock
 * @since 3.5
 */
public class CacheServerImpl extends ManagedSystemMemberImpl implements CacheVm, CacheServer {

  private static final String GFXD_LAUNCHER_NAME = "gfxd";
  private static final String GFE_LAUNCHER_NAME = "cacheserver";
  private static final boolean isSqlFire = Boolean.getBoolean("isSqlFire");

  /** How many new <code>CacheServer</code>s have been created? */
  private static int newCacheServers = 0;

  ///////////////////////  Instance Fields  ///////////////////////

  /** The configuration object for this cache server */
  private final CacheServerConfigImpl config;

  /////////////////////////  Constructors  ////////////////////////

  /**
   * Creates a new <code>CacheServerImpl</code> that represents a
   * non-existing (unstarted) cache server in a given distributed
   * system.
   */
  public CacheServerImpl(AdminDistributedSystemImpl system, CacheVmConfig config) throws AdminException {
    super(system, config);
    this.config = (CacheServerConfigImpl) config;
    this.config.setManagedEntity(this);
  }

  /**
   * Creates a new <code>CacheServerImpl</code> that represents an
   * existing dedicated cache server in a given distributed system.
*/ public CacheServerImpl(AdminDistributedSystemImpl system, GemFireVM vm) throws AdminException { super(system, vm); this.config = new CacheServerConfigImpl(vm); } ////////////////////// Instance Methods ////////////////////// @Override public SystemMemberType getType() { return SystemMemberType.CACHE_VM; } public String getNewId() { synchronized (CacheServerImpl.class) { return "CacheVm" + (++newCacheServers); } } public void start() throws AdminException { if (!needToStart()) { return; } this.config.validate(); this.controller.start(this); this.config.setManagedEntity(this); } public void stop() { if (!needToStop()) { return; } this.controller.stop(this); // NOTE: DistributedSystem nodeLeft will then set this.manager to null this.config.setManagedEntity(null); } public boolean isRunning() { DM dm = ((AdminDistributedSystemImpl)getDistributedSystem()).getDistributionManager(); if(dm == null) { try { return this.controller.isRunning(this); } catch (IllegalStateException e) { return false; } } return ((DistributionManager)dm).getDistributionManagerIdsIncludingAdmin().contains(getDistributedMember()); } public CacheServerConfig getConfig() { return this.config; } public CacheVmConfig getVmConfig() { return this.config; } //////////////////////// Command execution //////////////////////// public ManagedEntityConfig getEntityConfig() { return this.getConfig(); } public String getEntityType() { // Fix bug 32564 return "Cache Vm"; } public String getStartCommand() { StringBuffer sb = new StringBuffer(); if (!isSqlFire) { sb.append(this.controller.getProductExecutable(this, GFE_LAUNCHER_NAME)); sb.append(" start -dir="); } else { sb.append(this.controller.getProductExecutable(this, GFXD_LAUNCHER_NAME)); sb.append(" server start -dir="); } sb.append(this.getConfig().getWorkingDirectory()); String file = this.getConfig().getCacheXMLFile(); if (file != null && file.length() > 0) { sb.append(" "); sb.append(com.gemstone.gemfire.distributed.internal.DistributionConfig.CACHE_XML_FILE_NAME); sb.append("="); sb.append(file); } String classpath = this.getConfig().getClassPath(); if (classpath != null && classpath.length() > 0) { sb.append(" -classpath="); sb.append(classpath); } appendConfiguration(sb); return sb.toString().trim(); } public String getStopCommand() { StringBuffer sb = new StringBuffer(); if (!isSqlFire) { sb.append(this.controller.getProductExecutable(this, GFE_LAUNCHER_NAME)); sb.append(" stop -dir="); } else { sb.append(this.controller.getProductExecutable(this, GFXD_LAUNCHER_NAME)); sb.append(" server stop -dir="); } sb.append(this.getConfig().getWorkingDirectory()); return sb.toString().trim(); } public String getIsRunningCommand() { StringBuffer sb = new StringBuffer(); if (!isSqlFire) { sb.append(this.controller.getProductExecutable(this, GFE_LAUNCHER_NAME)); sb.append(" status -dir="); } else { sb.append(this.controller.getProductExecutable(this, GFXD_LAUNCHER_NAME)); sb.append(" server status -dir="); } sb.append(this.getConfig().getWorkingDirectory()); return sb.toString().trim(); } /** * Find whether this server is primary for given client (durableClientId) * * @param durableClientId - * durable-id of the client * @return true if the server is primary for given client * * @since 5.6 */ public boolean isPrimaryForDurableClient(String durableClientId) { RemoteApplicationVM vm = (RemoteApplicationVM)this.getGemFireVM(); boolean isPrimary = false; if (vm != null) { isPrimary = vm.isPrimaryForDurableClient(durableClientId); } return isPrimary; } }
papicella/snappy-store
gemfire-core/src/main/java/com/gemstone/gemfire/admin/internal/CacheServerImpl.java
Java
apache-2.0
6,469
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.isis.viewer.restfulobjects.tck.domainobject.oid.collection; import org.jboss.resteasy.client.core.executors.URLConnectionClientExecutor; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.apache.isis.viewer.restfulobjects.applib.RestfulHttpMethod; import org.apache.isis.viewer.restfulobjects.applib.client.RestfulClient; import org.apache.isis.viewer.restfulobjects.applib.client.RestfulRequest; import org.apache.isis.viewer.restfulobjects.applib.client.RestfulResponse; import org.apache.isis.viewer.restfulobjects.applib.version.VersionRepresentation; import org.apache.isis.viewer.restfulobjects.tck.IsisWebServerRule; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; public class Get_whenDoesntExistOid_then_404 { @Rule public IsisWebServerRule webServerRule = new IsisWebServerRule(); private RestfulClient client; @Before public void setUp() throws Exception { client = webServerRule.getClient(new URLConnectionClientExecutor()); } @Test public void returns404() throws Exception { givenWhenThen("73", RestfulResponse.HttpStatusCode.OK); givenWhenThen("nonExistentOid", RestfulResponse.HttpStatusCode.NOT_FOUND); } private void givenWhenThen(String oid, RestfulResponse.HttpStatusCode statusCode1) { // given RestfulRequest request = client.createRequest(RestfulHttpMethod.GET, "objects/BSRL/" + oid + "/collections/visibleAndEditableCollection"); // when RestfulResponse<VersionRepresentation> restfulResponse = request.executeT(); assertThat(restfulResponse.getStatus(), is(statusCode1)); } }
peridotperiod/isis
tck/tck-viewer-restfulobjects/src/test/java/org/apache/isis/viewer/restfulobjects/tck/domainobject/oid/collection/Get_whenDoesntExistOid_then_404.java
Java
apache-2.0
2,570
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.

import copy
import logging
import os
import time

import pytest
import requests

from generic_test_code.common import (
    generic_correct_upstream_dest_test,
    generic_correct_upstream_request_test,
    generic_upstream_headers_verify_test,
    generic_verify_response_test,
    overridden_file_content,
    verify_header,
)
from util import GuardedSubprocess, LineBufferFilter, SearchCriteria

log = logging.getLogger(__name__)


class TestServiceEndpoint:
    # Majority of /service endpoint tests are done with generic tests framework
    def test_if_accept_encoding_header_is_removed_from_upstream_request(
            self, master_ar_process_perclass, mocker, valid_user_header):
        headers = copy.deepcopy(valid_user_header)
        headers['Accept-Encoding'] = 'gzip'

        generic_upstream_headers_verify_test(master_ar_process_perclass,
                                             headers,
                                             '/service/scheduler-alwaysthere/foo/bar/',
                                             assert_headers_absent=["Accept-Encoding"],
                                             )


class TestAgentEndpoint:
    # Tests for /agent endpoint routing are done in test_cache.py
    def test_if_accept_encoding_header_is_removed_from_upstream_request(
            self, master_ar_process_perclass, mocker, valid_user_header):
        headers = copy.deepcopy(valid_user_header)
        headers['Accept-Encoding'] = 'gzip'

        generic_upstream_headers_verify_test(master_ar_process_perclass,
                                             headers,
                                             '/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1/',
                                             assert_headers_absent=["Accept-Encoding"],
                                             )


class TestSystemAgentEndpoint:
    # Tests for /agent endpoint routing are done in test_cache.py
    def test_if_accept_encoding_header_is_removed_from_upstream_request(
            self, master_ar_process_perclass, mocker, valid_user_header):
        headers = copy.deepcopy(valid_user_header)
        headers['Accept-Encoding'] = 'gzip'

        generic_upstream_headers_verify_test(
            master_ar_process_perclass,
            headers,
            '/system/v1/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S0/logs',
            assert_headers_absent=["Accept-Encoding"],
        )


class TestHistoryServiceRouting:
    def test_if_invalid_cache_case_is_handled(
            self, nginx_class, valid_user_header, dns_server_mock):
        ar = nginx_class()
        url = ar.make_url_from_path('/dcos-history-service/foo/bar')

        with GuardedSubprocess(ar):
            # Unfortunately there are upstreams that use `leader.mesos` and
            # removing this entry too early will result in Nginx failing to start.
            # So we need to do it right after nginx starts, but before the first
            # cache update.
time.sleep(1) dns_server_mock.remove_dns_entry('leader.mesos.') resp = requests.get(url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 503 assert 'cache is invalid' in resp.text def test_if_leader_is_unknown_state_is_handled( self, nginx_class, valid_user_header): ar = nginx_class(host_ip=None) url = ar.make_url_from_path('/dcos-history-service/foo/bar') with GuardedSubprocess(ar): resp = requests.get(url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 503 assert 'mesos leader is unknown' in resp.text def test_if_leader_is_local_state_is_handled( self, nginx_class, valid_user_header): ar = nginx_class() path_sent = '/dcos-history-service/foo/bar?a1=GET+param&a2=foobarism' path_expected = '/foo/bar?a1=GET+param&a2=foobarism' with GuardedSubprocess(ar): generic_correct_upstream_dest_test( ar, valid_user_header, path_sent, "http://127.0.0.1:15055") generic_correct_upstream_request_test( ar, valid_user_header, path_sent, path_expected) generic_upstream_headers_verify_test( ar, valid_user_header, path_sent) def test_if_leader_is_nonlocal_state_is_handled( self, nginx_class, valid_user_header, dns_server_mock): ar = nginx_class() path_sent = '/dcos-history-service/foo/bar?a1=GET+param&a2=foobarism' path_expected = '/dcos-history-service/foo/bar?a1=GET+param&a2=foobarism' dns_server_mock.set_dns_entry('leader.mesos.', ip='127.0.0.3') with GuardedSubprocess(ar): generic_correct_upstream_dest_test( ar, valid_user_header, path_sent, "http://127.0.0.3:80") generic_correct_upstream_request_test( ar, valid_user_header, path_sent, path_expected) generic_upstream_headers_verify_test( ar, valid_user_header, path_sent, assert_headers={"DCOS-Forwarded": "true"}) def test_if_proxy_loop_is_handled( self, nginx_class, valid_user_header, dns_server_mock): ar = nginx_class() url = ar.make_url_from_path('/dcos-history-service/foo/bar') dns_server_mock.set_dns_entry('leader.mesos.', ip='127.0.0.3') h = valid_user_header h.update({"DCOS-Forwarded": "true"}) with GuardedSubprocess(ar): resp = requests.get(url, allow_redirects=False, headers=h) assert resp.status_code == 503 assert 'mesos leader is unknown' in resp.text class TestMetadata: @pytest.mark.parametrize("public_ip", ['1.2.3.4', "10.20.20.30"]) def test_if_public_ip_detection_works( self, master_ar_process_perclass, valid_user_header, public_ip): url = master_ar_process_perclass.make_url_from_path('/metadata') with overridden_file_content( '/usr/local/detect_ip_public_data.txt', "return ip {}".format(public_ip)): resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert resp_data['PUBLIC_IPV4'] == public_ip def test_if_clusterid_is_returned( self, master_ar_process_perclass, valid_user_header): url = master_ar_process_perclass.make_url_from_path('/metadata') resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert resp_data['CLUSTER_ID'] == 'fdb1d7c0-06cf-4d65-bb9b-a8920bb854ef' with overridden_file_content( '/var/lib/dcos/cluster-id', "fd21689b-4fe2-4779-8c30-9125149eef11"): resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert resp_data['CLUSTER_ID'] == "fd21689b-4fe2-4779-8c30-9125149eef11" def test_if_missing_clusterid_file_is_handled( self, master_ar_process_perclass, valid_user_header): url = master_ar_process_perclass.make_url_from_path('/metadata') 
        with overridden_file_content('/var/lib/dcos/cluster-id'):
            os.unlink('/var/lib/dcos/cluster-id')
            resp = requests.get(
                url,
                allow_redirects=False,
                headers=valid_user_header)

        assert resp.status_code == 200
        resp_data = resp.json()
        assert 'CLUSTER_ID' not in resp_data

    def test_if_public_ip_detect_script_failure_is_handled(
            self, master_ar_process_perclass, valid_user_header):
        url = master_ar_process_perclass.make_url_from_path('/metadata')

        filter_regexp = {
            r'Traceback \(most recent call last\):': SearchCriteria(1, True),
            (r"FileNotFoundError: \[Errno 2\] No such file or directory:"
             r" '/usr/local/detect_ip_public_data.txt'"): SearchCriteria(1, True),
        }
        lbf = LineBufferFilter(filter_regexp,
                               line_buffer=master_ar_process_perclass.stderr_line_buffer)

        with lbf, overridden_file_content('/usr/local/detect_ip_public_data.txt'):
            os.unlink('/usr/local/detect_ip_public_data.txt')
            resp = requests.get(
                url,
                allow_redirects=False,
                headers=valid_user_header)

        assert resp.status_code == 200
        assert lbf.extra_matches == {}
        resp_data = resp.json()
        assert resp_data['PUBLIC_IPV4'] == "127.0.0.1"

    @pytest.mark.xfail(reason="Needs some refactoring, tracked in DCOS_OSS-1007")
    def test_if_public_ip_detect_script_execution_is_timed_out(
            self, master_ar_process_perclass, valid_user_header):
        url = master_ar_process_perclass.make_url_from_path('/metadata')

        ts_start = time.time()
        with overridden_file_content('/usr/local/detect_ip_public_data.txt',
                                     "timeout 10"):
            requests.get(
                url,
                allow_redirects=False,
                headers=valid_user_header)
        ts_total = time.time() - ts_start

        assert ts_total < 10
        # TODO (prozlach): tune it a bit
        # assert resp.status_code == 200
        # resp_data = resp.json()
        # assert resp_data['PUBLIC_IPV4'] == "127.0.0.1"

    @pytest.mark.xfail(reason="Needs some refactoring, tracked in DCOS_OSS-1007")
    def test_if_public_ip_detect_script_nonzero_exit_status_is_handled(
            self, master_ar_process_perclass, valid_user_header):
        url = master_ar_process_perclass.make_url_from_path('/metadata')

        with overridden_file_content(
                '/usr/local/detect_ip_public_data.txt',
                "break with 1"):
            resp = requests.get(
                url,
                allow_redirects=False,
                headers=valid_user_header)

        assert resp.status_code == 200
        resp_data = resp.json()
        assert resp_data['PUBLIC_IPV4'] == "127.0.0.1"


class TestUiRoot:
    @pytest.mark.parametrize("uniq_content", ["(。◕‿‿◕。)", "plain text 1234"])
    @pytest.mark.parametrize("path", ["plain-ui-testfile.html",
                                      "nest1/nested-ui-testfile.html"])
    def test_if_ui_files_are_handled(
            self,
            master_ar_process_perclass,
            valid_user_header,
            uniq_content,
            path):
        url = master_ar_process_perclass.make_url_from_path('/{}'.format(path))

        with overridden_file_content(
                '/opt/mesosphere/active/dcos-ui/usr/{}'.format(path),
                uniq_content):
            resp = requests.get(
                url,
                allow_redirects=False,
                headers=valid_user_header)

        assert resp.status_code == 200
        resp.encoding = 'utf-8'
        assert resp.text == uniq_content
        verify_header(resp.headers.items(), 'X-Frame-Options', 'DENY')


class TestMisc:
    @pytest.mark.parametrize("content", ["{'data': '1234'}", "{'data': 'abcd'}"])
    def test_if_buildinfo_is_served(
            self, master_ar_process_perclass, valid_user_header, content):
        url = master_ar_process_perclass.make_url_from_path(
            '/pkgpanda/active.buildinfo.full.json')

        with overridden_file_content(
                '/opt/mesosphere/active.buildinfo.full.json',
                content):
            resp = requests.get(
                url,
                allow_redirects=False,
                headers=valid_user_header
            )

        assert resp.status_code == 200
        assert resp.text == content

    @pytest.mark.parametrize("content", ["{'data': '1234'}", "{'data': 'abcd'}"])
    def test_if_dcos_metadata_is_served(
self, master_ar_process_perclass, valid_user_header, content): url = master_ar_process_perclass.make_url_from_path( '/dcos-metadata/dcos-version.json') with overridden_file_content( '/opt/mesosphere/active/dcos-metadata/etc/dcos-version.json', content): resp = requests.get( url, allow_redirects=False, headers=valid_user_header ) assert resp.status_code == 200 assert resp.text == content def test_if_xaccel_header_is_passed_to_client_by_ar( self, master_ar_process_perclass, valid_user_header, mocker): accel_buff_header = {"X-Accel-Buffering": "TEST"} mocker.send_command( endpoint_id='http:///run/dcos/dcos-log.sock', func_name='set_response_headers', aux_data=accel_buff_header, ) generic_verify_response_test( master_ar_process_perclass, valid_user_header, '/system/v1/logs/foo/bar', assert_headers=accel_buff_header)
surdy/dcos
packages/adminrouter/extra/src/test-harness/tests/test_master.py
Python
apache-2.0
13,807
# encoding: UTF-8 require 'simplecov' require 'coveralls' SimpleCov.formatter = if ENV['CI'] Coveralls::SimpleCov::Formatter else SimpleCov::Formatter::HTMLFormatter end SimpleCov.start
sbagdadi-gpsw/custom-prometheus-client
spec/spec_helper.rb
Ruby
apache-2.0
203
<?php
require_once __DIR__ . "/http_server.php";

/*
class swoole_http_server extends swoole_server
{
    public function on($name, $cb) {} // accepts different event names than the tcp server's on()
}

class swoole_http_response
{
    public function cookie() {}
    public function rawcookie() {}
    public function status() {}
    public function gzip() {}
    public function header() {}
    public function write() {}
    public function end() {}
    public function sendfile() {}
}

class swoole_http_request
{
    public function rawcontent() {}
}
*/

$host = isset($argv[1]) ? $argv[1] : HTTP_SERVER_HOST;
$port = isset($argv[2]) ? $argv[2] : HTTP_SERVER_PORT;

(new HttpServer($host, $port, true))->start();
youzan/zan
php-test/apitest/swoole_http_server/simple_https_server.php
PHP
apache-2.0
704
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/rds/model/CreateDBParameterGroupRequest.h> #include <aws/core/utils/StringUtils.h> #include <aws/core/utils/memory/stl/AWSStringStream.h> using namespace Aws::RDS::Model; using namespace Aws::Utils; CreateDBParameterGroupRequest::CreateDBParameterGroupRequest() : m_dBParameterGroupNameHasBeenSet(false), m_dBParameterGroupFamilyHasBeenSet(false), m_descriptionHasBeenSet(false), m_tagsHasBeenSet(false) { } Aws::String CreateDBParameterGroupRequest::SerializePayload() const { Aws::StringStream ss; ss << "Action=CreateDBParameterGroup&"; if(m_dBParameterGroupNameHasBeenSet) { ss << "DBParameterGroupName=" << StringUtils::URLEncode(m_dBParameterGroupName.c_str()) << "&"; } if(m_dBParameterGroupFamilyHasBeenSet) { ss << "DBParameterGroupFamily=" << StringUtils::URLEncode(m_dBParameterGroupFamily.c_str()) << "&"; } if(m_descriptionHasBeenSet) { ss << "Description=" << StringUtils::URLEncode(m_description.c_str()) << "&"; } if(m_tagsHasBeenSet) { unsigned tagsCount = 1; for(auto& item : m_tags) { item.OutputToStream(ss, "Tags.member.", tagsCount, ""); tagsCount++; } } ss << "Version=2014-10-31"; return ss.str(); } void CreateDBParameterGroupRequest::DumpBodyToUrl(Aws::Http::URI& uri ) const { uri.SetQueryString(SerializePayload()); }
jt70471/aws-sdk-cpp
aws-cpp-sdk-rds/source/model/CreateDBParameterGroupRequest.cpp
C++
apache-2.0
1,483
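SerializePayload above builds a classic AWS Query-style body: the Action first, then URL-encoded key=value pairs joined by '&', and the Version last. Here is a small Java sketch of the same idea using the standard URLEncoder; the parameter names mirror the request above, but the helper class itself is illustrative, not an AWS SDK API.

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the Query-protocol serialization performed by
// SerializePayload above: Action first, then URL-encoded key=value pairs,
// then the API version.
final class QuerySerializer {
    static String serialize(String action, String version, Map<String, String> params)
            throws UnsupportedEncodingException {
        StringBuilder sb = new StringBuilder("Action=").append(action).append('&');
        for (Map.Entry<String, String> e : params.entrySet()) {
            sb.append(e.getKey()).append('=')
              .append(URLEncoder.encode(e.getValue(), "UTF-8")).append('&');
        }
        return sb.append("Version=").append(version).toString();
    }

    public static void main(String[] args) throws UnsupportedEncodingException {
        Map<String, String> p = new LinkedHashMap<>();
        p.put("DBParameterGroupName", "my-group");
        p.put("Description", "test group");
        System.out.println(serialize("CreateDBParameterGroup", "2014-10-31", p));
        // Action=CreateDBParameterGroup&DBParameterGroupName=my-group&Description=test+group&Version=2014-10-31
    }
}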
var __extends = this.__extends || function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } __.prototype = b.prototype; d.prototype = new __(); }; var S18 = (function (_super) { __extends(S18, _super); function S18() { _super.apply(this, arguments); } return S18; })(S18); (new S18()).blah;
hippich/typescript
tests/baselines/reference/recursiveBaseCheck6.js
JavaScript
apache-2.0
400
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.collections4.iterators; import java.util.Iterator; import java.util.NoSuchElementException; import org.apache.commons.collections4.Predicate; /** * Decorates another {@link Iterator} using a predicate to filter elements. * <p> * This iterator decorates the underlying iterator, only allowing through * those elements that match the specified {@link Predicate Predicate}. * * @since 1.0 * @version $Id: FilterIterator.java 1477802 2013-04-30 20:01:28Z tn $ */ public class FilterIterator<E> implements Iterator<E> { /** The iterator being used */ private Iterator<? extends E> iterator; /** The predicate being used */ private Predicate<? super E> predicate; /** The next object in the iteration */ private E nextObject; /** Whether the next object has been calculated yet */ private boolean nextObjectSet = false; //----------------------------------------------------------------------- /** * Constructs a new <code>FilterIterator</code> that will not function * until {@link #setIterator(Iterator) setIterator} is invoked. */ public FilterIterator() { super(); } /** * Constructs a new <code>FilterIterator</code> that will not function * until {@link #setPredicate(Predicate) setPredicate} is invoked. * * @param iterator the iterator to use */ public FilterIterator(final Iterator<? extends E> iterator) { super(); this.iterator = iterator; } /** * Constructs a new <code>FilterIterator</code> that will use the * given iterator and predicate. * * @param iterator the iterator to use * @param predicate the predicate to use */ public FilterIterator(final Iterator<? extends E> iterator, final Predicate<? super E> predicate) { super(); this.iterator = iterator; this.predicate = predicate; } //----------------------------------------------------------------------- /** * Returns true if the underlying iterator contains an object that * matches the predicate. * * @return true if there is another object that matches the predicate * @throws NullPointerException if either the iterator or predicate are null */ public boolean hasNext() { return nextObjectSet || setNextObject(); } /** * Returns the next object that matches the predicate. * * @return the next object which matches the given predicate * @throws NullPointerException if either the iterator or predicate are null * @throws NoSuchElementException if there are no more elements that * match the predicate */ public E next() { if (!nextObjectSet) { if (!setNextObject()) { throw new NoSuchElementException(); } } nextObjectSet = false; return nextObject; } /** * Removes from the underlying collection of the base iterator the last * element returned by this iterator. 
     * This method can only be called
     * if <code>next()</code> was called, but not after
     * <code>hasNext()</code>, because the <code>hasNext()</code> call
     * changes the base iterator.
     *
     * @throws IllegalStateException if <code>hasNext()</code> has already
     *  been called.
     */
    public void remove() {
        if (nextObjectSet) {
            throw new IllegalStateException("remove() cannot be called");
        }
        iterator.remove();
    }

    //-----------------------------------------------------------------------
    /**
     * Gets the iterator this iterator is using.
     *
     * @return the iterator
     */
    public Iterator<? extends E> getIterator() {
        return iterator;
    }

    /**
     * Sets the iterator for this iterator to use.
     * If iteration has started, this effectively resets the iterator.
     *
     * @param iterator the iterator to use
     */
    public void setIterator(final Iterator<? extends E> iterator) {
        this.iterator = iterator;
        nextObject = null;
        nextObjectSet = false;
    }

    //-----------------------------------------------------------------------
    /**
     * Gets the predicate this iterator is using.
     *
     * @return the predicate
     */
    public Predicate<? super E> getPredicate() {
        return predicate;
    }

    /**
     * Sets the predicate for this iterator to use.
     *
     * @param predicate the predicate to use
     */
    public void setPredicate(final Predicate<? super E> predicate) {
        this.predicate = predicate;
        nextObject = null;
        nextObjectSet = false;
    }

    //-----------------------------------------------------------------------
    /**
     * Set nextObject to the next object. If there are no more
     * objects, then return false. Otherwise, return true.
     */
    private boolean setNextObject() {
        while (iterator.hasNext()) {
            final E object = iterator.next();
            if (predicate.evaluate(object)) {
                nextObject = object;
                nextObjectSet = true;
                return true;
            }
        }
        return false;
    }

}
krivachy/compgs03_mutation_testing
src/main/java/org/apache/commons/collections4/iterators/FilterIterator.java
Java
apache-2.0
6,056
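Since FilterIterator is a small reusable decorator, a usage example helps make the lazy-filtering behavior concrete. This sketch assumes only commons-collections4 on the classpath; the example class name is illustrative.

import java.util.Arrays;
import java.util.Iterator;

import org.apache.commons.collections4.Predicate;
import org.apache.commons.collections4.iterators.FilterIterator;

// Minimal usage of the FilterIterator above: the predicate is consulted
// lazily as the wrapped iterator advances, keeping only accepted elements.
public class FilterIteratorExample {
    public static void main(String[] args) {
        Iterator<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6).iterator();
        Predicate<Integer> isEven = new Predicate<Integer>() {
            public boolean evaluate(Integer n) {
                return n % 2 == 0; // keep even values only
            }
        };
        Iterator<Integer> evens = new FilterIterator<Integer>(numbers, isEven);
        while (evens.hasNext()) {
            System.out.println(evens.next()); // prints 2, 4, 6
        }
    }
}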
/* * Copyright 2000-2013 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.externalSystem.service.settings; import com.intellij.openapi.externalSystem.ExternalSystemManager; import com.intellij.openapi.externalSystem.model.ProjectSystemId; import com.intellij.openapi.externalSystem.settings.AbstractExternalSystemSettings; import com.intellij.openapi.externalSystem.settings.ExternalProjectSettings; import com.intellij.openapi.externalSystem.settings.ExternalSystemSettingsListener; import com.intellij.openapi.externalSystem.util.*; import com.intellij.openapi.fileChooser.FileChooserDescriptor; import com.intellij.openapi.options.ConfigurationException; import com.intellij.openapi.project.Project; import com.intellij.openapi.ui.TextComponentAccessor; import com.intellij.openapi.ui.TextFieldWithBrowseButton; import com.intellij.openapi.util.text.StringUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import javax.swing.event.DocumentEvent; import javax.swing.event.DocumentListener; import java.awt.*; /** * A control which knows how to manage settings of external project being imported. * * @author Denis Zhdanov * @since 4/30/13 2:33 PM */ public abstract class AbstractImportFromExternalSystemControl< ProjectSettings extends ExternalProjectSettings, L extends ExternalSystemSettingsListener<ProjectSettings>, SystemSettings extends AbstractExternalSystemSettings<SystemSettings, ProjectSettings, L>> { @NotNull private final SystemSettings mySystemSettings; @NotNull private final ProjectSettings myProjectSettings; @NotNull private final PaintAwarePanel myComponent = new PaintAwarePanel(new GridBagLayout()); @NotNull private final TextFieldWithBrowseButton myLinkedProjectPathField = new TextFieldWithBrowseButton(); @NotNull private final ExternalSystemSettingsControl<ProjectSettings> myProjectSettingsControl; @NotNull private final ProjectSystemId myExternalSystemId; @Nullable private final ExternalSystemSettingsControl<SystemSettings> mySystemSettingsControl; @Nullable Project myCurrentProject; @SuppressWarnings("AbstractMethodCallInConstructor") protected AbstractImportFromExternalSystemControl(@NotNull ProjectSystemId externalSystemId, @NotNull SystemSettings systemSettings, @NotNull ProjectSettings projectSettings) { myExternalSystemId = externalSystemId; mySystemSettings = systemSettings; myProjectSettings = projectSettings; myProjectSettingsControl = createProjectSettingsControl(projectSettings); mySystemSettingsControl = createSystemSettingsControl(systemSettings); JLabel linkedProjectPathLabel = new JLabel(ExternalSystemBundle.message("settings.label.select.project", externalSystemId.getReadableName())); ExternalSystemManager<?, ?, ?, ?, ?> manager = ExternalSystemApiUtil.getManager(externalSystemId); assert manager != null; FileChooserDescriptor fileChooserDescriptor = manager.getExternalProjectDescriptor(); myLinkedProjectPathField.addBrowseFolderListener("", ExternalSystemBundle 
.message("settings.label.select.project", externalSystemId.getReadableName()),
                                                 null,
                                                 fileChooserDescriptor,
                                                 TextComponentAccessor.TEXT_FIELD_WHOLE_TEXT,
                                                 false);
    myLinkedProjectPathField.getTextField().getDocument().addDocumentListener(new DocumentListener() {
      @Override
      public void insertUpdate(DocumentEvent e) {
        onLinkedProjectPathChange(myLinkedProjectPathField.getText());
      }

      @Override
      public void removeUpdate(DocumentEvent e) {
        onLinkedProjectPathChange(myLinkedProjectPathField.getText());
      }

      @Override
      public void changedUpdate(DocumentEvent e) {
        onLinkedProjectPathChange(myLinkedProjectPathField.getText());
      }
    });

    myComponent.add(linkedProjectPathLabel, ExternalSystemUiUtil.getLabelConstraints(0));
    myComponent.add(myLinkedProjectPathField, ExternalSystemUiUtil.getFillLineConstraints(0));

    myProjectSettingsControl.fillUi(myComponent, 0);
    if (mySystemSettingsControl != null) {
      mySystemSettingsControl.fillUi(myComponent, 0);
    }
    ExternalSystemUiUtil.fillBottom(myComponent);
  }

  /**
   * This control is assumed to be used in at least two circumstances:
   * <pre>
   * <ul>
   *   <li>a new ide project is being created on the external project basis;</li>
   *   <li>new ide module(s) are being added to an existing ide project on the external project basis;</li>
   * </ul>
   * </pre>
   * We need to differentiate these situations, for example, we don't want to allow linking an external project to an existing ide
   * project if it's already linked.
   * <p/>
   * This property helps us to achieve that - when an ide project is defined, that means that new modules are being imported
   * to that ide project from the external project; when this property is <code>null</code> that means that a new ide project is being
   * created on the target external project basis.
   *
   * @param currentProject current ide project (if any)
   */
  public void setCurrentProject(@Nullable Project currentProject) {
    myCurrentProject = currentProject;
  }

  protected abstract void onLinkedProjectPathChange(@NotNull String path);

  /**
   * Creates a control for managing given project settings.
   *
   * @param settings target external project settings
   * @return control for managing given project settings
   */
  @NotNull
  protected abstract ExternalSystemSettingsControl<ProjectSettings> createProjectSettingsControl(@NotNull ProjectSettings settings);

  /**
   * Creates a control for managing given system-level settings (if any).
* * @param settings target system settings * @return a control for managing given system-level settings; * <code>null</code> if current external system doesn't have system-level settings (only project-level settings) */ @Nullable protected abstract ExternalSystemSettingsControl<SystemSettings> createSystemSettingsControl(@NotNull SystemSettings settings); @NotNull public JComponent getComponent() { return myComponent; } @NotNull public ExternalSystemSettingsControl<ProjectSettings> getProjectSettingsControl() { return myProjectSettingsControl; } public void setLinkedProjectPath(@NotNull String path) { myProjectSettings.setExternalProjectPath(path); myLinkedProjectPathField.setText(path); } @NotNull public SystemSettings getSystemSettings() { return mySystemSettings; } @NotNull public ProjectSettings getProjectSettings() { return myProjectSettings; } public void reset() { myLinkedProjectPathField.setText(""); myProjectSettingsControl.reset(); if (mySystemSettingsControl != null) { mySystemSettingsControl.reset(); } } public void apply() throws ConfigurationException { String linkedProjectPath = myLinkedProjectPathField.getText(); if (StringUtil.isEmpty(linkedProjectPath)) { throw new ConfigurationException(ExternalSystemBundle.message("error.project.undefined")); } else if (myCurrentProject != null) { ExternalSystemManager<?, ?, ?, ?, ?> manager = ExternalSystemApiUtil.getManager(myExternalSystemId); assert manager != null; AbstractExternalSystemSettings<?, ?,?> settings = manager.getSettingsProvider().fun(myCurrentProject); if (settings.getLinkedProjectSettings(linkedProjectPath) != null) { throw new ConfigurationException(ExternalSystemBundle.message("error.project.already.registered")); } } //noinspection ConstantConditions myProjectSettings.setExternalProjectPath(ExternalSystemApiUtil.normalizePath(linkedProjectPath)); myProjectSettingsControl.validate(myProjectSettings); myProjectSettingsControl.apply(myProjectSettings); if (mySystemSettingsControl != null) { mySystemSettingsControl.validate(mySystemSettings); mySystemSettingsControl.apply(mySystemSettings); } } }
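// A minimal subclassing sketch, assuming hypothetical MyProjectSettings, MyListener and
// MySystemSettings types plus a MY_SYSTEM_ID constant (none of them from this file); it only
// illustrates which hooks a concrete external system is expected to implement.
//
// class MyImportControl extends AbstractImportFromExternalSystemControl<
//     MyProjectSettings, MyListener, MySystemSettings> {
//
//   MyImportControl(@NotNull MySystemSettings system, @NotNull MyProjectSettings project) {
//     super(MY_SYSTEM_ID, system, project);
//   }
//
//   @Override
//   protected void onLinkedProjectPathChange(@NotNull String path) {
//     // e.g. re-validate the path or pre-fill dependent fields
//   }
//
//   @NotNull
//   @Override
//   protected ExternalSystemSettingsControl<MyProjectSettings> createProjectSettingsControl(
//       @NotNull MyProjectSettings settings) {
//     return new MyProjectSettingsControl(settings);
//   }
//
//   @Nullable
//   @Override
//   protected ExternalSystemSettingsControl<MySystemSettings> createSystemSettingsControl(
//       @NotNull MySystemSettings settings) {
//     return null; // a system without system-level settings UI may simply return null here
//   }
// }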
romankagan/DDBWorkbench
platform/external-system-impl/src/com/intellij/openapi/externalSystem/service/settings/AbstractImportFromExternalSystemControl.java
Java
apache-2.0
9,041
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2005-2019, NumPy Developers. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of the NumPy Developers nor the names of any * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*! * \file np_einsum_op.cc * \brief CPU Implementation of numpy-compatible einsum */ #include "./np_einsum_op-inl.h" #include <cstdlib> #include <cstring> namespace mxnet { namespace op { inline std::vector<std::string> _parse_einsum_input(std::string subscripts, const mxnet::ShapeVector& shapes) { const std::string einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; std::bitset<MAXAXIS> einsum_symbols_set; for (const char& c : einsum_symbols) { einsum_symbols_set.set(c); } CHECK_NE(shapes.size(), 0U) << "No input operands"; auto end_pos = std::remove(subscripts.begin(), subscripts.end(), ' '); subscripts.erase(end_pos, subscripts.end()); // Ensure all characters are valid for (const char& c : subscripts) { if (c == '.' 
|| c == ',' || c == '-' || c == '>') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; } // Check for proper "->" if (subscripts.find('-') != std::string::npos || subscripts.find('>') != std::string::npos) { bool invalid = (std::count(subscripts.begin(), subscripts.end(), '-') > 1 || std::count(subscripts.begin(), subscripts.end(), '>') > 1); CHECK(!invalid && _count_substring(subscripts, "->") == 1) << "Subscripts can only contain one '->'."; } // Parse ellipses if (subscripts.find('.') != std::string::npos) { std::string used = subscripts; used.erase(std::remove_if(used.begin(), used.end(), [](const char& c){return c == '.' || c == ',' || c == '-' || c == '>';}), used.end()); std::bitset<MAXAXIS> used_set = str2set(used); std::string ellipse_inds = ""; for (const char& c : einsum_symbols) { if (!used_set.test(static_cast<int>(c))) { ellipse_inds.append(1, c); } } int longest = 0; std::string input_tmp, output_sub; std::vector<std::string> split_subscripts; bool out_sub; if (subscripts.find("->") != std::string::npos) { std::vector<std::string> tmp = split(subscripts, "->"); input_tmp = tmp[0]; output_sub = tmp[1]; split_subscripts = split(input_tmp, ","); out_sub = true; } else { split_subscripts = split(subscripts, ","); out_sub = false; } size_t size_split_subscripts = split_subscripts.size(); subscripts = ""; for (size_t i = 0; i < size_split_subscripts; ++i) { const std::string& sub = split_subscripts[i]; if (sub.find('.') != std::string::npos) { CHECK_EQ(std::count(sub.begin(), sub.end(), '.'), 3) << "Invalid Ellipses"; CHECK_EQ(_count_substring(sub, "..."), 1) << "Invalid Ellipses"; // Take into account numerical values int ellipse_count = 0; if (shapes[i].ndim() == 0) { ellipse_count = 0; } else { ellipse_count = std::max(shapes[i].ndim(), 1); ellipse_count -= sub.length() - 3; } if (ellipse_count > longest) { longest = ellipse_count; } CHECK_GE(ellipse_count, 0) << "Ellipses lengths do not match."; if (ellipse_count == 0) { split_subscripts[i].erase(sub.find("..."), 3); } else { std::string rep_inds = ellipse_inds.substr(ellipse_inds.length() - ellipse_count); split_subscripts[i].replace(sub.find("..."), 3, rep_inds); } } subscripts += split_subscripts[i]; if (i + 1 < size_split_subscripts) { subscripts += ","; } } std::string out_ellipse; if (longest == 0) { out_ellipse = ""; } else { out_ellipse = ellipse_inds.substr(ellipse_inds.length() - longest); } if (out_sub) { output_sub.replace(output_sub.find("..."), 3, out_ellipse); subscripts += "->" + output_sub; } else { // Special care for outputless ellipses std::bitset<MAXAXIS> out_ellipse_set = str2set(out_ellipse); std::string tmp_subscripts = subscripts, output_subscript = ""; size_t len_tmp_subscripts = tmp_subscripts.length(); std::sort(tmp_subscripts.begin(), tmp_subscripts.end()); for (size_t i = 0; i < len_tmp_subscripts; ++i) { const char& c = tmp_subscripts[i]; if (c == ',') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; if ((i == 0 || tmp_subscripts[i - 1] != c) && (i == len_tmp_subscripts - 1 || tmp_subscripts[i + 1] != c) && !out_ellipse_set.test(c)) { output_subscript.append(1, c); } } subscripts += "->" + out_ellipse + output_subscript; } } // Build output string if does not exist std::vector<std::string> ret(2); if (subscripts.find("->") != std::string::npos) { ret = split(subscripts, "->"); } else { ret[0] = subscripts; ret[1] = ""; // Build output subscripts std::string tmp_subscripts = subscripts; size_t 
len_tmp_subscripts = tmp_subscripts.length(); std::sort(tmp_subscripts.begin(), tmp_subscripts.end()); for (size_t i = 0; i < len_tmp_subscripts; ++i) { const char& c = tmp_subscripts[i]; if (c == ',') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; if ((i == 0 || tmp_subscripts[i - 1] != c) && (i == len_tmp_subscripts - 1 || tmp_subscripts[i + 1] != c)) { ret[1].append(1, c); } } } // Make sure output subscripts are in the input std::bitset<MAXAXIS> input_subscripts_set = str2set(ret[0]); for (const char& c : ret[1]) { CHECK(input_subscripts_set.test(c)) << "Output character " << c << " did not appear in the input"; } // Make sure the number of operands is equal to the number of terms CHECK_EQ(std::count(ret[0].begin(), ret[0].end(), ',') + 1, shapes.size()) << "Number of einsum subscripts must be equal to the " << "number of operands."; return ret; } bool NumpyEinsumShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const NumpyEinsumParam &param = nnvm::get<NumpyEinsumParam>(attrs.parsed); const std::string& subscripts = param.subscripts; int num_args = param.num_args; CHECK_EQ(in_attrs->size(), num_args); CHECK_EQ(out_attrs->size(), 1U); for (int i = 0; i < num_args; i++) { if (!shape_is_known(in_attrs->at(i))) { return false; } } // Parsing std::vector<std::string> parsed_subscripts = _parse_einsum_input(subscripts, *in_attrs); // Build a few useful list and sets std::vector<std::string> input_list = split(parsed_subscripts[0], ","); size_t isize = input_list.size(); // Get length of each unique dimension and ensure all dimensions are correct dim_t dimension_dict[MAXAXIS]; memset(dimension_dict, -1, sizeof(dimension_dict)); for (size_t i = 0; i < isize; ++i) { const std::string& term = input_list[i]; const TShape& sh = in_attrs->at(i); CHECK_EQ(sh.ndim(), term.length()) << "Einstein sum subscript " << input_list[i] << " does not contain the " << "correct number of indices for operand " << i << "."; size_t len_term = term.length(); for (size_t j = 0; j < len_term; ++j) { dim_t dim = sh[j]; const char& c = term[j]; if (dimension_dict[static_cast<int>(c)] != -1) { // For broadcasting cases we always want the largest dim size if (dimension_dict[static_cast<int>(c)] == 1) { dimension_dict[static_cast<int>(c)] = dim; } CHECK(dim == 1 || dim == dimension_dict[static_cast<int>(c)]) << "Size of label '" << c << "' for operand " << i << " (" << dim << ") does not match previous terms (" << dimension_dict[static_cast<int>(c)] << ")."; } else { dimension_dict[static_cast<int>(c)] = dim; } } } // Get oshape const std::string& output_str = parsed_subscripts[1]; size_t odim = output_str.size(); TShape oshape(odim, -1); for (size_t i = 0; i < odim; ++i) { oshape[i] = dimension_dict[static_cast<int>(output_str[i])]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); size_t lim = static_cast<size_t>(std::numeric_limits<index_t>::max()); for (int i = 0; i < num_args; ++i) { CHECK_LE(in_attrs->at(i).Size(), lim) << "Size of operand " << i << " exceeds the maximum index." << " Try setting `USE_INT64_TENSOR_SIZE`."; } CHECK_LE(oshape.Size(), lim) << "Size of output" << " exceeds the maximum index." 
<< " Try setting `USE_INT64_TENSOR_SIZE`."; return shape_is_known(oshape); } OpStatePtr CreateEinsumState(const NodeAttrs& attrs, Context ctx, const mxnet::ShapeVector& in_shapes, const std::vector<int>& in_types) { const NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return OpStatePtr::Create<EinsumOp>(param.num_args, param.optimize, param.subscripts); } DMLC_REGISTER_PARAMETER(NumpyEinsumParam); NNVM_REGISTER_OP(_npi_einsum) .describe(R"doc()doc" ADD_FILELINE) .set_attr_parser(ParamParser<NumpyEinsumParam>) .set_num_inputs([](const nnvm::NodeAttrs& attrs) { const NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return static_cast<uint32_t>(param.num_args); }) .set_num_outputs(1) .set_attr<std::string>("key_var_num_args", "num_args") .set_attr<nnvm::FListInputNames>("FListInputNames", [](const nnvm::NodeAttrs& attrs) { int num_args = dmlc::get<NumpyEinsumParam>(attrs.parsed).num_args; std::vector<std::string> ret; ret.reserve(num_args); for (int i = 0; i < num_args; i++) { ret.push_back(std::string("arg") + std::to_string(i)); } return ret; }) .set_attr<mxnet::FInferShape>("FInferShape", NumpyEinsumShape) .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<-1, 1>) .set_attr<FCreateOpState>("FCreateOpState", CreateEinsumState) .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs) { return std::vector<ResourceRequest>(1, ResourceRequest::kTempSpace); }) .set_attr<FStatefulCompute>("FStatefulCompute<cpu>", NumpyEinsumForward<cpu>) .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_einsum"}) .add_argument("data", "NDArray-or-Symbol[]", "List of einsum operands") .add_arguments(NumpyEinsumParam::__FIELDS__()); NNVM_REGISTER_OP(_backward_npi_einsum) .set_attr_parser(ParamParser<NumpyEinsumParam>) .set_num_inputs([](const nnvm::NodeAttrs& attrs) { const NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return static_cast<uint32_t>(param.num_args + 1); }) .set_num_outputs([](const nnvm::NodeAttrs& attrs) { const NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return static_cast<uint32_t>(param.num_args); }) .set_attr<bool>("TIsLayerOpBackward", true) .set_attr<nnvm::TIsBackward>("TIsBackward", true) .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs) { return std::vector<ResourceRequest>(1, ResourceRequest::kTempSpace); }) .set_attr<FStatefulCompute>("FStatefulCompute<cpu>", NumpyEinsumBackward<cpu>); } // namespace op } // namespace mxnet
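// A hedged illustration of _parse_einsum_input's contract: for subscripts "ij,jk" with
// operand shapes (2,3) and (3,4), no "->" is present, so the implicit output is built from
// the labels that occur exactly once, in sorted order, and the function returns
// {"ij,jk", "ik"}. The explicit form "ij,jk->ik" splits on "->" and yields the same pair.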
yajiedesign/mxnet
src/operator/numpy/np_einsum_op.cc
C++
apache-2.0
14,145
/* * Copyright 2014 The Closure Compiler Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.javascript.jscomp; import static com.google.common.base.MoreObjects.firstNonNull; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.javascript.jscomp.parsing.TypeTransformationParser; import com.google.javascript.jscomp.parsing.TypeTransformationParser.Keywords; import com.google.javascript.rhino.JSDocInfo; import com.google.javascript.rhino.JSTypeExpression; import com.google.javascript.rhino.Node; import com.google.javascript.rhino.jstype.JSType; import com.google.javascript.rhino.jstype.JSTypeNative; import com.google.javascript.rhino.jstype.JSTypeRegistry; import com.google.javascript.rhino.jstype.ObjectType; import com.google.javascript.rhino.jstype.StaticTypedScope; import com.google.javascript.rhino.jstype.StaticTypedSlot; import java.util.Arrays; import java.util.Collection; import java.util.LinkedHashMap; import java.util.Map; /** * A class for processing type transformation expressions * * @author lpino@google.com (Luis Fernando Pino Duque) */ class TypeTransformation { private static final String VIRTUAL_FILE = "<TypeTransformation.java>"; static final DiagnosticType UNKNOWN_TYPEVAR = DiagnosticType.warning("TYPEVAR_UNDEFINED", "Reference to an unknown type variable {0}"); static final DiagnosticType UNKNOWN_STRVAR = DiagnosticType.warning("UNKNOWN_STRVAR", "Reference to an unknown string variable {0}"); static final DiagnosticType UNKNOWN_TYPENAME = DiagnosticType.warning("TYPENAME_UNDEFINED", "Reference to an unknown type name {0}"); static final DiagnosticType BASETYPE_INVALID = DiagnosticType.warning("BASETYPE_INVALID", "The type {0} cannot be templatized"); static final DiagnosticType TEMPTYPE_INVALID = DiagnosticType.warning("TEMPTYPE_INVALID", "Expected templatized type in {0} found {1}"); static final DiagnosticType INDEX_OUTOFBOUNDS = DiagnosticType.warning("INDEX_OUTOFBOUNDS", "Index out of bounds in templateTypeOf: expected a number less than {0}, found {1}"); static final DiagnosticType DUPLICATE_VARIABLE = DiagnosticType.warning("DUPLICATE_VARIABLE", "The variable {0} is already defined"); // This warning is never exercised. 
static final DiagnosticType UNKNOWN_NAMEVAR = DiagnosticType.warning("UNKNOWN_NAMEVAR", "Reference to an unknown name variable {0}"); static final DiagnosticType RECTYPE_INVALID = DiagnosticType.warning("RECTYPE_INVALID", "The first parameter of a maprecord must be a record type, " + "found {0}"); static final DiagnosticType MAPRECORD_BODY_INVALID = DiagnosticType.warning("MAPRECORD_BODY_INVALID", "The body of a maprecord function must evaluate to a record type or " + "a no type, found {0}"); static final DiagnosticType VAR_UNDEFINED = DiagnosticType.warning("VAR_UNDEFINED", "Variable {0} is undefined in the scope"); static final DiagnosticType INVALID_CTOR = DiagnosticType.warning("INVALID_CTOR", "Expected a constructor type, found {0}"); static final DiagnosticType RECPARAM_INVALID = DiagnosticType.warning("RECPARAM_INVALID", "Expected a record type, found {0}"); static final DiagnosticType PROPTYPE_INVALID = DiagnosticType.warning("PROPTYPE_INVALID", "Expected object type, found {0}"); private final AbstractCompiler compiler; private final JSTypeRegistry registry; private final StaticTypedScope typeEnv; /** * A helper class for holding the information about the type variables * and the name variables in maprecord expressions */ private static class NameResolver { ImmutableMap<String, JSType> typeVars; ImmutableMap<String, String> nameVars; NameResolver(ImmutableMap<String, JSType> typeVars, ImmutableMap<String, String> nameVars) { this.typeVars = typeVars; this.nameVars = nameVars; } } @SuppressWarnings("unchecked") TypeTransformation(AbstractCompiler compiler, StaticTypedScope typeEnv) { this.compiler = compiler; this.registry = compiler.getTypeRegistry(); this.typeEnv = typeEnv; } private boolean isTypeVar(Node n) { return n.isName(); } private boolean isTypeName(Node n) { return n.isString(); } private boolean isBooleanOperation(Node n) { return n.isAnd() || n.isOr() || n.isNot(); } private Keywords nameToKeyword(String s) { return TypeTransformationParser.Keywords.valueOf(s.toUpperCase()); } private JSType getType(String typeName) { JSType type = registry.getType(typeEnv, typeName); if (type != null) { return type; } StaticTypedSlot slot = typeEnv.getSlot(typeName); type = slot != null ? slot.getType() : null; if (type != null) { if (type.isConstructor() || type.isInterface()) { return type.toMaybeFunctionType().getInstanceType().getRawType(); } if (type.isEnumElementType()) { return type.getEnumeratedTypeOfEnumElement(); } return type; } JSDocInfo jsdoc = slot == null ? null : slot.getJSDocInfo(); if (jsdoc != null && jsdoc.hasTypedefType()) { return this.registry.evaluateTypeExpression(jsdoc.getTypedefType(), typeEnv); } return null; } private JSType getUnknownType() { return registry.getNativeObjectType(JSTypeNative.UNKNOWN_TYPE); } private JSType getNoType() { return registry.getNativeObjectType(JSTypeNative.NO_TYPE); } private JSType getAllType() { return registry.getNativeType(JSTypeNative.ALL_TYPE); } private JSType getObjectType() { return registry.getNativeType(JSTypeNative.OBJECT_TYPE); } private JSType createUnionType(JSType[] variants) { return registry.createUnionType(Arrays.asList(variants)); } private JSType createTemplatizedType(ObjectType baseType, JSType[] params) { return registry.instantiateGenericType(baseType, ImmutableList.copyOf(params)); } private JSType createRecordType(ImmutableMap<String, JSType> props) { return this.registry.createRecordType(props); } private void reportWarning(Node n, DiagnosticType msg, String... 
param) { compiler.report(JSError.make(n, msg, param)); } private <T> ImmutableMap<String, T> addNewEntry( ImmutableMap<String, T> map, String name, T type) { return new ImmutableMap.Builder<String, T>() .putAll(map) .put(name, type) .build(); } private String getFunctionParameter(Node n, int i) { Preconditions.checkArgument(n.isFunction(), "Expected a function node, found %s", n); return n.getSecondChild().getChildAtIndex(i).getString(); } private String getCallName(Node n) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); return n.getFirstChild().getString(); } private Node getCallArgument(Node n, int i) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); return n.getChildAtIndex(i + 1); } private int getCallParamCount(Node n) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); return n.getChildCount() - 1; } // TODO(dimvar): rewrite the uses of this method to use siblings() and delete it. // Copying is unnecessarily inefficient. private ImmutableList<Node> getCallParams(Node n) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); ImmutableList.Builder<Node> builder = new ImmutableList.Builder<>(); for (int i = 0; i < getCallParamCount(n); i++) { builder.add(getCallArgument(n, i)); } return builder.build(); } private Node getComputedPropValue(Node n) { Preconditions.checkArgument( n.isComputedProp(), "Expected a computed property node, found %s", n); return n.getSecondChild(); } private String getComputedPropName(Node n) { Preconditions.checkArgument( n.isComputedProp(), "Expected a computed property node, found %s", n); return n.getFirstChild().getString(); } /** Evaluates the type transformation expression and returns the resulting type. * * @param ttlAst The node representing the type transformation expression * @param typeVars The environment containing the information about the type variables * @return JSType The resulting type after the transformation */ JSType eval(Node ttlAst, ImmutableMap<String, JSType> typeVars) { return eval(ttlAst, typeVars, ImmutableMap.of()); } /** Evaluates the type transformation expression and returns the resulting type. * * @param ttlAst The node representing the type transformation expression * @param typeVars The environment containing the information about the type variables * @param nameVars The environment containing the information about the name variables * @return JSType The resulting type after the transformation */ @SuppressWarnings("unchecked") @VisibleForTesting JSType eval(Node ttlAst, ImmutableMap<String, JSType> typeVars, ImmutableMap<String, String> nameVars) { JSType result = evalInternal(ttlAst, new NameResolver(typeVars, nameVars)); return result.isEmptyType() ? 
getUnknownType() : result; } private JSType evalInternal(Node ttlAst, NameResolver nameResolver) { if (isTypeName(ttlAst)) { return evalTypeName(ttlAst); } if (isTypeVar(ttlAst)) { return evalTypeVar(ttlAst, nameResolver); } String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword.kind) { case TYPE_CONSTRUCTOR: return evalTypeExpression(ttlAst, nameResolver); case OPERATION: return evalOperationExpression(ttlAst, nameResolver); default: throw new IllegalStateException( "Could not evaluate the type transformation expression"); } } private JSType evalOperationExpression(Node ttlAst, NameResolver nameResolver) { String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case COND: return evalConditional(ttlAst, nameResolver); case MAPUNION: return evalMapunion(ttlAst, nameResolver); case MAPRECORD: return evalMaprecord(ttlAst, nameResolver); case TYPEOFVAR: return evalTypeOfVar(ttlAst); case INSTANCEOF: return evalInstanceOf(ttlAst, nameResolver); case PRINTTYPE: return evalPrintType(ttlAst, nameResolver); case PROPTYPE: return evalPropType(ttlAst, nameResolver); default: throw new IllegalStateException("Invalid type transformation operation"); } } private JSType evalTypeExpression(Node ttlAst, NameResolver nameResolver) { String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case TYPE: return evalTemplatizedType(ttlAst, nameResolver); case UNION: return evalUnionType(ttlAst, nameResolver); case NONE: return getNoType(); case ALL: return getAllType(); case UNKNOWN: return getUnknownType(); case RAWTYPEOF: return evalRawTypeOf(ttlAst, nameResolver); case TEMPLATETYPEOF: return evalTemplateTypeOf(ttlAst, nameResolver); case RECORD: return evalRecordType(ttlAst, nameResolver); case TYPEEXPR: return evalNativeTypeExpr(ttlAst); default: throw new IllegalStateException("Invalid type expression"); } } private JSType evalTypeName(Node ttlAst) { String typeName = ttlAst.getString(); JSType resultingType = getType(typeName); // If the type name is not defined then return UNKNOWN and report a warning if (resultingType == null) { reportWarning(ttlAst, UNKNOWN_TYPENAME, typeName); return getUnknownType(); } return resultingType; } private JSType evalTemplatizedType(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); JSType firstParam = evalInternal(params.get(0), nameResolver); if (firstParam.isFullyInstantiated()) { reportWarning(ttlAst, BASETYPE_INVALID, firstParam.toString()); return getUnknownType(); } // TODO(lpino): Check that the number of parameters correspond with the // number of template types that the base type can take when creating // a templatized type. For instance, if the base type is Array then there // must be just one parameter. 
JSType[] templatizedTypes = new JSType[params.size() - 1]; for (int i = 0; i < templatizedTypes.length; i++) { templatizedTypes[i] = evalInternal(params.get(i + 1), nameResolver); } ObjectType baseType = firstParam.toMaybeObjectType(); return createTemplatizedType(baseType, templatizedTypes); } private JSType evalTypeVar(Node ttlAst, NameResolver nameResolver) { String typeVar = ttlAst.getString(); JSType resultingType = nameResolver.typeVars.get(typeVar); // If the type variable is not defined then return UNKNOWN and report a warning if (resultingType == null) { reportWarning(ttlAst, UNKNOWN_TYPEVAR, typeVar); return getUnknownType(); } return resultingType; } private JSType evalUnionType(Node ttlAst, NameResolver nameResolver) { // Get the parameters of the union ImmutableList<Node> params = getCallParams(ttlAst); int paramCount = params.size(); // Create an array of types after evaluating each parameter JSType[] basicTypes = new JSType[paramCount]; for (int i = 0; i < paramCount; i++) { basicTypes[i] = evalInternal(params.get(i), nameResolver); } return createUnionType(basicTypes); } private JSType[] evalTypeParams(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); int paramCount = params.size(); JSType[] result = new JSType[paramCount]; for (int i = 0; i < paramCount; i++) { result[i] = evalInternal(params.get(i), nameResolver); } return result; } private String evalString(Node ttlAst, NameResolver nameResolver) { if (ttlAst.isName()) { // Return the empty string if the name variable cannot be resolved if (!nameResolver.nameVars.containsKey(ttlAst.getString())) { reportWarning(ttlAst, UNKNOWN_STRVAR, ttlAst.getString()); return ""; } return nameResolver.nameVars.get(ttlAst.getString()); } return ttlAst.getString(); } private String[] evalStringParams(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); int paramCount = params.size(); String[] result = new String[paramCount]; for (int i = 0; i < paramCount; i++) { result[i] = evalString(params.get(i), nameResolver); } return result; } private boolean evalTypePredicate(Node ttlAst, NameResolver nameResolver) { JSType[] params = evalTypeParams(ttlAst, nameResolver); String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); JSType type = params[0]; switch (keyword) { case EQ: return type.isEquivalentTo(params[1]); case SUB: return type.isSubtypeOf(params[1]); case ISCTOR: return type.isConstructor(); case ISTEMPLATIZED: return type.isObjectType() && type.toMaybeObjectType().isGenericObjectType() && type.isPartiallyInstantiated(); case ISRECORD: return type.isRecordType(); case ISUNKNOWN: return type.isSomeUnknownType(); default: throw new IllegalStateException( "Invalid type predicate in the type transformation"); } } private boolean evalStringPredicate(Node ttlAst, NameResolver nameResolver) { String[] params = evalStringParams(ttlAst, nameResolver); // If any of the parameters evaluates to the empty string then they were // not resolved by the name resolver. In this case we always return false. 
for (int i = 0; i < params.length; i++) { if (params[i].isEmpty()) { return false; } } String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case STREQ: return params[0].equals(params[1]); default: throw new IllegalStateException( "Invalid string predicate in the type transformation"); } } private boolean evalTypevarPredicate(Node ttlAst, NameResolver nameResolver) { String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case ISDEFINED: return nameResolver.typeVars.containsKey(getCallArgument(ttlAst, 0).getString()); default: throw new IllegalStateException( "Invalid typevar predicate in the type transformation"); } } private boolean evalBooleanOperation(Node ttlAst, NameResolver nameResolver) { boolean param0 = evalBoolean(ttlAst.getFirstChild(), nameResolver); if (ttlAst.isNot()) { return !param0; } if (ttlAst.isAnd()) { return param0 && evalBoolean(ttlAst.getLastChild(), nameResolver); } if (ttlAst.isOr()) { return param0 || evalBoolean(ttlAst.getLastChild(), nameResolver); } throw new IllegalStateException( "Invalid boolean predicate in the type transformation"); } private boolean evalBoolean(Node ttlAst, NameResolver nameResolver) { if (isBooleanOperation(ttlAst)) { return evalBooleanOperation(ttlAst, nameResolver); } String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword.kind) { case STRING_PREDICATE: return evalStringPredicate(ttlAst, nameResolver); case TYPE_PREDICATE: return evalTypePredicate(ttlAst, nameResolver); case TYPEVAR_PREDICATE: return evalTypevarPredicate(ttlAst, nameResolver); default: throw new IllegalStateException( "Invalid boolean predicate in the type transformation"); } } private JSType evalConditional(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); if (evalBoolean(params.get(0), nameResolver)) { return evalInternal(params.get(1), nameResolver); } else { return evalInternal(params.get(2), nameResolver); } } private JSType evalMapunion(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); Node unionParam = params.get(0); Node mapFunction = params.get(1); String paramName = getFunctionParameter(mapFunction, 0); // The mapunion variable must not be defined in the environment if (nameResolver.typeVars.containsKey(paramName)) { reportWarning(ttlAst, DUPLICATE_VARIABLE, paramName); return getUnknownType(); } Node mapFunctionBody = NodeUtil.getFunctionBody(mapFunction); JSType unionType = evalInternal(unionParam, nameResolver); // If the first parameter does not correspond to a union type then // consider it as a union with a single type and evaluate if (!unionType.isUnionType()) { NameResolver newNameResolver = new NameResolver( addNewEntry(nameResolver.typeVars, paramName, unionType), nameResolver.nameVars); return evalInternal(mapFunctionBody, newNameResolver); } // Otherwise obtain the elements in the union type. 
Note that the block // above guarantees the casting to be safe Collection<JSType> unionElms = ImmutableList.copyOf(unionType.getUnionMembers()); // Evaluate the map function body using each element in the union type int unionSize = unionElms.size(); JSType[] newUnionElms = new JSType[unionSize]; int i = 0; for (JSType elm : unionElms) { NameResolver newNameResolver = new NameResolver( addNewEntry(nameResolver.typeVars, paramName, elm), nameResolver.nameVars); newUnionElms[i] = evalInternal(mapFunctionBody, newNameResolver); i++; } return createUnionType(newUnionElms); } private JSType evalRawTypeOf(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); JSType type = evalInternal(params.get(0), nameResolver); if (!type.isGenericObjectType()) { reportWarning(ttlAst, TEMPTYPE_INVALID, "rawTypeOf", type.toString()); return getUnknownType(); } return type.toMaybeObjectType().getRawType(); } private JSType evalTemplateTypeOf(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); JSType type = evalInternal(params.get(0), nameResolver); if (!type.isGenericObjectType()) { reportWarning(ttlAst, TEMPTYPE_INVALID, "templateTypeOf", type.toString()); return getUnknownType(); } int index = (int) params.get(1).getDouble(); ImmutableList<? extends JSType> templateTypes = type.toMaybeObjectType().getTemplateTypes(); if (index >= templateTypes.size()) { reportWarning(ttlAst, INDEX_OUTOFBOUNDS, Integer.toString(templateTypes.size()), Integer.toString(index)); return getUnknownType(); } return templateTypes.get(index); } private JSType evalRecord(Node record, NameResolver nameResolver) { Map<String, JSType> props = new LinkedHashMap<>(); for (Node propNode : record.children()) { // If it is a computed property then find the property name using the resolver if (propNode.isComputedProp()) { String compPropName = getComputedPropName(propNode); // If the name does not exist then report a warning if (!nameResolver.nameVars.containsKey(compPropName)) { reportWarning(record, UNKNOWN_NAMEVAR, compPropName); return getUnknownType(); } // Otherwise add the property Node propValue = getComputedPropValue(propNode); String resolvedName = nameResolver.nameVars.get(compPropName); JSType resultingType = evalInternal(propValue, nameResolver); props.put(resolvedName, resultingType); } else { String propName = propNode.getString(); JSType resultingType = evalInternal(propNode.getFirstChild(), nameResolver); props.put(propName, resultingType); } } return this.registry.createRecordType(props); } private JSType evalRecordParam(Node ttlAst, NameResolver nameResolver) { if (ttlAst.isObjectLit()) { return evalRecord(ttlAst, nameResolver); } // The parameter of record can be a type transformation expression return evalInternal(ttlAst, nameResolver); } private JSType evalRecordType(Node ttlAst, NameResolver nameResolver) { int paramCount = getCallParamCount(ttlAst); ImmutableList.Builder<ObjectType> recTypesBuilder = new ImmutableList.Builder<>(); for (int i = 0; i < paramCount; i++) { JSType type = evalRecordParam(getCallArgument(ttlAst, i), nameResolver); // Check that each parameter evaluates to an object ObjectType objType = type.toMaybeObjectType(); if (objType == null || objType.isUnknownType()) { reportWarning(ttlAst, RECPARAM_INVALID, type.toString()); return getUnknownType(); } JSType recType = this.registry.buildRecordTypeFromObject(objType); if (!recType.isEquivalentTo(getObjectType())) { recTypesBuilder.add(recType.toMaybeObjectType()); } } return 
joinRecordTypes(recTypesBuilder.build()); } private void putNewPropInPropertyMap(Map<String, JSType> props, String newPropName, JSType newPropValue) { // TODO(lpino): Decide if the best strategy is to collapse the properties // to a union type or not. So far, new values replace the old ones except // if they are two record types in which case the properties are joined // together // Three cases: // (i) If the key does not exist then add it to the map with the new value // (ii) If the key to be added already exists in the map and the new value // is not a record type then the current value is replaced with the new one // (iii) If the new value is a record type and the current is not then // the current value is replaced with the new one if (!props.containsKey(newPropName) || !newPropValue.isRecordType() || !props.get(newPropName).isRecordType()) { props.put(newPropName, newPropValue); return; } // Otherwise join the current value with the new one since both are records props.put(newPropName, joinRecordTypes(ImmutableList.of( (ObjectType) props.get(newPropName), (ObjectType) newPropValue))); } /** * Merges a list of record types. * Example * {r:{s:string, n:number}} and {a:boolean} * is transformed into {r:{s:string, n:number}, a:boolean} */ private JSType joinRecordTypes(ImmutableList<ObjectType> recTypes) { Map<String, JSType> props = new LinkedHashMap<>(); for (ObjectType recType : recTypes) { for (String newPropName : recType.getOwnPropertyNames()) { JSType newPropValue = recType.getPropertyType(newPropName); // Put the new property depending if it already exists in the map putNewPropInPropertyMap(props, newPropName, newPropValue); } } return createRecordType(ImmutableMap.copyOf(props)); } private JSType evalMaprecord(Node ttlAst, NameResolver nameResolver) { Node recordNode = ttlAst.getSecondChild(); Node mapFunction = ttlAst.getChildAtIndex(2); JSType type = evalInternal(recordNode, nameResolver); // If it is an empty record type (Object) then return if (type.isEquivalentTo(getObjectType())) { return getObjectType(); } // The parameter must be a valid record type if (!type.isRecordType()) { // TODO(lpino): Decide how to handle non-record types reportWarning(recordNode, RECTYPE_INVALID, type.toString()); return getUnknownType(); } ObjectType objtype = type.toMaybeObjectType(); // Fetch the information of the map function String paramKey = getFunctionParameter(mapFunction, 0); String paramValue = getFunctionParameter(mapFunction, 1); // The maprecord variables must not be defined in the environment if (nameResolver.nameVars.containsKey(paramKey)) { reportWarning(ttlAst, DUPLICATE_VARIABLE, paramKey); return getUnknownType(); } if (nameResolver.typeVars.containsKey(paramValue)) { reportWarning(ttlAst, DUPLICATE_VARIABLE, paramValue); return getUnknownType(); } // Compute the new properties using the map function Node mapFnBody = NodeUtil.getFunctionBody(mapFunction); Map<String, JSType> newProps = new LinkedHashMap<>(); for (String propName : objtype.getOwnPropertyNames()) { // The value of the current property JSType propValue = objtype.getPropertyType(propName); // Evaluate the map function body with paramValue and paramKey replaced // by the values of the current property NameResolver newNameResolver = new NameResolver( addNewEntry(nameResolver.typeVars, paramValue, propValue), addNewEntry(nameResolver.nameVars, paramKey, propName)); JSType body = evalInternal(mapFnBody, newNameResolver); // If the body returns unknown then the whole expression returns unknown if (body.isUnknownType()) 
{ return getUnknownType(); } // Skip the property when the body evaluates to NO_TYPE // or the empty record (Object) if (body.isEmptyType() || body.isEquivalentTo(getObjectType())) { continue; } // Otherwise the body must evaluate to a record type if (!body.isRecordType()) { reportWarning(ttlAst, MAPRECORD_BODY_INVALID, body.toString()); return getUnknownType(); } // Add the properties of the resulting record type to the original one ObjectType bodyAsObj = body.toMaybeObjectType(); for (String newPropName : bodyAsObj.getOwnPropertyNames()) { JSType newPropValue = bodyAsObj.getPropertyType(newPropName); // If the key already exists then we have to mix it with the current property value putNewPropInPropertyMap(newProps, newPropName, newPropValue); } } return createRecordType(ImmutableMap.copyOf(newProps)); } private JSType evalTypeOfVar(Node ttlAst) { String name = getCallArgument(ttlAst, 0).getString(); StaticTypedSlot slot = typeEnv.getSlot(name); JSType type = slot != null ? slot.getType() : null; if (type == null) { reportWarning(ttlAst, VAR_UNDEFINED, name); return getUnknownType(); } return type; } private JSType evalInstanceOf(Node ttlAst, NameResolver nameResolver) { JSType type = evalInternal(getCallArgument(ttlAst, 0), nameResolver); if (type.isUnknownType() || !type.isConstructor()) { reportWarning(ttlAst, INVALID_CTOR, type.getDisplayName()); return getUnknownType(); } return type.toMaybeFunctionType().getInstanceType(); } private JSType evalNativeTypeExpr(Node ttlAst) { JSTypeExpression expr = new JSTypeExpression(getCallArgument(ttlAst, 0), VIRTUAL_FILE); return this.registry.evaluateTypeExpression(expr, this.typeEnv); } private JSType evalPrintType(Node ttlAst, NameResolver nameResolver) { JSType type = evalInternal(getCallArgument(ttlAst, 1), nameResolver); String msg = getCallArgument(ttlAst, 0).getString() + type; System.out.println(msg); return type; } private JSType evalPropType(Node ttlAst, NameResolver nameResolver) { JSType type = evalInternal(getCallArgument(ttlAst, 1), nameResolver); ObjectType objType = type.toMaybeObjectType(); if (objType == null) { reportWarning(ttlAst, PROPTYPE_INVALID, type.toString()); return getUnknownType(); } JSType propType = objType.getPropertyType(getCallArgument(ttlAst, 0).getString()); return firstNonNull(propType, getUnknownType()); } }
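// A worked example of the evaluator above (construction of the TTL AST itself is elided):
// for the expression cond(eq(T, 'string'), 'number', 'boolean') with the type-variable
// environment {T -> string}, evalInternal() dispatches COND to evalConditional(),
// evalBoolean() routes eq(...) to evalTypePredicate(), which reports true, so eval()
// returns the JSType for number. The keyword names follow TypeTransformationParser.Keywords.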
mprobst/closure-compiler
src/com/google/javascript/jscomp/TypeTransformation.java
Java
apache-2.0
30,898
from django.utils.translation import ugettext_lazy as _

import horizon

from {{ dash_path }} import dashboard


class {{ panel_name|title }}(horizon.Panel):
    name = _("{{ panel_name|title }}")
    slug = "{{ panel_name|slugify }}"


dashboard.register({{ panel_name|title }})
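# A rendering sketch: with panel_name = "instances" and a made-up
# dash_path = "openstack_dashboard.dashboards.admin", the template above expands to roughly:
#
#     from openstack_dashboard.dashboards.admin import dashboard
#
#     class Instances(horizon.Panel):
#         name = _("Instances")
#         slug = "instances"
#
#     dashboard.register(Instances)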
xhorn/xchorizon
horizon/conf/panel_template/panel.py
Python
apache-2.0
280
'use strict';

var TodoServiceFactory = function(database){
  return {
    // Return all todos in the database
    getTodos: function(){
      return database('Todo').select().orderBy('createdAt', 'desc');
    },

    // Return a single todo by Id
    getTodo: function(id){
      return new Promise(function(resolve, reject){
        database('Todo').where('id', id).select()
          .then(function(rows){
            if(rows.length === 0){
              //not found
              reject('TodoService: not found');
            } else {
              resolve(rows[0]);
            }
          });
      });
    },

    //Update a todo in the database
    updateTodo: function(todo){
      return new Promise(function(resolve, reject){
        //TODO: real-world validation
        database('Todo').update({
          text: todo.text,
          completed: todo.completed
        })
        .where('id', todo.id)
        .then(function(affectedRows){
          if(affectedRows === 1){
            resolve(todo);
          } else {
            reject('Not found');
          }
        });
      });
    },

    //Create a new todo in the database
    createTodo: function(todo){
      return new Promise(function(resolve, reject){
        //TODO: real-world validation
        database('Todo').insert(todo)
          .then(function(idArray){
            //return the newly created todo
            todo.id = idArray[0];
            resolve(todo);
          })
          .catch(function(err){
            reject('TodoService: create failed. Error:' + err.toString());
          });
      });
    },

    //Delete a todo specified by Id
    deleteTodo: function(todoId){
      return new Promise(function(resolve, reject){
        database('Todo').where('id', todoId).del()
          .then(function(affectedRows){
            if(affectedRows === 1){
              resolve(true);
            } else {
              reject('TodoService: not found');
            }
          })
          .catch(function(err){
            reject('TodoService: delete failed. Error' + err.toString());
          });
      });
    }
  };
};

module.exports = TodoServiceFactory;
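// A wiring sketch, assuming a knex-style `database` instance and a register/get container
// API (both assumptions for illustration, not verified against this example's runner):
//
// var container = require('kontainer-di');
// container.register('database', [], function () { return require('knex')(dbConfig); });
// container.register('todoService', ['database'], TodoServiceFactory);
// container.get('todoService').getTodos().then(function (todos) { console.log(todos); });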
raffaeu/Syncho
src/node_modules/kontainer-di/examples/express/services/todo_service.js
JavaScript
apache-2.0
2,050
/*
 * Copyright @ 2015 Atlassian Pty Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.java.sip.communicator.impl.protocol.jabber.extensions.jingle;

/**
 * A representation of the <tt>remote-candidate</tt> ICE transport element.
 *
 * @author Emil Ivov
 */
public class RemoteCandidatePacketExtension extends CandidatePacketExtension
{
    /**
     * The name of the "remote-candidate" element.
     */
    public static final String ELEMENT_NAME = "remote-candidate";

    /**
     * Creates a new {@link RemoteCandidatePacketExtension}.
     */
    public RemoteCandidatePacketExtension()
    {
        super(ELEMENT_NAME);
    }
}
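// For context (summarized from XEP-0176 rather than from this file): this extension models
// an element of the form <remote-candidate component='1' ip='10.0.0.1' port='9001'/> inside
// a Jingle ICE transport; the attributes themselves are inherited from
// CandidatePacketExtension.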
gardenia/jitsi-hammer
src/net/java/sip/communicator/impl/protocol/jabber/extensions/jingle/RemoteCandidatePacketExtension.java
Java
apache-2.0
1,149
/* * Copyright 2015-present Boundless Spatial Inc., http://boundlessgeo.com * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and limitations * under the License. */ import React from 'react'; import {connect} from 'react-redux'; import PropTypes from 'prop-types'; import * as mapActions from '../../actions/map'; /** @module components/map/zoom-control * @example * import SdkZoomControl from '@boundlessgeo/sdk/components/map/zoom-control'; * import { Provider } from 'react-redux'; * import SdkMap from '@boundlessgeo/sdk/components/map'; * import ReactDOM from 'react-dom'; * * ReactDOM.render(<Provider store={store}> * <SdkMap> * <SdkZoomControl /> * </SdkMap> * </Provider>, document.getElementById('map')); * * @desc Provides 2 buttons to zoom the map (zoom in and out). */ class ZoomControl extends React.Component { render() { let className = 'sdk-zoom-control'; if (this.props.className) { className += ' ' + this.props.className; } return ( <div className={className} style={this.props.style}> <button className='sdk-zoom-in' onClick={this.props.zoomIn} title={this.props.zoomInTitle}>+</button> <button className='sdk-zoom-out' onClick={this.props.zoomOut} title={this.props.zoomOutTitle}>{'\u2212'}</button> </div> ); } } ZoomControl.propTypes = { /** * Css className for the root div. */ className: PropTypes.string, /** * Style config object for root div. */ style: PropTypes.object, /** * Title for the zoom in button. */ zoomInTitle: PropTypes.string, /** * Title for the zoom out button. */ zoomOutTitle: PropTypes.string, }; ZoomControl.defaultProps = { zoomInTitle: 'Zoom in', zoomOutTitle: 'Zoom out', }; function mapDispatchToProps(dispatch) { return { zoomIn: () => { dispatch(mapActions.zoomIn()); }, zoomOut: () => { dispatch(mapActions.zoomOut()); }, }; } export default connect(null, mapDispatchToProps)(ZoomControl);
boundlessgeo/sdk
src/components/map/zoom-control.js
JavaScript
apache-2.0
2,469
/* Copyright 2015 Coursera Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.coursera.android.shift; import android.os.Bundle; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; public class ShiftActionsFragment extends ViewPagerFragment { private static final String TAB_TITLE_ACTIONS = "Actions"; public ShiftActionsFragment() { super(TAB_TITLE_ACTIONS); } public static ShiftActionsFragment getNewInstance() { return new ShiftActionsFragment(); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.actions_fragment, container, false); RecyclerView recyclerView = (RecyclerView) view.findViewById(R.id.recycler_view); recyclerView.setHasFixedSize(true); recyclerView.setLayoutManager(new LinearLayoutManager(getActivity())); int bottomMargin = (int) getResources().getDimension(R.dimen.card_margin); recyclerView.addItemDecoration(new VerticalMarginItemDecoration(bottomMargin)); ShiftActionRecyclerViewAdapter adapter = new ShiftActionRecyclerViewAdapter(getActivity(), ShiftManager.getInstance().getActionManager().getActionList()); recyclerView.setAdapter(adapter); return view; } }
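// A hosting sketch, assuming a FragmentPagerAdapter-style host (hypothetical, not taken
// from this repository):
//
// @Override
// public Fragment getItem(int position) {
//     return position == 0 ? ShiftActionsFragment.getNewInstance()
//                          : otherTabFragment(position); // otherTabFragment is a placeholder
// }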
coursera/shift
shift/src/main/java/org/coursera/android/shift/ShiftActionsFragment.java
Java
apache-2.0
2,029
using MongoDB.Bson;

namespace MongoDB.Protocol
{
    /// <summary>
    /// Deprecated. OP_MSG sends a diagnostic message to the database.
    /// The database sends back a fixed response.
    /// </summary>
    /// <remarks>
    /// struct {
    ///     MsgHeader header; // standard message header
    ///     cstring message;  // message for the database
    /// }
    /// </remarks>
    internal class MsgMessage : RequestMessageBase
    {
        /// <summary>
        /// Initializes a new instance of the <see cref="MsgMessage"/> class.
        /// </summary>
        public MsgMessage()
            : base(new BsonWriterSettings())
        {
            Header = new MessageHeader(OpCode.Msg);
        }

        /// <summary>
        /// Gets or sets the message.
        /// </summary>
        /// <value>The message.</value>
        public string Message { get; set; }

        /// <summary>
        /// Writes the body.
        /// </summary>
        /// <param name="writer">The writer.</param>
        protected override void WriteBody(BsonWriter writer)
        {
            writer.Write(Message, false);
        }

        /// <summary>
        /// Calculates the size of the body.
        /// </summary>
        /// <param name="writer">The writer.</param>
        /// <returns></returns>
        protected override int CalculateBodySize(BsonWriter writer)
        {
            return writer.CalculateSize(Message, false);
        }
    }
}
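// A construction sketch; the actual send path lives elsewhere in the driver and is elided:
//
// var msg = new MsgMessage { Message = "hello database" };
// // msg.Header is pre-set to OpCode.Msg by the constructor; WriteBody serializes Message.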
mongodb-csharp/mongodb-csharp
source/MongoDB/Protocol/MsgMessage.cs
C#
apache-2.0
1,486
package com.inomma.kandu.server;

public enum RequestMethod {
    POST, GET, PUT;
}
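// Typical call sites pass one of these constants to the HTTP layer; for example,
// RequestMethod.GET.name() yields the literal "GET" for use when building a request.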
kandu-community/android
kandu-android/trunk/src/com/inomma/kandu/server/RequestMethod.java
Java
apache-2.0
80
package cgeo.geocaching.files; import cgeo.geocaching.R; import cgeo.geocaching.ui.recyclerview.AbstractRecyclerViewAdapter; import cgeo.geocaching.ui.recyclerview.AbstractRecyclerViewHolder; import android.graphics.Typeface; import android.support.annotation.NonNull; import android.view.LayoutInflater; import android.view.View; import android.view.View.OnClickListener; import android.view.ViewGroup; import android.widget.TextView; import java.io.File; import java.util.List; import butterknife.BindView; public class FileSelectionListAdapter extends AbstractRecyclerViewAdapter<FileSelectionListAdapter.ViewHolder> { private final IFileSelectionView parentView; @NonNull private final List<File> files; public FileSelectionListAdapter(@NonNull final IFileSelectionView parentIn, @NonNull final List<File> listIn) { files = listIn; parentView = parentIn; } @Override public int getItemCount() { return files.size(); } @Override public ViewHolder onCreateViewHolder(final ViewGroup parent, final int position) { final View view = LayoutInflater.from(parent.getContext()).inflate(R.layout.mapfile_item, parent, false); final ViewHolder viewHolder = new ViewHolder(view); viewHolder.itemView.setOnClickListener(new OnClickListener() { @Override public void onClick(final View view) { final File file = files.get(viewHolder.getItemPosition()); parentView.setCurrentFile(file.toString()); parentView.close(); } }); return viewHolder; } @Override public void onBindViewHolder(final ViewHolder holder, final int position) { super.onBindViewHolder(holder, position); final File file = files.get(position); final String currentFile = parentView.getCurrentFile(); if (currentFile != null && file.equals(new File(currentFile))) { holder.filename.setTypeface(holder.filename.getTypeface(), Typeface.BOLD); } else { holder.filename.setTypeface(holder.filename.getTypeface(), Typeface.NORMAL); } holder.filepath.setText(file.getParent()); holder.filename.setText(file.getName()); } protected static final class ViewHolder extends AbstractRecyclerViewHolder { @BindView(R.id.mapfilepath) TextView filepath; @BindView(R.id.mapfilename) TextView filename; ViewHolder(final View view) { super(view); } } }
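// The IFileSelectionView contract this adapter relies on, as inferred from the calls above:
// getCurrentFile()/setCurrentFile(String) back the bolded current selection, and close()
// dismisses the chooser once a file has been picked.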
kumy/cgeo
main/src/cgeo/geocaching/files/FileSelectionListAdapter.java
Java
apache-2.0
2,548
/*
 * Copyright 2005 JBoss Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.drools.eclipse.flow.ruleflow.view.property.constraint;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.drools.eclipse.flow.common.view.property.EditBeanDialog;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Shell;
import org.jbpm.workflow.core.Constraint;
import org.jbpm.workflow.core.WorkflowProcess;
import org.jbpm.workflow.core.impl.ConnectionRef;
import org.jbpm.workflow.core.impl.NodeImpl;
import org.jbpm.workflow.core.node.StateNode;
import org.kie.api.definition.process.Connection;

/**
 * Dialog for editing constraints.
 */
public class StateConstraintListDialog extends EditBeanDialog<Map<ConnectionRef, Constraint>> {

    private WorkflowProcess process;
    private StateNode stateNode;
    private Map<ConnectionRef, Constraint> newMap;
    private Map<Connection, Label> labels = new HashMap<Connection, Label>();

    protected StateConstraintListDialog(Shell parentShell, WorkflowProcess process,
            StateNode stateNode) {
        super(parentShell, "Edit Constraints");
        this.process = process;
        this.stateNode = stateNode;
    }

    protected Control createDialogArea(Composite parent) {
        Composite composite = (Composite) super.createDialogArea(parent);
        GridLayout gridLayout = new GridLayout();
        gridLayout.numColumns = 3;
        composite.setLayout(gridLayout);

        List<Connection> outgoingConnections =
            stateNode.getOutgoingConnections(NodeImpl.CONNECTION_DEFAULT_TYPE);
        labels.clear();
        for (Connection outgoingConnection: outgoingConnections) {
            Label label1 = new Label(composite, SWT.NONE);
            label1.setText("To node "
                + outgoingConnection.getTo().getName() + ": ");
            Label label2 = new Label(composite, SWT.NONE);
            labels.put(outgoingConnection, label2);
            GridData gridData = new GridData();
            gridData.grabExcessHorizontalSpace = true;
            gridData.horizontalAlignment = GridData.FILL;
            label2.setLayoutData(gridData);
            Constraint constraint = newMap.get(
                new ConnectionRef(outgoingConnection.getTo().getId(), outgoingConnection.getToType()));
            if (constraint != null) {
                label2.setText(constraint.getName());
            }
            Button editButton = new Button(composite, SWT.NONE);
            editButton.setText("Edit");
            editButton.addSelectionListener(new EditButtonListener(
                outgoingConnection));
        }
        return composite;
    }

    public void setValue(Map<ConnectionRef, Constraint> value) {
        super.setValue(value);
        this.newMap = new HashMap<ConnectionRef, Constraint>((Map<ConnectionRef, Constraint>) value);
    }

    protected Map<ConnectionRef, Constraint> updateValue(Map<ConnectionRef, Constraint> value) {
        return newMap;
    }

    private void editItem(final Connection connection) {
        final Runnable r = new Runnable() {
            public void run() {
                RuleFlowConstraintDialog dialog = new RuleFlowConstraintDialog(
                    getShell(), process);
                dialog.create();
                ConnectionRef connectionRef =
                    new ConnectionRef(connection.getTo().getId(), connection.getToType());
                Constraint constraint = newMap.get(connectionRef);
                dialog.setConstraint(constraint);
                dialog.fixType(0);
                dialog.fixDialect(0);
                int code = dialog.open();
                if (code != CANCEL) {
                    constraint = dialog.getConstraint();
                    newMap.put(connectionRef, constraint);
                    setConnectionText(
                        (Label) labels.get(connection), constraint.getName());
                }
            }
        };
        r.run();
    }

    private void setConnectionText(final Label connection, final String name) {
        Display.getDefault().asyncExec(new Runnable() {
            public void run() {
                connection.setText(name);
            }
        });
    }

    private class EditButtonListener extends SelectionAdapter {
        private Connection connection;

        public EditButtonListener(Connection connection) {
            this.connection = connection;
        }

        public void widgetSelected(SelectionEvent e) {
            editItem(connection);
        }
    }
}
pkman/droolsjbpm-tools
drools-eclipse/org.drools.eclipse/src/main/java/org/drools/eclipse/flow/ruleflow/view/property/constraint/StateConstraintListDialog.java
Java
apache-2.0
5,496
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.builder.component.dsl;

import javax.annotation.Generated;
import org.apache.camel.Component;
import org.apache.camel.builder.component.AbstractComponentBuilder;
import org.apache.camel.builder.component.ComponentBuilder;
import org.apache.camel.component.atomix.client.set.AtomixSetComponent;

/**
 * Access Atomix's distributed set.
 *
 * Generated by camel-package-maven-plugin - do not edit this file!
 */
@Generated("org.apache.camel.maven.packaging.ComponentDslMojo")
public interface AtomixSetComponentBuilderFactory {

    /**
     * Atomix Set (camel-atomix)
     * Access Atomix's distributed set.
     *
     * Category: clustering
     * Since: 2.20
     * Maven coordinates: org.apache.camel:camel-atomix
     *
     * @return the dsl builder
     */
    @Deprecated
    static AtomixSetComponentBuilder atomixSet() {
        return new AtomixSetComponentBuilderImpl();
    }

    /**
     * Builder for the Atomix Set component.
     */
    interface AtomixSetComponentBuilder
            extends
                ComponentBuilder<AtomixSetComponent> {
        /**
         * The Atomix instance to use.
         *
         * The option is a: &lt;code&gt;io.atomix.Atomix&lt;/code&gt; type.
         *
         * Group: common
         *
         * @param atomix the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder atomix(io.atomix.Atomix atomix) {
            doSetProperty("atomix", atomix);
            return this;
        }
        /**
         * The shared component configuration.
         *
         * The option is a:
         * &lt;code&gt;org.apache.camel.component.atomix.client.set.AtomixSetConfiguration&lt;/code&gt; type.
         *
         * Group: common
         *
         * @param configuration the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder configuration(
                org.apache.camel.component.atomix.client.set.AtomixSetConfiguration configuration) {
            doSetProperty("configuration", configuration);
            return this;
        }
        /**
         * The path to the AtomixClient configuration.
         *
         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
         *
         * Group: common
         *
         * @param configurationUri the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder configurationUri(
                java.lang.String configurationUri) {
            doSetProperty("configurationUri", configurationUri);
            return this;
        }
        /**
         * The default action.
         *
         * The option is a:
         * &lt;code&gt;org.apache.camel.component.atomix.client.set.AtomixSet.Action&lt;/code&gt; type.
         *
         * Default: ADD
         * Group: common
         *
         * @param defaultAction the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder defaultAction(
                org.apache.camel.component.atomix.client.set.AtomixSet.Action defaultAction) {
            doSetProperty("defaultAction", defaultAction);
            return this;
        }
        /**
         * The nodes the AtomixClient should connect to.
         *
         * The option is a:
         * &lt;code&gt;java.util.List&amp;lt;io.atomix.catalyst.transport.Address&amp;gt;&lt;/code&gt; type.
         *
         * Group: common
         *
         * @param nodes the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder nodes(
                java.util.List<io.atomix.catalyst.transport.Address> nodes) {
            doSetProperty("nodes", nodes);
            return this;
        }
        /**
         * The header that will carry the result.
         *
         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
         *
         * Group: common
         *
         * @param resultHeader the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder resultHeader(
                java.lang.String resultHeader) {
            doSetProperty("resultHeader", resultHeader);
            return this;
        }
        /**
         * The class name (fqn) of the Atomix transport.
         *
         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
         *
         * Default: io.atomix.catalyst.transport.netty.NettyTransport
         * Group: common
         *
         * @param transportClassName the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder transportClassName(
                java.lang.String transportClassName) {
            doSetProperty("transportClassName", transportClassName);
            return this;
        }
        /**
         * The resource ttl.
         *
         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
         *
         * Group: common
         *
         * @param ttl the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder ttl(long ttl) {
            doSetProperty("ttl", ttl);
            return this;
        }
        /**
         * Allows for bridging the consumer to the Camel routing Error Handler,
         * which mean any exceptions occurred while the consumer is trying to
         * pickup incoming messages, or the likes, will now be processed as a
         * message and handled by the routing Error Handler. By default the
         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
         * with exceptions, that will be logged at WARN or ERROR level and
         * ignored.
         *
         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
         *
         * Default: false
         * Group: consumer
         *
         * @param bridgeErrorHandler the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder bridgeErrorHandler(
                boolean bridgeErrorHandler) {
            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
            return this;
        }
        /**
         * Whether the producer should be started lazy (on the first message).
         * By starting lazy you can use this to allow CamelContext and routes to
         * startup in situations where a producer may otherwise fail during
         * starting and cause the route to fail being started. By deferring this
         * startup to be lazy then the startup failure can be handled during
         * routing messages via Camel's routing error handlers. Beware that when
         * the first message is processed then creating and starting the
         * producer may take a little time and prolong the total processing time
         * of the processing.
         *
         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
         *
         * Default: false
         * Group: producer
         *
         * @param lazyStartProducer the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder lazyStartProducer(
                boolean lazyStartProducer) {
            doSetProperty("lazyStartProducer", lazyStartProducer);
            return this;
        }
        /**
         * Whether autowiring is enabled. This is used for automatic autowiring
         * options (the option must be marked as autowired) by looking up in the
         * registry to find if there is a single instance of matching type,
         * which then gets configured on the component. This can be used for
         * automatic configuring JDBC data sources, JMS connection factories,
         * AWS Clients, etc.
         *
         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
         *
         * Default: true
         * Group: advanced
         *
         * @param autowiredEnabled the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder autowiredEnabled(
                boolean autowiredEnabled) {
            doSetProperty("autowiredEnabled", autowiredEnabled);
            return this;
        }
        /**
         * The cluster wide default resource configuration.
         *
         * The option is a: &lt;code&gt;java.util.Properties&lt;/code&gt; type.
         *
         * Group: advanced
         *
         * @param defaultResourceConfig the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder defaultResourceConfig(
                java.util.Properties defaultResourceConfig) {
            doSetProperty("defaultResourceConfig", defaultResourceConfig);
            return this;
        }
        /**
         * The local default resource options.
         *
         * The option is a: &lt;code&gt;java.util.Properties&lt;/code&gt; type.
         *
         * Group: advanced
         *
         * @param defaultResourceOptions the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder defaultResourceOptions(
                java.util.Properties defaultResourceOptions) {
            doSetProperty("defaultResourceOptions", defaultResourceOptions);
            return this;
        }
        /**
         * Sets if the local member should join groups as PersistentMember or
         * not. If set to ephemeral the local member will receive an auto
         * generated ID thus the local one is ignored.
         *
         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
         *
         * Default: false
         * Group: advanced
         *
         * @param ephemeral the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder ephemeral(boolean ephemeral) {
            doSetProperty("ephemeral", ephemeral);
            return this;
        }
        /**
         * The read consistency level.
         *
         * The option is a:
         * &lt;code&gt;io.atomix.resource.ReadConsistency&lt;/code&gt; type.
         *
         * Group: advanced
         *
         * @param readConsistency the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder readConsistency(
                io.atomix.resource.ReadConsistency readConsistency) {
            doSetProperty("readConsistency", readConsistency);
            return this;
        }
        /**
         * Cluster wide resources configuration.
         *
         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
         * java.util.Properties&amp;gt;&lt;/code&gt; type.
         *
         * Group: advanced
         *
         * @param resourceConfigs the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder resourceConfigs(
                java.util.Map<java.lang.String, java.util.Properties> resourceConfigs) {
            doSetProperty("resourceConfigs", resourceConfigs);
            return this;
        }
        /**
         * Local resources configurations.
         *
         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
         * java.util.Properties&amp;gt;&lt;/code&gt; type.
         *
         * Group: advanced
         *
         * @param resourceOptions the value to set
         * @return the dsl builder
         */
        default AtomixSetComponentBuilder resourceOptions(
                java.util.Map<java.lang.String, java.util.Properties> resourceOptions) {
            doSetProperty("resourceOptions", resourceOptions);
            return this;
        }
    }

    class AtomixSetComponentBuilderImpl
            extends
                AbstractComponentBuilder<AtomixSetComponent>
            implements
                AtomixSetComponentBuilder {
        @Override
        protected AtomixSetComponent buildConcreteComponent() {
            return new AtomixSetComponent();
        }
        private org.apache.camel.component.atomix.client.set.AtomixSetConfiguration getOrCreateConfiguration(
                org.apache.camel.component.atomix.client.set.AtomixSetComponent component) {
            if (component.getConfiguration() == null) {
                component.setConfiguration(new org.apache.camel.component.atomix.client.set.AtomixSetConfiguration());
            }
            return component.getConfiguration();
        }
        @Override
        protected boolean setPropertyOnComponent(
                Component component,
                String name,
                Object value) {
            switch (name) {
            case "atomix": getOrCreateConfiguration((AtomixSetComponent) component).setAtomix((io.atomix.Atomix) value); return true;
            case "configuration": ((AtomixSetComponent) component).setConfiguration((org.apache.camel.component.atomix.client.set.AtomixSetConfiguration) value); return true;
            case "configurationUri": ((AtomixSetComponent) component).setConfigurationUri((java.lang.String) value); return true;
            case "defaultAction": getOrCreateConfiguration((AtomixSetComponent) component).setDefaultAction((org.apache.camel.component.atomix.client.set.AtomixSet.Action) value); return true;
            case "nodes": ((AtomixSetComponent) component).setNodes((java.util.List) value); return true;
            case "resultHeader": getOrCreateConfiguration((AtomixSetComponent) component).setResultHeader((java.lang.String) value); return true;
            case "transportClassName": getOrCreateConfiguration((AtomixSetComponent) component).setTransportClassName((java.lang.String) value); return true;
            case "ttl": getOrCreateConfiguration((AtomixSetComponent) component).setTtl((long) value); return true;
            case "bridgeErrorHandler": ((AtomixSetComponent) component).setBridgeErrorHandler((boolean) value); return true;
            case "lazyStartProducer": ((AtomixSetComponent) component).setLazyStartProducer((boolean) value); return true;
            case "autowiredEnabled": ((AtomixSetComponent) component).setAutowiredEnabled((boolean) value); return true;
            case "defaultResourceConfig": getOrCreateConfiguration((AtomixSetComponent) component).setDefaultResourceConfig((java.util.Properties) value); return true;
            case "defaultResourceOptions": getOrCreateConfiguration((AtomixSetComponent) component).setDefaultResourceOptions((java.util.Properties) value); return true;
            case "ephemeral": getOrCreateConfiguration((AtomixSetComponent) component).setEphemeral((boolean) value); return true;
            case "readConsistency": getOrCreateConfiguration((AtomixSetComponent) component).setReadConsistency((io.atomix.resource.ReadConsistency) value); return true;
            case "resourceConfigs": getOrCreateConfiguration((AtomixSetComponent) component).setResourceConfigs((java.util.Map) value); return true;
            case "resourceOptions": getOrCreateConfiguration((AtomixSetComponent) component).setResourceOptions((java.util.Map) value); return true;
            default: return false;
            }
        }
    }
}
christophd/camel
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AtomixSetComponentBuilderFactory.java
Java
apache-2.0
16,066
define(function(require, exports, module) {
    var Morris = require("morris");
    require("jquery.bootstrap-datetimepicker");
    var Validator = require('bootstrap.validator');
    var autoSubmitCondition = require("./autoSubmitCondition.js");
    require('common/validator-rules').inject(Validator);
    var now = new Date();

    exports.run = function() {
        if ($('#data').length > 0) {
            var data = eval("(" + $('#data').attr("value") + ")");
            Morris.Line({
                element: 'line-data',
                data: data,
                xkey: 'date',
                ykeys: ['count'],
                labels: [Translator.trans('班级营收额')],
                xLabels: "day"
            });
        }

        $("[name=endTime]").datetimepicker({
            autoclose: true,
            format: 'yyyy-mm-dd',
            minView: 'month'
        });
        $('[name=endTime]').datetimepicker('setEndDate', now);
        $('[name=endTime]').datetimepicker('setStartDate', $('#classroomIncomeStartDate').attr("value"));

        $("[name=startTime]").datetimepicker({
            autoclose: true,
            format: 'yyyy-mm-dd',
            minView: 'month'
        });
        $('[name=startTime]').datetimepicker('setEndDate', now);
        $('[name=startTime]').datetimepicker('setStartDate', $('#classroomIncomeStartDate').attr("value"));

        var validator = new Validator({
            element: '#operation-form'
        });
        validator.addItem({
            element: '[name=startTime]',
            required: true,
            rule: 'date_check'
        });
        validator.addItem({
            element: '[name=endTime]',
            required: true,
            rule: 'date_check'
        });
        validator.addItem({
            element: '[name=analysisDateType]',
            required: true
        });

        autoSubmitCondition.autoSubmitCondition();
    };
});
18826252059/im
web/bundles/topxiaadmin/js/controller/analysis/classroom-income.js
JavaScript
apache-2.0
1,998
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/license-manager/model/Tag.h>
#include <aws/core/utils/json/JsonSerializer.h>

#include <utility>

using namespace Aws::Utils::Json;
using namespace Aws::Utils;

namespace Aws
{
namespace LicenseManager
{
namespace Model
{

Tag::Tag() :
    m_keyHasBeenSet(false),
    m_valueHasBeenSet(false)
{
}

Tag::Tag(JsonView jsonValue) :
    m_keyHasBeenSet(false),
    m_valueHasBeenSet(false)
{
  *this = jsonValue;
}

Tag& Tag::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("Key"))
  {
    m_key = jsonValue.GetString("Key");

    m_keyHasBeenSet = true;
  }

  if(jsonValue.ValueExists("Value"))
  {
    m_value = jsonValue.GetString("Value");

    m_valueHasBeenSet = true;
  }

  return *this;
}

JsonValue Tag::Jsonize() const
{
  JsonValue payload;

  if(m_keyHasBeenSet)
  {
    payload.WithString("Key", m_key);
  }

  if(m_valueHasBeenSet)
  {
    payload.WithString("Value", m_value);
  }

  return payload;
}

} // namespace Model
} // namespace LicenseManager
} // namespace Aws
aws/aws-sdk-cpp
aws-cpp-sdk-license-manager/source/model/Tag.cpp
C++
apache-2.0
1,136
#
# Cookbook Name:: apache2
# Recipe:: mod_auth_cas
#
# Copyright 2013, Opscode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

include_recipe 'apache2::default'

if node['apache']['mod_auth_cas']['from_source']
  package 'httpd-devel' do
    package_name value_for_platform_family(
      %w[rhel fedora suse] => 'httpd-devel',
      'debian' => 'apache2-dev'
    )
  end

  git '/tmp/mod_auth_cas' do
    repository 'git://github.com/Jasig/mod_auth_cas.git'
    revision node['apache']['mod_auth_cas']['source_revision']
    notifies :run, 'execute[compile mod_auth_cas]', :immediately
  end

  execute 'compile mod_auth_cas' do
    command './configure && make && make install'
    cwd '/tmp/mod_auth_cas'
    not_if "test -f #{node['apache']['libexecdir']}/mod_auth_cas.so"
  end

  template "#{node['apache']['dir']}/mods-available/auth_cas.load" do
    source 'mods/auth_cas.load.erb'
    owner 'root'
    group node['apache']['root_group']
    mode '0644'
  end
else
  case node['platform_family']
  when 'debian'
    package 'libapache2-mod-auth-cas'
  when 'rhel', 'fedora'
    yum_package 'mod_auth_cas' do
      notifies :run, 'execute[generate-module-list]', :immediately
    end

    file "#{node['apache']['dir']}/conf-available/auth_cas.conf" do
      action :delete
      backup false
    end
  end
end

apache_module 'auth_cas' do
  conf true
end

directory "#{node['apache']['cache_dir']}/mod_auth_cas" do
  owner node['apache']['user']
  group node['apache']['group']
  mode '0700'
end
ceros/apache2-onehealth
recipes/mod_auth_cas.rb
Ruby
apache-2.0
2,026
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
@Version("3.0.0")
package org.apache.jackrabbit.oak.plugins.tree;

import org.osgi.annotation.versioning.Version;
mduerig/jackrabbit-oak
oak-security-spi/src/main/java/org/apache/jackrabbit/oak/plugins/tree/package-info.java
Java
apache-2.0
916
<?php
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: google/cloud/osconfig/v1/vulnerability.proto

namespace Google\Cloud\OsConfig\V1\VulnerabilityReport\Vulnerability;

use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;

/**
 * OS inventory item that is affected by a vulnerability or fixed as a
 * result of a vulnerability.
 *
 * Generated from protobuf message <code>google.cloud.osconfig.v1.VulnerabilityReport.Vulnerability.Item</code>
 */
class Item extends \Google\Protobuf\Internal\Message
{
    /**
     * Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM.
     * This field displays the inventory items affected by this vulnerability.
     * If the vulnerability report was not updated after the VM inventory
     * update, these values might not display in VM inventory. For some
     * operating systems, this field might be empty.
     *
     * Generated from protobuf field <code>string installed_inventory_item_id = 1;</code>
     */
    private $installed_inventory_item_id = '';
    /**
     * Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM.
     * If the vulnerability report was not updated after the VM inventory
     * update, these values might not display in VM inventory. If there is no
     * available fix, the field is empty. The `inventory_item` value specifies
     * the latest `SoftwarePackage` available to the VM that fixes the
     * vulnerability.
     *
     * Generated from protobuf field <code>string available_inventory_item_id = 2;</code>
     */
    private $available_inventory_item_id = '';
    /**
     * The recommended [CPE URI](https://cpe.mitre.org/specification/) update
     * that contains a fix for this vulnerability.
     *
     * Generated from protobuf field <code>string fixed_cpe_uri = 3;</code>
     */
    private $fixed_cpe_uri = '';
    /**
     * The upstream OS patch, packages or KB that fixes the vulnerability.
     *
     * Generated from protobuf field <code>string upstream_fix = 4;</code>
     */
    private $upstream_fix = '';

    /**
     * Constructor.
     *
     * @param array $data {
     *     Optional. Data for populating the Message object.
     *
     *     @type string $installed_inventory_item_id
     *           Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM.
     *           This field displays the inventory items affected by this vulnerability.
     *           If the vulnerability report was not updated after the VM inventory
     *           update, these values might not display in VM inventory. For some
     *           operating systems, this field might be empty.
     *     @type string $available_inventory_item_id
     *           Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM.
     *           If the vulnerability report was not updated after the VM inventory
     *           update, these values might not display in VM inventory. If there is no
     *           available fix, the field is empty. The `inventory_item` value specifies
     *           the latest `SoftwarePackage` available to the VM that fixes the
     *           vulnerability.
     *     @type string $fixed_cpe_uri
     *           The recommended [CPE URI](https://cpe.mitre.org/specification/) update
     *           that contains a fix for this vulnerability.
     *     @type string $upstream_fix
     *           The upstream OS patch, packages or KB that fixes the vulnerability.
     * }
     */
    public function __construct($data = NULL) {
        \GPBMetadata\Google\Cloud\Osconfig\V1\Vulnerability::initOnce();
        parent::__construct($data);
    }

    /**
     * Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM.
     * This field displays the inventory items affected by this vulnerability.
     * If the vulnerability report was not updated after the VM inventory
     * update, these values might not display in VM inventory. For some
     * operating systems, this field might be empty.
     *
     * Generated from protobuf field <code>string installed_inventory_item_id = 1;</code>
     * @return string
     */
    public function getInstalledInventoryItemId()
    {
        return $this->installed_inventory_item_id;
    }

    /**
     * Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM.
     * This field displays the inventory items affected by this vulnerability.
     * If the vulnerability report was not updated after the VM inventory
     * update, these values might not display in VM inventory. For some
     * operating systems, this field might be empty.
     *
     * Generated from protobuf field <code>string installed_inventory_item_id = 1;</code>
     * @param string $var
     * @return $this
     */
    public function setInstalledInventoryItemId($var)
    {
        GPBUtil::checkString($var, True);
        $this->installed_inventory_item_id = $var;

        return $this;
    }

    /**
     * Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM.
     * If the vulnerability report was not updated after the VM inventory
     * update, these values might not display in VM inventory. If there is no
     * available fix, the field is empty. The `inventory_item` value specifies
     * the latest `SoftwarePackage` available to the VM that fixes the
     * vulnerability.
     *
     * Generated from protobuf field <code>string available_inventory_item_id = 2;</code>
     * @return string
     */
    public function getAvailableInventoryItemId()
    {
        return $this->available_inventory_item_id;
    }

    /**
     * Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM.
     * If the vulnerability report was not updated after the VM inventory
     * update, these values might not display in VM inventory. If there is no
     * available fix, the field is empty. The `inventory_item` value specifies
     * the latest `SoftwarePackage` available to the VM that fixes the
     * vulnerability.
     *
     * Generated from protobuf field <code>string available_inventory_item_id = 2;</code>
     * @param string $var
     * @return $this
     */
    public function setAvailableInventoryItemId($var)
    {
        GPBUtil::checkString($var, True);
        $this->available_inventory_item_id = $var;

        return $this;
    }

    /**
     * The recommended [CPE URI](https://cpe.mitre.org/specification/) update
     * that contains a fix for this vulnerability.
     *
     * Generated from protobuf field <code>string fixed_cpe_uri = 3;</code>
     * @return string
     */
    public function getFixedCpeUri()
    {
        return $this->fixed_cpe_uri;
    }

    /**
     * The recommended [CPE URI](https://cpe.mitre.org/specification/) update
     * that contains a fix for this vulnerability.
     *
     * Generated from protobuf field <code>string fixed_cpe_uri = 3;</code>
     * @param string $var
     * @return $this
     */
    public function setFixedCpeUri($var)
    {
        GPBUtil::checkString($var, True);
        $this->fixed_cpe_uri = $var;

        return $this;
    }

    /**
     * The upstream OS patch, packages or KB that fixes the vulnerability.
     *
     * Generated from protobuf field <code>string upstream_fix = 4;</code>
     * @return string
     */
    public function getUpstreamFix()
    {
        return $this->upstream_fix;
    }

    /**
     * The upstream OS patch, packages or KB that fixes the vulnerability.
     *
     * Generated from protobuf field <code>string upstream_fix = 4;</code>
     * @param string $var
     * @return $this
     */
    public function setUpstreamFix($var)
    {
        GPBUtil::checkString($var, True);
        $this->upstream_fix = $var;

        return $this;
    }

}
googleapis/google-cloud-php-osconfig
src/V1/VulnerabilityReport/Vulnerability/Item.php
PHP
apache-2.0
7,814
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fusion/config/gefConfigUtil.h"

#include <cstdlib>
#include <algorithm>

#include "fusion/autoingest/.idl/Systemrc.h"
#include "fusion/autoingest/.idl/VolumeStorage.h"
#include "fusion/autoingest/geAssetRoot.h"
#include "common/khSpawn.h"
#include "common/khException.h"
#include "common/geUsers.h"
#include "common/khFileUtils.h"
#include "common/config/geConfigUtil.h"
#include "common/config/geCapabilities.h"

namespace {

const uint32 kMaxNumJobsDefault = 8;

uint32 GetDefaultMaxNumJobs() {
  char* variable;
  uint32 max_num_jobs = kMaxNumJobsDefault;
  if ((variable = getenv("KH_GOOGLE_MAX_NUM_JOBS")) != NULL) {
    char *endptr = NULL;
    const uint32 value = static_cast<uint32>(
        std::strtoul(variable, &endptr, 0));
    if (endptr != variable) {
      max_num_jobs = value;
    }
  }
  return max_num_jobs;
}

}  // namespace

// ****************************************************************************
// ***  ValidateHostReadyForConfig
// ****************************************************************************
bool IsFusionRunning(void) {
  return (geCheckPidFile("gesystemmanager") ||
          geCheckPidFile("geresourceprovider"));
}

namespace {
void AssertFusionNotRunning(void) {
  if (IsFusionRunning()) {
    throw khException(kh::tr("Please stop fusion before proceeding.\n"
                             "(e.g. /etc/init.d/gefusion stop)"));
  }
}
}

std::string ValidateHostReadyForConfig(void) {
  AssertRunningAsRoot();
  AssertFusionNotRunning();
  return GetAndValidateHostname();
}

void LoadSystemrc(Systemrc &systemrc) {
  static Systemrc cached_systemrc;
  static bool use_cached = false;
  if (use_cached) {
    systemrc = cached_systemrc;
    return;
  }
  if (khExists(Systemrc::Filename())) {
    use_cached = true;
    // load into a tmp to avoid a partial load on an earlier file
    // affecting the defaults for this load
    Systemrc tmp;
    if (tmp.Load()) {
      uint32 max_num_jobs = GetMaxNumJobs();
      // If uninitialized or greater than the maximum allowable number of
      // concurrent jobs, maxjobs defaults to the min of the values:
      // maximum allowable number of concurrent jobs or limit on max number
      // of jobs.
      if (tmp.maxjobs == 0 || tmp.maxjobs > max_num_jobs) {
        tmp.maxjobs = std::min(max_num_jobs, kMaxNumJobsLimit);
      }
      systemrc = cached_systemrc = tmp;
    }
  } else {
    throw khException(kh::tr("'%1' is missing").arg(Systemrc::Filename()));
  }
}

std::string CommandlineAssetRootDefault(void) {
  Systemrc systemrc;
  LoadSystemrc(systemrc);
  return systemrc.assetroot;
}

uint32 CommandlineNumCPUsDefault(void) {
  Systemrc systemrc;
  LoadSystemrc(systemrc);
  return systemrc.maxjobs;
}

// ****************************************************************************
// ***  Volume routines
// ****************************************************************************
void LoadVolumesOrThrow(const std::string &assetroot, VolumeDefList &volumes) {
  std::string volumefname =
      geAssetRoot::Filename(assetroot, geAssetRoot::VolumeFile);
  if (!khExists(volumefname) || !volumes.Load(volumefname)) {
    throw khException(kh::tr("Unable to load volumes for %1")
                      .arg(assetroot));
  }
}

void SaveVolumesOrThrow(const std::string &assetroot,
                        const VolumeDefList &volumes) {
  std::string volumefname =
      geAssetRoot::Filename(assetroot, geAssetRoot::VolumeFile);
  if (!volumes.Save(volumefname)) {
    throw khException(kh::tr("Unable to save volumes for %1")
                      .arg(assetroot));
  }
  (void)khChmod(volumefname, geAssetRoot::FilePerms(geAssetRoot::VolumeFile));
}

void SwitchToUser(const std::string username,
                  const std::string group_name) {
  geUserId ge_user(username, group_name);
  ge_user.SwitchEffectiveToThis();
}

uint32 GetMaxNumJobs() {
  uint32 max_num_jobs = 0;
  // Get maximum allowable number of concurrent jobs.
  // Note: KH_MAX_NUM_JOBS_COEFF can be used to build GEE Fusion licensing
  // KH_MAX_NUM_JOBS_COEFF*kMaxNumJobsDefault (8/16/24..) concurrent jobs.
#ifdef KH_MAX_NUM_JOBS_COEFF
  max_num_jobs = kMaxNumJobsDefault *
      static_cast<uint32>(KH_MAX_NUM_JOBS_COEFF);
#endif

  // Note: Apply an internal multiplier in case GEE Fusion is built
  // with the maximum number of concurrent jobs equal to 0 (internal usage).
  if (max_num_jobs == 0) {
    max_num_jobs = GetDefaultMaxNumJobs();
  }

  // Set the max_num_jobs to the min of the values: number of CPUs or max
  // allowable number of jobs.
  max_num_jobs = std::min(max_num_jobs, GetNumCPUs());

  return max_num_jobs;
}
tst-ahernandez/earthenterprise
earth_enterprise/src/fusion/config/gefConfigUtil.cpp
C++
apache-2.0
5,241
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.internal.processors.cache.persistence.pagemem;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.LockSupport;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker;
import org.apache.ignite.internal.processors.cache.persistence.CheckpointWriteProgressSupplier;
import org.apache.ignite.internal.util.GridConcurrentHashSet;
import org.apache.ignite.internal.util.typedef.internal.U;

/**
 * Throttles threads that generate dirty pages during ongoing checkpoint.
 * Designed to avoid zero dropdowns that can happen if checkpoint buffer is overflowed.
 * Uses average checkpoint write speed and moment speed of marking pages as dirty.
 */
public class PagesWriteSpeedBasedThrottle implements PagesWriteThrottlePolicy {
    /** Maximum dirty pages in region. */
    private static final double MAX_DIRTY_PAGES = 0.75;

    /** Page memory. */
    private final PageMemoryImpl pageMemory;

    /** Database manager. */
    private final CheckpointWriteProgressSupplier cpProgress;

    /** Starting throttle time. Limits write speed to 1000 MB/s. */
    private static final long STARTING_THROTTLE_NANOS = 4000;

    /** Backoff ratio. Each next park will be this times longer. */
    private static final double BACKOFF_RATIO = 1.05;

    /** Percent of dirty pages which will not cause throttling. */
    private static final double MIN_RATIO_NO_THROTTLE = 0.03;

    /** Exponential backoff counter. */
    private final AtomicInteger exponentialBackoffCntr = new AtomicInteger(0);

    /** Counter of written pages from checkpoint. Value is saved here for detecting checkpoint start. */
    private final AtomicInteger lastObservedWritten = new AtomicInteger(0);

    /**
     * Dirty pages ratio was observed at checkpoint start (here start is moment when first page was actually saved to
     * store). This ratio is excluded from throttling.
     */
    private volatile double initDirtyRatioAtCpBegin = MIN_RATIO_NO_THROTTLE;

    /**
     * Target (maximum) dirty pages ratio, after which throttling will start using
     * {@link #getParkTime(double, long, int, int, long, long)}.
     */
    private volatile double targetDirtyRatio;

    /**
     * Current dirty pages ratio (percent of dirty pages in most used segment), negative value means no cp is running.
     */
    private volatile double currDirtyRatio;

    /** Average checkpoint write speed. Current and 3 past checkpoints used. Pages/second. */
    private final IntervalBasedMeasurement speedCpWrite = new IntervalBasedMeasurement();

    /** Last estimated speed for marking all clear pages as dirty till the end of checkpoint. */
    private volatile long speedForMarkAll;

    /** Threads set. Contains identifiers of all threads which were marking pages for current checkpoint. */
    private final GridConcurrentHashSet<Long> threadIds = new GridConcurrentHashSet<>();

    /**
     * Used for calculating speed of marking pages dirty.
     * Value from past 750-1000 millis only.
     * {@link IntervalBasedMeasurement#getSpeedOpsPerSec(long)} returns pages marked/second.
     * {@link IntervalBasedMeasurement#getAverage()} returns average throttle time.
     */
    private final IntervalBasedMeasurement speedMarkAndAvgParkTime = new IntervalBasedMeasurement(250, 3);

    /** Total pages which it is possible to store in page memory. */
    private long totalPages;

    /** Checkpoint lock state provider. */
    private CheckpointLockStateChecker cpLockStateChecker;

    /** Logger. */
    private IgniteLogger log;

    /** Previous warning time, nanos. */
    private AtomicLong prevWarnTime = new AtomicLong();

    /** Warning min delay nanoseconds. */
    private static final long WARN_MIN_DELAY_NS = TimeUnit.SECONDS.toNanos(10);

    /** Warning threshold: minimal level of pressure that causes warning messages to log. */
    static final double WARN_THRESHOLD = 0.2;

    /**
     * @param pageMemory Page memory.
     * @param cpProgress Database manager.
     * @param stateChecker Checkpoint lock state provider.
     * @param log Logger.
     */
    public PagesWriteSpeedBasedThrottle(
        PageMemoryImpl pageMemory,
        CheckpointWriteProgressSupplier cpProgress,
        CheckpointLockStateChecker stateChecker,
        IgniteLogger log
    ) {
        this.pageMemory = pageMemory;
        this.cpProgress = cpProgress;
        totalPages = pageMemory.totalPages();
        this.cpLockStateChecker = stateChecker;
        this.log = log;
    }

    /** {@inheritDoc} */
    @Override public void onMarkDirty(boolean isPageInCheckpoint) {
        assert cpLockStateChecker.checkpointLockIsHeldByThread();

        AtomicInteger writtenPagesCntr = cpProgress.writtenPagesCounter();

        if (writtenPagesCntr == null) {
            speedForMarkAll = 0;
            targetDirtyRatio = -1;
            currDirtyRatio = -1;

            return; // Don't throttle if checkpoint is not running.
        }

        int cpWrittenPages = writtenPagesCntr.get();

        long fullyCompletedPages = (cpWrittenPages + cpSyncedPages()) / 2; // written & sync'ed

        long curNanoTime = System.nanoTime();

        speedCpWrite.setCounter(fullyCompletedPages, curNanoTime);

        long markDirtySpeed = speedMarkAndAvgParkTime.getSpeedOpsPerSec(curNanoTime);

        long curCpWriteSpeed = speedCpWrite.getSpeedOpsPerSec(curNanoTime);

        threadIds.add(Thread.currentThread().getId());

        // Whether a delay (throttling) should be applied for the current page modification.
        ThrottleMode level = ThrottleMode.NO;

        if (isPageInCheckpoint) {
            int checkpointBufLimit = pageMemory.checkpointBufferPagesSize() * 2 / 3;

            if (pageMemory.checkpointBufferPagesCount() > checkpointBufLimit)
                level = ThrottleMode.EXPONENTIAL;
        }

        long throttleParkTimeNs = 0;

        if (level == ThrottleMode.NO) {
            int nThreads = threadIds.size();

            int cpTotalPages = cpTotalPages();

            if (cpTotalPages == 0) {
                boolean throttleByCpSpeed = curCpWriteSpeed > 0 && markDirtySpeed > curCpWriteSpeed;

                if (throttleByCpSpeed) {
                    throttleParkTimeNs = calcDelayTime(curCpWriteSpeed, nThreads, 1);

                    level = ThrottleMode.LIMITED;
                }
            }
            else {
                double dirtyPagesRatio = pageMemory.getDirtyPagesRatio();

                currDirtyRatio = dirtyPagesRatio;

                detectCpPagesWriteStart(cpWrittenPages, dirtyPagesRatio);

                if (dirtyPagesRatio >= MAX_DIRTY_PAGES)
                    level = ThrottleMode.NO; // too late to throttle, will wait on safe to update instead.
                else {
                    int notEvictedPagesTotal = cpTotalPages - cpEvictedPages();

                    throttleParkTimeNs = getParkTime(dirtyPagesRatio,
                        fullyCompletedPages,
                        notEvictedPagesTotal < 0 ? 0 : notEvictedPagesTotal,
                        nThreads,
                        markDirtySpeed,
                        curCpWriteSpeed);

                    level = throttleParkTimeNs == 0 ? ThrottleMode.NO : ThrottleMode.LIMITED;
                }
            }
        }

        if (level == ThrottleMode.EXPONENTIAL) {
            int exponent = exponentialBackoffCntr.getAndIncrement();

            throttleParkTimeNs = (long)(STARTING_THROTTLE_NANOS * Math.pow(BACKOFF_RATIO, exponent));
        }
        else {
            if (isPageInCheckpoint)
                exponentialBackoffCntr.set(0);

            if (level == ThrottleMode.NO)
                throttleParkTimeNs = 0;
        }

        if (throttleParkTimeNs > 0) {
            recurrentLogIfNeed();

            doPark(throttleParkTimeNs);
        }

        speedMarkAndAvgParkTime.addMeasurementForAverageCalculation(throttleParkTimeNs);
    }

    /**
     * Disables the current thread for thread scheduling purposes. May be overridden by subclasses for tests.
     *
     * @param throttleParkTimeNs the maximum number of nanoseconds to wait
     */
    protected void doPark(long throttleParkTimeNs) {
        if (throttleParkTimeNs > LOGGING_THRESHOLD) {
            U.warn(log, "Parking thread=" + Thread.currentThread().getName()
                + " for timeout(ms)=" + (throttleParkTimeNs / 1_000_000));
        }

        LockSupport.parkNanos(throttleParkTimeNs);
    }

    /**
     * @return number of written pages.
     */
    private int cpWrittenPages() {
        AtomicInteger writtenPagesCntr = cpProgress.writtenPagesCounter();

        return writtenPagesCntr == null ? 0 : writtenPagesCntr.get();
    }

    /**
     * @return Number of pages in current checkpoint.
     */
    private int cpTotalPages() {
        return cpProgress.currentCheckpointPagesCount();
    }

    /**
     * @return Counter for fsynced checkpoint pages.
     */
    private int cpSyncedPages() {
        AtomicInteger syncedPagesCntr = cpProgress.syncedPagesCounter();

        return syncedPagesCntr == null ? 0 : syncedPagesCntr.get();
    }

    /**
     * @return number of evicted pages.
     */
    private int cpEvictedPages() {
        AtomicInteger evictedPagesCntr = cpProgress.evictedPagesCntr();

        return evictedPagesCntr == null ? 0 : evictedPagesCntr.get();
    }

    /**
     * Prints a warning to the log if throttling occurred and took a noticeable amount of time.
     */
    private void recurrentLogIfNeed() {
        long prevWarningNs = prevWarnTime.get();
        long curNs = System.nanoTime();

        if (prevWarningNs != 0 && (curNs - prevWarningNs) <= WARN_MIN_DELAY_NS)
            return;

        double weight = throttleWeight();

        if (weight <= WARN_THRESHOLD)
            return;

        if (prevWarnTime.compareAndSet(prevWarningNs, curNs)) {
            String msg = String.format("Throttling is applied to page modifications "
                    + "[percentOfPartTime=%.2f, markDirty=%d pages/sec, checkpointWrite=%d pages/sec, "
                    + "estIdealMarkDirty=%d pages/sec, curDirty=%.2f, maxDirty=%.2f, avgParkTime=%d ns, "
                    + "pages: (total=%d, evicted=%d, written=%d, synced=%d, cpBufUsed=%d, cpBufTotal=%d)]",
                weight, getMarkDirtySpeed(), getCpWriteSpeed(),
                getLastEstimatedSpeedForMarkAll(), getCurrDirtyRatio(),
                getTargetDirtyRatio(), throttleParkTime(),
                cpTotalPages(), cpEvictedPages(), cpWrittenPages(), cpSyncedPages(),
                pageMemory.checkpointBufferPagesCount(), pageMemory.checkpointBufferPagesSize());

            log.info(msg);
        }
    }

    /**
     * @param dirtyPagesRatio actual percent of dirty pages.
     * @param fullyCompletedPages written & fsynced pages count.
     * @param cpTotalPages total checkpoint scope.
     * @param nThreads number of threads providing data during current checkpoint.
     * @param markDirtySpeed registered mark dirty speed, pages/sec.
     * @param curCpWriteSpeed average checkpoint write speed, pages/sec.
     * @return time in nanoseconds to park, or 0 if throttling is not required.
     */
    long getParkTime(
        double dirtyPagesRatio,
        long fullyCompletedPages,
        int cpTotalPages,
        int nThreads,
        long markDirtySpeed,
        long curCpWriteSpeed) {

        long speedForMarkAll = calcSpeedToMarkAllSpaceTillEndOfCp(dirtyPagesRatio,
            fullyCompletedPages,
            curCpWriteSpeed,
            cpTotalPages);

        double targetDirtyRatio = calcTargetDirtyRatio(fullyCompletedPages, cpTotalPages);

        this.speedForMarkAll = speedForMarkAll; //publish for metrics
        this.targetDirtyRatio = targetDirtyRatio; //publish for metrics

        boolean lowSpaceLeft = dirtyPagesRatio > targetDirtyRatio && (dirtyPagesRatio + 0.05 > MAX_DIRTY_PAGES);
        int slowdown = lowSpaceLeft ? 3 : 1;

        double multiplierForSpeedForMarkAll = lowSpaceLeft ? 0.8 : 1.0;
        boolean markingTooFast = speedForMarkAll > 0 && markDirtySpeed > multiplierForSpeedForMarkAll * speedForMarkAll;
        boolean throttleBySizeAndMarkSpeed = dirtyPagesRatio > targetDirtyRatio && markingTooFast;

        //for case of speedForMarkAll >> markDirtySpeed, allow write little bit faster than CP average
        double allowWriteFasterThanCp = (speedForMarkAll > 0 && markDirtySpeed > 0 && speedForMarkAll > markDirtySpeed)
            ? (0.1 * speedForMarkAll / markDirtySpeed)
            : (dirtyPagesRatio > targetDirtyRatio ? 0.0 : 0.1);

        double fasterThanCpWriteSpeed = lowSpaceLeft
            ? 1.0
            : 1.0 + allowWriteFasterThanCp;
        boolean throttleByCpSpeed = curCpWriteSpeed > 0 && markDirtySpeed > (fasterThanCpWriteSpeed * curCpWriteSpeed);

        long delayByCpWrite = throttleByCpSpeed ? calcDelayTime(curCpWriteSpeed, nThreads, slowdown) : 0;
        long delayByMarkAllWrite = throttleBySizeAndMarkSpeed ? calcDelayTime(speedForMarkAll, nThreads, slowdown) : 0;

        return Math.max(delayByCpWrite, delayByMarkAllWrite);
    }

    /**
     * @param dirtyPagesRatio current percent of dirty pages.
     * @param fullyCompletedPages count of written and sync'ed pages
     * @param curCpWriteSpeed pages/second checkpoint write speed. 0 speed means 'no data'.
     * @param cpTotalPages total pages in checkpoint.
     * @return pages/second needed to mark all clean pages as dirty till the end of checkpoint. 0 speed means 'no
     * data'.
     */
    private long calcSpeedToMarkAllSpaceTillEndOfCp(double dirtyPagesRatio,
        long fullyCompletedPages,
        long curCpWriteSpeed,
        int cpTotalPages) {
        if (curCpWriteSpeed == 0)
            return 0;

        if (cpTotalPages <= 0)
            return 0;

        if (dirtyPagesRatio >= MAX_DIRTY_PAGES)
            return 0;

        double remainedClear = (MAX_DIRTY_PAGES - dirtyPagesRatio) * totalPages;

        double timeRemainedSeconds = 1.0 * (cpTotalPages - fullyCompletedPages) / curCpWriteSpeed;

        return (long)(remainedClear / timeRemainedSeconds);
    }

    /**
     * @param fullyCompletedPages number of completed.
     * @param cpTotalPages Total amount of pages under checkpoint.
     * @return size-based calculation of target ratio.
     */
    private double calcTargetDirtyRatio(long fullyCompletedPages, int cpTotalPages) {
        double cpProgress = ((double)fullyCompletedPages) / cpTotalPages;

        // Starting with initialDirtyRatioAtCpBegin to avoid throttle right after checkpoint start
        double constStart = initDirtyRatioAtCpBegin;

        double throttleTotalWeight = 1.0 - constStart;

        // .75 is maximum ratio of dirty pages
        return (cpProgress * throttleTotalWeight + constStart) * MAX_DIRTY_PAGES;
    }

    /**
     * @param baseSpeed speed to slow down.
     * @param nThreads operating threads.
     * @param coefficient how much the base speed needs to be slowed down. 1.0 means delay to get exact base speed.
     * @return sleep time in nanoseconds.
     */
    private long calcDelayTime(long baseSpeed, int nThreads, double coefficient) {
        if (coefficient <= 0.0)
            return 0;

        if (baseSpeed <= 0)
            return 0;

        long updTimeNsForOnePage = TimeUnit.SECONDS.toNanos(1) * nThreads / (baseSpeed);

        return (long)(coefficient * updTimeNsForOnePage);
    }

    /**
     * @param cpWrittenPages current counter of written pages.
     * @param dirtyPagesRatio current percent of dirty pages.
     */
    private void detectCpPagesWriteStart(int cpWrittenPages, double dirtyPagesRatio) {
        if (cpWrittenPages > 0 && lastObservedWritten.compareAndSet(0, cpWrittenPages)) {
            double newMinRatio = dirtyPagesRatio;

            if (newMinRatio < MIN_RATIO_NO_THROTTLE)
                newMinRatio = MIN_RATIO_NO_THROTTLE;

            if (newMinRatio > 1)
                newMinRatio = 1;

            // A slow checkpoint has completed now, drop the previous dirty page percent.
            initDirtyRatioAtCpBegin = newMinRatio;
        }
    }

    /** {@inheritDoc} */
    @Override public void onBeginCheckpoint() {
        speedCpWrite.setCounter(0L, System.nanoTime());

        initDirtyRatioAtCpBegin = MIN_RATIO_NO_THROTTLE;

        lastObservedWritten.set(0);
    }

    /** {@inheritDoc} */
    @Override public void onFinishCheckpoint() {
        exponentialBackoffCntr.set(0);

        speedCpWrite.finishInterval();
        speedMarkAndAvgParkTime.finishInterval();
        threadIds.clear();
    }

    /**
     * @return Average throttling park time, nanos.
     */
    public long throttleParkTime() {
        return speedMarkAndAvgParkTime.getAverage();
    }

    /**
     * @return Target (maximum) dirty pages ratio, after which throttling will start.
     */
    public double getTargetDirtyRatio() {
        return targetDirtyRatio;
    }

    /**
     * @return Current dirty pages ratio.
     */
    public double getCurrDirtyRatio() {
        double ratio = currDirtyRatio;

        if (ratio >= 0)
            return ratio;

        return pageMemory.getDirtyPagesRatio();
    }

    /**
     * @return Speed of marking pages dirty. Value from past 750-1000 millis only. Pages/second.
     */
    public long getMarkDirtySpeed() {
        return speedMarkAndAvgParkTime.getSpeedOpsPerSec(System.nanoTime());
    }

    /**
     * @return Average checkpoint write speed. Current and 3 past checkpoints used. Pages/second.
     */
    public long getCpWriteSpeed() {
        return speedCpWrite.getSpeedOpsPerSecReadOnly();
    }

    /**
     * @return Returns {@link #speedForMarkAll}.
     */
    public long getLastEstimatedSpeedForMarkAll() {
        return speedForMarkAll;
    }

    /**
     * Measurement shows how much throttling time is involved into average marking time.
     * @return metric started from 0.0 and showing how much throttling is involved into current marking process.
     */
    public double throttleWeight() {
        long speed = speedMarkAndAvgParkTime.getSpeedOpsPerSec(System.nanoTime());

        if (speed <= 0)
            return 0;

        long timeForOnePage = calcDelayTime(speed, threadIds.size(), 1);

        if (timeForOnePage == 0)
            return 0;

        return 1.0 * throttleParkTime() / timeForOnePage;
    }

    /**
     * Throttling mode for page.
     */
    private enum ThrottleMode {
        /** No delay is applied. */
        NO,

        /** Limited, time is based on target speed. */
        LIMITED,

        /** Exponential. */
        EXPONENTIAL
    }
}
alexzaitzev/ignite
modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java
Java
apache-2.0
19,621
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.oodt.cas.workflow.engine; //OODT imports import org.apache.oodt.cas.metadata.Metadata; import org.apache.oodt.cas.resource.system.XmlRpcResourceManagerClient; import org.apache.oodt.cas.workflow.structs.Workflow; import org.apache.oodt.cas.workflow.structs.WorkflowInstance; import org.apache.oodt.cas.workflow.structs.WorkflowStatus; import org.apache.oodt.cas.workflow.structs.WorkflowTask; import org.apache.oodt.cas.workflow.structs.exceptions.EngineException; import org.apache.oodt.cas.workflow.structs.exceptions.InstanceRepositoryException; import org.apache.oodt.cas.workflow.engine.IterativeWorkflowProcessorThread; import org.apache.oodt.cas.workflow.instrepo.WorkflowInstanceRepository; import org.apache.oodt.commons.util.DateConvert; //JDK imports import java.net.URL; import java.text.ParseException; import java.util.Date; import java.util.HashMap; import java.util.logging.Level; import java.util.logging.Logger; //java.util.concurrent imports import EDU.oswego.cs.dl.util.concurrent.BoundedBuffer; import EDU.oswego.cs.dl.util.concurrent.Channel; import EDU.oswego.cs.dl.util.concurrent.LinkedQueue; import EDU.oswego.cs.dl.util.concurrent.PooledExecutor; /** * * The ThreadPooling portion of the WorkflowEngine. This class is meant to be an * extension point for WorkflowEngines that want to implement ThreadPooling. * This WorkflowEngine provides everything needed to manage a ThreadPool using * Doug Lea's wonderful java.util.concurrent package that made it into JDK5. * * @author mattmann * @version $Revsion$ * */ public class ThreadPoolWorkflowEngine implements WorkflowEngine, WorkflowStatus { /* our thread pool */ private PooledExecutor pool = null; /* our worker thread hash mapping worker threads to workflow instance ids */ private HashMap workerMap = null; /* our log stream */ private static final Logger LOG = Logger .getLogger(ThreadPoolWorkflowEngine.class.getName()); /* our instance repository */ private WorkflowInstanceRepository instRep = null; /* our resource manager client */ private XmlRpcResourceManagerClient rClient = null; /* the URL pointer to the parent Workflow Manager */ private URL wmgrUrl = null; /** * Default Constructor. * * @param instRep * The WorkflowInstanceRepository to be used by this engine. * @param queueSize * The size of the queue that the workflow engine should use * (irrelevant if unlimitedQueue is set to true) * @param maxPoolSize * The minimum thread pool size. * @param minPoolSize * The maximum thread pool size. * @param threadKeepAliveTime * The amount of minutes that each thread in the pool should be kept * alive. * @param unlimitedQueue * Whether or not to use a queue whose bounds are dictated by the * physical memory of the underlying hardware. 
* @param resUrl * A URL pointer to a resource manager. If this is set Tasks will be * wrapped as Resource Manager {@link Job}s and sent through the * Resource Manager. If this parameter is not set, local execution * (the default) will be used */ public ThreadPoolWorkflowEngine(WorkflowInstanceRepository instRep, int queueSize, int maxPoolSize, int minPoolSize, long threadKeepAliveTime, boolean unlimitedQueue, URL resUrl) { this.instRep = instRep; Channel c = null; if (unlimitedQueue) { c = new LinkedQueue(); } else { c = new BoundedBuffer(queueSize); } pool = new PooledExecutor(c, maxPoolSize); pool.setMinimumPoolSize(minPoolSize); pool.setKeepAliveTime(1000 * 60 * threadKeepAliveTime); workerMap = new HashMap(); if (resUrl != null) rClient = new XmlRpcResourceManagerClient(resUrl); } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#pauseWorkflowInstance * (java.lang.String) */ public synchronized void pauseWorkflowInstance(String workflowInstId) { // okay, try and look up that worker thread in our hash map IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap .get(workflowInstId); if (worker == null) { LOG.log(Level.WARNING, "WorkflowEngine: Attempt to pause workflow instance id: " + workflowInstId + ", however, this engine is not tracking its execution"); return; } // otherwise, all good worker.pause(); } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#resumeWorkflowInstance * (java.lang.String) */ public synchronized void resumeWorkflowInstance(String workflowInstId) { // okay, try and look up that worker thread in our hash map IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap .get(workflowInstId); if (worker == null) { LOG.log(Level.WARNING, "WorkflowEngine: Attempt to resume workflow instance id: " + workflowInstId + ", however, this engine is " + "not tracking its execution"); return; } // also check to make sure that the worker is currently paused // only can resume WorkflowInstances that are paused, right? 
if (!worker.isPaused()) { LOG.log(Level.WARNING, "WorkflowEngine: Attempt to resume a workflow that " + "isn't paused currently: instance id: " + workflowInstId); return; } // okay, all good worker.resume(); } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#startWorkflow(org.apache * .oodt.cas.workflow.structs.Workflow, org.apache.oodt.cas.metadata.Metadata) */ public synchronized WorkflowInstance startWorkflow(Workflow workflow, Metadata metadata) throws EngineException { // to start the workflow, we create a default workflow instance // populate it // persist it // add it to the worker map // start it WorkflowInstance wInst = new WorkflowInstance(); wInst.setWorkflow(workflow); wInst.setCurrentTaskId(((WorkflowTask) workflow.getTasks().get(0)) .getTaskId()); wInst.setSharedContext(metadata); wInst.setStatus(CREATED); persistWorkflowInstance(wInst); IterativeWorkflowProcessorThread worker = new IterativeWorkflowProcessorThread( wInst, instRep, this.wmgrUrl); worker.setRClient(rClient); workerMap.put(wInst.getId(), worker); wInst.setStatus(QUEUED); persistWorkflowInstance(wInst); try { pool.execute(worker); } catch (InterruptedException e) { throw new EngineException(e); } return wInst; } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#getInstanceRepository() */ public WorkflowInstanceRepository getInstanceRepository() { return this.instRep; } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#updateMetadata(java. * lang.String, org.apache.oodt.cas.metadata.Metadata) */ public synchronized boolean updateMetadata(String workflowInstId, Metadata met) { // okay, try and look up that worker thread in our hash map IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap .get(workflowInstId); if (worker == null) { LOG.log(Level.WARNING, "WorkflowEngine: Attempt to update metadata context " + "for workflow instance id: " + workflowInstId + ", however, this engine is " + "not tracking its execution"); return false; } worker.getWorkflowInstance().setSharedContext(met); try { persistWorkflowInstance(worker.getWorkflowInstance()); } catch (Exception e) { LOG.log( Level.WARNING, "Exception persisting workflow instance: [" + worker.getWorkflowInstance().getId() + "]: Message: " + e.getMessage()); return false; } return true; } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#setWorkflowManagerUrl * (java.net.URL) */ public void setWorkflowManagerUrl(URL url) { this.wmgrUrl = url; } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#stopWorkflow(java.lang * .String) */ public synchronized void stopWorkflow(String workflowInstId) { // okay, try and look up that worker thread in our hash map IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap .get(workflowInstId); if (worker == null) { LOG.log(Level.WARNING, "WorkflowEngine: Attempt to stop workflow instance id: " + workflowInstId + ", however, this engine is " + "not tracking its execution"); return; } worker.stop(); } /* * (non-Javadoc) * * @see org.apache.oodt.cas.workflow.engine.WorkflowEngine# * getCurrentTaskWallClockMinutes(java.lang.String) */ public double getCurrentTaskWallClockMinutes(String workflowInstId) { // get the workflow instance that we're talking about WorkflowInstance inst = safeGetWorkflowInstanceById(workflowInstId); return getCurrentTaskWallClockMinutes(inst); } /* * (non-Javadoc) * * @see * 
org.apache.oodt.cas.workflow.engine.WorkflowEngine#getWorkflowInstanceMetadata * (java.lang.String) */ public Metadata getWorkflowInstanceMetadata(String workflowInstId) { // okay, try and look up that worker thread in our hash map IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap .get(workflowInstId); if (worker == null) { // try and get the metadata // from the workflow instance repository (as it was persisted) try { WorkflowInstance inst = instRep.getWorkflowInstanceById(workflowInstId); return inst.getSharedContext(); } catch (InstanceRepositoryException e) { LOG.log(Level.FINEST, "WorkflowEngine: Attempt to get metadata " + "for workflow instance id: " + workflowInstId + ", however, this engine is " + "not tracking its execution and the id: [" + workflowInstId + "] " + "was never persisted to " + "the instance repository"); e.printStackTrace(); return new Metadata(); } } return worker.getWorkflowInstance().getSharedContext(); } /* * (non-Javadoc) * * @see * org.apache.oodt.cas.workflow.engine.WorkflowEngine#getWallClockMinutes( * java.lang.String) */ public double getWallClockMinutes(String workflowInstId) { // get the workflow instance that we're talking about WorkflowInstance inst = safeGetWorkflowInstanceById(workflowInstId); return getWallClockMinutes(inst); } protected static double getWallClockMinutes(WorkflowInstance inst) { if (inst == null) { return 0.0; } Date currentDateOrStopTime = (inst.getEndDateTimeIsoStr() != null && !inst.getEndDateTimeIsoStr().equals("") && !inst .getEndDateTimeIsoStr().equals("null")) ? safeDateConvert(inst .getEndDateTimeIsoStr()) : new Date(); Date workflowStartDateTime = null; if (inst.getStartDateTimeIsoStr() == null || (inst.getStartDateTimeIsoStr() != null && (inst .getStartDateTimeIsoStr().equals("") || inst .getStartDateTimeIsoStr().equals("null")))) { return 0.0; } try { workflowStartDateTime = DateConvert.isoParse(inst .getStartDateTimeIsoStr()); } catch (ParseException e) { return 0.0; } long diffMs = currentDateOrStopTime.getTime() - workflowStartDateTime.getTime(); double diffSecs = (diffMs * 1.0 / 1000.0); double diffMins = diffSecs / 60.0; return diffMins; } protected static double getCurrentTaskWallClockMinutes(WorkflowInstance inst) { if (inst == null) { return 0.0; } Date currentDateOrStopTime = (inst.getCurrentTaskEndDateTimeIsoStr() != null && !inst.getCurrentTaskEndDateTimeIsoStr().equals("") && !inst .getCurrentTaskEndDateTimeIsoStr().equals("null")) ? 
safeDateConvert(inst .getCurrentTaskEndDateTimeIsoStr()) : new Date(); Date workflowTaskStartDateTime = null; if (inst.getCurrentTaskStartDateTimeIsoStr() == null || (inst.getCurrentTaskStartDateTimeIsoStr() != null && (inst .getCurrentTaskStartDateTimeIsoStr().equals("") || inst .getCurrentTaskStartDateTimeIsoStr().equals("null")))) { return 0.0; } try { workflowTaskStartDateTime = DateConvert.isoParse(inst .getCurrentTaskStartDateTimeIsoStr()); } catch (ParseException e) { return 0.0; } // should never be in this state, so return 0 if (workflowTaskStartDateTime.after(currentDateOrStopTime)) { LOG.log( Level.WARNING, "Start date time: [" + DateConvert.isoFormat(workflowTaskStartDateTime) + " of workflow inst [" + inst.getId() + "] is AFTER " + "End date time: [" + DateConvert.isoFormat(currentDateOrStopTime) + "] of workflow inst."); return 0.0; } long diffMs = currentDateOrStopTime.getTime() - workflowTaskStartDateTime.getTime(); double diffSecs = (diffMs * 1.0 / 1000.0); double diffMins = diffSecs / 60.0; return diffMins; } private synchronized void persistWorkflowInstance(WorkflowInstance wInst) throws EngineException { try { if (wInst.getId() == null || (wInst.getId() != null && wInst.getId().equals(""))) { // we have to persist it by adding it // rather than updating it instRep.addWorkflowInstance(wInst); } else { // persist by update instRep.updateWorkflowInstance(wInst); } } catch (InstanceRepositoryException e) { e.printStackTrace(); throw new EngineException(e.getMessage()); } } private WorkflowInstance safeGetWorkflowInstanceById(String workflowInstId) { try { return instRep.getWorkflowInstanceById(workflowInstId); } catch (Exception e) { return null; } } private static Date safeDateConvert(String isoTimeStr) { try { return DateConvert.isoParse(isoTimeStr); } catch (Exception ignore) { ignore.printStackTrace(); return null; } } }
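/*
 * Usage sketch (not part of the original source): how a caller might drive this
 * engine. The instRep, workflow and metadata objects below are hypothetical
 * stand-ins; only the constructor, startWorkflow(), pauseWorkflowInstance() and
 * resumeWorkflowInstance() come from the class above.
 *
 *   ThreadPoolWorkflowEngine engine = new ThreadPoolWorkflowEngine(
 *       instRep,  // some WorkflowInstanceRepository implementation
 *       10,       // queueSize (ignored when unlimitedQueue is true)
 *       6, 4,     // max/min thread pool size
 *       5,        // thread keep-alive time, in minutes
 *       false,    // bounded queue
 *       null);    // no Resource Manager URL: tasks run locally
 *   WorkflowInstance inst = engine.startWorkflow(workflow, metadata); // throws EngineException
 *   engine.pauseWorkflowInstance(inst.getId());   // worker must be tracked by this engine
 *   engine.resumeWorkflowInstance(inst.getId());  // only legal while paused
 */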
OSBI/oodt
workflow/src/main/java/org/apache/oodt/cas/workflow/engine/ThreadPoolWorkflowEngine.java
Java
apache-2.0
15,482
<?php /* * InlineStyle MIT License * * Copyright (c) 2010 Christiaan Baartse * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * Parses an HTML file and applies all embedded and external stylesheets inline * * @author Christiaan Baartse <christiaan@baartse.nl> * @copyright 2010 Christiaan Baartse */ class InlineStyle { /** * @var DOMDocument the HTML as DOMDocument */ protected $_dom; /** * @var CSSQuery instance to use css based selectors on our DOMDocument */ protected $_cssquery; /** * Prepare all the necessary objects * * @param string $html * @param string $encoding */ public function __construct($html, $encoding = 'UTF-8') { if(!class_exists("CSSQuery")) { throw new Exception( "InlineStyle needs the CSSQuery class"); } $html = htmlspecialchars_decode(htmlentities((string) $html, ENT_NOQUOTES, $encoding), ENT_NOQUOTES); $this->_dom = new DOMDocument(); $this->_dom->formatOutput = true; $this->_dom->preserveWhiteSpace = false; if(file_exists($html)) { $this->_dom->loadHTMLFile($html); } else { $this->_dom->loadHTML($html); } $this->_cssquery = new CSSQuery($this->_dom); } /** * Applies one or more stylesheets to the current document * * @param string $stylesheet * @return InlineStyle self */ public function applyStylesheet($stylesheet) { $stylesheet = (array) $stylesheet; foreach($stylesheet as $ss) { foreach($this->parseStylesheet($ss) as $arr) { list($selector, $style) = $arr; $this->applyRule($selector, $style); } } return $this; } /** * Applies a style rule on the document * @param string $selector * @param string $style * @return InlineStyle self */ public function applyRule($selector, $style) { $selector = trim(trim($selector), ","); if($selector) { $nodes = array(); foreach(explode(",", $selector) as $sel) { if(false === stripos($sel, ":hover") && false === stripos($sel, ":active") && false === stripos($sel, ":link") && false === stripos($sel, ":visited")) { $nodes = array_merge($nodes, $this->_cssquery->query($sel)); } } $style = $this->_styleToArray($style); foreach($nodes as $node) { $current = $node->hasAttribute("style") ?
$this->_styleToArray($node->getAttribute("style")) : array(); $current = $this->_mergeStyles($current, $style); $st = array(); foreach($current as $prop => $val) { $st[] = "{$prop}:{$val}"; } $node->setAttribute("style", implode(";", $st)); } } return $this; } /** * Returns the DOMDocument as html * * @return string the HTML */ public function getHTML() { return $this->_dom->saveHTML(); } /** * Recursively extracts the stylesheet nodes from the DOMNode * @param DOMNode $node leave empty to extract from the whole document * @return array the extracted stylesheets */ public function extractStylesheets(DOMNode $node = null, $base = "") { if(null === $node) { $node = $this->_dom; } $stylesheets = array(); if(strtolower($node->nodeName) === "style") { $stylesheets[] = $node->nodeValue; $node->parentNode->removeChild($node); } else if(strtolower($node->nodeName) === "link") { if($node->hasAttribute("href")) { $href = $node->getAttribute("href"); if($base && false === strpos($href, "://")) { $href = "{$base}/{$href}"; } $ext = @file_get_contents($href); if($ext) { $stylesheets[] = $ext; $node->parentNode->removeChild($node); } } } if($node->hasChildNodes()) { foreach($node->childNodes as $child) { $stylesheets = array_merge($stylesheets, $this->extractStylesheets($child, $base)); } } return $stylesheets; } /** * Parses a stylesheet to selectors and properties * @param string $stylesheet * @return array */ public function parseStylesheet($stylesheet) { $parsed = array(); $stylesheet = $this->_stripStylesheet($stylesheet); $stylesheet = trim(trim($stylesheet), "}"); foreach(explode("}", $stylesheet) as $rule) { list($selector, $style) = explode("{", $rule, 2); $parsed[] = array(trim($selector), trim(trim($style), ";")); } return $parsed; } /** * Parses style properties to a array which can be merged by mergeStyles() * @param string $style * @return array */ protected function _styleToArray($style) { $styles = array(); $style = trim(trim($style), ";"); if($style) { foreach(explode(";",$style) as $props) { $props = trim(trim($props), ";"); preg_match('#^([-a-z0-9]+):(.*)$#i', $props, $matches); list($match, $prop, $val) = $matches; $styles[$prop] = $val; } } return $styles; } /** * Merges two sets of style properties taking !important into account * @param array $styleA * @param array $styleB * @return array */ protected function _mergeStyles(array $styleA, array $styleB) { foreach($styleB as $prop => $val) { if(!isset($styleA[$prop]) || substr(str_replace(" ", "", strtolower($styleA[$prop])), -10) !== "!important") { $styleA[$prop] = $val; } } return $styleA; } protected function _stripStylesheet($s) { $s = preg_replace('!/\*[^*]*\*+([^/][^*]*\*+)*/!','', $s); $s = str_replace(array("\r\n","\r","\n","\t",' ',' ',' '),'',$s); $s = str_replace('{ ', '{', $s); $s = str_replace(' }', '}', $s); $s = str_replace('; ', ';', $s); return $s; } } /** * This file has had some love from Christiaan Baartse <christiaan@baartse.nl> * * This package contains one class for using Cascading Style Sheet * selectors to retrieve elements from a DOMDocument object similarly * to DOMXPath does with XPath selectors * * PHP version 5 * * @category HTML * @package CSSQuery * @author Sam Shull <sam.shull@jhspecialty.com> * @copyright Copyright (c) 2009 Sam Shull <sam.shull@jhspeicalty.com> * @license <http://www.opensource.org/licenses/mit-license.html> * @version 1.4 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal 
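/*
 * Usage sketch (illustrative only, not from the original file): inlining the
 * embedded <style> blocks of a document. The $html value is a hypothetical
 * example; applyStylesheet(), extractStylesheets() and getHTML() are the real
 * methods defined above.
 *
 *   $html = '<html><head><style>p{color:red}</style></head>'
 *         . '<body><p>hi</p></body></html>';
 *   $inliner = new InlineStyle($html);
 *   // pull out <style>/<link> stylesheets, then apply them as style="" attributes
 *   $inliner->applyStylesheet($inliner->extractStylesheets());
 *   echo $inliner->getHTML(); // roughly: <p style="color:red">hi</p>
 */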
* in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * CHANGES: * 06-08-2009 - added normalize-space function to CSSQuery::className * and removed unecessary sprintf(s) in favor of " strings * and fixed runtime pass-by-reference errors * 07-14-2009 - added references and type hinting to many of the functions * in order to improve performance a little * 07-25-2009 - added support for class (.) and id (#) as filters (div#id.class) * 08-05-2009 - corrected my horrible typing errors * changed the attribute filter handling to match the entire operator */ /** * Perform a CSS query on a DOMDocument using DOMXPath * * <code> * $doc = new DOMDocument(); * $doc->loadHTML('<html><body><p>hello world</p></body></html>'); * $css = new CSSQuery($doc); * print count( $css->query("p:contains('hello world')") ); * </code> * * * @category HTML * @package CSSQuery * @author Sam Shull <sam.shull@jhspecialty.com> * @copyright Copyright (c) 2009 Sam Shull <sam.shull@jhspeicalty.com> * @license <http://www.opensource.org/licenses/mit-license.html> * @version Release: @package_version@ * @link * @since Class available since Release 1.0 */ class CSSQuery { /** * This PCRE string matches one valid css selector at a time * * @const string */ const CHUNKER = '/^\s*([#\.>~\+:\[,]?)\s*(\*|[^\*#\.>~\+:\[\]\)\(\s,]*)/'; /** * This PCRE string matches one psuedo css selector at a time * * @const string */ const PSUEDO = '/^\s*:([\w\-]+)\s*(\(\s*([^\(\)]*(\([^\(\)]*\))?)?\s*\))?\s*/'; /** * This PCRE string matches one css attribute selector at a time * * @const string */ const ATTRIBUTES = '/\[@?([\w\-]+(\|[\w\-]+)?)\s*((\S*=)\s*([\'"]?)(?(5)([^\\5]*)\\5|([^\]]+)))?\s*\]/i'; /** * An array of functions representing psuedo selectors * * @access public * * @staticvar array */ public static $filters; /** * An array of functions representing attribute selectors * * @access public * * @staticvar array */ public static $attributeFilters; /** * An instance of DOMXPath for finding the information on the document * * @access public * * @var DOMXPath */ public $xpath; /** * The document that the queries will originate from * * @access public * * @var DOMDocument */ public $document; /** * Initialize the object - opens a new DOMXPath * * @access public * * @param DOMDocument $document */ public function __construct (DOMDocument &$document) { $this->xpath = new DOMXPath($document); $this->document =& $document; } /** * register a namespace * * @access public * * @param string $prefix * @param string $URI * * @returns boolean */ public function registerNamespace ($prefix, $URI) { return $this->xpath->registerNamespace($prefix, $URI); } /** * Get an array of DOMNodes that match a CSS query expression * * 
@access public * * @param string $expression * @param mixed $context - a DOMNode or an array of DOMNodes * * @returns array */ public function query ($expression, $context=null) { $original_context = func_num_args() < 3 ? $context : func_get_arg(2); $current = $context instanceof DOMNode ? array($context) : self::makeArray($context); $new = array(); $m = array(''); if ($expression && preg_match(self::CHUNKER, $expression, $m)) { //replace a pipe with a semi-colon in a selector //for namespace uses $m[2] = $m[2] ? str_replace('|', ':', $m[2]) : '*'; switch ($m[1]) { case ',': { $new = $this->query(ltrim(substr($expression, strpos($expression, $m[1]) + 1)), array(), $original_context); $new = array_merge($current, $new); return self::unique($new); } //#id case '#': { $new = $this->id($m[2], $current); break; } //.class case '.': { $new = $this->className($m[2], $current); break; } // > child case '>': { $new = $this->children($m[2], $current); break; } // + adjacent sibling case '+': { $new = $this->adjacentSibling($m[2],$current); break; } // ~ general sibling case '~': { $new = $this->generalSibling($m[2], $current); break; } //:psuedo-filter case ':': { if ($m[2] == 'root') { $new = array($this->document->documentElement); } //a psuedo selector is a filter elseif (preg_match(self::PSUEDO, $expression, $n)) { if ($n[1] && isset(self::$filters[$n[1]]) && is_callable(self::$filters[$n[1]])) { if (!$current) { $current = $this->xpath->query('//*'); $current = self::makeArray($current); } $i = 0; foreach ($current as $elem) { if ($item = call_user_func(self::$filters[$n[1]], $elem, $i++, $n, $current, $this)) { if ($item instanceof DOMNode) { if (self::inArray($item, $new) < 0) { $new[] = $item; } } //usually boolean elseif (is_scalar($item)) { if ($item) { $new[] = $elem; } } else { $new = array_merge($new, self::makeArray($item)); $new = self::unique($new); } } } } else { throw new Exception("Unknown psuedo-filter: {$m[2]}, in {$expression}"); } //set this for the substr $m[0] = $n[0]; } else { throw new Exception("Unknown use of semi-colon: {$m[2]}, in {$expression}"); } break; } //[attribute="value"] filter case '[': { if (preg_match(self::ATTRIBUTES, $expression, $n)) { //change a pipe to a semi-colon for namespace purposes $n[1] = str_replace('|', ':', $n[1]); if (!isset($n[4]) || !$n[4]) { $n[4] = ''; $n[6] = null; } if (!isset(self::$attributeFilters[$n[4]]) || !is_callable(self::$attributeFilters[$n[4]])) { //print_r($n); //thrown if there is no viable attributeFilter function for the given operator throw new Exception("Unknown attribute filter: {$n[4]}"); } if (!$current) { $current = $this->xpath->query('//*'); $current = self::makeArray($current); } foreach ($current as $elem) { if (true === call_user_func(self::$attributeFilters[$n[4]], $elem, $n[1], $n[6], $n, $current)) { $new[] = $elem; } } //set this for the substr $m[0] = $n[0]; } else { //only thrown if query is malformed throw new Exception("Unidentified use of '[' in {$m[0]}"); } break; } //just a tag - i.e. any descendant of the current context default: { $new = $this->tag($m[2], $current); break; } } //check for # or . as filter $exp = substr($expression, strlen($m[0])); while ($exp && ($exp[0] == "." || $exp[0] == "#")) { if (preg_match(self::CHUNKER, $exp, $m)) { $expression = $exp; $new = $m[1] == "." ? 
$this->className($m[2], $new, true) : $this->id($m[2], $new, true); $exp = substr($expression, strlen($m[0])); } } } return (strlen($m[0]) < strlen($expression) && !empty($new)) //return strlen($m[0]) < strlen($expression) ? $this->query(substr($expression, strlen($m[0])), $new, $original_context) : self::unique($new); } /** * get an element by its id attribute * * @access public * * @param string $id * @param array $context * * @returns array */ public function id (&$id, array &$context=array(), $filter=false) { $new = array(); //if a context is present - div#id should act like a filter if ($filter || $context) { foreach ($context as $elem) { if ($elem instanceof DOMElement && $elem->hasAttribute('id') && $elem->getAttribute('id') == $id) { $new[] = $elem; } } } elseif (($items = $this->xpath->query("//*[@id='{$id}']")) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } return $new; } /** * get an element by its class attribute * * @access public * * @param string $id * @param array $context * * @returns array */ public function className (&$className, array &$context=array(), $filter=false) { $new = array(); if ($filter && $context) { $regex = '/\s+' . preg_quote($className, '/') . '\s+/'; foreach ($context as $elem) { if ($elem->hasAttribute('class') && preg_match($regex, " {$elem->getAttribute('class')} ")) { $new[] = $elem; } } } //if there is a context for the query elseif ($context) { //06-08-2009 - added normalize-space function, http://westhoffswelt.de/blog/0036_xpath_to_select_html_by_class.html $query = "./descendant::*[ @class and contains( concat(' ', normalize-space(@class), ' '), ' {$className} ') ]"; foreach ($context as $elem) { if ( ($items = $this->xpath->query($query, $elem)) && $items->length > 0 ) { foreach ($items as $item) { $new[] = $item; } } } } //otherwise select any element in the document that matches the selector elseif (($items = $this->xpath->query("//*[ @class and contains( concat(' ', normalize-space(@class), ' '), ' {$className} ') ]")) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } return $new; } /** * get the children elements * * @access public * * @param string $tag * @param array $context * * @returns array */ public function children (&$tag='*', array &$context=array()) { $new = array(); $query = "./{$tag}"; //if there is a context for the query if ($context) { foreach ($context as $elem) { if (($items = $this->xpath->query($query, $elem)) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } } } //otherwise select any element in the document that matches the selector elseif (($items = $this->xpath->query($query, $this->document->documentElement)) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } return $new; } /** * get the adjacent sibling elements * * @access public * * @param string $tag * @param array $context * * @returns array */ public function adjacentSibling (&$tag='*', array &$context=array()) { $new = array(); $tag = strtolower($tag); //if there is a context for the query if ($context) { foreach ($context as $elem) { if ($tag == '*' || strtolower($elem->nextSibling->nodeName) == $tag) { $new[] = $elem->nextSibling; } } } return $new; } /** * get the all sibling elements * * @access public * * @param string $tag * @param array $context * * @returns array */ public function generalSibling (&$tag='*', array &$context=array()) { $new = array(); //if there is a context for the query if ($context) { $query = "./following-sibling::{$tag} | 
./preceding-sibling::{$tag}"; foreach ($context as $elem) { if (($items = $this->xpath->query($query, $elem)) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } } } return $new; } /** * get the all descendant elements * * @access public * * @param string $tag * @param array $context * * @returns array */ public function tag (&$tag='*', array &$context=array()) { $new = array(); //get all the descendants with the given tagName if ($context) { $query = "./descendant::{$tag}"; foreach ($context as $elem) { if ($items = $this->xpath->query($query, $elem)) { foreach ($items as $item) { $new[] = $item; } } } } //get all elements with the given tagName else { if ($items = $this->xpath->query("//{$tag}")) { foreach ($items as $item) { $new[] = $item; } } } return $new; } /** * A utility function for calculating nth-* style psuedo selectors * * @static * @access public * * @param DOMNode $context - the element whose position is being calculated * @param string $func - the name of the psuedo function that is being calculated for * @param string $expr - the string argument for the selector * @param DOMXPath $xpath - an existing xpath instance for the document that the context belong to * * @returns boolean */ public static function nthChild (DOMNode &$context, $func, $expr, DOMXPath &$xpath) { //remove all the whitespace $expr = preg_replace('/\s+/', '', trim(strtolower($expr))); //all if ($expr == 'n' || $expr == 'n+0' || $expr == '1n+0' || $expr == '1n') { return true; } //the direction we will look for siblings $DIR = (stristr($func, 'last') ? 'following' : 'preceding'); //do a tagName check? $type = stristr($func, 'type') ? '[local-name()=name(.)]' : ''; //the position of this node $count = $xpath->evaluate("count( {$DIR}-sibling::*{$type} ) + 1", $context); //odd if($expr == 'odd' || $expr == '2n+1') { return $count % 2 != 0; } //even elseif($expr == 'even' || $expr == '2n' || $expr == '2n+0') { return $count > 0 && $count % 2 == 0; } //a particular position elseif(preg_match('/^([\+\-]?\d+)$/i', $expr, $mat)) { $d = (stristr($func, 'last') ? -1 : 1) * intval($mat[1]); $r = $xpath->query(sprintf('../%s', $type ? $context->tagName : '*'), $context); return $r && $r->length >= abs($d) && ($d > 0 ? $r->item($d - 1)->isSameNode($context) : $r->item($r->length + $d)->isSameNode($context)); } //grouped after a particular position elseif(preg_match('/^([\+\-]?\d*)?n([\+\-]\d+)?/i', $expr, $mat)) { $a = (isset($mat[2]) && $mat[2] ? intval($mat[2]) : 0); $b = (isset($mat[2]) && $mat[2] ? 
intval($mat[2]) : 1); return ($a == 0 && $count == $b) || ($a > 0 && $count >= $b && ($count - $b) % $a == 0) || ($a < 0 && $count <= $b && (($b - $count) % ($a * -1)) == 0); } return false; } /** * A utility function for filtering inputs of a specific type * * @static * @access public * * @param mixed $elem * @param string $type * * @returns boolean */ public static function inputFilter (&$elem, $type) { $t = trim(strtolower($type)); //gotta be a -DOMNode- DOMElement return $elem instanceof DOMElement && //with the tagName input strtolower($elem->tagName) == 'input' && ( ($t == 'text' && !$elem->hasAttribute('type')) || ($t == 'button' && strtolower($elem->tagName) == "button") || ( //and the attribute type $elem->hasAttribute('type') && //the attribute type should match the given variable type case insensitive trim(strtolower($elem->getAttribute('type'))) == $t ) ); } /** * A utility function for making an iterable object into an array * * @static * @access public * * @param array|Traversable $arr * * @return array */ public static function makeArray (&$arr) { if (is_array($arr)) { return array_values($arr); } $ret = array(); if ($arr) { foreach ($arr as $elem) { $ret[count($ret)] = $elem; } } return $ret; } /** * A utility function for stripping duplicate elements from an array * works on DOMNodes * * @static * @access public * * @param array|Traversable $arr * * @returns array */ public static function unique (&$arr) { //first step make sure all the elements are unique $new = array(); foreach ($arr as $current) { if ( //if the new array is empty //just put the element in the array empty($new) || ( //if it is not an instance of a DOMNode //no need to check for isSameNode !($current instanceof DOMNode) && !in_array($current, $new) ) || //do DOMNode test on array self::inArray($current, $new) < 0 ) { $new[] = $current; } } return $new; } /** * A utility function for determining the position of an element in an array * works on DOMNodes, returns -1 on failure * * @static * @access public * * @param mixed $elem * @param array|Traversable $arr * * @returns integer */ public static function inArray (DOMNode $elem, $arr) { $i = 0; foreach ($arr as $current) { //if it is an identical object or a DOMElement that represents the same node if ($current === $elem || ($current instanceof DOMNode && $current->isSameNode($elem))) { return $i; } $i += 1; } return -1; } /** * A utility function for filtering elements from an array or array-like object * * @static * @access public * * @param array|Traversable $array * @param callable $func * * @returns array */ public static function filter ($array, $func) { $ret = array(); if (!is_callable($func)) { return $array; } foreach ($array as $n => $v) { if (false !== call_user_func($func, $v, $n, $array)) { $ret[] = $v; } } return $ret; } /** * A static convenience method for running a query against any supported context type * * @static * @access public * * @param string $query * @param mixed $context * @param array|Traversable $ret - passed by reference * * @return array */ public static function find ($query, $context, $ret=null) { $new = array(); //query using DOMDocument if ($context instanceof DOMDocument) { $css = new self($context); $new = $css->query($query); } elseif ($context instanceof DOMNodeList) { if ($context->length) { $css = new self($context->item(0)->ownerDocument); $new = $css->query($query, $context); } } //should be an array if it isn't a DOMNode //in which case the first element should be a DOMNode //representing the desired context elseif (!($context
instanceof DOMNode) && count($context)) { $css = new self($context[0]->ownerDocument); $new = $css->query($query, $context); } //otherwise use the ownerDocument and the context as the context of the query else { $css = new self($context->ownerDocument); $new = $css->query($query, $context); } //if there is a place to store the newly selected elements if ($ret) { //append the newly selected elements to the given array|object //or if it is an instance of ArrayAccess just push it on to the object if (is_array($ret)) { $new = array_merge($ret, $new); $new = self::unique($new); $ret = $new; } elseif (is_object($ret)) { if ($ret instanceof ArrayAccess) { foreach ($new as $elem) { $ret[count($ret)] = $elem; } } //appending elements to a DOMDocumentFragment is a fast way to move them around elseif ($ret instanceof DOMDocumentFragment) { foreach ($new as $elem) { //appendChild, but don't forget to verify same document $ret->appendChild( !$ret->ownerDocument->isSameNode($elem->ownerDocument) ? $ret->ownerDocument->importNode($elem, true) : $elem); } } //otherwise we need to find a method to use to attach the elements elseif (($m = method_exists($ret, 'push')) || method_exists($ret, 'add')) { $method = $m ? 'push' : 'add'; foreach ($new as $elem) { $ret->$method($elem); } } elseif (($m = method_exists($ret, 'concat')) || method_exists($ret, 'concatenate')) { $method = $m ? 'concat' : 'concatenate'; $ret->$method($new); } } //this will save the selected elements into a string elseif (is_string($ret)) { foreach ($new as $elem) { $ret .= $elem->ownerDocument->saveXML($elem); } } } return $new; } } /** * this creates the default filters array on the CSSQuery object * * <code> * //prototype function (DOMNode $element, integer $i, array $matches, array $context, CSSQuery $cssQuery); * CSSQuery::$filters['myfilter'] = create_function('', ''); * * </code> */ CSSQuery::$filters = new RecursiveArrayIterator(array( //CSS3 selectors 'first-child' => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return !$e->isSameNode($e->ownerDocument->documentElement) && $c->xpath->query("../*[position()=1]", $e)->item(0)->isSameNode($e);'), 'last-child' => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return !$e->isSameNode($e->ownerDocument->documentElement) && $c->xpath->query("../*[last()]", $e)->item(0)->isSameNode($e);'), 'only-child' => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return !$e->isSameNode($e->ownerDocument->documentElement) && $e->parentNode->getElementsByTagName("*")->length == 1;'), 'checked' => create_function('DOMNode $e', 'return strtolower($e->tagName) == "input" && $e->hasAttribute("checked");'), 'disabled' => create_function('DOMNode $e', 'return $e->hasAttribute("disabled") && stristr("|input|textarea|select|button|", "|".$e->tagName."|") !== false;'), 'enabled' => create_function('DOMNode $e', 'return !$e->hasAttribute("disabled") && stristr("|input|textarea|select|button|", "|".$e->tagName . 
"|") !== false && (!$e->hasAttribute("type") || strtolower($e->getAttribute("type")) != "hidden");'), //nth child selectors "nth-child" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-child", $m[3], $c->xpath);'), "nth-last-child" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-last-child", $m[3], $c->xpath);'), "nth-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-of-type", $m[3], $c->xpath);'), "nth-last-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-last-of-type", $m[3], $c->xpath);'), "first-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return call_user_func(CSSQuery::$filters["nth-of-type"], $e, $i, array(0,1,1,1), $a, $c);'), "last-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return call_user_func(CSSQuery::$filters["nth-last-of-type"],$e, $i, array(0,1,1,1), $a, $c);'), "only-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return call_user_func(CSSQuery::$filters["first-of-type"], $e, $i, $m, $a, $c) && call_user_func(CSSQuery::$filters["last-of-type"], $e, $i, $m, $a, $c);'), //closest thing to the lang filter "lang" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return $c->xpath->evaluate( sprintf( "count(./ancestor-or-self::*[@lang and (@lang =". " \"%s\" or substring(@lang, 1, %u)=\"%s-\")])", $m[3], strlen($m[3]) + 1, $m[3] ), $e ) > 0;'), //negation filter "not" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::inArray($e, $c->query(trim($m[3]))) == -1;'), //element has no child nodes "empty" => create_function('DOMNode $e', 'return !$e->hasChildNodes();'), //element has child nodes that are elements "parent" => create_function('DOMNode $e', 'return ($n = $e->getElementsByTagName("*")) && $n->length > 0;'), //get the parent node of the current element "parent-node" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '//if there is no filter just return the first parentNode if (!$m || !isset($m[3]) || !trim($m[3])) return $e->parentNode; //otherwise if the filter is more than a tagName return preg_match("/^(\*|\w+)([^\w]+.+)/", trim($m[3]), $n) ? CSSQuery::find(trim($n[2]), $c->xpath->query("./ancestor::{$n[1]}", $e)) //if the filter is only a tagName save the trouble : $c->xpath->query(sprintf("./ancestor::%s", trim($m[3])), $e);'), //get the ancestors of the current element "parents" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./ancestor::*", $e); return $m && isset($m[3]) && trim($m[3]) ? CSSQuery::find(trim($m[3]), $r) : $r;'), //the element has nextSiblings "next-sibling" => create_function('DOMNode $e', 'return ($n = $e->parentNode->getElementsByTagName("*")) && !$n->item($n->length-1)->isSameNode($e);'), //the element has previousSiblings "previous-sibling" => create_function('DOMNode $e', 'return !$e->parentNode->getElementsByTagName("*")->item(0)->isSameNode($e);'), //get the previousSiblings of the current element "previous-siblings" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./preceding-sibling::*", $e); return $m && isset($m[3]) && trim($m[3]) ? CSSQuery::find(trim($m[3]), $r) : $r;'), //get the nextSiblings of the current element "next-siblings" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./following-sibling::*", $e); return $m && isset($m[3]) && trim($m[3]) ? 
CSSQuery::find(trim($m[3]), $r) : $r;'), //get all the siblings of the current element "siblings" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./preceding-sibling::* | ./following-sibling::*", $e); return $m && isset($m[3]) && trim($m[3]) ? CSSQuery::find(trim($m[3]), $r) : $r;'), //select the header elements "header" => create_function('DOMNode $e', 'return (bool)preg_match("/^h[1-6]$/i", $e->tagName);'), //form element selectors "selected" => create_function('DOMNode $e', 'return $e->hasAttribute("selected");'), //any element that would be considered input based on tagName "input" => create_function('DOMNode $e', 'return stristr("|input|textarea|select|button|", "|" . $e->tagName . "|") !== false;'), //any input element and type "radio" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "radio");'), "checkbox" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "checkbox");'), "file" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "file");'), "password" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "password");'), "submit" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "submit");'), "image" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "image");'), "reset" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "reset");'), "button" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "button");'), "text" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "text");'), //limiting filter "has" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return count($c->query($m[3], $e)) > 0;'), //text limiting filter "contains" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return strstr($e->textContent, preg_replace("/^\s*([\'\"])(.*)\\\\1\s*$/", "\\\\2", $m[3]));'), "Contains" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return stristr($e->textContent, preg_replace("/^\s*([\'\"])(.*)\\\\1\s*$/", "\\\\2", $m[3]));'), //positional selectors for the current node-set "first" => create_function('DOMNode $e,$i', 'return $i === 0;'), "last" => create_function('DOMNode $e,$i,$m,$a', 'return $i === (count($a) - 1);'), "lt" => create_function('DOMNode $e,$i,$m', 'return $i < $m[3];'), "gt" => create_function('DOMNode $e,$i,$m', 'return $i > $m[3];'), "eq" => create_function('DOMNode $e,$i,$m', 'return $i === intval($m[3]);'), //works like nth-child on the currently selected node-set "nth" => create_function('DOMNode $e,$i,$m', '$expr = preg_replace("/\s+/", "", strtolower(trim($m[3]))); //these selectors select all so dont waste time figuring them out if ($expr == "n" || $expr == "n+0" || $expr == "1n+0" || $expr == "1n") { return true; } //even numbered elements elseif ($expr == "even" || $expr == "2n" || $expr == "2n+0") { return $i % 2 == 0; } //odd numbered elements elseif ($expr == "odd" || $expr == "2n+1") { return $i % 2 != 0; } //positional - a negative position is not supported elseif (preg_match("/^([\+\-]?\d+)$/i", $expr, $mat)) { return $i == intval($mat[1]); } //grouped according to a position elseif (preg_match("/^([\+\-]?\d*)?n([\+\-]\d+)?/i", $expr, $mat)) { $a = (isset($mat[1]) && $mat[1] ? intval($mat[1]) : 0); $b = (isset($mat[2]) && $mat[2] ?
intval($mat[2]) : 1); return ($a == 0 && $i == $b) || ($a > 0 && $i >= $b && ($i - $b) % $a == 0) || ($a < 0 && $i <= $b && (($b - $i) % ($a * -1)) == 0); } return false; '), ), 2); /** * create a default array of attribute filters * * <code> * //prototype function (DOMNode $element, string $attributeName, string $value = '', array $matches, array $context=array()); * CSSQuery::$attributeFilters['>'] = create_function('', ''); * * </code> */ CSSQuery::$attributeFilters = new RecursiveArrayIterator(array( //hasAttribute and/or attribute == value "" => create_function('$e,$a,$v=null', 'return $e->hasAttribute($a);'), //hasAttribute and/or attribute == value "=" => create_function('$e,$a,$v=null', 'return $e->hasAttribute($a) && $e->getAttribute($a) == $v;'), //!hasAttribute or attribute != value "!=" => create_function('$e,$a,$v', 'return !$e->hasAttribute($a) || $e->getAttribute($a) != $v;'), //hasAttribute and the attribute begins with value "^=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && substr($e->getAttribute($a), 0, strlen($v)) == $v;'), //hasAttribute and the attribute ends with value '$=' => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && substr($e->getAttribute($a), -strlen($v)) == $v;'), //hasAttribute and the attribute begins with value . - "|=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && substr($e->getAttribute($a), 0, strlen($v) + 1) == $v."-";'), //hasAttribute and attribute contains value "*=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && strstr($e->getAttribute($a), $v);'), //special //hasAttribute and attribute contains value - case insensitive "%=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && stristr($e->getAttribute($a), $v);'), //hasAttribute and the attrributes value matches the given PCRE pattern "@=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && preg_match($v, $e->getAttribute($a));'), ), 2); ?>
juniorug/vidacon
js/myMail/classes/libs/InlineStyle.php
PHP
apache-2.0
53,238
'use strict'; /** * @license * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ goog.provide('tachyfont.WorkQueue'); goog.require('goog.Promise'); goog.require('tachyfont.Reporter'); /** * This class manages work queues. * @param {string} name An identifier useful for error reports. * @constructor @struct @final */ tachyfont.WorkQueue = function(name) { /** @private @const {string} */ this.name_ = name; /** @private {!Array<!tachyfont.WorkQueue.Task>} */ this.queue_ = []; }; /** * Adds a task. * @param {function(?)} taskFunction The function to call. * @param {*} data The data to pass to the function. * @param {string} fontId An identifier useful for error messages. * @param {number=} opt_watchDogTime Optional watch dog time. * @return {!tachyfont.WorkQueue.Task} A task object. */ tachyfont.WorkQueue.prototype.addTask = function( taskFunction, data, fontId, opt_watchDogTime) { var task = new tachyfont.WorkQueue.Task( taskFunction, data, fontId, opt_watchDogTime); this.queue_.push(task); if (this.queue_.length == 1) { this.runNextTask(); } return task; }; /** * Runs the next task in the queue. */ tachyfont.WorkQueue.prototype.runNextTask = function() { if (this.queue_.length < 1) { return; } var task = this.queue_[0]; task.run().thenAlways( /** @this {tachyfont.WorkQueue} */ function() { this.queue_.shift(); this.runNextTask(); }, this); }; /** * Gets the queue length. * @return {number} */ tachyfont.WorkQueue.prototype.getLength = function() { return this.queue_.length; }; /** * Enum for error values. * @enum {string} */ // LINT.IfChange tachyfont.WorkQueue.Error = { FILE_ID: 'EWQ', WATCH_DOG_TIMER: '01', END: '00' }; // LINT.ThenChange(//depot/google3/\ // java/com/google/i18n/tachyfont/boq/gen204/error-reports.properties) /** * The error reporter for this file. * @param {string} errNum The error number. * @param {string} errId Identifies the error. * @param {*} errInfo The error object. */ tachyfont.WorkQueue.reportError = function(errNum, errId, errInfo) { tachyfont.Reporter.reportError( tachyfont.WorkQueue.Error.FILE_ID + errNum, errId, errInfo); }; /** * Gets the work queue name. * @return {string} */ tachyfont.WorkQueue.prototype.getName = function() { return this.name_; }; /** * A class that holds a task. * @param {function(?)} taskFunction The function to call. * @param {*} data The data to pass to the function. * @param {string} fontId An identifier for error reporting. * @param {number=} opt_watchDogTime Optional watch dog time.
* @constructor @struct @final */ tachyfont.WorkQueue.Task = function( taskFunction, data, fontId, opt_watchDogTime) { var resolver; /** @private {function(?)} */ this.function_ = taskFunction; /** @private {*} */ this.data_ = data; /** @private {string} */ this.fontId_ = fontId; /** @private {!goog.Promise<?,?>} */ this.result_ = new goog.Promise(function(resolve, reject) { resolver = resolve; }); /** @private {function(*=)} */ this.resolver_ = resolver; /** @private {?number} */ this.timeoutId_ = null; /** @private {number} */ this.watchDogTime_ = opt_watchDogTime || tachyfont.WorkQueue.Task.WATCH_DOG_TIME; }; /** * The time in milliseconds to wait before reporting that a running task did not * complete. * @type {number} */ tachyfont.WorkQueue.Task.WATCH_DOG_TIME = 10 * 60 * 1000; /** * Gets the task result promise (may be unresolved). * @return {!goog.Promise<?,?>} */ tachyfont.WorkQueue.Task.prototype.getResult = function() { return this.result_; }; /** * Resolves the task result promise. * @param {*} result The result of the function. May be any value including a * resolved/rejected promise. * @return {!goog.Promise<?,?>} * @private */ tachyfont.WorkQueue.Task.prototype.resolve_ = function(result) { this.resolver_(result); return this.result_; }; /** * Runs the task. * @return {*} */ tachyfont.WorkQueue.Task.prototype.run = function() { this.startWatchDogTimer_(); var result; try { result = this.function_(this.data_); } catch (e) { result = goog.Promise.reject(e); } this.result_.thenAlways(function() { this.stopWatchDogTimer_(); }.bind(this)); return this.resolve_(result); }; /** * Starts the watch dog timer. * @return {*} * @private */ tachyfont.WorkQueue.Task.prototype.startWatchDogTimer_ = function() { this.timeoutId_ = setTimeout(function() { this.timeoutId_ = null; tachyfont.WorkQueue.reportError( tachyfont.WorkQueue.Error.WATCH_DOG_TIMER, this.fontId_, ''); }.bind(this), this.watchDogTime_); }; /** * Stops the watch dog timer. * @private */ tachyfont.WorkQueue.Task.prototype.stopWatchDogTimer_ = function() { if (this.timeoutId_) { clearTimeout(this.timeoutId_); } this.timeoutId_ = null; };
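/*
 * Usage sketch (not part of the original file): tasks added to one queue run
 * strictly one at a time, in insertion order. The loadFont function, fontData
 * value and 'font1' id below are hypothetical.
 *
 *   var queue = new tachyfont.WorkQueue('font-loading');
 *   var task = queue.addTask(loadFont, fontData, 'font1');
 *   task.getResult().then(function(result) {
 *     // runs after loadFont(fontData) has settled; runNextTask() then starts
 *     // the next queued task automatically, if there is one.
 *   });
 */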
googlefonts/TachyFont
run_time/src/gae_server/www/js/tachyfont/work_queue.js
JavaScript
apache-2.0
5,429
package com.thinkbiganalytics.metadata.jpa.app; /*- * #%L * thinkbig-operational-metadata-jpa * %% * Copyright (C) 2017 ThinkBig Analytics * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import com.thinkbiganalytics.KyloVersionUtil; import com.thinkbiganalytics.KyloVersion; import com.thinkbiganalytics.metadata.api.app.KyloVersionProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Sort; import org.springframework.data.domain.Sort.Direction; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.util.List; import java.util.Properties; import javax.annotation.PostConstruct; /** * Provider for accessing and updating the Kylo version */ public class JpaKyloVersionProvider implements KyloVersionProvider { private static final Logger log = LoggerFactory.getLogger(JpaKyloVersionProvider.class); private static final Sort SORT_ORDER = new Sort(new Sort.Order(Direction.DESC, "majorVersion"), new Sort.Order(Direction.DESC, "minorVersion"), new Sort.Order(Direction.DESC, "pointVersion"), new Sort.Order(Direction.DESC, "tag") ); private KyloVersionRepository kyloVersionRepository; private String currentVersion; private String buildTimestamp; @Autowired public JpaKyloVersionProvider(KyloVersionRepository kyloVersionRepository) { this.kyloVersionRepository = kyloVersionRepository; } @Override public boolean isUpToDate() { KyloVersion buildVer = KyloVersionUtil.getBuildVersion(); KyloVersion currentVer = getCurrentVersion(); return currentVer != null && buildVer.matches(currentVer.getMajorVersion(), currentVer.getMinorVersion(), currentVer.getPointVersion()); } @Override public KyloVersion getCurrentVersion() { List<JpaKyloVersion> versions = kyloVersionRepository.findAll(SORT_ORDER); if (versions != null && !versions.isEmpty()) { return versions.get(0); } return null; } /* (non-Javadoc) * @see com.thinkbiganalytics.metadata.api.app.KyloVersionProvider#setCurrentVersion(com.thinkbiganalytics.KyloVersion) */ @Override public void setCurrentVersion(KyloVersion version) { JpaKyloVersion update = new JpaKyloVersion(version.getMajorVersion(), version.getMinorVersion(), version.getPointVersion(), version.getTag()); kyloVersionRepository.save(update); } @Override public KyloVersion getBuildVersion() { return KyloVersionUtil.getBuildVersion(); } @PostConstruct private void init() { getBuildVersion(); } }
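/*
 * Usage sketch (illustrative; the repository wiring is assumed to come from
 * Spring, everything else is the API defined above): a startup check that
 * compares the persisted database version against the version Kylo was built
 * with.
 *
 *   KyloVersionProvider provider = new JpaKyloVersionProvider(kyloVersionRepository);
 *   if (!provider.isUpToDate()) {
 *       // e.g. run an upgrade, then record the new version
 *       provider.setCurrentVersion(provider.getBuildVersion());
 *   }
 */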
peter-gergely-horvath/kylo
core/operational-metadata/operational-metadata-jpa/src/main/java/com/thinkbiganalytics/metadata/jpa/app/JpaKyloVersionProvider.java
Java
apache-2.0
3,679
package com.planet_ink.coffee_mud.Abilities.Spells; import com.planet_ink.coffee_mud.core.interfaces.*; import com.planet_ink.coffee_mud.core.*; import com.planet_ink.coffee_mud.core.collections.*; import com.planet_ink.coffee_mud.Abilities.interfaces.*; import com.planet_ink.coffee_mud.Areas.interfaces.*; import com.planet_ink.coffee_mud.Behaviors.interfaces.*; import com.planet_ink.coffee_mud.CharClasses.interfaces.*; import com.planet_ink.coffee_mud.Commands.interfaces.*; import com.planet_ink.coffee_mud.Common.interfaces.*; import com.planet_ink.coffee_mud.Exits.interfaces.*; import com.planet_ink.coffee_mud.Items.interfaces.*; import com.planet_ink.coffee_mud.Libraries.interfaces.*; import com.planet_ink.coffee_mud.Locales.interfaces.*; import com.planet_ink.coffee_mud.MOBS.interfaces.*; import com.planet_ink.coffee_mud.Races.interfaces.*; import java.util.*; /* Copyright 2003-2015 Bo Zimmerman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ @SuppressWarnings("rawtypes") public class Spell_ArcaneMark extends Spell { @Override public String ID() { return "Spell_ArcaneMark"; } private final static String localizedName = CMLib.lang().L("Arcane Mark"); @Override public String name() { return localizedName; } @Override public int abstractQuality(){ return Ability.QUALITY_INDIFFERENT;} @Override protected int canAffectCode(){return 0;} @Override protected int canTargetCode(){return CAN_ITEMS;} @Override public int classificationCode(){ return Ability.ACODE_SPELL|Ability.DOMAIN_ALTERATION;} @Override public boolean invoke(MOB mob, Vector commands, Physical givenTarget, boolean auto, int asLevel) { if(commands.size()<2) { mob.tell(L("You must specify what object you want the spell cast on, and the message you wish to have marked upon it.")); return false; } final Physical target=mob.location().fetchFromMOBRoomFavorsItems(mob,null,((String)commands.elementAt(0)),Wearable.FILTER_UNWORNONLY); if((target==null)||(!CMLib.flags().canBeSeenBy(target,mob))) { mob.tell(L("You don't see '@x1' here.",((String)commands.elementAt(0)))); return false; } if((!(target instanceof Item))||(!target.isGeneric())) { mob.tell(L("You can't cast this on @x1.",target.name(mob))); return false; } final String message=CMParms.combine(commands,1); if(!super.invoke(mob,commands,givenTarget,auto,asLevel)) return false; final boolean success=proficiencyCheck(mob,0,auto); if(success) { final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),L("^S<S-NAME> invoke(s) a spell upon <T-NAMESELF>.^?")); if(mob.location().okMessage(mob,msg)) { mob.location().send(mob,msg); if(target.description().indexOf("Some markings on it say")>=0) target.setDescription(L("@x1 Some other markings say `@x2`.",target.description(),message)); else target.setDescription(L("@x1 Some markings on it say `@x2`.",target.description(),message)); } } else beneficialWordsFizzle(mob,target,L("<S-NAME> attempt(s) to invoke a spell upon <T-NAMESELF>, but the spell fizzles.")); // return whether it worked return success; } }
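/*
 * Usage sketch (hypothetical direct invocation; in the game this is normally
 * routed through the CAST command): the first element of the commands Vector
 * names the target item, the remaining elements are combined via
 * CMParms.combine(commands,1) into the text marked onto it. The spell and mob
 * references are assumed to exist.
 *
 *   Vector commands = new Vector();
 *   commands.addElement("sword");   // an unworn item visible to the caster
 *   commands.addElement("Property");
 *   commands.addElement("of");
 *   commands.addElement("Bob");
 *   boolean worked = spell.invoke(mob, commands, null, false, 0);
 *   // on success the item's description gains: Some markings on it say `Property of Bob`.
 */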
Tycheo/coffeemud
com/planet_ink/coffee_mud/Abilities/Spells/Spell_ArcaneMark.java
Java
apache-2.0
3,763
package oidc import ( "crypto/rand" "encoding/base64" "errors" "fmt" "net" "net/http" "net/url" "strings" "time" "github.com/coreos/go-oidc/jose" ) func ParseTokenFromRequest(r *http.Request) (token jose.JWT, err error) { ah := r.Header.Get("Authorization") if ah == "" { err = errors.New("missing Authorization header") return } if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" { err = errors.New("should be a bearer token") return } return jose.ParseJWT(ah[7:]) } func NewClaims(iss, sub, aud string, iat, exp time.Time) jose.Claims { return jose.Claims{ // required "iss": iss, "sub": sub, "aud": aud, "iat": float64(iat.Unix()), "exp": float64(exp.Unix()), } } func GenClientID(hostport string) (string, error) { b, err := randBytes(32) if err != nil { return "", err } var host string if strings.Contains(hostport, ":") { host, _, err = net.SplitHostPort(hostport) if err != nil { return "", err } } else { host = hostport } return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil } func randBytes(n int) ([]byte, error) { b := make([]byte, n) got, err := rand.Read(b) if err != nil { return nil, err } else if n != got { return nil, errors.New("unable to generate enough random data") } return b, nil } // urlEqual checks two urls for equality using only the host and path portions. func urlEqual(url1, url2 string) bool { u1, err := url.Parse(url1) if err != nil { return false } u2, err := url.Parse(url2) if err != nil { return false } return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path) }
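/*
Usage sketch (not part of the original file): pulling a bearer token off an
incoming request. The handler shape is illustrative; ParseTokenFromRequest and
GenClientID are the functions defined above.

	func handler(w http.ResponseWriter, r *http.Request) {
		jwt, err := ParseTokenFromRequest(r) // expects "Authorization: Bearer <jwt>"
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		_ = jwt // verify claims, look up the user, etc.
	}

	// clientID, err := GenClientID("example.com:443")
	// -> "<base64url random bytes>@example.com"
*/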
yifan-gu/go-oidc
oidc/util.go
GO
apache-2.0
1,643
'use strict'; var debug = require('debug')('ali-oss:sts'); var crypto = require('crypto'); var querystring = require('querystring'); var copy = require('copy-to'); var AgentKeepalive = require('agentkeepalive'); var is = require('is-type-of'); var ms = require('humanize-ms'); var urllib = require('urllib'); var globalHttpAgent = new AgentKeepalive(); module.exports = STS; function STS(options) { if (!(this instanceof STS)) { return new STS(options); } if (!options || !options.accessKeyId || !options.accessKeySecret) { throw new Error('require accessKeyId, accessKeySecret'); } this.options = { endpoint: options.endpoint || 'https://sts.aliyuncs.com', format: 'JSON', apiVersion: '2015-04-01', sigMethod: 'HMAC-SHA1', sigVersion: '1.0', timeout: '60s' }; copy(options).to(this.options); // support custom agent and urllib client if (this.options.urllib) { this.urllib = this.options.urllib; } else { this.urllib = urllib; this.agent = this.options.agent || globalHttpAgent; } }; var proto = STS.prototype; /** * STS opertaions */ proto.assumeRole = function* assumeRole(role, policy, expiration, session, options) { var opts = this.options; var params = { 'Action': 'AssumeRole', 'RoleArn': role, 'RoleSessionName': session || 'app', 'DurationSeconds': expiration || 3600, 'Format': opts.format, 'Version': opts.apiVersion, 'AccessKeyId': opts.accessKeyId, 'SignatureMethod': opts.sigMethod, 'SignatureVersion': opts.sigVersion, 'SignatureNonce': Math.random(), 'Timestamp': new Date().toISOString() }; if (policy) { var policyStr; if (is.string(policy)) { try { policyStr = JSON.stringify(JSON.parse(policy)); } catch (err) { throw new Error('Policy string is not a valid JSON: ' + err.message); } } else { policyStr = JSON.stringify(policy); } params.Policy = policyStr; } var signature = this._getSignature('POST', params, opts.accessKeySecret); params.Signature = signature; var reqUrl = opts.endpoint; var reqParams = { agent: this.agent, timeout: ms(options && options.timeout || opts.timeout), method: 'POST', content: querystring.stringify(params), headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, ctx: options && options.ctx, }; var result = yield this.urllib.request(reqUrl, reqParams); debug('response %s %s, got %s, headers: %j', reqParams.method, reqUrl, result.status, result.headers); if (Math.floor(result.status / 100) !== 2) { var err = yield this._requestError(result); err.params = reqParams; throw err; } result.data = JSON.parse(result.data); return { res: result.res, credentials: result.data.Credentials }; }; proto._requestError = function* _requestError(result) { var err = new Error(); err.status = result.status; try { var resp = yield JSON.parse(result.data) || {}; err.code = resp.Code; err.message = resp.Code + ': ' + resp.Message; err.requestId = resp.RequestId; } catch (e) { err.message = 'UnknownError: ' + String(result.data); } return err; }; proto._getSignature = function _getSignature(method, params, key) { var that = this; var canoQuery = Object.keys(params).sort().map(function (key) { return that._escape(key) + '=' + that._escape(params[key]) }).join('&'); var stringToSign = method.toUpperCase() + '&' + this._escape('/') + '&' + this._escape(canoQuery); debug('string to sign: %s', stringToSign); var signature = crypto.createHmac('sha1', key + '&'); signature = signature.update(stringToSign).digest('base64'); debug('signature: %s', signature); return signature; }; /** * Since `encodeURIComponent` doesn't encode '*', which causes * 'SignatureDoesNotMatch'. 
We need to do it ourselves. */ proto._escape = function _escape(str) { return encodeURIComponent(str).replace(/\*/g, '%2A'); };
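/*
 * Usage sketch (illustrative; the role ARN and credentials are placeholders):
 * this module exposes generator functions, so callers typically drive it with
 * a runner such as co.
 *
 *   var co = require('co');
 *   var STS = require('./sts');
 *   var sts = new STS({ accessKeyId: 'ak', accessKeySecret: 'sk' });
 *   co(function* () {
 *     var res = yield sts.assumeRole(
 *       'acs:ram::1234567890:role/demo', null, 3600, 'my-session');
 *     console.log(res.credentials); // temporary AccessKeyId/Secret/SecurityToken
 *   });
 */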
WebDeltaSync/WebR2sync_plus
node_modules/ali-oss/lib/sts.js
JavaScript
apache-2.0
4,037
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.wicketTutorial; import org.apache.wicket.request.mapper.parameter.PageParameters; import org.apache.wicket.markup.html.basic.Label; import org.apache.wicket.markup.html.WebPage; public class HomePage extends WebPage { private static final long serialVersionUID = 1L; public HomePage(final PageParameters parameters) { super(parameters); add(new Label("version", getApplication().getFrameworkSettings().getVersion())); // TODO Add your page's components here } }
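/*
 * Usage sketch (assumed markup, not part of this repo's sources shown here):
 * the Label added above binds to a wicket:id in the page's HTML template,
 * conventionally HomePage.html next to this class:
 *
 *   <span wicket:id="version">[framework version goes here]</span>
 */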
mafulafunk/wicket
wicket-user-guide/examples/MarkupInheritanceExample/src/main/java/org/wicketTutorial/HomePage.java
Java
apache-2.0
1,308
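<!-- =======================================================================
     Editor's sketch (not taken from the tutorial sources): a Wicket page
     class like HomePage above is paired with a markup file of the same name.
     A minimal HomePage.html matching the "version" component id would look
     roughly like this; the placeholder text is replaced at render time by
     the Label bound to wicket:id="version".
     ======================================================================= -->
<!DOCTYPE html>
<html xmlns:wicket="http://wicket.apache.org">
	<body>
		<span wicket:id="version">[framework version goes here]</span>
	</body>
</html>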
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.arrow.vector.complex.impl;

import org.apache.arrow.util.Preconditions;
import org.apache.arrow.vector.complex.ListVector;
import org.apache.arrow.vector.complex.NonNullableStructVector;
import org.apache.arrow.vector.complex.StateTool;
import org.apache.arrow.vector.complex.StructVector;
import org.apache.arrow.vector.complex.writer.BaseWriter.ComplexWriter;
import org.apache.arrow.vector.types.pojo.Field;

public class ComplexWriterImpl extends AbstractFieldWriter implements ComplexWriter {

  private NullableStructWriter structRoot;
  private UnionListWriter listRoot;
  private final NonNullableStructVector container;

  Mode mode = Mode.INIT;
  private final String name;
  private final boolean unionEnabled;
  private final NullableStructWriterFactory nullableStructWriterFactory;

  private enum Mode { INIT, STRUCT, LIST }

  public ComplexWriterImpl(
      String name,
      NonNullableStructVector container,
      boolean unionEnabled,
      boolean caseSensitive) {
    this.name = name;
    this.container = container;
    this.unionEnabled = unionEnabled;
    nullableStructWriterFactory = caseSensitive ?
        NullableStructWriterFactory.getNullableCaseSensitiveStructWriterFactoryInstance() :
        NullableStructWriterFactory.getNullableStructWriterFactoryInstance();
  }

  public ComplexWriterImpl(String name, NonNullableStructVector container, boolean unionEnabled) {
    this(name, container, unionEnabled, false);
  }

  public ComplexWriterImpl(String name, NonNullableStructVector container) {
    this(name, container, false);
  }

  @Override
  public Field getField() {
    return container.getField();
  }

  @Override
  public int getValueCapacity() {
    return container.getValueCapacity();
  }

  private void check(Mode... modes) {
    StateTool.check(mode, modes);
  }

  @Override
  public void reset() {
    setPosition(0);
  }

  @Override
  public void close() throws Exception {
    clear();
    if (structRoot != null) {
      structRoot.close();
    }
    if (listRoot != null) {
      listRoot.close();
    }
  }

  @Override
  public void clear() {
    switch (mode) {
      case STRUCT:
        structRoot.clear();
        break;
      case LIST:
        listRoot.clear();
        break;
      default:
        break;
    }
  }

  @Override
  public void setValueCount(int count) {
    switch (mode) {
      case STRUCT:
        structRoot.setValueCount(count);
        break;
      case LIST:
        listRoot.setValueCount(count);
        break;
      default:
        break;
    }
  }

  @Override
  public void setPosition(int index) {
    super.setPosition(index);
    switch (mode) {
      case STRUCT:
        structRoot.setPosition(index);
        break;
      case LIST:
        listRoot.setPosition(index);
        break;
      default:
        break;
    }
  }

  public StructWriter directStruct() {
    Preconditions.checkArgument(name == null);
    switch (mode) {
      case INIT:
        structRoot = nullableStructWriterFactory.build((StructVector) container);
        structRoot.setPosition(idx());
        mode = Mode.STRUCT;
        break;
      case STRUCT:
        break;
      default:
        check(Mode.INIT, Mode.STRUCT);
    }
    return structRoot;
  }

  @Override
  public StructWriter rootAsStruct() {
    switch (mode) {
      case INIT:
        // TODO allow dictionaries in complex types
        StructVector struct = container.addOrGetStruct(name);
        structRoot = nullableStructWriterFactory.build(struct);
        structRoot.setPosition(idx());
        mode = Mode.STRUCT;
        break;
      case STRUCT:
        break;
      default:
        check(Mode.INIT, Mode.STRUCT);
    }
    return structRoot;
  }

  @Override
  public void allocate() {
    if (structRoot != null) {
      structRoot.allocate();
    } else if (listRoot != null) {
      listRoot.allocate();
    }
  }

  @Override
  public ListWriter rootAsList() {
    switch (mode) {
      case INIT:
        int vectorCount = container.size();
        // TODO allow dictionaries in complex types
        ListVector listVector = container.addOrGetList(name);
        if (container.size() > vectorCount) {
          listVector.allocateNew();
        }
        listRoot = new UnionListWriter(listVector, nullableStructWriterFactory);
        listRoot.setPosition(idx());
        mode = Mode.LIST;
        break;
      case LIST:
        break;
      default:
        check(Mode.INIT, Mode.LIST);
    }
    return listRoot;
  }
}
pcmoritz/arrow
java/vector/src/main/java/org/apache/arrow/vector/complex/impl/ComplexWriterImpl.java
Java
apache-2.0
5,289
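// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the Arrow sources): how ComplexWriterImpl
// above is typically driven, mirroring the pattern used in Arrow's writer
// tests. The field names and two-row content are illustrative; it assumes the
// standard Arrow allocator and vector APIs that ship alongside this class.
// ---------------------------------------------------------------------------
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.complex.NonNullableStructVector;
import org.apache.arrow.vector.complex.impl.ComplexWriterImpl;
import org.apache.arrow.vector.complex.writer.BaseWriter.StructWriter;

public class ComplexWriterExample {
  public static void main(String[] args) {
    try (BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
         NonNullableStructVector parent = NonNullableStructVector.empty("parent", allocator)) {
      ComplexWriterImpl writer = new ComplexWriterImpl("root", parent);
      StructWriter rootWriter = writer.rootAsStruct(); // locks the writer into STRUCT mode
      for (int i = 0; i < 2; i++) {
        rootWriter.setPosition(i);
        rootWriter.start();
        rootWriter.integer("id").writeInt(i);          // child fields are created on first use
        rootWriter.bigInt("value").writeBigInt(i * 100L);
        rootWriter.end();
      }
      writer.setValueCount(2);
      System.out.println(parent.getChild("root"));
    }
  }
}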
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"io/ioutil"
	"math/rand"
	"net"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/intstr"
	utilnet "k8s.io/kubernetes/pkg/util/net"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/wait"
)

// Maximum time a kube-proxy daemon on a node is allowed to not
// notice a Service update, such as type=NodePort.
// TODO: This timeout should be O(10s), observed values are O(1m), 5m is very
// liberal. Fix tracked in #20567.
const kubeProxyLagTimeout = 5 * time.Minute

// Maximum time a load balancer is allowed to not respond after creation.
const loadBalancerLagTimeout = 2 * time.Minute

// How long to wait for a load balancer to be created/modified.
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
const loadBalancerCreateTimeout = 20 * time.Minute

// This should match whatever the default/configured range is
var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}

var _ = Describe("Services", func() {
	f := NewFramework("services")

	var c *client.Client
	var extraNamespaces []string

	BeforeEach(func() {
		var err error
		c, err = loadClient()
		Expect(err).NotTo(HaveOccurred())
	})

	AfterEach(func() {
		if testContext.DeleteNamespace {
			for _, ns := range extraNamespaces {
				By(fmt.Sprintf("Destroying namespace %v", ns))
				if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil {
					Failf("Couldn't delete namespace %s: %s", ns, err)
				}
			}
			extraNamespaces = nil
		} else {
			Logf("Found DeleteNamespace=false, skipping namespace deletion!")
		}
	})

	// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test.
	// We should have a simpler test for multi-port TCP here.
It("should provide secure master service [Conformance]", func() { _, err := c.Services(api.NamespaceDefault).Get("kubernetes") Expect(err).NotTo(HaveOccurred()) }) It("should serve a basic endpoint from pods [Conformance]", func() { // TODO: use the ServiceTestJig here serviceName := "endpoint-test2" ns := f.Namespace.Name labels := map[string]string{ "foo": "bar", "baz": "blah", } By("creating service " + serviceName + " in namespace " + ns) defer func() { err := c.Services(ns).Delete(serviceName) Expect(err).NotTo(HaveOccurred()) }() service := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, }, Spec: api.ServiceSpec{ Selector: labels, Ports: []api.ServicePort{{ Port: 80, TargetPort: intstr.FromInt(80), }}, }, } _, err := c.Services(ns).Create(service) Expect(err).NotTo(HaveOccurred()) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) names := map[string]bool{} defer func() { for name := range names { err := c.Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } }() name1 := "pod1" name2 := "pod2" createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}}) names[name1] = true validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}}) createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}}) names[name2] = true validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}}) deletePodOrFail(c, ns, name1) delete(names, name1) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}}) deletePodOrFail(c, ns, name2) delete(names, name2) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) }) It("should serve multiport endpoints from pods [Conformance]", func() { // TODO: use the ServiceTestJig here // repacking functionality is intentionally not tested here - it's better to test it in an integration test. 
		serviceName := "multi-endpoint-test"
		ns := f.Namespace.Name

		defer func() {
			err := c.Services(ns).Delete(serviceName)
			Expect(err).NotTo(HaveOccurred())
		}()

		labels := map[string]string{"foo": "bar"}

		svc1port := "svc1"
		svc2port := "svc2"

		By("creating service " + serviceName + " in namespace " + ns)
		service := &api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: serviceName,
			},
			Spec: api.ServiceSpec{
				Selector: labels,
				Ports: []api.ServicePort{
					{
						Name:       "portname1",
						Port:       80,
						TargetPort: intstr.FromString(svc1port),
					},
					{
						Name:       "portname2",
						Port:       81,
						TargetPort: intstr.FromString(svc2port),
					},
				},
			},
		}
		_, err := c.Services(ns).Create(service)
		Expect(err).NotTo(HaveOccurred())
		port1 := 100
		port2 := 101
		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})

		names := map[string]bool{}
		defer func() {
			for name := range names {
				err := c.Pods(ns).Delete(name, nil)
				Expect(err).NotTo(HaveOccurred())
			}
		}()

		containerPorts1 := []api.ContainerPort{
			{
				Name:          svc1port,
				ContainerPort: port1,
			},
		}
		containerPorts2 := []api.ContainerPort{
			{
				Name:          svc2port,
				ContainerPort: port2,
			},
		}

		podname1 := "pod1"
		podname2 := "pod2"

		createPodOrFail(c, ns, podname1, labels, containerPorts1)
		names[podname1] = true
		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}})

		createPodOrFail(c, ns, podname2, labels, containerPorts2)
		names[podname2] = true
		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}})

		deletePodOrFail(c, ns, podname1)
		delete(names, podname1)
		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}})

		deletePodOrFail(c, ns, podname2)
		delete(names, podname2)
		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})
	})

	It("should be able to up and down services", func() {
		// TODO: use the ServiceTestJig here
		// this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP
		SkipUnlessProviderIs(providersWithSSH...)
		ns := f.Namespace.Name
		numPods, servicePort := 3, 80

		By("creating service1 in namespace " + ns)
		podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
		Expect(err).NotTo(HaveOccurred())
		By("creating service2 in namespace " + ns)
		podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
		Expect(err).NotTo(HaveOccurred())

		hosts, err := NodeSSHHosts(c)
		Expect(err).NotTo(HaveOccurred())
		if len(hosts) == 0 {
			Failf("No ssh-able nodes")
		}
		host := hosts[0]

		By("verifying service1 is up")
		expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
		By("verifying service2 is up")
		expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))

		// Stop service 1 and make sure it is gone.
		By("stopping service1")
		expectNoError(stopServeHostnameService(c, ns, "service1"))

		By("verifying service1 is not up")
		expectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort))
		By("verifying service2 is still up")
		expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))

		// Start another service and verify both are up.
By("creating service3 in namespace " + ns) podNames3, svc3IP, err := startServeHostnameService(c, ns, "service3", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc2IP == svc3IP { Failf("service IPs conflict: %v", svc2IP) } By("verifying service2 is still up") expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("verifying service3 is up") expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames3, svc3IP, servicePort)) }) It("should work after restarting kube-proxy [Disruptive]", func() { // TODO: use the ServiceTestJig here SkipUnlessProviderIs("gce", "gke") ns := f.Namespace.Name numPods, servicePort := 3, 80 svc1 := "service1" svc2 := "service2" defer func() { expectNoError(stopServeHostnameService(c, ns, svc1)) }() podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) defer func() { expectNoError(stopServeHostnameService(c, ns, svc2)) }() podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { Failf("VIPs conflict: %v", svc1IP) } hosts, err := NodeSSHHosts(c) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { Failf("No ssh-able nodes") } host := hosts[0] expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("Restarting kube-proxy") if err := restartKubeProxy(host); err != nil { Failf("error restarting kube-proxy: %v", err) } expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("Removing iptable rules") result, err := SSH(` sudo iptables -t nat -F KUBE-SERVICES || true; sudo iptables -t nat -F KUBE-PORTALS-HOST || true; sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) Failf("couldn't remove iptable rules: %v", err) } expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) }) It("should work after restarting apiserver [Disruptive]", func() { // TODO: use the ServiceTestJig here // TODO: restartApiserver doesn't work in GKE - fix it and reenable this test. SkipUnlessProviderIs("gce") ns := f.Namespace.Name numPods, servicePort := 3, 80 defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }() podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) hosts, err := NodeSSHHosts(c) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { Failf("No ssh-able nodes") } host := hosts[0] expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver if err := restartApiserver(); err != nil { Failf("error restarting apiserver: %v", err) } if err := waitForApiserverUp(c); err != nil { Failf("error while waiting for apiserver up: %v", err) } expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) // Create a new service and check if it's not reusing IP. 
		defer func() { expectNoError(stopServeHostnameService(c, ns, "service2")) }()
		podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
		Expect(err).NotTo(HaveOccurred())

		if svc1IP == svc2IP {
			Failf("VIPs conflict: %v", svc1IP)
		}
		expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
		expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort))
	})

	// TODO: Run this test against the userspace proxy and nodes
	// configured with a default deny firewall to validate that the
	// proxy whitelists NodePort traffic.
	It("should be able to create a functioning NodePort service", func() {
		serviceName := "nodeport-test"
		ns := f.Namespace.Name

		jig := NewServiceTestJig(c, serviceName)
		nodeIP := pickNodeIP(jig.Client) // for later

		By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
		service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) {
			svc.Spec.Type = api.ServiceTypeNodePort
		})
		jig.SanityCheckService(service, api.ServiceTypeNodePort)
		nodePort := service.Spec.Ports[0].NodePort

		By("creating pod to be part of service " + serviceName)
		jig.RunOrFail(ns, nil)

		By("hitting the pod through the service's NodePort")
		jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout)

		By("verifying the node port is locked")
		hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")
		// Even if the node-ip:node-port check above passed, this hostexec pod
		// might fall on a node with a laggy kube-proxy.
		cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort)
		stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
		if err != nil {
			Failf("expected node port %d to be in use, stdout: %v", nodePort, stdout)
		}
	})

	It("should be able to change the type and ports of a service", func() {
		// requires cloud load-balancer support
		SkipUnlessProviderIs("gce", "gke", "aws")

		// This test is more monolithic than we'd like because LB turnup can be
		// very slow, so we lumped all the tests into one LB lifecycle.

		serviceName := "mutability-test"
		ns1 := f.Namespace.Name // LB1 in ns1 on TCP
		Logf("namespace for TCP test: %s", ns1)

		By("creating a second namespace")
		namespacePtr, err := createTestingNS("services", c, nil)
		Expect(err).NotTo(HaveOccurred())
		ns2 := namespacePtr.Name // LB2 in ns2 on UDP
		Logf("namespace for UDP test: %s", ns2)
		extraNamespaces = append(extraNamespaces, ns2)

		jig := NewServiceTestJig(c, serviceName)
		nodeIP := pickNodeIP(jig.Client) // for later

		// Test TCP and UDP Services.  Services with the same name in different
		// namespaces should get different node ports and load balancers.
By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) tcpService := jig.CreateTCPServiceOrFail(ns1, nil) jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP) By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) udpService := jig.CreateUDPServiceOrFail(ns2, nil) jig.SanityCheckService(udpService, api.ServiceTypeClusterIP) By("verifying that TCP and UDP use the same port") if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { Failf("expected to use the same port for TCP and UDP") } svcPort := tcpService.Spec.Ports[0].Port Logf("service port (TCP and UDP): %d", svcPort) By("creating a pod to be part of the TCP service " + serviceName) jig.RunOrFail(ns1, nil) By("creating a pod to be part of the UDP service " + serviceName) jig.RunOrFail(ns2, nil) // Change the services to NodePort. By("changing the TCP service " + serviceName + " to type=NodePort") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeNodePort }) jig.SanityCheckService(tcpService, api.ServiceTypeNodePort) tcpNodePort := tcpService.Spec.Ports[0].NodePort Logf("TCP node port: %d", tcpNodePort) By("changing the UDP service " + serviceName + " to type=NodePort") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeNodePort }) jig.SanityCheckService(udpService, api.ServiceTypeNodePort) udpNodePort := udpService.Spec.Ports[0].NodePort Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) // Change the services to LoadBalancer. requestedIP := "" if providerIs("gce", "gke") { By("creating a static load balancer IP") rand.Seed(time.Now().UTC().UnixNano()) staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535)) requestedIP, err = createGCEStaticIP(staticIPName) Expect(err).NotTo(HaveOccurred()) defer func() { // Release GCE static IP - this is not kube-managed and will not be automatically released. 
				deleteGCEStaticIP(staticIPName)
			}()
			Logf("Allocated static load balancer IP: %s", requestedIP)
		}

		By("changing the TCP service " + serviceName + " to type=LoadBalancer")
		tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
			s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
			s.Spec.Type = api.ServiceTypeLoadBalancer
		})

		By("changing the UDP service " + serviceName + " to type=LoadBalancer")
		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
			s.Spec.Type = api.ServiceTypeLoadBalancer
		})

		By("waiting for the TCP service " + serviceName + " to have a load balancer")
		// Wait for the load balancer to be created asynchronously
		tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name)
		jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
		if tcpService.Spec.Ports[0].NodePort != tcpNodePort {
			Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
		}
		if requestedIP != "" && getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
			Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
		}
		tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
		Logf("TCP load balancer: %s", tcpIngressIP)

		By("waiting for the UDP service " + serviceName + " to have a load balancer")
		// 2nd one should be faster since they ran in parallel.
		udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
		if udpService.Spec.Ports[0].NodePort != udpNodePort {
			Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
		}
		udpIngressIP := getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
		Logf("UDP load balancer: %s", udpIngressIP)

		By("verifying that TCP and UDP use different load balancers")
		if tcpIngressIP == udpIngressIP {
			Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
		}

		By("hitting the TCP service's NodePort")
		jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)

		By("hitting the UDP service's NodePort")
		jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout)

		By("hitting the TCP service's LoadBalancer")
		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)

		By("hitting the UDP service's LoadBalancer")
		jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)

		// Change the services' node ports.
By("changing the TCP service's " + serviceName + " NodePort") tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort) jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer) tcpNodePortOld := tcpNodePort tcpNodePort = tcpService.Spec.Ports[0].NodePort if tcpNodePort == tcpNodePortOld { Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) } if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } Logf("TCP node port: %d", tcpNodePort) By("changing the UDP service's " + serviceName + " NodePort") udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort) jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer) udpNodePortOld := udpNodePort udpNodePort = udpService.Spec.Ports[0].NodePort if udpNodePort == udpNodePortOld { Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) } if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's new NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("hitting the UDP service's new NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) By("checking the old TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, kubeProxyLagTimeout) By("checking the old UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePortOld, kubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) // Change the services' main ports. 
By("changing the TCP service's port") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) { s.Spec.Ports[0].Port++ }) jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer) svcPortOld := svcPort svcPort = tcpService.Spec.Ports[0].Port if svcPort == svcPortOld { Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) } if tcpService.Spec.Ports[0].NodePort != tcpNodePort { Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) } if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } Logf("service port (TCP and UDP): %d", svcPort) By("changing the UDP service's port") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { s.Spec.Ports[0].Port++ }) jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer) if udpService.Spec.Ports[0].Port != svcPort { Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) } if udpService.Spec.Ports[0].NodePort != udpNodePort { Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) } if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB) // Change the services back to ClusterIP. 
By("changing TCP service " + serviceName + " back to type=ClusterIP") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 }) // Wait for the load balancer to be destroyed asynchronously tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns1, tcpService.Name, tcpIngressIP, svcPort) jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP) By("changing UDP service " + serviceName + " back to type=ClusterIP") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 }) // Wait for the load balancer to be destroyed asynchronously udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort) jig.SanityCheckService(udpService, api.ServiceTypeClusterIP) By("checking the TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("checking the UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) By("checking the TCP LoadBalancer is closed") jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) By("checking the UDP LoadBalancer is closed") jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) }) It("should prevent NodePort collisions", func() { // TODO: use the ServiceTestJig here baseName := "nodeport-collision-" serviceName1 := baseName + "1" serviceName2 := baseName + "2" ns := f.Namespace.Name t := NewServerTest(c, ns, serviceName1) defer func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { Failf("errors in cleanup: %v", errs) } }() By("creating service " + serviceName1 + " with type NodePort in namespace " + ns) service := t.BuildServiceSpec() service.Spec.Type = api.ServiceTypeNodePort result, err := t.CreateService(service) Expect(err).NotTo(HaveOccurred()) if result.Spec.Type != api.ServiceTypeNodePort { Failf("got unexpected Spec.Type for new service: %v", result) } if len(result.Spec.Ports) != 1 { Failf("got unexpected len(Spec.Ports) for new service: %v", result) } port := result.Spec.Ports[0] if port.NodePort == 0 { Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result) } By("creating service " + serviceName2 + " with conflicting NodePort") service2 := t.BuildServiceSpec() service2.Name = serviceName2 service2.Spec.Type = api.ServiceTypeNodePort service2.Spec.Ports[0].NodePort = port.NodePort result2, err := t.CreateService(service2) if err == nil { Failf("Created service with conflicting NodePort: %v", result2) } expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort) Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) By("deleting service " + serviceName1 + " to release NodePort") err = t.DeleteService(serviceName1) Expect(err).NotTo(HaveOccurred()) By("creating service " + serviceName2 + " with no-longer-conflicting NodePort") _, err = t.CreateService(service2) Expect(err).NotTo(HaveOccurred()) }) It("should check NodePort out-of-range", func() { // TODO: use the ServiceTestJig here serviceName := "nodeport-range-test" ns := f.Namespace.Name t := NewServerTest(c, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { Failf("errors in cleanup: %v", errs) } }() service := t.BuildServiceSpec() service.Spec.Type = api.ServiceTypeNodePort By("creating service " + serviceName + " with type NodePort in namespace " + ns) service, err := t.CreateService(service) 
		Expect(err).NotTo(HaveOccurred())

		if service.Spec.Type != api.ServiceTypeNodePort {
			Failf("got unexpected Spec.Type for new service: %v", service)
		}
		if len(service.Spec.Ports) != 1 {
			Failf("got unexpected len(Spec.Ports) for new service: %v", service)
		}
		port := service.Spec.Ports[0]
		if port.NodePort == 0 {
			Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
		}
		if !ServiceNodePortRange.Contains(port.NodePort) {
			Failf("got unexpected (out-of-range) port for new service: %v", service)
		}

		outOfRangeNodePort := 0
		for {
			outOfRangeNodePort = 1 + rand.Intn(65535)
			if !ServiceNodePortRange.Contains(outOfRangeNodePort) {
				break
			}
		}
		By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
		result, err := updateService(c, ns, serviceName, func(s *api.Service) {
			s.Spec.Ports[0].NodePort = outOfRangeNodePort
		})
		if err == nil {
			Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
		}
		expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort)
		Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr))

		By("deleting original service " + serviceName)
		err = t.DeleteService(serviceName)
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort))
		service = t.BuildServiceSpec()
		service.Spec.Type = api.ServiceTypeNodePort
		service.Spec.Ports[0].NodePort = outOfRangeNodePort
		service, err = t.CreateService(service)
		if err == nil {
			Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
		}
		Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr))
	})

	It("should release NodePorts on delete", func() {
		// TODO: use the ServiceTestJig here
		serviceName := "nodeport-reuse"
		ns := f.Namespace.Name

		t := NewServerTest(c, ns, serviceName)
		defer func() {
			defer GinkgoRecover()
			errs := t.Cleanup()
			if len(errs) != 0 {
				Failf("errors in cleanup: %v", errs)
			}
		}()

		service := t.BuildServiceSpec()
		service.Spec.Type = api.ServiceTypeNodePort

		By("creating service " + serviceName + " with type NodePort in namespace " + ns)
		service, err := t.CreateService(service)
		Expect(err).NotTo(HaveOccurred())

		if service.Spec.Type != api.ServiceTypeNodePort {
			Failf("got unexpected Spec.Type for new service: %v", service)
		}
		if len(service.Spec.Ports) != 1 {
			Failf("got unexpected len(Spec.Ports) for new service: %v", service)
		}
		port := service.Spec.Ports[0]
		if port.NodePort == 0 {
			Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
		}
		if !ServiceNodePortRange.Contains(port.NodePort) {
			Failf("got unexpected (out-of-range) port for new service: %v", service)
		}
		nodePort := port.NodePort

		By("deleting original service " + serviceName)
		err = t.DeleteService(serviceName)
		Expect(err).NotTo(HaveOccurred())

		hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")
		cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
		var stdout string
		if pollErr := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) {
			var err error
			stdout, err = RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
			if err != nil {
				Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
				return false, nil
			}
			return true, nil
		}); pollErr != nil {
			Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, kubeProxyLagTimeout, stdout)
		}

		By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))
		service = t.BuildServiceSpec()
		service.Spec.Type = api.ServiceTypeNodePort
		service.Spec.Ports[0].NodePort = nodePort
		service, err = t.CreateService(service)
		Expect(err).NotTo(HaveOccurred())
	})
})

// updateService fetches a service, calls the update function on it,
// and then attempts to send the updated service. It retries up to 3
// times in the face of timeouts and conflicts.
func updateService(c *client.Client, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) {
	var service *api.Service
	var err error
	for i := 0; i < 3; i++ {
		service, err = c.Services(namespace).Get(serviceName)
		if err != nil {
			return service, err
		}

		update(service)

		service, err = c.Services(namespace).Update(service)

		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			return service, err
		}
	}
	return service, err
}

func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
	m := PortsByPodUID{}
	for _, ss := range endpoints.Subsets {
		for _, port := range ss.Ports {
			for _, addr := range ss.Addresses {
				containerPort := port.Port
				hostPort := port.Port

				// use endpoint annotations to recover the container port in a Mesos setup
				// compare contrib/mesos/pkg/service/endpoints_controller.syncService
				key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
				mesosContainerPortString := endpoints.Annotations[key]
				if mesosContainerPortString != "" {
					var err error
					containerPort, err = strconv.Atoi(mesosContainerPortString)
					if err != nil {
						continue
					}
					Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
				}

				// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
				if _, ok := m[addr.TargetRef.UID]; !ok {
					m[addr.TargetRef.UID] = make([]int, 0)
				}
				m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], containerPort)
			}
		}
	}
	return m
}

type PortsByPodName map[string][]int
type PortsByPodUID map[types.UID][]int

func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
	portsByUID := make(PortsByPodUID)

	for name, portList := range expectedEndpoints {
		pod, err := c.Pods(ns).Get(name)
		if err != nil {
			Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
		}
		portsByUID[pod.ObjectMeta.UID] = portList
	}
	// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
	return portsByUID
}

func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) {
	if len(endpoints) != len(expectedEndpoints) {
		// should not happen because we check this condition before
		Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints)
	}
	for podUID := range expectedEndpoints {
		if _, ok := endpoints[podUID]; !ok {
			Failf("endpoint %v not found", podUID)
		}
		if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) {
			Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
		}
		sort.Ints(endpoints[podUID])
		sort.Ints(expectedEndpoints[podUID])
		for index := range endpoints[podUID] {
			if endpoints[podUID][index] != expectedEndpoints[podUID][index] {
				Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
			}
		}
	}
}

func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
	By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
	i := 1
	for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) {
		endpoints, err := c.Endpoints(namespace).Get(serviceName)
		if err != nil {
			Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
			continue
		}
		// Logf("Found endpoints %v", endpoints)

		portsByPodUID := getContainerPortsByPodUID(endpoints)
		// Logf("Found port by pod UID %v", portsByPodUID)

		expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
		if len(portsByPodUID) == len(expectedEndpoints) {
			validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
			Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, time.Since(start))
			return
		}

		if i%5 == 0 {
			Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
		}
		i++
	}

	if pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil {
		for _, pod := range pods.Items {
			Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
		}
	} else {
		Logf("Can't list pod debug info: %v", err)
	}
	Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout)
}

// createExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
func createExecPodOrFail(c *client.Client, ns, name string) {
	Logf("Creating new exec pod")
	immediate := int64(0)
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: api.PodSpec{
			TerminationGracePeriodSeconds: &immediate,
			Containers: []api.Container{
				{
					Name:    "exec",
					Image:   "gcr.io/google_containers/busybox",
					Command: []string{"sh", "-c", "while true; do sleep 5; done"},
				},
			},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	Expect(err).NotTo(HaveOccurred())
	err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
		retrievedPod, err := c.Pods(pod.Namespace).Get(pod.Name)
		if err != nil {
			return false, nil
		}
		return retrievedPod.Status.Phase == api.PodRunning, nil
	})
	Expect(err).NotTo(HaveOccurred())
}

func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
	By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: "gcr.io/google_containers/pause:2.0",
					Ports: containerPorts,
				},
			},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	Expect(err).NotTo(HaveOccurred())
}

func deletePodOrFail(c *client.Client, ns, name string) {
	By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns))
	err := c.Pods(ns).Delete(name, nil)
	Expect(err).NotTo(HaveOccurred())
}

func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
	ips := []string{}
	for i := range nodes.Items {
		item := &nodes.Items[i]
		for j := range item.Status.Addresses {
			nodeAddress := &item.Status.Addresses[j]
			if nodeAddress.Type == addressType {
				ips = append(ips, nodeAddress.Address)
			}
		}
	}
	return ips
}

func getNodePublicIps(c *client.Client) ([]string, error) {
	nodes := ListSchedulableNodesOrDie(c)

	ips := collectAddresses(nodes, api.NodeExternalIP)
	if len(ips) == 0 {
		ips = collectAddresses(nodes, api.NodeLegacyHostIP)
	}
	return ips, nil
}

func pickNodeIP(c *client.Client) string {
	publicIps, err := getNodePublicIps(c)
	Expect(err).NotTo(HaveOccurred())
	if len(publicIps) == 0 {
		Failf("got unexpected number (%d) of public IPs", len(publicIps))
	}
	ip := publicIps[0]
	return ip
}

func testReachableHTTP(ip string, port int, request string, expect string) (bool, error) {
	url := fmt.Sprintf("http://%s:%d%s", ip, port, request)
	if ip == "" {
		Failf("Got empty IP for reachability check (%s)", url)
		return false, nil
	}
	if port == 0 {
		Failf("Got port==0 for reachability check (%s)", url)
		return false, nil
	}

	Logf("Testing HTTP reachability of %v", url)

	resp, err := httpGetNoConnectionPool(url)
	if err != nil {
		Logf("Got error testing for reachability of %s: %v", url, err)
		return false, nil
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		Logf("Got error reading response from %s: %v", url, err)
		return false, nil
	}
	if resp.StatusCode != 200 {
		return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", resp.Status, url, string(body))
	}
	if !strings.Contains(string(body), expect) {
		return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body))
	}
	Logf("Successfully reached %v", url)
	return true, nil
}

func testNotReachableHTTP(ip string, port int) (bool, error) {
	url := fmt.Sprintf("http://%s:%d", ip, port)
	if ip == "" {
		Failf("Got empty IP for non-reachability check (%s)", url)
		return false, nil
	}
	if port == 0 {
		Failf("Got port==0 for non-reachability check (%s)", url)
		return false, nil
	}
Logf("Testing HTTP non-reachability of %v", url) resp, err := httpGetNoConnectionPool(url) if err != nil { Logf("Confirmed that %s is not reachable", url) return true, nil } resp.Body.Close() return false, nil } func testReachableUDP(ip string, port int, request string, expect string) (bool, error) { uri := fmt.Sprintf("udp://%s:%d", ip, port) if ip == "" { Failf("Got empty IP for reachability check (%s)", uri) return false, nil } if port == 0 { Failf("Got port==0 for reachability check (%s)", uri) return false, nil } Logf("Testing UDP reachability of %v", uri) con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) if err != nil { return false, fmt.Errorf("Failed to dial %s:%d: %v", ip, port, err) } _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) if err != nil { return false, fmt.Errorf("Failed to send request: %v", err) } var buf []byte = make([]byte, len(expect)+1) err = con.SetDeadline(time.Now().Add(3 * time.Second)) if err != nil { return false, fmt.Errorf("Failed to set deadline: %v", err) } _, err = con.Read(buf) if err != nil { return false, nil } if !strings.Contains(string(buf), expect) { return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf)) } Logf("Successfully reached %v", uri) return true, nil } func testNotReachableUDP(ip string, port int, request string) (bool, error) { uri := fmt.Sprintf("udp://%s:%d", ip, port) if ip == "" { Failf("Got empty IP for reachability check (%s)", uri) return false, nil } if port == 0 { Failf("Got port==0 for reachability check (%s)", uri) return false, nil } Logf("Testing UDP non-reachability of %v", uri) con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) if err != nil { Logf("Confirmed that %s is not reachable", uri) return true, nil } _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) if err != nil { Logf("Confirmed that %s is not reachable", uri) return true, nil } var buf []byte = make([]byte, 1) err = con.SetDeadline(time.Now().Add(3 * time.Second)) if err != nil { return false, fmt.Errorf("Failed to set deadline: %v", err) } _, err = con.Read(buf) if err != nil { Logf("Confirmed that %s is not reachable", uri) return true, nil } return false, nil } // Creates a replication controller that serves its hostname and a service on top of it. 
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
	podNames := make([]string, replicas)

	By("creating service " + name + " in namespace " + ns)
	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(9376),
				Protocol:   "TCP",
			}},
			Selector: map[string]string{
				"name": name,
			},
		},
	})
	if err != nil {
		return podNames, "", err
	}

	var createdPods []*api.Pod
	maxContainerFailures := 0
	config := RCConfig{
		Client:               c,
		Image:                "gcr.io/google_containers/serve_hostname:1.1",
		Name:                 name,
		Namespace:            ns,
		PollInterval:         3 * time.Second,
		Timeout:              podReadyBeforeTimeout,
		Replicas:             replicas,
		CreatedPods:          &createdPods,
		MaxContainerFailures: &maxContainerFailures,
	}
	err = RunRC(config)
	if err != nil {
		return podNames, "", err
	}

	if len(createdPods) != replicas {
		return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods))
	}

	for i := range createdPods {
		podNames[i] = createdPods[i].ObjectMeta.Name
	}
	sort.StringSlice(podNames).Sort()

	service, err := c.Services(ns).Get(name)
	if err != nil {
		return podNames, "", err
	}
	if service.Spec.ClusterIP == "" {
		return podNames, "", fmt.Errorf("Service IP is blank for %v", name)
	}
	serviceIP := service.Spec.ClusterIP
	return podNames, serviceIP, nil
}

func stopServeHostnameService(c *client.Client, ns, name string) error {
	if err := DeleteRC(c, ns, name); err != nil {
		return err
	}
	if err := c.Services(ns).Delete(name); err != nil {
		return err
	}
	return nil
}

// verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the
// given host and from within a pod. The host is expected to be an SSH-able node
// in the cluster. Each pod in the service is expected to echo its name. These
// names are compared with the given expectedPods list after a sort | uniq.
func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
	execPodName := "execpod"
	createExecPodOrFail(c, ns, execPodName)
	defer func() {
		deletePodOrFail(c, ns, execPodName)
	}()
	// Loop a bunch of times - the proxy is randomized, so we want a good
	// chance of hitting each backend at least once.
	buildCommand := func(wget string) string {
		return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s:%d 2>&1 || true; echo; done",
			50*len(expectedPods), wget, serviceIP, servicePort)
	}
	commands := []func() string{
		// verify service from node
		func() string {
			cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
			Logf("Executing cmd %q on host %v", cmd, host)
			result, err := SSH(cmd, host, testContext.Provider)
			if err != nil || result.Code != 0 {
				LogSSHResult(result)
				Logf("error while SSH-ing to node: %v", err)
			}
			return result.Stdout
		},
		// verify service from pod
		func() string {
			cmd := buildCommand("wget -q -T 1 -O -")
			Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName)
			// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
			output, err := RunHostCmd(ns, execPodName, cmd)
			if err != nil {
				Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output)
			}
			return output
		},
	}
	sort.StringSlice(expectedPods).Sort()
	By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
	for _, cmdFunc := range commands {
		passed := false
		gotPods := []string{}
		// Retry cmdFunc for a while
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
			pods := strings.Split(strings.TrimSpace(cmdFunc()), "\n")
			// Uniq pods before the sort because inserting them into a set
			// (which is implemented using dicts) can re-order them.
			gotPods = sets.NewString(pods...).List()
			if api.Semantic.DeepEqual(gotPods, expectedPods) {
				passed = true
				break
			}
			Logf("Waiting for expected pods for %s: %v, got: %v", serviceIP, expectedPods, gotPods)
		}
		if !passed {
			return fmt.Errorf("service verification failed for: %s, expected %v, got %v", serviceIP, expectedPods, gotPods)
		}
	}
	return nil
}

func verifyServeHostnameServiceDown(c *client.Client, host string, serviceIP string, servicePort int) error {
	command := fmt.Sprintf(
		"curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort)

	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
		result, err := SSH(command, host, testContext.Provider)
		if err != nil {
			LogSSHResult(result)
			Logf("error while SSH-ing to node: %v", err)
		}
		if result.Code != 99 {
			return nil
		}
		Logf("service still alive - still waiting")
	}
	return fmt.Errorf("waiting for service to be down timed out")
}

// Does an HTTP GET, but does not reuse TCP connections
// This masks problems where the iptables rule has changed, but we don't see it
// This is intended for relatively quick requests (status checks), so we set a short (5 seconds) timeout
func httpGetNoConnectionPool(url string) (*http.Response, error) {
	tr := &http.Transport{
		DisableKeepAlives: true,
	}
	client := &http.Client{
		Transport: tr,
		Timeout:   5 * time.Second,
	}

	return client.Get(url)
}

// A test jig to help testing.
type ServiceTestJig struct {
	ID     string
	Name   string
	Client *client.Client
	Labels map[string]string
}

// NewServiceTestJig allocates and inits a new ServiceTestJig.
func NewServiceTestJig(client *client.Client, name string) *ServiceTestJig {
	j := &ServiceTestJig{}
	j.Client = client
	j.Name = name
	j.ID = j.Name + "-" + string(util.NewUUID())
	j.Labels = map[string]string{"testid": j.ID}

	return j
}

// newServiceTemplate returns the default api.Service template for this jig, but
// does not actually create the Service.  The default Service has the same name
// as the jig and exposes port 80.
func (j *ServiceTestJig) newServiceTemplate(namespace string, proto api.Protocol) *api.Service {
	service := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Namespace: namespace,
			Name:      j.Name,
			Labels:    j.Labels,
		},
		Spec: api.ServiceSpec{
			Selector: j.Labels,
			Ports: []api.ServicePort{
				{
					Protocol: proto,
					Port:     80,
				},
			},
		},
	}
	return service
}

// CreateTCPServiceOrFail creates a new TCP Service based on the jig's
// defaults.  Callers can provide a function to tweak the Service object before
// it is created.
func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service {
	svc := j.newServiceTemplate(namespace, api.ProtocolTCP)
	if tweak != nil {
		tweak(svc)
	}
	result, err := j.Client.Services(namespace).Create(svc)
	if err != nil {
		Failf("Failed to create TCP Service %q: %v", svc.Name, err)
	}
	return result
}

// CreateUDPServiceOrFail creates a new UDP Service based on the jig's
// defaults.  Callers can provide a function to tweak the Service object before
// it is created.
func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service {
	svc := j.newServiceTemplate(namespace, api.ProtocolUDP)
	if tweak != nil {
		tweak(svc)
	}
	result, err := j.Client.Services(namespace).Create(svc)
	if err != nil {
		Failf("Failed to create UDP Service %q: %v", svc.Name, err)
	}
	return result
}

func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.ServiceType) {
	if svc.Spec.Type != svcType {
		Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
	}
	expectNodePorts := false
	if svcType != api.ServiceTypeClusterIP {
		expectNodePorts = true
	}
	for i, port := range svc.Spec.Ports {
		hasNodePort := (port.NodePort != 0)
		if hasNodePort != expectNodePorts {
			Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
		}
		if hasNodePort {
			if !ServiceNodePortRange.Contains(port.NodePort) {
				Failf("out-of-range nodePort (%d) for service", port.NodePort)
			}
		}
	}
	expectIngress := false
	if svcType == api.ServiceTypeLoadBalancer {
		expectIngress = true
	}
	hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0
	if hasIngress != expectIngress {
		Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress))
	}
	if hasIngress {
		for i, ing := range svc.Status.LoadBalancer.Ingress {
			if ing.IP == "" && ing.Hostname == "" {
				Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing)
			}
		}
	}
}

// UpdateService fetches a service, calls the update function on it, and
// then attempts to send the updated service.  It tries up to 3 times in the
// face of timeouts and conflicts.
func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.Service)) (*api.Service, error) {
	for i := 0; i < 3; i++ {
		service, err := j.Client.Services(namespace).Get(name)
		if err != nil {
			return nil, fmt.Errorf("Failed to get Service %q: %v", name, err)
		}
		update(service)
		service, err = j.Client.Services(namespace).Update(service)
		if err == nil {
			return service, nil
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			return nil, fmt.Errorf("Failed to update Service %q: %v", name, err)
		}
	}
	return nil, fmt.Errorf("Too many retries updating Service %q", name)
}

// UpdateServiceOrFail fetches a service, calls the update function on it, and
// then attempts to send the updated service.  It tries up to 3 times in the
// face of timeouts and conflicts.
func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*api.Service)) *api.Service {
	svc, err := j.UpdateService(namespace, name, update)
	if err != nil {
		Failf(err.Error())
	}
	return svc
}

func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *api.Service {
	var err error
	var service *api.Service
	for i := 1; i < ServiceNodePortRange.Size; i++ {
		offs1 := initial - ServiceNodePortRange.Base
		offs2 := (offs1 + i) % ServiceNodePortRange.Size
		newPort := ServiceNodePortRange.Base + offs2
		service, err = j.UpdateService(namespace, name, func(s *api.Service) {
			s.Spec.Ports[0].NodePort = newPort
		})
		if err != nil && strings.Contains(err.Error(), "provided port is already allocated") {
			Logf("tried nodePort %d, but it is in use, will try another", newPort)
			continue
		}
		// Otherwise err was nil or err was a real error
		break
	}
	if err != nil {
		Failf("Could not change the nodePort: %v", err)
	}
	return service
}

func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string) *api.Service {
	var service *api.Service
	Logf("Waiting up to %v for service %q to have a LoadBalancer", loadBalancerCreateTimeout, name)
	pollFunc := func() (bool, error) {
		svc, err := j.Client.Services(namespace).Get(name)
		if err != nil {
			return false, err
		}
		if len(svc.Status.LoadBalancer.Ingress) > 0 {
			service = svc
			return true, nil
		}
		return false, nil
	}
	if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil {
		Failf("Timeout waiting for service %q to have a load balancer", name)
	}
	return service
}

func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int) *api.Service {
	// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
	defer func() {
		if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil {
			Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err)
		}
	}()

	var service *api.Service
	Logf("Waiting up to %v for service %q to have no LoadBalancer", loadBalancerCreateTimeout, name)
	pollFunc := func() (bool, error) {
		svc, err := j.Client.Services(namespace).Get(name)
		if err != nil {
			return false, err
		}
		if len(svc.Status.LoadBalancer.Ingress) == 0 {
			service = svc
			return true, nil
		}
		return false, nil
	}
	if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil {
		Failf("Timeout waiting for service %q to have no load balancer", name)
	}
	return service
}

func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) {
	if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableHTTP(host, port, "/echo?msg=hello", "hello") }); err != nil {
		Failf("Could not reach HTTP service through %v:%v after %v: %v", host, port, timeout, err)
	}
}

func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) {
	if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableHTTP(host, port) }); err != nil {
		Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err)
	}
}

func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) {
	if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableUDP(host, port, "echo hello", "hello") }); err != nil {
		Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
	}
}

func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) {
	if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableUDP(host, port, "echo hello") }); err != nil {
		Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
	}
}

func getIngressPoint(ing *api.LoadBalancerIngress) string {
	host := ing.IP
	if host == "" {
		host = ing.Hostname
	}
	return host
}

// newRCTemplate returns the default api.ReplicationController object for
// this jig, but does not actually create the RC.  The default RC has the same
// name as the jig and runs the "netexec" container.
func (j *ServiceTestJig) newRCTemplate(namespace string) *api.ReplicationController {
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Namespace: namespace,
			Name:      j.Name,
			Labels:    j.Labels,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: 1,
			Selector: j.Labels,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: j.Labels,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "netexec",
							Image: "gcr.io/google_containers/netexec:1.4",
							Args:  []string{"--http-port=80", "--udp-port=80"},
							ReadinessProbe: &api.Probe{
								PeriodSeconds: 3,
								Handler: api.Handler{
									HTTPGet: &api.HTTPGetAction{
										Port: intstr.FromInt(80),
										Path: "/hostName",
									},
								},
							},
						},
					},
					TerminationGracePeriodSeconds: new(int64),
				},
			},
		},
	}
	return rc
}

// RunOrFail creates a ReplicationController and Pod(s) and waits for the
// Pod(s) to be running. Callers can provide a function to tweak the RC object
// before it is created.
func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.ReplicationController)) *api.ReplicationController {
	rc := j.newRCTemplate(namespace)
	if tweak != nil {
		tweak(rc)
	}
	result, err := j.Client.ReplicationControllers(namespace).Create(rc)
	if err != nil {
		Failf("Failed to created RC %q: %v", rc.Name, err)
	}
	pods, err := j.waitForPodsCreated(namespace, rc.Spec.Replicas)
	if err != nil {
		Failf("Failed to create pods: %v", err)
	}
	if err := j.waitForPodsReady(namespace, pods); err != nil {
		Failf("Failed waiting for pods to be running: %v", err)
	}
	return result
}

func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(j.Labels)) Logf("Waiting up to %v for %d pods to be created", timeout, replicas) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { options := api.ListOptions{LabelSelector: label} pods, err := j.Client.Pods(namespace).List(options) if err != nil { return nil, err } found := []string{} for _, pod := range pods.Items { if pod.DeletionTimestamp != nil { continue } found = append(found, pod.Name) } if len(found) == replicas { Logf("Found all %d pods", replicas) return found, nil } Logf("Found %d/%d pods - will retry", len(found), replicas) } return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas) } func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { timeout := 2 * time.Minute if !checkPodsRunningReady(j.Client, namespace, pods, timeout) { return fmt.Errorf("Timeout waiting for %d pods to be ready") } return nil } // Simple helper class to avoid too much boilerplate in tests type ServiceTestFixture struct { ServiceName string Namespace string Client *client.Client TestId string Labels map[string]string rcs map[string]bool services map[string]bool name string image string } func NewServerTest(client *client.Client, namespace string, serviceName string) *ServiceTestFixture { t := &ServiceTestFixture{} t.Client = client t.Namespace = namespace t.ServiceName = serviceName t.TestId = t.ServiceName + "-" + string(util.NewUUID()) t.Labels = map[string]string{ "testid": t.TestId, } t.rcs = make(map[string]bool) t.services = make(map[string]bool) t.name = "webserver" t.image = "gcr.io/google_containers/test-webserver" return t } // Build default config for a service (which can then be changed) func (t *ServiceTestFixture) BuildServiceSpec() *api.Service { service := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: t.ServiceName, Namespace: t.Namespace, }, Spec: api.ServiceSpec{ Selector: t.Labels, Ports: []api.ServicePort{{ Port: 80, TargetPort: intstr.FromInt(80), }}, }, } return service } // CreateWebserverRC creates rc-backed pods with the well-known webserver // configuration and records it for cleanup. func (t *ServiceTestFixture) CreateWebserverRC(replicas int) *api.ReplicationController { rcSpec := rcByNamePort(t.name, replicas, t.image, 80, api.ProtocolTCP, t.Labels) rcAct, err := t.createRC(rcSpec) if err != nil { Failf("Failed to create rc %s: %v", rcSpec.Name, err) } if err := verifyPods(t.Client, t.Namespace, t.name, false, replicas); err != nil { Failf("Failed to create %d pods with name %s: %v", replicas, t.name, err) } return rcAct } // createRC creates a replication controller and records it for cleanup. 
func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.ReplicationController, error) { rc, err := t.Client.ReplicationControllers(t.Namespace).Create(rc) if err == nil { t.rcs[rc.Name] = true } return rc, err } // Create a service, and record it for cleanup func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, error) { result, err := t.Client.Services(t.Namespace).Create(service) if err == nil { t.services[service.Name] = true } return result, err } // Delete a service, and remove it from the cleanup list func (t *ServiceTestFixture) DeleteService(serviceName string) error { err := t.Client.Services(t.Namespace).Delete(serviceName) if err == nil { delete(t.services, serviceName) } return err } func (t *ServiceTestFixture) Cleanup() []error { var errs []error for rcName := range t.rcs { By("stopping RC " + rcName + " in namespace " + t.Namespace) // First, resize the RC to 0. old, err := t.Client.ReplicationControllers(t.Namespace).Get(rcName) if err != nil { errs = append(errs, err) } old.Spec.Replicas = 0 if _, err := t.Client.ReplicationControllers(t.Namespace).Update(old); err != nil { errs = append(errs, err) } // TODO(mikedanese): Wait. // Then, delete the RC altogether. if err := t.Client.ReplicationControllers(t.Namespace).Delete(rcName); err != nil { errs = append(errs, err) } } for serviceName := range t.services { By("deleting service " + serviceName + " in namespace " + t.Namespace) err := t.Client.Services(t.Namespace).Delete(serviceName) if err != nil { errs = append(errs, err) } } return errs }
dcbw/kubernetes
test/e2e/service.go
Go
apache-2.0
63,318
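UpdateService above is an instance of the general optimistic-concurrency pattern: re-fetch the object, apply the mutation, and retry only on conflict or server timeout. A minimal sketch of the same pattern in Java, against a hypothetical Store interface (Store, ConflictException, and fetch/save are illustrative names, not part of the Kubernetes client):

import java.util.function.Consumer;

// Hypothetical store abstraction for illustrating the retry-on-conflict
// pattern used by ServiceTestJig.UpdateService.
interface Store<T> {
    T fetch(String name) throws Exception;
    T save(T obj) throws Exception; // may throw ConflictException on concurrent update
}

class ConflictException extends Exception {}

final class RetryUpdate {
    // Re-fetch the latest object, apply the mutation, and retry only when
    // the save fails because another writer updated the object concurrently.
    static <T> T update(Store<T> store, String name, Consumer<T> mutate) throws Exception {
        for (int attempt = 0; attempt < 3; attempt++) {
            T obj = store.fetch(name); // always start from the latest version
            mutate.accept(obj);
            try {
                return store.save(obj);
            } catch (ConflictException e) {
                // another writer won the race; loop and retry with a fresh copy
            }
        }
        throw new Exception("Too many retries updating " + name);
    }
}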
/*
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.

Author: Leonardo de Moura
*/
#include "library/vm/vm.h"

namespace lean {
static void del_instr_at(unsigned pc, buffer<vm_instr> & code) {
    code.erase(pc);
    // we must adjust pc of other instructions
    for (unsigned i = 0; i < code.size(); i++) {
        vm_instr & c = code[i];
        for (unsigned j = 0; j < c.get_num_pcs(); j++) {
            if (c.get_pc(j) > pc)
                c.set_pc(j, c.get_pc(j) - 1);
        }
    }
}

typedef rb_tree<unsigned, unsigned_cmp> addr_set;

/* Collect addresses in addr_set that are goto/branching targets */
static void collect_targets(buffer<vm_instr> & code, addr_set & r) {
    for (auto c : code) {
        for (unsigned j = 0; j < c.get_num_pcs(); j++)
            r.insert(c.get_pc(j));
    }
}

/** \brief Applies the following transformation
    ...
    pc:   drop n
    pc+1: drop m
    ...
    ===>
    ...
    pc:   drop n+m
    ...
*/
static void compress_drop_drop(buffer<vm_instr> & code) {
    if (code.empty())
        return;
    addr_set targets;
    collect_targets(code, targets);
    unsigned i = code.size() - 1;
    while (i > 0) {
        --i;
        if (code[i].op() == opcode::Drop && code[i+1].op() == opcode::Drop &&
            /* If i+1 is a goto/branch target, then we should not merge the two Drops */
            !targets.contains(i+1)) {
            code[i] = mk_drop_instr(code[i].get_num() + code[i+1].get_num());
            del_instr_at(i+1, code);
        }
    }
}

/** \brief Applies the following transformation
    pc_1 : goto pc_2
    ...
    pc_2 : ret
    ===>
    pc_1 : ret
    ...
    pc_2 : ret
*/
static void compress_goto_ret(buffer<vm_instr> & code) {
    unsigned i = code.size();
    while (i > 0) {
        --i;
        if (code[i].op() == opcode::Goto) {
            unsigned pc = code[i].get_goto_pc();
            if (code[pc].op() == opcode::Ret) {
                code[i] = mk_ret_instr();
            }
        }
    }
}

void optimize(environment const &, buffer<vm_instr> & code) {
    compress_goto_ret(code);
    compress_drop_drop(code);
}
}
fgdorais/lean
src/library/vm/optimize.cpp
C++
apache-2.0
2,195
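Both VM passes above are peephole rewrites over a flat instruction buffer; the delicate point is that two adjacent Drops may only be merged when the second is not a branch target, and the high-to-low scan keeps the pre-computed target set usable even as instructions are deleted. A sketch of the same drop-merging idea in Java, over a deliberately simplified instruction model (Instr, Kind, and the field names are illustrative, not the Lean VM's types):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Deliberately simplified instruction model for illustration only.
enum Kind { DROP, GOTO, RET, OTHER }

final class Instr {
    final Kind kind;
    int num;    // for DROP: how many stack cells to pop
    int target; // for GOTO: destination pc
    Instr(Kind kind, int num, int target) { this.kind = kind; this.num = num; this.target = target; }
}

final class Peephole {
    // Merge adjacent "drop n; drop m" into "drop n+m", unless the second
    // drop is a branch target (merging would change what the jump lands on).
    static void compressDropDrop(List<Instr> code) {
        Set<Integer> targets = new HashSet<>();
        for (Instr c : code) {
            if (c.kind == Kind.GOTO) targets.add(c.target);
        }
        // Scan high-to-low so deletions never invalidate the target
        // addresses still to be checked (they are all below i+1).
        for (int i = code.size() - 2; i >= 0; i--) {
            if (code.get(i).kind == Kind.DROP && code.get(i + 1).kind == Kind.DROP
                    && !targets.contains(i + 1)) {
                code.get(i).num += code.get(i + 1).num;
                code.remove(i + 1);
                // Mirror del_instr_at: shift every jump target above the
                // deleted slot down by one.
                for (Instr c : code) {
                    if (c.kind == Kind.GOTO && c.target > i + 1) c.target--;
                }
            }
        }
    }

    public static void main(String[] args) {
        List<Instr> code = new ArrayList<>(List.of(
                new Instr(Kind.DROP, 1, 0),
                new Instr(Kind.DROP, 2, 0),
                new Instr(Kind.RET, 0, 0)));
        compressDropDrop(code);
        System.out.println(code.size() + " instrs, first drops " + code.get(0).num); // 2 instrs, first drops 3
    }
}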
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.android.desugar;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;

import com.google.common.collect.ImmutableList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
import org.objectweb.asm.ClassReader;
import org.objectweb.asm.ClassVisitor;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;
import org.objectweb.asm.Type;
import org.objectweb.asm.tree.AbstractInsnNode;
import org.objectweb.asm.tree.InsnList;
import org.objectweb.asm.tree.InsnNode;
import org.objectweb.asm.tree.MethodInsnNode;
import org.objectweb.asm.tree.MethodNode;

/**
 * Fixer of classes that extend interfaces with default methods to declare any missing methods
 * explicitly and call the corresponding companion method generated by {@link InterfaceDesugaring}.
 */
public class DefaultMethodClassFixer extends ClassVisitor {

  private final ClassReaderFactory classpath;
  private final ClassReaderFactory bootclasspath;
  private final ClassLoader targetLoader;
  private final DependencyCollector depsCollector;
  private final HashSet<String> instanceMethods = new HashSet<>();

  private boolean isInterface;
  private String internalName;
  private ImmutableList<String> directInterfaces;
  private String superName;

  /** This method node caches <clinit>, and flushes out in {@code visitEnd()}; */
  private MethodNode clInitMethodNode;

  public DefaultMethodClassFixer(
      ClassVisitor dest,
      ClassReaderFactory classpath,
      DependencyCollector depsCollector,
      ClassReaderFactory bootclasspath,
      ClassLoader targetLoader) {
    super(Opcodes.ASM5, dest);
    this.classpath = classpath;
    this.bootclasspath = bootclasspath;
    this.targetLoader = targetLoader;
    this.depsCollector = depsCollector;
  }

  @Override
  public void visit(
      int version,
      int access,
      String name,
      String signature,
      String superName,
      String[] interfaces) {
    checkState(this.directInterfaces == null);
    isInterface = BitFlags.isSet(access, Opcodes.ACC_INTERFACE);
    internalName = name;
    checkArgument(
        superName != null || "java/lang/Object".equals(name), // ASM promises this
        "Type without superclass: %s",
        name);
    this.directInterfaces = ImmutableList.copyOf(interfaces);
    this.superName = superName;
    super.visit(version, access, name, signature, superName, interfaces);
  }

  @Override
  public void visitEnd() {
    if (!isInterface && defaultMethodsDefined(directInterfaces)) {
      // Inherited methods take precedence over default methods, so visit all superclasses and
      // figure out what methods they declare before stubbing in any missing default methods.
      recordInheritedMethods();
      stubMissingDefaultAndBridgeMethods();
      // Check whether there are interfaces with default methods and <clinit>. If yes, the
      // following method call will return a list of interface fields to access in the <clinit>
      // to trigger the initialization of these interfaces.
      ImmutableList<String> companionsToTriggerInterfaceClinit =
          collectOrderedCompanionsToTriggerInterfaceClinit(directInterfaces);
      if (!companionsToTriggerInterfaceClinit.isEmpty()) {
        if (clInitMethodNode == null) {
          clInitMethodNode = new MethodNode(Opcodes.ACC_STATIC, "<clinit>", "()V", null, null);
        }
        desugarClinitToTriggerInterfaceInitializers(companionsToTriggerInterfaceClinit);
      }
    }
    if (clInitMethodNode != null && super.cv != null) {
      // Write <clinit> to the chained visitor.
      clInitMethodNode.accept(super.cv);
    }
    super.visitEnd();
  }

  private boolean isClinitAlreadyDesugared(
      ImmutableList<String> companionsToAccessToTriggerInterfaceClinit) {
    InsnList instructions = clInitMethodNode.instructions;
    if (instructions.size() <= companionsToAccessToTriggerInterfaceClinit.size()) {
      // The <clinit> must end with RETURN, so if the instruction count is less than or equal to
      // the companion class count, this <clinit> has not been desugared.
      return false;
    }
    Iterator<AbstractInsnNode> iterator = instructions.iterator();
    for (String companion : companionsToAccessToTriggerInterfaceClinit) {
      if (!iterator.hasNext()) {
        return false;
      }
      AbstractInsnNode first = iterator.next();
      if (!(first instanceof MethodInsnNode)) {
        return false;
      }
      MethodInsnNode methodInsnNode = (MethodInsnNode) first;
      if (methodInsnNode.getOpcode() != Opcodes.INVOKESTATIC
          || !methodInsnNode.owner.equals(companion)
          || !methodInsnNode.name.equals(
              InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_NAME)) {
        return false;
      }
      checkState(
          methodInsnNode.desc.equals(
              InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_DESC),
          "Inconsistent method desc: %s vs %s",
          methodInsnNode.desc,
          InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_DESC);
      if (!iterator.hasNext()) {
        return false;
      }
      AbstractInsnNode second = iterator.next();
      if (second.getOpcode() != Opcodes.POP) {
        return false;
      }
    }
    return true;
  }

  private void desugarClinitToTriggerInterfaceInitializers(
      ImmutableList<String> companionsToTriggerInterfaceClinit) {
    if (isClinitAlreadyDesugared(companionsToTriggerInterfaceClinit)) {
      return;
    }
    InsnList desugarInsts = new InsnList();
    for (String companionClass : companionsToTriggerInterfaceClinit) {
      desugarInsts.add(
          new MethodInsnNode(
              Opcodes.INVOKESTATIC,
              companionClass,
              InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_NAME,
              InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_DESC,
              false));
    }
    if (clInitMethodNode.instructions.size() == 0) {
      clInitMethodNode.instructions.insert(new InsnNode(Opcodes.RETURN));
    }
    clInitMethodNode.instructions.insertBefore(
        clInitMethodNode.instructions.getFirst(), desugarInsts);
  }

  @Override
  public MethodVisitor visitMethod(
      int access, String name, String desc, String signature, String[] exceptions) {
    // Keep track of instance methods implemented in this class for later.
    if (!isInterface) {
      recordIfInstanceMethod(access, name, desc);
    }
    if ("<clinit>".equals(name)) {
      checkState(clInitMethodNode == null, "This class fixer has already been used.");
      clInitMethodNode = new MethodNode(access, name, desc, signature, exceptions);
      return clInitMethodNode;
    }
    return super.visitMethod(access, name, desc, signature, exceptions);
  }

  private void stubMissingDefaultAndBridgeMethods() {
    TreeSet<Class<?>> allInterfaces = new TreeSet<>(InterfaceComparator.INSTANCE);
    for (String direct : directInterfaces) {
      // Loading ensures all transitively implemented interfaces can be loaded, which is necessary
      // to produce correct default method stubs in all cases. We could do without classloading but
      // it's convenient to rely on Class.isAssignableFrom to compute subtype relationships, and
      // we'd still have to insist that all transitively implemented interfaces can be loaded.
      // We don't load the visited class, however, in case it's a generated lambda class.
      Class<?> itf = loadFromInternal(direct);
      collectInterfaces(itf, allInterfaces);
    }

    Class<?> superclass = loadFromInternal(superName);
    for (Class<?> interfaceToVisit : allInterfaces) {
      // if J extends I, J is allowed to redefine I's default methods. The comparator we used
      // above makes sure we visit J before I in that case so we can use J's definition.
      if (superclass != null && interfaceToVisit.isAssignableFrom(superclass)) {
        // superclass already implements this interface, so we must skip it. The superclass will
        // be similarly rewritten or comes from the bootclasspath; either way we don't need to and
        // shouldn't stub default methods for this interface.
        continue;
      }
      stubMissingDefaultAndBridgeMethods(interfaceToVisit.getName().replace('.', '/'));
    }
  }

  private Class<?> loadFromInternal(String internalName) {
    try {
      return targetLoader.loadClass(internalName.replace('/', '.'));
    } catch (ClassNotFoundException e) {
      throw new IllegalStateException(
          "Couldn't load " + internalName + ", is the classpath complete?", e);
    }
  }

  private void collectInterfaces(Class<?> itf, Set<Class<?>> dest) {
    checkArgument(itf.isInterface());
    if (!dest.add(itf)) {
      return;
    }
    for (Class<?> implemented : itf.getInterfaces()) {
      collectInterfaces(implemented, dest);
    }
  }

  private void recordInheritedMethods() {
    InstanceMethodRecorder recorder = new InstanceMethodRecorder();
    String internalName = superName;
    while (internalName != null) {
      ClassReader bytecode = bootclasspath.readIfKnown(internalName);
      if (bytecode == null) {
        bytecode =
            checkNotNull(
                classpath.readIfKnown(internalName), "Superclass not found: %s", internalName);
      }
      bytecode.accept(recorder, ClassReader.SKIP_CODE | ClassReader.SKIP_DEBUG);
      internalName = bytecode.getSuperName();
    }
  }

  private void recordIfInstanceMethod(int access, String name, String desc) {
    if (BitFlags.noneSet(access, Opcodes.ACC_STATIC)) {
      // Record all declared instance methods, including abstract, bridge, and native methods, as
      // they all take precedence over default methods.
      instanceMethods.add(name + ":" + desc);
    }
  }

  /**
   * Starting from the given interfaces, this method scans the interface hierarchy, finds the
   * interfaces that have default methods and <clinit>, and returns the companion class names of
   * these interfaces.
   *
   * <p>Note that the returned companion classes are ordered in the order of the interface
   * initialization, which is consistent with the JVM behavior. For example, "class A implements
   * I1, I2", the returned list would be [I1$$CC, I2$$CC], not [I2$$CC, I1$$CC].
   */
  private ImmutableList<String> collectOrderedCompanionsToTriggerInterfaceClinit(
      ImmutableList<String> interfaces) {
    ImmutableList.Builder<String> companionCollector = ImmutableList.builder();
    HashSet<String> visitedInterfaces = new HashSet<>();
    for (String anInterface : interfaces) {
      collectOrderedCompanionsToTriggerInterfaceClinit(
          anInterface, visitedInterfaces, companionCollector);
    }
    return companionCollector.build();
  }

  private void collectOrderedCompanionsToTriggerInterfaceClinit(
      String anInterface,
      HashSet<String> visitedInterfaces,
      ImmutableList.Builder<String> companionCollector) {
    if (!visitedInterfaces.add(anInterface)) {
      return;
    }
    ClassReader bytecode = classpath.readIfKnown(anInterface);
    if (bytecode == null || bootclasspath.isKnown(anInterface)) {
      return;
    }
    String[] parentInterfaces = bytecode.getInterfaces();
    if (parentInterfaces != null && parentInterfaces.length > 0) {
      for (String parentInterface : parentInterfaces) {
        collectOrderedCompanionsToTriggerInterfaceClinit(
            parentInterface, visitedInterfaces, companionCollector);
      }
    }
    InterfaceInitializationNecessityDetector necessityDetector =
        new InterfaceInitializationNecessityDetector(bytecode.getClassName());
    bytecode.accept(necessityDetector, ClassReader.SKIP_DEBUG);
    if (necessityDetector.needsToInitialize()) {
      // If we need to initialize this interface, we initialize its companion class, and its
      // companion class will initialize the interface then. This design decision is made to
      // avoid access issues, e.g., with package-private interfaces.
      companionCollector.add(InterfaceDesugaring.getCompanionClassName(anInterface));
    }
  }

  /**
   * Recursively searches the given interfaces for default methods not implemented by this class
   * directly. If this method returns true we need to think about stubbing missing default methods.
   */
  private boolean defaultMethodsDefined(ImmutableList<String> interfaces) {
    for (String implemented : interfaces) {
      if (bootclasspath.isKnown(implemented)) {
        continue;
      }
      ClassReader bytecode = classpath.readIfKnown(implemented);
      if (bytecode == null) {
        // Interface isn't on the classpath, which indicates incomplete classpaths. Record missing
        // dependency so we can check it later. If we don't check then we may get runtime failures
        // or wrong behavior from default methods that should've been stubbed in.
        // TODO(kmb): Print a warning so people can start fixing their deps?
        depsCollector.missingImplementedInterface(internalName, implemented);
      } else {
        // Class in classpath and bootclasspath is a bad idea but in any event, assume the
        // bootclasspath will take precedence like in a classloader.
        // We can skip code attributes as we just need to find default methods to stub.
        DefaultMethodFinder finder = new DefaultMethodFinder();
        bytecode.accept(finder, ClassReader.SKIP_CODE | ClassReader.SKIP_DEBUG);
        if (finder.foundDefaultMethods()) {
          return true;
        }
      }
    }
    return false;
  }

  /** Returns {@code true} for non-bridge default methods not in {@link #instanceMethods}. */
  private boolean shouldStubAsDefaultMethod(int access, String name, String desc) {
    // Ignore private methods, which technically aren't default methods and can only be called from
    // other methods defined in the interface. This also ignores lambda body methods, which is fine
    // as we don't want or need to stub those. Also ignore bridge methods as javac adds them to
    // concrete classes as needed anyway and we handle them separately for generated lambda
    // classes.
    // Note that an exception is that, if a bridge method is for a default interface method, javac
    // will NOT generate the bridge method in the implementing class. So we need extra logic to
    // handle these bridge methods.
    return isNonBridgeDefaultMethod(access) && !instanceMethods.contains(name + ":" + desc);
  }

  private static boolean isNonBridgeDefaultMethod(int access) {
    return BitFlags.noneSet(
        access,
        Opcodes.ACC_ABSTRACT | Opcodes.ACC_STATIC | Opcodes.ACC_BRIDGE | Opcodes.ACC_PRIVATE);
  }

  /**
   * Check whether an interface method is a bridge method for a default interface method. This type
   * of bridge method is special, as they are not put in the implementing classes by javac.
   */
  private boolean shouldStubAsBridgeDefaultMethod(int access, String name, String desc) {
    return BitFlags.isSet(access, Opcodes.ACC_BRIDGE | Opcodes.ACC_PUBLIC)
        && BitFlags.noneSet(access, Opcodes.ACC_ABSTRACT | Opcodes.ACC_STATIC)
        && !instanceMethods.contains(name + ":" + desc);
  }

  private void stubMissingDefaultAndBridgeMethods(String implemented) {
    if (bootclasspath.isKnown(implemented)) {
      // Default methods on the bootclasspath will be available at runtime, so just ignore them.
      return;
    }
    ClassReader bytecode =
        checkNotNull(
            classpath.readIfKnown(implemented),
            "Couldn't find interface %s implemented by %s",
            implemented,
            internalName);
    bytecode.accept(new DefaultMethodStubber(), ClassReader.SKIP_DEBUG);
  }

  /**
   * Visitor for interfaces that produces delegates in the class visited by the outer {@link
   * DefaultMethodClassFixer} for every default method encountered.
   */
  private class DefaultMethodStubber extends ClassVisitor {

    private String stubbedInterfaceName;

    public DefaultMethodStubber() {
      super(Opcodes.ASM5);
    }

    @Override
    public void visit(
        int version,
        int access,
        String name,
        String signature,
        String superName,
        String[] interfaces) {
      checkArgument(BitFlags.isSet(access, Opcodes.ACC_INTERFACE));
      checkState(stubbedInterfaceName == null);
      stubbedInterfaceName = name;
    }

    @Override
    public MethodVisitor visitMethod(
        int access, String name, String desc, String signature, String[] exceptions) {
      if (shouldStubAsDefaultMethod(access, name, desc)) {
        // Remember we stubbed this method in case it's also defined by subsequently visited
        // interfaces. javac would force the method to be defined explicitly if any two
        // definitions conflict, but see stubMissingDefaultMethods() for how we deal with default
        // methods redefined in interfaces extending another.
        recordIfInstanceMethod(access, name, desc);
        depsCollector.assumeCompanionClass(
            internalName, InterfaceDesugaring.getCompanionClassName(stubbedInterfaceName));

        // Add this method to the class we're desugaring and stub in a body to call the default
        // implementation in the interface's companion class. ijar omits these methods when the
        // ACC_SYNTHETIC modifier is set, so don't set it.
        // Signatures can be wrong, e.g., when type variables are introduced, instantiated, or
        // refined in the class we're processing, so drop them.
        MethodVisitor stubMethod =
            DefaultMethodClassFixer.this.visitMethod(access, name, desc, (String) null, exceptions);

        int slot = 0;
        stubMethod.visitVarInsn(Opcodes.ALOAD, slot++); // load the receiver
        Type neededType = Type.getMethodType(desc);
        for (Type arg : neededType.getArgumentTypes()) {
          stubMethod.visitVarInsn(arg.getOpcode(Opcodes.ILOAD), slot);
          slot += arg.getSize();
        }
        stubMethod.visitMethodInsn(
            Opcodes.INVOKESTATIC,
            InterfaceDesugaring.getCompanionClassName(stubbedInterfaceName),
            name,
            InterfaceDesugaring.companionDefaultMethodDescriptor(stubbedInterfaceName, desc),
            /*itf*/ false);
        stubMethod.visitInsn(neededType.getReturnType().getOpcode(Opcodes.IRETURN));

        stubMethod.visitMaxs(0, 0); // rely on class writer to compute these
        stubMethod.visitEnd();
        return null;
      } else if (shouldStubAsBridgeDefaultMethod(access, name, desc)) {
        recordIfInstanceMethod(access, name, desc);
        // For bridges we just copy their bodies instead of going through the companion class.
        // Meanwhile, we also need to desugar the copied method bodies, so that any calls to
        // interface methods are correctly handled.
        return new InterfaceDesugaring.InterfaceInvocationRewriter(
            DefaultMethodClassFixer.this.visitMethod(access, name, desc, (String) null, exceptions),
            stubbedInterfaceName,
            bootclasspath,
            depsCollector,
            internalName);
      } else {
        return null; // we don't care about the actual code in these methods
      }
    }
  }

  /**
   * Visitor for interfaces that recursively searches interfaces for default method declarations.
   */
  private class DefaultMethodFinder extends ClassVisitor {
    @SuppressWarnings("hiding")
    private ImmutableList<String> interfaces;

    private boolean found;

    public DefaultMethodFinder() {
      super(Opcodes.ASM5);
    }

    @Override
    public void visit(
        int version,
        int access,
        String name,
        String signature,
        String superName,
        String[] interfaces) {
      checkArgument(BitFlags.isSet(access, Opcodes.ACC_INTERFACE));
      checkState(this.interfaces == null);
      this.interfaces = ImmutableList.copyOf(interfaces);
    }

    public boolean foundDefaultMethods() {
      return found;
    }

    @Override
    public void visitEnd() {
      if (!found) {
        found = defaultMethodsDefined(this.interfaces);
      }
    }

    @Override
    public MethodVisitor visitMethod(
        int access, String name, String desc, String signature, String[] exceptions) {
      if (!found && shouldStubAsDefaultMethod(access, name, desc)) {
        // Found a default method we're not ignoring (instanceMethods at this point contains
        // methods the top-level visited class implements itself).
        found = true;
      }
      return null; // we don't care about the actual code in these methods
    }
  }

  private class InstanceMethodRecorder extends ClassVisitor {

    public InstanceMethodRecorder() {
      super(Opcodes.ASM5);
    }

    @Override
    public void visit(
        int version,
        int access,
        String name,
        String signature,
        String superName,
        String[] interfaces) {
      checkArgument(BitFlags.noneSet(access, Opcodes.ACC_INTERFACE));
      super.visit(version, access, name, signature, superName, interfaces);
    }

    @Override
    public MethodVisitor visitMethod(
        int access, String name, String desc, String signature, String[] exceptions) {
      recordIfInstanceMethod(access, name, desc);
      return null;
    }
  }

  /**
   * Detector to determine whether an interface needs to be initialized when it is loaded.
   *
   * <p>If the interface has a default method, and its <clinit> initializes any of its fields, then
   * this interface needs to be initialized.
   */
  private static class InterfaceInitializationNecessityDetector extends ClassVisitor {

    private final String internalName;
    private boolean hasFieldInitializedInClinit;
    private boolean hasDefaultMethods;

    public InterfaceInitializationNecessityDetector(String internalName) {
      super(Opcodes.ASM5);
      this.internalName = internalName;
    }

    public boolean needsToInitialize() {
      return hasDefaultMethods && hasFieldInitializedInClinit;
    }

    @Override
    public void visit(
        int version,
        int access,
        String name,
        String signature,
        String superName,
        String[] interfaces) {
      super.visit(version, access, name, signature, superName, interfaces);
      checkState(
          internalName.equals(name),
          "Inconsistent internal names: expected=%s, real=%s",
          internalName,
          name);
      checkArgument(
          BitFlags.isSet(access, Opcodes.ACC_INTERFACE),
          "This class visitor is only used for interfaces.");
    }

    @Override
    public MethodVisitor visitMethod(
        int access, String name, String desc, String signature, String[] exceptions) {
      if (!hasDefaultMethods) {
        hasDefaultMethods = isNonBridgeDefaultMethod(access);
      }
      if ("<clinit>".equals(name)) {
        return new MethodVisitor(Opcodes.ASM5) {
          @Override
          public void visitFieldInsn(int opcode, String owner, String name, String desc) {
            if (opcode == Opcodes.PUTSTATIC && internalName.equals(owner)) {
              hasFieldInitializedInClinit = true;
            }
          }
        };
      }
      return null; // Do not care about the code.
    }
  }

  /** Comparator for interfaces that compares by whether interfaces extend one another. */
  enum InterfaceComparator implements Comparator<Class<?>> {
    INSTANCE;

    @Override
    public int compare(Class<?> o1, Class<?> o2) {
      checkArgument(o1.isInterface());
      checkArgument(o2.isInterface());
      if (o1 == o2) {
        return 0;
      }
      if (o1.isAssignableFrom(o2)) { // o1 is supertype of o2
        return 1; // we want o1 to come after o2
      }
      if (o2.isAssignableFrom(o1)) { // o2 is supertype of o1
        return -1; // we want o2 to come after o1
      }
      // o1 and o2 aren't comparable, so arbitrarily impose lexicographical ordering
      return o1.getName().compareTo(o2.getName());
    }
  }
}
damienmg/bazel
src/tools/android/java/com/google/devtools/build/android/desugar/DefaultMethodClassFixer.java
Java
apache-2.0
24,887
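The ALOAD/ILOAD/INVOKESTATIC/IRETURN sequence that DefaultMethodStubber emits is easier to read as its source-level equivalent. A hypothetical before/after sketch (MyItf, doIt, and the companion-class name below are illustrative; the real companion name comes from InterfaceDesugaring.getCompanionClassName):

// Before desugaring: the class simply inherits the default method.
interface MyItf {
    default int doIt(int x) { return x + 1; }
}
class Impl implements MyItf {}

// After desugaring (conceptually): the default body moves to a static method
// on a companion class, and the implementing class gets an explicit stub that
// loads `this` plus the arguments and delegates via invokestatic -- exactly
// what the emitted ALOAD/ILOAD/INVOKESTATIC/IRETURN sequence encodes.
interface MyItf_Desugared {
    int doIt(int x); // default body removed
}
class MyItf$$CC { // hypothetical companion class name
    static int doIt(MyItf_Desugared self, int x) { return x + 1; }
}
class Impl_Desugared implements MyItf_Desugared {
    public int doIt(int x) { return MyItf$$CC.doIt(this, x); }
}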
/*
 * Copyright (C) 2010 JFrog Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jfrog.hudson.util;

import org.apache.commons.lang.StringUtils;
import org.jfrog.hudson.ArtifactoryServer;
import org.jfrog.hudson.DeployerOverrider;
import org.jfrog.hudson.ResolverOverrider;
import org.jfrog.hudson.util.plugins.PluginsUtils;

/**
 * A utility class that helps find the preferred credentials to use out of each setting and server
 *
 * @author Noam Y. Tenne
 */
public abstract class CredentialManager {

    private CredentialManager() {
    }

    /**
     * Decides and returns the preferred deployment credentials to use from this builder's settings and the
     * selected server
     *
     * @param deployerOverrider Deploy-overriding capable builder
     * @param server            Selected Artifactory server
     * @return Preferred deployment credentials
     */
    public static Credentials getPreferredDeployer(DeployerOverrider deployerOverrider, ArtifactoryServer server) {
        if (deployerOverrider.isOverridingDefaultDeployer()) {
            String credentialsId = deployerOverrider.getDeployerCredentialsId();
            return PluginsUtils.credentialsLookup(credentialsId);
        }

        if (server != null) {
            Credentials deployerCredentials = server.getDeployerCredentials();
            if (deployerCredentials != null) {
                return deployerCredentials;
            }
        }

        return new Credentials(null, null);
    }

    public static Credentials getPreferredDeployer(String credentialsId, ArtifactoryServer server) {
        String username;
        String password;
        if (StringUtils.isBlank(credentialsId)) {
            Credentials deployerCredentials = server.getDeployerCredentials();
            username = deployerCredentials.getUsername();
            password = deployerCredentials.getPassword();
        } else {
            return PluginsUtils.credentialsLookup(credentialsId);
        }
        return new Credentials(username, password);
    }

    /**
     * Decides and returns the preferred resolver credentials to use from this builder's settings and the
     * selected server.
     * Override priority:
     * 1) Job-level override resolver
     * 2) Plugin-managed override resolver
     * 3) Plugin-managed general credentials
     *
     * @param resolverOverrider Resolve-overriding capable builder
     * @param server            Selected Artifactory server
     * @return Preferred resolver credentials
     */
    public static Credentials getPreferredResolver(ResolverOverrider resolverOverrider, ArtifactoryServer server) {
        if (resolverOverrider != null && resolverOverrider.isOverridingDefaultResolver()) {
            String credentialsId = resolverOverrider.getResolverCredentialsId();
            return PluginsUtils.credentialsLookup(credentialsId);
        }
        return server.getResolvingCredentials();
    }

    public static Credentials getPreferredResolver(String credentialsId, ArtifactoryServer server) {
        String username;
        String password;
        if (StringUtils.isBlank(credentialsId)) {
            Credentials resolvingCredentials = server.getResolvingCredentials();
            username = resolvingCredentials.getUsername();
            password = resolvingCredentials.getPassword();
        } else {
            return PluginsUtils.credentialsLookup(credentialsId);
        }
        return new Credentials(username, password);
    }
}
christ66/jenkins-artifactory-plugin
src/main/java/org/jfrog/hudson/util/CredentialManager.java
Java
apache-2.0
3,973
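The resolver lookup above applies a strict priority: a job-level override wins, otherwise the server's configured credentials apply. A stand-alone restatement of that priority in Java, using plain strings instead of the plugin's Credentials type (ResolverPriorityDemo and its parameter names are illustrative, not part of the plugin):

// Minimal sketch of the override priority implemented by getPreferredResolver.
final class ResolverPriorityDemo {
    static String pickResolverCredentials(
            boolean jobOverridesResolver,   // ResolverOverrider.isOverridingDefaultResolver()
            String jobCredentialsId,        // ResolverOverrider.getResolverCredentialsId()
            String serverCredentialsId) {   // ArtifactoryServer-level configuration
        // 1) A job-level override wins outright.
        if (jobOverridesResolver && jobCredentialsId != null && !jobCredentialsId.isEmpty()) {
            return jobCredentialsId;
        }
        // 2)-3) Otherwise fall back to whatever the server is configured with.
        return serverCredentialsId;
    }

    public static void main(String[] args) {
        System.out.println(pickResolverCredentials(true, "job-creds", "server-creds"));  // job-creds
        System.out.println(pickResolverCredentials(false, "job-creds", "server-creds")); // server-creds
    }
}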